| file_name (stringlengths 3–137) | prefix (stringlengths 0–918k) | suffix (stringlengths 0–962k) | middle (stringlengths 0–812k) |
|---|---|---|---|
csv_updaters.go
|
// Copyright 2018 The Operator-SDK Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package catalog
import (
"bytes"
"encoding/json"
goerrors "errors"
"fmt"
"sort"
"github.com/operator-framework/operator-sdk/internal/scaffold"
"github.com/operator-framework/operator-sdk/internal/scaffold/olm-catalog/descriptor"
"github.com/operator-framework/operator-sdk/pkg/k8sutil"
"github.com/ghodss/yaml"
olmapiv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1"
olminstall "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install"
log "github.com/sirupsen/logrus"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/version"
)
// csvUpdater is an interface for any data that can be in a CSV, which will be
// set to the corresponding field on apply().
type csvUpdater interface {
// apply applies a data update to a CSV argument.
apply(*olmapiv1alpha1.ClusterServiceVersion) error
}
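// Illustrative usage (a hedged sketch; roleManifestBytes is a hypothetical
// []byte holding a Role manifest, not defined in this file):
//
//	var u csvUpdater = roles{roleManifestBytes}
//	if err := u.apply(csv); err != nil {
//		// handle the update error
//	}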
// getCSVInstallStrategy returns the install strategy from csv.
func getCSVInstallStrategy(csv *olmapiv1alpha1.ClusterServiceVersion) (strategy olminstall.Strategy, err error)
|
// setCSVInstallStrategy sets csv's spec.install to strategy.
func setCSVInstallStrategy(csv *olmapiv1alpha1.ClusterServiceVersion, strategy olminstall.Strategy) error {
sb, err := json.Marshal(strategy)
if err != nil {
return err
}
csv.Spec.InstallStrategy.StrategySpecRaw = json.RawMessage(sb)
return nil
}
type roles [][]byte
var _ csvUpdater = roles{}
func (us roles) apply(csv *olmapiv1alpha1.ClusterServiceVersion) (err error) {
strategy, err := getCSVInstallStrategy(csv)
if err != nil {
return err
}
switch s := strategy.(type) {
case *olminstall.StrategyDetailsDeployment:
perms := []olminstall.StrategyDeploymentPermissions{}
for _, u := range us {
role := rbacv1.Role{}
if err := yaml.Unmarshal(u, &role); err != nil {
return err
}
perms = append(perms, olminstall.StrategyDeploymentPermissions{
ServiceAccountName: role.GetName(),
Rules: role.Rules,
})
}
s.Permissions = perms
}
if err = setCSVInstallStrategy(csv, strategy); err != nil {
return err
}
return nil
}
type clusterRoles [][]byte
var _ csvUpdater = clusterRoles{}
func (us clusterRoles) apply(csv *olmapiv1alpha1.ClusterServiceVersion) (err error) {
strategy, err := getCSVInstallStrategy(csv)
if err != nil {
return err
}
switch s := strategy.(type) {
case *olminstall.StrategyDetailsDeployment:
perms := []olminstall.StrategyDeploymentPermissions{}
for _, u := range us {
clusterRole := rbacv1.ClusterRole{}
if err := yaml.Unmarshal(u, &clusterRole); err != nil {
return err
}
perms = append(perms, olminstall.StrategyDeploymentPermissions{
ServiceAccountName: clusterRole.GetName(),
Rules: clusterRole.Rules,
})
}
s.ClusterPermissions = perms
}
if err = setCSVInstallStrategy(csv, strategy); err != nil {
return err
}
return nil
}
type deployments [][]byte
var _ csvUpdater = deployments{}
func (us deployments) apply(csv *olmapiv1alpha1.ClusterServiceVersion) (err error) {
strategy, err := getCSVInstallStrategy(csv)
if err != nil {
return err
}
switch s := strategy.(type) {
case *olminstall.StrategyDetailsDeployment:
depSpecs := []olminstall.StrategyDeploymentSpec{}
for _, u := range us {
dep := appsv1.Deployment{}
if err := yaml.Unmarshal(u, &dep); err != nil {
return err
}
setWatchNamespacesEnv(&dep)
// Make sure "olm.targetNamespaces" is referenced somewhere in dep,
// and emit a warning if not.
if !depHasOLMNamespaces(dep) {
log.Warnf(`No WATCH_NAMESPACE environment variable nor reference to "%s"`+
` detected in operator Deployment. For OLM compatibility, your operator`+
` MUST watch namespaces defined in "%s"`, olmTNMeta, olmTNMeta)
}
depSpecs = append(depSpecs, olminstall.StrategyDeploymentSpec{
Name: dep.GetName(),
Spec: dep.Spec,
})
}
s.DeploymentSpecs = depSpecs
}
if err = setCSVInstallStrategy(csv, strategy); err != nil {
return err
}
return nil
}
const olmTNMeta = "metadata.annotations['olm.targetNamespaces']"
// setWatchNamespacesEnv sets WATCH_NAMESPACE to olmTNMeta in dep if
// WATCH_NAMESPACE exists in a pod spec container's env list.
func setWatchNamespacesEnv(dep *appsv1.Deployment) {
envVar := newEnvVar(k8sutil.WatchNamespaceEnvVar, olmTNMeta)
overwriteContainerEnvVar(dep, k8sutil.WatchNamespaceEnvVar, envVar)
}
func overwriteContainerEnvVar(dep *appsv1.Deployment, name string, ev corev1.EnvVar) {
for _, c := range dep.Spec.Template.Spec.Containers {
for i := 0; i < len(c.Env); i++ {
if c.Env[i].Name == name {
c.Env[i] = ev
}
}
}
}
func newEnvVar(name, fieldPath string) corev1.EnvVar {
return corev1.EnvVar{
Name: name,
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: fieldPath,
},
},
}
}
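// For illustration, newEnvVar(k8sutil.WatchNamespaceEnvVar, olmTNMeta)
// builds an env var that serializes to roughly the following YAML in a
// container's env list (serialization shown for clarity):
//
//	- name: WATCH_NAMESPACE
//	  valueFrom:
//	    fieldRef:
//	      fieldPath: metadata.annotations['olm.targetNamespaces']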
// OLM places the set of target namespaces for the operator in
// "metadata.annotations['olm.targetNamespaces']". This value should be
// referenced in either:
// - The Deployment's pod spec WATCH_NAMESPACE env variable.
// - Some other Deployment pod spec field.
func depHasOLMNamespaces(dep appsv1.Deployment) bool {
b, err := dep.Spec.Template.Marshal()
if err != nil {
// Something is wrong with the deployment manifest, not with CLI inputs.
log.Fatalf("Marshal Deployment spec: %v", err)
}
return bytes.Contains(b, []byte(olmTNMeta))
}
type descSorter []olmapiv1alpha1.CRDDescription
func (descs descSorter) Len() int { return len(descs) }
func (descs descSorter) Less(i, j int) bool {
if descs[i].Name == descs[j].Name {
if descs[i].Kind == descs[j].Kind {
return version.CompareKubeAwareVersionStrings(descs[i].Version, descs[j].Version) > 0
}
return descs[i].Kind < descs[j].Kind
}
return descs[i].Name < descs[j].Name
}
func (descs descSorter) Swap(i, j int) { descs[i], descs[j] = descs[j], descs[i] }
type crds [][]byte
var _ csvUpdater = crds{}
// apply updates csv's "owned" CRDDescriptions. "required" CRDDescriptions are
// left as-is, since they are user-defined values.
// apply will only make a new spec.customresourcedefinitions.owned element for
// a type if an annotation is present on that type's declaration.
func (us crds) apply(csv *olmapiv1alpha1.ClusterServiceVersion) error {
ownedCRDs := []olmapiv1alpha1.CRDDescription{}
for _, u := range us {
crd := apiextv1beta1.CustomResourceDefinition{}
if err := yaml.Unmarshal(u, &crd); err != nil {
return err
}
for _, ver := range crd.Spec.Versions {
// Parse CRD descriptors from source code comments and annotations.
gvk := schema.GroupVersionKind{
Group: crd.Spec.Group,
Version: ver.Name,
Kind: crd.Spec.Names.Kind,
}
newCRDDesc, err := descriptor.GetCRDDescriptionForGVK(scaffold.ApisDir, gvk)
if err != nil {
if goerrors.Is(err, descriptor.ErrAPIDirNotExist) {
log.Infof("Directory for API %s does not exist. Skipping CSV annotation parsing for API.", gvk)
} else if goerrors.Is(err, descriptor.ErrAPITypeNotFound) {
log.Infof("No kind type found for API %s. Skipping CSV annotation parsing for API.", gvk)
} else {
return fmt.Errorf("failed to set CRD descriptors for %s: %v", gvk, err)
}
continue
}
// Only set the name if no error was returned.
newCRDDesc.Name = crd.GetName()
ownedCRDs = append(ownedCRDs, newCRDDesc)
}
}
csv.Spec.CustomResourceDefinitions.Owned = ownedCRDs
sort.Sort(descSorter(csv.Spec.CustomResourceDefinitions.Owned))
sort.Sort(descSorter(csv.Spec.CustomResourceDefinitions.Required))
return nil
}
type crs [][]byte
var _ csvUpdater = crs{}
func (us crs) apply(csv *olmapiv1alpha1.ClusterServiceVersion) error {
examples := []json.RawMessage{}
for _, u := range us {
crBytes, err := yaml.YAMLToJSON(u)
if err != nil {
return err
}
examples = append(examples, json.RawMessage(crBytes))
}
examplesJSON, err := json.Marshal(examples)
if err != nil {
return err
}
examplesJSON, err = prettifyJSON(examplesJSON)
if err != nil {
return err
}
if csv.GetAnnotations() == nil {
csv.SetAnnotations(make(map[string]string))
}
csv.GetAnnotations()["alm-examples"] = string(examplesJSON)
return nil
}
// prettifyJSON returns b indented in a human-readable JSON format.
func prettifyJSON(b []byte) ([]byte, error) {
var out bytes.Buffer
err := json.Indent(&out, b, "", " ")
return out.Bytes(), err
}
|
{
// Default to a deployment strategy if none found.
if len(csv.Spec.InstallStrategy.StrategySpecRaw) == 0 || csv.Spec.InstallStrategy.StrategyName == "" {
csv.Spec.InstallStrategy.StrategyName = olminstall.InstallStrategyNameDeployment
return &olminstall.StrategyDetailsDeployment{}, nil
}
return (&olminstall.StrategyResolver{}).UnmarshalStrategy(csv.Spec.InstallStrategy)
}
|
JobsContextMetaFilterType.js
|
/**
* Pydio Cells Rest API
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* OpenAPI spec version: 1.0
*
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/swagger-api/swagger-codegen.git
* Do not edit the class manually.
*
*/
import ApiClient from '../ApiClient';
/**
* Enum class JobsContextMetaFilterType.
* @enum {}
* @readonly
*/
export default class
|
{
/**
* value: "RequestMeta"
* @const
*/
RequestMeta = "RequestMeta";
/**
* value: "ContextUser"
* @const
*/
ContextUser = "ContextUser";
/**
* Returns a <code>JobsContextMetaFilterType</code> enum value from a Javascript object name.
* @param {Object} data The plain JavaScript object containing the name of the enum value.
* @return {module:model/JobsContextMetaFilterType} The enum <code>JobsContextMetaFilterType</code> value.
*/
static constructFromObject(object) {
return object;
}
}
|
JobsContextMetaFilterType
|
sketch.py
|
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""
Efficiently compute the approximate statistics over an SArray.
"""
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
from .._cython.cy_sketch import UnitySketchProxy
from .._cython.context import debug_trace as cython_context
from .sarray import SArray
from .sframe import SFrame
import operator
from math import sqrt
__all__ = ['Sketch']
class Sketch(object):
"""
The Sketch object contains a sketch of a single SArray (a column of an
SFrame). Using a sketch representation of an SArray, many approximate and
exact statistics can be computed very quickly.
To construct a Sketch object, the following methods are equivalent:
>>> my_sarray = turicreate.SArray([1,2,3,4,5])
>>> sketch = turicreate.Sketch(my_sarray)
>>> sketch = my_sarray.summary()
Typically, the SArray is a column of an SFrame:
>>> my_sframe = turicreate.SFrame({'column1': [1,2,3]})
>>> sketch = turicreate.Sketch(my_sframe['column1'])
>>> sketch = my_sframe['column1'].summary()
The sketch computation is fast, with complexity approximately linear in the
length of the SArray. After the Sketch is computed, all queryable functions
are performed nearly instantly.
A sketch can compute the following information depending on the dtype of the
SArray:
For numeric columns, the following information is provided exactly:
- length (:func:`~turicreate.Sketch.size`)
- number of missing values (:func:`~turicreate.Sketch.num_missing`)
- minimum value (:func:`~turicreate.Sketch.min`)
- maximum value (:func:`~turicreate.Sketch.max`)
- mean (:func:`~turicreate.Sketch.mean`)
- variance (:func:`~turicreate.Sketch.var`)
- standard deviation (:func:`~turicreate.Sketch.std`)
And the following information is provided approximately:
- number of unique values (:func:`~turicreate.Sketch.num_unique`)
- quantiles (:func:`~turicreate.Sketch.quantile`)
|
For non-numeric columns (str), the following information is provided exactly:
- length (:func:`~turicreate.Sketch.size`)
- number of missing values (:func:`~turicreate.Sketch.num_missing`)
And the following information is provided approximately:
- number of unique values (:func:`~turicreate.Sketch.num_unique`)
- frequent items (:func:`~turicreate.Sketch.frequent_items`)
- frequency count of any value (:func:`~turicreate.Sketch.frequency_count`)
For an SArray of type list or array, there is a sub sketch for all sub elements.
The sub sketch flattens all list/array values and then computes the sketch
summary over the flattened values. The element sub sketch may be retrieved through:
- element_summary(:func:`~turicreate.Sketch.element_summary`)
For SArray of type dict, there are sub sketches for both dict key and value.
The sub sketch may be retrieved through:
- dict_key_summary(:func:`~turicreate.Sketch.dict_key_summary`)
- dict_value_summary(:func:`~turicreate.Sketch.dict_value_summary`)
For an SArray of type dict, users can also pass a list of dictionary keys to the
summary function; this generates one sub sketch for each key.
For example:
>>> sa = turicreate.SArray([{'a':1, 'b':2}, {'a':3}])
>>> sketch = sa.summary(sub_sketch_keys=["a", "b"])
Then the sub summary may be retrieved by:
>>> sketch.element_sub_sketch()
or to get subset keys:
>>> sketch.element_sub_sketch(["a"])
Similarly, for an SArray of type vector (array), users can also pass a list of
integers, which are indices into the vector, to get sub sketches.
For example:
>>> sa = turicreate.SArray([[100,200,300,400,500], [100,200,300], [400,500]])
>>> sketch = sa.summary(sub_sketch_keys=[1,3,5])
Then the sub summary may be retrieved by:
>>> sketch.element_sub_sketch()
Or, for a subset of keys:
>>> sketch.element_sub_sketch([1,3])
Please see the individual function documentation for detail about each of
these statistics.
Parameters
----------
array : SArray
Array to generate sketch summary.
background : boolean
If True, the sketch construction will return immediately and the
sketch will be constructed in the background. While this is going on,
the sketch can be queried incrementally, but at a performance penalty.
Defaults to False.
References
----------
- Wikipedia. `Streaming algorithms. <http://en.wikipedia.org/wiki/Streaming_algorithm>`_
- Charikar, et al. (2002) `Finding frequent items in data streams.
<https://www.cs.rutgers.edu/~farach/pubs/FrequentStream.pdf>`_
- Cormode, G. and Muthukrishnan, S. (2004) `An Improved Data Stream Summary:
The Count-Min Sketch and its Applications.
<http://dimacs.rutgers.edu/~graham/pubs/papers/cm-latin.pdf>`_
"""
def __init__(self, array=None, background=False, sub_sketch_keys=[], _proxy=None):
"""__init__(array)
Construct a new Sketch from an SArray.
Parameters
----------
array : SArray
Array to sketch.
background : boolean, optional
If True, run the sketch in the background. The state of the sketch
may be queried by calling (:func:`~turicreate.Sketch.sketch_ready`).
Defaults to False.
sub_sketch_keys : list
The list of sub sketches to calculate. For an SArray of dictionary type,
each key needs to be a string; for an SArray of vector (array) type, each
key needs to be a positive integer.
"""
if (_proxy):
self.__proxy__ = _proxy
else:
self.__proxy__ = UnitySketchProxy()
if not isinstance(array, SArray):
raise TypeError("Sketch object can only be constructed from SArrays")
self.__proxy__.construct_from_sarray(array.__proxy__, background, sub_sketch_keys)
def __repr__(self):
"""
Emits a brief summary of all the statistics as a string.
"""
fields = [
['size', 'Length' , 'Yes'],
['min', 'Min' , 'Yes'],
['max', 'Max' , 'Yes'],
['mean', 'Mean' , 'Yes'],
['sum', 'Sum' , 'Yes'],
['var', 'Variance' , 'Yes'],
['std', 'Standard Deviation' , 'Yes'],
['num_missing', '# Missing Values' , 'Yes',],
['num_unique', '# unique values', 'No' ]
]
s = '\n'
result = []
for field in fields:
try:
method_to_call = getattr(self, field[0])
result.append([field[1], str(method_to_call()), field[2]])
except:
pass
sf = SArray(result).unpack(column_name_prefix = "")
sf.rename({'0': 'item', '1':'value', '2': 'is exact'}, inplace=True)
s += sf.__str__(footer=False)
s += "\n"
s += "\nMost frequent items:\n"
frequent = self.frequent_items()
# convert to string key
frequent_strkeys = {}
for key in frequent:
strkey = str(key)
if strkey in frequent_strkeys:
frequent_strkeys[strkey] += frequent[key]
else:
frequent_strkeys[strkey] = frequent[key]
sorted_freq = sorted(frequent_strkeys.items(), key=operator.itemgetter(1), reverse=True)
if len(sorted_freq) == 0:
s += " -- All elements appear with less than 0.01% frequency -- \n"
else:
sorted_freq = sorted_freq[:10]
sf = SFrame()
sf['value'] = [elem[0] for elem in sorted_freq]
sf['count'] = [elem[1] for elem in sorted_freq]
s += sf.__str__(footer=False) + "\n"
s += "\n"
try:
# print quantiles
self.quantile(0) # XXX: is this necessary?
s += "Quantiles: \n"
sf = SFrame()
for q in [0.0,0.01,0.05,0.25,0.5,0.75,0.95,0.99,1.00]:
sf.add_column(SArray([self.quantile(q)]), str(int(q * 100)) + '%', inplace=True)
s += sf.__str__(footer=False) + "\n"
except:
pass
try:
t_k = self.dict_key_summary()
t_v = self.dict_value_summary()
s += "\n******** Dictionary Element Key Summary ********\n"
s += t_k.__repr__()
s += "\n******** Dictionary Element Value Summary ********\n"
s += t_v.__repr__() + '\n'
except:
pass
try:
t_k = self.element_summary()
s += "\n******** Element Summary ********\n"
s += t_k.__repr__() + '\n'
except:
pass
return s.expandtabs(8)
def __str__(self):
"""
Emits a brief summary of all the statistics as a string.
"""
return self.__repr__()
def size(self):
"""
Returns the size of the input SArray.
Returns
-------
out : int
The number of elements of the input SArray.
"""
with cython_context():
return int(self.__proxy__.size())
def max(self):
"""
Returns the maximum value in the SArray. Returns *nan* on an empty
array. Throws an exception if called on an SArray with non-numeric type.
Raises
------
RuntimeError
Throws an exception if the SArray is a non-numeric type.
Returns
-------
out : type of SArray
Maximum value of SArray. Returns nan if the SArray is empty.
"""
with cython_context():
return self.__proxy__.max()
def min(self):
"""
Returns the minimum value in the SArray. Returns *nan* on an empty
array. Throws an exception if called on an SArray with non-numeric type.
Raises
------
RuntimeError
If the sarray is a non-numeric type.
Returns
-------
out : type of SArray
Minimum value of SArray. Returns nan if the sarray is empty.
"""
with cython_context():
return self.__proxy__.min()
def sum(self):
"""
Returns the sum of all the values in the SArray. Returns 0 on an empty
array. Throws an exception if called on an sarray with non-numeric type.
Will overflow without warning.
Raises
------
RuntimeError
If the sarray is a non-numeric type.
Returns
-------
out : type of SArray
Sum of all values in SArray. Returns 0 if the SArray is empty.
"""
with cython_context():
return self.__proxy__.sum()
def mean(self):
"""
Returns the mean of the values in the SArray. Returns 0 on an empty
array. Throws an exception if called on an SArray with non-numeric type.
Raises
------
RuntimeError
If the sarray is a non-numeric type.
Returns
-------
out : float
Mean of all values in SArray. Returns 0 if the sarray is empty.
"""
with cython_context():
return self.__proxy__.mean()
def std(self):
"""
Returns the standard deviation of the values in the SArray. Returns 0 on
an empty array. Throws an exception if called on an SArray with
non-numeric type.
Returns
-------
out : float
The standard deviation of all the values. Returns 0 if the sarray is
empty.
Raises
------
RuntimeError
If the sarray is a non-numeric type.
"""
return sqrt(self.var())
def var(self):
"""
Returns the variance of the values in the sarray. Returns 0 on an empty
array. Throws an exception if called on an SArray with non-numeric type.
Raises
------
RuntimeError
If the sarray is a non-numeric type.
Returns
-------
out : float
The variance of all the values. Returns 0 if the SArray is empty.
"""
with cython_context():
return self.__proxy__.var()
def num_missing(self):
"""
Returns the number of missing (i.e. None) values in the SArray.
Returns 0 on an empty SArray.
Returns
-------
out : int
The number of missing values in the SArray.
"""
with cython_context():
return int(self.__proxy__.num_undefined())
def num_unique(self):
"""
Returns a sketched estimate of the number of unique values in the
SArray based on the Hyperloglog sketch.
Returns
-------
out : int
An estimate of the number of unique values in the SArray.
"""
with cython_context():
return int(self.__proxy__.num_unique())
def frequent_items(self):
"""
Returns a sketched estimate of the most frequent elements in the SArray
based on the SpaceSaving sketch. It is only guaranteed that all
elements which appear in more than 0.01% of the rows of the array will
appear in the set of returned elements. However, other elements may
also appear in the result. The item counts are estimated using
the CountSketch.
Missing values are not taken into account when computing frequent items.
If this function returns no elements, it means that all elements appear
with less than 0.01% occurrence.
Returns
-------
out : dict
A dictionary mapping items and their estimated occurrence frequencies.
"""
with cython_context():
return self.__proxy__.frequent_items()
def quantile(self, quantile_val):
"""
Returns a sketched estimate of the value at a particular quantile
between 0.0 and 1.0. The quantile is guaranteed to be accurate within
1%: meaning that if you ask for the 0.55 quantile, the returned value is
guaranteed to be between the true 0.54 quantile and the true 0.56
quantile. The quantiles are only defined for numeric arrays and this
function will throw an exception if called on a sketch constructed for a
non-numeric column.
Parameters
----------
quantile_val : float
A value between 0.0 and 1.0 inclusive. Values below 0.0 will be
interpreted as 0.0. Values above 1.0 will be interpreted as 1.0.
Raises
------
RuntimeError
If the sarray is a non-numeric type.
Returns
-------
out : float | str
An estimate of the value at a quantile.
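Examples
--------
An illustrative example (output elided; the estimate is only guaranteed
to lie within 1% of the true quantile):
>>> sa = turicreate.SArray(range(1, 101))
>>> sa.summary().quantile(0.5) # doctest: +SKIP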
"""
with cython_context():
return self.__proxy__.get_quantile(quantile_val)
def frequency_count(self, element):
"""
Returns a sketched estimate of the number of occurrences of a given
element. This estimate is based on the count sketch. The element type
must be of the same type as the input SArray. Throws an exception if
element is of the incorrect type.
Parameters
----------
element : val
An element of the same type as the SArray.
Raises
------
RuntimeError
Throws an exception if element is of the incorrect type.
Returns
-------
out : int
An estimate of the number of occurrences of the element.
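Examples
--------
An illustrative example (the result is an estimate in general, though it
is typically exact on tiny inputs like this one):
>>> sa = turicreate.SArray([1, 1, 2])
>>> sa.summary().frequency_count(1) # doctest: +SKIP
2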
"""
with cython_context():
return int(self.__proxy__.frequency_count(element))
def sketch_ready(self):
"""
Returns True if the sketch has been executed on all the data.
If the sketch is created with background == False (default), this will
always return True. Otherwise, this will return False until the sketch
is ready.
"""
with cython_context():
return self.__proxy__.sketch_ready()
def num_elements_processed(self):
"""
Returns the number of elements processed so far.
If the sketch is created with background == False (default), this will
always return the length of the input array. Otherwise, this will
return the number of elements processed so far.
"""
with cython_context():
return self.__proxy__.num_elements_processed()
def element_length_summary(self):
"""
Returns the sketch summary for the element length. This is only valid for
a sketch constructed from an SArray of type list/array/dict; raises a
RuntimeError otherwise.
Examples
--------
>>> sa = turicreate.SArray([[j for j in range(i)] for i in range(1,1000)])
>>> sa.summary().element_length_summary()
+--------------------+---------------+----------+
| item | value | is exact |
+--------------------+---------------+----------+
| Length | 999 | Yes |
| Min | 1.0 | Yes |
| Max | 999.0 | Yes |
| Mean | 500.0 | Yes |
| Sum | 499500.0 | Yes |
| Variance | 83166.6666667 | Yes |
| Standard Deviation | 288.386314978 | Yes |
| # Missing Values | 0 | Yes |
| # unique values | 992 | No |
+--------------------+---------------+----------+
Most frequent items:
+-------+---+---+---+---+---+---+---+---+---+----+
| value | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
+-------+---+---+---+---+---+---+---+---+---+----+
| count | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
+-------+---+---+---+---+---+---+---+---+---+----+
Quantiles:
+-----+------+------+-------+-------+-------+-------+-------+-------+
| 0% | 1% | 5% | 25% | 50% | 75% | 95% | 99% | 100% |
+-----+------+------+-------+-------+-------+-------+-------+-------+
| 1.0 | 10.0 | 50.0 | 250.0 | 500.0 | 750.0 | 950.0 | 990.0 | 999.0 |
+-----+------+------+-------+-------+-------+-------+-------+-------+
Returns
-------
out : Sketch
A new Sketch object for the element lengths of the current SArray.
"""
with cython_context():
return Sketch(_proxy = self.__proxy__.element_length_summary())
def dict_key_summary(self):
"""
Returns the sketch summary for all dictionary keys. This is only valid
for a sketch object created from an SArray of dict type. Dictionary keys
are converted to strings before the sketch summary is computed.
Examples
--------
>>> sa = turicreate.SArray([{'I':1, 'love': 2}, {'nature':3, 'beauty':4}])
>>> sa.summary().dict_key_summary()
+------------------+-------+----------+
| item | value | is exact |
+------------------+-------+----------+
| Length | 4 | Yes |
| # Missing Values | 0 | Yes |
| # unique values | 4 | No |
+------------------+-------+----------+
Most frequent items:
+-------+---+------+--------+--------+
| value | I | love | beauty | nature |
+-------+---+------+--------+--------+
| count | 1 | 1 | 1 | 1 |
+-------+---+------+--------+--------+
"""
with cython_context():
return Sketch(_proxy = self.__proxy__.dict_key_summary())
def dict_value_summary(self):
"""
Returns the sketch summary for all dictionary values. This is only valid
for a sketch object created from an SArray of dict type.
The type of the value summary is inferred from the first set of values.
Examples
--------
>>> sa = turicreate.SArray([{'I':1, 'love': 2}, {'nature':3, 'beauty':4}])
>>> sa.summary().dict_value_summary()
+--------------------+---------------+----------+
| item | value | is exact |
+--------------------+---------------+----------+
| Length | 4 | Yes |
| Min | 1.0 | Yes |
| Max | 4.0 | Yes |
| Mean | 2.5 | Yes |
| Sum | 10.0 | Yes |
| Variance | 1.25 | Yes |
| Standard Deviation | 1.11803398875 | Yes |
| # Missing Values | 0 | Yes |
| # unique values | 4 | No |
+--------------------+---------------+----------+
Most frequent items:
+-------+-----+-----+-----+-----+
| value | 1.0 | 2.0 | 3.0 | 4.0 |
+-------+-----+-----+-----+-----+
| count | 1 | 1 | 1 | 1 |
+-------+-----+-----+-----+-----+
Quantiles:
+-----+-----+-----+-----+-----+-----+-----+-----+------+
| 0% | 1% | 5% | 25% | 50% | 75% | 95% | 99% | 100% |
+-----+-----+-----+-----+-----+-----+-----+-----+------+
| 1.0 | 1.0 | 1.0 | 2.0 | 3.0 | 4.0 | 4.0 | 4.0 | 4.0 |
+-----+-----+-----+-----+-----+-----+-----+-----+------+
"""
with cython_context():
return Sketch(_proxy = self.__proxy__.dict_value_summary())
def element_summary(self):
"""
Returns the sketch summary for all element values. This is only valid for
a sketch object created from an SArray of list or vector (array) type.
For an SArray of list type, all list values are treated as strings for the
sketch summary.
For an SArray of vector type, the sketch summary is computed on float values.
Examples
--------
>>> sa = turicreate.SArray([[1,2,3], [4,5]])
>>> sa.summary().element_summary()
+--------------------+---------------+----------+
| item | value | is exact |
+--------------------+---------------+----------+
| Length | 5 | Yes |
| Min | 1.0 | Yes |
| Max | 5.0 | Yes |
| Mean | 3.0 | Yes |
| Sum | 15.0 | Yes |
| Variance | 2.0 | Yes |
| Standard Deviation | 1.41421356237 | Yes |
| # Missing Values | 0 | Yes |
| # unique values | 5 | No |
+--------------------+---------------+----------+
Most frequent items:
+-------+-----+-----+-----+-----+-----+
| value | 1.0 | 2.0 | 3.0 | 4.0 | 5.0 |
+-------+-----+-----+-----+-----+-----+
| count | 1 | 1 | 1 | 1 | 1 |
+-------+-----+-----+-----+-----+-----+
Quantiles:
+-----+-----+-----+-----+-----+-----+-----+-----+------+
| 0% | 1% | 5% | 25% | 50% | 75% | 95% | 99% | 100% |
+-----+-----+-----+-----+-----+-----+-----+-----+------+
| 1.0 | 1.0 | 1.0 | 2.0 | 3.0 | 4.0 | 5.0 | 5.0 | 5.0 |
+-----+-----+-----+-----+-----+-----+-----+-----+------+
"""
with cython_context():
return Sketch(_proxy = self.__proxy__.element_summary())
def element_sub_sketch(self, keys = None):
"""
Returns the sketch summary for the given set of keys. This is only
applicable for a sketch summary created from an SArray of array or dict type.
For a dict SArray, the keys are the keys in the dict value.
For an array SArray, the keys are indexes into the array value.
The keys must be passed to the original summary() call in order to be
retrievable later.
Parameters
-----------
keys : list of str | str | list of int | int
The list of dictionary keys or array indexes to get sub sketches from.
If not given, all available sub sketches are retrieved.
Returns
-------
A dictionary that maps from each key (index) to the actual sketch summary
for that key (index).
Examples
--------
>>> sa = turicreate.SArray([{'a':1, 'b':2}, {'a':4, 'd':1}])
>>> s = sa.summary(sub_sketch_keys=['a','b'])
>>> s.element_sub_sketch(['a'])
{'a':
+--------------------+-------+----------+
| item | value | is exact |
+--------------------+-------+----------+
| Length | 2 | Yes |
| Min | 1.0 | Yes |
| Max | 4.0 | Yes |
| Mean | 2.5 | Yes |
| Sum | 5.0 | Yes |
| Variance | 2.25 | Yes |
| Standard Deviation | 1.5 | Yes |
| # Missing Values | 0 | Yes |
| # unique values | 2 | No |
+--------------------+-------+----------+
Most frequent items:
+-------+-----+-----+
| value | 1.0 | 4.0 |
+-------+-----+-----+
| count | 1 | 1 |
+-------+-----+-----+
Quantiles:
+-----+-----+-----+-----+-----+-----+-----+-----+------+
| 0% | 1% | 5% | 25% | 50% | 75% | 95% | 99% | 100% |
+-----+-----+-----+-----+-----+-----+-----+-----+------+
| 1.0 | 1.0 | 1.0 | 1.0 | 4.0 | 4.0 | 4.0 | 4.0 | 4.0 |
+-----+-----+-----+-----+-----+-----+-----+-----+------+}
"""
single_val = False
if keys is None:
keys = []
else:
if not isinstance(keys, list):
single_val = True
keys = [keys]
value_types = set([type(i) for i in keys])
if (len(value_types) > 1):
raise ValueError("All keys should have the same type.")
with cython_context():
ret_sketches = self.__proxy__.element_sub_sketch(keys)
ret = {}
# check return key matches input key
for key in keys:
if key not in ret_sketches:
raise KeyError("Cannot retrieve element sub sketch for key '" + str(key) + "'. Element sub sketch can only be retrieved when the summary object was created using the 'sub_sketch_keys' option.")
for key in ret_sketches:
ret[key] = Sketch(_proxy = ret_sketches[key])
if single_val:
return ret[keys[0]]
else:
return ret
def cancel(self):
"""
Cancels a background sketch computation immediately if one is ongoing.
Does nothing otherwise.
Examples
--------
>>> s = sa.summary(background=True)
>>> s.cancel()
"""
with cython_context():
self.__proxy__.cancel()
|
- frequent items (:func:`~turicreate.Sketch.frequent_items`)
- frequency count for any value (:func:`~turicreate.Sketch.frequency_count`)
|
canvas-to-blob.js
|
/*
* JavaScript Canvas to Blob 2.0.5
* https://github.com/blueimp/JavaScript-Canvas-to-Blob
*
* Copyright 2012, Sebastian Tschan
* https://blueimp.net
*
* Licensed under the MIT license:
* http://www.opensource.org/licenses/MIT
*
* Based on stackoverflow user Stoive's code snippet:
* http://stackoverflow.com/q/4998908
*/
/*jslint nomen: true, regexp: true */
/*global window, atob, Blob, ArrayBuffer, Uint8Array, define */
module.exports = function(dataURLInput) {
var CanvasPrototype = window.HTMLCanvasElement &&
window.HTMLCanvasElement.prototype,
hasBlobConstructor = window.Blob && (function () {
try {
return Boolean(new Blob());
} catch (e) {
return false;
}
}()),
hasArrayBufferViewSupport = hasBlobConstructor && window.Uint8Array &&
(function () {
try {
return new Blob([new Uint8Array(100)]).size === 100;
} catch (e) {
return false;
}
}()),
BlobBuilder = window.BlobBuilder || window.WebKitBlobBuilder ||
window.MozBlobBuilder || window.MSBlobBuilder,
dataURLtoBlob = (hasBlobConstructor || BlobBuilder) && window.atob &&
window.ArrayBuffer && window.Uint8Array && function (dataURI) {
var byteString,
arrayBuffer,
intArray,
i,
mimeString,
bb;
if (dataURI.split(',')[0].indexOf('base64') >= 0) {
// Convert base64 to raw binary data held in a string:
byteString = atob(dataURI.split(',')[1]);
} else {
// Convert base64/URLEncoded data component to raw binary data:
byteString = decodeURIComponent(dataURI.split(',')[1]);
}
// Write the bytes of the string to an ArrayBuffer:
arrayBuffer = new ArrayBuffer(byteString.length);
intArray = new Uint8Array(arrayBuffer);
for (i = 0; i < byteString.length; i += 1) {
intArray[i] = byteString.charCodeAt(i);
}
// Separate out the mime component:
mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0];
// Write the ArrayBuffer (or ArrayBufferView) to a blob:
if (hasBlobConstructor) {
return new Blob(
[hasArrayBufferViewSupport ? intArray : arrayBuffer],
{type: mimeString}
);
}
bb = new BlobBuilder();
bb.append(arrayBuffer);
return bb.getBlob(mimeString);
};
// if (window.HTMLCanvasElement && !CanvasPrototype.toBlob) {
// if (CanvasPrototype.mozGetAsFile) {
// CanvasPrototype.toBlob = function (callback, type, quality) {
// if (quality && CanvasPrototype.toDataURL && dataURLtoBlob) {
// callback(dataURLtoBlob(this.toDataURL(type, quality)));
// } else {
// callback(this.mozGetAsFile('blob', type));
// }
// };
|
// callback(dataURLtoBlob(this.toDataURL(type, quality)));
// };
// }
// }
return dataURLtoBlob(dataURLInput);
}
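// Illustrative usage (hedged; assumes a browser context with a `canvas`
// element and CommonJS require):
//   var dataURLtoBlob = require('./canvas-to-blob');
//   var blob = dataURLtoBlob(canvas.toDataURL('image/png'));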
|
// } else if (CanvasPrototype.toDataURL && dataURLtoBlob) {
// CanvasPrototype.toBlob = function (callback, type, quality) {
|
test_immutable_registers.py
|
#!/usr/bin/env python2
# coding: utf-8
"""Test immutable registers."""
import unittest
import random
from triton import *
class TestImmutableAArch64Registers(unittest.TestCase):
def setUp(self):
"""Define the arch."""
self.ctx = TritonContext()
self.ctx.setArchitecture(ARCH.AARCH64)
def test_immutable(self):
x1 = self.ctx.registers.x1
x2 = self.ctx.registers.x2
xzr = self.ctx.registers.xzr
wzr = self.ctx.registers.wzr
self.assertEqual(self.ctx.getConcreteRegisterValue(xzr), 0)
self.assertEqual(self.ctx.getConcreteRegisterValue(wzr), 0)
# Set concrete values
self.ctx.setConcreteRegisterValue(xzr, 1234)
self.ctx.setConcreteRegisterValue(x1, 10)
self.ctx.setConcreteRegisterValue(x2, 20)
self.assertEqual(self.ctx.getConcreteRegisterValue(xzr), 0)
self.assertEqual(self.ctx.getConcreteRegisterValue(wzr), 0)
self.assertEqual(self.ctx.getConcreteRegisterValue(x1), 10)
|
self.ctx.processing(inst)
# Concrete
self.assertEqual(self.ctx.getConcreteRegisterValue(xzr), 0)
self.assertEqual(self.ctx.getConcreteRegisterValue(wzr), 0)
# Symbolic
self.assertEqual(self.ctx.getSymbolicRegisterValue(xzr), 0)
self.assertEqual(self.ctx.getSymbolicRegisterValue(wzr), 0)
# Assignment
self.assertEqual(REG.AARCH64.XZR in self.ctx.getSymbolicRegisters(), False)
self.assertEqual(REG.AARCH64.WZR in self.ctx.getSymbolicRegisters(), False)
self.assertEqual(REG.AARCH64.PC in self.ctx.getSymbolicRegisters(), True)
|
self.assertEqual(self.ctx.getConcreteRegisterValue(x2), 20)
inst = Instruction("\x3f\x00\x02\x8b") # add xzr, x1, x2
|
mc_link_test.go
|
package k8s
import (
"bytes"
"context"
"testing"
link "github.com/linkerd/linkerd2/controller/gen/apis/link/v1alpha1"
l5dk8s "github.com/linkerd/linkerd2/pkg/k8s"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubectl/pkg/scheme"
)
func TestGetMcLinks(t *testing.T) {
mcLink := link.Link{
TypeMeta: metav1.TypeMeta{
APIVersion: link.SchemeGroupVersion.Identifier(),
Kind: l5dk8s.LinkKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: "linkname",
Namespace: "linkns",
},
Spec: link.LinkSpec{
TargetClusterName: "tcn",
TargetClusterDomain: "tcd",
TargetClusterLinkerdNamespace: "tcln",
ClusterCredentialsSecret: "ccs",
GatewayAddress: "ga",
|
GatewayIdentity: "identity",
ProbeSpec: link.ProbeSpec{
Path: "pth",
Port: "80",
Period: "8s",
},
Selector: *metav1.SetAsLabelSelector(labels.Set(map[string]string{"l": "v"})),
},
}
client := fakeClient(&mcLink)
result, err := client.GetMulticlusterLinks(context.Background())
if err != nil {
t.Error(err)
}
var buf bytes.Buffer
jsonSerializer := scheme.DefaultJSONEncoder()
if err := jsonSerializer.Encode(&mcLink, &buf); err != nil {
t.Error(err)
}
expected := buf.String()
actual := string(result[0].MulticlusterLink)
if expected != actual {
t.Fatalf("exepected %s, got %s", expected, actual)
}
}
|
GatewayPort: "555",
|
lib.rs
|
mod converters;
mod winit_config;
mod winit_windows;
use bevy_input::{
keyboard::KeyboardInput,
mouse::{MouseButtonInput, MouseMotion, MouseScrollUnit, MouseWheel},
touch::TouchInput,
};
pub use winit_config::*;
pub use winit_windows::*;
use bevy_app::{prelude::*, AppExit};
use bevy_ecs::{IntoThreadLocalSystem, Resources, World};
use bevy_math::Vec2;
use bevy_window::{
CreateWindow, CursorMoved, Window, WindowCloseRequested, WindowCreated, WindowResized, Windows,
};
use winit::{
event::{self, DeviceEvent, Event, WindowEvent},
event_loop::{ControlFlow, EventLoop, EventLoopWindowTarget},
};
#[derive(Default)]
pub struct WinitPlugin;
#[derive(Debug)]
pub struct EventLoopProxyPtr(pub usize);
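// EventLoopProxyPtr holds a raw pointer to a winit `EventLoopProxy<()>`,
// cast to a usize so it can be stored as a resource; see the
// `Box::into_raw` call in `winit_runner` below. Consumers presumably cast
// it back to `*mut winit::event_loop::EventLoopProxy<()>` before use.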
impl Plugin for WinitPlugin {
fn build(&self, app: &mut AppBuilder) {
app
// TODO: It would be great to provide a raw winit WindowEvent here, but the lifetime on it is
// stopping us. There are plans to remove the lifetime: https://github.com/rust-windowing/winit/pull/1456
// .add_event::<winit::event::WindowEvent>()
.init_resource::<WinitWindows>()
.set_runner(winit_runner)
.add_system(change_window.thread_local_system());
}
}
fn change_window(_: &mut World, resources: &mut Resources)
|
fn run<F>(event_loop: EventLoop<()>, event_handler: F) -> !
where
F: 'static + FnMut(Event<'_, ()>, &EventLoopWindowTarget<()>, &mut ControlFlow),
{
event_loop.run(event_handler)
}
// TODO: It may be worth moving this cfg into a procedural macro so that it can be referenced by
// a single name instead of being copied around.
// https://gist.github.com/jakerr/231dee4a138f7a5f25148ea8f39b382e seems to work.
#[cfg(any(
target_os = "windows",
target_os = "macos",
target_os = "linux",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd"
))]
fn run_return<F>(event_loop: &mut EventLoop<()>, event_handler: F)
where
F: FnMut(Event<'_, ()>, &EventLoopWindowTarget<()>, &mut ControlFlow),
{
use winit::platform::desktop::EventLoopExtDesktop;
event_loop.run_return(event_handler)
}
#[cfg(not(any(
target_os = "windows",
target_os = "macos",
target_os = "linux",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd"
)))]
fn run_return<F>(_event_loop: &mut EventLoop<()>, _event_handler: F)
where
F: FnMut(Event<'_, ()>, &EventLoopWindowTarget<()>, &mut ControlFlow),
{
panic!("Run return is not supported on this platform!")
}
pub fn winit_runner(mut app: App) {
let mut event_loop = EventLoop::new();
let mut create_window_event_reader = EventReader::<CreateWindow>::default();
let mut app_exit_event_reader = EventReader::<AppExit>::default();
app.resources
.insert_thread_local(EventLoopProxyPtr(
Box::into_raw(Box::new(event_loop.create_proxy())) as usize,
));
handle_create_window_events(
&mut app.resources,
&event_loop,
&mut create_window_event_reader,
);
app.initialize();
log::debug!("Entering winit event loop");
let should_return_from_run = app
.resources
.get::<WinitConfig>()
.map_or(false, |config| config.return_from_run);
let event_handler = move |event: Event<()>,
event_loop: &EventLoopWindowTarget<()>,
control_flow: &mut ControlFlow| {
*control_flow = if cfg!(feature = "metal-auto-capture") {
ControlFlow::Exit
} else {
ControlFlow::Poll
};
if let Some(app_exit_events) = app.resources.get_mut::<Events<AppExit>>() {
if app_exit_event_reader.latest(&app_exit_events).is_some() {
*control_flow = ControlFlow::Exit;
}
}
match event {
event::Event::WindowEvent {
event: WindowEvent::Resized(size),
window_id: winit_window_id,
..
} => {
let winit_windows = app.resources.get_mut::<WinitWindows>().unwrap();
let mut windows = app.resources.get_mut::<Windows>().unwrap();
let window_id = winit_windows.get_window_id(winit_window_id).unwrap();
let window = windows.get_mut(window_id).unwrap();
window.update_resolution_from_backend(size.width, size.height);
let mut resize_events = app.resources.get_mut::<Events<WindowResized>>().unwrap();
resize_events.send(WindowResized {
id: window_id,
height: window.height() as usize,
width: window.width() as usize,
});
}
event::Event::WindowEvent {
event,
window_id: winit_window_id,
..
} => match event {
WindowEvent::CloseRequested => {
let mut window_close_requested_events = app
.resources
.get_mut::<Events<WindowCloseRequested>>()
.unwrap();
let winit_windows = app.resources.get_mut::<WinitWindows>().unwrap();
let window_id = winit_windows.get_window_id(winit_window_id).unwrap();
window_close_requested_events.send(WindowCloseRequested { id: window_id });
}
WindowEvent::KeyboardInput { ref input, .. } => {
let mut keyboard_input_events =
app.resources.get_mut::<Events<KeyboardInput>>().unwrap();
keyboard_input_events.send(converters::convert_keyboard_input(input));
}
WindowEvent::CursorMoved { position, .. } => {
let mut cursor_moved_events =
app.resources.get_mut::<Events<CursorMoved>>().unwrap();
let winit_windows = app.resources.get_mut::<WinitWindows>().unwrap();
let window_id = winit_windows.get_window_id(winit_window_id).unwrap();
let window = winit_windows.get_window(window_id).unwrap();
let inner_size = window.inner_size();
// move origin to bottom left
let y_position = inner_size.height as f32 - position.y as f32;
cursor_moved_events.send(CursorMoved {
id: window_id,
position: Vec2::new(position.x as f32, y_position as f32),
});
}
WindowEvent::MouseInput { state, button, .. } => {
let mut mouse_button_input_events =
app.resources.get_mut::<Events<MouseButtonInput>>().unwrap();
mouse_button_input_events.send(MouseButtonInput {
button: converters::convert_mouse_button(button),
state: converters::convert_element_state(state),
});
}
WindowEvent::MouseWheel { delta, .. } => match delta {
event::MouseScrollDelta::LineDelta(x, y) => {
let mut mouse_wheel_input_events =
app.resources.get_mut::<Events<MouseWheel>>().unwrap();
mouse_wheel_input_events.send(MouseWheel {
unit: MouseScrollUnit::Line,
x,
y,
});
}
event::MouseScrollDelta::PixelDelta(p) => {
let mut mouse_wheel_input_events =
app.resources.get_mut::<Events<MouseWheel>>().unwrap();
mouse_wheel_input_events.send(MouseWheel {
unit: MouseScrollUnit::Pixel,
x: p.x as f32,
y: p.y as f32,
});
}
},
WindowEvent::Touch(touch) => {
let mut touch_input_events =
app.resources.get_mut::<Events<TouchInput>>().unwrap();
touch_input_events.send(converters::convert_touch_input(touch));
}
_ => {}
},
event::Event::DeviceEvent { ref event, .. } => {
if let DeviceEvent::MouseMotion { delta } = event {
let mut mouse_motion_events =
app.resources.get_mut::<Events<MouseMotion>>().unwrap();
mouse_motion_events.send(MouseMotion {
delta: Vec2::new(delta.0 as f32, delta.1 as f32),
});
}
}
event::Event::MainEventsCleared => {
handle_create_window_events(
&mut app.resources,
event_loop,
&mut create_window_event_reader,
);
app.update();
}
_ => (),
}
};
if should_return_from_run {
run_return(&mut event_loop, event_handler);
} else {
run(event_loop, event_handler);
}
}
fn handle_create_window_events(
resources: &mut Resources,
event_loop: &EventLoopWindowTarget<()>,
create_window_event_reader: &mut EventReader<CreateWindow>,
) {
let mut winit_windows = resources.get_mut::<WinitWindows>().unwrap();
let mut windows = resources.get_mut::<Windows>().unwrap();
let create_window_events = resources.get::<Events<CreateWindow>>().unwrap();
let mut window_created_events = resources.get_mut::<Events<WindowCreated>>().unwrap();
for create_window_event in create_window_event_reader.iter(&create_window_events) {
let window = Window::new(create_window_event.id, &create_window_event.descriptor);
winit_windows.create_window(event_loop, &window);
let window_id = window.id();
windows.add(window);
window_created_events.send(WindowCreated { id: window_id });
}
}
|
{
let winit_windows = resources.get::<WinitWindows>().unwrap();
let mut windows = resources.get_mut::<Windows>().unwrap();
for bevy_window in windows.iter_mut() {
let id = bevy_window.id();
for command in bevy_window.drain_commands() {
match command {
bevy_window::WindowCommand::SetWindowMode {
mode,
resolution: (width, height),
} => {
let window = winit_windows.get_window(id).unwrap();
match mode {
bevy_window::WindowMode::BorderlessFullscreen => {
window.set_fullscreen(Some(winit::window::Fullscreen::Borderless(None)))
}
bevy_window::WindowMode::Fullscreen { use_size } => window.set_fullscreen(
Some(winit::window::Fullscreen::Exclusive(match use_size {
true => get_fitting_videomode(
&window.current_monitor().unwrap(),
width,
height,
),
false => get_best_videomode(&window.current_monitor().unwrap()),
})),
),
bevy_window::WindowMode::Windowed => window.set_fullscreen(None),
}
}
bevy_window::WindowCommand::SetTitle { title } => {
let window = winit_windows.get_window(id).unwrap();
window.set_title(&title);
}
bevy_window::WindowCommand::SetResolution { width, height } => {
let window = winit_windows.get_window(id).unwrap();
window.set_inner_size(winit::dpi::PhysicalSize::new(width, height));
}
bevy_window::WindowCommand::SetVsync { .. } => (),
bevy_window::WindowCommand::SetResizable { resizable } => {
let window = winit_windows.get_window(id).unwrap();
window.set_resizable(resizable);
}
bevy_window::WindowCommand::SetDecorations { decorations } => {
let window = winit_windows.get_window(id).unwrap();
window.set_decorations(decorations);
}
bevy_window::WindowCommand::SetCursorLockMode { locked } => {
let window = winit_windows.get_window(id).unwrap();
window.set_cursor_grab(locked).unwrap();
}
bevy_window::WindowCommand::SetCursorVisibility { visible } => {
let window = winit_windows.get_window(id).unwrap();
window.set_cursor_visible(visible);
}
}
}
}
}
|
policy.go
|
// Code generated by informer-gen. DO NOT EDIT.
package v1
import (
time "time"
authorization_v1 "github.com/openshift/api/authorization/v1"
versioned "github.com/openshift/client-go/authorization/clientset/versioned"
internalinterfaces "github.com/openshift/client-go/authorization/informers/externalversions/internalinterfaces"
v1 "github.com/openshift/client-go/authorization/listers/authorization/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// PolicyInformer provides access to a shared informer and lister for
// Policies.
type PolicyInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1.PolicyLister
}
type policyInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewPolicyInformer constructs a new informer for Policy type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredPolicyInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredPolicyInformer constructs a new informer for Policy type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func
|
(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.AuthorizationV1().Policies(namespace).List(options)
},
WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.AuthorizationV1().Policies(namespace).Watch(options)
},
},
&authorization_v1.Policy{},
resyncPeriod,
indexers,
)
}
func (f *policyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *policyInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&authorization_v1.Policy{}, f.defaultInformer)
}
func (f *policyInformer) Lister() v1.PolicyLister {
return v1.NewPolicyLister(f.Informer().GetIndexer())
}
|
NewFilteredPolicyInformer
|
overloaded_deref_with_ref_pattern.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we choose Deref or DerefMut appropriately based on mutability of ref bindings (#15609).
use std::ops::{Deref, DerefMut};
struct DerefOk<T>(T);
struct DerefMutOk<T>(T);
impl<T> Deref for DerefOk<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T> DerefMut for DerefOk<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
panic!()
}
}
impl<T> Deref for DerefMutOk<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
panic!()
}
}
impl<T> DerefMut for DerefMutOk<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
fn main() {
// Check that mutable ref binding in match picks DerefMut
|
};
// Check that mutable ref binding in let picks DerefMut
let mut y = DerefMutOk(1);
let ref mut z = *y;
// Check that immutable ref binding in match picks Deref
let mut b = DerefOk(2);
match *b {
ref n => n,
};
// Check that immutable ref binding in let picks Deref
let mut y = DerefOk(3);
let ref z = *y;
// Check that mixed mutable/immutable ref binding in match picks DerefMut
let mut b = DerefMutOk((0, 9));
match *b {
(ref mut n, ref m) => (n, m),
};
let mut b = DerefMutOk((0, 9));
match *b {
(ref n, ref mut m) => (n, m),
};
// Check that mixed mutable/immutable ref binding in let picks DerefMut
let mut y = DerefMutOk((1, 8));
let (ref mut z, ref a) = *y;
let mut y = DerefMutOk((1, 8));
let (ref z, ref mut a) = *y;
// Check that multiple immutable ref bindings in match picks Deref
let mut b = DerefOk((2, 7));
match *b {
(ref n, ref m) => (n, m),
};
// Check that multiple immutable ref bindings in let picks Deref
let mut y = DerefOk((3, 6));
let (ref z, ref a) = *y;
// Check that multiple mutable ref bindings in match picks DerefMut
let mut b = DerefMutOk((4, 5));
match *b {
(ref mut n, ref mut m) => (n, m),
};
// Check that multiple mutable ref bindings in let picks DerefMut
let mut y = DerefMutOk((5, 4));
let (ref mut z, ref mut a) = *y;
}
|
let mut b = DerefMutOk(0);
match *b {
ref mut n => n,
|
cli.py
|
"""Console script for calh."""
import sys
import click
import click_spinner
@click.group(invoke_without_command=True)
@click.option("-v", "--version", is_flag=True, default=False)
def
|
(version):
if version:
import calh
click.echo(f"Version: {calh.__version__}")
@cli.command()
@click.option(
"--input-file", required=True, type=click.Path(), help="Calendar filepath"
)
@click.option("--re-pattern", default=None, help="Regular expression pattern")
@click.option(
"--start-date", default=None, help="Date starts from. Format in YYYY-MM-DD"
)
@click.option("--end-date", default=None, help="Date untill. Format in YYYY-MM-DD")
@click.option(
"--output-format",
type=click.Choice(["json", "ics"]),
default="json",
help="Output format",
)
@click.option("--output-file", type=click.Path(), help="Filepath for your output")
def filter_calendar(
input_file, re_pattern, start_date, end_date, output_format, output_file
):
"""
Filter ics-format calendar
"""
from calh.filter import CalendarFilter
with click_spinner.spinner():
CalendarFilter.filter(
input_file=input_file,
re_pattern=re_pattern,
start_date=start_date,
end_date=end_date,
output_format=output_format,
output_file=output_file,
)
click.echo(f"Your filtered calendar is ready: {output_file}")
@cli.command()
@click.option("--input-file", required=True, type=click.Path(), help="Input file")
@click.option("--title", required=True, help="Title of the calendar")
@click.option("--width", default=None, help="Width in inch")
@click.option("--height", default=None, help="Height in inch")
@click.option("--full-year", is_flag=True, default=False, help="Show the full year")
@click.option("--output-file", type=click.Path(), help="out json file")
def draw_calendar(input_file, title, full_year, width, height, output_file):
"""
Draw a calendar with weekdays and ISO weeks
"""
from calh.visualization import Heatmap
with click_spinner.spinner():
Heatmap.create(
input_file=input_file,
full_year=full_year,
output_file=output_file,
title=title,
width=width,
height=height,
)
click.echo(f"Your calendar is ready: {output_file}")
if __name__ == "__main__":
sys.exit(cli()) # pragma: no cover
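# Illustrative invocations (hedged; file names are hypothetical, and the
# dashed command names assume click >= 7's underscore-to-dash conversion):
#   python cli.py --version
#   python cli.py filter-calendar --input-file cal.ics --output-file out.json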
|
cli
|
143. reorder-list.py
|
from LinkedList import *
class Solution(LinkedList):
def reverse(self, head):
|
def reorderList(self, head: ListNode) -> ListNode:
# find the middle node of the linked list
slow, fast = head, head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
# reverse the second part
tail = self.reverse(slow)
# reorder the linked list
rs = head
while tail.next:
# save the next head and tail first
head_next = head.next
tail_next = tail.next
# reorder: connect the head and tail
tail.next = head.next
head.next = tail
# move head and tail forward
tail = tail_next
head = head_next
return rs
values = [1, 2, 3, 4, 5]
s = Solution(values)
s.reorderList(s.head)
s.print_list()
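# Expected reordering of [1, 2, 3, 4, 5] is 1 -> 5 -> 2 -> 4 -> 3
# (the L0, Ln, L1, Ln-1, ... interleaving); the exact output format depends
# on LinkedList.print_list, which is not shown here.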
|
pre = None
while head:
next = head.next
head.next = pre
pre = head
head = next
return pre
|
forms.py
|
from django import forms
from .models import UserAccount
class UserCreationForm(forms.ModelForm):
"""
A form for creating new users. Includes all the required
fields, plus a repeated password.
"""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = UserAccount
fields = ('email',)
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
|
# Save the provided password in hashed format
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
"""
A form for updating users. Includes all the fields on the user.
"""
class Meta:
model = UserAccount
fields = ()
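# Illustrative usage of UserCreationForm (hedged; form data is hypothetical):
#   form = UserCreationForm({"email": "user@example.com",
#                            "password1": "s3cret", "password2": "s3cret"})
#   if form.is_valid():
#       user = form.save()  # stores the password hashed via set_password()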
| |
instr_mem26.rs
|
#[doc = "Reader of register INSTR_MEM26"]
pub type R = crate::R<u32, super::INSTR_MEM26>;
#[doc = "Writer for register INSTR_MEM26"]
pub type W = crate::W<u32, super::INSTR_MEM26>;
#[doc = "Register INSTR_MEM26 `reset()`'s with value 0"]
impl crate::ResetValue for super::INSTR_MEM26 {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `INSTR_MEM26`"]
pub type INSTR_MEM26_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `INSTR_MEM26`"]
pub struct INSTR_MEM26_W<'a> {
w: &'a mut W,
}
impl<'a> INSTR_MEM26_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn
|
(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !0xffff) | ((value as u32) & 0xffff);
self.w
}
}
impl R {
#[doc = "Bits 0:15"]
#[inline(always)]
pub fn instr_mem26(&self) -> INSTR_MEM26_R {
INSTR_MEM26_R::new((self.bits & 0xffff) as u16)
}
}
impl W {
#[doc = "Bits 0:15"]
#[inline(always)]
pub fn instr_mem26(&mut self) -> INSTR_MEM26_W {
INSTR_MEM26_W { w: self }
}
}
|
bits
|
altitude.py
|
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
|
class Altitude(object):
"""
An object containing the altitude information of the device.
:param altitude_in_meters: A double representing the altitude of the device in meters.
:type altitude_in_meters: (optional) float
:param accuracy_in_meters: A double representing the accuracy of the altitude measurement in meters.
:type accuracy_in_meters: (optional) float
"""
deserialized_types = {
'altitude_in_meters': 'float',
'accuracy_in_meters': 'float'
} # type: Dict
attribute_map = {
'altitude_in_meters': 'altitudeInMeters',
'accuracy_in_meters': 'accuracyInMeters'
} # type: Dict
def __init__(self, altitude_in_meters=None, accuracy_in_meters=None):
# type: (Optional[float], Optional[float]) -> None
"""An object containing the altitude information of the device.
:param altitude_in_meters: A double representing the altitude of the device in meters.
:type altitude_in_meters: (optional) float
:param accuracy_in_meters: A double representing the accuracy of the altitude measurement in meters.
:type accuracy_in_meters: (optional) float
"""
self.__discriminator_value = None # type: str
self.altitude_in_meters = altitude_in_meters
self.accuracy_in_meters = accuracy_in_meters
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, Altitude):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
|
from typing import Dict, List, Optional, Union
from datetime import datetime
|
193.rs
|
/// We should work from the bottom up: compute how many prime numbers lie
/// beneath the specified value, then use some counting techniques to combine
/// the known prime computations.
///
/// We need to sieve all primes beneath 2^50.
///
|
/// The longer portion is determining which products of these primes
/// are less than 2^50.
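///
/// A minimal sketch of the sieving step (a plain `Vec<bool>` Sieve of
/// Eratosthenes; the limit and storage strategy here are illustrative, and
/// not necessarily what the final solution uses):
///
/// fn primes_below(limit: usize) -> Vec<usize> {
///     // is_composite[n] is set once some smaller prime divides n.
///     let mut is_composite = vec![false; limit];
///     let mut primes = Vec::new();
///     for n in 2..limit {
///         if !is_composite[n] {
///             primes.push(n);
///             // Multiples below n^2 were already marked by smaller primes.
///             for multiple in (n * n..limit).step_by(n) {
///                 is_composite[multiple] = true;
///             }
///         }
///     }
///     primes
/// }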
|
|
demos.ts
|
import ArcProgress from '../src/arc-progress';
import { dateFormat } from './utils';
import * as image from '../static/fill.png';
import '../static/style.css';
// use customText and observer method
const customText = [
{ text: '$', size: '12px', color: '#fff', x: 156, y:98 },
{ text: 'remaining amount', size: '14px', color: '#fff', x: 100, y:150 },
];
const arcProgress = new ArcProgress({
customText,
el: '#progress-container',
progress: .78,
text: '6439.68',
size: 200,
textStyle: { y: 95, size: '24px', color: '#fff' },
emptyColor: 'rgba(73, 201, 245, .9)',
fillColor: { gradient: ['#0c7bb3', '#1498da', '#1498da'] },
observer(e, t) {
console.log('observer:', e, t);
},
animationEnd(e) {
console.log('animationEnd', e);
},
});
document.getElementById('button').addEventListener('click', () => {
arcProgress.updateProgress({ progress: .89, text: '7347.84' });
}, false);
document.getElementById('button2').addEventListener('click', () => {
arcProgress.updateProgress({ progress: .31, text: '2559.36' });
}, false);
// demo2 custom thickness
|
progress: .68,
text: '356',
textStyle: { size: '44px', color: '#7591af' },
arcStart: -90,
arcEnd: 270,
fillThickness: 14,
thickness: 8,
emptyColor: '#3d5875',
fillColor: '#00e0ff',
animation: 2200,
});
// demo3 today time remaining
function getTodayProgress(): any {
const oneDayTimeStamp = 86400000;
const today = new Date();
  const todayTimeStamp = +new Date(today.getFullYear(), today.getMonth(), today.getDate(),
    0, 0, 0, 0);
  const tomorrowTimeStamp = todayTimeStamp + oneDayTimeStamp;
  const todayRemaining = tomorrowTimeStamp - +today;
  const progress = todayRemaining / oneDayTimeStamp;
  const text = dateFormat(todayRemaining + todayTimeStamp, 'hh:mm:ss');
return { progress, text };
}
const { progress, text } = getTodayProgress();
const customText3 = [
{ text, size: '28px', color: '#02ce9c', x: 100, y:98 },
{ text: 'time remaining', size: '16px', color: '#636467', x: 100, y:128 },
];
const arcProgress3 = new ArcProgress({
progress,
el: '#progress-container3',
size: 200,
customText: customText3,
emptyColor: '#3b3a3f',
fillColor: '#00ce9b',
lineCap: 'butt',
arcStart: -90,
arcEnd: 270,
animation: false,
});
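// Tick once per second: recompute today's remaining time, update the arc,
// then re-arm the timeout (a self-scheduling setTimeout rather than setInterval).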
(function setTime() {
setTimeout(() => {
const { progress, text } = getTodayProgress();
customText3[0].text = text;
arcProgress3.updateProgress({ progress, customText: customText3 });
setTime();
}, 1000);
})();
// demo4 use image fillColor
const customText4 = [
{ text: '%', size: '12px', font: 'Impact', color: '#76a4ef', x: 156, y:104 },
];
const arcProgress4 = new ArcProgress({
el: document.getElementById('progress-container4'),
progress: .7826,
text: '78.26',
size: 200,
customText: customText4,
textStyle: { size: '34px', color: '#76a4ef', font: 'Arial Black' },
emptyColor: '#ebf4f8',
fillColor: { image },
arcStart: -180,
arcEnd: 180,
thickness: 18,
speed: -30,
animationEnd(e) {
const { progress } = e;
setTimeout(() => {
if (progress === .7826) {
arcProgress4.updateProgress({ progress: .3423, text: '34.23' });
} else if (progress === .3423) {
arcProgress4.updateProgress({ progress: .8016, text: '80.16' });
}
}, 500);
},
});
|
new ArcProgress({
el: '#progress-container2',
|
montgomery.rs
|
// -*- mode: rust; -*-
//
// This file is part of curve25519-dalek.
// Copyright (c) 2016-2021 isis lovecruft
// Copyright (c) 2016-2019 Henry de Valence
// See LICENSE for licensing information.
//
// Authors:
// - isis agora lovecruft <[email protected]>
// - Henry de Valence <[email protected]>
//! Scalar multiplication on the Montgomery form of Curve25519.
//!
//! To avoid notational confusion with the Edwards code, we use
//! variables \\( u, v \\) for the Montgomery curve, so that “Montgomery
//! \\(u\\)” here corresponds to “Montgomery \\(x\\)” elsewhere.
//!
//! Montgomery arithmetic works not on the curve itself, but on the
//! \\(u\\)-line, which discards sign information and unifies the curve
//! and its quadratic twist. See [_Montgomery curves and their
//! arithmetic_][costello-smith] by Costello and Smith for more details.
//!
//! The `MontgomeryPoint` struct contains the affine \\(u\\)-coordinate
//! \\(u\_0(P)\\) of a point \\(P\\) on either the curve or the twist.
//! Here the map \\(u\_0 : \mathcal M \rightarrow \mathbb F\_p \\) is
//! defined by \\(u\_0((u,v)) = u\\); \\(u\_0(\mathcal O) = 0\\). See
//! section 5.4 of Costello-Smith for more details.
//!
//! # Scalar Multiplication
//!
//! Scalar multiplication on `MontgomeryPoint`s is provided by the `*`
//! operator, which implements the Montgomery ladder.
//!
//! # Edwards Conversion
//!
//! The \\(2\\)-to-\\(1\\) map from the Edwards model to the Montgomery
//! \\(u\\)-line is provided by `EdwardsPoint::to_montgomery()`.
//!
//! To lift a `MontgomeryPoint` to an `EdwardsPoint`, use
//! `MontgomeryPoint::to_edwards()`, which takes a sign parameter.
//! This function rejects `MontgomeryPoints` which correspond to points
//! on the twist.
//!
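//! For example (a minimal sketch; `constants` and `Scalar` are this crate's
//! own items, used the same way in the tests below):
//!
//! ```ignore
//! use curve25519_dalek::constants;
//! use curve25519_dalek::scalar::Scalar;
//!
//! let s = Scalar::from(7u64);
//! let q = s * constants::X25519_BASEPOINT; // u_0([7]B) via the ladder
//! ```
//!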
//! [costello-smith]: https://eprint.iacr.org/2017/212.pdf
// We allow non snake_case names because coordinates in projective space are
// traditionally denoted by the capitalisation of their respective
// counterparts in affine space. Yeah, you heard me, rustc, I'm gonna have my
// affine and projective cakes and eat both of them too.
#![allow(non_snake_case)]
use core::ops::{Mul, MulAssign};
use constants::{APLUS2_OVER_FOUR, MONTGOMERY_A, MONTGOMERY_A_NEG};
use edwards::{CompressedEdwardsY, EdwardsPoint};
use field::FieldElement;
use scalar::Scalar;
use traits::Identity;
use subtle::Choice;
use subtle::ConstantTimeEq;
use subtle::{ConditionallyNegatable, ConditionallySelectable};
use zeroize::Zeroize;
/// Holds the \\(u\\)-coordinate of a point on the Montgomery form of
/// Curve25519 or its twist.
#[derive(Copy, Clone, Debug, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct MontgomeryPoint(pub [u8; 32]);
/// Equality of `MontgomeryPoint`s is defined mod p.
impl ConstantTimeEq for MontgomeryPoint {
fn ct_eq(&self, other: &MontgomeryPoint) -> Choice {
let self_fe = FieldElement::from_bytes(&self.0);
let other_fe = FieldElement::from_bytes(&other.0);
self_fe.ct_eq(&other_fe)
}
}
impl Default for MontgomeryPoint {
fn default() -> MontgomeryPoint {
MontgomeryPoint([0u8; 32])
}
}
impl PartialEq for MontgomeryPoint {
fn eq(&self, other: &MontgomeryPoint) -> bool {
self.ct_eq(other).unwrap_u8() == 1u8
}
}
impl Eq for MontgomeryPoint {}
impl Identity for MontgomeryPoint {
/// Return the group identity element, which has order 4.
fn identity() -> MontgomeryPoint {
MontgomeryPoint([0u8; 32])
}
}
impl Zeroize for MontgomeryPoint {
fn zeroize(&mut self) {
self.0.zeroize();
}
}
impl MontgomeryPoint {
/// View this `MontgomeryPoint` as an array of bytes.
pub fn as_bytes<'a>(&'a self) -> &'a [u8; 32] {
&self.0
}
/// Convert this `MontgomeryPoint` to an array of bytes.
pub fn to_bytes(&self) -> [u8; 32] {
self.0
}
/// Attempt to convert to an `EdwardsPoint`, using the supplied
/// choice of sign for the `EdwardsPoint`.
///
/// # Inputs
///
    /// * `sign`: a `u8` denoting the desired sign of the resulting
/// `EdwardsPoint`. `0` denotes positive and `1` negative.
///
/// # Return
///
/// * `Some(EdwardsPoint)` if `self` is the \\(u\\)-coordinate of a
/// point on (the Montgomery form of) Curve25519;
///
/// * `None` if `self` is the \\(u\\)-coordinate of a point on the
/// twist of (the Montgomery form of) Curve25519;
///
pub fn to_edwards(&self, sign: u8) -> Option<EdwardsPoint> {
// To decompress the Montgomery u coordinate to an
// `EdwardsPoint`, we apply the birational map to obtain the
// Edwards y coordinate, then do Edwards decompression.
//
// The birational map is y = (u-1)/(u+1).
//
// The exceptional points are the zeros of the denominator,
// i.e., u = -1.
//
// But when u = -1, v^2 = u*(u^2+486662*u+1) = 486660.
//
// Since this is nonsquare mod p, u = -1 corresponds to a point
// on the twist, not the curve, so we can reject it early.
let u = FieldElement::from_bytes(&self.0);
if u == FieldElement::minus_one() { return None; }
let one = FieldElement::one();
let y = &(&u - &one) * &(&u + &one).invert();
let mut y_bytes = y.to_bytes();
y_bytes[31] ^= sign << 7;
CompressedEdwardsY(y_bytes).decompress()
}
}
/// Perform the Elligator2 mapping to a Montgomery point.
///
/// See <https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-10#section-6.7.1>
//
// TODO Determine how much of the hash-to-group API should be exposed after the CFRG
// draft gets into a more polished/accepted state.
#[allow(unused)]
pub fn elligator_encode(r_0: &FieldElement) -> MontgomeryPoint {
let one = FieldElement::one();
let d_1 = &one + &r_0.square2(); /* 2r^2 */
let d = &MONTGOMERY_A_NEG * &(d_1.invert()); /* A/(1+2r^2) */
let d_sq = &d.square();
let au = &MONTGOMERY_A * &d;
let inner = &(d_sq + &au) + &one;
let eps = &d * &inner; /* eps = d^3 + Ad^2 + d */
let (eps_is_sq, _eps) = FieldElement::sqrt_ratio_i(&eps, &one);
let zero = FieldElement::zero();
let Atemp = FieldElement::conditional_select(&MONTGOMERY_A, &zero, eps_is_sq); /* 0, or A if nonsquare*/
let mut u = &d + &Atemp; /* d, or d+A if nonsquare */
u.conditional_negate(!eps_is_sq); /* d, or -d-A if nonsquare */
MontgomeryPoint(u.to_bytes())
}
/// Perform the inverse Elligator2 mapping from a Montgomery point to
/// field element. This algorithm is based on [Elligator:
/// Elliptic-curve points indistinguishable from uniform random
/// strings][elligator], Section 5.3.
///
/// This function is a partial right inverse of `elligator_encode`: if
/// `elligator_decode(&p, sign) == Some(fe)` then `elligator_encode(&fe)
/// == p`.
///
/// This function does _not_ operate in constant time: if `point`
/// cannot be mapped to a field element, the function exits early. In
/// typical usage this function is combined with rejection sampling
/// and called repeatedly until a mappable point is found.
///
/// Note: the output field elements of this function are uniformly
/// distributed among the nonnegative field elements, but only if the
/// input points are also uniformly distributed among all points of
/// the curve. In particular, if the inputs are only selected from
/// members of the prime order group, then the outputs are
/// distinguishable from random.
///
/// # Inputs
///
/// * `point`: the \\(u\\)-coordinate of a point on the curve. Not all
/// points map to field elements.
///
/// * `v_is_negative`: true if the \\(v\\)-coordinate of the point is negative.
///
/// # Returns
///
/// Either `None`, if the point couldn't be mapped, or `Some(fe)` such
/// that `fe` is nonnegative and `elligator_encode(&fe) == point`.
///
/// [elligator]: https://elligator.cr.yp.to/elligator-20130828.pdf
///
#[allow(unused)]
pub fn elligator_decode(point: &MontgomeryPoint, v_is_negative: Choice) -> Option<FieldElement> {
let one = FieldElement::one();
let u = FieldElement::from_bytes(&point.to_bytes());
let u_plus_A = &u + &MONTGOMERY_A;
let uu_plus_uA = &u * &u_plus_A;
// Condition: u is on the curve
let vv = &(&u * &uu_plus_uA) + &u;
let (u_is_on_curve, _v) = FieldElement::sqrt_ratio_i(&vv, &one);
    if !bool::from(u_is_on_curve) {
        return None;
    }
// Condition: u != -A
    if u == MONTGOMERY_A_NEG {
        return None;
    }
// Condition: -2u(u+A) is a square
let uu2_plus_uA2 = &uu_plus_uA + &uu_plus_uA;
// We compute root = sqrt(-1/2u(u+A)) to speed up the calculation.
// This is a square if and only if -2u(u+A) is.
let (is_square, root) = FieldElement::sqrt_ratio_i(&FieldElement::minus_one(),
&uu2_plus_uA2);
    if !bool::from(is_square | root.is_zero()) {
return None;
}
// if !v_is_negative: r = sqrt(-u / 2(u + a)) = root * u
// if v_is_negative: r = sqrt(-(u+A) / 2u) = root * (u + A)
let add = FieldElement::conditional_select(&u, &u_plus_A, v_is_negative);
let r = &root * &add;
// Both r and -r are valid results. Pick the nonnegative one.
let result = FieldElement::conditional_select(&r, &-&r, r.is_negative());
    Some(result)
}
/// A `ProjectivePoint` holds a point on the projective line
/// \\( \mathbb P(\mathbb F\_p) \\), which we identify with the Kummer
/// line of the Montgomery curve.
#[derive(Copy, Clone, Debug)]
struct ProjectivePoint {
pub U: FieldElement,
pub W: FieldElement,
}
impl Identity for ProjectivePoint {
fn identity() -> ProjectivePoint {
ProjectivePoint {
U: FieldElement::one(),
W: FieldElement::zero(),
}
}
}
impl Default for ProjectivePoint {
fn default() -> ProjectivePoint {
ProjectivePoint::identity()
}
}
impl ConditionallySelectable for ProjectivePoint {
fn conditional_select(
a: &ProjectivePoint,
b: &ProjectivePoint,
choice: Choice,
) -> ProjectivePoint {
ProjectivePoint {
U: FieldElement::conditional_select(&a.U, &b.U, choice),
W: FieldElement::conditional_select(&a.W, &b.W, choice),
}
}
}
impl ProjectivePoint {
/// Dehomogenize this point to affine coordinates.
///
/// # Return
///
/// * \\( u = U / W \\) if \\( W \neq 0 \\);
    /// * \\( 0 \\) if \\( W = 0 \\).
pub fn to_affine(&self) -> MontgomeryPoint {
let u = &self.U * &self.W.invert();
MontgomeryPoint(u.to_bytes())
}
}
/// Perform the double-and-add step of the Montgomery ladder.
///
/// Given projective points
/// \\( (U\_P : W\_P) = u(P) \\),
/// \\( (U\_Q : W\_Q) = u(Q) \\),
/// and the affine difference
/// \\( u\_{P-Q} = u(P-Q) \\), set
/// $$
/// (U\_P : W\_P) \gets u([2]P)
/// $$
/// and
/// $$
/// (U\_Q : W\_Q) \gets u(P + Q).
/// $$
fn differential_add_and_double(
P: &mut ProjectivePoint,
Q: &mut ProjectivePoint,
affine_PmQ: &FieldElement,
) {
let t0 = &P.U + &P.W;
let t1 = &P.U - &P.W;
let t2 = &Q.U + &Q.W;
let t3 = &Q.U - &Q.W;
let t4 = t0.square(); // (U_P + W_P)^2 = U_P^2 + 2 U_P W_P + W_P^2
let t5 = t1.square(); // (U_P - W_P)^2 = U_P^2 - 2 U_P W_P + W_P^2
let t6 = &t4 - &t5; // 4 U_P W_P
let t7 = &t0 * &t3; // (U_P + W_P) (U_Q - W_Q) = U_P U_Q + W_P U_Q - U_P W_Q - W_P W_Q
let t8 = &t1 * &t2; // (U_P - W_P) (U_Q + W_Q) = U_P U_Q - W_P U_Q + U_P W_Q - W_P W_Q
let t9 = &t7 + &t8; // 2 (U_P U_Q - W_P W_Q)
let t10 = &t7 - &t8; // 2 (W_P U_Q - U_P W_Q)
let t11 = t9.square(); // 4 (U_P U_Q - W_P W_Q)^2
let t12 = t10.square(); // 4 (W_P U_Q - U_P W_Q)^2
    let t13 = &APLUS2_OVER_FOUR * &t6; // (A + 2) U_P W_P
let t14 = &t4 * &t5; // ((U_P + W_P)(U_P - W_P))^2 = (U_P^2 - W_P^2)^2
let t15 = &t13 + &t5; // (U_P - W_P)^2 + (A + 2) U_P W_P
let t16 = &t6 * &t15; // 4 (U_P W_P) ((U_P - W_P)^2 + (A + 2) U_P W_P)
let t17 = affine_PmQ * &t12; // U_D * 4 (W_P U_Q - U_P W_Q)^2
let t18 = t11; // W_D * 4 (U_P U_Q - W_P W_Q)^2
P.U = t14; // U_{P'} = (U_P + W_P)^2 (U_P - W_P)^2
P.W = t16; // W_{P'} = (4 U_P W_P) ((U_P - W_P)^2 + ((A + 2)/4) 4 U_P W_P)
Q.U = t18; // U_{Q'} = W_D * 4 (U_P U_Q - W_P W_Q)^2
Q.W = t17; // W_{Q'} = U_D * 4 (W_P U_Q - U_P W_Q)^2
}
define_mul_assign_variants!(LHS = MontgomeryPoint, RHS = Scalar);
define_mul_variants!(LHS = MontgomeryPoint, RHS = Scalar, Output = MontgomeryPoint);
define_mul_variants!(LHS = Scalar, RHS = MontgomeryPoint, Output = MontgomeryPoint);
/// Multiply this `MontgomeryPoint` by a `Scalar`.
impl<'a, 'b> Mul<&'b Scalar> for &'a MontgomeryPoint {
type Output = MontgomeryPoint;
/// Given `self` \\( = u\_0(P) \\), and a `Scalar` \\(n\\), return \\( u\_0([n]P) \\).
fn mul(self, scalar: &'b Scalar) -> MontgomeryPoint {
// Algorithm 8 of Costello-Smith 2017
let affine_u = FieldElement::from_bytes(&self.0);
let mut x0 = ProjectivePoint::identity();
let mut x1 = ProjectivePoint {
U: affine_u,
W: FieldElement::one(),
};
let bits: [i8; 256] = scalar.bits();
for i in (0..255).rev() {
let choice: u8 = (bits[i + 1] ^ bits[i]) as u8;
debug_assert!(choice == 0 || choice == 1);
ProjectivePoint::conditional_swap(&mut x0, &mut x1, choice.into());
differential_add_and_double(&mut x0, &mut x1, &affine_u);
}
ProjectivePoint::conditional_swap(&mut x0, &mut x1, Choice::from(bits[0] as u8));
x0.to_affine()
}
}
impl<'b> MulAssign<&'b Scalar> for MontgomeryPoint {
fn mul_assign(&mut self, scalar: &'b Scalar) {
*self = (self as &MontgomeryPoint) * scalar;
}
}
impl<'a, 'b> Mul<&'b MontgomeryPoint> for &'a Scalar {
type Output = MontgomeryPoint;
fn mul(self, point: &'b MontgomeryPoint) -> MontgomeryPoint {
point * self
}
}
// ------------------------------------------------------------------------
// Tests
// ------------------------------------------------------------------------
#[cfg(test)]
mod test {
use super::*;
use constants;
use core::convert::TryInto;
use rand_core::{OsRng, RngCore};
#[test]
fn identity_in_different_coordinates() {
let id_projective = ProjectivePoint::identity();
let id_montgomery = id_projective.to_affine();
assert!(id_montgomery == MontgomeryPoint::identity());
}
#[test]
fn identity_in_different_models() {
assert!(EdwardsPoint::identity().to_montgomery() == MontgomeryPoint::identity());
}
#[test]
#[cfg(feature = "serde")]
fn serde_bincode_basepoint_roundtrip() {
use bincode;
let encoded = bincode::serialize(&constants::X25519_BASEPOINT).unwrap();
let decoded: MontgomeryPoint = bincode::deserialize(&encoded).unwrap();
assert_eq!(encoded.len(), 32);
assert_eq!(decoded, constants::X25519_BASEPOINT);
let raw_bytes = constants::X25519_BASEPOINT.as_bytes();
let bp: MontgomeryPoint = bincode::deserialize(raw_bytes).unwrap();
assert_eq!(bp, constants::X25519_BASEPOINT);
}
/// Test Montgomery -> Edwards on the X/Ed25519 basepoint
#[test]
fn basepoint_montgomery_to_edwards() {
// sign bit = 0 => basepoint
assert_eq!(
constants::ED25519_BASEPOINT_POINT,
constants::X25519_BASEPOINT.to_edwards(0).unwrap()
);
// sign bit = 1 => minus basepoint
assert_eq!(
- constants::ED25519_BASEPOINT_POINT,
constants::X25519_BASEPOINT.to_edwards(1).unwrap()
);
}
/// Test Edwards -> Montgomery on the X/Ed25519 basepoint
#[test]
fn basepoint_edwards_to_montgomery() {
assert_eq!(
constants::ED25519_BASEPOINT_POINT.to_montgomery(),
constants::X25519_BASEPOINT
);
}
/// Check that Montgomery -> Edwards fails for points on the twist.
#[test]
fn montgomery_to_edwards_rejects_twist() {
let one = FieldElement::one();
// u = 2 corresponds to a point on the twist.
let two = MontgomeryPoint((&one+&one).to_bytes());
assert!(two.to_edwards(0).is_none());
// u = -1 corresponds to a point on the twist, but should be
// checked explicitly because it's an exceptional point for the
// birational map. For instance, libsignal will accept it.
let minus_one = MontgomeryPoint((-&one).to_bytes());
assert!(minus_one.to_edwards(0).is_none());
}
#[test]
fn eq_defined_mod_p() {
let mut u18_bytes = [0u8; 32]; u18_bytes[0] = 18;
let u18 = MontgomeryPoint(u18_bytes);
let u18_unred = MontgomeryPoint([255; 32]);
assert_eq!(u18, u18_unred);
}
#[test]
fn montgomery_ladder_matches_edwards_scalarmult() {
let mut csprng: OsRng = OsRng;
let s: Scalar = Scalar::random(&mut csprng);
let p_edwards: EdwardsPoint = &constants::ED25519_BASEPOINT_TABLE * &s;
let p_montgomery: MontgomeryPoint = p_edwards.to_montgomery();
let expected = s * p_edwards;
let result = s * p_montgomery;
assert_eq!(result, expected.to_montgomery())
}
const ELLIGATOR_CORRECT_OUTPUT: [u8; 32] = [
0x5f, 0x35, 0x20, 0x00, 0x1c, 0x6c, 0x99, 0x36, 0xa3, 0x12, 0x06, 0xaf, 0xe7, 0xc7, 0xac,
0x22, 0x4e, 0x88, 0x61, 0x61, 0x9b, 0xf9, 0x88, 0x72, 0x44, 0x49, 0x15, 0x89, 0x9d, 0x95,
0xf4, 0x6e,
];
#[test]
#[cfg(feature = "std")] // Vec
fn montgomery_elligator_correct() {
let bytes: std::vec::Vec<u8> = (0u8..32u8).collect();
let bits_in: [u8; 32] = (&bytes[..]).try_into().expect("Range invariant broken");
let fe = FieldElement::from_bytes(&bits_in);
let eg = elligator_encode(&fe);
assert_eq!(eg.to_bytes(), ELLIGATOR_CORRECT_OUTPUT);
}
#[test]
#[cfg(feature = "std")] // Vec
fn montgomery_elligator_decode_correct() {
let bytes: std::vec::Vec<u8> = (0u8..32u8).collect();
let bits_in: [u8; 32] = (&bytes[..]).try_into().expect("Range invariant broken");
let fe = FieldElement::from_bytes(&bits_in);
let eg = MontgomeryPoint(ELLIGATOR_CORRECT_OUTPUT);
let result = elligator_decode(&eg, 0.into());
assert_eq!(result, Some(fe));
}
#[test]
fn montgomery_elligator_encode_decode() {
for _i in 0..4096 {
let mut bits = [0u8; 32];
OsRng.fill_bytes(&mut bits);
let fe = FieldElement::from_bytes(&bits);
let eg = elligator_encode(&fe);
// Up to four different field values may encode to a
// single MontgomeryPoint. Firstly, the MontgomeryPoint
// loses the v-coordinate, so it may represent two
// different curve points, (u, v) and (u, -v). The
// elligator_decode function is given a sign argument to
// indicate which one is meant.
//
// Second, for each curve point (except zero), two
// distinct field elements encode to it, r and -r. The
// elligator_decode function returns the nonnegative one.
//
// We check here that one of these four values is equal to
// the original input to elligator_encode.
let mut found = false;
for i in 0..=1 {
let decoded = elligator_decode(&eg, i.into())
.expect("Elligator decode failed");
for j in &[fe, -&fe] {
found |= decoded == *j;
}
}
assert!(found);
}
}
#[test]
fn montgomery_elligator_decode_encode() {
for i in 0..4096 {
let mut bits = [0u8; 32];
OsRng.fill_bytes(&mut bits);
let point = MontgomeryPoint(bits);
let result = elligator_decode(&point, ((i % 2) as u8).into());
if let Some(fe) = result {
let encoded = elligator_encode(&fe);
assert_eq!(encoded, point);
}
}
}
/// Test that Elligator decoding will fail on a point that is not on the curve.
#[test]
fn montgomery_elligator_decode_noncurve() {
let one = FieldElement::one();
// u = 2 corresponds to a point on the twist.
|
for i in 0..=1 {
let result = elligator_decode(&two, i.into());
assert_eq!(None, result);
}
}
#[test]
fn montgomery_elligator_zero_zero() {
let zero = [0u8; 32];
let fe = FieldElement::from_bytes(&zero);
let eg = elligator_encode(&fe);
assert_eq!(eg.to_bytes(), zero);
}
#[test]
fn montgomery_elligator_decode_zero_zero() {
let zero = [0u8; 32];
let eg = MontgomeryPoint(zero);
let fe = elligator_decode(&eg, 0.into()).expect("Elligator decode failed");
assert_eq!(fe.to_bytes(), zero);
let fe_neg = elligator_decode(&eg, 1.into()).expect("Elligator decode failed");
assert_eq!(fe_neg.to_bytes(), zero);
}
}
|
let two = MontgomeryPoint((&one+&one).to_bytes());
|
workspace.bzl
|
"""Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "5020e104a1349a0ae6532b007b48c68b8f64c049"
LLVM_SHA256 = "3113dbc5f7b3e6405375eedfe95e220268bcc4818c8d8453a23ef00f82d4b172"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:BUILD.bazel",
patch_file = "//third_party/llvm:macos_build_fix.patch",
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
|
)
|
|
security_flavor.py
|
class SecurityFlavor(basestring):
"""
any|none|never|krb5|ntlm|sys
Possible values:
<ul>
<li> "any" - Any,
<li> "none" - Anonymous Access Allowed If Security Type
Not Already Listed,
<li> "never" - Never,
<li> "krb5" - Kerberos 5 Authentication,
<li> "ntlm" - CIFS NTLM,
|
@staticmethod
def get_api_name():
return "security-flavor"
|
<li> "sys" - NFS AUTH_SYS,
<li> "spinauth" - SpinAuth
</ul>
"""
|
back_to_back_optimizer.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
# Back_To_Back Optimizer.
# Collapse consecutive nodes into 1 node if possible.
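# For example (schematic, not actual graph syntax):
#   X -> Cast(float32) -> Cast(float32) -> Y   collapses to   X -> Cast(float32) -> Y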
from __future__ import unicode_literals
from oneflow.python.onnx.util import ONNX_DTYPE_NAMES # lgtm[py/unsafe-cyclic-import]
from .optimizer_base import GraphOptimizerBase # lgtm[py/unsafe-cyclic-import]
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring,unused-variable,arguments-differ
_func_map = {}
def _register_func(op_type):
def _internal_fun(func):
_func_map[op_type] = func
return func
return _internal_fun
class BackToBackOptimizer(GraphOptimizerBase):
"""Remove back-to-back nodes e.g. 'Cast'
"""
def __init__(self): # pylint: disable=useless-super-delegation
super(BackToBackOptimizer, self).__init__()
def _Optimize(self, graph):
return self._ApplyOptimization(graph, self._OptimizeAtCurrentGraphLevel)
def _OptimizeAtCurrentGraphLevel(self, g):
for optype, handler in _func_map.items():
# candidate nodes for removal/optimization
nodes = [n for n in g.get_nodes() if n.type in optype]
# topological sort of candidates
# simplifying assumption for back-to-back-optimizer is
# the op_types have 1 input, 1 output, but multiple consumers
has_dependencies = set()
consumer_node_ids = {n.output[0]: [] for n in nodes}
for n in nodes:
if n.input[0] in consumer_node_ids:
consumer_node_ids[n.input[0]].extend([n])
has_dependencies.add(n.output[0])
# q = starting nodes with no dependencies
q = list(set(consumer_node_ids.keys()) - has_dependencies)
while q:
nodeid = q.pop(0)
node = g.get_node_by_output(nodeid, False)
consumer_nodes = consumer_node_ids[nodeid]
if len(consumer_nodes) > 0:
all_consumers = g.FindOutputConsumers(node.output[0])
if len(all_consumers) != len(consumer_nodes):
# if first node is used elsewhere, skip
continue
if set(node.output) & set(g.outputs):
# if this node is part of graph outputs, skip
continue
q2 = handler(g, node, consumer_nodes)
# add more nodes which can now be processed
q.extend(q2)
return g
@staticmethod
@_register_func("Cast")
def _OptimizeCast(g, node, consumer_nodes):
|
@staticmethod
@_register_func("Transpose")
def _OptimizeTranspose(g, node, consumer_nodes):
"""remove long chains of transpose ops"""
t1 = list(node.get_attr("perm").ints)
q2 = []
for node2 in consumer_nodes:
node2.input[0] = node.input[0]
t2 = list(node2.get_attr("perm").ints)
new_perm = [t1[i] for i in t2]
# check if node2 can be removed. otherwise only update
if new_perm == list(range(len(t2))):
# both nodes can be deleted
shape = g.get_shape(node2.output[0])
dtype = g.get_dtype(node2.output[0])
node2_consumers = g.FindOutputConsumers(node2.output[0])
g.ReplaceAllInputs(node2_consumers, node2.output[0], node.input[0])
g.RemoveNode(node2.name)
if set(node2.output) & set(g.outputs):
g.MakeNode(
"Identity",
[node.input[0]],
outputs=node2.output,
shapes=[shape],
dtypes=[dtype],
)
else:
node2.set_attr("perm", [t1[i] for i in t2])
q2.append(node2.output[0])
g.RemoveNode(node.name)
return q2
@staticmethod
@_register_func(("Squeeze", "Unsqueeze"))
def _OptimizeSqueezeUnsqueeze(g, node, consumer_nodes):
"""remove pairs of squeeze-unsqueeze nodes"""
if node.type != "Squeeze" or len(consumer_nodes) != 1:
# no need to return any value, since not removing long chain of nodes
return []
node2 = consumer_nodes[0]
if node2.type != "Unsqueeze":
return []
axis1 = node.get_attr("axes").ints
axis2 = node2.get_attr("axes").ints
# if squeeze followed by unsqueeze is on diff axes, skip
if axis1 != axis2:
return []
# if unsqueeze output is graph output, skip
if set(node2.output) & set(g.outputs):
return []
node2_consumers = g.FindOutputConsumers(node2.output[0])
g.ReplaceAllInputs(node2_consumers, node2.output[0], node.input[0])
g.RemoveNode(node.name)
g.RemoveNode(node2.name)
return []
|
"""remove long chains of cast ops"""
q2 = []
type1 = node.get_attr("to").i
        type1_name = ONNX_DTYPE_NAMES.get(type1, "")
# if parent node is cast node, and same type, delete this one
pnode = node.inputs[0]
if pnode.type == "Cast":
type2 = pnode.get_attr("to").i
if type1 == type2:
for node2 in consumer_nodes:
node2.input[0] = node.input[0]
q2.append(node2.output[0])
g.RemoveNode(node.name)
return q2
# otherwise, check consumer cast nodes for a target type
# that contains more information than current type
can_reduce = True
for node2 in consumer_nodes:
type2 = node2.get_attr("to").i
            type2_name = ONNX_DTYPE_NAMES.get(type2, "")
if "float" in type1_name or type1_name == "double":
# high information type. ok to eliminate
pass
elif "int" in type1_name:
# int* and uint* are mix of high and low information.
# for safety, keep the current node, unless type2 is bool,
# in which case it's ok to remove node
if type1 != type2 and type2_name != "bool":
can_reduce = False
elif type1_name == "bool":
# bool is low information, so don't eliminate
if type1 != type2:
can_reduce = False
elif type1_name == "string":
# can always remove string
pass
else:
# some odd type, keep node
can_reduce = False
q2.append(node2.output[0])
if can_reduce:
for node2 in consumer_nodes:
node2.input[0] = node.input[0]
g.RemoveNode(node.name)
return q2
|
doc.go
|
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
// Package budgets is an auto-generated package for the
// Cloud Billing Budget API.
//
// The Cloud Billing Budget API stores Cloud Billing budgets, which define a
// budget plan and the rules to execute as spend is tracked against that
// plan.
//
// NOTE: This package is in beta. It is not stable, and may be subject to changes.
//
// Example usage
//
// To get started with this package, create a client.
// ctx := context.Background()
// c, err := budgets.NewBudgetClient(ctx)
// if err != nil {
// // TODO: Handle error.
// }
// defer c.Close()
//
// The client will use your default application credentials. Clients should be reused instead of created as needed.
// The methods of Client are safe for concurrent use by multiple goroutines.
// The returned client must be Closed when it is done being used.
//
// Using the Client
//
// The following is an example of making an API call with the newly created client.
//
// ctx := context.Background()
// c, err := budgets.NewBudgetClient(ctx)
// if err != nil {
// // TODO: Handle error.
// }
// defer c.Close()
//
// req := &budgetspb.CreateBudgetRequest{
// // TODO: Fill request struct fields.
// // See https://pkg.go.dev/google.golang.org/genproto/googleapis/cloud/billing/budgets/v1beta1#CreateBudgetRequest.
// }
// resp, err := c.CreateBudget(ctx, req)
// if err != nil {
// // TODO: Handle error.
// }
// // TODO: Use resp.
// _ = resp
//
// Use of Context
//
// The ctx passed to NewClient is used for authentication requests and
// for creating the underlying connection, but is not used for subsequent calls.
// Individual methods on the client use the ctx given to them.
//
// To close the open connection, use the Close() method.
//
// For information about setting deadlines, reusing contexts, and more
// please visit https://pkg.go.dev/cloud.google.com/go.
package budgets // import "cloud.google.com/go/billing/budgets/apiv1beta1"
import (
"context"
"os"
"runtime"
"strconv"
"strings"
"unicode"
"google.golang.org/api/option"
"google.golang.org/grpc/metadata"
)
// For more information on implementing a client constructor hook, see
// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
type clientHookParams struct{}
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
const versionClient = "20220210"
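// insertMetadata returns a copy of ctx whose outgoing gRPC metadata has been
// merged with each of the given mds.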
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context
|
func checkDisableDeadlines() (bool, error) {
raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE")
if !ok {
return false, nil
}
b, err := strconv.ParseBool(raw)
return b, err
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-billing",
"https://www.googleapis.com/auth/cloud-platform",
}
}
// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
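// For example, given the parsing below: "go1.17.6" -> "1.17.6" and
// "go1.18beta1" -> "1.18.0-beta1".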
func versionGo() string {
const develPrefix = "devel +"
s := runtime.Version()
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}
notSemverRune := func(r rune) bool {
return !strings.ContainsRune("0123456789.", r)
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return "UNKNOWN"
}
|
{
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}
|
module.go
|
package ast
import (
"fmt"
"github.com/ontio/wast-parser/parser"
)
type Module struct {
Name OptionId
Kind ModuleKind
}
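// Parse reads a `(module ...)` form: either `(module binary "..." ...)`,
// whose quoted strings become raw bytes, or a textual module whose
// parenthesized fields are parsed one by one.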
func (self *Module) Parse(ps *parser.ParserBuffer) error {
err := ps.ExpectKeywordMatch("module")
if err != nil {
return err
}
self.Name.Parse(ps)
if matchKeyword(ps.PeekToken(), "binary") {
|
}
var data [][]byte
for !ps.Empty() {
str, err := ps.ExpectString()
if err != nil {
return err
}
data = append(data, []byte(str))
}
self.Kind = ModuleKindBinary{Bins: data}
} else {
var fields []ModuleField
for !ps.Empty() {
err = ps.Parens(func(ps *parser.ParserBuffer) error {
field, err := parseModuleField(ps)
if err != nil {
return err
}
fields = append(fields, field)
return nil
})
if err != nil {
return err
}
}
self.Kind = ModuleKindText{Fields: fields}
}
return nil
}
type ModuleKind interface {
moduleKind()
}
type implModuleKind struct{}
func (self implModuleKind) moduleKind() {}
type ModuleKindText struct {
implModuleKind
Fields []ModuleField
}
type ModuleKindBinary struct {
implModuleKind
Bins [][]byte
}
type ModuleField interface {
moduleField()
}
type StartField struct {
implModuleField
Index Index
}
type implModuleField struct{}
func (self implModuleField) moduleField() {}
func parseModuleField(ps *parser.ParserBuffer) (ModuleField, error) {
kw, err := ps.PeekKeyword()
if err != nil {
return nil, err
}
var field ModuleField
switch kw {
case "type":
var ty Type
err = ty.Parse(ps)
field = ty
case "import":
var imp Import
err = imp.Parse(ps)
field = imp
case "func":
var fun Func
err = fun.Parse(ps)
field = fun
case "table":
var table Table
err = table.Parse(ps)
field = table
case "memory":
var mem Memory
err = mem.Parse(ps)
field = mem
case "global":
var global Global
err = global.Parse(ps)
field = global
case "export":
var val Export
err = val.Parse(ps)
field = val
case "start":
var val StartField
err = val.Index.Parse(ps)
field = val
case "elem":
var val Elem
err = val.Parse(ps)
field = val
case "data":
var val Data
err = val.Parse(ps)
field = val
default:
return nil, fmt.Errorf("invalid module field keyword: %s", kw)
}
if err != nil {
return nil, err
}
return field, nil
}
|
err := ps.ExpectKeywordMatch("binary")
if err != nil {
			return err
|
log.rs
|
use super::{Expression, Rule};
use nftnl_sys::{self as sys};
use std::{
ffi::CString,
os::raw::c_char
};
bitflags::bitflags! {
pub struct LogFlags: u8 {
const TCPSEQ = 0x01;
const TCPOPT = 0x02;
const IPOPT = 0x04;
const UID = 0x08;
const MACDECODE = 0x20;
const MASK = 0x2f;
}
}
pub struct Log {
group: Option<u16>,
snaplen: Option<u32>,
qthreshold: Option<u16>,
level: Option<i32>,
flags: Option<LogFlags>,
prefix: Option<CString>,
}
impl Log {
pub fn new() -> Self {
Self {
group: None,
snaplen: None,
qthreshold: None,
level: None,
flags: None,
prefix: None,
}
}
pub fn group(self, group: u16) -> Self {
Log {
group: Some(group),
..self
}
}
pub fn snaplen(self, snaplen: u32) -> Self {
Log {
snaplen: Some(snaplen),
..self
}
}
pub fn
|
(self, qthreshold: u16) -> Self {
Log {
qthreshold: Some(qthreshold),
..self
}
}
pub fn level(self, level: i32) -> Self {
Log {
level: Some(level),
..self
}
}
pub fn flags(self, flags: LogFlags) -> Self {
Log {
flags: Some(flags),
..self
}
}
pub fn prefix(self, prefix: &CString) -> Self {
Log {
prefix: Some(prefix.clone()),
..self
}
}
}
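// A minimal usage sketch (the values here are illustrative):
//
// let log = Log::new()
//     .group(1)
//     .snaplen(128)
//     .prefix(&CString::new("nft-log: ").unwrap());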
impl Expression for Log {
fn to_expr(&self, _rule: &Rule) -> *mut sys::nftnl_expr {
unsafe {
let expr = try_alloc!(sys::nftnl_expr_alloc(b"log\0" as *const _ as *const c_char));
match self.prefix.as_deref() {
Some(prefix) => {
sys::nftnl_expr_set_str(
expr,
sys::NFTNL_EXPR_LOG_PREFIX as u16,
prefix.as_ref().as_ptr(),
);
},
_ => {},
};
match self.group {
Some(group) => {
sys::nftnl_expr_set_u16(
expr,
sys::NFTNL_EXPR_LOG_GROUP as u16,
group as u16,
);
match self.snaplen {
Some(snaplen) => {
sys::nftnl_expr_set_u32(
expr,
sys::NFTNL_EXPR_LOG_SNAPLEN as u16,
snaplen as u32,
);
},
_ => {},
};
match self.qthreshold {
Some(qthreshold) => {
sys::nftnl_expr_set_u16(
expr,
sys::NFTNL_EXPR_LOG_QTHRESHOLD as u16,
qthreshold as u16,
);
},
_ => {},
};
},
_ => {
match self.level {
Some(level) => {
sys::nftnl_expr_set_u32(
expr,
sys::NFTNL_EXPR_LOG_LEVEL as u16,
level as u32,
);
},
_ => {},
};
match self.flags {
Some(flags) => {
sys::nftnl_expr_set_u32(
expr,
sys::NFTNL_EXPR_LOG_FLAGS as u16,
flags.bits as u32,
);
},
_ => {},
};
},
};
expr
}
}
}
|
qthreshold
|
rsets_test.go
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rsets
import (
"testing"
. "github.com/pingcap/check"
"github.com/pingcap/tidb/context"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/field"
"github.com/pingcap/tidb/model"
"github.com/pingcap/tidb/plan"
"github.com/pingcap/tidb/util/format"
)
func TestT(t *testing.T) {
TestingT(t)
}
type testRowData struct {
id int64
data []interface{}
}
type testTablePlan struct {
rows []*testRowData
fields []string
filter bool
retNil bool
cursor int
}
var testData = []*testRowData{
{1, []interface{}{10, "hello"}},
{2, []interface{}{10, "hello"}},
{3, []interface{}{10, "hello"}},
{4, []interface{}{40, "hello"}},
{6, []interface{}{60, "hello"}},
}
func newTestTablePlan(rows []*testRowData, fields []string) *testTablePlan
|
func (p *testTablePlan) Do(ctx context.Context, f plan.RowIterFunc) error {
if p.retNil {
return nil
}
for _, d := range p.rows {
if more, err := f(d.id, d.data); !more || err != nil {
return err
}
}
return nil
}
func (p *testTablePlan) Explain(w format.Formatter) {}
func (p *testTablePlan) GetFields() []*field.ResultField {
var ret []*field.ResultField
for _, fn := range p.fields {
resultField := &field.ResultField{Name: fn, TableName: "t"}
resultField.Col.Name = model.NewCIStr(fn)
ret = append(ret, resultField)
}
return ret
}
func (p *testTablePlan) Filter(ctx context.Context, expr expression.Expression) (plan.Plan, bool, error) {
return p, p.filter, nil
}
func (p *testTablePlan) SetFilter(filter bool) {
p.filter = filter
}
func (p *testTablePlan) SetRetNil(retNil bool) {
p.retNil = retNil
}
func (p *testTablePlan) Next(ctx context.Context) (row *plan.Row, err error) {
if p.cursor == len(p.rows) || p.retNil {
return
}
row = &plan.Row{Data: p.rows[p.cursor].data}
p.cursor++
return
}
func (p *testTablePlan) Close() error {
p.cursor = 0
return nil
}
var _ = Suite(&testRsetsSuite{})
type testRsetsSuite struct {
r Recordset
names []string
}
func (s *testRsetsSuite) SetUpSuite(c *C) {
names := []string{"id", "name"}
tblPlan := newTestTablePlan(testData, names)
s.r = Recordset{Ctx: nil, Plan: tblPlan}
s.names = names
}
func (s *testRsetsSuite) TestGetFields(c *C) {
fields := s.r.GetFields()
c.Assert(fields, HasLen, 2)
for i, fld := range fields {
v, ok := fld.(*field.ResultField)
c.Assert(ok, IsTrue)
c.Assert(v.Name, Equals, s.names[i])
}
}
func (s *testRsetsSuite) TestDo(c *C) {
var checkDatas [][]interface{}
f := func(data []interface{}) (bool, error) {
checkDatas = append(checkDatas, data)
return true, nil
}
err := s.r.Do(f)
c.Assert(err, IsNil)
c.Assert(checkDatas, HasLen, 5)
for i, v := range checkDatas {
c.Assert(v, HasLen, 2)
c.Assert(v, DeepEquals, testData[i].data)
}
}
func (s *testRsetsSuite) TestFields(c *C) {
fields, err := s.r.Fields()
c.Assert(fields, HasLen, 2)
c.Assert(err, IsNil)
for i, fld := range fields {
c.Assert(fld.Name, Equals, s.names[i])
}
}
func (s *testRsetsSuite) TestFirstRow(c *C) {
row, err := s.r.FirstRow()
c.Assert(row, HasLen, 2)
c.Assert(err, IsNil)
c.Assert(row, DeepEquals, testData[0].data)
src := s.r.Plan.(*testTablePlan)
src.SetRetNil(true)
row, err = s.r.FirstRow()
c.Assert(row, HasLen, 0)
c.Assert(err, IsNil)
src.SetRetNil(false)
}
func (s *testRsetsSuite) TestRows(c *C) {
rows, err := s.r.Rows(2, 1)
c.Assert(rows, HasLen, 2)
c.Assert(err, IsNil)
for i, row := range rows {
c.Assert(row, HasLen, 2)
c.Assert(row, DeepEquals, testData[i+1].data)
}
rows, err = s.r.Rows(-1, 1)
c.Assert(rows, HasLen, 4)
c.Assert(err, IsNil)
rows, err = s.r.Rows(0, 1)
c.Assert(rows, HasLen, 0)
c.Assert(err, IsNil)
}
|
{
return &testTablePlan{rows: rows, fields: fields}
}
|
transforms.py
|
""" COCO transforms (quick and dirty)
Hacked together by Ross Wightman
"""
import torch
from PIL import Image
import numpy as np
import random
import math
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)
IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)
class ImageToNumpy:
def __call__(self, pil_img, annotations: dict):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.moveaxis(np_img, 2, 0) # HWC to CHW
return np_img, annotations
class ImageToTensor:
def __init__(self, dtype=torch.float32):
self.dtype = dtype
def __call__(self, pil_img, annotations: dict):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.moveaxis(np_img, 2, 0) # HWC to CHW
return torch.from_numpy(np_img).to(dtype=self.dtype), annotations
def _pil_interp(method):
if method == 'bicubic':
return Image.BICUBIC
elif method == 'lanczos':
return Image.LANCZOS
elif method == 'hamming':
return Image.HAMMING
else:
# default bilinear, do we want to allow nearest?
return Image.BILINEAR
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
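# Functions with a trailing underscore modify `boxes` in place (PyTorch-style
# naming); the underscore-free clip_boxes returns a clipped copy.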
def clip_boxes_(boxes, img_size):
height, width = img_size
clip_upper = np.array([height, width] * 2, dtype=boxes.dtype)
np.clip(boxes, 0, clip_upper, out=boxes)
def clip_boxes(boxes, img_size):
clipped_boxes = boxes.copy()
clip_boxes_(clipped_boxes, img_size)
return clipped_boxes
def _size_tuple(size):
if isinstance(size, int):
return size, size
else:
assert len(size) == 2
return size
class ResizePad:
def __init__(self, target_size: int, interpolation: str = 'bilinear', fill_color: tuple = (0, 0, 0)):
self.target_size = _size_tuple(target_size)
self.interpolation = interpolation
self.fill_color = fill_color
def __call__(self, img, anno: dict):
width, height = img.size
img_scale_y = self.target_size[0] / height
img_scale_x = self.target_size[1] / width
img_scale = min(img_scale_y, img_scale_x)
scaled_h = int(height * img_scale)
scaled_w = int(width * img_scale)
new_img = Image.new("RGB", (self.target_size[1], self.target_size[0]), color=self.fill_color)
interp_method = _pil_interp(self.interpolation)
img = img.resize((scaled_w, scaled_h), interp_method)
new_img.paste(img)
if 'bbox' in anno:
# FIXME haven't tested this path since not currently using dataset annotations for train/eval
bbox = anno['bbox']
bbox[:, :4] *= img_scale
clip_boxes_(bbox, (scaled_h, scaled_w))
valid_indices = (bbox[:, :2] < bbox[:, 2:4]).all(axis=1)
anno['bbox'] = bbox[valid_indices, :]
anno['cls'] = anno['cls'][valid_indices]
anno['img_scale'] = 1. / img_scale # back to original
return new_img, anno
class RandomResizePad:
def __init__(self, target_size: int, scale: tuple = (0.1, 2.0), interpolation: str = 'random',
fill_color: tuple = (0, 0, 0)):
self.target_size = _size_tuple(target_size)
self.scale = scale
if interpolation == 'random':
self.interpolation = _RANDOM_INTERPOLATION
else:
self.interpolation = _pil_interp(interpolation)
self.fill_color = fill_color
def _get_params(self, img):
# Select a random scale factor.
scale_factor = random.uniform(*self.scale)
scaled_target_height = scale_factor * self.target_size[0]
scaled_target_width = scale_factor * self.target_size[1]
# Recompute the accurate scale_factor using rounded scaled image size.
width, height = img.size
img_scale_y = scaled_target_height / height
img_scale_x = scaled_target_width / width
img_scale = min(img_scale_y, img_scale_x)
# Select non-zero random offset (x, y) if scaled image is larger than target size
scaled_h = int(height * img_scale)
scaled_w = int(width * img_scale)
offset_y = scaled_h - self.target_size[0]
offset_x = scaled_w - self.target_size[1]
offset_y = int(max(0.0, float(offset_y)) * random.uniform(0, 1))
offset_x = int(max(0.0, float(offset_x)) * random.uniform(0, 1))
return scaled_h, scaled_w, offset_y, offset_x, img_scale
def __call__(self, img, anno: dict):
scaled_h, scaled_w, offset_y, offset_x, img_scale = self._get_params(img)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
img = img.resize((scaled_w, scaled_h), interpolation)
right, lower = min(scaled_w, offset_x + self.target_size[1]), min(scaled_h, offset_y + self.target_size[0])
img = img.crop((offset_x, offset_y, right, lower))
new_img = Image.new("RGB", (self.target_size[1], self.target_size[0]), color=self.fill_color)
new_img.paste(img)
if 'bbox' in anno:
# FIXME not fully tested
bbox = anno['bbox'].copy() # FIXME copy for debugger inspection, back to inplace
bbox[:, :4] *= img_scale
box_offset = np.stack([offset_y, offset_x] * 2)
bbox -= box_offset
clip_boxes_(bbox, (scaled_h, scaled_w))
valid_indices = (bbox[:, :2] < bbox[:, 2:4]).all(axis=1)
anno['bbox'] = bbox[valid_indices, :]
anno['cls'] = anno['cls'][valid_indices]
anno['img_scale'] = 1. / img_scale # back to original
return new_img, anno
class RandomFlip:
def __init__(self, horizontal=True, vertical=False, prob=0.5):
self.horizontal = horizontal
self.vertical = vertical
self.prob = prob
def _get_params(self):
do_horizontal = random.random() < self.prob if self.horizontal else False
do_vertical = random.random() < self.prob if self.vertical else False
return do_horizontal, do_vertical
def __call__(self, img, annotations: dict):
do_horizontal, do_vertical = self._get_params()
width, height = img.size
def _fliph(bbox):
x_max = width - bbox[:, 1]
x_min = width - bbox[:, 3]
bbox[:, 1] = x_min
bbox[:, 3] = x_max
def _flipv(bbox):
y_max = height - bbox[:, 0]
y_min = height - bbox[:, 2]
bbox[:, 0] = y_min
bbox[:, 2] = y_max
if do_horizontal and do_vertical:
img = img.transpose(Image.ROTATE_180)
if 'bbox' in annotations:
_fliph(annotations['bbox'])
_flipv(annotations['bbox'])
elif do_horizontal:
|
elif do_vertical:
img = img.transpose(Image.FLIP_TOP_BOTTOM)
if 'bbox' in annotations:
_flipv(annotations['bbox'])
return img, annotations
def resolve_fill_color(fill_color, img_mean=IMAGENET_DEFAULT_MEAN):
    if isinstance(fill_color, tuple):
        assert len(fill_color) == 3
else:
try:
int_color = int(fill_color)
fill_color = (int_color,) * 3
except ValueError:
assert fill_color == 'mean'
fill_color = tuple([int(round(255 * x)) for x in img_mean])
return fill_color
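# For example, with IMAGENET_DEFAULT_MEAN above:
# resolve_fill_color('mean') -> (124, 116, 104)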
class Compose:
def __init__(self, transforms: list):
self.transforms = transforms
def __call__(self, img, annotations: dict):
for t in self.transforms:
img, annotations = t(img, annotations)
return img, annotations
def transforms_coco_eval(
img_size=224,
interpolation='bilinear',
use_prefetcher=False,
fill_color='mean',
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD):
fill_color = resolve_fill_color(fill_color, mean)
image_tfl = [
ResizePad(
target_size=img_size, interpolation=interpolation, fill_color=fill_color),
ImageToNumpy(),
]
assert use_prefetcher, "Only supporting prefetcher usage right now"
image_tf = Compose(image_tfl)
return image_tf
def transforms_coco_train(
img_size=224,
interpolation='random',
use_prefetcher=False,
fill_color='mean',
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD):
fill_color = resolve_fill_color(fill_color, mean)
image_tfl = [
RandomFlip(horizontal=True, prob=0.5),
RandomResizePad(
target_size=img_size, interpolation=interpolation, fill_color=fill_color),
ImageToNumpy(),
]
assert use_prefetcher, "Only supporting prefetcher usage right now"
image_tf = Compose(image_tfl)
return image_tf
|
img = img.transpose(Image.FLIP_LEFT_RIGHT)
if 'bbox' in annotations:
_fliph(annotations['bbox'])
|
lib.rs
|
use std::cell::RefCell;
use std::convert::TryInto;
use std::hash::{BuildHasher, Hasher};
use std::ops::BitXor;
use xorshift::Xorshift256;
#[derive(Debug, Clone)]
pub struct RandomState {
key: u64,
}
impl RandomState {
pub fn new() -> Self {
thread_local!( static KEY: RefCell<Xorshift256> = RefCell::new(Xorshift256::new_with_date()) );
KEY.with(|key| Self {
|
key: key.borrow_mut().gen(),
})
}
}
impl BuildHasher for RandomState {
type Hasher = FxHasher;
fn build_hasher(&self) -> Self::Hasher {
FxHasher::new_with_key(self.key)
}
}
#[derive(Debug)]
pub struct FxHasher {
hash: u64,
}
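// Multiplicative mixing constant used by FxHash (the non-cryptographic hash
// from Firefox, later adopted by rustc).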
const K: u64 = 0x517cc1b727220a95;
impl FxHasher {
pub fn new() -> Self {
Self { hash: 0 }
}
pub fn new_with_key(hash: u64) -> Self {
Self { hash }
}
#[inline]
fn add_to_hash(&mut self, i: u64) {
self.hash = self.hash.rotate_left(5).bitxor(i).wrapping_mul(K);
}
}
impl Hasher for FxHasher {
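    // Fold the input into the hash in 8-, 4-, 2-, then 1-byte chunks, so
    // arbitrary byte strings reuse the same word-sized mixing step.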
#[inline]
fn write(&mut self, mut bytes: &[u8]) {
let read_usize = |bytes: &[u8]| u64::from_ne_bytes(bytes[..8].try_into().unwrap());
let mut hash = FxHasher { hash: self.hash };
while bytes.len() >= 8 {
hash.add_to_hash(read_usize(bytes));
bytes = &bytes[8..];
}
if bytes.len() >= 4 {
hash.add_to_hash(u32::from_ne_bytes(bytes[..4].try_into().unwrap()) as u64);
bytes = &bytes[4..];
}
if bytes.len() >= 2 {
hash.add_to_hash(u16::from_ne_bytes(bytes[..2].try_into().unwrap()) as u64);
bytes = &bytes[2..];
}
        if !bytes.is_empty() {
hash.add_to_hash(bytes[0] as u64);
}
self.hash = hash.hash;
}
#[inline]
fn write_u8(&mut self, i: u8) {
self.add_to_hash(i as u64);
}
#[inline]
fn write_u16(&mut self, i: u16) {
self.add_to_hash(i as u64);
}
#[inline]
fn write_u32(&mut self, i: u32) {
self.add_to_hash(i as u64);
}
#[inline]
fn write_u64(&mut self, i: u64) {
self.add_to_hash(i);
}
#[inline]
fn write_usize(&mut self, i: usize) {
self.add_to_hash(i as u64);
}
#[inline]
fn finish(&self) -> u64 {
self.hash
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn random_check() {
for _ in 0..10 {
let a = RandomState::new();
dbg!(a);
}
}
}
| |
main.rs
|
extern crate csv;
extern crate serde;
//#[macro_use]
extern crate serde_derive;
use std::process;
pub mod engine;
pub mod processor;
pub mod storage;
fn main()
|
{
if let Err(err) = processor::process() {
eprintln!("{}", err);
process::exit(1);
}
}
|
|
cluster_test.go
|
package test
import (
"math"
"regexp"
"runtime"
"strings"
"sync"
"testing"
"time"
"github.com/nats-io/gnatsd/auth"
"github.com/nats-io/go-nats"
)
var testServers = []string{
"nats://localhost:1222",
"nats://localhost:1223",
"nats://localhost:1224",
"nats://localhost:1225",
"nats://localhost:1226",
"nats://localhost:1227",
"nats://localhost:1228",
}
var servers = strings.Join(testServers, ",")
func TestServersOption(t *testing.T) {
opts := nats.DefaultOptions
opts.NoRandomize = true
_, err := opts.Connect()
if err != nats.ErrNoServers {
t.Fatalf("Wrong error: '%v'\n", err)
}
opts.Servers = testServers
_, err = opts.Connect()
if err == nil || err != nats.ErrNoServers {
t.Fatalf("Did not receive proper error: %v\n", err)
}
// Make sure we can connect to first server if running
s1 := RunServerOnPort(1222)
// Do this in case some failure occurs before explicit shutdown
defer s1.Shutdown()
nc, err := opts.Connect()
if err != nil {
t.Fatalf("Could not connect: %v\n", err)
}
if nc.ConnectedUrl() != "nats://localhost:1222" {
nc.Close()
t.Fatalf("Does not report correct connection: %s\n",
nc.ConnectedUrl())
}
nc.Close()
s1.Shutdown()
	// Make sure we can connect to a non-first server if running
s2 := RunServerOnPort(1223)
// Do this in case some failure occurs before explicit shutdown
defer s2.Shutdown()
nc, err = opts.Connect()
if err != nil {
t.Fatalf("Could not connect: %v\n", err)
}
defer nc.Close()
if nc.ConnectedUrl() != "nats://localhost:1223" {
t.Fatalf("Does not report correct connection: %s\n",
nc.ConnectedUrl())
}
}
func TestNewStyleServersOption(t *testing.T) {
_, err := nats.Connect(nats.DefaultURL, nats.DontRandomize())
if err != nats.ErrNoServers {
t.Fatalf("Wrong error: '%v'\n", err)
}
servers := strings.Join(testServers, ",")
_, err = nats.Connect(servers, nats.DontRandomize())
if err == nil || err != nats.ErrNoServers {
t.Fatalf("Did not receive proper error: %v\n", err)
}
// Make sure we can connect to first server if running
s1 := RunServerOnPort(1222)
// Do this in case some failure occurs before explicit shutdown
defer s1.Shutdown()
nc, err := nats.Connect(servers, nats.DontRandomize())
if err != nil {
t.Fatalf("Could not connect: %v\n", err)
}
if nc.ConnectedUrl() != "nats://localhost:1222" {
nc.Close()
t.Fatalf("Does not report correct connection: %s\n",
nc.ConnectedUrl())
}
nc.Close()
s1.Shutdown()
// Make sure we can connect to a non-first server if running
s2 := RunServerOnPort(1223)
// Do this in case some failure occurs before explicit shutdown
defer s2.Shutdown()
nc, err = nats.Connect(servers, nats.DontRandomize())
if err != nil {
t.Fatalf("Could not connect: %v\n", err)
}
defer nc.Close()
if nc.ConnectedUrl() != "nats://localhost:1223" {
t.Fatalf("Does not report correct connection: %s\n",
nc.ConnectedUrl())
}
}
func TestAuthServers(t *testing.T) {
var plainServers = []string{
"nats://localhost:1222",
"nats://localhost:1224",
}
auth := &auth.Plain{
Username: "derek",
Password: "foo",
}
as1 := RunServerOnPort(1222)
as1.SetClientAuthMethod(auth)
defer as1.Shutdown()
as2 := RunServerOnPort(1224)
as2.SetClientAuthMethod(auth)
defer as2.Shutdown()
pservers := strings.Join(plainServers, ",")
nc, err := nats.Connect(pservers, nats.DontRandomize(), nats.Timeout(5*time.Second))
if err == nil {
nc.Close()
t.Fatalf("Expect Auth failure, got no error\n")
}
if matched, _ := regexp.Match(`authorization`, []byte(err.Error())); !matched {
t.Fatalf("Wrong error, wanted Auth failure, got '%s'\n", err)
}
// Test that we can connect to a subsequent correct server.
var authServers = []string{
"nats://localhost:1222",
"nats://derek:foo@localhost:1224",
}
aservers := strings.Join(authServers, ",")
nc, err = nats.Connect(aservers, nats.DontRandomize(), nats.Timeout(5*time.Second))
if err != nil {
t.Fatalf("Expected to connect properly: %v\n", err)
}
defer nc.Close()
if nc.ConnectedUrl() != authServers[1] {
t.Fatalf("Does not report correct connection: %s\n",
nc.ConnectedUrl())
}
}
func TestBasicClusterReconnect(t *testing.T) {
s1 := RunServerOnPort(1222)
defer s1.Shutdown()
s2 := RunServerOnPort(1224)
defer s2.Shutdown()
dch := make(chan bool)
rch := make(chan bool)
dcbCalled := false
opts := []nats.Option{nats.DontRandomize(),
nats.DisconnectHandler(func(nc *nats.Conn) {
// Suppress any additional callbacks
if dcbCalled {
return
}
dcbCalled = true
dch <- true
}),
nats.ReconnectHandler(func(_ *nats.Conn) { rch <- true }),
}
nc, err := nats.Connect(servers, opts...)
if err != nil {
t.Fatalf("Expected to connect, got err: %v\n", err)
}
defer nc.Close()
s1.Shutdown()
// wait for disconnect
if e := WaitTime(dch, 2*time.Second); e != nil {
t.Fatal("Did not receive a disconnect callback message")
}
reconnectTimeStart := time.Now()
// wait for reconnect
if e := WaitTime(rch, 2*time.Second); e != nil {
t.Fatal("Did not receive a reconnect callback message")
}
if nc.ConnectedUrl() != testServers[2] {
t.Fatalf("Does not report correct connection: %s\n",
nc.ConnectedUrl())
}
// Make sure we did not wait on reconnect for default time.
// Reconnect should be fast since it will be a switch to the
// second server and not be dependent on server restart time.
// On Windows, a failed connect takes more than a second, so
// account for that.
maxDuration := 100 * time.Millisecond
if runtime.GOOS == "windows" {
maxDuration = 1100 * time.Millisecond
}
reconnectTime := time.Since(reconnectTimeStart)
if reconnectTime > maxDuration {
t.Fatalf("Took longer than expected to reconnect: %v\n", reconnectTime)
}
}
func TestHotSpotReconnect(t *testing.T) {
s1 := RunServerOnPort(1222)
defer s1.Shutdown()
var srvrs string
if runtime.GOOS == "windows" {
srvrs = strings.Join(testServers[:5], ",")
} else {
srvrs = servers
}
numClients := 32
clients := []*nats.Conn{}
wg := &sync.WaitGroup{}
wg.Add(numClients)
opts := []nats.Option{
nats.ReconnectWait(50 * time.Millisecond),
nats.ReconnectHandler(func(_ *nats.Conn) { wg.Done() }),
}
for i := 0; i < numClients; i++ {
nc, err := nats.Connect(srvrs, opts...)
if err != nil {
t.Fatalf("Expected to connect, got err: %v\n", err)
}
defer nc.Close()
if nc.ConnectedUrl() != testServers[0] {
t.Fatalf("Connected to incorrect server: %v\n", nc.ConnectedUrl())
}
clients = append(clients, nc)
}
s2 := RunServerOnPort(1224)
defer s2.Shutdown()
s3 := RunServerOnPort(1226)
defer s3.Shutdown()
s1.Shutdown()
numServers := 2
// Wait on all reconnects
wg.Wait()
// Walk the clients and calculate how many of each..
cs := make(map[string]int)
for _, nc := range clients {
cs[nc.ConnectedUrl()]++
nc.Close()
}
if len(cs) != numServers {
t.Fatalf("Wrong number of reported servers: %d vs %d\n", len(cs), numServers)
}
expected := numClients / numServers
v := uint(float32(expected) * 0.40)
// Check that each item is within acceptable range
for s, total := range cs {
delta := uint(math.Abs(float64(expected - total)))
if delta > v {
t.Fatalf("Connected clients to server: %s out of range: %d\n", s, total)
}
}
}
func TestProperReconnectDelay(t *testing.T) {
s1 := RunServerOnPort(1222)
defer s1.Shutdown()
var srvs string
opts := nats.DefaultOptions
if runtime.GOOS == "windows" {
srvs = strings.Join(testServers[:2], ",")
} else {
srvs = strings.Join(testServers, ",")
}
opts.NoRandomize = true
dcbCalled := false
closedCbCalled := false
dch := make(chan bool)
dcb := func(nc *nats.Conn) {
// Suppress any additional calls
if dcbCalled {
return
}
dcbCalled = true
dch <- true
}
ccb := func(_ *nats.Conn) {
closedCbCalled = true
}
nc, err := nats.Connect(srvs, nats.DontRandomize(), nats.DisconnectHandler(dcb), nats.ClosedHandler(ccb))
if err != nil {
t.Fatalf("Expected to connect, got err: %v\n", err)
}
defer nc.Close()
s1.Shutdown()
// wait for disconnect
if e := WaitTime(dch, 2*time.Second); e != nil {
t.Fatal("Did not receive a disconnect callback message")
}
// Wait, want to make sure we don't spin on reconnect to non-existent servers.
time.Sleep(1 * time.Second)
// Make sure we are still reconnecting..
if closedCbCalled {
t.Fatal("Closed CB was triggered, should not have been.")
}
if status := nc.Status(); status != nats.RECONNECTING {
t.Fatalf("Wrong status: %d\n", status)
}
}
func TestProperFalloutAfterMaxAttempts(t *testing.T) {
s1 := RunServerOnPort(1222)
defer s1.Shutdown()
opts := nats.DefaultOptions
// Reduce the list of servers for Windows tests
if runtime.GOOS == "windows" {
opts.Servers = testServers[:2]
opts.MaxReconnect = 2
} else {
opts.Servers = testServers
opts.MaxReconnect = 5
}
opts.NoRandomize = true
opts.ReconnectWait = (25 * time.Millisecond)
dch := make(chan bool)
opts.DisconnectedCB = func(_ *nats.Conn) {
dch <- true
}
closedCbCalled := false
cch := make(chan bool)
opts.ClosedCB = func(_ *nats.Conn) {
closedCbCalled = true
cch <- true
}
nc, err := opts.Connect()
if err != nil {
t.Fatalf("Expected to connect, got err: %v\n", err)
}
defer nc.Close()
s1.Shutdown()
// On Windows, creating a TCP connection to a server not running takes more than
// a second. So be generous with the WaitTime.
// wait for disconnect
if e := WaitTime(dch, 5*time.Second); e != nil {
t.Fatal("Did not receive a disconnect callback message")
}
// Wait for ClosedCB
if e := WaitTime(cch, 5*time.Second); e != nil {
t.Fatal("Did not receive a closed callback message")
}
// Make sure we are not still reconnecting..
if !closedCbCalled {
t.Logf("%+v\n", nc)
t.Fatal("Closed CB was not triggered, should have been.")
}
// Expect connection to be closed...
if !nc.IsClosed() {
t.Fatalf("Wrong status: %d\n", nc.Status())
}
}
func TestProperFalloutAfterMaxAttemptsWithAuthMismatch(t *testing.T) {
var myServers = []string{
"nats://localhost:1222",
"nats://localhost:4443",
}
s1 := RunServerOnPort(1222)
defer s1.Shutdown()
s2, _ := RunServerWithConfig("./configs/tlsverify.conf")
defer s2.Shutdown()
opts := nats.DefaultOptions
opts.Servers = myServers
opts.NoRandomize = true
if runtime.GOOS == "windows" {
opts.MaxReconnect = 2
} else {
opts.MaxReconnect = 5
}
opts.ReconnectWait = (25 * time.Millisecond)
dch := make(chan bool)
opts.DisconnectedCB = func(_ *nats.Conn) {
dch <- true
}
closedCbCalled := false
cch := make(chan bool)
opts.ClosedCB = func(_ *nats.Conn) {
closedCbCalled = true
cch <- true
}
nc, err := opts.Connect()
if err != nil {
t.Fatalf("Expected to connect, got err: %v\n", err)
}
defer nc.Close()
s1.Shutdown()
// On Windows, creating a TCP connection to a server not running takes more than
// a second. So be generous with the WaitTime.
// wait for disconnect
if e := WaitTime(dch, 5*time.Second); e != nil {
t.Fatal("Did not receive a disconnect callback message")
}
// Wait for ClosedCB
if e := WaitTime(cch, 5*time.Second); e != nil {
reconnects := nc.Stats().Reconnects
t.Fatalf("Did not receive a closed callback message, #reconnects: %v", reconnects)
}
// Make sure we have not exceeded MaxReconnect
reconnects := nc.Stats().Reconnects
if reconnects != uint64(opts.MaxReconnect) {
t.Fatalf("Num reconnects was %v, expected %v", reconnects, opts.MaxReconnect)
}
// Make sure we are not still reconnecting..
if !closedCbCalled {
t.Logf("%+v\n", nc)
t.Fatal("Closed CB was not triggered, should have been.")
}
// Expect connection to be closed...
if !nc.IsClosed() {
t.Fatalf("Wrong status: %d\n", nc.Status())
}
}
func TestTimeoutOnNoServers(t *testing.T) {
s1 := RunServerOnPort(1222)
defer s1.Shutdown()
opts := nats.DefaultOptions
if runtime.GOOS == "windows" {
opts.Servers = testServers[:2]
opts.MaxReconnect = 2
opts.ReconnectWait = (100 * time.Millisecond)
} else {
opts.Servers = testServers
// 1 second total time wait
opts.MaxReconnect = 10
opts.ReconnectWait = (100 * time.Millisecond)
}
opts.NoRandomize = true
dch := make(chan bool)
opts.DisconnectedCB = func(nc *nats.Conn) {
// Suppress any additional calls
nc.SetDisconnectHandler(nil)
dch <- true
}
cch := make(chan bool)
opts.ClosedCB = func(_ *nats.Conn) {
cch <- true
}
nc, err := opts.Connect()
if err != nil {
t.Fatalf("Expected to connect, got err: %v\n", err)
}
defer nc.Close()
s1.Shutdown()
// On Windows, creating a connection to a non-running server takes
// more than a second. So be generous with WaitTime
// wait for disconnect
if e := WaitTime(dch, 5*time.Second); e != nil {
t.Fatal("Did not receive a disconnect callback message")
}
startWait := time.Now()
// Wait for ClosedCB
if e := WaitTime(cch, 5*time.Second); e != nil {
t.Fatal("Did not receive a closed callback message")
}
if runtime.GOOS != "windows" {
timeWait := time.Since(startWait)
// Use 500ms as variable time delta
variable := (500 * time.Millisecond)
expected := (time.Duration(opts.MaxReconnect) * opts.ReconnectWait)
if timeWait > (expected + variable) {
t.Fatalf("Waited too long for Closed state: %d\n", timeWait/time.Millisecond)
}
}
}
func TestPingReconnect(t *testing.T) {
RECONNECTS := 4
s1 := RunServerOnPort(1222)
defer s1.Shutdown()
opts := nats.DefaultOptions
opts.Servers = testServers
opts.NoRandomize = true
opts.ReconnectWait = 200 * time.Millisecond
opts.PingInterval = 50 * time.Millisecond
opts.MaxPingsOut = -1
var wg sync.WaitGroup
wg.Add(1)
rch := make(chan time.Time, RECONNECTS)
dch := make(chan time.Time, RECONNECTS)
opts.DisconnectedCB = func(_ *nats.Conn) {
d := dch
select {
case d <- time.Now():
default:
d = nil
}
}
opts.ReconnectedCB = func(c *nats.Conn) {
r := rch
select {
		case r <- time.Now():
		default:
			r = nil
		}
		wg.Done()
	}
nc, err := opts.Connect()
if err != nil {
t.Fatalf("Expected to connect, got err: %v\n", err)
}
defer nc.Close()
wg.Wait()
s1.Shutdown()
<-dch
for i := 0; i < RECONNECTS-1; i++ {
disconnectedAt := <-dch
reconnectAt := <-rch
pingCycle := disconnectedAt.Sub(reconnectAt)
if pingCycle > 2*opts.PingInterval {
t.Fatalf("Reconnect due to ping took %s", pingCycle.String())
}
}
}
email.go
|
/* Copyright 2017 LinkedIn Corp. Licensed under the Apache License, Version
* 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package notifier
import (
"crypto/tls"
"errors"
"fmt"
"net/smtp"
"regexp"
"strings"
"text/template"
"time"
"github.com/Root-App/Burrow/core/internal/helpers"
"github.com/Root-App/Burrow/core/protocol"
"github.com/spf13/viper"
"go.uber.org/zap"
"gopkg.in/gomail.v2"
)
// EmailNotifier is a module which can be used to send notifications of consumer group status via email messages. One
// email is sent for each consumer group that matches the whitelist/blacklist and the status threshold.
type EmailNotifier struct {
// App is a pointer to the application context. This stores the channel to the storage subsystem
App *protocol.ApplicationContext
// Log is a logger that has been configured for this module to use. Normally, this means it has been set up with
// fields that are appropriate to identify this coordinator
Log *zap.Logger
name string
groupWhitelist *regexp.Regexp
groupBlacklist *regexp.Regexp
extras map[string]string
templateOpen *template.Template
templateClose *template.Template
to string
from string
smtpDialer *gomail.Dialer
sendMailFunc func(message *gomail.Message) error
}
// Configure validates the configuration of the email notifier. At minimum, there must be a valid server, port, from
// address, and to address. If any of these are missing or incorrect, this func will panic with an explanatory message.
// It is also possible to specify an auth-type of either "plain" or "crammd5", along with a username and password.
func (module *EmailNotifier) Configure(name string, configRoot string) {
module.name = name
// Abstract the SendMail call so we can test
if module.sendMailFunc == nil {
module.sendMailFunc = module.sendEmail
}
host := viper.GetString(configRoot + ".server")
port := viper.GetInt(configRoot + ".port")
serverWithPort := fmt.Sprintf("%s:%v", host, port)
if !helpers.ValidateHostList([]string{serverWithPort}) {
module.Log.Panic("bad server or port")
panic(errors.New("configuration error"))
}
module.from = viper.GetString(configRoot + ".from")
if module.from == "" {
module.Log.Panic("missing from address")
panic(errors.New("configuration error"))
}
module.to = viper.GetString(configRoot + ".to")
if module.to == "" {
module.Log.Panic("missing to address")
panic(errors.New("configuration error"))
}
// Set up dialer and extra TLS configuration
extraCa := viper.GetString(configRoot + ".extra-ca")
noVerify := viper.GetBool(configRoot + ".noverify")
d := gomail.NewDialer(host, port, "", "")
d.Auth = module.getSMTPAuth(configRoot)
d.TLSConfig = buildEmailTLSConfig(extraCa, noVerify, host)
module.smtpDialer = d
}
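// Example configuration consumed by Configure above (an illustrative
// sketch: the "notifier.myemail" config root is hypothetical, but every
// key corresponds to a viper lookup in Configure or getSMTPAuth):
//
//   [notifier.myemail]
//   server="smtp.example.com"
//   port=587
//   from="burrow-noreply@example.com"
//   to="oncall@example.com"
//   auth-type="plain"
//   username="burrow"
//   password="secret"
//   noverify=false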
func buildEmailTLSConfig(extraCaFile string, noVerify bool, smtpHost string) *tls.Config {
rootCAs := buildRootCAs(extraCaFile, noVerify)
return &tls.Config{
InsecureSkipVerify: noVerify,
ServerName: smtpHost,
RootCAs: rootCAs,
}
}
// Builds authentication profile for smtp client
func (module *EmailNotifier) getSMTPAuth(configRoot string) smtp.Auth {
var auth smtp.Auth
// Set up SMTP authentication
switch strings.ToLower(viper.GetString(configRoot + ".auth-type")) {
case "plain":
auth = smtp.PlainAuth("", viper.GetString(configRoot+".username"), viper.GetString(configRoot+".password"), viper.GetString(configRoot+".server"))
case "crammd5":
auth = smtp.CRAMMD5Auth(viper.GetString(configRoot+".username"), viper.GetString(configRoot+".password"))
case "":
auth = nil
default:
module.Log.Panic("unknown auth type")
panic(errors.New("configuration error"))
}
return auth
}
// Start is a no-op for the email notifier. It always returns no error
func (module *EmailNotifier) Start() error {
return nil
}
// Stop is a no-op for the email notifier. It always returns no error
func (module *EmailNotifier) Stop() error {
return nil
}
// GetName returns the configured name of this module
func (module *EmailNotifier) GetName() string {
return module.name
}
// GetGroupWhitelist returns the compiled group whitelist (or nil, if there is not one)
func (module *EmailNotifier) GetGroupWhitelist() *regexp.Regexp {
return module.groupWhitelist
}
// GetGroupBlacklist returns the compiled group blacklist (or nil, if there is not one)
func (module *EmailNotifier) GetGroupBlacklist() *regexp.Regexp {
return module.groupBlacklist
}
// GetLogger returns the configured zap.Logger for this notifier
func (module *EmailNotifier) GetLogger() *zap.Logger {
return module.Log
}
// AcceptConsumerGroup has no additional function for the email notifier, and so always returns true
func (module *EmailNotifier) AcceptConsumerGroup(status *protocol.ConsumerGroupStatus) bool {
return true
}
// Notify sends a single email message, with the from and to set to the configured addresses for the notifier. The
// status, eventID, and startTime are all passed to the template for compiling the message. If stateGood is true, the
// "close" template is used. Otherwise, the "open" template is used.
func (module *EmailNotifier) Notify(status *protocol.ConsumerGroupStatus, eventID string, startTime time.Time, stateGood bool) {
logger := module.Log.With(
zap.String("cluster", status.Cluster),
zap.String("group", status.Group),
zap.String("id", eventID),
zap.String("status", status.Status.String()),
)
var tmpl *template.Template
if stateGood {
tmpl = module.templateClose
} else {
tmpl = module.templateOpen
}
// Put the from and to lines in without the template. Template should set the subject line, followed by a blank line
messageContent, err := executeTemplate(tmpl, module.extras, status, eventID, startTime)
if err != nil {
logger.Error("failed to assemble", zap.Error(err))
return
}
// Process template headers and send email
if m, err := module.createMessage(messageContent.String()); err == nil {
if err := module.sendMailFunc(m); err != nil {
logger.Error("failed to send", zap.Error(err))
}
	} else {
		logger.Error("failed to create message", zap.Error(err))
	}
}
// sendEmail uses the gomail smtpDialer to send a constructed message. This function is mocked for testing purposes
func (module *EmailNotifier) sendEmail(m *gomail.Message) error {
if err := module.smtpDialer.DialAndSend(m); err != nil {
return err
}
return nil
}
// createMessage organizes all relevant email message content into a structure for easy use
func (module *EmailNotifier) createMessage(messageContent string) (*gomail.Message, error) {
m := gomail.NewMessage()
var subject string
var mimeVersion string
contentType := "text/plain"
subjectDelimiter := "Subject: "
contentTypeDelimiter := "Content-Type: "
mimeVersionDelimiter := "MIME-version: "
if !strings.HasPrefix(messageContent, subjectDelimiter) {
return nil, errors.New("no subject line detected. Please make sure" +
" \"Subject: my_subject_line\" is included in your template")
}
var body string
// Go doesn't support regex lookaheads yet
for _, line := range strings.Split(messageContent, "\n") {
if strings.HasPrefix(line, subjectDelimiter) && subject == "" {
subject = getKeywordContent(line, subjectDelimiter)
} else if strings.HasPrefix(line, contentTypeDelimiter) {
contentType = strings.Replace(getKeywordContent(line, contentTypeDelimiter), ";", "", -1)
} else if strings.HasPrefix(line, mimeVersionDelimiter) {
mimeVersion = strings.Replace(getKeywordContent(line, mimeVersionDelimiter), ";", "", -1)
} else {
body = body + line + "\n"
}
}
recipients := strings.Split(module.to, ",")
m.SetHeader("To", recipients...)
m.SetHeader("From", module.from)
m.SetHeader("Subject", subject)
if mimeVersion != "" {
m.SetHeader("MIME-version", mimeVersion)
}
m.SetBody(contentType, body)
return m, nil
}
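// Example template output accepted by createMessage above (an illustrative
// sketch; only the Subject line is mandatory, while Content-Type and
// MIME-version are optional and default to "text/plain" and unset):
//
//   Subject: [Burrow] Group mygroup is in state WARN
//   Content-Type: text/html; charset=utf-8
//   MIME-version: 1.0
//
//   <p>Consumer group mygroup on cluster local is in state WARN.</p>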
func getKeywordContent(header string, subjectDelimiter string) string {
return strings.Split(header, subjectDelimiter)[1]
}
zmq4.go
|
// Copyright (c) 2018 Ashley Jeffs
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// +build ZMQ4
package writer
import (
"strings"
"time"
"github.com/Jeffail/benthos/lib/log"
"github.com/Jeffail/benthos/lib/message"
"github.com/Jeffail/benthos/lib/metrics"
"github.com/Jeffail/benthos/lib/types"
"github.com/pebbe/zmq4"
)
//------------------------------------------------------------------------------
// ZMQ4Config contains configuration fields for the ZMQ4 output type.
type ZMQ4Config struct {
URLs []string `json:"urls" yaml:"urls"`
Bind bool `json:"bind" yaml:"bind"`
SocketType string `json:"socket_type" yaml:"socket_type"`
HighWaterMark int `json:"high_water_mark" yaml:"high_water_mark"`
PollTimeoutMS int `json:"poll_timeout_ms" yaml:"poll_timeout_ms"`
}
// NewZMQ4Config creates a new ZMQ4Config with default values.
func NewZMQ4Config() *ZMQ4Config {
return &ZMQ4Config{
URLs: []string{"tcp://*:5556"},
Bind: true,
SocketType: "PUSH",
HighWaterMark: 0,
PollTimeoutMS: 5000,
}
}
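// Example YAML for this output (an illustrative sketch: the field names
// follow the yaml tags on ZMQ4Config, the values are the defaults above,
// and the top-level "zmq4" key nesting is assumed):
//
//   zmq4:
//     urls:
//       - tcp://*:5556
//     bind: true
//     socket_type: PUSH
//     high_water_mark: 0
//     poll_timeout_ms: 5000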
//------------------------------------------------------------------------------
// ZMQ4 is an output type that writes ZMQ4 messages.
type ZMQ4 struct {
log log.Modular
stats metrics.Type
urls []string
conf *ZMQ4Config
pollTimeout time.Duration
poller *zmq4.Poller
socket *zmq4.Socket
}
// NewZMQ4 creates a new ZMQ4 output type.
func NewZMQ4(conf *ZMQ4Config, log log.Modular, stats metrics.Type) (*ZMQ4, error) {
z := ZMQ4{
log: log.NewModule(".output.zmq4"),
stats: stats,
conf: conf,
pollTimeout: time.Millisecond * time.Duration(conf.PollTimeoutMS),
}
_, err := getZMQType(conf.SocketType)
if nil != err {
return nil, err
}
for _, u := range conf.URLs {
for _, splitU := range strings.Split(u, ",") {
if len(splitU) > 0 {
z.urls = append(z.urls, splitU)
}
}
}
return &z, nil
}
//------------------------------------------------------------------------------
func getZMQType(t string) (zmq4.Type, error) {
switch t {
case "PUB":
return zmq4.PUB, nil
case "PUSH":
return zmq4.PUSH, nil
}
return zmq4.PULL, types.ErrInvalidZMQType
}
//------------------------------------------------------------------------------
// Connect attempts to establish a connection to a ZMQ4 socket.
func (z *ZMQ4) Connect() error {
if z.socket != nil {
return nil
}
t, err := getZMQType(z.conf.SocketType)
if nil != err {
return err
}
ctx, err := zmq4.NewContext()
if nil != err {
return err
}
var socket *zmq4.Socket
if socket, err = ctx.NewSocket(t); nil != err {
return err
}
defer func() {
if err != nil && socket != nil {
socket.Close()
}
}()
socket.SetSndhwm(z.conf.HighWaterMark)
for _, address := range z.urls {
if z.conf.Bind {
err = socket.Bind(address)
} else {
err = socket.Connect(address)
}
if err != nil {
return err
}
}
z.socket = socket
z.poller = zmq4.NewPoller()
z.poller.Add(z.socket, zmq4.POLLOUT)
z.log.Infof("Sending ZMQ4 messages to URLs: %s\n", z.urls)
return nil
}
// Write will attempt to write a message to the ZMQ4 socket.
func (z *ZMQ4) Write(msg types.Message) error {
if z.socket == nil {
return types.ErrNotConnected
}
	// Try a non-blocking send first. If that fails, poll for writability up
	// to the configured timeout and retry with a blocking send; a poll that
	// returns no events and no error is treated as a timeout.
	_, err := z.socket.SendMessageDontwait(message.GetAllBytes(msg))
	if err != nil {
		var polled []zmq4.Polled
		if polled, err = z.poller.Poll(z.pollTimeout); len(polled) == 1 {
			_, err = z.socket.SendMessage(message.GetAllBytes(msg))
		} else if err == nil {
			return types.ErrTimeout
		}
	}
return err
}
// CloseAsync shuts down the ZMQ4 output and stops processing messages.
func (z *ZMQ4) CloseAsync() {
if z.socket != nil {
z.socket.Close()
z.socket = nil
}
}
// WaitForClose blocks until the ZMQ4 output has closed down.
func (z *ZMQ4) WaitForClose(timeout time.Duration) error {
return nil
}
//------------------------------------------------------------------------------
analyticsreporting-gen.go
|
// Copyright 2019 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated file. DO NOT EDIT.
// Package analyticsreporting provides access to the Analytics Reporting API.
//
// For product documentation, see: https://developers.google.com/analytics/devguides/reporting/core/v4/
//
// Creating a client
//
// Usage example:
//
// import "google.golang.org/api/analyticsreporting/v4"
// ...
// ctx := context.Background()
// analyticsreportingService, err := analyticsreporting.NewService(ctx)
//
// In this example, Google Application Default Credentials are used for authentication.
//
// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
//
// Other authentication options
//
// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes:
//
// analyticsreportingService, err := analyticsreporting.NewService(ctx, option.WithScopes(analyticsreporting.AnalyticsReadonlyScope))
//
// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
//
// analyticsreportingService, err := analyticsreporting.NewService(ctx, option.WithAPIKey("AIza..."))
//
// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
//
// config := &oauth2.Config{...}
// // ...
// token, err := config.Exchange(ctx, ...)
// analyticsreportingService, err := analyticsreporting.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
//
// See https://godoc.org/google.golang.org/api/option/ for details on options.
package analyticsreporting // import "google.golang.org/api/analyticsreporting/v4"
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
googleapi "google.golang.org/api/googleapi"
gensupport "google.golang.org/api/internal/gensupport"
option "google.golang.org/api/option"
htransport "google.golang.org/api/transport/http"
)
// Always reference these packages, just in case the auto-generated code
// below doesn't.
var _ = bytes.NewBuffer
var _ = strconv.Itoa
var _ = fmt.Sprintf
var _ = json.NewDecoder
var _ = io.Copy
var _ = url.Parse
var _ = gensupport.MarshalJSON
var _ = googleapi.Version
var _ = errors.New
var _ = strings.Replace
var _ = context.Canceled
const apiId = "analyticsreporting:v4"
const apiName = "analyticsreporting"
const apiVersion = "v4"
const basePath = "https://analyticsreporting.googleapis.com/"
// OAuth2 scopes used by this API.
const (
// View and manage your Google Analytics data
AnalyticsScope = "https://www.googleapis.com/auth/analytics"
// View your Google Analytics data
AnalyticsReadonlyScope = "https://www.googleapis.com/auth/analytics.readonly"
)
// NewService creates a new Service.
func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) {
scopesOption := option.WithScopes(
"https://www.googleapis.com/auth/analytics",
"https://www.googleapis.com/auth/analytics.readonly",
)
// NOTE: prepend, so we don't override user-specified scopes.
opts = append([]option.ClientOption{scopesOption}, opts...)
client, endpoint, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, err
}
s, err := New(client)
if err != nil {
return nil, err
}
if endpoint != "" {
s.BasePath = endpoint
}
return s, nil
}
// New creates a new Service. It uses the provided http.Client for requests.
//
// Deprecated: please use NewService instead.
// To provide a custom HTTP client, use option.WithHTTPClient.
// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead.
func New(client *http.Client) (*Service, error) {
if client == nil {
return nil, errors.New("client is nil")
}
s := &Service{client: client, BasePath: basePath}
s.Reports = NewReportsService(s)
s.UserActivity = NewUserActivityService(s)
return s, nil
}
type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
Reports *ReportsService
UserActivity *UserActivityService
}
func (s *Service) userAgent() string {
if s.UserAgent == "" {
return googleapi.UserAgent
}
return googleapi.UserAgent + " " + s.UserAgent
}
func NewReportsService(s *Service) *ReportsService {
rs := &ReportsService{s: s}
return rs
}
type ReportsService struct {
s *Service
}
func NewUserActivityService(s *Service) *UserActivityService {
rs := &UserActivityService{s: s}
return rs
}
type UserActivityService struct {
s *Service
}
// Activity: An Activity represents data for an activity of a user. Note
// that an
// Activity is different from a hit.
// A hit might result in multiple Activity's. For example, if a
// hit
// includes a transaction and a goal completion, there will be
// two
// Activity protos for this hit, one for ECOMMERCE and one for
// GOAL.
// Conversely, multiple hits can also construct one Activity. In
// classic
// e-commerce, data for one transaction might be sent through multiple
// hits.
// These hits will be merged into one ECOMMERCE Activity.
type Activity struct {
// ActivityTime: Timestamp of the activity. If activities for a visit
// cross midnight and
// occur in two separate dates, then two sessions (one per date)
// share the session identifier.
// For example, say session ID 113472 has activity within 2019-08-20,
// and
// session ID 243742 has activity within 2019-08-25 and 2019-08-26.
// Session ID
// 113472 is one session, and session ID 243742 is two sessions.
ActivityTime string `json:"activityTime,omitempty"`
// ActivityType: Type of this activity.
//
// Possible values:
// "ACTIVITY_TYPE_UNSPECIFIED" - ActivityType will never have this
// value in the response. Using this type in
// the request will result in an error.
// "PAGEVIEW" - Used when the activity resulted out of a visitor
// viewing a page.
// "SCREENVIEW" - Used when the activity resulted out of a visitor
// using an application on a
// mobile device.
//   "GOAL" - Used to denote a goal-type activity.
// "ECOMMERCE" - An e-commerce transaction was performed by the
// visitor on the page.
// "EVENT" - Used when the activity is an event.
ActivityType string `json:"activityType,omitempty"`
	// Appview: This will be set if `activity_type` equals `SCREENVIEW`.
Appview *ScreenviewData `json:"appview,omitempty"`
// Campaign: For manual campaign tracking, it is the value of the
// utm_campaign campaign
// tracking parameter. For AdWords autotagging, it is the name(s) of
// the
// online ad campaign(s) you use for the property. If you use neither,
// its
// value is (not set).
Campaign string `json:"campaign,omitempty"`
// ChannelGrouping: The Channel Group associated with an end user's
// session for this View
// (defined by the View's Channel Groupings).
ChannelGrouping string `json:"channelGrouping,omitempty"`
// CustomDimension: A list of all custom dimensions associated with this
// activity.
CustomDimension []*CustomDimension `json:"customDimension,omitempty"`
// Ecommerce: This will be set if `activity_type` equals `ECOMMERCE`.
Ecommerce *EcommerceData `json:"ecommerce,omitempty"`
// Event: This field contains all the details pertaining to an event and
// will be
// set if `activity_type` equals `EVENT`.
Event *EventData `json:"event,omitempty"`
// Goals: This field contains a list of all the goals that were reached
// in this
// activity when `activity_type` equals `GOAL`.
Goals *GoalSetData `json:"goals,omitempty"`
// Hostname: The hostname from which the tracking request was made.
Hostname string `json:"hostname,omitempty"`
// Keyword: For manual campaign tracking, it is the value of the
// utm_term campaign
// tracking parameter. For AdWords traffic, it contains the best
// matching
// targeting criteria. For the display network, where multiple
// targeting
// criteria could have caused the ad to show up, it returns the best
// matching
// targeting criteria as selected by Ads. This could be display_keyword,
// site
// placement, boomuserlist, user_interest, age, or gender. Otherwise its
// value
// is (not set).
Keyword string `json:"keyword,omitempty"`
// LandingPagePath: The first page in users' sessions, or the landing
// page.
LandingPagePath string `json:"landingPagePath,omitempty"`
// Medium: The type of referrals. For manual campaign tracking, it is
// the value of the
// utm_medium campaign tracking parameter. For AdWords autotagging, it
// is cpc.
// If users came from a search engine detected by Google Analytics, it
// is
// organic. If the referrer is not a search engine, it is referral. If
// users
// came directly to the property and document.referrer is empty, its
// value is
// (none).
Medium string `json:"medium,omitempty"`
// Pageview: This will be set if `activity_type` equals `PAGEVIEW`. This
// field
// contains all the details about the visitor and the page that was
// visited.
Pageview *PageviewData `json:"pageview,omitempty"`
// Source: The source of referrals. For manual campaign tracking, it is
// the value of
// the utm_source campaign tracking parameter. For AdWords autotagging,
// it is
// google. If you use neither, it is the domain of the source
// (e.g., document.referrer) referring the users. It may also contain a
// port
// address. If users arrived without a referrer, its value is (direct).
Source string `json:"source,omitempty"`
// ForceSendFields is a list of field names (e.g. "ActivityTime") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ActivityTime") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Activity) MarshalJSON() ([]byte, error) {
type NoMethod Activity
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
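// Example use of ForceSendFields (an illustrative sketch, not generated
// code): an empty Campaign is normally omitted from the marshaled JSON,
// but listing it in ForceSendFields forces it to be sent:
//
//   a := &Activity{Campaign: ""}
//   a.ForceSendFields = []string{"Campaign"}
//   b, _ := a.MarshalJSON() // b == []byte(`{"campaign":""}`)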
// Cohort: Defines a cohort. A cohort is a group of users who share a
// common
// characteristic. For example, all users with the same acquisition
// date
// belong to the same cohort.
type Cohort struct {
// DateRange: This is used for `FIRST_VISIT_DATE` cohort, the cohort
// selects users
// whose first visit date is between start date and end date defined in
// the
// DateRange. The date ranges should be aligned for cohort requests. If
// the
// request contains `ga:cohortNthDay` it should be exactly one day
// long,
// if `ga:cohortNthWeek` it should be aligned to the week boundary
// (starting
// at Sunday and ending Saturday), and for `ga:cohortNthMonth` the date
// range
// should be aligned to the month (starting at the first and ending on
// the
// last day of the month).
// For LTV requests there are no such restrictions.
// You do not need to supply a date range for
// the
// `reportsRequest.dateRanges` field.
DateRange *DateRange `json:"dateRange,omitempty"`
// Name: A unique name for the cohort. If not defined name will be
// auto-generated
// with values cohort_[1234...].
Name string `json:"name,omitempty"`
// Type: Type of the cohort. The only supported type as of now
// is
// `FIRST_VISIT_DATE`. If this field is unspecified the cohort is
// treated
// as `FIRST_VISIT_DATE` type cohort.
//
// Possible values:
// "UNSPECIFIED_COHORT_TYPE" - If unspecified it's treated as
// `FIRST_VISIT_DATE`.
// "FIRST_VISIT_DATE" - Cohorts that are selected based on first visit
// date.
Type string `json:"type,omitempty"`
// ForceSendFields is a list of field names (e.g. "DateRange") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DateRange") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Cohort) MarshalJSON() ([]byte, error) {
type NoMethod Cohort
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// CohortGroup: Defines a cohort group.
// For example:
//
// "cohortGroup": {
// "cohorts": [{
// "name": "cohort 1",
// "type": "FIRST_VISIT_DATE",
// "dateRange": { "startDate": "2015-08-01", "endDate":
// "2015-08-01" }
// },{
// "name": "cohort 2"
// "type": "FIRST_VISIT_DATE"
// "dateRange": { "startDate": "2015-07-01", "endDate":
// "2015-07-01" }
// }]
// }
type CohortGroup struct {
// Cohorts: The definition for the cohort.
Cohorts []*Cohort `json:"cohorts,omitempty"`
// LifetimeValue: Enable Life Time Value (LTV). LTV measures lifetime
// value for users
// acquired through different channels.
// Please see:
// [Cohort
// Analysis](https://support.google.com/analytics/answer/6074676)
// and
// [Lifetime
// Value](https://support.google.com/analytics/answer/6182550)
// If the value of lifetimeValue is false:
//
// - The metric values are similar to the values in the web interface
// cohort
// report.
// - The cohort definition date ranges must be aligned to the calendar
// week
// and month. i.e. while requesting `ga:cohortNthWeek` the `startDate`
// in
// the cohort definition should be a Sunday and the `endDate` should
// be the
// following Saturday, and for `ga:cohortNthMonth`, the `startDate`
// should be the 1st of the month and `endDate` should be the last
// day
// of the month.
//
// When the lifetimeValue is true:
//
// - The metric values will correspond to the values in the web
// interface
// LifeTime value report.
// - The Lifetime Value report shows you how user value (Revenue) and
// engagement (Appviews, Goal Completions, Sessions, and Session
// Duration)
// grow during the 90 days after a user is acquired.
// - The metrics are calculated as a cumulative average per user per the
// time
// increment.
// - The cohort definition date ranges need not be aligned to the
// calendar
// week and month boundaries.
	// - The `viewId` must be an
	// [app view ID](https://support.google.com/analytics/answer/2649553#WebVersusAppViews)
LifetimeValue bool `json:"lifetimeValue,omitempty"`
// ForceSendFields is a list of field names (e.g. "Cohorts") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Cohorts") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CohortGroup) MarshalJSON() ([]byte, error) {
type NoMethod CohortGroup
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ColumnHeader: Column headers.
type ColumnHeader struct {
// Dimensions: The dimension names in the response.
Dimensions []string `json:"dimensions,omitempty"`
// MetricHeader: Metric headers for the metrics in the response.
MetricHeader *MetricHeader `json:"metricHeader,omitempty"`
// ForceSendFields is a list of field names (e.g. "Dimensions") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Dimensions") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ColumnHeader) MarshalJSON() ([]byte, error) {
type NoMethod ColumnHeader
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// CustomDimension: Custom dimension.
type CustomDimension struct {
// Index: Slot number of custom dimension.
Index int64 `json:"index,omitempty"`
	// Value: Value of the custom dimension. Default value (i.e. empty
	// string) indicates clearing session/visitor scope custom dimension
	// value.
Value string `json:"value,omitempty"`
// ForceSendFields is a list of field names (e.g. "Index") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Index") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CustomDimension) MarshalJSON() ([]byte, error) {
type NoMethod CustomDimension
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// DateRange: A contiguous set of days: startDate, startDate + 1 day,
// ..., endDate.
// The start and end dates are specified
// in
// [ISO8601](https://en.wikipedia.org/wiki/ISO_8601) date format
// `YYYY-MM-DD`.
type DateRange struct {
// EndDate: The end date for the query in the format `YYYY-MM-DD`.
EndDate string `json:"endDate,omitempty"`
// StartDate: The start date for the query in the format `YYYY-MM-DD`.
StartDate string `json:"startDate,omitempty"`
// ForceSendFields is a list of field names (e.g. "EndDate") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "EndDate") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *DateRange) MarshalJSON() ([]byte, error) {
type NoMethod DateRange
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
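// Example (an illustrative sketch): a single-month date range in the
// required `YYYY-MM-DD` format:
//
//   dr := &DateRange{StartDate: "2019-01-01", EndDate: "2019-01-31"}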
// DateRangeValues: Used to return a list of metrics for a single
// DateRange / dimension
// combination
type DateRangeValues struct {
// PivotValueRegions: The values of each pivot region.
PivotValueRegions []*PivotValueRegion `json:"pivotValueRegions,omitempty"`
// Values: Each value corresponds to each Metric in the request.
Values []string `json:"values,omitempty"`
// ForceSendFields is a list of field names (e.g. "PivotValueRegions")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "PivotValueRegions") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *DateRangeValues) MarshalJSON() ([]byte, error) {
type NoMethod DateRangeValues
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Dimension:
// [Dimensions](https://support.google.com/analytics/answer/1033861)
// are attributes of your data. For example, the dimension
// `ga:city`
// indicates the city, for example, "Paris" or "New York", from which
// a session originates.
type Dimension struct {
// HistogramBuckets: If non-empty, we place dimension values into
// buckets after string to
// int64. Dimension values that are not the string representation of
// an
// integral value will be converted to zero. The bucket values have to
// be in
// increasing order. Each bucket is closed on the lower end, and open
// on the
// upper end. The "first" bucket includes all values less than the
// first
// boundary, the "last" bucket includes all values up to infinity.
// Dimension
// values that fall in a bucket get transformed to a new dimension
// value. For
// example, if one gives a list of "0, 1, 3, 4, 7", then we return
// the
// following buckets:
//
// - bucket #1: values < 0, dimension value "<0"
// - bucket #2: values in [0,1), dimension value "0"
// - bucket #3: values in [1,3), dimension value "1-2"
// - bucket #4: values in [3,4), dimension value "3"
// - bucket #5: values in [4,7), dimension value "4-6"
// - bucket #6: values >= 7, dimension value "7+"
//
// NOTE: If you are applying histogram mutation on any dimension, and
// using
// that dimension in sort, you will want to use the sort
// type
// `HISTOGRAM_BUCKET` for that purpose. Without that the dimension
// values
// will be sorted according to dictionary
// (lexicographic) order. For example the ascending dictionary order
// is:
//
// "<50", "1001+", "121-1000", "50-120"
//
// And the ascending `HISTOGRAM_BUCKET` order is:
//
// "<50", "50-120", "121-1000", "1001+"
//
// The client has to explicitly request "orderType":
// "HISTOGRAM_BUCKET"
// for a histogram-mutated dimension.
HistogramBuckets googleapi.Int64s `json:"histogramBuckets,omitempty"`
// Name: Name of the dimension to fetch, for example `ga:browser`.
Name string `json:"name,omitempty"`
// ForceSendFields is a list of field names (e.g. "HistogramBuckets") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "HistogramBuckets") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Dimension) MarshalJSON() ([]byte, error) {
type NoMethod Dimension
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
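// Example (an illustrative sketch; `ga:sessionCount` is just one plausible
// dimension name): a histogram-bucketed dimension using the bucket list
// from the documentation above. Request "orderType": "HISTOGRAM_BUCKET"
// when sorting on such a dimension:
//
//   dim := &Dimension{
//       Name:             "ga:sessionCount",
//       HistogramBuckets: googleapi.Int64s{0, 1, 3, 4, 7},
//   }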
// DimensionFilter: Dimension filter specifies the filtering options on
// a dimension.
type DimensionFilter struct {
// CaseSensitive: Should the match be case sensitive? Default is false.
CaseSensitive bool `json:"caseSensitive,omitempty"`
// DimensionName: The dimension to filter on. A DimensionFilter must
// contain a dimension.
DimensionName string `json:"dimensionName,omitempty"`
// Expressions: Strings or regular expression to match against. Only the
// first value of
// the list is used for comparison unless the operator is `IN_LIST`.
// If `IN_LIST` operator, then the entire list is used to filter
// the
// dimensions as explained in the description of the `IN_LIST` operator.
Expressions []string `json:"expressions,omitempty"`
// Not: Logical `NOT` operator. If this boolean is set to true, then the
// matching
// dimension values will be excluded in the report. The default is
// false.
Not bool `json:"not,omitempty"`
// Operator: How to match the dimension to the expression. The default
// is REGEXP.
//
// Possible values:
// "OPERATOR_UNSPECIFIED" - If the match type is unspecified, it is
// treated as a `REGEXP`.
	//   "REGEXP" - The match expression is treated as a regular expression.
	// All other match types are not treated as regular expressions.
	//   "BEGINS_WITH" - Matches the values which begin with the match
	// expression provided.
// "ENDS_WITH" - Matches the values which end with the match
// expression provided.
// "PARTIAL" - Substring match.
// "EXACT" - The value should match the match expression entirely.
// "NUMERIC_EQUAL" - Integer comparison filters.
// case sensitivity is ignored for these and the expression
// is assumed to be a string representing an integer.
// Failure conditions:
//
// - If expression is not a valid int64, the client should expect
// an error.
// - Input dimensions that are not valid int64 values will never match
// the
// filter.
// "NUMERIC_GREATER_THAN" - Checks if the dimension is numerically
// greater than the match
// expression. Read the description for `NUMERIC_EQUALS` for
// restrictions.
// "NUMERIC_LESS_THAN" - Checks if the dimension is numerically less
// than the match expression.
// Read the description for `NUMERIC_EQUALS` for restrictions.
// "IN_LIST" - This option is used to specify a dimension filter whose
// expression can
// take any value from a selected list of values. This helps
// avoiding
// evaluating multiple exact match dimension filters which are OR'ed
// for
// every single response row. For example:
//
// expressions: ["A", "B", "C"]
//
	// Any response row whose dimension has its value as A, B, or C
	// matches this DimensionFilter.
Operator string `json:"operator,omitempty"`
// ForceSendFields is a list of field names (e.g. "CaseSensitive") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "CaseSensitive") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *DimensionFilter) MarshalJSON() ([]byte, error) {
type NoMethod DimensionFilter
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
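// Example (an illustrative sketch): an `IN_LIST` filter matching any of
// several browsers, per the operator documentation above:
//
//   filter := &DimensionFilter{
//       DimensionName: "ga:browser",
//       Operator:      "IN_LIST",
//       Expressions:   []string{"Chrome", "Firefox", "Safari"},
//   }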
// DimensionFilterClause: A group of dimension filters. Set the operator
// value to specify how
// the filters are logically combined.
type DimensionFilterClause struct {
// Filters: The repeated set of filters. They are logically combined
// based on the
// operator specified.
Filters []*DimensionFilter `json:"filters,omitempty"`
// Operator: The operator for combining multiple dimension filters. If
// unspecified, it
// is treated as an `OR`.
//
// Possible values:
// "OPERATOR_UNSPECIFIED" - Unspecified operator. It is treated as an
// `OR`.
// "OR" - The logical `OR` operator.
// "AND" - The logical `AND` operator.
Operator string `json:"operator,omitempty"`
// ForceSendFields is a list of field names (e.g. "Filters") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Filters") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *DimensionFilterClause) MarshalJSON() ([]byte, error) {
type NoMethod DimensionFilterClause
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// DynamicSegment: Dynamic segment definition for defining the segment
// within the request.
// A segment can select users, sessions or both.
type DynamicSegment struct {
// Name: The name of the dynamic segment.
Name string `json:"name,omitempty"`
// SessionSegment: Session Segment to select sessions to include in the
// segment.
SessionSegment *SegmentDefinition `json:"sessionSegment,omitempty"`
// UserSegment: User Segment to select users to include in the segment.
UserSegment *SegmentDefinition `json:"userSegment,omitempty"`
// ForceSendFields is a list of field names (e.g. "Name") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Name") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *DynamicSegment) MarshalJSON() ([]byte, error) {
type NoMethod DynamicSegment
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// EcommerceData: E-commerce details associated with the user activity.
type EcommerceData struct {
// ActionType: Action associated with this e-commerce action.
//
// Possible values:
// "UNKNOWN" - Action type is not known.
// "CLICK" - Click through of product lists.
// "DETAILS_VIEW" - Product detail views.
// "ADD_TO_CART" - Add product(s) to cart.
// "REMOVE_FROM_CART" - Remove product(s) from cart.
// "CHECKOUT" - Check out.
// "PAYMENT" - Completed purchase.
// "REFUND" - Refund of purchase.
// "CHECKOUT_OPTION" - Checkout options.
ActionType string `json:"actionType,omitempty"`
// EcommerceType: The type of this e-commerce activity.
//
// Possible values:
// "ECOMMERCE_TYPE_UNSPECIFIED" - Used when the e-commerce activity
// type is unspecified.
// "CLASSIC" - Used when activity has classic (non-enhanced)
// e-commerce information.
// "ENHANCED" - Used when activity has enhanced e-commerce
// information.
EcommerceType string `json:"ecommerceType,omitempty"`
// Products: Details of the products in this transaction.
Products []*ProductData `json:"products,omitempty"`
// Transaction: Transaction details of this e-commerce action.
Transaction *TransactionData `json:"transaction,omitempty"`
// ForceSendFields is a list of field names (e.g. "ActionType") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ActionType") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *EcommerceData) MarshalJSON() ([]byte, error) {
type NoMethod EcommerceData
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// EventData: Represents all the details pertaining to an event.
type EventData struct {
// EventAction: Type of interaction with the object. Eg: 'play'.
EventAction string `json:"eventAction,omitempty"`
// EventCategory: The object on the page that was interacted with. Eg:
// 'Video'.
EventCategory string `json:"eventCategory,omitempty"`
// EventCount: Number of such events in this activity.
EventCount int64 `json:"eventCount,omitempty,string"`
// EventLabel: Label attached with the event.
EventLabel string `json:"eventLabel,omitempty"`
// EventValue: Numeric value associated with the event.
EventValue int64 `json:"eventValue,omitempty,string"`
// ForceSendFields is a list of field names (e.g. "EventAction") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "EventAction") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *EventData) MarshalJSON() ([]byte, error) {
type NoMethod EventData
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GetReportsRequest: The batch request containing multiple report
// requests.
type GetReportsRequest struct {
// ReportRequests: Requests; each request will have a separate
// response.
// There can be a maximum of 5 requests. All requests should have the
// same
// `dateRanges`, `viewId`, `segments`, `samplingLevel`, and
// `cohortGroup`.
ReportRequests []*ReportRequest `json:"reportRequests,omitempty"`
// UseResourceQuotas: Enables
// [resource based
// quotas](/analytics/devguides/reporting/core/v4/limits-quotas#analytics_reporting_api_v4)
// (defaults to `False`). If this field is set to `True` the
// per view (profile) quotas are governed by the computational
// cost of the request. Note that using cost based quotas will
// enable higher sampling rates (10 Million for `SMALL`,
// 100M for `LARGE`). See the
// [limits and quotas
// documentation](/analytics/devguides/reporting/core/v4/limits-quotas#analytics_reporting_api_v4)
// for details.
UseResourceQuotas bool `json:"useResourceQuotas,omitempty"`
// ForceSendFields is a list of field names (e.g. "ReportRequests") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ReportRequests") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *GetReportsRequest) MarshalJSON() ([]byte, error) {
type NoMethod GetReportsRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
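// A minimal sketch of a batch request honoring the constraint above that
// every request share the same `viewId` and `dateRanges`; the view ID and
// dates are illustrative, and DateRange is assumed to be the type defined
// elsewhere in this file:
//
//	req := &GetReportsRequest{
//		ReportRequests: []*ReportRequest{{
//			ViewId:     "ga:123456",
//			DateRanges: []*DateRange{{StartDate: "2019-01-01", EndDate: "2019-01-31"}},
//			Metrics:    []*Metric{{Expression: "ga:users"}},
//		}},
//	}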
// GetReportsResponse: The main response class which holds the reports
// from the Reporting API
// `batchGet` call.
type GetReportsResponse struct {
// QueryCost: The amount of resource quota tokens deducted to execute
// the query. Includes
// all responses.
QueryCost int64 `json:"queryCost,omitempty"`
// Reports: Responses corresponding to each of the requests.
Reports []*Report `json:"reports,omitempty"`
// ResourceQuotasRemaining: The amount of resource quota remaining for
// the property.
ResourceQuotasRemaining *ResourceQuotasRemaining `json:"resourceQuotasRemaining,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "QueryCost") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "QueryCost") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GetReportsResponse) MarshalJSON() ([]byte, error) {
type NoMethod GetReportsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
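// A minimal sketch of inspecting quota usage on a response; `resp` is
// assumed to be a *GetReportsResponse returned by a `batchGet` call with
// `UseResourceQuotas` enabled:
//
//	if resp.ResourceQuotasRemaining != nil {
//		log.Printf("cost: %d, hourly tokens left: %d",
//			resp.QueryCost, resp.ResourceQuotasRemaining.HourlyQuotaTokensRemaining)
//	}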
// GoalData: Represents all the details pertaining to a goal.
type GoalData struct {
// GoalCompletionLocation: URL of the page where this goal was
// completed.
GoalCompletionLocation string `json:"goalCompletionLocation,omitempty"`
// GoalCompletions: Total number of goal completions in this activity.
GoalCompletions int64 `json:"goalCompletions,omitempty,string"`
// GoalIndex: This identifies the goal as configured for the profile.
GoalIndex int64 `json:"goalIndex,omitempty"`
// GoalName: Name of the goal.
GoalName string `json:"goalName,omitempty"`
// GoalPreviousStep1: URL of the page one step prior to the goal
// completion.
GoalPreviousStep1 string `json:"goalPreviousStep1,omitempty"`
// GoalPreviousStep2: URL of the page two steps prior to the goal
// completion.
GoalPreviousStep2 string `json:"goalPreviousStep2,omitempty"`
// GoalPreviousStep3: URL of the page three steps prior to the goal
// completion.
GoalPreviousStep3 string `json:"goalPreviousStep3,omitempty"`
// GoalValue: Value in this goal.
GoalValue float64 `json:"goalValue,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "GoalCompletionLocation") to unconditionally include in API requests.
// By default, fields with empty values are omitted from API requests.
// However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "GoalCompletionLocation")
// to include in API requests with the JSON null value. By default,
// fields with empty values are omitted from API requests. However, any
// field with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *GoalData) MarshalJSON() ([]byte, error) {
type NoMethod GoalData
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
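// UnmarshalJSON decodes GoalValue through gensupport.JSONFloat64 so that a
// value the server sends either as a JSON number or as a quoted string
// parses into the float64 field.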
func (s *GoalData) UnmarshalJSON(data []byte) error {
type NoMethod GoalData
var s1 struct {
GoalValue gensupport.JSONFloat64 `json:"goalValue"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.GoalValue = float64(s1.GoalValue)
return nil
}
// GoalSetData: Represents a set of goals that were reached in an
// activity.
type GoalSetData struct {
// Goals: All the goals that were reached in the current activity.
Goals []*GoalData `json:"goals,omitempty"`
// ForceSendFields is a list of field names (e.g. "Goals") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Goals") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoalSetData) MarshalJSON() ([]byte, error) {
type NoMethod GoalSetData
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Metric:
// [Metrics](https://support.google.com/analytics/answer/1033861)
// are the quantitative measurements. For example, the metric
// `ga:users`
// indicates the total number of users for the requested time period.
type Metric struct {
// Alias: An alias for the metric expression is an alternate name for
// the
// expression. The alias can be used for filtering and sorting. This
// field
// is optional and is useful if the expression is not a single metric
// but
// a complex expression which cannot be used in filtering and
// sorting.
// The alias is also used in the response column header.
Alias string `json:"alias,omitempty"`
// Expression: A metric expression in the request. An expression is
// constructed from one
// or more metrics and numbers. Accepted operators include: Plus (+),
// Minus
// (-), Negation (Unary -), Divided by (/), Multiplied by (*),
// Parenthesis,
// Positive cardinal numbers (0-9), can include decimals and is limited
// to
// 1024 characters. Example `ga:totalRefunds/ga:users`, in most cases
// the
// metric expression is just a single metric name like
// `ga:users`.
// Adding mixed `MetricType` (E.g., `CURRENCY` + `PERCENTAGE`)
// metrics
// will result in unexpected results.
Expression string `json:"expression,omitempty"`
// FormattingType: Specifies how the metric expression should be
// formatted, for example
// `INTEGER`.
//
// Possible values:
// "METRIC_TYPE_UNSPECIFIED" - Metric type is unspecified.
// "INTEGER" - Integer metric.
// "FLOAT" - Float metric.
// "CURRENCY" - Currency metric.
// "PERCENT" - Percentage metric.
// "TIME" - Time metric in `HH:MM:SS` format.
FormattingType string `json:"formattingType,omitempty"`
// ForceSendFields is a list of field names (e.g. "Alias") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Alias") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Metric) MarshalJSON() ([]byte, error) {
type NoMethod Metric
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
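// A minimal sketch of a computed metric with an alias, following the
// expression rules above (the expression and alias are illustrative):
//
//	m := &Metric{
//		Expression:     "ga:totalRefunds/ga:users",
//		Alias:          "refunds_per_user",
//		FormattingType: "FLOAT",
//	}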
// MetricFilter: MetricFilter specifies the filter on a metric.
type MetricFilter struct {
// ComparisonValue: The value to compare against.
ComparisonValue string `json:"comparisonValue,omitempty"`
// MetricName: The metric that will be filtered on. A metricFilter must
// contain a metric
// name. A metric name can be an alias earlier defined as a metric or it
// can
// also be a metric expression.
MetricName string `json:"metricName,omitempty"`
// Not: Logical `NOT` operator. If this boolean is set to true, then the
// matching
// metric values will be excluded in the report. The default is false.
Not bool `json:"not,omitempty"`
// Operator: Whether the metric is `EQUAL`, `LESS_THAN` or
// `GREATER_THAN` the
// comparisonValue; the default is `EQUAL`. If the operator
// is
// `IS_MISSING`, checks if the metric is missing and would ignore
// the
// comparisonValue.
//
// Possible values:
// "OPERATOR_UNSPECIFIED" - If the operator is not specified, it is
// treated as `EQUAL`.
// "EQUAL" - Should the value of the metric be exactly equal to the
// comparison value.
// "LESS_THAN" - Should the value of the metric be less than to the
// comparison value.
// "GREATER_THAN" - Should the value of the metric be greater than to
// the comparison value.
// "IS_MISSING" - Validates if the metric is missing.
// Doesn't take comparisonValue into account.
Operator string `json:"operator,omitempty"`
// ForceSendFields is a list of field names (e.g. "ComparisonValue") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ComparisonValue") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *MetricFilter) MarshalJSON() ([]byte, error) {
type NoMethod MetricFilter
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// MetricFilterClause: Represents a group of metric filters.
// Set the operator value to specify how the filters are logically
// combined.
type MetricFilterClause struct {
// Filters: The repeated set of filters. They are logically combined
// based on the
// operator specified.
Filters []*MetricFilter `json:"filters,omitempty"`
// Operator: The operator for combining multiple metric filters. If
// unspecified, it is
// treated as an `OR`.
//
// Possible values:
// "OPERATOR_UNSPECIFIED" - Unspecified operator. It is treated as an
// `OR`.
// "OR" - The logical `OR` operator.
// "AND" - The logical `AND` operator.
Operator string `json:"operator,omitempty"`
// ForceSendFields is a list of field names (e.g. "Filters") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Filters") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MetricFilterClause) MarshalJSON() ([]byte, error) {
type NoMethod MetricFilterClause
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
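// A minimal sketch of keeping only rows with more than ten sessions, using
// the MetricFilter defined above (the metric name and threshold are
// illustrative):
//
//	mfc := &MetricFilterClause{
//		Operator: "AND",
//		Filters: []*MetricFilter{{
//			MetricName:      "ga:sessions",
//			Operator:        "GREATER_THAN",
//			ComparisonValue: "10",
//		}},
//	}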
// MetricHeader: The headers for the metrics.
type MetricHeader struct {
// MetricHeaderEntries: Headers for the metrics in the response.
MetricHeaderEntries []*MetricHeaderEntry `json:"metricHeaderEntries,omitempty"`
// PivotHeaders: Headers for the pivots in the response.
PivotHeaders []*PivotHeader `json:"pivotHeaders,omitempty"`
// ForceSendFields is a list of field names (e.g. "MetricHeaderEntries")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "MetricHeaderEntries") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *MetricHeader) MarshalJSON() ([]byte, error) {
type NoMethod MetricHeader
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// MetricHeaderEntry: Header for the metrics.
type MetricHeaderEntry struct {
// Name: The name of the header.
Name string `json:"name,omitempty"`
// Type: The type of the metric, for example `INTEGER`.
//
// Possible values:
// "METRIC_TYPE_UNSPECIFIED" - Metric type is unspecified.
// "INTEGER" - Integer metric.
// "FLOAT" - Float metric.
// "CURRENCY" - Currency metric.
// "PERCENT" - Percentage metric.
// "TIME" - Time metric in `HH:MM:SS` format.
Type string `json:"type,omitempty"`
// ForceSendFields is a list of field names (e.g. "Name") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Name") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MetricHeaderEntry) MarshalJSON() ([]byte, error) {
type NoMethod MetricHeaderEntry
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// OrFiltersForSegment: A list of segment filters in the `OR` group,
// combined with the logical OR
// operator.
type OrFiltersForSegment struct {
// SegmentFilterClauses: List of segment filters to be combined with
// an `OR` operator.
SegmentFilterClauses []*SegmentFilterClause `json:"segmentFilterClauses,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "SegmentFilterClauses") to unconditionally include in API requests.
// By default, fields with empty values are omitted from API requests.
// However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "SegmentFilterClauses") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *OrFiltersForSegment) MarshalJSON() ([]byte, error) {
type NoMethod OrFiltersForSegment
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// OrderBy: Specifies the sorting options.
type OrderBy struct {
// FieldName: The field to sort by. The default sort order is
// ascending. Example:
// `ga:browser`.
// Note that you can only specify one field for sort here. For
// example,
// `ga:browser, ga:city` is not valid.
FieldName string `json:"fieldName,omitempty"`
// OrderType: The order type. The default orderType is `VALUE`.
//
// Possible values:
// "ORDER_TYPE_UNSPECIFIED" - Unspecified order type will be treated
// as sort based on value.
// "VALUE" - The sort order is based on the value of the chosen
// column; looks only at
// the first date range.
// "DELTA" - The sort order is based on the difference of the values
// of the chosen
// column between the first two date ranges. Usable only if there
// are
// exactly two date ranges.
// "SMART" - The sort order is based on weighted value of the chosen
// column. If
// column has n/d format, then weighted value of this ratio will
// be `(n + totals.n)/(d + totals.d)` Usable only for metrics
// that
// represent ratios.
// "HISTOGRAM_BUCKET" - Histogram order type is applicable only to
// dimension columns with
// non-empty histogram-buckets.
// "DIMENSION_AS_INTEGER" - If the dimensions are fixed length
// numbers, ordinary sort would just
// work fine. `DIMENSION_AS_INTEGER` can be used if the dimensions
// are
// variable length numbers.
OrderType string `json:"orderType,omitempty"`
// SortOrder: The sorting order for the field.
//
// Possible values:
// "SORT_ORDER_UNSPECIFIED" - If the sort order is unspecified, the
// default is ascending.
// "ASCENDING" - Ascending sort. The field will be sorted in an
// ascending manner.
// "DESCENDING" - Descending sort. The field will be sorted in a
// descending manner.
SortOrder string `json:"sortOrder,omitempty"`
// ForceSendFields is a list of field names (e.g. "FieldName") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "FieldName") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *OrderBy) MarshalJSON() ([]byte, error) {
type NoMethod OrderBy
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
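// A minimal sketch of sorting output rows by a metric in descending order
// (the field name is illustrative):
//
//	ob := &OrderBy{
//		FieldName: "ga:sessions",
//		OrderType: "VALUE",
//		SortOrder: "DESCENDING",
//	}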
// PageviewData: Represents details collected when the visitor views a
// page.
type PageviewData struct {
// PagePath: The URL of the page that the visitor viewed.
PagePath string `json:"pagePath,omitempty"`
// PageTitle: The title of the page that the visitor viewed.
PageTitle string `json:"pageTitle,omitempty"`
// ForceSendFields is a list of field names (e.g. "PagePath") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "PagePath") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *PageviewData) MarshalJSON() ([]byte, error) {
type NoMethod PageviewData
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Pivot: The Pivot describes the pivot section in the request.
// The Pivot helps rearrange the information in the table for certain
// reports
// by pivoting your data on a second dimension.
type Pivot struct {
// DimensionFilterClauses: DimensionFilterClauses are logically combined
// with an `AND` operator: only
// data that is included by all these DimensionFilterClauses contributes
// to
// the values in this pivot region. Dimension filters can be used to
// restrict
// the columns shown in the pivot region. For example if you
// have
// `ga:browser` as the requested dimension in the pivot region, and
// you
// specify key filters to restrict `ga:browser` to only "IE" or
// "Firefox",
// then only those two browsers would show up as columns.
DimensionFilterClauses []*DimensionFilterClause `json:"dimensionFilterClauses,omitempty"`
// Dimensions: A list of dimensions to show as pivot columns. A Pivot
// can have a maximum
// of 4 dimensions. Pivot dimensions are part of the restriction on
// the
// total number of dimensions allowed in the request.
Dimensions []*Dimension `json:"dimensions,omitempty"`
// MaxGroupCount: Specifies the maximum number of groups to return.
// The default value is 10, also the maximum value is 1,000.
MaxGroupCount int64 `json:"maxGroupCount,omitempty"`
// Metrics: The pivot metrics. Pivot metrics are part of the
// restriction on total number of metrics allowed in the request.
Metrics []*Metric `json:"metrics,omitempty"`
// StartGroup: If k metrics were requested, then the response will
// contain some
// data-dependent multiple of k columns in the report. E.g., if you
// pivoted
// on the dimension `ga:browser` then you'd get k columns for "Firefox",
// k
// columns for "IE", k columns for "Chrome", etc. The ordering of the
// groups
// of columns is determined by descending order of "total" for the first
// of
// the k values. Ties are broken by lexicographic ordering of the
// first
// pivot dimension, then lexicographic ordering of the second
// pivot
// dimension, and so on. E.g., if the totals for the first value
// for
// Firefox, IE, and Chrome were 8, 2, 8, respectively, the order of
// columns
// would be Chrome, Firefox, IE.
//
// The following let you choose which of the groups of k columns
// are
// included in the response.
StartGroup int64 `json:"startGroup,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "DimensionFilterClauses") to unconditionally include in API requests.
// By default, fields with empty values are omitted from API requests.
// However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DimensionFilterClauses")
// to include in API requests with the JSON null value. By default,
// fields with empty values are omitted from API requests. However, any
// field with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Pivot) MarshalJSON() ([]byte, error) {
type NoMethod Pivot
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
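// A minimal sketch of pivoting on `ga:browser` restricted to two browsers,
// reusing the DimensionFilterClause type from above; the `IN_LIST` operator
// and the Dimension `Name` field are assumptions from the wider API, and
// all names and values are illustrative:
//
//	p := &Pivot{
//		Dimensions: []*Dimension{{Name: "ga:browser"}},
//		DimensionFilterClauses: []*DimensionFilterClause{{
//			Filters: []*DimensionFilter{{
//				DimensionName: "ga:browser",
//				Operator:      "IN_LIST",
//				Expressions:   []string{"IE", "Firefox"},
//			}},
//		}},
//		Metrics:       []*Metric{{Expression: "ga:sessions"}},
//		MaxGroupCount: 10,
//	}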
// PivotHeader: The headers for each of the pivot sections defined in
// the request.
type PivotHeader struct {
// PivotHeaderEntries: A single pivot section header.
PivotHeaderEntries []*PivotHeaderEntry `json:"pivotHeaderEntries,omitempty"`
// TotalPivotGroupsCount: The total number of groups for this pivot.
TotalPivotGroupsCount int64 `json:"totalPivotGroupsCount,omitempty"`
// ForceSendFields is a list of field names (e.g. "PivotHeaderEntries")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "PivotHeaderEntries") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *PivotHeader) MarshalJSON() ([]byte, error) {
type NoMethod PivotHeader
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// PivotHeaderEntry: The headers for each of the metric columns
// corresponding to the metrics
// requested in the pivots section of the response.
type PivotHeaderEntry struct {
// DimensionNames: The name of the dimensions in the pivot response.
DimensionNames []string `json:"dimensionNames,omitempty"`
// DimensionValues: The values for the dimensions in the pivot.
DimensionValues []string `json:"dimensionValues,omitempty"`
// Metric: The metric header for the metric in the pivot.
Metric *MetricHeaderEntry `json:"metric,omitempty"`
// ForceSendFields is a list of field names (e.g. "DimensionNames") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DimensionNames") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *PivotHeaderEntry) MarshalJSON() ([]byte, error) {
type NoMethod PivotHeaderEntry
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// PivotValueRegion: The metric values in the pivot region.
type PivotValueRegion struct {
// Values: The values of the metrics in each of the pivot regions.
Values []string `json:"values,omitempty"`
// ForceSendFields is a list of field names (e.g. "Values") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Values") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *PivotValueRegion) MarshalJSON() ([]byte, error) {
type NoMethod PivotValueRegion
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ProductData: Details of the products in an e-commerce transaction.
type ProductData struct {
// ItemRevenue: The total revenue from purchased product items.
ItemRevenue float64 `json:"itemRevenue,omitempty"`
// ProductName: The product name, supplied by the e-commerce tracking
// application, for
// the purchased items.
ProductName string `json:"productName,omitempty"`
// ProductQuantity: Total number of units of this product in the
// transaction.
ProductQuantity int64 `json:"productQuantity,omitempty,string"`
// ProductSku: Unique code that represents the product.
ProductSku string `json:"productSku,omitempty"`
// ForceSendFields is a list of field names (e.g. "ItemRevenue") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ItemRevenue") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ProductData) MarshalJSON() ([]byte, error) {
type NoMethod ProductData
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *ProductData) UnmarshalJSON(data []byte) error {
type NoMethod ProductData
var s1 struct {
ItemRevenue gensupport.JSONFloat64 `json:"itemRevenue"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.ItemRevenue = float64(s1.ItemRevenue)
return nil
}
// Report: The data response corresponding to the request.
type Report struct {
// ColumnHeader: The column headers.
ColumnHeader *ColumnHeader `json:"columnHeader,omitempty"`
// Data: Response data.
Data *ReportData `json:"data,omitempty"`
// NextPageToken: Page token to retrieve the next page of results in the
// list.
NextPageToken string `json:"nextPageToken,omitempty"`
// ForceSendFields is a list of field names (e.g. "ColumnHeader") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ColumnHeader") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Report) MarshalJSON() ([]byte, error) {
type NoMethod Report
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ReportData: The data part of the report.
type ReportData struct {
// DataLastRefreshed: The last time the data in the report was
// refreshed. All the hits received
// before this timestamp are included in the calculation of the report.
DataLastRefreshed string `json:"dataLastRefreshed,omitempty"`
// IsDataGolden: Indicates if response to this request is golden or not.
// Data is
// golden when the exact same request will not produce any new results
// if
// asked at a later point in time.
IsDataGolden bool `json:"isDataGolden,omitempty"`
// Maximums: Minimum and maximum values seen over all matching rows.
// These are both
// empty when `hideValueRanges` in the request is false, or
// when
// rowCount is zero.
Maximums []*DateRangeValues `json:"maximums,omitempty"`
// Minimums: Minimum and maximum values seen over all matching rows.
// These are both
// empty when `hideValueRanges` in the request is false, or
// when
// rowCount is zero.
Minimums []*DateRangeValues `json:"minimums,omitempty"`
// RowCount: Total number of matching rows for this query.
RowCount int64 `json:"rowCount,omitempty"`
// Rows: There's one ReportRow for every unique combination of
// dimensions.
Rows []*ReportRow `json:"rows,omitempty"`
// SamplesReadCounts: If the results are
// [sampled](https://support.google.com/analytics/answer/2637192),
// this returns the total number of samples read, one entry per date
// range.
// If the results are not sampled this field will not be defined.
// See
// [developer
// guide](/analytics/devguides/reporting/core/v4/basics#sampling)
// for details.
SamplesReadCounts googleapi.Int64s `json:"samplesReadCounts,omitempty"`
// SamplingSpaceSizes: If the results are
// [sampled](https://support.google.com/analytics/answer/2637192),
// this returns the total number of
// samples present, one entry per date range. If the results are not
// sampled
// this field will not be defined. See
// [developer
// guide](/analytics/devguides/reporting/core/v4/basics#sampling)
// for details.
SamplingSpaceSizes googleapi.Int64s `json:"samplingSpaceSizes,omitempty"`
// Totals: For each requested date range, for the set of all rows that
// match
// the query, every requested value format gets a total. The total
// for a value format is computed by first totaling the
// metrics
// mentioned in the value format and then evaluating the value
// format as a scalar expression. E.g., for the "totals" of
// `3 / (ga:sessions + 2)` we compute
// `3 / ((sum of all relevant ga:sessions) + 2)`.
// Totals are computed before pagination.
Totals []*DateRangeValues `json:"totals,omitempty"`
// ForceSendFields is a list of field names (e.g. "DataLastRefreshed")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DataLastRefreshed") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *ReportData) MarshalJSON() ([]byte, error) {
type NoMethod ReportData
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
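// A minimal sketch of walking report rows; `report` is assumed to be a
// *Report from a response, and each row carries one DateRangeValues entry
// per requested date range (the `Values` field is an assumption from the
// wider API):
//
//	for _, row := range report.Data.Rows {
//		for i, drv := range row.Metrics {
//			log.Printf("dims=%v range=%d values=%v", row.Dimensions, i, drv.Values)
//		}
//	}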
// ReportRequest: The main request class which specifies the Reporting
// API request.
type ReportRequest struct {
// CohortGroup: Cohort group associated with this request. If there is a
// cohort group
// in the request the `ga:cohort` dimension must be present.
// Every [ReportRequest](#ReportRequest) within a `batchGet` method
// must
// contain the same `cohortGroup` definition.
CohortGroup *CohortGroup `json:"cohortGroup,omitempty"`
// DateRanges: Date ranges in the request. The request can have a
// maximum of 2 date
// ranges. The response will contain a set of metric values for
// each
// combination of the dimensions for each date range in the request. So,
// if
// there are two date ranges, there will be two set of metric values,
// one for
// the original date range and one for the second date range.
// The `reportRequest.dateRanges` field should not be specified for
// cohorts
// or Lifetime value requests.
// If a date range is not provided, the default date range is
// (startDate:
// current date - 7 days, endDate: current date - 1 day).
// Every
// [ReportRequest](#ReportRequest) within a `batchGet` method
// must
// contain the same `dateRanges` definition.
DateRanges []*DateRange `json:"dateRanges,omitempty"`
// DimensionFilterClauses: The dimension filter clauses for filtering
// Dimension Values. They are
// logically combined with the `AND` operator. Note that filtering
// occurs
// before any dimensions are aggregated, so that the returned
// metrics
// represent the total for only the relevant dimensions.
DimensionFilterClauses []*DimensionFilterClause `json:"dimensionFilterClauses,omitempty"`
// Dimensions: The dimensions requested.
// Requests can have a total of 9 dimensions.
Dimensions []*Dimension `json:"dimensions,omitempty"`
// FiltersExpression: Dimension or metric filters that restrict the data
// returned for your
// request. To use the `filtersExpression`, supply a dimension or metric
// on
// which to filter, followed by the filter expression. For example,
// the
// following expression selects `ga:browser` dimension which starts
// with
// Firefox; `ga:browser=~^Firefox`. For more information on
// dimensions
// and metric filters,
// see
// [Filters
// reference](https://developers.google.com/analytics/devguides/reporting/core/v3/reference#filters).
FiltersExpression string `json:"filtersExpression,omitempty"`
// HideTotals: If set to true, hides the total of all metrics for all
// the matching rows,
// for every date range. The default is false and will return the totals.
HideTotals bool `json:"hideTotals,omitempty"`
// HideValueRanges: If set to true, hides the minimum and maximum across
// all matching rows.
// The default is false and the value ranges are returned.
HideValueRanges bool `json:"hideValueRanges,omitempty"`
// IncludeEmptyRows: If set to false, the response does not include rows
// if all the retrieved
// metrics are equal to zero. The default is false which will exclude
// these
// rows.
IncludeEmptyRows bool `json:"includeEmptyRows,omitempty"`
// MetricFilterClauses: The metric filter clauses. They are logically
// combined with the `AND`
// operator. Metric filters look at only the first date range and not
// the
// comparing date range. Note that filtering on metrics occurs after
// the
// metrics are aggregated.
MetricFilterClauses []*MetricFilterClause `json:"metricFilterClauses,omitempty"`
// Metrics: The metrics requested.
// Requests must specify at least one metric. Requests can have a
// total of 10 metrics.
Metrics []*Metric `json:"metrics,omitempty"`
// OrderBys: Sort order on output rows. To compare two rows, the
// elements of the
// following are applied in order until a difference is found. All
// date
// ranges in the output get the same row order.
OrderBys []*OrderBy `json:"orderBys,omitempty"`
// PageSize: Page size is for paging and specifies the maximum number of
// returned rows.
// Page size should be >= 0. A query returns the default of 1,000
// rows.
// The Analytics Core Reporting API returns a maximum of 100,000 rows
// per
// request, no matter how many you ask for. It can also return fewer
// rows
// than requested, if there aren't as many dimension segments as you
// expect.
// For instance, there are fewer than 300 possible values for
// `ga:country`,
// so when segmenting only by country, you can't get more than 300
// rows,
// even if you set `pageSize` to a higher value.
PageSize int64 `json:"pageSize,omitempty"`
// PageToken: A continuation token to get the next page of the results.
// Adding this to
// the request will return the rows after the pageToken. The pageToken
// should
// be the value returned in the nextPageToken parameter in the response
// to
// the GetReports request.
PageToken string `json:"pageToken,omitempty"`
// Pivots: The pivot definitions. Requests can have a maximum of 2
// pivots.
Pivots []*Pivot `json:"pivots,omitempty"`
// SamplingLevel: The desired
// report
// [sample](https://support.google.com/analytics/answer/2637192)
// size.
// If the `samplingLevel` field is unspecified the `DEFAULT`
// sampling
// level is used. Every [ReportRequest](#ReportRequest) within
// a
// `batchGet` method must contain the same `samplingLevel` definition.
// See
// [developer
// guide](/analytics/devguides/reporting/core/v4/basics#sampling)
// for details.
//
// Possible values:
// "SAMPLING_UNSPECIFIED" - If the `samplingLevel` field is
// unspecified the `DEFAULT` sampling level
// is used.
// "DEFAULT" - Returns response with a sample size that balances speed
// and
// accuracy.
// "SMALL" - It returns a fast response with a smaller sampling size.
// "LARGE" - Returns a more accurate response using a large sampling
// size. But this
// may result in response being slower.
SamplingLevel string `json:"samplingLevel,omitempty"`
// Segments: Segment the data returned for the request. A segment
// definition helps look
// at a subset of the segment request. A request can contain up to
// four
// segments. Every [ReportRequest](#ReportRequest) within a
// `batchGet` method must contain the same `segments` definition.
// Requests
// with segments must have the `ga:segment` dimension.
Segments []*Segment `json:"segments,omitempty"`
// ViewId: The Analytics
// [view ID](https://support.google.com/analytics/answer/1009618)
// from which to retrieve data. Every
// [ReportRequest](#ReportRequest)
// within a `batchGet` method must contain the same `viewId`.
ViewId string `json:"viewId,omitempty"`
// ForceSendFields is a list of field names (e.g. "CohortGroup") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "CohortGroup") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ReportRequest) MarshalJSON() ([]byte, error) {
type NoMethod ReportRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
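// A minimal sketch of issuing a request through the generated service; it
// assumes an *http.Client already authorized for the Analytics Reporting
// scope and the `New` constructor generated in this package (the view ID
// is illustrative):
//
//	svc, err := New(authorizedHTTPClient)
//	if err != nil {
//		log.Fatal(err)
//	}
//	resp, err := svc.Reports.BatchGet(&GetReportsRequest{
//		ReportRequests: []*ReportRequest{{
//			ViewId:     "ga:123456",
//			DateRanges: []*DateRange{{StartDate: "7daysAgo", EndDate: "yesterday"}},
//			Metrics:    []*Metric{{Expression: "ga:sessions"}},
//			Dimensions: []*Dimension{{Name: "ga:country"}},
//		}},
//	}).Do()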
// ReportRow: A row in the report.
type ReportRow struct {
// Dimensions: List of requested dimensions.
Dimensions []string `json:"dimensions,omitempty"`
// Metrics: List of metrics for each requested DateRange.
Metrics []*DateRangeValues `json:"metrics,omitempty"`
// ForceSendFields is a list of field names (e.g. "Dimensions") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Dimensions") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ReportRow) MarshalJSON() ([]byte, error) {
type NoMethod ReportRow
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ResourceQuotasRemaining: The resource quota tokens remaining for the
// property after the request is
// completed.
type ResourceQuotasRemaining struct {
// DailyQuotaTokensRemaining: Daily resource quota tokens remaining.
DailyQuotaTokensRemaining int64 `json:"dailyQuotaTokensRemaining,omitempty"`
// HourlyQuotaTokensRemaining: Hourly resource quota tokens remaining.
HourlyQuotaTokensRemaining int64 `json:"hourlyQuotaTokensRemaining,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "DailyQuotaTokensRemaining") to unconditionally include in API
// requests. By default, fields with empty values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g.
// "DailyQuotaTokensRemaining") to include in API requests with the JSON
// null value. By default, fields with empty values are omitted from API
// requests. However, any field with an empty value appearing in
// NullFields will be sent to the server as null. It is an error if a
// field in this list has a non-empty value. This may be used to include
// null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ResourceQuotasRemaining) MarshalJSON() ([]byte, error) {
type NoMethod ResourceQuotasRemaining
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
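// ScreenviewData: Represents details collected when a visitor views a
// screen within a mobile application.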
type ScreenviewData struct {
// AppName: The application name.
AppName string `json:"appName,omitempty"`
// MobileDeviceBranding: Mobile manufacturer or branded name. Eg:
// "Google", "Apple" etc.
MobileDeviceBranding string `json:"mobileDeviceBranding,omitempty"`
// MobileDeviceModel: Mobile device model. Eg: "Pixel", "iPhone" etc.
MobileDeviceModel string `json:"mobileDeviceModel,omitempty"`
// ScreenName: The name of the screen.
ScreenName string `json:"screenName,omitempty"`
// ForceSendFields is a list of field names (e.g. "AppName") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AppName") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ScreenviewData) MarshalJSON() ([]byte, error) {
type NoMethod ScreenviewData
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SearchUserActivityRequest: The request to fetch User Report from
// Reporting API `userActivity:get` call.
type SearchUserActivityRequest struct {
// ActivityTypes: Set of all activity types being requested. Only
// activities matching these
// types will be returned in the response. If empty, all activities
// will be
// returned.
//
// Possible values:
// "ACTIVITY_TYPE_UNSPECIFIED" - ActivityType will never have this
// value in the response. Using this type in
// the request will result in an error.
// "PAGEVIEW" - Used when the activity resulted out of a visitor
// viewing a page.
// "SCREENVIEW" - Used when the activity resulted out of a visitor
// using an application on a
// mobile device.
// "GOAL" - Used to denote that a goal type activity.
// "ECOMMERCE" - An e-commerce transaction was performed by the
// visitor on the page.
// "EVENT" - Used when the activity is an event.
ActivityTypes []string `json:"activityTypes,omitempty"`
// DateRange: Date range for which to retrieve the user activity. If a
// date range is not
// provided, the default date range is (startDate: current date - 7
// days,
// endDate: current date - 1 day).
DateRange *DateRange `json:"dateRange,omitempty"`
// PageSize: Page size is for paging and specifies the maximum number of
// returned rows.
// Page size should be > 0. If the value is 0 or if the field isn't
// specified,
// the request returns the default of 1000 rows per page.
PageSize int64 `json:"pageSize,omitempty"`
// PageToken: A continuation token to get the next page of the results.
// Adding this to
// the request will return the rows after the pageToken. The pageToken
// should
// be the value returned in the nextPageToken parameter in the response
// to
// the [SearchUserActivityRequest](#SearchUserActivityRequest) request.
PageToken string `json:"pageToken,omitempty"`
// User: Required. Unique user Id to query for.
// Every
// [SearchUserActivityRequest](#SearchUserActivityRequest) must contain
// this
// field.
User *User `json:"user,omitempty"`
// ViewId: Required. The Analytics
// [view ID](https://support.google.com/analytics/answer/1009618)
// from which to retrieve data.
// Every
// [SearchUserActivityRequest](#SearchUserActivityRequest) must contain
// the
// `viewId`.
ViewId string `json:"viewId,omitempty"`
// ForceSendFields is a list of field names (e.g. "ActivityTypes") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ActivityTypes") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SearchUserActivityRequest) MarshalJSON() ([]byte, error) {
type NoMethod SearchUserActivityRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
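// Illustrative sketch (not part of the generated surface): a minimal
// SearchUserActivityRequest. ViewId and User are required; the IDs
// below are placeholder values. DateRange defaults to the last 7 days
// when omitted.
//
//	req := &SearchUserActivityRequest{
//		ViewId: "123456",
//		User:   &User{Type: "CLIENT_ID", UserId: "555.1234"},
//	}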
// SearchUserActivityResponse: The response from `userActivity:get`
// call.
type SearchUserActivityResponse struct {
// NextPageToken: This token should be passed
// to
// [SearchUserActivityRequest](#SearchUserActivityRequest) to retrieve
// the
// next page.
NextPageToken string `json:"nextPageToken,omitempty"`
// SampleRate: This field represents the
// [sampling rate](https://support.google.com/analytics/answer/2637192)
// for
// the given request and is a number between 0.0 to 1.0. See
// [developer
// guide](/analytics/devguides/reporting/core/v4/basics#sampling)
// for details.
SampleRate float64 `json:"sampleRate,omitempty"`
// Sessions: Each record represents a session (device details, duration,
// etc).
Sessions []*UserActivitySession `json:"sessions,omitempty"`
// TotalRows: Total rows returned by this query (across different
// pages).
TotalRows int64 `json:"totalRows,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SearchUserActivityResponse) MarshalJSON() ([]byte, error) {
type NoMethod SearchUserActivityResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *SearchUserActivityResponse) UnmarshalJSON(data []byte) error {
type NoMethod SearchUserActivityResponse
var s1 struct {
SampleRate gensupport.JSONFloat64 `json:"sampleRate"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.SampleRate = float64(s1.SampleRate)
return nil
}
// Segment: The segment definition, if the report needs to be
// segmented.
// A Segment is a subset of the Analytics data. For example, of the
// entire
// set of users, one Segment might be users from a particular country or
// city.
type Segment struct {
// DynamicSegment: A dynamic segment definition in the request.
DynamicSegment *DynamicSegment `json:"dynamicSegment,omitempty"`
// SegmentId: The segment ID of a built-in or custom segment, for
// example `gaid::-3`.
SegmentId string `json:"segmentId,omitempty"`
// ForceSendFields is a list of field names (e.g. "DynamicSegment") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DynamicSegment") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Segment) MarshalJSON() ([]byte, error) {
type NoMethod Segment
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
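// Illustrative sketch (not part of the generated surface): the two
// ways to populate a Segment. `gaid::-3` is a placeholder segment ID,
// and the sketch assumes the DynamicSegment type defined elsewhere in
// this package exposes a Name field.
//
//	byID := &Segment{SegmentId: "gaid::-3"}
//	dynamic := &Segment{DynamicSegment: &DynamicSegment{Name: "New York sessions"}}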
// SegmentDefinition: SegmentDefinition defines the segment to be a set
// of SegmentFilters which
// are combined together with a logical `AND` operation.
type SegmentDefinition struct {
// SegmentFilters: A segment is defined by a set of segment filters
// which are combined
// together with a logical `AND` operation.
SegmentFilters []*SegmentFilter `json:"segmentFilters,omitempty"`
// ForceSendFields is a list of field names (e.g. "SegmentFilters") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "SegmentFilters") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *SegmentDefinition) MarshalJSON() ([]byte, error) {
type NoMethod SegmentDefinition
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SegmentDimensionFilter: Dimension filter specifies the filtering
// options on a dimension.
type SegmentDimensionFilter struct {
// CaseSensitive: Whether the match should be case sensitive. Ignored
// for the `IN_LIST` operator.
CaseSensitive bool `json:"caseSensitive,omitempty"`
// DimensionName: Name of the dimension for which the filter is being
// applied.
DimensionName string `json:"dimensionName,omitempty"`
// Expressions: The list of expressions; only the first element is used
// for all operators.
Expressions []string `json:"expressions,omitempty"`
// MaxComparisonValue: Maximum comparison values for `BETWEEN` match
// type.
MaxComparisonValue string `json:"maxComparisonValue,omitempty"`
// MinComparisonValue: Minimum comparison values for `BETWEEN` match
// type.
MinComparisonValue string `json:"minComparisonValue,omitempty"`
// Operator: The operator to use to match the dimension with the
// expressions.
//
// Possible values:
// "OPERATOR_UNSPECIFIED" - If the match type is unspecified, it is
// treated as a REGEXP.
// "REGEXP" - The match expression is treated as a regular expression.
// All other match
// types are not treated as regular expressions.
// "BEGINS_WITH" - Matches the values which begin with the match
// expression provided.
// "ENDS_WITH" - Matches the values which end with the match
// expression provided.
// "PARTIAL" - Substring match.
// "EXACT" - The value should match the match expression entirely.
// "IN_LIST" - This option is used to specify a dimension filter whose
// expression can
// take any value from a selected list of values. This helps
// avoiding
// evaluating multiple exact match dimension filters which are OR'ed
// for
// every single response row. For example:
//
// expressions: ["A", "B", "C"]
//
// Any response row whose dimension has it is value as A, B or C,
// matches
// this DimensionFilter.
// "NUMERIC_LESS_THAN" - Integer comparison filters.
// case sensitivity is ignored for these and the expression
// is assumed to be a string representing an integer.
// Failure conditions:
//
// - if expression is not a valid int64, the client should expect
// an error.
// - input dimensions that are not valid int64 values will never match
// the
// filter.
//
// Checks if the dimension is numerically less than the match
// expression.
// "NUMERIC_GREATER_THAN" - Checks if the dimension is numerically
// greater than the match
// expression.
// "NUMERIC_BETWEEN" - Checks if the dimension is numerically between
// the minimum and maximum
// of the match expression, boundaries excluded.
Operator string `json:"operator,omitempty"`
// ForceSendFields is a list of field names (e.g. "CaseSensitive") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "CaseSensitive") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SegmentDimensionFilter) MarshalJSON() ([]byte, error) {
type NoMethod SegmentDimensionFilter
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SegmentFilter: SegmentFilter defines the segment to be either a
// simple or a sequence
// segment. A simple segment condition contains dimension and metric
// conditions
// to select the sessions or users. A sequence segment condition can be
// used to
// select users or sessions based on sequential conditions.
type SegmentFilter struct {
// Not: If true, match the complement of simple or sequence segment.
// For example, to match all visits not from "New York", we can define
// the
// segment as follows:
//
// "sessionSegment": {
// "segmentFilters": [{
// "simpleSegment" :{
// "orFiltersForSegment": [{
// "segmentFilterClauses":[{
// "dimensionFilter": {
// "dimensionName": "ga:city",
// "expressions": ["New York"]
// }
// }]
// }]
// },
// "not": "True"
// }]
// },
Not bool `json:"not,omitempty"`
// SequenceSegment: Sequence conditions consist of one or more steps,
// where each step is
// defined by one or more dimension/metric conditions. Multiple steps
// can
// be combined with special sequence operators.
SequenceSegment *SequenceSegment `json:"sequenceSegment,omitempty"`
// SimpleSegment: A simple segment consists of one or more
// dimension/metric conditions that can be combined.
SimpleSegment *SimpleSegment `json:"simpleSegment,omitempty"`
// ForceSendFields is a list of field names (e.g. "Not") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Not") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SegmentFilter) MarshalJSON() ([]byte, error) {
type NoMethod SegmentFilter
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SegmentFilterClause: Filter clause to be used in a segment
// definition; it can be either a metric or a dimension filter.
type SegmentFilterClause struct {
// DimensionFilter: Dimension Filter for the segment definition.
DimensionFilter *SegmentDimensionFilter `json:"dimensionFilter,omitempty"`
// MetricFilter: Metric Filter for the segment definition.
MetricFilter *SegmentMetricFilter `json:"metricFilter,omitempty"`
// Not: Matches the complement (`!`) of the filter.
Not bool `json:"not,omitempty"`
// ForceSendFields is a list of field names (e.g. "DimensionFilter") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DimensionFilter") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *SegmentFilterClause) MarshalJSON() ([]byte, error) {
type NoMethod SegmentFilterClause
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SegmentMetricFilter: Metric filter to be used in a segment filter
// clause.
type SegmentMetricFilter struct {
// ComparisonValue: The value to compare against. If the operator is
// `BETWEEN`, this value is treated as the minimum comparison value.
ComparisonValue string `json:"comparisonValue,omitempty"`
// MaxComparisonValue: Max comparison value is only used for the
// `BETWEEN` operator.
MaxComparisonValue string `json:"maxComparisonValue,omitempty"`
// MetricName: The metric that will be filtered on. A `metricFilter`
// must contain a
// metric name.
MetricName string `json:"metricName,omitempty"`
// Operator: Specifies the operation to perform to compare the metric.
// The default is `EQUAL`.
//
// Possible values:
// "UNSPECIFIED_OPERATOR" - Unspecified operator is treated as
// `LESS_THAN` operator.
// "LESS_THAN" - Checks if the metric value is less than comparison
// value.
// "GREATER_THAN" - Checks if the metric value is greater than
// comparison value.
// "EQUAL" - Equals operator.
// "BETWEEN" - For between operator, both the minimum and maximum are
// exclusive.
// We will use `LT` and `GT` for comparison.
Operator string `json:"operator,omitempty"`
// Scope: Scope for a metric defines the level at which that metric is
// defined. The
// specified metric scope must be equal to or greater than its primary
// scope
// as defined in the data model. The primary scope is defined by if
// the
// segment is selecting users or sessions.
//
// Possible values:
// "UNSPECIFIED_SCOPE" - If the scope is unspecified, it defaults to
// the condition scope,
// `USER` or `SESSION` depending on if the segment is trying to
// choose
// users or sessions.
// "PRODUCT" - Product scope.
// "HIT" - Hit scope.
// "SESSION" - Session scope.
// "USER" - User scope.
Scope string `json:"scope,omitempty"`
// ForceSendFields is a list of field names (e.g. "ComparisonValue") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ComparisonValue") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *SegmentMetricFilter) MarshalJSON() ([]byte, error) {
type NoMethod SegmentMetricFilter
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SegmentSequenceStep: A segment sequence definition.
type SegmentSequenceStep struct {
// MatchType: Specifies if the step immediately precedes or can be any
// time before the
// next step.
//
// Possible values:
// "UNSPECIFIED_MATCH_TYPE" - Unspecified match type is treated as
// precedes.
// "PRECEDES" - Operator indicates that the previous step precedes the
// next step.
// "IMMEDIATELY_PRECEDES" - Operator indicates that the previous step
// immediately precedes the next
// step.
MatchType string `json:"matchType,omitempty"`
// OrFiltersForSegment: A sequence is specified with a list of
// OR-grouped filters which are combined with the `AND` operator.
OrFiltersForSegment []*OrFiltersForSegment `json:"orFiltersForSegment,omitempty"`
// ForceSendFields is a list of field names (e.g. "MatchType") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "MatchType") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SegmentSequenceStep) MarshalJSON() ([]byte, error) {
type NoMethod SegmentSequenceStep
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SequenceSegment: Sequence conditions consist of one or more steps,
// where each step is defined
// by one or more dimension/metric conditions. Multiple steps can be
// combined
// with special sequence operators.
type SequenceSegment struct {
// FirstStepShouldMatchFirstHit: If set, first step condition must match
// the first hit of the visitor (in
// the date range).
FirstStepShouldMatchFirstHit bool `json:"firstStepShouldMatchFirstHit,omitempty"`
// SegmentSequenceSteps: The list of steps in the sequence.
SegmentSequenceSteps []*SegmentSequenceStep `json:"segmentSequenceSteps,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "FirstStepShouldMatchFirstHit") to unconditionally include in API
// requests. By default, fields with empty values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g.
// "FirstStepShouldMatchFirstHit") to include in API requests with the
// JSON null value. By default, fields with empty values are omitted
// from API requests. However, any field with an empty value appearing
// in NullFields will be sent to the server as null. It is an error if a
// field in this list has a non-empty value. This may be used to include
// null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SequenceSegment) MarshalJSON() ([]byte, error) {
type NoMethod SequenceSegment
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SimpleSegment: A simple segment consists of one or more
// dimension/metric conditions that can be combined.
type SimpleSegment struct {
// OrFiltersForSegment: A list of segment filter groups which are
// combined with the logical `AND` operator.
OrFiltersForSegment []*OrFiltersForSegment `json:"orFiltersForSegment,omitempty"`
// ForceSendFields is a list of field names (e.g. "OrFiltersForSegment")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "OrFiltersForSegment") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *SimpleSegment) MarshalJSON() ([]byte, error) {
type NoMethod SimpleSegment
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// TransactionData: Represents details collected when the visitor
// performs a transaction on the
// page.
type TransactionData struct {
// TransactionId: The transaction ID, supplied by the e-commerce
// tracking method, for the
// purchase in the shopping cart.
TransactionId string `json:"transactionId,omitempty"`
// TransactionRevenue: The total sale revenue (excluding shipping and
// tax) of the transaction.
TransactionRevenue float64 `json:"transactionRevenue,omitempty"`
// TransactionShipping: Total cost of shipping.
TransactionShipping float64 `json:"transactionShipping,omitempty"`
// TransactionTax: Total tax for the transaction.
TransactionTax float64 `json:"transactionTax,omitempty"`
// ForceSendFields is a list of field names (e.g. "TransactionId") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "TransactionId") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TransactionData) MarshalJSON() ([]byte, error) {
type NoMethod TransactionData
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *TransactionData) UnmarshalJSON(data []byte) error {
type NoMethod TransactionData
var s1 struct {
TransactionRevenue gensupport.JSONFloat64 `json:"transactionRevenue"`
TransactionShipping gensupport.JSONFloat64 `json:"transactionShipping"`
TransactionTax gensupport.JSONFloat64 `json:"transactionTax"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.TransactionRevenue = float64(s1.TransactionRevenue)
s.TransactionShipping = float64(s1.TransactionShipping)
s.TransactionTax = float64(s1.TransactionTax)
return nil
}
// User: Contains information to identify a particular user uniquely.
type User struct {
// Type: Type of the user in the request. The field `userId` is
// associated with this
// type.
//
// Possible values:
// "USER_ID_TYPE_UNSPECIFIED" - When the User Id Type is not
// specified, the default type used will be
// CLIENT_ID.
// "USER_ID" - A single user, like a signed-in user account, that may
// interact with
// content across one or more devices and / or browser instances.
// "CLIENT_ID" - Analytics assigned client_id.
Type string `json:"type,omitempty"`
// UserId: Unique Id of the user for which the data is being requested.
UserId string `json:"userId,omitempty"`
// ForceSendFields is a list of field names (e.g. "Type") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Type") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *User) MarshalJSON() ([]byte, error) {
type NoMethod User
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// UserActivitySession: This represents a user session performed on a
// specific device over a period of time.
type UserActivitySession struct {
// Activities: Represents a detailed view into each of the activity in
// this session.
Activities []*Activity `json:"activities,omitempty"`
// DataSource: The data source of a hit. By default, hits sent from
// analytics.js are
// reported as "web" and hits sent from the mobile SDKs are reported as
// "app".
// These values can be overridden in the Measurement Protocol.
DataSource string `json:"dataSource,omitempty"`
// DeviceCategory: The type of device used: "mobile", "tablet" etc.
DeviceCategory string `json:"deviceCategory,omitempty"`
// Platform: Platform on which the activity happened: "android", "ios"
// etc.
Platform string `json:"platform,omitempty"`
// SessionDate: Date of this session in ISO-8601 format.
SessionDate string `json:"sessionDate,omitempty"`
// SessionId: Unique ID of the session.
SessionId string `json:"sessionId,omitempty"`
// ForceSendFields is a list of field names (e.g. "Activities") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Activities") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *UserActivitySession) MarshalJSON() ([]byte, error) {
type NoMethod UserActivitySession
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// method id "analyticsreporting.reports.batchGet":
type ReportsBatchGetCall struct {
s *Service
getreportsrequest *GetReportsRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// BatchGet: Returns the Analytics data.
func (r *ReportsService) BatchGet(getreportsrequest *GetReportsRequest) *ReportsBatchGetCall {
c := &ReportsBatchGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.getreportsrequest = getreportsrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ReportsBatchGetCall) Fields(s ...googleapi.Field) *ReportsBatchGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ReportsBatchGetCall) Context(ctx context.Context) *ReportsBatchGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ReportsBatchGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ReportsBatchGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191113")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.getreportsrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v4/reports:batchGet")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "analyticsreporting.reports.batchGet" call.
// Exactly one of *GetReportsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *GetReportsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ReportsBatchGetCall) Do(opts ...googleapi.CallOption) (*GetReportsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &GetReportsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Returns the Analytics data.",
// "flatPath": "v4/reports:batchGet",
// "httpMethod": "POST",
// "id": "analyticsreporting.reports.batchGet",
// "parameterOrder": [],
// "parameters": {},
// "path": "v4/reports:batchGet",
// "request": {
// "$ref": "GetReportsRequest"
// },
// "response": {
// "$ref": "GetReportsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/analytics",
// "https://www.googleapis.com/auth/analytics.readonly"
// ]
// }
}
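// Illustrative sketch (not part of the generated surface): issuing the
// batchGet call. svc is assumed to be a *Service constructed
// elsewhere, and the empty request body is a placeholder.
//
//	resp, err := svc.Reports.BatchGet(&GetReportsRequest{}).Context(ctx).Do()
//	if err != nil {
//		// inspect *googleapi.Error here for the HTTP status code
//	}
//	_ = resp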
// method id "analyticsreporting.userActivity.search":
type UserActivitySearchCall struct {
s *Service
searchuseractivityrequest *SearchUserActivityRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Search: Returns User Activity data.
func (r *UserActivityService) Search(searchuseractivityrequest *SearchUserActivityRequest) *UserActivitySearchCall {
c := &UserActivitySearchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.searchuseractivityrequest = searchuseractivityrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *UserActivitySearchCall) Fields(s ...googleapi.Field) *UserActivitySearchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *UserActivitySearchCall) Context(ctx context.Context) *UserActivitySearchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *UserActivitySearchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *UserActivitySearchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191113")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.searchuseractivityrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v4/userActivity:search")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "analyticsreporting.userActivity.search" call.
// Exactly one of *SearchUserActivityResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *SearchUserActivityResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *UserActivitySearchCall) Do(opts ...googleapi.CallOption) (*SearchUserActivityResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &SearchUserActivityResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Returns User Activity data.",
// "flatPath": "v4/userActivity:search",
// "httpMethod": "POST",
// "id": "analyticsreporting.userActivity.search",
// "parameterOrder": [],
// "parameters": {},
// "path": "v4/userActivity:search",
// "request": {
// "$ref": "SearchUserActivityRequest"
// },
// "response": {
// "$ref": "SearchUserActivityResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/analytics",
// "https://www.googleapis.com/auth/analytics.readonly"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *UserActivitySearchCall) Pages(ctx context.Context, f func(*SearchUserActivityResponse) error) error {
c.ctx_ = ctx
defer func(pt string) { c.searchuseractivityrequest.PageToken = pt }(c.searchuseractivityrequest.PageToken) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.searchuseractivityrequest.PageToken = x.NextPageToken
}
}
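// Illustrative sketch (not part of the generated surface): iterating
// every page with Pages. The callback runs once per page; returning a
// non-nil error stops the iteration.
//
//	err := call.Pages(ctx, func(page *SearchUserActivityResponse) error {
//		for _, session := range page.Sessions {
//			_ = session // process each UserActivitySession
//		}
//		return nil
//	})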
|
{
return err
}
|
test_cal_plus.py
|
"""
1、case顺序:加-除-减-乘
2、fixture方法在case前打印【开始计算】,结束后打印【计算结束】
3、fixture方法存在在conftest.py,设置scope=module
4、控制case只执行顺序为:加-减-乘-除
5、结合allure生成本地测试报告
"""
import allure
import pytest
import yaml
from test_Calculator.src.calculator import Calculator
def get_data():
with open('./data.yml') as data_x:
data = yaml.safe_load(data_x)
data_data = data['datas']
data_name = data['ids']
return [data_data, data_name]
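# Illustrative sketch (assumed layout, not shipped here): data.yml pairs
# each parameter set with a matching id list, along the lines of
#
# datas:
#   data_add: [[1, 1, 2], [2, 3, 5]]
# ids:
#   ids_add: ['1+1=2', '2+3=5']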
data = get_data()
get_cal = Calculator()
@pytest.mark.feature("测试方法")
class Test_Calculator:
@allure.story('Addition test')
@pytest.mark.run(order=0)
@pytest.mark.usefixtures("prints")
@pytest.mark.parametr
|
lt", data[0]['data_add'], ids=data[1]['ids_add'])
def test_add(self, a, b, result):
assert get_cal.add(a, b) == result
@allure.story('Division test')
@pytest.mark.run(order=3)
@pytest.mark.parametrize("a, b, result", data[0]['data_div'], ids=data[1]['ids_div'])
def test_div(self, a, b, result):
assert get_cal.div(a, b) == result
@allure.story('Subtraction test')
@pytest.mark.run(order=1)
@pytest.mark.parametrize("a, b, result", data[0]['data_sub'], ids=data[1]['ids_sub'])
def test_sub(self, a, b, result):
assert get_cal.sub(a, b) == result
@allure.story('Multiplication test')
@pytest.mark.run(order=2)
@pytest.mark.parametrize("a, b, result", data[0]['data_mul'], ids=data[1]['ids_mul'])
def test_mul(self, a, b, result):
assert get_cal.mul(a, b) == result
if __name__ == '__main__':
pytest.main(['-vs', 'test_cal_plus.py'])
|
ize("a, b, resu
|
solaris.rs
|
pub type door_attr_t = ::c_uint;
pub type door_id_t = ::c_ulonglong;
s! {
pub struct shmid_ds {
pub shm_perm: ::ipc_perm,
pub shm_segsz: ::size_t,
pub shm_flags: ::uintptr_t,
pub shm_lkcnt: ::c_ushort,
pub shm_lpid: ::pid_t,
pub shm_cpid: ::pid_t,
pub shm_nattch: ::shmatt_t,
pub shm_cnattch: ::c_ulong,
pub shm_atime: ::time_t,
pub shm_dtime: ::time_t,
pub shm_ctime: ::time_t,
pub shm_amp: *mut ::c_void,
pub shm_gransize: u64,
pub shm_allocated: u64,
pub shm_pad4: [i64; 1],
}
pub struct door_desc_t__d_data__d_desc {
pub d_descriptor: ::c_int,
pub d_id: ::door_id_t
}
}
pub const PORT_SOURCE_POSTWAIT: ::c_int = 8;
pub const PORT_SOURCE_SIGNAL: ::c_int = 9;
pub const EPOLLEXCLUSIVE: ::c_int = 0x10000000;
pub const AF_LOCAL: ::c_int = 0;
pub const AF_FILE: ::c_int = 0;
extern "C" {
pub fn fexecve(
fd: ::c_int,
argv: *const *const ::c_char,
envp: *const *const ::c_char,
) -> ::c_int;
pub fn mincore(
addr: *const ::c_void,
len: ::size_t,
vec: *mut ::c_char,
) -> ::c_int;
pub fn door_call(d: ::c_int, params: *const door_arg_t) -> ::c_int;
pub fn door_return(
data_ptr: *const ::c_char,
data_size: ::size_t,
desc_ptr: *const door_desc_t,
num_desc: ::c_uint,
);
pub fn door_create(
server_procedure: extern "C" fn(
cookie: *const ::c_void,
argp: *const ::c_char,
arg_size: ::size_t,
dp: *const door_desc_t,
n_desc: ::c_uint,
),
cookie: *const ::c_void,
attributes: door_attr_t,
) -> ::c_int;
pub fn fattach(fildes: ::c_int, path: *const ::c_char) -> ::c_int;
}
|
s_no_extra_traits! {
#[cfg_attr(feature = "extra_traits", allow(missing_debug_implementations))]
pub union door_desc_t__d_data {
pub d_desc: door_desc_t__d_data__d_desc,
d_resv: [::c_int; 5], /* Check out /usr/include/sys/door.h */
}
#[cfg_attr(feature = "extra_traits", allow(missing_debug_implementations))]
pub struct door_desc_t {
pub d_attributes: door_attr_t,
pub d_data: door_desc_t__d_data,
}
#[cfg_attr(feature = "extra_traits", allow(missing_debug_implementations))]
pub struct door_arg_t {
pub data_ptr: *const ::c_char,
pub data_size: ::size_t,
pub desc_ptr: *const door_desc_t,
pub dec_num: ::c_uint,
pub rbuf: *const ::c_char,
pub rsize: ::size_t,
}
}
| |
client.go
|
package phrase
import (
"bytes"
"context"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"mime/multipart"
"net/http"
"net/http/httputil"
"net/url"
"os"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"unicode/utf8"
"golang.org/x/oauth2"
)
var (
jsonCheck = regexp.MustCompile(`(?i:(?:application|text)/(?:vnd\.[^;]+\+)?json)`)
xmlCheck = regexp.MustCompile(`(?i:(?:application|text)/xml)`)
)
// APIClient manages communication with the Phrase API Reference API v2.0.0
// In most cases there should be only one, shared, APIClient.
type APIClient struct {
cfg *Configuration
common service // Reuse a single struct instead of allocating one for each service on the heap.
// API Services
AccountsApi *AccountsApiService
AuthorizationsApi *AuthorizationsApiService
BitbucketSyncApi *BitbucketSyncApiService
BlacklistedKeysApi *BlacklistedKeysApiService
BranchesApi *BranchesApiService
CommentsApi *CommentsApiService
DistributionsApi *DistributionsApiService
DocumentsApi *DocumentsApiService
FormatsApi *FormatsApiService
GitHubSyncApi *GitHubSyncApiService
GitLabSyncApi *GitLabSyncApiService
GlossariesApi *GlossariesApiService
GlossaryTermTranslationsApi *GlossaryTermTranslationsApiService
GlossaryTermsApi *GlossaryTermsApiService
InvitationsApi *InvitationsApiService
JobLocalesApi *JobLocalesApiService
JobTemplateLocalesApi *JobTemplateLocalesApiService
JobTemplatesApi *JobTemplatesApiService
JobsApi *JobsApiService
KeysApi *KeysApiService
LocalesApi *LocalesApiService
MembersApi *MembersApiService
NotificationGroupsApi *NotificationGroupsApiService
NotificationsApi *NotificationsApiService
OrdersApi *OrdersApiService
ProjectsApi *ProjectsApiService
ReleasesApi *ReleasesApiService
ScreenshotMarkersApi *ScreenshotMarkersApiService
ScreenshotsApi *ScreenshotsApiService
SearchApi *SearchApiService
SpacesApi *SpacesApiService
StyleGuidesApi *StyleGuidesApiService
TagsApi *TagsApiService
TeamsApi *TeamsApiService
TranslationsApi *TranslationsApiService
UploadsApi *UploadsApiService
UsersApi *UsersApiService
VariablesApi *VariablesApiService
VersionsHistoryApi *VersionsHistoryApiService
WebhooksApi *WebhooksApiService
}
type service struct {
client *APIClient
}
// NewAPIClient creates a new API client. Requires a userAgent string describing your application.
// optionally a custom http.Client to allow for advanced features such as caching.
func NewAPIClient(cfg *Configuration) *APIClient {
if cfg.HTTPClient == nil {
cfg.HTTPClient = http.DefaultClient
}
c := &APIClient{}
c.cfg = cfg
c.common.client = c
// API Services
c.AccountsApi = (*AccountsApiService)(&c.common)
c.AuthorizationsApi = (*AuthorizationsApiService)(&c.common)
c.BitbucketSyncApi = (*BitbucketSyncApiService)(&c.common)
c.BlacklistedKeysApi = (*BlacklistedKeysApiService)(&c.common)
c.BranchesApi = (*BranchesApiService)(&c.common)
c.CommentsApi = (*CommentsApiService)(&c.common)
c.DistributionsApi = (*DistributionsApiService)(&c.common)
c.DocumentsApi = (*DocumentsApiService)(&c.common)
c.FormatsApi = (*FormatsApiService)(&c.common)
c.GitHubSyncApi = (*GitHubSyncApiService)(&c.common)
c.GitLabSyncApi = (*GitLabSyncApiService)(&c.common)
c.GlossariesApi = (*GlossariesApiService)(&c.common)
c.GlossaryTermTranslationsApi = (*GlossaryTermTranslationsApiService)(&c.common)
c.GlossaryTermsApi = (*GlossaryTermsApiService)(&c.common)
c.InvitationsApi = (*InvitationsApiService)(&c.common)
c.JobLocalesApi = (*JobLocalesApiService)(&c.common)
c.JobTemplateLocalesApi = (*JobTemplateLocalesApiService)(&c.common)
c.JobTemplatesApi = (*JobTemplatesApiService)(&c.common)
c.JobsApi = (*JobsApiService)(&c.common)
c.KeysApi = (*KeysApiService)(&c.common)
c.LocalesApi = (*LocalesApiService)(&c.common)
c.MembersApi = (*MembersApiService)(&c.common)
c.NotificationGroupsApi = (*NotificationGroupsApiService)(&c.common)
c.NotificationsApi = (*NotificationsApiService)(&c.common)
c.OrdersApi = (*OrdersApiService)(&c.common)
c.ProjectsApi = (*ProjectsApiService)(&c.common)
c.ReleasesApi = (*ReleasesApiService)(&c.common)
c.ScreenshotMarkersApi = (*ScreenshotMarkersApiService)(&c.common)
c.ScreenshotsApi = (*ScreenshotsApiService)(&c.common)
c.SearchApi = (*SearchApiService)(&c.common)
c.SpacesApi = (*SpacesApiService)(&c.common)
c.StyleGuidesApi = (*StyleGuidesApiService)(&c.common)
c.TagsApi = (*TagsApiService)(&c.common)
c.TeamsApi = (*TeamsApiService)(&c.common)
c.TranslationsApi = (*TranslationsApiService)(&c.common)
c.UploadsApi = (*UploadsApiService)(&c.common)
c.UsersApi = (*UsersApiService)(&c.common)
c.VariablesApi = (*VariablesApiService)(&c.common)
c.VersionsHistoryApi = (*VersionsHistoryApiService)(&c.common)
c.WebhooksApi = (*WebhooksApiService)(&c.common)
return c
}
func atoi(in string) (int, error) {
return strconv.Atoi(in)
}
// selectHeaderContentType select a content type from the available list.
func selectHeaderContentType(contentTypes []string) string {
if len(contentTypes) == 0 {
return ""
}
if contains(contentTypes, "application/json") {
return "application/json"
}
return contentTypes[0] // use the first content type specified in 'consumes'
}
// selectHeaderAccept join all accept types and return
func selectHeaderAccept(accepts []string) string {
if len(accepts) == 0 {
return ""
}
if contains(accepts, "application/json") {
return "application/json"
}
return strings.Join(accepts, ",")
}
// contains is a case-insensitive match, finding needle in a haystack
func contains(haystack []string, needle string) bool {
for _, a := range haystack {
if strings.ToLower(a) == strings.ToLower(needle) {
return true
}
}
return false
}
// Verify optional parameters are of the correct type.
func typeCheckParameter(obj interface{}, expected string, name string) error {
// Make sure there is an object.
if obj == nil {
return nil
}
// Check the type is as expected.
if reflect.TypeOf(obj).String() != expected {
return fmt.Errorf("Expected %s to be of type %s but received %s.", name, expected, reflect.TypeOf(obj).String())
}
return nil
}
// parameterToString convert interface{} parameters to string, using a delimiter if format is provided.
func parameterToString(obj interface{}, collectionFormat string) string {
var delimiter string
switch collectionFormat {
case "pipes":
delimiter = "|"
case "ssv":
delimiter = " "
case "tsv":
delimiter = "\t"
case "csv":
delimiter = ","
}
if reflect.TypeOf(obj).Kind() == reflect.Slice {
return strings.Trim(strings.Replace(fmt.Sprint(obj), " ", delimiter, -1), "[]")
} else if t, ok := obj.(time.Time); ok {
return t.Format(time.RFC3339)
}
return fmt.Sprintf("%v", obj)
}
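// Illustrative sketch (not part of the generated client): how the
// collection formats above render.
//
//	parameterToString([]string{"a", "b"}, "csv")   // "a,b"
//	parameterToString([]string{"a", "b"}, "pipes") // "a|b"
//	parameterToString(time.Now(), "")              // RFC 3339 timestamp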
// helper for converting interface{} parameters to json strings
func parameterToJson(obj interface{}) (string, error) {
jsonBuf, err := json.Marshal(obj)
if err != nil {
return "", err
}
return string(jsonBuf), err
}
// callAPI do the request.
func (c *APIClient) callAPI(request *http.Request) (*APIResponse, error) {
if c.cfg.Debug {
dump, err := httputil.DumpRequestOut(request, true)
if err != nil {
return nil, err
}
log.Printf("\n%s\n", string(dump))
}
resp, err := c.cfg.HTTPClient.Do(request)
if err != nil {
return nil, err
}
response := NewAPIResponse(resp)
if c.cfg.Debug {
dump, err := httputil.DumpResponse(resp, true)
if err != nil {
return response, err
}
log.Printf("\n%s\n", string(dump))
}
return response, err
}
// ChangeBasePath changes base path to allow switching to mocks
func (c *APIClient) ChangeBasePath(path string) {
c.cfg.BasePath = path
}
// Allow modification of underlying config for alternate implementations and testing
// Caution: modifying the configuration while live can cause data races and potentially unwanted behavior
func (c *APIClient) GetConfig() *Configuration {
return c.cfg
}
// prepareRequest build the request
func (c *APIClient) prepareRequest(
ctx context.Context,
path string, method string,
postBody interface{},
headerParams map[string]string,
queryParams url.Values,
formParams url.Values,
formFileName string,
fileName string,
fileBytes []byte) (localVarRequest *http.Request, err error) {
var body *bytes.Buffer
// Detect postBody type and post.
if postBody != nil {
contentType := headerParams["Content-Type"]
if contentType == "" {
contentType = detectContentType(postBody)
headerParams["Content-Type"] = contentType
}
body, err = setBody(postBody, contentType)
if err != nil {
return nil, err
}
}
// add form parameters and file if available.
if strings.HasPrefix(headerParams["Content-Type"], "multipart/form-data") && len(formParams) > 0 || (len(fileBytes) > 0 && fileName != "") {
if body != nil {
return nil, errors.New("Cannot specify postBody and multipart form at the same time.")
}
body = &bytes.Buffer{}
w := multipart.NewWriter(body)
for k, v := range formParams {
for _, iv := range v {
if strings.HasPrefix(k, "@") { // file
err = addFile(w, k[1:], iv)
if err != nil {
return nil, err
}
} else { // form value
w.WriteField(k, iv)
}
}
}
if len(fileBytes) > 0 && fileName != "" {
w.Boundary()
part, err := w.CreateFormFile(formFileName, filepath.Base(fileName))
if err != nil {
return nil, err
}
_, err = part.Write(fileBytes)
if err != nil {
return nil, err
}
}
// Set the Boundary in the Content-Type
headerParams["Content-Type"] = w.FormDataContentType()
// Set Content-Length
headerParams["Content-Length"] = fmt.Sprintf("%d", body.Len())
w.Close()
}
if strings.HasPrefix(headerParams["Content-Type"], "application/x-www-form-urlencoded") && len(formParams) > 0 {
if body != nil {
return nil, errors.New("Cannot specify postBody and x-www-form-urlencoded form at the same time.")
}
body = &bytes.Buffer{}
body.WriteString(formParams.Encode())
// Set Content-Length
headerParams["Content-Length"] = fmt.Sprintf("%d", body.Len())
}
// Setup path and query parameters
url, err := url.Parse(path)
if err != nil {
return nil, err
}
// Override request host, if applicable
if c.cfg.Host != "" {
url.Host = c.cfg.Host
}
// Override request scheme, if applicable
if c.cfg.Scheme != "" {
url.Scheme = c.cfg.Scheme
}
// Adding Query Param
query := url.Query()
for k, v := range queryParams {
for _, iv := range v {
query.Add(k, iv)
}
}
// Encode the parameters.
url.RawQuery = query.Encode()
// Generate a new request
if body != nil {
localVarRequest, err = http.NewRequest(method, url.String(), body)
} else {
localVarRequest, err = http.NewRequest(method, url.String(), nil)
}
if err != nil {
return nil, err
}
// add header parameters, if any
if len(headerParams) > 0 {
headers := http.Header{}
for h, v := range headerParams {
headers.Set(h, v)
}
localVarRequest.Header = headers
}
// Add the user agent to the request.
localVarRequest.Header.Add("User-Agent", c.cfg.UserAgent)
if ctx != nil {
// add context to the request
localVarRequest = localVarRequest.WithContext(ctx)
// Walk through any authentication.
// OAuth2 authentication
if tok, ok := ctx.Value(ContextOAuth2).(oauth2.TokenSource); ok {
// We were able to grab an oauth2 token from the context
var latestToken *oauth2.Token
if latestToken, err = tok.Token(); err != nil {
return nil, err
}
latestToken.SetAuthHeader(localVarRequest)
}
// Basic HTTP Authentication
if auth, ok := ctx.Value(ContextBasicAuth).(BasicAuth); ok {
localVarRequest.SetBasicAuth(auth.UserName, auth.Password)
}
// AccessToken Authentication
if auth, ok := ctx.Value(ContextAccessToken).(string); ok {
localVarRequest.Header.Add("Authorization", "Bearer "+auth)
}
}
for header, value := range c.cfg.DefaultHeader {
localVarRequest.Header.Add(header, value)
}
return localVarRequest, nil
}
func (c *APIClient) decode(v interface{}, b []byte, contentType string) (err error) {
if len(b) == 0 {
return nil
}
if s, ok := v.(*string); ok {
*s = string(b)
return nil
}
if f, ok := v.(**os.File); ok {
file, err := ioutil.TempFile(os.TempDir(), "phrase-locale-download-*")
if err != nil {
return err
}
if _, err := file.Write(b); err != nil {
return err
}
if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
*f = file
return nil
}
if xmlCheck.MatchString(contentType) {
if err = xml.Unmarshal(b, v); err != nil
|
return nil
}
if jsonCheck.MatchString(contentType) {
if err = json.Unmarshal(b, v); err != nil {
return err
}
return nil
}
return errors.New("undefined response type")
}
// Add a file to the multipart request
func addFile(w *multipart.Writer, fieldName, path string) error {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
part, err := w.CreateFormFile(fieldName, filepath.Base(path))
if err != nil {
return err
}
_, err = io.Copy(part, file)
return err
}
// Prevent trying to import "fmt"
func reportError(format string, a ...interface{}) error {
return fmt.Errorf(format, a...)
}
// Set request body from an interface{}
func setBody(body interface{}, contentType string) (bodyBuf *bytes.Buffer, err error) {
if bodyBuf == nil {
bodyBuf = &bytes.Buffer{}
}
if reader, ok := body.(io.Reader); ok {
_, err = bodyBuf.ReadFrom(reader)
} else if b, ok := body.([]byte); ok {
_, err = bodyBuf.Write(b)
} else if s, ok := body.(string); ok {
_, err = bodyBuf.WriteString(s)
} else if s, ok := body.(*string); ok {
_, err = bodyBuf.WriteString(*s)
} else if jsonCheck.MatchString(contentType) {
err = json.NewEncoder(bodyBuf).Encode(body)
} else if xmlCheck.MatchString(contentType) {
err = xml.NewEncoder(bodyBuf).Encode(body)
}
if err != nil {
return nil, err
}
if bodyBuf.Len() == 0 {
err = fmt.Errorf("Invalid body type %s\n", contentType)
return nil, err
}
return bodyBuf, nil
}
// detectContentType method is used to figure out `Request.Body` content type for request header
func detectContentType(body interface{}) string {
contentType := "text/plain; charset=utf-8"
kind := reflect.TypeOf(body).Kind()
switch kind {
case reflect.Struct, reflect.Map, reflect.Ptr:
contentType = "application/json; charset=utf-8"
case reflect.String:
contentType = "text/plain; charset=utf-8"
default:
if b, ok := body.([]byte); ok {
contentType = http.DetectContentType(b)
} else if kind == reflect.Slice {
contentType = "application/json; charset=utf-8"
}
}
return contentType
}
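// Illustrative sketch (not part of the generated client): how the
// detection above maps common body types.
//
//	detectContentType(struct{}{})    // "application/json; charset=utf-8"
//	detectContentType("plain text")  // "text/plain; charset=utf-8"
//	detectContentType([]byte{0xff})  // sniffed via http.DetectContentType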
// Ripped from https://github.com/gregjones/httpcache/blob/master/httpcache.go
type cacheControl map[string]string
func parseCacheControl(headers http.Header) cacheControl {
cc := cacheControl{}
ccHeader := headers.Get("Cache-Control")
for _, part := range strings.Split(ccHeader, ",") {
part = strings.Trim(part, " ")
if part == "" {
continue
}
if strings.ContainsRune(part, '=') {
keyval := strings.Split(part, "=")
cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
} else {
cc[part] = ""
}
}
return cc
}
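// Illustrative sketch (not part of the generated client): splitting a
// Cache-Control header into a directive map.
//
//	h := http.Header{"Cache-Control": []string{"max-age=300, no-store"}}
//	cc := parseCacheControl(h) // cc["max-age"] == "300", cc["no-store"] == ""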
// CacheExpires helper function to determine remaining time before repeating a request.
func CacheExpires(r *http.Response) time.Time {
// Figure out when the cache expires.
var expires time.Time
now, err := time.Parse(time.RFC1123, r.Header.Get("date"))
if err != nil {
return time.Now()
}
respCacheControl := parseCacheControl(r.Header)
if maxAge, ok := respCacheControl["max-age"]; ok {
lifetime, err := time.ParseDuration(maxAge + "s")
if err != nil {
expires = now
} else {
expires = now.Add(lifetime)
}
} else {
expiresHeader := r.Header.Get("Expires")
if expiresHeader != "" {
expires, err = time.Parse(time.RFC1123, expiresHeader)
if err != nil {
expires = now
}
}
}
return expires
}
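// Illustrative sketch (not part of the generated client): CacheExpires
// prefers max-age over the Expires header, so a response dated "now"
// with "Cache-Control: max-age=60" expires roughly 60s from now.
//
//	ttl := time.Until(CacheExpires(resp)) // resp is assumed to be an *http.Response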
func strlen(s string) int {
return utf8.RuneCountInString(s)
}
// GenericOpenAPIError Provides access to the body, error and model on returned errors.
type GenericOpenAPIError struct {
body []byte
error string
model interface{}
}
// Error returns non-empty string if there was an error.
func (e GenericOpenAPIError) Error() string {
return e.error
}
// Body returns the raw bytes of the response
func (e GenericOpenAPIError) Body() []byte {
return e.body
}
// Model returns the unpacked model of the error
func (e GenericOpenAPIError) Model() interface{} {
return e.model
}
|
{
return err
}
|
wallet_encryption.py
|
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Wallet encryption"""
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
assert_greater_than,
assert_greater_than_or_equal,
)
class WalletEncryptionTest(BitcoinTestFramework):
|
if __name__ == '__main__':
WalletEncryptionTest().main()
|
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
passphrase = "WalletPassphrase"
passphrase2 = "SecondWalletPassphrase"
# Make sure the wallet isn't encrypted first
address = self.nodes[0].getnewaddress()
privkey = self.nodes[0].dumpprivkey(address)
assert_equal(privkey[:1], "c")  # regtest WIF keys for compressed pubkeys start with 'c'
assert_equal(len(privkey), 52)
# Encrypt the wallet
self.nodes[0].node_encrypt_wallet(passphrase)
self.start_node(0)
# Test that the wallet is encrypted
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Check that walletpassphrase works
self.nodes[0].walletpassphrase(passphrase, 2)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
# Check that the timeout is right
time.sleep(2)
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test wrong passphrase
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10)
# Test walletlock
self.nodes[0].walletpassphrase(passphrase, 84600)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test passphrase changes
self.nodes[0].walletpassphrasechange(passphrase, passphrase2)
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10)
self.nodes[0].walletpassphrase(passphrase2, 10)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
# Test timeout bounds
assert_raises_rpc_error(-8, "Timeout cannot be negative.", self.nodes[0].walletpassphrase, passphrase2, -10)
# Check the timeout
# Check a time less than the limit
MAX_VALUE = 100000000
expected_time = int(time.time()) + MAX_VALUE - 600
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE - 600)
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
# Check a time greater than the limit
expected_time = int(time.time()) + MAX_VALUE - 1
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE + 1000)
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
|
pod_disruption_budget.rs
|
// Generated from definition io.k8s.api.policy.v1.PodDisruptionBudget
/// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
#[derive(Clone, Debug, Default, PartialEq)]
pub struct PodDisruptionBudget {
/// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
pub metadata: crate::apimachinery::pkg::apis::meta::v1::ObjectMeta,
/// Specification of the desired behavior of the PodDisruptionBudget.
pub spec: Option<crate::api::policy::v1::PodDisruptionBudgetSpec>,
/// Most recently observed status of the PodDisruptionBudget.
pub status: Option<crate::api::policy::v1::PodDisruptionBudgetStatus>,
}
// Begin policy/v1/PodDisruptionBudget
// Generated from operation createPolicyV1NamespacedPodDisruptionBudget
impl PodDisruptionBudget {
/// create a PodDisruptionBudget
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::CreateResponse`]`<Self>>` constructor, or [`crate::CreateResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn create(
namespace: &str,
body: &crate::api::policy::v1::PodDisruptionBudget,
optional: crate::CreateOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::CreateResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets?",
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::post(__url);
let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
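// Illustrative sketch (HTTP client of the caller's choosing; names hypothetical):
//
//     let pdb = PodDisruptionBudget::default();
//     let (request, response_body) =
//         PodDisruptionBudget::create("default", &pdb, Default::default())?;
//     // Send `request` with any HTTP client, then feed the raw response bytes
//     // and status code into `response_body(status_code)` to parse the
//     // resulting crate::CreateResponse<PodDisruptionBudget>.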
// Generated from operation deletePolicyV1CollectionNamespacedPodDisruptionBudget
impl PodDisruptionBudget {
/// delete collection of PodDisruptionBudget
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::DeleteResponse`]`<`[`crate::List`]`<Self>>>` constructor, or [`crate::DeleteResponse`]`<`[`crate::List`]`<Self>>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `delete_optional`
///
/// Delete options. Use `Default::default()` to not pass any.
///
/// * `list_optional`
///
/// List options. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn delete_collection(
namespace: &str,
delete_optional: crate::DeleteOptional<'_>,
list_optional: crate::ListOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::DeleteResponse<crate::List<Self>>>), crate::RequestError> {
let __url = format!("/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets?",
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
list_optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::delete(__url);
let __body = crate::serde_json::to_vec(&delete_optional).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation deletePolicyV1NamespacedPodDisruptionBudget
impl PodDisruptionBudget {
/// delete a PodDisruptionBudget
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::DeleteResponse`]`<Self>>` constructor, or [`crate::DeleteResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PodDisruptionBudget
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn delete(
name: &str,
namespace: &str,
optional: crate::DeleteOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::DeleteResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let __request = crate::http::Request::delete(__url);
let __body = crate::serde_json::to_vec(&optional).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation listPolicyV1NamespacedPodDisruptionBudget
impl PodDisruptionBudget {
/// list or watch objects of kind PodDisruptionBudget
///
/// This operation only supports listing all items of this type.
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::ListResponse`]`<Self>>` constructor, or [`crate::ListResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn list(
namespace: &str,
optional: crate::ListOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::ListResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets?",
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation listPolicyV1PodDisruptionBudgetForAllNamespaces
impl PodDisruptionBudget {
/// list or watch objects of kind PodDisruptionBudget
///
/// This operation only supports listing all items of this type.
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::ListResponse`]`<Self>>` constructor, or [`crate::ListResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn list_for_all_namespaces(
optional: crate::ListOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::ListResponse<Self>>), crate::RequestError> {
let __url = "/apis/policy/v1/poddisruptionbudgets?".to_owned();
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation patchPolicyV1NamespacedPodDisruptionBudget
impl PodDisruptionBudget {
/// partially update the specified PodDisruptionBudget
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::PatchResponse`]`<Self>>` constructor, or [`crate::PatchResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PodDisruptionBudget
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn patch(
name: &str,
namespace: &str,
body: &crate::apimachinery::pkg::apis::meta::v1::Patch,
optional: crate::PatchOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::PatchResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::patch(__url);
let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static(match body {
crate::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
crate::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
crate::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
}));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation patchPolicyV1NamespacedPodDisruptionBudgetStatus
impl PodDisruptionBudget {
/// partially update status of the specified PodDisruptionBudget
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::PatchResponse`]`<Self>>` constructor, or [`crate::PatchResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PodDisruptionBudget
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn patch_status(
name: &str,
namespace: &str,
body: &crate::apimachinery::pkg::apis::meta::v1::Patch,
optional: crate::PatchOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::PatchResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}/status?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::patch(__url);
let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static(match body {
crate::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
crate::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
crate::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
}));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation readPolicyV1NamespacedPodDisruptionBudget
impl PodDisruptionBudget {
/// read the specified PodDisruptionBudget
///
/// Use the returned [`crate::ResponseBody`]`<`[`ReadPodDisruptionBudgetResponse`]`>` constructor, or [`ReadPodDisruptionBudgetResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PodDisruptionBudget
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
#[cfg(feature = "api")]
pub fn read(
name: &str,
namespace: &str,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<ReadPodDisruptionBudgetResponse>), crate::RequestError> {
let __url = format!("/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let __request = crate::http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<ReadPodDisruptionBudgetResponse as Response>::try_from_parts` to parse the HTTP response body of [`PodDisruptionBudget::read`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadPodDisruptionBudgetResponse {
Ok(crate::api::policy::v1::PodDisruptionBudget),
Other(Result<Option<crate::serde_json::Value>, crate::serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReadPodDisruptionBudgetResponse {
fn try_from_parts(status_code: crate::http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
crate::http::StatusCode::OK => {
let result = match crate::serde_json::from_slice(buf) {
Ok(value) => value,
Err(err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReadPodDisruptionBudgetResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match crate::serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ReadPodDisruptionBudgetResponse::Other(result), read))
},
}
}
}
// Generated from operation readPolicyV1NamespacedPodDisruptionBudgetStatus
impl PodDisruptionBudget {
/// read status of the specified PodDisruptionBudget
///
/// Use the returned [`crate::ResponseBody`]`<`[`ReadPodDisruptionBudgetStatusResponse`]`>` constructor, or [`ReadPodDisruptionBudgetStatusResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PodDisruptionBudget
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
#[cfg(feature = "api")]
pub fn read_status(
name: &str,
namespace: &str,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<ReadPodDisruptionBudgetStatusResponse>), crate::RequestError> {
let __url = format!("/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}/status",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let __request = crate::http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Use `<ReadPodDisruptionBudgetStatusResponse as Response>::try_from_parts` to parse the HTTP response body of [`PodDisruptionBudget::read_status`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadPodDisruptionBudgetStatusResponse {
Ok(crate::api::policy::v1::PodDisruptionBudget),
Other(Result<Option<crate::serde_json::Value>, crate::serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReadPodDisruptionBudgetStatusResponse {
fn try_from_parts(status_code: crate::http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
crate::http::StatusCode::OK => {
let result = match crate::serde_json::from_slice(buf) {
Ok(value) => value,
Err(err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReadPodDisruptionBudgetStatusResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match crate::serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ReadPodDisruptionBudgetStatusResponse::Other(result), read))
},
}
}
}
// Generated from operation replacePolicyV1NamespacedPodDisruptionBudget
impl PodDisruptionBudget {
/// replace the specified PodDisruptionBudget
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::ReplaceResponse`]`<Self>>` constructor, or [`crate::ReplaceResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PodDisruptionBudget
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn replace(
name: &str,
namespace: &str,
body: &crate::api::policy::v1::PodDisruptionBudget,
optional: crate::ReplaceOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::ReplaceResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::put(__url);
let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation replacePolicyV1NamespacedPodDisruptionBudgetStatus
impl PodDisruptionBudget {
/// replace status of the specified PodDisruptionBudget
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::ReplaceResponse`]`<Self>>` constructor, or [`crate::ReplaceResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the PodDisruptionBudget
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn replace_status(
name: &str,
namespace: &str,
body: &crate::api::policy::v1::PodDisruptionBudget,
optional: crate::ReplaceOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::ReplaceResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets/{name}/status?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::put(__url);
let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation watchPolicyV1NamespacedPodDisruptionBudget
impl PodDisruptionBudget {
/// list or watch objects of kind PodDisruptionBudget
///
/// This operation only supports watching one item, or a list of items, of this type for changes.
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::WatchResponse`]`<Self>>` constructor, or [`crate::WatchResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn watch(
namespace: &str,
optional: crate::WatchOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::WatchResponse<Self>>), crate::RequestError>
|
}
// Generated from operation watchPolicyV1PodDisruptionBudgetForAllNamespaces
impl PodDisruptionBudget {
/// list or watch objects of kind PodDisruptionBudget
///
/// This operation only supports watching one item, or a list of items, of this type for changes.
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::WatchResponse`]`<Self>>` constructor, or [`crate::WatchResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn watch_for_all_namespaces(
optional: crate::WatchOptional<'_>,
) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::WatchResponse<Self>>), crate::RequestError> {
let __url = "/apis/policy/v1/poddisruptionbudgets?".to_owned();
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// End policy/v1/PodDisruptionBudget
impl crate::Resource for PodDisruptionBudget {
const API_VERSION: &'static str = "policy/v1";
const GROUP: &'static str = "policy";
const KIND: &'static str = "PodDisruptionBudget";
const VERSION: &'static str = "v1";
const URL_PATH_SEGMENT: &'static str = "poddisruptionbudgets";
type Scope = crate::NamespaceResourceScope;
}
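// Illustrative: these constants are what compose the URLs in the operations
// above, e.g. "/apis/" + API_VERSION + "/namespaces/{namespace}/" + URL_PATH_SEGMENT.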
impl crate::ListableResource for PodDisruptionBudget {
const LIST_KIND: &'static str = "PodDisruptionBudgetList";
}
impl crate::Metadata for PodDisruptionBudget {
type Ty = crate::apimachinery::pkg::apis::meta::v1::ObjectMeta;
fn metadata(&self) -> &<Self as crate::Metadata>::Ty {
&self.metadata
}
fn metadata_mut(&mut self) -> &mut <Self as crate::Metadata>::Ty {
&mut self.metadata
}
}
impl<'de> crate::serde::Deserialize<'de> for PodDisruptionBudget {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_api_version,
Key_kind,
Key_metadata,
Key_spec,
Key_status,
Other,
}
impl<'de> crate::serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error {
Ok(match v {
"apiVersion" => Field::Key_api_version,
"kind" => Field::Key_kind,
"metadata" => Field::Key_metadata,
"spec" => Field::Key_spec,
"status" => Field::Key_status,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = PodDisruptionBudget;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(<Self::Value as crate::Resource>::KIND)
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> {
let mut value_metadata: Option<crate::apimachinery::pkg::apis::meta::v1::ObjectMeta> = None;
let mut value_spec: Option<crate::api::policy::v1::PodDisruptionBudgetSpec> = None;
let mut value_status: Option<crate::api::policy::v1::PodDisruptionBudgetStatus> = None;
while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_api_version => {
let value_api_version: String = crate::serde::de::MapAccess::next_value(&mut map)?;
if value_api_version != <Self::Value as crate::Resource>::API_VERSION {
return Err(crate::serde::de::Error::invalid_value(crate::serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::API_VERSION));
}
},
Field::Key_kind => {
let value_kind: String = crate::serde::de::MapAccess::next_value(&mut map)?;
if value_kind != <Self::Value as crate::Resource>::KIND {
return Err(crate::serde::de::Error::invalid_value(crate::serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::KIND));
}
},
Field::Key_metadata => value_metadata = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_spec => value_spec = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_status => value_status = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(PodDisruptionBudget {
metadata: value_metadata.unwrap_or_default(),
spec: value_spec,
status: value_status,
})
}
}
deserializer.deserialize_struct(
<Self as crate::Resource>::KIND,
&[
"apiVersion",
"kind",
"metadata",
"spec",
"status",
],
Visitor,
)
}
}
impl crate::serde::Serialize for PodDisruptionBudget {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer {
let mut state = serializer.serialize_struct(
<Self as crate::Resource>::KIND,
3 +
self.spec.as_ref().map_or(0, |_| 1) +
self.status.as_ref().map_or(0, |_| 1),
)?;
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::API_VERSION)?;
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::KIND)?;
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", &self.metadata)?;
if let Some(value) = &self.spec {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "spec", value)?;
}
if let Some(value) = &self.status {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "status", value)?;
}
crate::serde::ser::SerializeStruct::end(state)
}
}
#[cfg(feature = "schemars")]
impl crate::schemars::JsonSchema for PodDisruptionBudget {
fn schema_name() -> String {
"io.k8s.api.policy.v1.PodDisruptionBudget".to_owned()
}
fn json_schema(__gen: &mut crate::schemars::gen::SchemaGenerator) -> crate::schemars::schema::Schema {
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::Object))),
object: Some(Box::new(crate::schemars::schema::ObjectValidation {
properties: [
(
"apiVersion".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
(
"kind".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
(
"metadata".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::apimachinery::pkg::apis::meta::v1::ObjectMeta>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"spec".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::policy::v1::PodDisruptionBudgetSpec>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Specification of the desired behavior of the PodDisruptionBudget.".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
(
"status".to_owned(),
{
let mut schema_obj = __gen.subschema_for::<crate::api::policy::v1::PodDisruptionBudgetStatus>().into_object();
schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Most recently observed status of the PodDisruptionBudget.".to_owned()),
..Default::default()
}));
crate::schemars::schema::Schema::Object(schema_obj)
},
),
].into(),
required: [
"metadata".to_owned(),
].into(),
..Default::default()
})),
..Default::default()
})
}
}
|
{
let __url = format!("/apis/policy/v1/namespaces/{namespace}/poddisruptionbudgets?",
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = crate::http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
|
settings_utils.py
|
import os
import json
Environ = os._Environ
def is_on_cloudfoundry(env: Environ=os.environ) -> bool:
return 'VCAP_SERVICES' in env
def load_cups_from_vcap_services(name: str, env: Environ=os.environ) -> None:
'''
Detects if VCAP_SERVICES exists in the environment; if so, parses
it and imports all the credentials from the given custom
user-provided service (CUPS) as strings into the environment.
For more details on CUPS, see:
https://docs.cloudfoundry.org/devguide/services/user-provided.html
'''
if not is_on_cloudfoundry(env):
return
vcap = json.loads(env['VCAP_SERVICES'])
for entry in vcap.get('user-provided', []):
if entry['name'] == name:
for key, value in entry['credentials'].items():
env[key] = value
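# Illustrative sketch (hypothetical payload): given
#   env['VCAP_SERVICES'] = json.dumps({'user-provided': [
#       {'name': 'my-cups', 'credentials': {'API_KEY': 'abc'}}]})
# calling load_cups_from_vcap_services('my-cups', env) sets env['API_KEY'] = 'abc'.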
def
|
(name: str, service: str,
env: Environ=os.environ) -> None:
"""
Sets os.environ['DATABASE_URL'] from a service entry in VCAP_SERVICES.
"""
if not is_on_cloudfoundry(env):
return
# FIXME: this'll break if there are multiple databases. Not an issue right
# now, but could be in the future. Keep an eye on it.
vcap = json.loads(env['VCAP_SERVICES'])
env['DATABASE_URL'] = vcap[service][0]["credentials"]["uri"]
|
load_database_url_from_vcap_services
|
tests.rs
|
use crate::{Error, mock::*};
use frame_support::{assert_ok, assert_noop};
#[test]
fn test_bet_small_bet()
|
#[test]
fn test_bet_whole_pool() {
new_test_ext().execute_with(|| {
assert_ok!(Pooler::deposit(Origin::signed(1), 10000000000000));
assert_ok!(Chance::bet(Origin::signed(2), 10000000000000));
let bet = [(2, 9000000000000)];
println!("large bet {:#?}", Chance::scheduled_bet());
assert_eq!(Chance::scheduled_bet(), bet);
})
}
|
{
new_test_ext().execute_with(|| {
assert_ok!(Pooler::deposit(Origin::signed(1), 100000000000000));
assert_ok!(Chance::bet(Origin::signed(2), 1000000000000));
let bet = [(2, 990000000000)];
println!("small bet {:#?}", Chance::scheduled_bet());
assert_eq!(Chance::scheduled_bet(), bet);
})
}
|
mod.rs
|
use std::collections::BTreeSet;
use maplit::btreeset;
use abstutil::{prettyprint_usize, Timer};
use geom::Speed;
use map_gui::options::OptionsPanel;
use map_gui::render::DrawMap;
use map_gui::tools::{grey_out_map, ChooseSomething, ColorLegend, PopupMsg};
use map_gui::ID;
use map_model::{EditCmd, IntersectionID, LaneID, LaneType, MapEdits};
use widgetry::{
lctrl, Choice, Color, ControlState, Drawable, EventCtx, GfxCtx, HorizontalAlignment, Key, Line,
Menu, Outcome, Panel, State, StyledButtons, Text, TextExt, VerticalAlignment, Widget,
};
pub use self::cluster_traffic_signals::ClusterTrafficSignalEditor;
pub use self::lanes::LaneEditor;
pub use self::routes::RouteEditor;
pub use self::stop_signs::StopSignEditor;
pub use self::traffic_signals::TrafficSignalEditor;
pub use self::validate::{check_blackholes, check_sidewalk_connectivity, try_change_lt};
use crate::app::{App, Transition};
use crate::common::{tool_panel, CommonState, Warping};
use crate::debug::DebugMode;
use crate::sandbox::{GameplayMode, SandboxMode, TimeWarpScreen};
mod bulk;
mod cluster_traffic_signals;
mod lanes;
mod routes;
mod select;
mod stop_signs;
mod traffic_signals;
mod validate;
mod zones;
pub struct EditMode {
tool_panel: Panel,
top_center: Panel,
changelist: Panel,
orig_edits: MapEdits,
orig_dirty: bool,
// Retained state from the SandboxMode that spawned us
mode: GameplayMode,
// edits name, number of commands
changelist_key: (String, usize),
unzoomed: Drawable,
zoomed: Drawable,
}
impl EditMode {
pub fn new(ctx: &mut EventCtx, app: &mut App, mode: GameplayMode) -> Box<dyn State<App>> {
let orig_dirty = app.primary.dirty_from_edits;
assert!(app.primary.suspended_sim.is_none());
app.primary.suspended_sim = Some(app.primary.clear_sim());
let edits = app.primary.map.get_edits();
let layer = crate::layer::map::Static::edits(ctx, app);
Box::new(EditMode {
tool_panel: tool_panel(ctx),
top_center: make_topcenter(ctx, app),
changelist: make_changelist(ctx, app),
orig_edits: edits.clone(),
orig_dirty,
mode,
changelist_key: (edits.edits_name.clone(), edits.commands.len()),
unzoomed: layer.unzoomed,
zoomed: layer.zoomed,
})
}
fn quit(&self, ctx: &mut EventCtx, app: &mut App) -> Transition {
let old_sim = app.primary.suspended_sim.take().unwrap();
// If nothing changed, short-circuit
if app.primary.map.get_edits() == &self.orig_edits {
app.primary.sim = old_sim;
app.primary.dirty_from_edits = self.orig_dirty;
// Could happen if we load some edits, then load whatever we entered edit mode with.
ctx.loading_screen("apply edits", |_, mut timer| {
app.primary
.map
.recalculate_pathfinding_after_edits(&mut timer);
});
return Transition::Pop;
}
ctx.loading_screen("apply edits", move |ctx, mut timer| {
app.primary
.map
.recalculate_pathfinding_after_edits(&mut timer);
if app.primary.current_flags.live_map_edits {
app.primary.sim = old_sim;
app.primary.dirty_from_edits = true;
app.primary
.sim
.handle_live_edited_traffic_signals(&app.primary.map);
let (trips, parked_cars) = app.primary.sim.handle_live_edits(&app.primary.map);
if trips == 0 && parked_cars == 0 {
Transition::Pop
} else {
Transition::Replace(PopupMsg::new(
ctx,
"Map changes complete",
vec![
format!(
"Your edits interrupted {} trips and displaced {} parked cars",
prettyprint_usize(trips),
prettyprint_usize(parked_cars)
),
"Simulation results won't be finalized unless you restart from \
midnight with your changes"
.to_string(),
],
))
}
} else {
Transition::Multi(vec![
Transition::Pop,
Transition::Replace(SandboxMode::async_new(
ctx,
app,
self.mode.clone(),
Box::new(move |ctx, app| {
vec![Transition::Push(TimeWarpScreen::new(
ctx,
app,
old_sim.time(),
None,
))]
}),
)),
])
}
})
}
}
impl State<App> for EditMode {
fn event(&mut self, ctx: &mut EventCtx, app: &mut App) -> Transition {
{
let edits = app.primary.map.get_edits();
let changelist_key = (edits.edits_name.clone(), edits.commands.len());
if self.changelist_key != changelist_key {
self.changelist_key = changelist_key;
self.changelist = make_changelist(ctx, app);
let layer = crate::layer::map::Static::edits(ctx, app);
self.unzoomed = layer.unzoomed;
self.zoomed = layer.zoomed;
}
}
ctx.canvas_movement();
// Restrict what can be selected.
if ctx.redo_mouseover() {
app.primary.current_selection = app.mouseover_unzoomed_roads_and_intersections(ctx);
if match app.primary.current_selection {
Some(ID::Lane(l)) => !can_edit_lane(&self.mode, l, app),
Some(ID::Intersection(i)) => {
!self.mode.can_edit_stop_signs()
&& app.primary.map.maybe_get_stop_sign(i).is_some()
}
Some(ID::Road(_)) => false,
_ => true,
} {
app.primary.current_selection = None;
}
}
if app.opts.dev && ctx.input.pressed(lctrl(Key::D)) {
return Transition::Push(DebugMode::new(ctx));
}
match self.top_center.event(ctx) {
Outcome::Clicked(x) => match x.as_ref() {
"finish editing" => {
return self.quit(ctx, app);
}
_ => unreachable!(),
},
_ => {}
}
match self.changelist.event(ctx) {
Outcome::Clicked(x) => match x.as_ref() {
"manage proposals" => {
let mode = self.mode.clone();
return Transition::Push(ChooseSomething::new(
ctx,
"Manage proposals",
vec![
Choice::string("rename current proposal"),
Choice::string("open a saved proposal").multikey(lctrl(Key::L)),
Choice::string("create a blank proposal"),
Choice::string("save this proposal as..."),
Choice::string("delete this proposal and remove all edits")
.fg(Color::hex("#EB3223")),
],
Box::new(move |choice, ctx, app| match choice.as_ref() {
"rename current proposal" => {
let old_name = app.primary.map.get_edits().edits_name.clone();
Transition::Replace(SaveEdits::new(
ctx,
app,
format!("Rename \"{}\"", old_name),
false,
Some(Transition::Pop),
Box::new(move |_, app| {
abstio::delete_file(abstio::path_edits(
app.primary.map.get_name(),
&old_name,
));
}),
))
}
"open a saved proposal" => {
if app.primary.map.unsaved_edits() {
Transition::Multi(vec![
Transition::Replace(LoadEdits::new(ctx, app, mode.clone())),
Transition::Push(SaveEdits::new(
ctx,
app,
"Do you want to save your proposal first?",
true,
Some(Transition::Multi(vec![
Transition::Pop,
Transition::Pop,
])),
Box::new(|_, _| {}),
)),
])
} else {
Transition::Replace(LoadEdits::new(ctx, app, mode.clone()))
}
}
"create a blank proposal" => {
if app.primary.map.unsaved_edits() {
Transition::Replace(SaveEdits::new(
ctx,
app,
"Do you want to save your proposal first?",
true,
Some(Transition::Pop),
Box::new(|ctx, app| {
apply_map_edits(ctx, app, app.primary.map.new_edits());
}),
))
} else {
apply_map_edits(ctx, app, app.primary.map.new_edits());
Transition::Pop
}
}
"save this proposal as..." => Transition::Replace(SaveEdits::new(
ctx,
app,
format!("Save \"{}\" as", app.primary.map.get_edits().edits_name),
false,
Some(Transition::Pop),
Box::new(|_, _| {}),
)),
"delete this proposal and remove all edits" => {
abstio::delete_file(abstio::path_edits(
app.primary.map.get_name(),
&app.primary.map.get_edits().edits_name,
));
apply_map_edits(ctx, app, app.primary.map.new_edits());
Transition::Pop
}
_ => unreachable!(),
}),
));
}
"load proposal" => {}
"undo" => {
let mut edits = app.primary.map.get_edits().clone();
let maybe_id = cmd_to_id(&edits.commands.pop().unwrap());
apply_map_edits(ctx, app, edits);
if let Some(id) = maybe_id {
return Transition::Push(Warping::new(
ctx,
app.primary.canonical_point(id.clone()).unwrap(),
Some(10.0),
Some(id),
&mut app.primary,
));
}
}
x => {
let idx = x["change #".len()..].parse::<usize>().unwrap();
if let Some(id) = cmd_to_id(&app.primary.map.get_edits().commands[idx - 1]) {
return Transition::Push(Warping::new(
ctx,
app.primary.canonical_point(id.clone()).unwrap(),
Some(10.0),
Some(id),
&mut app.primary,
));
}
}
},
_ => {}
}
// So useful that the hotkey should work even before opening the menu
if ctx.input.pressed(lctrl(Key::L)) {
if app.primary.map.unsaved_edits() {
return Transition::Multi(vec![
Transition::Push(LoadEdits::new(ctx, app, self.mode.clone())),
Transition::Push(SaveEdits::new(
ctx,
app,
"Do you want to save your proposal first?",
true,
Some(Transition::Multi(vec![Transition::Pop, Transition::Pop])),
Box::new(|_, _| {}),
)),
]);
} else {
return Transition::Push(LoadEdits::new(ctx, app, self.mode.clone()));
}
}
if ctx.canvas.cam_zoom < app.opts.min_zoom_for_detail {
if let Some(id) = app.primary.current_selection.clone() {
if app.per_obj.left_click(ctx, "edit this") {
return Transition::Push(Warping::new(
ctx,
app.primary.canonical_point(id).unwrap(),
Some(10.0),
None,
&mut app.primary,
));
}
}
} else {
if let Some(ID::Intersection(id)) = app.primary.current_selection {
if let Some(state) = maybe_edit_intersection(ctx, app, id, &self.mode) {
return Transition::Push(state);
}
}
if let Some(ID::Lane(l)) = app.primary.current_selection {
if app.per_obj.left_click(ctx, "edit lane") {
return Transition::Push(LaneEditor::new(ctx, app, l, self.mode.clone()));
}
}
}
match self.tool_panel.event(ctx) {
Outcome::Clicked(x) => match x.as_ref() {
"back" => self.quit(ctx, app),
"settings" => Transition::Push(OptionsPanel::new(ctx, app)),
_ => unreachable!(),
},
_ => Transition::Keep,
}
}
fn draw(&self, g: &mut GfxCtx, app: &App) {
self.tool_panel.draw(g);
self.top_center.draw(g);
self.changelist.draw(g);
if g.canvas.cam_zoom < app.opts.min_zoom_for_detail {
g.redraw(&self.unzoomed);
} else {
g.redraw(&self.zoomed);
}
CommonState::draw_osd(g, app);
}
}
pub struct SaveEdits {
panel: Panel,
current_name: String,
cancel: Option<Transition>,
on_success: Box<dyn Fn(&mut EventCtx, &mut App)>,
reset: bool,
}
impl SaveEdits {
pub fn new<I: Into<String>>(
ctx: &mut EventCtx,
app: &App,
title: I,
discard: bool,
cancel: Option<Transition>,
on_success: Box<dyn Fn(&mut EventCtx, &mut App)>,
) -> Box<dyn State<App>> {
let initial_name = if app.primary.map.unsaved_edits() {
String::new()
} else {
format!("copy of {}", app.primary.map.get_edits().edits_name)
};
let mut save = SaveEdits {
current_name: initial_name.clone(),
panel: Panel::new(Widget::col(vec![
Line(title).small_heading().draw(ctx),
Widget::row(vec![
"Name:".draw_text(ctx),
Widget::text_entry(ctx, initial_name, true).named("filename"),
]),
// TODO Want this to always consistently be one line high, but it isn't for a blank
// line
Text::new().draw(ctx).named("warning"),
Widget::row(vec![
if discard {
ctx.style()
.btn_solid_destructive_text("Discard proposal")
.build_def(ctx)
} else {
Widget::nothing()
},
if cancel.is_some() {
ctx.style()
.btn_solid_dark_text("Cancel")
.hotkey(Key::Escape)
.build_def(ctx)
} else {
Widget::nothing()
},
ctx.style()
.btn_solid_dark_text("Save")
.disabled(true)
.build_def(ctx),
])
.align_right(),
]))
.build(ctx),
cancel,
on_success,
reset: discard,
};
save.recalc_btn(ctx, app);
Box::new(save)
}
fn recalc_btn(&mut self, ctx: &mut EventCtx, app: &App) {
if self.current_name.is_empty() {
self.panel.replace(
ctx,
"Save",
ctx.style()
.btn_solid_dark_text("Save")
.disabled(true)
.build_def(ctx),
);
self.panel.replace(ctx, "warning", Text::new().draw(ctx));
} else if abstio::file_exists(abstio::path_edits(
app.primary.map.get_name(),
&self.current_name,
)) {
self.panel.replace(
ctx,
"Save",
ctx.style()
.btn_solid_dark_text("Save")
.disabled(true)
.build_def(ctx),
);
self.panel.replace(
ctx,
"warning",
Line("A proposal with this name already exists")
.fg(Color::hex("#FF5E5E"))
.draw(ctx),
);
} else {
self.panel.replace(
ctx,
"Save",
ctx.style()
.btn_solid_dark_text("Save")
.hotkey(Key::Enter)
.build_def(ctx),
);
self.panel.replace(ctx, "warning", Text::new().draw(ctx));
}
}
}
impl State<App> for SaveEdits {
fn event(&mut self, ctx: &mut EventCtx, app: &mut App) -> Transition {
match self.panel.event(ctx) {
Outcome::Clicked(x) => match x.as_ref() {
"Save" | "Overwrite existing proposal" => {
let mut edits = app.primary.map.get_edits().clone();
edits.edits_name = self.current_name.clone();
app.primary.map.must_apply_edits(edits);
app.primary.map.save_edits();
if self.reset {
apply_map_edits(ctx, app, app.primary.map.new_edits());
}
(self.on_success)(ctx, app);
return Transition::Pop;
}
"Discard proposal" => {
apply_map_edits(ctx, app, app.primary.map.new_edits());
return Transition::Pop;
}
"Cancel" => {
return self.cancel.take().unwrap();
}
_ => unreachable!(),
},
_ => {}
}
let name = self.panel.text_box("filename");
if name != self.current_name {
self.current_name = name;
self.recalc_btn(ctx, app);
}
Transition::Keep
}
fn draw(&self, g: &mut GfxCtx, app: &App) {
grey_out_map(g, app);
self.panel.draw(g);
}
}
struct LoadEdits {
panel: Panel,
mode: GameplayMode,
}
impl LoadEdits {
fn new(ctx: &mut EventCtx, app: &App, mode: GameplayMode) -> Box<dyn State<App>> {
let current_edits_name = &app.primary.map.get_edits().edits_name;
let your_edits = vec![
Line("Your proposals").small_heading().draw(ctx),
Menu::new(
ctx,
abstio::list_all_objects(abstio::path_all_edits(app.primary.map.get_name()))
.into_iter()
.map(|name| Choice::new(name.clone(), ()).active(&name != current_edits_name))
.collect(),
),
];
// widgetry can't toggle keyboard focus between two menus, so just use buttons for the less
// common use case.
let mut proposals = vec![Line("Community proposals").small_heading().draw(ctx)];
// Up-front filter out proposals that definitely don't fit the current map
for name in abstio::list_all_objects(abstio::path("system/proposals")) {
let path = abstio::path(format!("system/proposals/{}.json", name));
if MapEdits::load(&app.primary.map, path.clone(), &mut Timer::throwaway()).is_ok() {
proposals.push(
ctx.style()
.btn_outline_light_text(&name)
.build_widget(ctx, &path),
);
}
}
Box::new(LoadEdits {
mode,
panel: Panel::new(Widget::col(vec![
Widget::row(vec![
Line("Load proposal").small_heading().draw(ctx),
ctx.style().btn_close_widget(ctx),
]),
ctx.style()
.btn_outline_light_text("Start over with blank proposal")
.build_def(ctx),
Widget::row(vec![Widget::col(your_edits), Widget::col(proposals)]).evenly_spaced(),
]))
.exact_size_percent(50, 50)
.build(ctx),
})
}
}
impl State<App> for LoadEdits {
fn event(&mut self, ctx: &mut EventCtx, app: &mut App) -> Transition {
match self.panel.event(ctx) {
Outcome::Clicked(x) => {
match x.as_ref() {
"close" => Transition::Pop,
"Start over with blank proposal" => {
apply_map_edits(ctx, app, app.primary.map.new_edits());
Transition::Pop
}
path => {
// TODO Kind of a hack. If it ends with .json, it's already a path.
// Otherwise it's a result from the menu.
let path = if path.ends_with(".json") {
path.to_string()
} else {
abstio::path_edits(app.primary.map.get_name(), path)
};
match MapEdits::load(
&app.primary.map,
path.clone(),
&mut Timer::throwaway(),
)
.and_then(|edits| {
if self.mode.allows(&edits) {
Ok(edits)
} else {
Err(anyhow!(
"The current gameplay mode restricts edits. This proposal has \
a banned command."
))
}
}) {
Ok(edits) => {
apply_map_edits(ctx, app, edits);
Transition::Pop
}
// TODO Hack. Have to replace ourselves, because the Menu might be
// invalidated now that something was chosen.
Err(err) => {
println!("Can't load {}: {}", path, err);
Transition::Multi(vec![
Transition::Replace(LoadEdits::new(
ctx,
app,
self.mode.clone(),
)),
// TODO Menu draws at a weird Z-order to deal with tooltips,
// so now the menu underneath bleeds through.
Transition::Push(PopupMsg::new(
ctx,
"Error",
vec![format!("Can't load {}", path), err.to_string()],
)),
])
}
}
}
}
}
_ => Transition::Keep,
}
}
fn draw(&self, g: &mut GfxCtx, app: &App) {
grey_out_map(g, app);
self.panel.draw(g);
}
}
fn make_topcenter(ctx: &mut EventCtx, app: &App) -> Panel {
Panel::new(Widget::col(vec![
Line("Editing map")
.small_heading()
.draw(ctx)
.centered_horiz(),
ctx.style()
.btn_solid_dark_text(&format!(
"Finish & resume from {}",
app.primary
.suspended_sim
.as_ref()
.unwrap()
.time()
.ampm_tostring()
))
.hotkey(Key::Escape)
.build_widget(ctx, "finish editing"),
]))
.aligned(HorizontalAlignment::Center, VerticalAlignment::Top)
.build(ctx)
}
pub fn apply_map_edits(ctx: &mut EventCtx, app: &mut App, edits: MapEdits) {
let mut timer = Timer::new("apply map edits");
let (roads_changed, turns_deleted, turns_added, mut modified_intersections) =
app.primary.map.must_apply_edits(edits);
if !roads_changed.is_empty() || !modified_intersections.is_empty() {
app.primary
.draw_map
.draw_all_unzoomed_roads_and_intersections =
DrawMap::regenerate_unzoomed_layer(&app.primary.map, &app.cs, ctx, &mut timer);
}
for r in roads_changed {
let road = app.primary.map.get_r(r);
app.primary.draw_map.roads[r.0].clear_rendering();
// An edit to one lane potentially affects markings in all lanes in the same road, because
// of one-way markings, driving lines, etc.
for l in road.all_lanes() {
app.primary.draw_map.lanes[l.0].clear_rendering();
}
}
let mut lanes_of_modified_turns: BTreeSet<LaneID> = BTreeSet::new();
for t in turns_deleted {
lanes_of_modified_turns.insert(t.src);
modified_intersections.insert(t.parent);
}
for t in &turns_added {
lanes_of_modified_turns.insert(t.src);
modified_intersections.insert(t.parent);
}
for i in modified_intersections {
app.primary.draw_map.intersections[i.0].clear_rendering();
}
if app.primary.layer.as_ref().and_then(|l| l.name()) == Some("map edits") {
app.primary.layer = Some(Box::new(crate::layer::map::Static::edits(ctx, app)));
}
// Autosave
app.primary.map.save_edits();
}
pub fn can_edit_lane(mode: &GameplayMode, l: LaneID, app: &App) -> bool {
let l = app.primary.map.get_l(l);
mode.can_edit_lanes()
&& !l.is_walkable()
&& l.lane_type != LaneType::SharedLeftTurn
&& !l.is_light_rail()
&& !app.primary.map.get_parent(l.id).is_service()
}
pub fn speed_limit_choices(app: &App) -> Vec<Choice<Speed>> {
// Don't need anything higher than 70mph. Though now I kind of miss 3am drives on TX-71...
(10..=70)
.step_by(5)
.map(|mph| {
let s = Speed::miles_per_hour(mph as f64);
Choice::new(s.to_string(&app.opts.units), s)
})
.collect()
}
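// Illustrative: this yields choices for 10, 15, ..., 70 mph, each labeled
// according to the user's unit settings.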
pub fn maybe_edit_intersection(
ctx: &mut EventCtx,
app: &mut App,
id: IntersectionID,
mode: &GameplayMode,
) -> Option<Box<dyn State<App>>>
|
fn make_changelist(ctx: &mut EventCtx, app: &App) -> Panel {
// TODO Support redo. Bit harder here to reset the redo_stack when the edits
// change, because nested other places modify it too.
let edits = app.primary.map.get_edits();
let mut col = vec![
Widget::row(vec![
ctx.style()
.btn_outline_light_popup(&edits.edits_name)
.hotkey(lctrl(Key::P))
.build_widget(ctx, "manage proposals"),
"autosaved"
.draw_text(ctx)
.container()
.padding(10)
.bg(Color::hex("#5D9630")),
]),
ColorLegend::row(
ctx,
app.cs.edits_layer,
format!(
"{} roads, {} intersections changed",
edits.changed_roads.len(),
edits.original_intersections.len()
),
),
];
if edits.commands.len() > 5 {
col.push(format!("{} more...", edits.commands.len() - 5).draw_text(ctx));
}
for idx in edits.commands.len().max(5) - 5..edits.commands.len() {
let (summary, details) = edits.commands[idx].describe(&app.primary.map);
let mut txt = Text::from(Line(format!("{}) {}", idx + 1, summary)));
for line in details {
txt.add(Line(line).secondary());
}
let btn = ctx
.style()
.btn_plain_light()
.label_styled_text(txt, ControlState::Default)
.build_widget(ctx, &format!("change #{}", idx + 1));
if idx == edits.commands.len() - 1 {
col.push(
Widget::row(vec![
btn,
ctx.style()
.btn_close()
.hotkey(lctrl(Key::Z))
.build_widget(ctx, "undo"),
])
.padding(16)
.outline(2.0, Color::WHITE),
);
} else {
col.push(btn);
}
}
Panel::new(Widget::col(col))
.aligned(HorizontalAlignment::Right, VerticalAlignment::Center)
.build(ctx)
}
// TODO Ideally a Tab.
fn cmd_to_id(cmd: &EditCmd) -> Option<ID> {
match cmd {
EditCmd::ChangeRoad { r, .. } => Some(ID::Road(*r)),
EditCmd::ChangeIntersection { i, .. } => Some(ID::Intersection(*i)),
EditCmd::ChangeRouteSchedule { .. } => None,
}
}
pub struct ConfirmDiscard {
panel: Panel,
discard: Box<dyn Fn(&mut App)>,
}
impl ConfirmDiscard {
pub fn new(ctx: &mut EventCtx, discard: Box<dyn Fn(&mut App)>) -> Box<dyn State<App>> {
Box::new(ConfirmDiscard {
discard,
panel: Panel::new(Widget::col(vec![
Widget::row(vec![
Widget::draw_svg(ctx, "system/assets/tools/alert.svg")
.container()
.padding_top(6),
Line("Alert").small_heading().draw(ctx),
ctx.style().btn_close_widget(ctx),
]),
"Are you sure you want to discard changes you made?".draw_text(ctx),
Widget::row(vec![
ctx.style()
.btn_solid_dark_text("Cancel")
.hotkey(Key::Escape)
.build_def(ctx),
ctx.style()
.btn_solid_destructive_text("Yes, discard")
.build_def(ctx),
])
.align_right(),
]))
.build(ctx),
})
}
}
impl State<App> for ConfirmDiscard {
fn event(&mut self, ctx: &mut EventCtx, app: &mut App) -> Transition {
match self.panel.event(ctx) {
Outcome::Clicked(x) => match x.as_ref() {
"close" | "Cancel" => Transition::Pop,
"Yes, discard" => {
(self.discard)(app);
Transition::Multi(vec![Transition::Pop, Transition::Pop])
}
_ => unreachable!(),
},
_ => Transition::Keep,
}
}
fn draw(&self, g: &mut GfxCtx, _: &App) {
self.panel.draw(g);
}
}
|
{
if app.primary.map.maybe_get_stop_sign(id).is_some()
&& mode.can_edit_stop_signs()
&& app.per_obj.left_click(ctx, "edit stop signs")
{
return Some(StopSignEditor::new(ctx, app, id, mode.clone()));
}
if app.primary.map.maybe_get_traffic_signal(id).is_some()
&& app.per_obj.left_click(ctx, "edit traffic signal")
{
return Some(TrafficSignalEditor::new(
ctx,
app,
btreeset! {id},
mode.clone(),
));
}
if app.primary.map.get_i(id).is_closed()
&& app.per_obj.left_click(ctx, "re-open closed intersection")
{
// This resets to the original state; it doesn't undo the closure to the last
// state. Seems reasonable to me.
let mut edits = app.primary.map.get_edits().clone();
edits.commands.push(EditCmd::ChangeIntersection {
i: id,
old: app.primary.map.get_i_edit(id),
new: edits.original_intersections[&id].clone(),
});
apply_map_edits(ctx, app, edits);
}
None
}
|
schema.py
|
import codecs
import contextlib
import copy
from decimal import Decimal
from django.apps.registry import Apps
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.utils import six
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_delete_table = "DROP TABLE %(table)s"
sql_create_inline_fk = "REFERENCES %(to_table)s (%(to_column)s)"
sql_create_unique = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)"
sql_delete_unique = "DROP INDEX %(name)s"
def __enter__(self):
with self.connection.cursor() as c:
# Some SQLite schema alterations need foreign key constraints to be
# disabled. This is the default in SQLite but can be changed with a
# build flag and might change in future, so can't be relied upon.
# We enforce it here for the duration of the transaction.
c.execute('PRAGMA foreign_keys')
self._initial_pragma_fk = c.fetchone()[0]
c.execute('PRAGMA foreign_keys = 0')
return super(DatabaseSchemaEditor, self).__enter__()
def __exit__(self, exc_type, exc_value, traceback):
super(DatabaseSchemaEditor, self).__exit__(exc_type, exc_value, traceback)
with self.connection.cursor() as c:
# Restore initial FK setting - PRAGMA values can't be parametrized
c.execute('PRAGMA foreign_keys = %s' % int(self._initial_pragma_fk))
def quote_value(self, value):
# The backend "mostly works" without this function and there are use
# cases for compiling Python without the sqlite3 libraries (e.g.
# security hardening).
try:
import sqlite3
value = sqlite3.adapt(value)
except ImportError:
pass
except sqlite3.ProgrammingError:
pass
# Manual emulation of SQLite parameter quoting
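        # Illustrative outputs of the branches below (assuming sqlite3 leaves
        # these values unadapted):
        #   quote_value(True)        -> "1"
        #   quote_value("O'Hara")    -> "'O''Hara'"
        #   quote_value(b'\x01\x02') -> "X'0102'"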
if isinstance(value, type(True)):
return str(int(value))
elif isinstance(value, (Decimal, float)):
return str(value)
elif isinstance(value, six.integer_types):
return str(value)
elif isinstance(value, six.string_types):
return "'%s'" % six.text_type(value).replace("\'", "\'\'")
elif value is None:
return "NULL"
elif isinstance(value, (bytes, bytearray, six.memoryview)):
# Bytes are only allowed for BLOB fields, encoded as string
# literals containing hexadecimal data and preceded by a single "X"
# character:
# value = b'\x01\x02' => value_hex = b'0102' => return X'0102'
value = bytes(value)
hex_encoder = codecs.getencoder('hex_codec')
value_hex, _length = hex_encoder(value)
# Use 'ascii' encoding for b'01' => '01', no need to use force_text here.
return "X'%s'" % value_hex.decode('ascii')
else:
raise ValueError("Cannot quote parameter value %r of type %s" % (value, type(value)))
    def _remake_table(self, model, create_fields=(), delete_fields=(), alter_fields=(), override_uniques=None,
                      override_indexes=None):
"""
Shortcut to transform a model from old_model into new_model
The essential steps are:
1. rename the model's existing table, e.g. "app_model" to "app_model__old"
2. create a table with the updated definition called "app_model"
3. copy the data from the old renamed table to the new table
4. delete the "app_model__old" table
"""
# Self-referential fields must be recreated rather than copied from
# the old model to ensure their remote_field.field_name doesn't refer
# to an altered field.
def is_self_referential(f):
return f.is_relation and f.remote_field.model is model
# Work out the new fields dict / mapping
body = {
f.name: f.clone() if is_self_referential(f) else f
for f in model._meta.local_concrete_fields
}
# Since mapping might mix column names and default values,
# its values must be already quoted.
mapping = {f.column: self.quote_name(f.column) for f in model._meta.local_concrete_fields}
# This maps field names (not columns) for things like unique_together
rename_mapping = {}
# If any of the new or altered fields is introducing a new PK,
# remove the old one
restore_pk_field = None
if any(f.primary_key for f in create_fields) or any(n.primary_key for o, n in alter_fields):
for name, field in list(body.items()):
if field.primary_key:
field.primary_key = False
restore_pk_field = field
if field.auto_created:
del body[name]
del mapping[field.column]
# Add in any created fields
for field in create_fields:
body[field.name] = field
# Choose a default and insert it into the copy map
if not field.many_to_many and field.concrete:
mapping[field.column] = self.quote_value(
self.effective_default(field)
)
# Add in any altered fields
for (old_field, new_field) in alter_fields:
body.pop(old_field.name, None)
mapping.pop(old_field.column, None)
body[new_field.name] = new_field
if old_field.null and not new_field.null:
case_sql = "coalesce(%(col)s, %(default)s)" % {
'col': self.quote_name(old_field.column),
'default': self.quote_value(self.effective_default(new_field))
}
mapping[new_field.column] = case_sql
else:
mapping[new_field.column] = self.quote_name(old_field.column)
rename_mapping[old_field.name] = new_field.name
# Remove any deleted fields
for field in delete_fields:
del body[field.name]
del mapping[field.column]
# Remove any implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.delete_model(field.remote_field.through)
# Work inside a new app registry
apps = Apps()
# Provide isolated instances of the fields to the new model body so
# that the existing model's internals aren't interfered with when
# the dummy model is constructed.
body = copy.deepcopy(body)
# Work out the new value of unique_together, taking renames into
# account
if override_uniques is None:
override_uniques = [
[rename_mapping.get(n, n) for n in unique]
for unique in model._meta.unique_together
]
# Work out the new value for index_together, taking renames into
# account
if override_indexes is None:
override_indexes = [
[rename_mapping.get(n, n) for n in index]
for index in model._meta.index_together
]
# Construct a new model for the new state
meta_contents = {
'app_label': model._meta.app_label,
'db_table': model._meta.db_table,
'unique_together': override_uniques,
'index_together': override_indexes,
'apps': apps,
}
meta = type("Meta", tuple(), meta_contents)
body['Meta'] = meta
body['__module__'] = model.__module__
temp_model = type(model._meta.object_name, model.__bases__, body)
# We need to modify model._meta.db_table, but everything explodes
# if the change isn't reversed before the end of this method. This
# context manager helps us avoid that situation.
@contextlib.contextmanager
def altered_table_name(model, temporary_table_name):
original_table_name = model._meta.db_table
model._meta.db_table = temporary_table_name
yield
model._meta.db_table = original_table_name
with altered_table_name(model, model._meta.db_table + "__old"):
# Rename the old table to make way for the new
self.alter_db_table(model, temp_model._meta.db_table, model._meta.db_table)
# Create a new table with the updated schema. We remove things
# from the deferred SQL that match our table name, too
self.deferred_sql = [x for x in self.deferred_sql if temp_model._meta.db_table not in x]
self.create_model(temp_model)
# Copy data from the old table into the new table
field_maps = list(mapping.items())
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(temp_model._meta.db_table),
', '.join(self.quote_name(x) for x, y in field_maps),
', '.join(y for x, y in field_maps),
self.quote_name(model._meta.db_table),
))
# Delete the old table
self.delete_model(model, handle_autom2m=False)
# Run deferred SQL on correct table
for sql in self.deferred_sql:
self.execute(sql)
self.deferred_sql = []
# Fix any PK-removed field
if restore_pk_field:
restore_pk_field.primary_key = True
def delete_model(self, model, handle_autom2m=True):
if handle_autom2m:
super(DatabaseSchemaEditor, self).delete_model(model)
else:
# Delete the table (and only that)
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
self._remake_table(model, create_fields=[field])
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# M2M fields are a special case
if field.many_to_many:
# For implicit M2M tables, delete the auto-created table
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# For explicit "through" M2M fields, do nothing
# For everything else, remake.
else:
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
self._remake_table(model, delete_fields=[field])
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Actually perform a "physical" (non-ManyToMany) field update."""
# Alter by remaking table
self._remake_table(model, alter_fields=[(old_field, new_field)])
def
|
(self, model, old_index_together, new_index_together):
"""
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
self._remake_table(model, override_indexes=new_index_together)
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
self._remake_table(model, override_uniques=new_unique_together)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
if old_field.remote_field.through._meta.db_table == new_field.remote_field.through._meta.db_table:
# The field name didn't change, but some options did; we have to propagate this altering.
self._remake_table(
old_field.remote_field.through,
alter_fields=[(
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()),
)],
override_uniques=(new_field.m2m_field_name(), new_field.m2m_reverse_field_name()),
)
return
# Make a new through table
self.create_model(new_field.remote_field.through)
# Copy the data across
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(new_field.remote_field.through._meta.db_table),
', '.join([
"id",
new_field.m2m_column_name(),
new_field.m2m_reverse_name(),
]),
', '.join([
"id",
old_field.m2m_column_name(),
old_field.m2m_reverse_name(),
]),
self.quote_name(old_field.remote_field.through._meta.db_table),
))
# Delete the old through table
self.delete_model(old_field.remote_field.through)
|
alter_index_together
|
check-doc.py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Beans Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
REGEX_ARG = r'(?:ForceSet|SoftSet|Get|Is)(?:Bool)?Args?(?:Set)?\("(-[^"]+)"'
REGEX_DOC = r'AddArg\("(-[^"=]+?)(?:=|")'
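# Example (illustrative): REGEX_ARG matches GetBoolArg("-daemon") and captures
# "-daemon"; REGEX_DOC matches AddArg("-daemon=<n>", ...) and captures "-daemon".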
CMD_ROOT_DIR = '$(git rev-parse --show-toplevel)/{}'.format(FOLDER_GREP)
CMD_GREP_ARGS = r"git grep --perl-regexp '{}' -- {} ':(exclude){}'".format(REGEX_ARG, CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_WALLET_ARGS = r"git grep --function-context 'void WalletInit::AddWalletOptions' -- {} | grep AddArg".format(CMD_ROOT_DIR)
CMD_GREP_WALLET_HIDDEN_ARGS = r"git grep --function-context 'void DummyWalletInit::AddWalletOptions' -- {}".format(CMD_ROOT_DIR)
CMD_GREP_DOCS = r"git grep --perl-regexp '{}' {}".format(REGEX_DOC, CMD_ROOT_DIR)
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-h', '-help', '-dbcrashratio', '-forcecompactdb', '-zapwallettxes'])
def lint_missing_argument_documentation():
used = check_output(CMD_GREP_ARGS, shell=True).decode('utf8').strip()
docd = check_output(CMD_GREP_DOCS, shell=True).decode('utf8').strip()
args_used = set(re.findall(re.compile(REGEX_ARG), used))
args_docd = set(re.findall(re.compile(REGEX_DOC), docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print("Args used : {}".format(len(args_used)))
print("Args documented : {}".format(len(args_docd)))
print("Args undocumented: {}".format(len(args_need_doc)))
print(args_need_doc)
print("Args unknown : {}".format(len(args_unknown)))
print(args_unknown)
assert 0 == len(args_need_doc), "Please document the following arguments: {}".format(args_need_doc)
def lint_missing_hidden_wallet_args():
|
def main():
lint_missing_argument_documentation()
lint_missing_hidden_wallet_args()
if __name__ == "__main__":
main()
|
wallet_args = check_output(CMD_GREP_WALLET_ARGS, shell=True).decode('utf8').strip()
wallet_hidden_args = check_output(CMD_GREP_WALLET_HIDDEN_ARGS, shell=True).decode('utf8').strip()
wallet_args = set(re.findall(re.compile(REGEX_DOC), wallet_args))
wallet_hidden_args = set(re.findall(re.compile(r' "([^"=]+)'), wallet_hidden_args))
hidden_missing = wallet_args.difference(wallet_hidden_args)
if hidden_missing:
assert 0, "Please add {} to the hidden args in DummyWalletInit::AddWalletOptions".format(hidden_missing)
|
find_controller.rs
|
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/wusyong/gir-files)
// DO NOT EDIT
use crate::WebView;
use glib::{
object::{Cast, IsA},
signal::{connect_raw, SignalHandlerId},
translate::*,
StaticType, ToValue,
};
use std::{boxed::Box as Box_, fmt, mem::transmute};
glib::wrapper! {
#[doc(alias = "WebKitFindController")]
pub struct FindController(Object<ffi::WebKitFindController, ffi::WebKitFindControllerClass>);
match fn {
type_ => || ffi::webkit_find_controller_get_type(),
}
}
impl FindController {
// rustdoc-stripper-ignore-next
/// Creates a new builder-pattern struct instance to construct [`FindController`] objects.
///
/// This method returns an instance of [`FindControllerBuilder`] which can be used to create [`FindController`] objects.
pub fn builder() -> FindControllerBuilder {
FindControllerBuilder::default()
}
}
#[derive(Clone, Default)]
// rustdoc-stripper-ignore-next
/// A [builder-pattern] type to construct [`FindController`] objects.
///
/// [builder-pattern]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html
pub struct FindControllerBuilder {
web_view: Option<WebView>,
}
impl FindControllerBuilder {
// rustdoc-stripper-ignore-next
/// Create a new [`FindControllerBuilder`].
pub fn new() -> Self {
Self::default()
}
// rustdoc-stripper-ignore-next
/// Build the [`FindController`].
pub fn build(self) -> FindController {
let mut properties: Vec<(&str, &dyn ToValue)> = vec![];
if let Some(ref web_view) = self.web_view {
properties.push(("web-view", web_view));
}
glib::Object::new::<FindController>(&properties)
.expect("Failed to create an instance of FindController")
}
pub fn web_view(mut self, web_view: &impl IsA<WebView>) -> Self {
self.web_view = Some(web_view.clone().upcast());
self
}
}
pub const NONE_FIND_CONTROLLER: Option<&FindController> = None;
pub trait FindControllerExt: 'static {
#[doc(alias = "webkit_find_controller_count_matches")]
fn count_matches(&self, search_text: &str, find_options: u32, max_match_count: u32);
#[doc(alias = "webkit_find_controller_get_max_match_count")]
#[doc(alias = "get_max_match_count")]
fn max_match_count(&self) -> u32;
#[doc(alias = "webkit_find_controller_get_options")]
#[doc(alias = "get_options")]
fn options(&self) -> u32;
#[doc(alias = "webkit_find_controller_get_search_text")]
#[doc(alias = "get_search_text")]
fn search_text(&self) -> Option<glib::GString>;
#[doc(alias = "webkit_find_controller_get_web_view")]
#[doc(alias = "get_web_view")]
fn web_view(&self) -> Option<WebView>;
#[doc(alias = "webkit_find_controller_search")]
fn search(&self, search_text: &str, find_options: u32, max_match_count: u32);
#[doc(alias = "webkit_find_controller_search_finish")]
fn search_finish(&self);
#[doc(alias = "webkit_find_controller_search_next")]
fn search_next(&self);
#[doc(alias = "webkit_find_controller_search_previous")]
fn search_previous(&self);
fn text(&self) -> Option<glib::GString>;
#[doc(alias = "counted-matches")]
fn connect_counted_matches<F: Fn(&Self, u32) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "failed-to-find-text")]
fn connect_failed_to_find_text<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "found-text")]
fn connect_found_text<F: Fn(&Self, u32) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "max-match-count")]
fn connect_max_match_count_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "options")]
fn connect_options_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "text")]
fn connect_text_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<FindController>> FindControllerExt for O {
fn count_matches(&self, search_text: &str, find_options: u32, max_match_count: u32) {
unsafe {
ffi::webkit_find_controller_count_matches(
self.as_ref().to_glib_none().0,
search_text.to_glib_none().0,
find_options,
max_match_count,
);
}
}
fn max_match_count(&self) -> u32 {
unsafe { ffi::webkit_find_controller_get_max_match_count(self.as_ref().to_glib_none().0) }
}
|
}
fn search_text(&self) -> Option<glib::GString> {
unsafe {
from_glib_none(ffi::webkit_find_controller_get_search_text(
self.as_ref().to_glib_none().0,
))
}
}
fn web_view(&self) -> Option<WebView> {
unsafe {
from_glib_none(ffi::webkit_find_controller_get_web_view(
self.as_ref().to_glib_none().0,
))
}
}
fn search(&self, search_text: &str, find_options: u32, max_match_count: u32) {
unsafe {
ffi::webkit_find_controller_search(
self.as_ref().to_glib_none().0,
search_text.to_glib_none().0,
find_options,
max_match_count,
);
}
}
fn search_finish(&self) {
unsafe {
ffi::webkit_find_controller_search_finish(self.as_ref().to_glib_none().0);
}
}
fn search_next(&self) {
unsafe {
ffi::webkit_find_controller_search_next(self.as_ref().to_glib_none().0);
}
}
fn search_previous(&self) {
unsafe {
ffi::webkit_find_controller_search_previous(self.as_ref().to_glib_none().0);
}
}
fn text(&self) -> Option<glib::GString> {
unsafe {
let mut value = glib::Value::from_type(<glib::GString as StaticType>::static_type());
glib::gobject_ffi::g_object_get_property(
self.to_glib_none().0 as *mut glib::gobject_ffi::GObject,
b"text\0".as_ptr() as *const _,
value.to_glib_none_mut().0,
);
value
.get()
.expect("Return Value for property `text` getter")
}
}
fn connect_counted_matches<F: Fn(&Self, u32) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn counted_matches_trampoline<
P: IsA<FindController>,
F: Fn(&P, u32) + 'static,
>(
this: *mut ffi::WebKitFindController,
match_count: libc::c_uint,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(
FindController::from_glib_borrow(this).unsafe_cast_ref(),
match_count,
)
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"counted-matches\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
counted_matches_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_failed_to_find_text<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn failed_to_find_text_trampoline<
P: IsA<FindController>,
F: Fn(&P) + 'static,
>(
this: *mut ffi::WebKitFindController,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(FindController::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"failed-to-find-text\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
failed_to_find_text_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_found_text<F: Fn(&Self, u32) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn found_text_trampoline<P: IsA<FindController>, F: Fn(&P, u32) + 'static>(
this: *mut ffi::WebKitFindController,
match_count: libc::c_uint,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(
FindController::from_glib_borrow(this).unsafe_cast_ref(),
match_count,
)
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"found-text\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
found_text_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_max_match_count_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_max_match_count_trampoline<
P: IsA<FindController>,
F: Fn(&P) + 'static,
>(
this: *mut ffi::WebKitFindController,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(FindController::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::max-match-count\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_max_match_count_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_options_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_options_trampoline<P: IsA<FindController>, F: Fn(&P) + 'static>(
this: *mut ffi::WebKitFindController,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(FindController::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::options\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_options_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_text_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_text_trampoline<P: IsA<FindController>, F: Fn(&P) + 'static>(
this: *mut ffi::WebKitFindController,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(FindController::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::text\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_text_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
}
impl fmt::Display for FindController {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("FindController")
}
}
|
fn options(&self) -> u32 {
unsafe { ffi::webkit_find_controller_get_options(self.as_ref().to_glib_none().0) }
|
vk_scope.rs
|
// Generated by `scripts/generate.js`
use utils::vk_traits::*;
/// Wrapper for [VkScopeNV](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkScopeNV.html).
#[repr(i32)]
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum
|
{
Device = 1,
Workgroup = 2,
Subgroup = 3,
QueueFamily = 5,
}
#[doc(hidden)]
pub type RawVkScope = i32;
impl VkWrappedType<RawVkScope> for VkScope {
fn vk_to_raw(src: &VkScope, dst: &mut RawVkScope) {
*dst = *src as i32
}
}
impl VkRawType<VkScope> for RawVkScope {
fn vk_to_wrapped(src: &RawVkScope) -> VkScope {
unsafe {
*((src as *const i32) as *const VkScope)
}
}
}
impl Default for VkScope {
fn default() -> VkScope {
VkScope::Device
}
}
|
VkScope
|
views.py
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib import messages
from django.template import Context
from .models import Court, CourtManager, SelectedCourt
from apps.users.models import User
from datetime import datetime
from decimal import Decimal
from django.contrib.auth.decorators import login_required
# Create your views here.
def index(request):
|
def main(request):
context = {
'court' : Court.objects.all()
}
return render(request, "courts/main.html", context)
def court(request, courtid):
context = {
'one_court' : Court.objects.get(id=courtid)
}
return render(request, "courts/courts.html", context)
def select(request):
if 'user_id' not in request.session:
context = {
'message' : "Please login"
}
return render(request, "courts/index.html", context)
context = {
'courts' : Court.objects.all()
}
return render(request, "courts/select.html", context)
"""
This is logic that checks the times that a court has been reserved.
"""
def schedule(request):
if 'user_id' not in request.session:
context = {
'message' : "Please login"
}
return render(request, "courts/", context)
usr = User(id=request.session['user_id'])
crt = Court.objects.get(id=request.POST['courtid'])
intime = request.POST['timein']
outtime = request.POST['timeout']
dform = "%Y-%m-%d %H:%M"
diff = datetime.strptime(outtime, dform) - datetime.strptime(intime, dform)
    hours = diff.total_seconds() / 3600
if hours < 4 and hours > 0:
total_price = Decimal(hours) * crt.price
if intime > outtime:
context = {
'courts' : Court.objects.all(),
'message': "End date/time is earlier than begin date/time."
}
elif intime <= datetime.now().strftime(dform):
context = {
'courts' : Court.objects.all(),
'message': "Begin date/time is in the past."
}
else:
SelectedCourt.objects.create(user=usr, court=crt, timein=intime, timeout=outtime, total_price=total_price)
context = {
'courts' : Court.objects.all()
}
else:
context = {
'courts' : Court.objects.all(),
'message': "Scheduled time is too long."
}
return render(request, "courts/select.html", context)
"""
This presents a dashboard which shows court reservations.
"""
def dashboard(request):
if 'user_id' not in request.session:
context = {
'message' : "Please login"
}
return render(request, "courts/index.html", context)
usr = User(id=request.session['user_id'])
context = {
'court_times' : SelectedCourt.objects.filter(user=usr)
}
return render(request, "courts/dashboard.html", context)
def search(request):
return render(request, "courts/search.html")
def searchzip(request):
return "HELLO WORLD"
|
return render(request, "courts/index.html")
|
example_test.go
|
// Copyright Splunk Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package splunkhttprouter_test
import (
"fmt"
"net/http"
|
"github.com/signalfx/splunk-otel-go/instrumentation/github.com/julienschmidt/httprouter/splunkhttprouter"
)
func Index(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
fmt.Fprint(w, "Welcome!\n")
}
func Hello(w http.ResponseWriter, _ *http.Request, ps httprouter.Params) {
fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name"))
}
func Example() {
router := splunkhttprouter.New()
router.GET("/", Index)
router.GET("/hello/:name", Hello)
if err := http.ListenAndServe(":8080", router); err != nil {
panic(err)
}
}
|
"github.com/julienschmidt/httprouter"
|
util.go
|
// This file and its contents are licensed under the Apache License 2.0.
// Please see the included NOTICE for copyright information and
// LICENSE for a copy of the license.
package util
import (
"flag"
"fmt"
"os"
"regexp"
"strings"
"sync"
"time"
)
const (
PromNamespace = "ts_prom"
maskPasswordReplaceString1 = "password=$1'****'"
maskPasswordReplaceString2 = "password:$1****$3"
/* #nosec */
maskPasswordReplaceString3 = "postgres:$1****"
)
var (
maskPasswordRegex1 = regexp.MustCompile(`[p|P]assword=(\s*?)'([^']+?)'`)
maskPasswordRegex2 = regexp.MustCompile(`[p|P]assword:(\s*?)([^\s]+?)( |$)`)
maskPasswordRegex3 = regexp.MustCompile(`postgres:(([^:]*\:){1})([^@]*)`)
)
// ThroughputCalc runs on a scheduled interval to calculate the throughput per second and sends results to a channel.
type ThroughputCalc struct {
tickInterval time.Duration
previous float64
current chan float64
Values chan float64
running bool
lock sync.Mutex
}
// NewThroughputCalc returns a throughput calculator based on a duration
func NewThroughputCalc(interval time.Duration) *ThroughputCalc {
return &ThroughputCalc{tickInterval: interval, current: make(chan float64, 1), Values: make(chan float64, 1)}
}
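// Usage sketch (variable names are illustrative):
//
//	calc := NewThroughputCalc(time.Second)
//	calc.Start()
//	calc.SetCurrent(totalRowsProcessed) // update as work progresses
//	rate := <-calc.Values               // items per second since the previous tick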
// SetCurrent sets the value of the counter
func (dt *ThroughputCalc) SetCurrent(value float64) {
select {
case dt.current <- value:
default:
}
}
// Start the throughput calculator
func (dt *ThroughputCalc) Start() {
dt.lock.Lock()
defer dt.lock.Unlock()
if !dt.running {
dt.running = true
ticker := time.NewTicker(dt.tickInterval)
go func() {
for range ticker.C {
if !dt.running {
return
}
current := <-dt.current
diff := current - dt.previous
dt.previous = current
select {
case dt.Values <- diff / dt.tickInterval.Seconds():
default:
}
}
}()
}
}
// MaskPassword masks sensitive password data before it is written to a persistent stream such as logs.
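// For example (illustrative), MaskPassword("postgres://user:secret@host")
// returns "postgres://user:****@host".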
func MaskPassword(s string) string {
s = maskPasswordRegex1.ReplaceAllString(s, maskPasswordReplaceString1)
s = maskPasswordRegex2.ReplaceAllString(s, maskPasswordReplaceString2)
return maskPasswordRegex3.ReplaceAllString(s, maskPasswordReplaceString3)
}
// ParseEnv takes a prefix string p and *flag.FlagSet. Each flag
// in the FlagSet is exposed as an upper case environment variable
// prefixed with p. Any flag that was not explicitly set by a user
// is updated to the environment variable, if set.
//
// Note: when run multiple times with different prefixes on the same
// FlagSet, values set with the prefix that is parsed first take
// precedence.
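//
// A minimal usage sketch (the flag name and prefix are illustrative):
//
//	fs := flag.NewFlagSet("app", flag.ContinueOnError)
//	fs.String("web-listen-address", ":9201", "address to listen on")
//	_ = fs.Parse(os.Args[1:])
//	ParseEnv("TS_PROM", fs) // fills web-listen-address from TS_PROM_WEB_LISTEN_ADDRESS unless set above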
func ParseEnv(p string, fs *flag.FlagSet) {
// Build a map of explicitly set flags.
set := make(map[string]struct{})
fs.Visit(func(f *flag.Flag) {
set[f.Name] = struct{}{}
})
fs.VisitAll(func(f *flag.Flag) {
// Create an env var name
// based on the supplied prefix.
envVar := fmt.Sprintf("%s_%s", p, strings.ToUpper(f.Name))
envVar = strings.Replace(envVar, "-", "_", -1)
// Update the Flag.Value if the
// env var is non "".
if val := os.Getenv(envVar); val != ""
|
// Append the env var to the
// Flag.Usage field.
f.Usage = fmt.Sprintf("%s [%s]", f.Usage, envVar)
})
}
|
{
// Update the value if it hasn't
// already been set.
if _, defined := set[f.Name]; !defined {
// Ignore error since only error that can occur is
// when using a non-set flag name which cannot happen
// in this situation.
_ = fs.Set(f.Name, val)
}
}
|
find digits.py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the findDigits function below.
def findDigits(n):
|
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
result = findDigits(n)
fptr.write(str(result) + '\n')
fptr.close()
|
num = n
count = 0
n = str(n)
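    # Worked example: for n = 124, the digits 1, 2, and 4 all divide 124, so
    # count ends at 3.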
for i in n:
d = int(i)
if d!=0 and num%d==0:
count+=1
return count
|
mod.rs
|
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::intrinsics;
pub trait Cypherable {
fn label(&self) -> String {
unsafe {
intrinsics::type_name::<Self>()
.split(':').last().unwrap()
.to_string()
}
}
fn cypher_ident(&self) -> String;
}
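// Sketch of intent (inferred from the Cypher below): MERGE the parent and child
// nodes, then MERGE a `$rel` relationship between them.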
macro_rules! save_child_rel {
( $tx:expr, $parent:expr, $child:expr, $rel:tt ) => {{
$tx.exec(
format!("MERGE (p: {}) MERGE (c: {})
MERGE (p)-[:{}]-(c)",
$parent.cypher_ident(),
$child.cypher_ident(),
$rel,
).as_ref()
)
}}
}
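// Sketch of intent (inferred from the Cypher below): append `$child` to a linked
// list hanging off `$parent`: merge the `$rel` link, detach the parent's old TAIL
// pointer, chain the old tail behind the new node with PREV, then re-point TAIL
// at the new node.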
macro_rules! save_child_ll {
( $tx:expr, $parent:expr, $child:expr, $rel:tt ) => {{
try!($tx.exec(
format!("MERGE (p: {p})
MERGE (c: {c})
MERGE (c)-[ptr:{rel}]->(p)
WITH p, c
MATCH (p)-[ptr:TAIL]->(tail:{tail})
WHERE NOT id(tail) = id(c)
DELETE ptr
MERGE (c)-[:PREV]->(tail)",
p = $parent.cypher_ident(),
c = $child.cypher_ident(),
rel = $rel,
tail = $child.label()
).as_ref()
));
$tx.exec(
format!("MATCH (p: {p})
MATCH (c: {c})
MERGE (p)-[:TAIL]->(c)",
p = $parent.cypher_ident(),
c = $child.cypher_ident()
).as_ref()
).map(|_| true)
}}
}
macro_rules! is_tail {
( $parent:expr, $child:expr ) => {{
let graph = GraphClient::connect(::NEO4J_ENDPOINT).unwrap();
graph.cypher().exec(
format!("MATCH (c: {})<-[:TAIL]-(p: {}) RETURN c",
$child.cypher_ident(),
$parent.cypher_ident(),
).as_ref()
).map(|r| r.rows().count() > 0)
}}
}
pub mod config;
pub mod entity;
pub mod manager;
pub mod test;
pub fn hash<T: Hash>(t: &T) -> u64 {
let mut s = DefaultHasher::new();
t.hash(&mut s);
s.finish()
}
#[cfg(test)]
mod tests {
use serde_json;
use std::mem;
use std::path::PathBuf;
use super::*;
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)]
pub struct TestStruct {
pub field1: String,
}
impl From<Vec<u8>> for TestStruct {
fn from(v: Vec<u8>) -> Self {
let buf = String::from_utf8(v).unwrap();
serde_json::from_str(&buf).unwrap()
}
}
impl Storable for TestStruct {
fn db_path() -> PathBuf {
PathBuf::from("/tmp/test")
}
fn key(&self) -> Vec<u8>
|
}
#[test]
fn test_save_and_get() {
let mut s = String::with_capacity(10240);
for _ in 0..10240 {
s.push_str("x");
}
let test1 = TestStruct { field1: s };
test1.save();
let records = TestStruct::get_all();
for record in records.iter() {
assert_eq!(*record, test1.clone());
}
}
}
|
{
unsafe { mem::transmute::<i64, [u8; 8]>(self.created_at).to_vec() }
}
|
test_misc.py
|
# Owner(s): ["oncall: jit"]
from typing import Any, Dict, List, Optional, Tuple
from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.testing import FileCheck
from torch import jit
from jit.test_module_interface import TestModuleInterface # noqa: F401
import os
import sys
import torch
import torch.testing._internal.jit_utils
import torch.nn as nn
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestMisc(JitTestCase):
def test_joined_str(self):
def func(x):
hello, test = "Hello", "test"
print(f"{hello + ' ' + test}, I'm a {test}")
print("format blank")
hi = 'hi'
print(f"stuff before {hi}")
print(f"{hi} stuff after")
return x + 1
x = torch.arange(4., requires_grad=True)
# TODO: Add support for f-strings in string parser frontend
# self.checkScript(func, [x], optimize=True, capture_output=True)
with self.capture_stdout() as captured:
out = func(x)
scripted = torch.jit.script(func)
with self.capture_stdout() as captured_script:
            out_script = scripted(x)
self.assertEqual(out, out_script)
self.assertEqual(captured, captured_script)
def test_kwarg_support(self):
with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "variable number of arguments"):
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str = 2):
pass
torch.jit.script(M())
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str):
return n_tokens, device_name
sm = torch.jit.script(M())
with self.assertRaisesRegex(RuntimeError, "missing value for argument 'n_tokens'"):
sm()
with self.assertRaisesRegex(RuntimeError, "positional arg"):
sm(3, 'hello')
self.assertEqual(sm(n_tokens=3, device_name='hello'), (3, 'hello'))
def test_tuple_subscripted_assign(self):
with self.assertRaisesRegex(RuntimeError, "subscripted assignment"):
@torch.jit.script
def foo(a: Tuple[int, int]) -> None:
a[0] = a[1]
with self.assertRaisesRegex(RuntimeError, "augmented assignment"):
@torch.jit.script
def bar(a: Tuple[int, int]) -> None:
a[0] += a[1]
def test_subexpression_List_Future(self):
@torch.jit.script
def fn(x: List[torch.jit.Future[int]]) -> torch.jit.Future[int]:
return x[0]
FileCheck().check('Future[int]').check('Future[int]').run(fn.graph)
def
|
(self):
@torch.jit.script
def fn() -> torch.jit.Future[int]:
x: List[torch.jit.Future[int]] = []
return x[0]
FileCheck().check("Future[int][]").run(fn.graph)
def test_future_isinstance(self):
@torch.jit.script
def fn(x: Any) -> torch.jit.Future[int]:
assert isinstance(x, jit.Future[int])
return x
FileCheck().check("Future[int]").run(fn.graph)
def test_str_refine_any(self):
def forward(x: Any) -> str:
if isinstance(x, str):
return x
return "foo"
forward = torch.jit.script(forward)
self.assertEqual(forward(1), "foo")
self.assertEqual(forward("bar"), "bar")
def test_subexpression_Tuple_int_int_Future(self):
@torch.jit.script
def fn(x: Tuple[int, int, torch.jit.Future[int]]) -> Tuple[int, torch.jit.Future[int]]:
return x[0], x[2]
FileCheck().check('(int, int, Future[int])').check('(int, Future[int])').run(fn.graph)
def test_subexpression_Dict_int_Future(self):
@torch.jit.script
def fn(x: Dict[int, torch.jit.Future[int]], y: int) -> torch.jit.Future[int]:
return x[y]
FileCheck().check('Dict(int, Future(int))').check('Future[int]').run(fn.graph)
def test_subexpression_Optional(self):
@torch.jit.script
def fn(x: Optional[Dict[int, torch.jit.Future[int]]]) -> Optional[torch.jit.Future[int]]:
if x is not None:
return x[0]
else:
return None
FileCheck().check('Dict(int, Future(int))?').run(fn.graph)
def test_if_returning_any(self):
"""
Check that an if statement can return different
types early from each branch when the return
type of the function is Any.
"""
def if_function(inp: torch.Tensor) -> Any:
if inp.shape[0] == 1:
return inp * inp
else:
return "str"
self.checkScript(if_function, (torch.randn(5),))
def test_export_opnames_interface(self):
@torch.jit.interface
class OneTwoModule(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
pass
def two(self, x: torch.Tensor) -> torch.Tensor:
pass
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
class FooMod(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x + y
def two(self, x: torch.Tensor) -> torch.Tensor:
return 2 * x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.one(self.two(x), x)
class BarMod(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x * y
def two(self, x: torch.Tensor) -> torch.Tensor:
return 2 / x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.two(self.one(x, x))
make_global(OneTwoModule)
class M(nn.Module):
sub : OneTwoModule
def __init__(self):
super(M, self).__init__()
self.sub = BarMod()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.sub.forward(x)
def use_module_interface(mod_list: List[OneTwoModule], x: torch.Tensor):
return mod_list[0].forward(x) + mod_list[1].forward(x)
torch._C._enable_mobile_interface_call_export()
scripted_M_mod = torch.jit.script(M())
self.assertTrue(set(['aten::mul.Scalar', 'aten::mul.Tensor', 'aten::reciprocal']).issubset(
set(torch.jit.export_opnames(scripted_M_mod))))
scripted_M_mod.sub = torch.jit.script(FooMod())
self.assertTrue(set(['aten::add.Tensor', 'aten::mul.Scalar']).issubset(
set(torch.jit.export_opnames(scripted_M_mod))))
def test_broadcasting_list(self):
"""
Test BroadcastingList and torch.nn._size_N_t alias
"""
from torch._jit_internal import BroadcastingList2
from torch.nn.common_types import _size_2_t
def sum_i(x: _size_2_t) -> int:
return x[0] + x[1]
def sum_f(x: BroadcastingList2[float]) -> float:
return x[0] + x[1]
self.assertTrue(torch.jit.script(sum_i)(4) == 8)
self.assertTrue(torch.jit.script(sum_f)(4.5) == 9.)
|
test_subexpression_Future_annotate
|
main.rs
|
use std::{fs, process, path::Path};
use clap::{Arg, App, crate_version, crate_authors};
use spinners::{Spinner, Spinners};
use byte_unit::Byte;
use colored::*;
mod timer;
mod scanner;
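// Usage sketch (the binary name is illustrative):
//   directory-tree --scan ./some/dir ./tree.json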
fn main() {
let matches = App::new("Directory tree")
.version(crate_version!())
.author(crate_authors!())
.about("Creates a JSON representing a directory tree.")
.arg(
Arg::with_name("scan")
.short("s")
.long("scan")
.number_of_values(2)
.value_names(&["DIR", "FILE"])
.required(true)
.help("Provide directory to scan and file's path to write JSON")
.takes_value(true),
)
.get_matches();
let mut args = matches.values_of("scan").unwrap();
let scan_dir = args.next().unwrap();
let out_path = args.next().unwrap();
// Validate <FILE> arg.
let path = Path::new(out_path);
    if !path.parent().unwrap().is_dir() {
        eprintln!(
            "{} {}",
            "Error".bright_red().bold(),
            "The file's parent directory does not exist"
        );
        process::exit(1);
    }
if let Some(extension) = path.extension() {
if extension.to_str().unwrap().trim().to_lowercase() != "json" {
eprintln!(
"{} {}",
"Error".bright_red().bold(),
"Invalid extension. Expected `.json`"
);
            process::exit(1);
}
} else {
        eprintln!(
            "{} {}",
            "Error".bright_red().bold(),
            "Provide a file name to write the JSON to. The file need not exist beforehand"
        );
        process::exit(1);
}
// Begin scanning.
let now = timer::start();
let sp = Spinner::new(
Spinners::Line,
"Scanning, please wait... ".bright_cyan().bold().to_string(),
);
let result = match scanner::run(scan_dir) {
Ok(r) => r,
Err(e) => {
eprintln!("{} {}", "Error".bright_red().bold(), e);
            process::exit(1);
}
};
if let Err(e) = fs::write(out_path, result.json) {
eprintln!("{} {}", "Error".bright_red().bold(), e);
        process::exit(1);
}
let byte = Byte::from_bytes(result.size as u128).get_appropriate_unit(true);
|
println!("{}", "Done!".bright_green().bold());
println!("{} {}", "Total time".bright_blue().bold(), time);
println!("{} {}", "Scan time".bright_blue().bold(), result.time);
println!("{} {}", "Size".bright_blue().bold(), byte.to_string());
println!("{} {}", "Total items".bright_blue().bold(), result.items);
}
|
let time = timer::end(now);
sp.stop();
|
sum_uniformity_primer.py
|
#------------------------------------------------------------------------------------------
def getUniformityMetrics(depths):
# get total cumulative depth
depthTotal = sum(depths)
# if zero depth at all primers/sites, return zeros
if depthTotal == 0:
|
# sort
depths.sort()
# get mean depth
depthMean = 1.00 * depthTotal / len(depths)
# get % of mean metrics
pctGtVec = []
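    # Worked example (sketch): depths = [1, 2, 3, 4] has mean 2.5; even the
    # lowest depth (1) is 40% of the mean, so every ">= pct of mean" metric
    # below comes out to 100.0.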
for pct in (5.0, 10.0, 20.0, 30.0):
for idx in range(len(depths)):
pctMean = 100.00 * depths[idx] / depthMean
if pctMean >= pct:
x = 100.00 - (100.00 * idx) / len(depths)
pctGtVec.append(x)
break
# done
outvec = [depthMean]
outvec.extend(pctGtVec)
return tuple(outvec)
#------------------------------------------------------------------------------------------
def run(cfg):
print("sum_uniformity_primer starting...")
readSet = cfg.readSet
# initialize a depth vector for each depth type
depthVecs = ([], []) # MTs, reads
# loop over primer read depths from previous step, compute raw and MT-corrected uniformity
for line in open(readSet + ".sum.primer.umis.txt", "r"):
# skip column header line
if line.startswith("read set"):
continue
# parse line
vals = line.strip().split("|")
(readSet, primer, strand, chrom, loc5, loc3, numMts, numReads) = vals[0:8]
# save depth count for later
depthVecs[0].append(int(numMts))
depthVecs[1].append(int(numReads))
# open output file
fileout = open(readSet + ".sum.uniformity.primer.summary.txt", "w")
# write num of primers
fileout.write("{}\t# of primers\n".format(len(depthVecs[0])))
# define metric labels and get metric values for each depth type
metricNames = ("mean primer", "% of primers >= 5% of mean", "% of primers >= 10% of mean", "% of primers >= 20% of mean", "% of primers >= 30% of mean")
depthTypes = ("UMI", "read fragment")
# write primer uniformity metrics to output file
for idxDepthType in range(len(depthTypes)):
depthType = depthTypes[idxDepthType]
metricVals = getUniformityMetrics(depthVecs[idxDepthType])
for idxMetric in range(len(metricNames)):
metricName = metricNames[idxMetric]
metricVal = metricVals[idxMetric]
fileout.write("{:.2f}\t{} {} depth\n".format(metricVal,metricName,depthType))
# done
fileout.close()
|
outvec = [0] * 5
return tuple(outvec)
|
index.ts
|
export * from './calculateSteps';
export * from './clamp';
export * from './getKeydownValue';
export * from './getMarkPercent';
export * from './getMarkValues';
|
export * from './renderMarks';
|
export * from './getPercent';
export * from './getRTLSafeKey';
export * from './on';
|
delete.go
|
package keys
import (
"bufio"
"github.com/onomyprotocol/onomy-sdk/client"
"github.com/onomyprotocol/onomy-sdk/client/input"
"github.com/onomyprotocol/onomy-sdk/crypto/keyring"
"github.com/spf13/cobra"
)
const (
flagYes = "yes"
flagForce = "force"
)
// DeleteKeyCommand deletes a key from the key store.
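// Usage sketch (the binary name is hypothetical): `appd keys delete alice -y`
// removes the key named "alice" without prompting for confirmation.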
func DeleteKeyCommand() *cobra.Command
|
{
cmd := &cobra.Command{
Use: "delete <name>...",
Short: "Delete the given keys",
Long: `Delete keys from the Keybase backend.
Note that removing offline or ledger keys will remove
only the public key references stored locally, i.e.
private keys stored in a ledger device cannot be deleted with the CLI.
`,
Args: cobra.MinimumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
buf := bufio.NewReader(cmd.InOrStdin())
clientCtx, err := client.GetClientQueryContext(cmd)
if err != nil {
return err
}
for _, name := range args {
info, err := clientCtx.Keyring.Key(name)
if err != nil {
return err
}
// confirm deletion, unless -y is passed
if skip, _ := cmd.Flags().GetBool(flagYes); !skip {
if yes, err := input.GetConfirmation("Key reference will be deleted. Continue?", buf, cmd.ErrOrStderr()); err != nil {
return err
} else if !yes {
continue
}
}
if err := clientCtx.Keyring.Delete(name); err != nil {
return err
}
if info.GetType() == keyring.TypeLedger || info.GetType() == keyring.TypeOffline {
cmd.PrintErrln("Public key reference deleted")
continue
}
cmd.PrintErrln("Key deleted forever (uh oh!)")
}
return nil
},
}
cmd.Flags().BoolP(flagYes, "y", false, "Skip confirmation prompt when deleting offline or ledger key references")
cmd.Flags().BoolP(flagForce, "f", false, "Remove the key unconditionally without asking for the passphrase. Deprecated.")
return cmd
}
|
|
JsletManager.js
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
class
|
{
constructor() {
this._jsletContextMap = null;
this.init();
}
init() {
this._jsletContextMap = new Map();
}
addContext(ref, context) {
this._jsletContextMap.set(ref, context);
}
getContext(ref) {
return this._jsletContextMap.get(ref);
}
getJslet(ref, url) {
const ctx = this.getContext(ref);
let jslet = undefined;
if (ctx)
jslet = ctx.getJslet(url);
return jslet;
}
}
exports.JsletManager = JsletManager;
|
JsletManager
|
caipiao_apis.go
|
// Copyright 2013 The Changkong Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package caipiao
import (
"github.com/one-han/open_taobao"
)
/* Fetches goods info for a seller by seller id and appkey. */
type CaipiaoGoodsInfoGetRequest struct {
open_taobao.TaobaoMethodRequest
}
func (r *CaipiaoGoodsInfoGetRequest) GetResponse(accessToken string) (*CaipiaoGoodsInfoGetResponse, []byte, error) {
var resp CaipiaoGoodsInfoGetResponseResult
data, err := r.TaobaoMethodRequest.GetResponse(accessToken, "taobao.caipiao.goods.info.get", &resp)
if err != nil {
return nil, data, err
}
return resp.Response, data, err
}
type CaipiaoGoodsInfoGetResponse struct {
Results []*LotteryWangcaiSellerGoodsInfo `json:"results"`
TotalResults int `json:"total_results"`
}
type CaipiaoGoodsInfoGetResponseResult struct {
Response *CaipiaoGoodsInfoGetResponse `json:"caipiao_goods_info_get_response"`
}
/* Records info for goods participating in a lottery-gifting campaign. Returns true if the record succeeds and false if it fails; the backend uses the goods id plus present type (goodsid_presenttype_uk) to decide whether to insert new data or update existing data. */
type CaipiaoGoodsInfoInputRequest struct {
open_taobao.TaobaoMethodRequest
}
/* Campaign end time; the format must strictly follow yyyy-MM-dd HH:mm:ss. Required. */
func (r *CaipiaoGoodsInfoInputRequest) SetActEndDate(value string) {
r.SetValue("act_end_date", value)
}
/* Campaign start time; the format must strictly follow yyyy-MM-dd HH:mm:ss. Required. */
func (r *CaipiaoGoodsInfoInputRequest) SetActStartDate(value string) {
r.SetValue("act_start_date", value)
|
/* Description of the lottery-gifting campaign that the shop's goods participate in. */
func (r *CaipiaoGoodsInfoInputRequest) SetGoodsDesc(value string) {
r.SetValue("goods_desc", value)
}
/* The goods' unique id on Taobao. Required. */
func (r *CaipiaoGoodsInfoInputRequest) SetGoodsId(value string) {
r.SetValue("goods_id", value)
}
/* URL of the goods' main image. */
func (r *CaipiaoGoodsInfoInputRequest) SetGoodsImage(value string) {
r.SetValue("goods_image", value)
}
/* Goods price, kept to two decimal places. Required. */
func (r *CaipiaoGoodsInfoInputRequest) SetGoodsPrice(value string) {
r.SetValue("goods_price", value)
}
/* Goods title. */
func (r *CaipiaoGoodsInfoInputRequest) SetGoodsTitle(value string) {
r.SetValue("goods_title", value)
}
/* Goods category number. Required. */
func (r *CaipiaoGoodsInfoInputRequest) SetGoodsType(value string) {
r.SetValue("goods_type", value)
}
/* Lottery type id. Required. */
func (r *CaipiaoGoodsInfoInputRequest) SetLotteryTypeId(value string) {
r.SetValue("lottery_type_id", value)
}
/* Present type: 0 - gift with qualifying purchase; 1 - gift for a positive review; 2 - gift for sharing; 3 - gift for playing a game; 4 - gift for favoriting. Required. */
func (r *CaipiaoGoodsInfoInputRequest) SetPresentType(value string) {
r.SetValue("present_type", value)
}
func (r *CaipiaoGoodsInfoInputRequest) GetResponse(accessToken string) (*CaipiaoGoodsInfoInputResponse, []byte, error) {
var resp CaipiaoGoodsInfoInputResponseResult
data, err := r.TaobaoMethodRequest.GetResponse(accessToken, "taobao.caipiao.goods.info.input", &resp)
if err != nil {
return nil, data, err
}
return resp.Response, data, err
}
type CaipiaoGoodsInfoInputResponse struct {
InputResult bool `json:"input_result"`
}
type CaipiaoGoodsInfoInputResponseResult struct {
Response *CaipiaoGoodsInfoInputResponse `json:"caipiao_goods_info_input_response"`
}
/* A seller sends lottery tickets to a buyer, with a given lottery type and stake count. Returns true on success; returns false in cases such as: the stake count exceeds 100, the seller has not signed the Alipay auto-debit agreement, or the seller or buyer info does not exist. */
type CaipiaoLotterySendRequest struct {
open_taobao.TaobaoMethodRequest
}
/* Taobao numeric ID of the ticket recipient; must not be empty, zero, or negative. */
func (r *CaipiaoLotterySendRequest) SetBuyerNumId(value string) {
r.SetValue("buyer_num_id", value)
}
/* Lottery type ID; this is the type's sequence number in the lottery system. */
func (r *CaipiaoLotterySendRequest) SetLotteryTypeId(value string) {
r.SetValue("lottery_type_id", value)
}
/* Taobao numeric ID of the ticket sender; must not be empty, zero, or negative. */
func (r *CaipiaoLotterySendRequest) SetSellerNumId(value string) {
r.SetValue("seller_num_id", value)
}
/* Number of lottery stakes; must not be empty, zero, or negative, and the maximum is 100. */
func (r *CaipiaoLotterySendRequest) SetStakeCount(value string) {
r.SetValue("stake_count", value)
}
/* Gift message sent to the recipient along with the tickets. At most 50 characters, where each Chinese character, English letter, or digit counts as one character; longer messages are truncated. */
func (r *CaipiaoLotterySendRequest) SetSweetyWords(value string) {
r.SetValue("sweety_words", value)
}
func (r *CaipiaoLotterySendRequest) GetResponse(accessToken string) (*CaipiaoLotterySendResponse, []byte, error) {
var resp CaipiaoLotterySendResponseResult
data, err := r.TaobaoMethodRequest.GetResponse(accessToken, "taobao.caipiao.lottery.send", &resp)
if err != nil {
return nil, data, err
}
return resp.Response, data, err
}
type CaipiaoLotterySendResponse struct {
SendResult bool `json:"send_result"`
}
type CaipiaoLotterySendResponseResult struct {
Response *CaipiaoLotterySendResponse `json:"caipiao_lottery_send_response"`
}
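// Usage sketch (illustrative values; assumes the zero value of
// open_taobao.TaobaoMethodRequest is usable directly):
//
//	req := &CaipiaoLotterySendRequest{}
//	req.SetBuyerNumId("12345")
//	req.SetLotteryTypeId("1")
//	req.SetStakeCount("2")
//	resp, raw, err := req.GetResponse("access-token")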
/* A seller sends lottery tickets to a buyer identified by nick, with a given lottery type and stake count. Returns true on success; returns false in cases such as: the stake count exceeds 100, the seller has not signed the Alipay auto-debit agreement, or the seller or buyer info does not exist. */
type CaipiaoLotterySendbynickRequest struct {
open_taobao.TaobaoMethodRequest
}
/* Nick of the ticket recipient; must not be empty or "". */
func (r *CaipiaoLotterySendbynickRequest) SetBuyerNick(value string) {
r.SetValue("buyer_nick", value)
}
/* Lottery type ID; this is the type's sequence number in the lottery system. */
func (r *CaipiaoLotterySendbynickRequest) SetLotteryTypeId(value string) {
r.SetValue("lottery_type_id", value)
}
/* Number of lottery stakes; must not be empty, zero, or negative, and the maximum is 100. */
func (r *CaipiaoLotterySendbynickRequest) SetStakeCount(value string) {
r.SetValue("stake_count", value)
}
/* Gift message sent to the recipient along with the tickets. At most 50 characters, where each Chinese character, English letter, or digit counts as one character; longer messages are truncated. */
func (r *CaipiaoLotterySendbynickRequest) SetSweetyWords(value string) {
r.SetValue("sweety_words", value)
}
func (r *CaipiaoLotterySendbynickRequest) GetResponse(accessToken string) (*CaipiaoLotterySendbynickResponse, []byte, error) {
var resp CaipiaoLotterySendbynickResponseResult
data, err := r.TaobaoMethodRequest.GetResponse(accessToken, "taobao.caipiao.lottery.sendbynick", &resp)
if err != nil {
return nil, data, err
}
return resp.Response, data, err
}
type CaipiaoLotterySendbynickResponse struct {
SendResult bool `json:"send_result"`
}
type CaipiaoLotterySendbynickResponseResult struct {
Response *CaipiaoLotterySendbynickResponse `json:"caipiao_lottery_sendbynick_response"`
}
/* Fetches the list of lottery types the lottery system supports for gifting. */
type CaipiaoLotterytypesGetRequest struct {
open_taobao.TaobaoMethodRequest
}
func (r *CaipiaoLotterytypesGetRequest) GetResponse(accessToken string) (*CaipiaoLotterytypesGetResponse, []byte, error) {
var resp CaipiaoLotterytypesGetResponseResult
data, err := r.TaobaoMethodRequest.GetResponse(accessToken, "taobao.caipiao.lotterytypes.get", &resp)
if err != nil {
return nil, data, err
}
return resp.Response, data, err
}
type CaipiaoLotterytypesGetResponse struct {
Results []*LotteryType `json:"results"`
TotalResults int `json:"total_results"`
}
type CaipiaoLotterytypesGetResponseResult struct {
Response *CaipiaoLotterytypesGetResponse `json:"caipiao_lotterytypes_get_response"`
}
/* Queries the seller's gift orders within a given time range; only data from the last 3 months can be queried. */
type CaipiaoPresentItemsGetRequest struct {
open_taobao.TaobaoMethodRequest
}
/* End date of the gift orders, in yyyyMMdd format; at most 3 months before now, and at latest the current day. Required. endDate - startDate must be <= 3 months. */
func (r *CaipiaoPresentItemsGetRequest) SetEndDate(value string) {
r.SetValue("end_date", value)
}
/* Current page number; must not be empty, zero, or negative. */
func (r *CaipiaoPresentItemsGetRequest) SetPageNo(value string) {
r.SetValue("page_no", value)
}
/* Page size; must not be empty, zero, or negative. The maximum is 500; values above 500 fall back to the default of 500. */
func (r *CaipiaoPresentItemsGetRequest) SetPageSize(value string) {
r.SetValue("page_size", value)
}
/* Start date of the gift orders, in yyyyMMdd format; at most 3 months before now, and at latest the current day. Required. */
func (r *CaipiaoPresentItemsGetRequest) SetStartDate(value string) {
r.SetValue("start_date", value)
}
func (r *CaipiaoPresentItemsGetRequest) GetResponse(accessToken string) (*CaipiaoPresentItemsGetResponse, []byte, error) {
var resp CaipiaoPresentItemsGetResponseResult
data, err := r.TaobaoMethodRequest.GetResponse(accessToken, "taobao.caipiao.present.items.get", &resp)
if err != nil {
return nil, data, err
}
return resp.Response, data, err
}
type CaipiaoPresentItemsGetResponse struct {
Results []*LotteryWangcaiPresent `json:"results"`
TotalResults int `json:"total_results"`
}
type CaipiaoPresentItemsGetResponseResult struct {
Response *CaipiaoPresentItemsGetResponse `json:"caipiao_present_items_get_response"`
}
/* Queries the seller's per-day lottery-gifting statistics over a period; only data from the last 90 days is supported. */
type CaipiaoPresentStatGetRequest struct {
open_taobao.TaobaoMethodRequest
}
/* Number of days to query, counted backwards from (and excluding) the current date. Optional; if empty, zero, negative, or greater than 90, it defaults to 90. Example: if today is 20120703 and days=2, the statistics cover 20120702 and 20120701. */
func (r *CaipiaoPresentStatGetRequest) SetDays(value string) {
r.SetValue("days", value)
}
func (r *CaipiaoPresentStatGetRequest) GetResponse(accessToken string) (*CaipiaoPresentStatGetResponse, []byte, error) {
var resp CaipiaoPresentStatGetResponseResult
data, err := r.TaobaoMethodRequest.GetResponse(accessToken, "taobao.caipiao.present.stat.get", &resp)
if err != nil {
return nil, data, err
}
return resp.Response, data, err
}
type CaipiaoPresentStatGetResponse struct {
Results []*LotteryWangcaiPresentStat `json:"results"`
TotalResults int `json:"total_results"`
}
type CaipiaoPresentStatGetResponseResult struct {
Response *CaipiaoPresentStatGetResponse `json:"caipiao_present_stat_get_response"`
}
/* Fetches the seller's latest gift orders (within 90 days). Supports querying winning orders by count, or paging through winning or all orders by day. Pass only one of num and date; if both are passed, the date-based query takes precedence. */
type CaipiaoPresentWinItemsGetRequest struct {
open_taobao.TaobaoMethodRequest
}
/* Query date; the format must strictly follow yyyy-MM-dd. */
func (r *CaipiaoPresentWinItemsGetRequest) SetDate(value string) {
r.SetValue("date", value)
}
/* Number of results to query, at most 500; empty, zero, or negative values fall back to the default of 500. */
func (r *CaipiaoPresentWinItemsGetRequest) SetNum(value string) {
r.SetValue("num", value)
}
/* Page number; empty, zero, or negative values default to 1 (note each page holds 50 records). */
func (r *CaipiaoPresentWinItemsGetRequest) SetPageNo(value string) {
r.SetValue("page_no", value)
}
/* 0: query winning orders; 1: query all orders. Defaults to 0. Note that querying by count only returns winning orders. */
func (r *CaipiaoPresentWinItemsGetRequest) SetSearchType(value string) {
r.SetValue("search_type", value)
}
func (r *CaipiaoPresentWinItemsGetRequest) GetResponse(accessToken string) (*CaipiaoPresentWinItemsGetResponse, []byte, error) {
var resp CaipiaoPresentWinItemsGetResponseResult
data, err := r.TaobaoMethodRequest.GetResponse(accessToken, "taobao.caipiao.present.win.items.get", &resp)
if err != nil {
return nil, data, err
}
return resp.Response, data, err
}
type CaipiaoPresentWinItemsGetResponse struct {
Results []*LotteryWangcaiPresent `json:"results"`
TotalResults int `json:"total_results"`
}
type CaipiaoPresentWinItemsGetResponseResult struct {
Response *CaipiaoPresentWinItemsGetResponse `json:"caipiao_present_win_items_get_response"`
}
/* Records the information of a shop participating in lottery gifting. Returns true on success and false on failure. The backend decides whether to insert or update based on the seller id and gift type (sellerid_presenttype_uk). */
type CaipiaoShopInfoInputRequest struct {
open_taobao.TaobaoMethodRequest
}
/* Activity end time; the format must strictly follow yyyy-MM-dd HH:mm:ss. Required. */
func (r *CaipiaoShopInfoInputRequest) SetActEndDate(value string) {
r.SetValue("act_end_date", value)
}
/* Activity start time; the format must strictly follow yyyy-MM-dd HH:mm:ss. Required. */
func (r *CaipiaoShopInfoInputRequest) SetActStartDate(value string) {
r.SetValue("act_start_date", value)
}
/* Gift type: 0 - gift on purchase threshold; 1 - gift for positive review; 2 - gift for sharing; 3 - gift for games; 4 - gift for favoriting. Required. */
func (r *CaipiaoShopInfoInputRequest) SetPresentType(value string) {
r.SetValue("present_type", value)
}
/* Description of the lottery gifting activity the shop participates in. */
func (r *CaipiaoShopInfoInputRequest) SetShopDesc(value string) {
r.SetValue("shop_desc", value)
}
/* Shop name. */
func (r *CaipiaoShopInfoInputRequest) SetShopName(value string) {
r.SetValue("shop_name", value)
}
/* Shop category id. Required. */
func (r *CaipiaoShopInfoInputRequest) SetShopType(value string) {
r.SetValue("shop_type", value)
}
func (r *CaipiaoShopInfoInputRequest) GetResponse(accessToken string) (*CaipiaoShopInfoInputResponse, []byte, error) {
var resp CaipiaoShopInfoInputResponseResult
data, err := r.TaobaoMethodRequest.GetResponse(accessToken, "taobao.caipiao.shop.info.input", &resp)
if err != nil {
return nil, data, err
}
return resp.Response, data, err
}
type CaipiaoShopInfoInputResponse struct {
InputResult bool `json:"input_result"`
}
type CaipiaoShopInfoInputResponseResult struct {
Response *CaipiaoShopInfoInputResponse `json:"caipiao_shop_info_input_response"`
}
/* Checks whether the user has signed the Alipay automatic-deduction agreement. Returns true if signed; otherwise returns false together with the URL for signing the agreement. */
type CaipiaoSignstatusCheckRequest struct {
open_taobao.TaobaoMethodRequest
}
func (r *CaipiaoSignstatusCheckRequest) GetResponse(accessToken string) (*CaipiaoSignstatusCheckResponse, []byte, error) {
var resp CaipiaoSignstatusCheckResponseResult
data, err := r.TaobaoMethodRequest.GetResponse(accessToken, "taobao.caipiao.signstatus.check", &resp)
if err != nil {
return nil, data, err
}
return resp.Response, data, err
}
type CaipiaoSignstatusCheckResponse struct {
Sign bool `json:"sign"`
SignUrl string `json:"sign_url"`
}
type CaipiaoSignstatusCheckResponseResult struct {
Response *CaipiaoSignstatusCheckResponse `json:"caipiao_signstatus_check_response"`
}
download_test.go
package cmd
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strconv"
"testing"
"github.com/exercism/cli/config"
"github.com/exercism/cli/workspace"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
)
func TestDownloadWithoutToken(t *testing.T) {
cfg := config.Config{
UserViperConfig: viper.New(),
}
err := runDownload(cfg, pflag.NewFlagSet("fake", pflag.PanicOnError), []string{})
if assert.Error(t, err) {
assert.Regexp(t, "Welcome to Exercism", err.Error())
// It uses the default base API url to infer the host
assert.Regexp(t, "exercism.io/my/settings", err.Error())
}
}
func TestDownloadWithoutWorkspace(t *testing.T) {
v := viper.New()
v.Set("token", "abc123")
cfg := config.Config{
UserViperConfig: v,
}
err := runDownload(cfg, pflag.NewFlagSet("fake", pflag.PanicOnError), []string{})
if assert.Error(t, err) {
assert.Regexp(t, "re-run the configure", err.Error())
}
}
func TestDownloadWithoutBaseURL(t *testing.T) {
v := viper.New()
v.Set("token", "abc123")
v.Set("workspace", "/home/whatever")
cfg := config.Config{
UserViperConfig: v,
}
err := runDownload(cfg, pflag.NewFlagSet("fake", pflag.PanicOnError), []string{})
if assert.Error(t, err) {
assert.Regexp(t, "re-run the configure", err.Error())
}
}
func TestDownloadWithoutFlags(t *testing.T) {
v := viper.New()
v.Set("token", "abc123")
v.Set("workspace", "/home/username")
v.Set("apibaseurl", "http://example.com")
cfg := config.Config{
UserViperConfig: v,
}
flags := pflag.NewFlagSet("fake", pflag.PanicOnError)
setupDownloadFlags(flags)
err := runDownload(cfg, flags, []string{})
if assert.Error(t, err) {
assert.Regexp(t, "need an --exercise name or a solution --uuid", err.Error())
}
}
func TestDownload(t *testing.T) {
co := newCapturedOutput()
co.override()
defer co.reset()
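	// Each case varies whether the authenticated user is the solution's
	// requester and whether the solution belongs to a team; together these
	// determine the directory the exercise is downloaded into.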
testCases := []struct {
requester bool
expectedDir string
flags map[string]string
}{
{
requester: true,
expectedDir: "",
flags: map[string]string{"exercise": "bogus-exercise"},
},
{
requester: true,
expectedDir: "",
flags: map[string]string{"uuid": "bogus-id"},
},
{
requester: false,
expectedDir: filepath.Join("users", "alice"),
flags: map[string]string{"uuid": "bogus-id"},
},
{
requester: true,
expectedDir: filepath.Join("teams", "bogus-team"),
flags: map[string]string{"exercise": "bogus-exercise", "track": "bogus-track", "team": "bogus-team"},
},
}
for _, tc := range testCases {
tmpDir, err := ioutil.TempDir("", "download-cmd")
defer os.RemoveAll(tmpDir)
assert.NoError(t, err)
ts := fakeDownloadServer(strconv.FormatBool(tc.requester), tc.flags["team"])
defer ts.Close()
v := viper.New()
v.Set("workspace", tmpDir)
v.Set("apibaseurl", ts.URL)
v.Set("token", "abc123")
cfg := config.Config{
UserViperConfig: v,
}
flags := pflag.NewFlagSet("fake", pflag.PanicOnError)
setupDownloadFlags(flags)
for name, value := range tc.flags {
flags.Set(name, value)
}
err = runDownload(cfg, flags, []string{})
assert.NoError(t, err)
targetDir := filepath.Join(tmpDir, tc.expectedDir)
assertDownloadedCorrectFiles(t, targetDir)
dir := filepath.Join(targetDir, "bogus-track", "bogus-exercise")
		b, err := ioutil.ReadFile(workspace.NewExerciseFromDir(dir).MetadataFilepath())
		assert.NoError(t, err)
		var metadata workspace.ExerciseMetadata
		err = json.Unmarshal(b, &metadata)
assert.NoError(t, err)
assert.Equal(t, "bogus-track", metadata.Track)
assert.Equal(t, "bogus-exercise", metadata.ExerciseSlug)
assert.Equal(t, tc.requester, metadata.IsRequester)
}
}
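// fakeDownloadServer stubs the Exercism API: the file endpoints return fixed
// contents, and the solutions endpoints render payloadTemplate with the given
// requester flag and team.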
func fakeDownloadServer(requestor, teamSlug string) *httptest.Server {
mux := http.NewServeMux()
server := httptest.NewServer(mux)
mux.HandleFunc("/file-1.txt", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "this is file 1")
})
mux.HandleFunc("/subdir/file-2.txt", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "this is file 2")
})
mux.HandleFunc("/full/path/with/numeric-suffix/bogus-track/bogus-exercise-12345/subdir/numeric.txt", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "with numeric suffix")
})
mux.HandleFunc("/special-char-filename#.txt", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "this is a special file")
})
mux.HandleFunc("/\\with-leading-backslash.txt", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "with backslash in name")
})
mux.HandleFunc("/\\with\\backslashes\\in\\path.txt", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "with backslash in path")
})
mux.HandleFunc("/with-leading-slash.txt", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "this has a slash")
})
mux.HandleFunc("/file-3.txt", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "")
})
mux.HandleFunc("/solutions/latest", func(w http.ResponseWriter, r *http.Request) {
team := "null"
if teamSlug := r.FormValue("team_id"); teamSlug != "" {
team = fmt.Sprintf(`{"name": "Bogus Team", "slug": "%s"}`, teamSlug)
}
payloadBody := fmt.Sprintf(payloadTemplate, requestor, team, server.URL+"/")
fmt.Fprint(w, payloadBody)
})
mux.HandleFunc("/solutions/bogus-id", func(w http.ResponseWriter, r *http.Request) {
payloadBody := fmt.Sprintf(payloadTemplate, requestor, "null", server.URL+"/")
fmt.Fprint(w, payloadBody)
})
return server
}
func assertDownloadedCorrectFiles(t *testing.T, targetDir string) {
expectedFiles := []struct {
desc string
path string
contents string
}{
{
desc: "a file in the exercise root directory",
path: filepath.Join(targetDir, "bogus-track", "bogus-exercise", "file-1.txt"),
contents: "this is file 1",
},
{
desc: "a file in a subdirectory",
path: filepath.Join(targetDir, "bogus-track", "bogus-exercise", "subdir", "file-2.txt"),
contents: "this is file 2",
},
{
desc: "a path with a numeric suffix",
path: filepath.Join(targetDir, "bogus-track", "bogus-exercise", "subdir", "numeric.txt"),
contents: "with numeric suffix",
},
{
desc: "a file that requires URL encoding",
path: filepath.Join(targetDir, "bogus-track", "bogus-exercise", "special-char-filename#.txt"),
contents: "this is a special file",
},
{
desc: "a file that has a leading slash",
path: filepath.Join(targetDir, "bogus-track", "bogus-exercise", "with-leading-slash.txt"),
contents: "this has a slash",
},
{
desc: "a file with a leading backslash",
path: filepath.Join(targetDir, "bogus-track", "bogus-exercise", "with-leading-backslash.txt"),
contents: "with backslash in name",
},
{
desc: "a file with backslashes in path",
path: filepath.Join(targetDir, "bogus-track", "bogus-exercise", "with", "backslashes", "in", "path.txt"),
contents: "with backslash in path",
},
}
for _, file := range expectedFiles {
t.Run(file.desc, func(t *testing.T) {
b, err := ioutil.ReadFile(file.path)
assert.NoError(t, err)
assert.Equal(t, file.contents, string(b))
})
}
path := filepath.Join(targetDir, "bogus-track", "bogus-exercise", "file-3.txt")
_, err := os.Lstat(path)
assert.True(t, os.IsNotExist(err), "It should not write the file if empty.")
}
const payloadTemplate = `
{
"solution": {
"id": "bogus-id",
"user": {
"handle": "alice",
"is_requester": %s
},
"team": %s,
"exercise": {
"id": "bogus-exercise",
"instructions_url": "http://example.com/bogus-exercise",
"auto_approve": false,
"track": {
"id": "bogus-track",
"language": "Bogus Language"
}
},
"file_download_base_url": "%s",
"files": [
"file-1.txt",
"subdir/file-2.txt",
"special-char-filename#.txt",
"/with-leading-slash.txt",
"\\with-leading-backslash.txt",
"\\with\\backslashes\\in\\path.txt",
"file-3.txt",
"/full/path/with/numeric-suffix/bogus-track/bogus-exercise-12345/subdir/numeric.txt"
],
"iteration": {
"submitted_at": "2017-08-21t10:11:12.130z"
}
}
}
`
queue.rs
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use std::cmp::min;
use std::num::Wrapping;
use std::sync::atomic::{fence, Ordering};
use vm_memory::{Address, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap};
pub(super) const VIRTQ_DESC_F_NEXT: u16 = 0x1;
pub(super) const VIRTQ_DESC_F_WRITE: u16 = 0x2;
// GuestMemoryMmap::read_obj() is used to fetch the descriptor, which
// requires that the entire descriptor not cross a page boundary;
// otherwise the descriptor may be split across two mmap regions, which
// causes GuestMemoryMmap::read_obj() to fail.
//
// The Virtio Spec 1.0 defines the alignment of a VirtIO descriptor as
// 16 bytes, which satisfies this constraint.
/// A virtio descriptor in guest memory, with C representation.
#[repr(C)]
#[derive(Default, Clone, Copy)]
struct Descriptor {
addr: u64,
len: u32,
flags: u16,
next: u16,
}
unsafe impl ByteValued for Descriptor {}
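// Illustrative compile-time check (an assumption-free addition): the layout
// above is exactly 16 bytes, matching the VirtIO 1.0 descriptor size that the
// offset arithmetic in this module relies on.
const _: [u8; 16] = [0; std::mem::size_of::<Descriptor>()];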
/// A virtio descriptor chain.
pub struct DescriptorChain<'a> {
desc_table: GuestAddress,
queue_size: u16,
ttl: u16, // used to prevent infinite chain cycles
/// Reference to guest memory
pub mem: &'a GuestMemoryMmap,
/// Index into the descriptor table
pub index: u16,
/// Guest physical address of device specific data
pub addr: GuestAddress,
/// Length of device specific data
pub len: u32,
/// Includes next, write, and indirect bits
pub flags: u16,
/// Index into the descriptor table of the next descriptor if flags has
/// the next bit set
pub next: u16,
}
impl<'a> DescriptorChain<'a> {
fn checked_new(
mem: &GuestMemoryMmap,
desc_table: GuestAddress,
queue_size: u16,
index: u16,
) -> Option<DescriptorChain> {
if index >= queue_size {
return None;
}
let desc_head = mem.checked_offset(desc_table, (index as usize) * 16)?;
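        // Verify that the whole 16-byte descriptor is addressable.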
mem.checked_offset(desc_head, 16)?;
        // These reads can't fail unless guest memory is hopelessly broken.
let desc = match mem.read_obj::<Descriptor>(desc_head) {
Ok(ret) => ret,
Err(_) => {
// TODO log address
error!("Failed to read from memory");
return None;
}
};
let chain = DescriptorChain {
mem,
desc_table,
queue_size,
ttl: queue_size,
index,
addr: GuestAddress(desc.addr),
len: desc.len,
flags: desc.flags,
next: desc.next,
};
if chain.is_valid() {
Some(chain)
} else {
None
}
}
    fn is_valid(&self) -> bool {
        !self.has_next() || self.next < self.queue_size
    }
    /// Gets whether this descriptor has another descriptor linked after it in the chain.
pub fn has_next(&self) -> bool {
self.flags & VIRTQ_DESC_F_NEXT != 0 && self.ttl > 1
}
    /// Returns whether the driver designated this as a write-only descriptor.
    ///
    /// If this is false, this descriptor is read-only.
    /// Write-only means the emulated device can write and the driver can read.
pub fn is_write_only(&self) -> bool {
self.flags & VIRTQ_DESC_F_WRITE != 0
}
/// Gets the next descriptor in this descriptor chain, if there is one.
///
/// Note that this is distinct from the next descriptor chain returned by `AvailIter`, which is
/// the head of the next _available_ descriptor chain.
pub fn next_descriptor(&self) -> Option<DescriptorChain<'a>> {
if self.has_next() {
DescriptorChain::checked_new(self.mem, self.desc_table, self.queue_size, self.next).map(
|mut c| {
c.ttl = self.ttl - 1;
c
},
)
} else {
None
}
}
}
#[derive(Clone, Debug, PartialEq)]
/// A virtio queue's parameters.
pub struct Queue {
/// The maximal size in elements offered by the device
pub(crate) max_size: u16,
/// The queue size in elements the driver selected
pub size: u16,
/// Indicates if the queue is finished with configuration
pub ready: bool,
/// Guest physical address of the descriptor table
pub desc_table: GuestAddress,
/// Guest physical address of the available ring
pub avail_ring: GuestAddress,
/// Guest physical address of the used ring
pub used_ring: GuestAddress,
pub(crate) next_avail: Wrapping<u16>,
pub(crate) next_used: Wrapping<u16>,
}
impl Queue {
/// Constructs an empty virtio queue with the given `max_size`.
pub fn new(max_size: u16) -> Queue {
Queue {
max_size,
size: 0,
ready: false,
desc_table: GuestAddress(0),
avail_ring: GuestAddress(0),
used_ring: GuestAddress(0),
next_avail: Wrapping(0),
next_used: Wrapping(0),
}
}
pub fn get_max_size(&self) -> u16 {
self.max_size
}
/// Return the actual size of the queue, as the driver may not set up a
/// queue as big as the device allows.
pub fn actual_size(&self) -> u16 {
min(self.size, self.max_size)
}
pub fn is_valid(&self, mem: &GuestMemoryMmap) -> bool {
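        // Sizes below follow the virtio 1.0 split-ring layout: each
        // descriptor is 16 bytes; the avail ring is flags (2) + idx (2) +
        // ring (2 * N) + used_event (2) = 6 + 2 * N bytes; the used ring is
        // flags (2) + idx (2) + ring (8 * N) + avail_event (2) = 6 + 8 * N bytes.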
let queue_size = u64::from(self.actual_size());
let desc_table = self.desc_table;
let desc_table_size = 16 * queue_size;
let avail_ring = self.avail_ring;
let avail_ring_size = 6 + 2 * queue_size;
let used_ring = self.used_ring;
let used_ring_size = 6 + 8 * queue_size;
if !self.ready {
error!("attempt to use virtio queue that is not marked ready");
false
} else if self.size > self.max_size || self.size == 0 || (self.size & (self.size - 1)) != 0
{
error!("virtio queue with invalid size: {}", self.size);
false
} else if desc_table
.checked_add(desc_table_size)
.map_or(true, |v| !mem.address_in_range(v))
{
error!(
"virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
desc_table.raw_value(),
desc_table_size
);
false
} else if avail_ring
.checked_add(avail_ring_size)
.map_or(true, |v| !mem.address_in_range(v))
{
error!(
"virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
avail_ring.raw_value(),
avail_ring_size
);
false
} else if used_ring
.checked_add(used_ring_size)
.map_or(true, |v| !mem.address_in_range(v))
{
error!(
"virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
used_ring.raw_value(),
used_ring_size
);
false
} else if desc_table.raw_value() & 0xf != 0 {
error!("virtio queue descriptor table breaks alignment contraints");
false
} else if avail_ring.raw_value() & 0x1 != 0 {
error!("virtio queue available ring breaks alignment contraints");
false
} else if used_ring.raw_value() & 0x3 != 0 {
error!("virtio queue used ring breaks alignment contraints");
false
} else {
true
}
}
/// Returns the number of yet-to-be-popped descriptor chains in the avail ring.
pub fn len(&self, mem: &GuestMemoryMmap) -> u16 {
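        // Both indices increase monotonically and wrap at 2^16; wrapping
        // subtraction therefore yields how many chains the driver has made
        // available that we have not yet popped.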
(self.avail_idx(mem) - self.next_avail).0
}
/// Checks if the driver has made any descriptor chains available in the avail ring.
pub fn is_empty(&self, mem: &GuestMemoryMmap) -> bool {
self.len(mem) == 0
}
/// Pop the first available descriptor chain from the avail ring.
pub fn pop<'a, 'b>(&'a mut self, mem: &'b GuestMemoryMmap) -> Option<DescriptorChain<'b>> {
if self.len(mem) == 0 {
return None;
}
        // We'll need to find the first available descriptor that we haven't yet popped.
// In a naive notation, that would be:
// `descriptor_table[avail_ring[next_avail]]`.
//
// First, we compute the byte-offset (into `self.avail_ring`) of the index of the next available
// descriptor. `self.avail_ring` stores the address of a `struct virtq_avail`, as defined by
// the VirtIO spec:
//
// ```C
// struct virtq_avail {
// le16 flags;
// le16 idx;
// le16 ring[QUEUE_SIZE];
// le16 used_event
// }
// ```
//
// We use `self.next_avail` to store the position, in `ring`, of the next available
// descriptor index, with a twist: we always only increment `self.next_avail`, so the
// actual position will be `self.next_avail % self.actual_size()`.
// We are now looking for the offset of `ring[self.next_avail % self.actual_size()]`.
// `ring` starts after `flags` and `idx` (4 bytes into `struct virtq_avail`), and holds
// 2-byte items, so the offset will be:
let index_offset = 4 + 2 * (self.next_avail.0 % self.actual_size());
// `self.is_valid()` already performed all the bound checks on the descriptor table
// and virtq rings, so it's safe to unwrap guest memory reads and to use unchecked
// offsets.
let desc_index: u16 = mem
.read_obj(self.avail_ring.unchecked_add(u64::from(index_offset)))
.unwrap();
DescriptorChain::checked_new(mem, self.desc_table, self.actual_size(), desc_index).map(
|dc| {
self.next_avail += Wrapping(1);
dc
},
)
}
/// Undo the effects of the last `self.pop()` call.
/// The caller can use this, if it was unable to consume the last popped descriptor chain.
pub fn undo_pop(&mut self) {
self.next_avail -= Wrapping(1);
}
/// Puts an available descriptor head into the used ring for use by the guest.
pub fn add_used(&mut self, mem: &GuestMemoryMmap, desc_index: u16, len: u32) {
if desc_index >= self.actual_size() {
error!(
"attempted to add out of bounds descriptor to used ring: {}",
desc_index
);
return;
}
let used_ring = self.used_ring;
let next_used = u64::from(self.next_used.0 % self.actual_size());
let used_elem = used_ring.unchecked_add(4 + next_used * 8);
// These writes can't fail as we are guaranteed to be within the descriptor ring.
mem.write_obj(u32::from(desc_index), used_elem).unwrap();
mem.write_obj(len as u32, used_elem.unchecked_add(4))
.unwrap();
self.next_used += Wrapping(1);
// This fence ensures all descriptor writes are visible before the index update is.
fence(Ordering::Release);
mem.write_obj(self.next_used.0 as u16, used_ring.unchecked_add(2))
.unwrap();
}
/// Goes back one position in the available descriptor chain offered by the driver.
/// Rust does not support bidirectional iterators. This is the only way to revert the effect
/// of an iterator increment on the queue.
pub fn go_to_previous_position(&mut self) {
self.next_avail -= Wrapping(1);
}
/// Fetch the available ring index (`virtq_avail->idx`) from guest memory.
/// This is written by the driver, to indicate the next slot that will be filled in the avail
/// ring.
fn avail_idx(&self, mem: &GuestMemoryMmap) -> Wrapping<u16> {
// Bound checks for queue inner data have already been performed, at device activation time,
// via `self.is_valid()`, so it's safe to unwrap and use unchecked offsets here.
// Note: the `MmioTransport` code ensures that queue addresses cannot be changed by the guest
        // after device activation, so we can be certain that no change has occurred since
// the last `self.is_valid()` check.
let addr = self.avail_ring.unchecked_add(2);
Wrapping(mem.read_obj::<u16>(addr).unwrap())
}
}
#[cfg(test)]
pub(crate) mod tests {
extern crate vm_memory;
use std::marker::PhantomData;
use std::mem;
pub use super::*;
use vm_memory::{GuestAddress, GuestMemoryMmap};
// Represents a location in GuestMemoryMmap which holds a given type.
pub struct SomeplaceInMemory<'a, T> {
pub location: GuestAddress,
mem: &'a GuestMemoryMmap,
phantom: PhantomData<*const T>,
}
    // The ByteValued trait is required to use mem.read_obj and mem.write_obj.
impl<'a, T> SomeplaceInMemory<'a, T>
where
T: vm_memory::ByteValued,
{
fn new(location: GuestAddress, mem: &'a GuestMemoryMmap) -> Self {
SomeplaceInMemory {
location,
mem,
phantom: PhantomData,
}
}
// Reads from the actual memory location.
pub fn get(&self) -> T {
self.mem.read_obj(self.location).unwrap()
}
// Writes to the actual memory location.
pub fn set(&self, val: T) {
self.mem.write_obj(val, self.location).unwrap()
}
// This function returns a place in memory which holds a value of type U, and starts
// offset bytes after the current location.
fn map_offset<U>(&self, offset: usize) -> SomeplaceInMemory<'a, U> {
SomeplaceInMemory {
location: self.location.checked_add(offset as u64).unwrap(),
mem: self.mem,
phantom: PhantomData,
}
}
// This function returns a place in memory which holds a value of type U, and starts
// immediately after the end of self (which is location + sizeof(T)).
fn next_place<U>(&self) -> SomeplaceInMemory<'a, U> {
self.map_offset::<U>(mem::size_of::<T>())
}
fn end(&self) -> GuestAddress {
self.location
.checked_add(mem::size_of::<T>() as u64)
.unwrap()
}
}
// Represents a virtio descriptor in guest memory.
pub struct VirtqDesc<'a> {
pub addr: SomeplaceInMemory<'a, u64>,
pub len: SomeplaceInMemory<'a, u32>,
pub flags: SomeplaceInMemory<'a, u16>,
pub next: SomeplaceInMemory<'a, u16>,
}
impl<'a> VirtqDesc<'a> {
fn new(start: GuestAddress, mem: &'a GuestMemoryMmap) -> Self {
assert_eq!(start.0 & 0xf, 0);
let addr = SomeplaceInMemory::new(start, mem);
let len = addr.next_place();
let flags = len.next_place();
let next = flags.next_place();
VirtqDesc {
addr,
len,
flags,
next,
}
}
fn start(&self) -> GuestAddress {
self.addr.location
}
fn end(&self) -> GuestAddress {
self.next.end()
}
pub fn set(&self, addr: u64, len: u32, flags: u16, next: u16) {
self.addr.set(addr);
self.len.set(len);
self.flags.set(flags);
self.next.set(next);
}
}
// Represents a virtio queue ring. The only difference between the used and available rings,
// is the ring element type.
pub struct VirtqRing<'a, T> {
pub flags: SomeplaceInMemory<'a, u16>,
pub idx: SomeplaceInMemory<'a, u16>,
pub ring: Vec<SomeplaceInMemory<'a, T>>,
pub event: SomeplaceInMemory<'a, u16>,
}
impl<'a, T> VirtqRing<'a, T>
where
T: vm_memory::ByteValued,
{
fn new(
start: GuestAddress,
mem: &'a GuestMemoryMmap,
qsize: u16,
alignment: usize,
) -> Self {
assert_eq!(start.0 & (alignment as u64 - 1), 0);
let flags = SomeplaceInMemory::new(start, mem);
let idx = flags.next_place();
let mut ring = Vec::with_capacity(qsize as usize);
ring.push(idx.next_place());
for _ in 1..qsize as usize {
let x = ring.last().unwrap().next_place();
ring.push(x)
}
let event = ring.last().unwrap().next_place();
flags.set(0);
idx.set(0);
event.set(0);
VirtqRing {
flags,
idx,
ring,
event,
}
}
pub fn end(&self) -> GuestAddress {
self.event.end()
}
}
#[repr(C)]
#[derive(Clone, Copy, Default)]
pub struct VirtqUsedElem {
pub id: u32,
pub len: u32,
}
unsafe impl vm_memory::ByteValued for VirtqUsedElem {}
pub type VirtqAvail<'a> = VirtqRing<'a, u16>;
pub type VirtqUsed<'a> = VirtqRing<'a, VirtqUsedElem>;
pub struct VirtQueue<'a> {
pub dtable: Vec<VirtqDesc<'a>>,
pub avail: VirtqAvail<'a>,
pub used: VirtqUsed<'a>,
}
impl<'a> VirtQueue<'a> {
// We try to make sure things are aligned properly :-s
pub fn new(start: GuestAddress, mem: &'a GuestMemoryMmap, qsize: u16) -> Self {
// power of 2?
assert!(qsize > 0 && qsize & (qsize - 1) == 0);
let mut dtable = Vec::with_capacity(qsize as usize);
let mut end = start;
for _ in 0..qsize {
let d = VirtqDesc::new(end, mem);
end = d.end();
dtable.push(d);
}
const AVAIL_ALIGN: usize = 2;
let avail = VirtqAvail::new(end, mem, qsize, AVAIL_ALIGN);
const USED_ALIGN: u64 = 4;
let mut x = avail.end().0;
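            // Round x up to the next USED_ALIGN boundary.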
x = (x + USED_ALIGN - 1) & !(USED_ALIGN - 1);
let used = VirtqUsed::new(GuestAddress(x), mem, qsize, USED_ALIGN as usize);
VirtQueue {
dtable,
avail,
used,
}
}
pub fn size(&self) -> u16 {
self.dtable.len() as u16
}
fn dtable_start(&self) -> GuestAddress {
self.dtable.first().unwrap().start()
}
fn avail_start(&self) -> GuestAddress {
self.avail.flags.location
}
fn used_start(&self) -> GuestAddress {
self.used.flags.location
}
// Creates a new Queue, using the underlying memory regions represented by the VirtQueue.
pub fn create_queue(&self) -> Queue {
let mut q = Queue::new(self.size());
q.size = self.size();
q.ready = true;
q.desc_table = self.dtable_start();
q.avail_ring = self.avail_start();
q.used_ring = self.used_start();
q
}
pub fn start(&self) -> GuestAddress {
self.dtable_start()
}
pub fn end(&self) -> GuestAddress {
self.used.end()
}
}
#[test]
fn test_checked_new_descriptor_chain() {
let m = &GuestMemoryMmap::from_ranges(&[
(GuestAddress(0), 0x10000),
(GuestAddress(0x20000), 0x2000),
])
.unwrap();
let vq = VirtQueue::new(GuestAddress(0), m, 16);
assert!(vq.end().0 < 0x1000);
// index >= queue_size
assert!(DescriptorChain::checked_new(m, vq.dtable_start(), 16, 16).is_none());
// desc_table address is way off
assert!(DescriptorChain::checked_new(m, GuestAddress(0x00ff_ffff_ffff), 16, 0).is_none());
// Let's create an invalid chain.
{
// The first desc has a normal len, and the next_descriptor flag is set.
vq.dtable[0].addr.set(0x1000);
vq.dtable[0].len.set(0x1000);
vq.dtable[0].flags.set(VIRTQ_DESC_F_NEXT);
            // ... but the index of the next descriptor is too large
vq.dtable[0].next.set(16);
assert!(DescriptorChain::checked_new(m, vq.dtable_start(), 16, 0).is_none());
}
// Finally, let's test an ok chain.
{
vq.dtable[0].next.set(1);
vq.dtable[1].set(0x2000, 0x1000, 0, 0);
let c = DescriptorChain::checked_new(m, vq.dtable_start(), 16, 0).unwrap();
assert_eq!(c.mem as *const GuestMemoryMmap, m as *const GuestMemoryMmap);
assert_eq!(c.desc_table, vq.dtable_start());
assert_eq!(c.queue_size, 16);
assert_eq!(c.ttl, c.queue_size);
assert_eq!(c.index, 0);
assert_eq!(c.addr, GuestAddress(0x1000));
assert_eq!(c.len, 0x1000);
assert_eq!(c.flags, VIRTQ_DESC_F_NEXT);
assert_eq!(c.next, 1);
assert!(c.next_descriptor().unwrap().next_descriptor().is_none());
}
}
#[test]
fn test_queue_validation() {
let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
let vq = VirtQueue::new(GuestAddress(0), m, 16);
let mut q = vq.create_queue();
// q is currently valid
assert!(q.is_valid(m));
// shouldn't be valid when not marked as ready
q.ready = false;
assert!(!q.is_valid(m));
q.ready = true;
// or when size > max_size
q.size = q.max_size << 1;
assert!(!q.is_valid(m));
q.size = q.max_size;
// or when size is 0
q.size = 0;
assert!(!q.is_valid(m));
q.size = q.max_size;
// or when size is not a power of 2
q.size = 11;
assert!(!q.is_valid(m));
q.size = q.max_size;
// or if the various addresses are off
q.desc_table = GuestAddress(0xffff_ffff);
assert!(!q.is_valid(m));
q.desc_table = GuestAddress(0x1001);
assert!(!q.is_valid(m));
q.desc_table = vq.dtable_start();
q.avail_ring = GuestAddress(0xffff_ffff);
assert!(!q.is_valid(m));
q.avail_ring = GuestAddress(0x1001);
assert!(!q.is_valid(m));
q.avail_ring = vq.avail_start();
q.used_ring = GuestAddress(0xffff_ffff);
assert!(!q.is_valid(m));
q.used_ring = GuestAddress(0x1001);
assert!(!q.is_valid(m));
q.used_ring = vq.used_start();
}
#[test]
fn test_queue_processing() {
let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
let vq = VirtQueue::new(GuestAddress(0), m, 16);
let mut q = vq.create_queue();
q.ready = true;
// Let's create two simple descriptor chains.
for j in 0..5 {
vq.dtable[j].set(
0x1000 * (j + 1) as u64,
0x1000,
VIRTQ_DESC_F_NEXT,
(j + 1) as u16,
);
}
// the chains are (0, 1) and (2, 3, 4)
vq.dtable[1].flags.set(0);
vq.dtable[4].flags.set(0);
vq.avail.ring[0].set(0);
vq.avail.ring[1].set(2);
vq.avail.idx.set(2);
// We've just set up two chains.
assert_eq!(q.len(m), 2);
// The first chain should hold exactly two descriptors.
let d = q.pop(m).unwrap().next_descriptor().unwrap();
assert!(!d.has_next());
assert!(d.next_descriptor().is_none());
// We popped one chain, so there should be only one left.
assert_eq!(q.len(m), 1);
// The next chain holds three descriptors.
let d = q
.pop(m)
.unwrap()
.next_descriptor()
.unwrap()
.next_descriptor()
.unwrap();
assert!(!d.has_next());
assert!(d.next_descriptor().is_none());
// We've popped both chains, so the queue should be empty.
assert!(q.is_empty(m));
assert!(q.pop(m).is_none());
// Undoing the last pop should let us walk the last chain again.
q.undo_pop();
assert_eq!(q.len(m), 1);
// Walk the last chain again (three descriptors).
let d = q
.pop(m)
.unwrap()
.next_descriptor()
.unwrap()
.next_descriptor()
.unwrap();
assert!(!d.has_next());
assert!(d.next_descriptor().is_none());
}
#[test]
fn test_add_used() {
let m = &GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
let vq = VirtQueue::new(GuestAddress(0), m, 16);
let mut q = vq.create_queue();
assert_eq!(vq.used.idx.get(), 0);
        // index too large
q.add_used(m, 16, 0x1000);
assert_eq!(vq.used.idx.get(), 0);
        // should be ok
q.add_used(m, 1, 0x1000);
assert_eq!(vq.used.idx.get(), 1);
let x = vq.used.ring[0].get();
assert_eq!(x.id, 1);
assert_eq!(x.len, 0x1000);
}
}
include.rs
use std::collections::{ LinkedList };
use crate::protocol::{ End };
use crate::base::{
Protocol,
Session,
Empty,
Context,
AppendContext,
ContextLens,
PartialSession,
};
use crate::context::{
append_emtpy_slot,
};
use crate::session::end::{
wait,
terminate,
wait_async,
};
use crate::session::cut::{
Cut,
AllRight
};
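/// Cuts `session` into the current context as a fresh channel, passing the
/// index of the newly appended slot to `cont`.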
pub fn include_session
< C, A, B >
( session : Session < A >,
cont : impl FnOnce
( C :: Length )
->
PartialSession <
C :: Appended,
B
>
) ->
PartialSession < C, B >
where
A : Protocol,
B : Protocol,
C : Context,
C : AppendContext < ( A, () ) >,
{
AllRight :: cut ( session, cont )
}
pub fn wait_session
< I, P >
( session1 : Session < End >,
cont : PartialSession < I, P >
) ->
PartialSession < I, P >
where
P : Protocol,
I : Context,
I : AppendContext < (End, ()) >,
I : AppendContext < (Empty, ()) >,
I::Length :
ContextLens <
< I as
AppendContext < (End, ()) >
>::Appended,
End,
Empty,
Target =
< I as
AppendContext < (Empty, ()) >
>::Appended
>
{
include_session ( session1, move | chan | {
wait_async ( chan, async move || {
append_emtpy_slot ( cont )
})
})
}
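/// Waits for every `End` session in `sessions` to terminate before
/// continuing with `cont`.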
pub fn wait_sessions
< I, P >
( sessions :
Vec <
Session < End >
>,
cont : PartialSession < I, P >
) ->
PartialSession < I, P >
where
P : Protocol,
I : AppendContext < (End, ()) >,
I : AppendContext < (Empty, ()) >,
I::Length :
ContextLens <
< I as
AppendContext < (End, ()) >
>::Appended,
End,
Empty,
Target =
< I as
AppendContext < (Empty, ()) >
>::Appended
>
{
  wait_session (
    join_sessions (sessions),
    cont
  )
}
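/// Joins a list of `End` sessions into a single `End` session that
/// terminates once all of them have terminated.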
pub fn join_sessions
( sessions :
Vec <
Session < End >
>
) ->
Session < End >
{
do_join_sessions ( sessions.into_iter().collect() )
}
fn do_join_sessions
( mut sessions :
LinkedList <
Session < End >
>
) ->
Session < End >
{
match sessions.pop_front() {
Some (session) => {
include_session ( session, move | c1 | {
include_session (
do_join_sessions ( sessions ),
move | c2 | {
wait ( c1,
wait ( c2,
terminate ()))
})
})
},
None => {
terminate()
}
}
}
call_fn.rs
#![cfg(not(feature = "no_function"))]
use rhai::{Dynamic, Engine, EvalAltResult, FnPtr, Func, FuncArgs, RegisterFn, Scope, INT};
use std::{any::TypeId, iter::once};
#[test]
fn test_call_fn() -> Result<(), Box<EvalAltResult>> {
let engine = Engine::new();
let mut scope = Scope::new();
scope.push("foo", 42 as INT);
let ast = engine.compile(
r"
fn hello(x, y) {
x + y
}
fn hello(x) {
x *= foo;
foo = 1;
x
}
fn hello() {
41 + foo
}
fn define_var() {
let bar = 21;
bar * 2
}
",
)?;
let r: INT = engine.call_fn(&mut scope, &ast, "hello", (42 as INT, 123 as INT))?;
assert_eq!(r, 165);
let r: INT = engine.call_fn(&mut scope, &ast, "hello", (123 as INT,))?;
assert_eq!(r, 5166);
let r: INT = engine.call_fn(&mut scope, &ast, "hello", ())?;
assert_eq!(r, 42);
let r: INT = engine.call_fn(&mut scope, &ast, "define_var", ())?;
assert_eq!(r, 42);
assert!(!scope.contains("bar"));
assert_eq!(
scope
.get_value::<INT>("foo")
.expect("variable foo should exist"),
1
);
Ok(())
}
struct Options {
pub foo: bool,
pub bar: String,
pub baz: INT,
}
impl FuncArgs for Options {
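    // Custom argument parsing: each field is pushed into the call's argument
    // list in declaration order (foo, bar, baz).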
fn parse<C: Extend<Dynamic>>(self, container: &mut C) {
container.extend(once(self.foo.into()));
container.extend(once(self.bar.into()));
container.extend(once(self.baz.into()));
}
}
#[test]
fn test_call_fn_args() -> Result<(), Box<EvalAltResult>> {
let options = Options {
foo: false,
bar: "world".to_string(),
baz: 42,
};
let engine = Engine::new();
let mut scope = Scope::new();
let ast = engine.compile(
r#"
fn hello(x, y, z) {
if x { "hello " + y } else { y + z }
}
"#,
)?;
let result: String = engine.call_fn(&mut scope, &ast, "hello", options)?;
assert_eq!(result, "world42");
Ok(())
}
#[test]
fn test_call_fn_private() -> Result<(), Box<EvalAltResult>> {
let engine = Engine::new();
let mut scope = Scope::new();
let ast = engine.compile("fn add(x, n) { x + n }")?;
let r: INT = engine.call_fn(&mut scope, &ast, "add", (40 as INT, 2 as INT))?;
assert_eq!(r, 42);
let ast = engine.compile("private fn add(x, n, ) { x + n }")?;
let r: INT = engine.call_fn(&mut scope, &ast, "add", (40 as INT, 2 as INT))?;
assert_eq!(r, 42);
Ok(())
}
#[test]
#[cfg(not(feature = "no_object"))]
fn test_fn_ptr_raw() -> Result<(), Box<EvalAltResult>> {
let mut engine = Engine::new();
#[allow(deprecated)]
engine
.register_fn("mul", |x: &mut INT, y: INT| *x *= y)
.register_raw_fn(
"bar",
&[
TypeId::of::<INT>(),
TypeId::of::<FnPtr>(),
TypeId::of::<INT>(),
],
move |context, args| {
let fp = std::mem::take(args[1]).cast::<FnPtr>();
let value = args[2].clone();
let this_ptr = args.get_mut(0).unwrap();
fp.call_dynamic(context, Some(this_ptr), [value])
},
);
assert_eq!(
engine.eval::<INT>(
r#"
fn foo(x) { this += x; }
let x = 41;
x.bar(Fn("foo"), 1);
x
"#
)?,
42
);
assert_eq!(
engine.eval::<INT>(
r#"
fn foo(x, y) { this += x + y; }
let x = 40;
let v = 1;
x.bar(Fn("foo").curry(v), 1);
x
"#
)?,
42
);
assert_eq!(
engine.eval::<INT>(
r#"
private fn foo(x) { this += x; }
let x = 41;
x.bar(Fn("foo"), 1);
x
"#
)?,
42
);
assert_eq!(
engine.eval::<INT>(
r#"
let x = 21;
x.bar(Fn("mul"), 2);
x
"#
)?,
42
);
Ok(())
}
#[test]
fn test_anonymous_fn() -> Result<(), Box<EvalAltResult>> {
let calc_func = Func::<(INT, INT, INT), INT>::create_from_script(
Engine::new(),
"fn calc(x, y, z,) { (x + y) * z }",
"calc",
)?;
assert_eq!(calc_func(42, 123, 9)?, 1485);
let calc_func = Func::<(INT, String, INT), INT>::create_from_script(
Engine::new(),
"fn calc(x, y, z) { (x + len(y)) * z }",
"calc",
)?;
assert_eq!(calc_func(42, "hello".to_string(), 9)?, 423);
let calc_func = Func::<(INT, String, INT), INT>::create_from_script(
Engine::new(),
"private fn calc(x, y, z) { (x + len(y)) * z }",
"calc",
)?;
assert_eq!(calc_func(42, "hello".to_string(), 9)?, 423);
Ok(())
}
DebuggerPausedMessage.js
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/**
* @unrestricted
*/
Sources.DebuggerPausedMessage = class {
constructor() {
this._element = createElementWithClass('div', 'paused-message flex-none');
const root = UI.createShadowRootWithCoreStyles(this._element, 'sources/debuggerPausedMessage.css');
this._contentElement = root.createChild('div');
}
/**
* @return {!Element}
*/
element() {
return this._element;
}
/**
* @param {string} description
*/
static _descriptionWithoutStack(description) {
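    // Cut the description off at the first stack-frame line ("  at ..."),
    // falling back to dropping everything after the last newline.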
const firstCallFrame = /^\s+at\s/m.exec(description);
return firstCallFrame ? description.substring(0, firstCallFrame.index - 1) :
description.substring(0, description.lastIndexOf('\n'));
}
/**
* @param {!SDK.DebuggerPausedDetails} details
* @return {!Promise<!Element>}
*/
static async _createDOMBreakpointHitMessage(details) {
const messageWrapper = createElement('span');
const domDebuggerModel = details.debuggerModel.target().model(SDK.DOMDebuggerModel);
if (!details.auxData || !domDebuggerModel)
return messageWrapper;
const data = domDebuggerModel.resolveDOMBreakpointData(/** @type {!Object} */ (details.auxData));
if (!data)
return messageWrapper;
const mainElement = messageWrapper.createChild('div', 'status-main');
mainElement.appendChild(UI.Icon.create('smallicon-info', 'status-icon'));
mainElement.appendChild(createTextNode(
String.sprintf('Paused on %s', Sources.DebuggerPausedMessage.BreakpointTypeNouns.get(data.type))));
const subElement = messageWrapper.createChild('div', 'status-sub monospace');
const linkifiedNode = await Common.Linkifier.linkify(data.node);
subElement.appendChild(linkifiedNode);
if (data.targetNode) {
const targetNodeLink = await Common.Linkifier.linkify(data.targetNode);
let message;
if (data.insertion)
message = data.targetNode === data.node ? 'Child %s added' : 'Descendant %s added';
else
message = 'Descendant %s removed';
subElement.appendChild(createElement('br'));
subElement.appendChild(UI.formatLocalized(message, [targetNodeLink]));
}
return messageWrapper;
}
/**
* @param {?SDK.DebuggerPausedDetails} details
* @param {!Bindings.DebuggerWorkspaceBinding} debuggerWorkspaceBinding
* @param {!Bindings.BreakpointManager} breakpointManager
* @return {!Promise}
*/
async render(details, debuggerWorkspaceBinding, breakpointManager) {
this._contentElement.removeChildren();
this._contentElement.hidden = !details;
if (!details)
return;
const status = this._contentElement.createChild('div', 'paused-status');
const errorLike = details.reason === SDK.DebuggerModel.BreakReason.Exception ||
details.reason === SDK.DebuggerModel.BreakReason.PromiseRejection ||
details.reason === SDK.DebuggerModel.BreakReason.Assert || details.reason === SDK.DebuggerModel.BreakReason.OOM;
let messageWrapper;
if (details.reason === SDK.DebuggerModel.BreakReason.DOM) {
messageWrapper = await Sources.DebuggerPausedMessage._createDOMBreakpointHitMessage(details);
} else if (details.reason === SDK.DebuggerModel.BreakReason.EventListener) {
let eventNameForUI = '';
if (details.auxData) {
eventNameForUI =
SDK.domDebuggerManager.resolveEventListenerBreakpointTitle(/** @type {!Object} */ (details.auxData));
}
messageWrapper = buildWrapper(Common.UIString('Paused on event listener'), eventNameForUI);
} else if (details.reason === SDK.DebuggerModel.BreakReason.XHR) {
messageWrapper = buildWrapper(Common.UIString('Paused on XHR or fetch'), details.auxData['url'] || '');
} else if (details.reason === SDK.DebuggerModel.BreakReason.Exception) {
const description = details.auxData['description'] || details.auxData['value'] || '';
const descriptionWithoutStack = Sources.DebuggerPausedMessage._descriptionWithoutStack(description);
messageWrapper = buildWrapper(Common.UIString('Paused on exception'), descriptionWithoutStack, description);
} else if (details.reason === SDK.DebuggerModel.BreakReason.PromiseRejection) {
const description = details.auxData['description'] || details.auxData['value'] || '';
const descriptionWithoutStack = Sources.DebuggerPausedMessage._descriptionWithoutStack(description);
messageWrapper =
buildWrapper(Common.UIString('Paused on promise rejection'), descriptionWithoutStack, description);
} else if (details.reason === SDK.DebuggerModel.BreakReason.Assert) {
messageWrapper = buildWrapper(Common.UIString('Paused on assertion'));
} else if (details.reason === SDK.DebuggerModel.BreakReason.DebugCommand) {
messageWrapper = buildWrapper(Common.UIString('Paused on debugged function'));
} else if (details.reason === SDK.DebuggerModel.BreakReason.OOM) {
messageWrapper = buildWrapper(Common.UIString('Paused before potential out-of-memory crash'));
} else if (details.callFrames.length) {
const uiLocation = debuggerWorkspaceBinding.rawLocationToUILocation(details.callFrames[0].location());
const breakpoint = uiLocation ? breakpointManager.findBreakpoint(uiLocation) : null;
const defaultText = breakpoint ? Common.UIString('Paused on breakpoint') : Common.UIString('Debugger paused');
messageWrapper = buildWrapper(defaultText);
} else {
console.warn(
'ScriptsPanel paused, but callFrames.length is zero.'); // TODO remove this once we understand this case better
}
status.classList.toggle('error-reason', errorLike);
if (messageWrapper)
status.appendChild(messageWrapper);
/**
* @param {string} mainText
* @param {string=} subText
* @param {string=} title
* @return {!Element}
*/
    function buildWrapper(mainText, subText, title) {
const messageWrapper = createElement('span');
const mainElement = messageWrapper.createChild('div', 'status-main');
const icon = UI.Icon.create(errorLike ? 'smallicon-error' : 'smallicon-info', 'status-icon');
mainElement.appendChild(icon);
mainElement.appendChild(createTextNode(mainText));
if (subText) {
const subElement = messageWrapper.createChild('div', 'status-sub monospace');
subElement.textContent = subText;
subElement.title = title || subText;
}
return messageWrapper;
}
}
};
Sources.DebuggerPausedMessage.BreakpointTypeNouns = new Map([
[SDK.DOMDebuggerModel.DOMBreakpoint.Type.SubtreeModified, Common.UIString('subtree modifications')],
[SDK.DOMDebuggerModel.DOMBreakpoint.Type.AttributeModified, Common.UIString('attribute modifications')],
[SDK.DOMDebuggerModel.DOMBreakpoint.Type.NodeRemoved, Common.UIString('node removal')],
]);
fix.rs
//! High-level overview of how `fix` works:
//!
//! The main goal is to run `cargo check` to get rustc to emit JSON
//! diagnostics with suggested fixes that can be applied to the files on the
//! filesystem, and validate that those changes didn't break anything.
//!
//! Cargo begins by launching a `LockServer` thread in the background to
//! listen for network connections to coordinate locking when multiple targets
//! are built simultaneously. It ensures each package has only one fix running
//! at once.
//!
//! The `RustfixDiagnosticServer` is launched in a background thread (in
//! `JobQueue`) to listen for network connections to coordinate displaying
//! messages to the user on the console (so that multiple processes don't try
//! to print at the same time).
//!
//! Cargo begins a normal `cargo check` operation with itself set as a proxy
//! for rustc by setting `primary_unit_rustc` in the build config. When
//! cargo launches rustc to check a crate, it is actually launching itself.
//! The `FIX_ENV` environment variable is set so that cargo knows it is in
//! fix-proxy-mode.
//!
//! Each proxied cargo-as-rustc detects it is in fix-proxy-mode (via `FIX_ENV`
//! environment variable in `main`) and does the following:
//!
//! - Acquire a lock from the `LockServer` from the master cargo process.
//! - Launches the real rustc (`rustfix_and_fix`), looking at the JSON output
//! for suggested fixes.
//! - Uses the `rustfix` crate to apply the suggestions to the files on the
//! file system.
//! - If rustfix fails to apply any suggestions (for example, they are
//! overlapping), but at least some suggestions succeeded, it will try the
//! previous two steps up to 4 times as long as some suggestions succeed.
//! - Assuming there's at least one suggestion applied, and the suggestions
//! applied cleanly, rustc is run again to verify the suggestions didn't
//! break anything. The change will be backed out if it fails (unless
//! `--broken-code` is used).
//! - If there are any warnings or errors, rustc will be run one last time to
//! show them to the user.
use std::collections::{BTreeSet, HashMap, HashSet};
use std::env;
use std::ffi::OsString;
use std::path::{Path, PathBuf};
use std::process::{self, Command, ExitStatus};
use std::str;
use anyhow::{bail, Context, Error};
use cargo_util::{exit_status_to_string, is_simple_exit_code, paths, ProcessBuilder};
use log::{debug, trace, warn};
use rustfix::diagnostics::Diagnostic;
use rustfix::{self, CodeFix};
use crate::core::compiler::RustcTargetData;
use crate::core::resolver::features::{FeatureOpts, FeatureResolver};
use crate::core::resolver::{HasDevUnits, Resolve, ResolveBehavior};
use crate::core::{Edition, MaybePackage, Workspace};
use crate::ops::{self, CompileOptions};
use crate::util::diagnostic_server::{Message, RustfixDiagnosticServer};
use crate::util::errors::CargoResult;
use crate::util::Config;
use crate::util::{existing_vcs_repo, LockServer, LockServerClient};
use crate::{drop_eprint, drop_eprintln};
const FIX_ENV: &str = "__CARGO_FIX_PLZ";
const BROKEN_CODE_ENV: &str = "__CARGO_FIX_BROKEN_CODE";
const EDITION_ENV: &str = "__CARGO_FIX_EDITION";
const IDIOMS_ENV: &str = "__CARGO_FIX_IDIOMS";
pub struct FixOptions {
pub edition: bool,
pub idioms: bool,
pub compile_opts: CompileOptions,
pub allow_dirty: bool,
pub allow_no_vcs: bool,
pub allow_staged: bool,
pub broken_code: bool,
}
pub fn fix(ws: &Workspace<'_>, opts: &mut FixOptions) -> CargoResult<()> {
check_version_control(ws.config(), opts)?;
if opts.edition {
check_resolver_change(ws, opts)?;
}
// Spin up our lock server, which our subprocesses will use to synchronize fixes.
let lock_server = LockServer::new()?;
let mut wrapper = ProcessBuilder::new(env::current_exe()?);
wrapper.env(FIX_ENV, lock_server.addr().to_string());
let _started = lock_server.start()?;
opts.compile_opts.build_config.force_rebuild = true;
if opts.broken_code {
wrapper.env(BROKEN_CODE_ENV, "1");
}
if opts.edition {
wrapper.env(EDITION_ENV, "1");
}
if opts.idioms {
wrapper.env(IDIOMS_ENV, "1");
}
*opts
.compile_opts
.build_config
.rustfix_diagnostic_server
.borrow_mut() = Some(RustfixDiagnosticServer::new()?);
if let Some(server) = opts
.compile_opts
.build_config
.rustfix_diagnostic_server
.borrow()
.as_ref()
{
server.configure(&mut wrapper);
}
let rustc = ws.config().load_global_rustc(Some(ws))?;
wrapper.arg(&rustc.path);
    // Primary crates are compiled using a cargo subprocess to do the extra
    // work of applying fixes and repeating the build until there are no more
    // changes to apply.
opts.compile_opts.build_config.primary_unit_rustc = Some(wrapper);
ops::compile(ws, &opts.compile_opts)?;
Ok(())
}
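/// Refuses to continue unless the package is inside a VCS checkout with a
/// clean working tree (or the relevant `--allow-*` flags were passed), since
/// `cargo fix` rewrites files in place.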
fn check_version_control(config: &Config, opts: &FixOptions) -> CargoResult<()> {
if opts.allow_no_vcs {
return Ok(());
}
if !existing_vcs_repo(config.cwd(), config.cwd()) {
bail!(
"no VCS found for this package and `cargo fix` can potentially \
perform destructive changes; if you'd like to suppress this \
error pass `--allow-no-vcs`"
)
}
if opts.allow_dirty && opts.allow_staged {
return Ok(());
}
let mut dirty_files = Vec::new();
let mut staged_files = Vec::new();
if let Ok(repo) = git2::Repository::discover(config.cwd()) {
let mut repo_opts = git2::StatusOptions::new();
repo_opts.include_ignored(false);
for status in repo.statuses(Some(&mut repo_opts))?.iter() {
if let Some(path) = status.path() {
match status.status() {
git2::Status::CURRENT => (),
git2::Status::INDEX_NEW
| git2::Status::INDEX_MODIFIED
| git2::Status::INDEX_DELETED
| git2::Status::INDEX_RENAMED
| git2::Status::INDEX_TYPECHANGE => {
if !opts.allow_staged {
staged_files.push(path.to_string())
}
}
_ => {
if !opts.allow_dirty {
dirty_files.push(path.to_string())
}
}
};
}
}
}
if dirty_files.is_empty() && staged_files.is_empty() {
return Ok(());
}
let mut files_list = String::new();
for file in dirty_files {
files_list.push_str(" * ");
files_list.push_str(&file);
files_list.push_str(" (dirty)\n");
}
for file in staged_files {
files_list.push_str(" * ");
files_list.push_str(&file);
files_list.push_str(" (staged)\n");
}
bail!(
"the working directory of this package has uncommitted changes, and \
`cargo fix` can potentially perform destructive changes; if you'd \
like to suppress this error pass `--allow-dirty`, `--allow-staged`, \
or commit the changes to these files:\n\
\n\
{}\n\
",
files_list
);
}
fn check_resolver_change(ws: &Workspace<'_>, opts: &FixOptions) -> CargoResult<()> {
let root = ws.root_maybe();
match root {
MaybePackage::Package(root_pkg) => {
if root_pkg.manifest().resolve_behavior().is_some() {
// If explicitly specified by the user, no need to check.
return Ok(());
}
// Only trigger if updating the root package from 2018.
let pkgs = opts.compile_opts.spec.get_packages(ws)?;
if !pkgs.iter().any(|&pkg| pkg == root_pkg) {
// The root is not being migrated.
return Ok(());
}
if root_pkg.manifest().edition() != Edition::Edition2018 {
// V1 to V2 only happens on 2018 to 2021.
return Ok(());
}
}
MaybePackage::Virtual(_vm) => {
// Virtual workspaces don't have a global edition to set (yet).
return Ok(());
}
}
// 2018 without `resolver` set must be V1
assert_eq!(ws.resolve_behavior(), ResolveBehavior::V1);
let specs = opts.compile_opts.spec.to_package_id_specs(ws)?;
let target_data = RustcTargetData::new(ws, &opts.compile_opts.build_config.requested_kinds)?;
// HasDevUnits::No because that may uncover more differences.
// This is not the same as what `cargo fix` is doing, since it is doing
// `--all-targets` which includes dev dependencies.
let ws_resolve = ops::resolve_ws_with_opts(
ws,
&target_data,
&opts.compile_opts.build_config.requested_kinds,
&opts.compile_opts.cli_features,
&specs,
HasDevUnits::No,
crate::core::resolver::features::ForceAllTargets::No,
)?;
let feature_opts = FeatureOpts::new_behavior(ResolveBehavior::V2, HasDevUnits::No);
let v2_features = FeatureResolver::resolve(
ws,
&target_data,
&ws_resolve.targeted_resolve,
&ws_resolve.pkg_set,
&opts.compile_opts.cli_features,
&specs,
&opts.compile_opts.build_config.requested_kinds,
feature_opts,
)?;
let differences = v2_features.compare_legacy(&ws_resolve.resolved_features);
if differences.is_empty() {
// Nothing is different, nothing to report.
return Ok(());
}
let config = ws.config();
config.shell().note(
"Switching to Edition 2021 will enable the use of the version 2 feature resolver in Cargo.",
)?;
drop_eprintln!(
config,
"This may cause some dependencies to be built with fewer features enabled than previously."
);
drop_eprintln!(
config,
"More information about the resolver changes may be found \
at https://doc.rust-lang.org/nightly/edition-guide/rust-2021/default-cargo-resolver.html"
);
drop_eprintln!(
config,
"When building the following dependencies, \
the given features will no longer be used:\n"
);
for ((pkg_id, for_host), removed) in differences {
drop_eprint!(config, " {}", pkg_id);
if for_host {
drop_eprint!(config, " (as host dependency)");
}
drop_eprint!(config, ": ");
let joined: Vec<_> = removed.iter().map(|s| s.as_str()).collect();
drop_eprintln!(config, "{}", joined.join(", "));
}
drop_eprint!(config, "\n");
report_maybe_diesel(config, &ws_resolve.targeted_resolve)?;
Ok(())
}
fn report_maybe_diesel(config: &Config, resolve: &Resolve) -> CargoResult<()> {
if resolve
.iter()
.any(|pid| pid.name() == "diesel" && pid.version().major == 1)
&& resolve.iter().any(|pid| pid.name() == "diesel_migrations")
{
config.shell().note(
"\
This project appears to use both diesel and diesel_migrations. These packages have
a known issue where the build may fail due to the version 2 resolver preventing
feature unification between those two packages. See
<https://github.com/rust-lang/cargo/issues/9450> for some potential workarounds.
",
)?;
}
Ok(())
}
/// Entry point for `cargo` running as a proxy for `rustc`.
///
/// This is called every time `cargo` is run to check if it is in proxy mode.
///
/// Returns `false` if `fix` is not being run (not in proxy mode). Returns
/// `true` if in `fix` proxy mode, and the fix was complete without any
/// warnings or errors. If there are warnings or errors, this does not return,
/// and the process exits with the corresponding `rustc` exit code.
pub fn fix_maybe_exec_rustc(config: &Config) -> CargoResult<bool> {
let lock_addr = match env::var(FIX_ENV) {
Ok(s) => s,
Err(_) => return Ok(false),
};
let args = FixArgs::get()?;
trace!("cargo-fix as rustc got file {:?}", args.file);
let workspace_rustc = std::env::var("RUSTC_WORKSPACE_WRAPPER")
.map(PathBuf::from)
.ok();
let rustc = ProcessBuilder::new(&args.rustc).wrapped(workspace_rustc.as_ref());
trace!("start rustfixing {:?}", args.file);
let fixes = rustfix_crate(&lock_addr, &rustc, &args.file, &args, config)?;
// Ok now we have our final goal of testing out the changes that we applied.
// If these changes went awry and actually started to cause the crate to
// *stop* compiling then we want to back them out and continue to print
// warnings to the user.
//
// If we didn't actually make any changes then we can immediately execute the
// new rustc, and otherwise we capture the output to hide it in the scenario
// that we have to back it all out.
if !fixes.files.is_empty() {
let mut cmd = rustc.build_command();
args.apply(&mut cmd, config);
cmd.arg("--error-format=json");
let output = cmd.output().context("failed to spawn rustc")?;
if output.status.success() {
for (path, file) in fixes.files.iter() {
Message::Fixed {
file: path.clone(),
fixes: file.fixes_applied,
}
.post()?;
}
}
// If we succeeded then we'll want to commit to the changes we made, if
// any. If stderr is empty then there's no need for the final exec at
// the end, we just bail out here.
if output.status.success() && output.stderr.is_empty() {
return Ok(true);
}
// Otherwise, if our rustc just failed, then that means that we broke the
// user's code with our changes. Back out everything and fall through
// below to recompile again.
if !output.status.success() {
if env::var_os(BROKEN_CODE_ENV).is_none() {
for (path, file) in fixes.files.iter() {
paths::write(path, &file.original_code)?;
}
}
log_failed_fix(&output.stderr, output.status)?;
}
}
    // This final fall-through handles multiple cases:
// - If the fix failed, show the original warnings and suggestions.
// - If `--broken-code`, show the error messages.
// - If the fix succeeded, show any remaining warnings.
let mut cmd = rustc.build_command();
args.apply(&mut cmd, config);
for arg in args.format_args {
// Add any json/error format arguments that Cargo wants. This allows
// things like colored output to work correctly.
cmd.arg(arg);
}
exit_with(cmd.status().context("failed to spawn rustc")?);
}
#[derive(Default)]
struct FixedCrate {
files: HashMap<String, FixedFile>,
}
struct FixedFile {
errors_applying_fixes: Vec<String>,
fixes_applied: u32,
original_code: String,
}
/// Attempts to apply fixes to a single crate.
///
/// This runs `rustc` (possibly multiple times) to gather suggestions from the
/// compiler and applies them to the files on disk.
fn rustfix_crate(
lock_addr: &str,
rustc: &ProcessBuilder,
filename: &Path,
args: &FixArgs,
config: &Config,
) -> Result<FixedCrate, Error> {
args.check_edition_and_send_status(config)?;
// First up, we want to make sure that each crate is only checked by one
// process at a time. If two invocations concurrently check a crate then
// it's likely to corrupt it.
//
// Historically this used per-source-file locking, then per-package
// locking. It now uses a single, global lock as some users do things like
// #[path] or include!() of shared files between packages. Serializing
// makes it slower, but is the only safe way to prevent concurrent
// modification.
let _lock = LockServerClient::lock(&lock_addr.parse()?, "global")?;
// Next up, this is a bit suspicious, but we *iteratively* execute rustc and
// collect suggestions to feed to rustfix. Once we hit our limit of times to
// execute rustc or we appear to be reaching a fixed point we stop running
// rustc.
//
// This is currently done to handle code like:
//
// ::foo::<::Bar>();
//
// where there are two fixes to happen here: `crate::foo::<crate::Bar>()`.
    // The spans for these two suggestions are overlapping and it's difficult in
// the compiler to **not** have overlapping spans here. As a result, a naive
// implementation would feed the two compiler suggestions for the above fix
// into `rustfix`, but one would be rejected because it overlaps with the
// other.
//
// In this case though, both suggestions are valid and can be automatically
// applied! To handle this case we execute rustc multiple times, collecting
// fixes each time we do so. Along the way we discard any suggestions that
// failed to apply, assuming that they can be fixed the next time we run
// rustc.
//
// Naturally, we want a few protections in place here though to avoid looping
// forever or otherwise losing data. To that end we have a few termination
// conditions:
//
// * Do this whole process a fixed number of times. In theory we probably
// need an infinite number of times to apply fixes, but we're not gonna
// sit around waiting for that.
// * If it looks like a fix genuinely can't be applied we need to bail out.
// Detect this when a fix fails to get applied *and* no suggestions were
// successfully applied to the same file. In that case it looks like we
// definitely can't make progress, so bail out.
let mut fixes = FixedCrate::default();
let mut last_fix_counts = HashMap::new();
let iterations = env::var("CARGO_FIX_MAX_RETRIES")
.ok()
.and_then(|n| n.parse().ok())
.unwrap_or(4);
for _ in 0..iterations {
last_fix_counts.clear();
for (path, file) in fixes.files.iter_mut() {
last_fix_counts.insert(path.clone(), file.fixes_applied);
// We'll generate new errors below.
file.errors_applying_fixes.clear();
}
rustfix_and_fix(&mut fixes, rustc, filename, args, config)?;
let mut progress_yet_to_be_made = false;
for (path, file) in fixes.files.iter_mut() {
if file.errors_applying_fixes.is_empty() {
continue;
}
// If anything was successfully fixed *and* there's at least one
// error, then assume the error was spurious and we'll try again on
// the next iteration.
if file.fixes_applied != *last_fix_counts.get(path).unwrap_or(&0) {
progress_yet_to_be_made = true;
}
}
if !progress_yet_to_be_made {
break;
}
}
// Any errors still remaining at this point need to be reported as probably
// bugs in Cargo and/or rustfix.
for (path, file) in fixes.files.iter_mut() {
for error in file.errors_applying_fixes.drain(..) {
Message::ReplaceFailed {
file: path.clone(),
message: error,
}
.post()?;
}
}
Ok(fixes)
}
/// Executes `rustc` to apply one round of suggestions to the crate in question.
///
/// This will fill in the `fixes` map with original code, suggestions applied,
/// and any errors encountered while fixing files.
fn rustfix_and_fix(
fixes: &mut FixedCrate,
rustc: &ProcessBuilder,
filename: &Path,
args: &FixArgs,
config: &Config,
) -> Result<(), Error> {
// If not empty, filter by these lints.
// TODO: implement a way to specify this.
let only = HashSet::new();
let mut cmd = rustc.build_command();
cmd.arg("--error-format=json");
args.apply(&mut cmd, config);
let output = cmd.output().with_context(|| {
format!(
"failed to execute `{}`",
rustc.get_program().to_string_lossy()
)
})?;
// If rustc didn't succeed for whatever reasons then we're very likely to be
// looking at otherwise broken code. Let's not make things accidentally
// worse by applying fixes where a bug could cause *more* broken code.
// Instead, punt upwards which will reexec rustc over the original code,
// displaying pretty versions of the diagnostics we just read out.
if !output.status.success() && env::var_os(BROKEN_CODE_ENV).is_none() {
debug!(
"rustfixing `{:?}` failed, rustc exited with {:?}",
filename,
output.status.code()
);
return Ok(());
}
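// "YOLO mode" applies every suggestion rustc emits, not just the ones
// marked machine-applicable; the default is the conservative filter.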
let fix_mode = env::var_os("__CARGO_FIX_YOLO")
.map(|_| rustfix::Filter::Everything)
.unwrap_or(rustfix::Filter::MachineApplicableOnly);
// Sift through the output of the compiler to look for JSON messages
// indicating fixes that we can apply.
let stderr = str::from_utf8(&output.stderr).context("failed to parse rustc stderr as UTF-8")?;
let suggestions = stderr
.lines()
.filter(|x| !x.is_empty())
.inspect(|y| trace!("line: {}", y))
// Parse each line of stderr, ignoring errors, as they may not all be JSON.
.filter_map(|line| serde_json::from_str::<Diagnostic>(line).ok())
// From each diagnostic, try to extract suggestions from rustc.
.filter_map(|diag| rustfix::collect_suggestions(&diag, &only, fix_mode));
// Collect suggestions by file so we can apply them one at a time later.
let mut file_map = HashMap::new();
let mut num_suggestion = 0;
for suggestion in suggestions {
trace!("suggestion");
// Make sure we've got a file associated with this suggestion and all
// snippets point to the same file. Right now it's not clear what
// we would do with multiple files.
let file_names = suggestion
.solutions
.iter()
.flat_map(|s| s.replacements.iter())
.map(|r| &r.snippet.file_name);
let file_name = if let Some(file_name) = file_names.clone().next() {
file_name.clone()
} else {
trace!("rejecting as it has no solutions {:?}", suggestion);
continue;
};
if !file_names.clone().all(|f| f == &file_name) {
trace!("rejecting as it changes multiple files: {:?}", suggestion);
continue;
}
file_map
.entry(file_name)
.or_insert_with(Vec::new)
.push(suggestion);
num_suggestion += 1;
}
debug!(
"collected {} suggestions for `{}`",
num_suggestion,
filename.display(),
);
for (file, suggestions) in file_map {
// Attempt to read the source code for this file. If this fails then
// that'd be pretty surprising, so log a message and otherwise keep
// going.
let code = match paths::read(file.as_ref()) {
Ok(s) => s,
Err(e) => {
warn!("failed to read `{}`: {}", file, e);
continue;
}
};
let num_suggestions = suggestions.len();
debug!("applying {} fixes to {}", num_suggestions, file);
// If this file doesn't already exist then we just read the original
// code, so save it. If the file already exists then the original code
// doesn't need to be updated as we've just read an interim state with
// some fixes but perhaps not all.
let fixed_file = fixes
.files
.entry(file.clone())
.or_insert_with(|| FixedFile {
errors_applying_fixes: Vec::new(),
fixes_applied: 0,
original_code: code.clone(),
});
let mut fixed = CodeFix::new(&code);
// As mentioned above in `rustfix_crate`, we don't immediately warn
// about suggestions that fail to apply here, and instead we save them
// off for later processing.
for suggestion in suggestions.iter().rev() {
match fixed.apply(suggestion) {
Ok(()) => fixed_file.fixes_applied += 1,
Err(e) => fixed_file.errors_applying_fixes.push(e.to_string()),
}
}
let new_code = fixed.finish()?;
paths::write(&file, new_code)?;
}
Ok(())
}
fn exit_with(status: ExitStatus) -> ! {
#[cfg(unix)]
{
use std::io::Write;
use std::os::unix::prelude::*;
if let Some(signal) = status.signal() {
drop(writeln!(
std::io::stderr().lock(),
"child failed with signal `{}`",
signal
));
process::exit(2);
}
}
process::exit(status.code().unwrap_or(3));
}
fn log_failed_fix(stderr: &[u8], status: ExitStatus) -> Result<(), Error> {
let stderr = str::from_utf8(stderr).context("failed to parse rustc stderr as utf-8")?;
let diagnostics = stderr
.lines()
.filter(|x| !x.is_empty())
.filter_map(|line| serde_json::from_str::<Diagnostic>(line).ok());
let mut files = BTreeSet::new();
let mut errors = Vec::new();
for diagnostic in diagnostics {
errors.push(diagnostic.rendered.unwrap_or(diagnostic.message));
for span in diagnostic.spans.into_iter() {
files.insert(span.file_name);
}
}
// Include any abnormal messages (like an ICE or whatever).
errors.extend(
stderr
.lines()
.filter(|x| !x.starts_with('{'))
.map(|x| x.to_string()),
);
let mut krate = None;
let mut prev_dash_dash_krate_name = false;
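// Recover the crate name by scanning our own argv for `--crate-name <name>`
// so the failure message can say which crate broke.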
for arg in env::args() {
if prev_dash_dash_krate_name {
krate = Some(arg.clone());
}
if arg == "--crate-name" {
prev_dash_dash_krate_name = true;
} else {
prev_dash_dash_krate_name = false;
}
}
let files = files.into_iter().collect();
let abnormal_exit = if status.code().map_or(false, is_simple_exit_code) {
None
} else {
Some(exit_status_to_string(status))
};
Message::FixFailed {
files,
krate,
errors,
abnormal_exit,
}
.post()?;
Ok(())
}
/// Various command-line options and settings used when `cargo` is running as
/// a proxy for `rustc` during the fix operation.
struct FixArgs {
/// This is the `.rs` file that is being fixed.
file: PathBuf,
/// If `--edition` is used to migrate to the next edition, this is the
/// edition we are migrating towards.
prepare_for_edition: Option<Edition>,
/// `true` if `--edition-idioms` is enabled.
idioms: bool,
/// The current edition.
///
/// `None` if on 2015.
enabled_edition: Option<Edition>,
/// Other command-line arguments not reflected by other fields in
/// `FixArgs`.
other: Vec<OsString>,
/// Path to the `rustc` executable.
rustc: PathBuf,
/// Console output flags (`--error-format`, `--json`, etc.).
///
/// The normal fix procedure always uses `--json`, so it overrides what
/// Cargo normally passes when applying fixes. When displaying warnings or
/// errors, it will use these flags.
format_args: Vec<String>,
}
impl FixArgs {
fn get() -> Result<FixArgs, Error> {
let rustc = env::args_os()
.nth(1)
.map(PathBuf::from)
.ok_or_else(|| anyhow::anyhow!("expected rustc as first argument"))?;
let mut file = None;
let mut enabled_edition = None;
let mut other = Vec::new();
let mut format_args = Vec::new();
for arg in env::args_os().skip(2) {
let path = PathBuf::from(arg);
if path.extension().and_then(|s| s.to_str()) == Some("rs") && path.exists() {
file = Some(path);
continue;
}
if let Some(s) = path.to_str() {
if let Some(edition) = s.strip_prefix("--edition=") {
enabled_edition = Some(edition.parse()?);
continue;
}
if s.starts_with("--error-format=") || s.starts_with("--json=") {
// Cargo may add error-format in some cases, but `cargo
// fix` wants to add its own.
format_args.push(s.to_string());
continue;
}
}
other.push(path.into());
}
let file = file.ok_or_else(|| anyhow::anyhow!("could not find .rs file in rustc args"))?;
let idioms = env::var(IDIOMS_ENV).is_ok();
let prepare_for_edition = env::var(EDITION_ENV).ok().map(|_| {
enabled_edition
.unwrap_or(Edition::Edition2015)
.saturating_next()
});
Ok(FixArgs {
file,
prepare_for_edition,
idioms,
enabled_edition,
other,
rustc,
format_args,
})
}
fn apply(&self, cmd: &mut Command, config: &Config) {
cmd.arg(&self.file);
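// Cap all lints at `warn` so that flags like `-D warnings` don't turn
// pre-existing diagnostics into hard errors while we apply fixes.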
cmd.args(&self.other).arg("--cap-lints=warn");
if let Some(edition) = self.enabled_edition {
cmd.arg("--edition").arg(edition.to_string());
if self.idioms && edition.supports_idiom_lint() {
cmd.arg(format!("-Wrust-{}-idioms", edition));
}
}
if let Some(edition) = self.prepare_for_edition {
if edition.supports_compat_lint() {
if config.nightly_features_allowed {
cmd.arg("--force-warn")
.arg(format!("rust-{}-compatibility", edition))
.arg("-Zunstable-options");
} else {
cmd.arg("-W").arg(format!("rust-{}-compatibility", edition));
}
}
}
}
/// Validates the edition, and sends a message indicating what is being
/// done.
fn
|
(&self, config: &Config) -> CargoResult<()> {
let to_edition = match self.prepare_for_edition {
Some(s) => s,
None => {
return Message::Fixing {
file: self.file.display().to_string(),
}
.post();
}
};
// Unfortunately determining which cargo targets are being built
// isn't easy, and each target can be a different edition. The
// cargo-as-rustc fix wrapper doesn't know anything about the
// workspace, so it can't check for the `cargo-features` unstable
// opt-in. As a compromise, this just restricts to the nightly
// toolchain.
//
// Unfortunately this results in a pretty poor error message when
// multiple jobs run in parallel (the error appears multiple
// times). Hopefully this doesn't happen often in practice.
if !to_edition.is_stable() && !config.nightly_features_allowed {
bail!(
"cannot migrate {} to edition {to_edition}\n\
Edition {to_edition} is unstable and not allowed in this release, \
consider trying the nightly release channel.",
self.file.display(),
to_edition = to_edition
);
}
let from_edition = self.enabled_edition.unwrap_or(Edition::Edition2015);
if from_edition == to_edition {
Message::EditionAlreadyEnabled {
file: self.file.display().to_string(),
edition: to_edition,
}
.post()
} else {
Message::Migrating {
file: self.file.display().to_string(),
from_edition,
to_edition,
}
.post()
}
}
}
|
check_edition_and_send_status
|
writer.rs
|
use std::io;
use crate::fastfield::serializer::FastFieldSerializer;
use crate::schema::{Document, Field, Value};
use crate::DocId;
use crate::{
fastfield::serializer::CompositeFastFieldSerializer, indexer::doc_id_mapping::DocIdMapping,
};
/// Writer for byte array (as in, any number of bytes per document) fast fields
///
/// This `BytesFastFieldWriter` is only useful for advanced users.
/// The normal way to get your associated bytes in your index
/// is to
/// - declare your field with fast set to `Cardinality::SingleValue`
/// in your schema
/// - add your document by calling `.add_document(...)` with the bytes associated with the field.
///
/// The `BytesFastFieldWriter` can be acquired from the
/// fast field writer by calling
/// [`.get_bytes_writer(...)`](./struct.FastFieldsWriter.html#method.get_bytes_writer).
///
/// Once acquired, writing is done by calling `.add_document_val(&[u8])`
/// once per document, even if there are no bytes associated with it.
pub struct BytesFastFieldWriter {
field: Field,
vals: Vec<u8>,
doc_index: Vec<u64>,
}
impl BytesFastFieldWriter {
|
BytesFastFieldWriter {
field,
vals: Vec::new(),
doc_index: Vec::new(),
}
}
/// The memory used (including child allocations)
pub fn mem_usage(&self) -> usize {
self.vals.capacity() + self.doc_index.capacity() * std::mem::size_of::<u64>()
}
/// Access the field associated to the `BytesFastFieldWriter`
pub fn field(&self) -> Field {
self.field
}
/// Finalize the current document.
pub(crate) fn next_doc(&mut self) {
self.doc_index.push(self.vals.len() as u64);
}
/// Shift to the next document and add all of the
/// matching field values present in the document.
pub fn add_document(&mut self, doc: &Document) {
self.next_doc();
for field_value in doc.get_all(self.field) {
if let Value::Bytes(ref bytes) = field_value {
self.vals.extend_from_slice(bytes);
return;
}
}
}
/// Register the bytes associated to a document.
///
/// The method returns the `DocId` of the document that was
/// just written.
pub fn add_document_val(&mut self, val: &[u8]) -> DocId {
let doc = self.doc_index.len() as DocId;
self.next_doc();
self.vals.extend_from_slice(val);
doc
}
/// Returns an iterator over values per doc_id in ascending doc_id order.
///
/// Normally the order is simply iterating self.doc_index.
/// With doc_id_map it accounts for the new mapping, returning values in the order of the
/// new doc_ids.
fn get_ordered_values<'a: 'b, 'b>(
&'a self,
doc_id_map: Option<&'b DocIdMapping>,
) -> impl Iterator<Item = &'b [u8]> {
let doc_id_iter = if let Some(doc_id_map) = doc_id_map {
Box::new(doc_id_map.iter_old_doc_ids().cloned()) as Box<dyn Iterator<Item = u32>>
} else {
Box::new(self.doc_index.iter().enumerate().map(|el| el.0 as u32))
as Box<dyn Iterator<Item = u32>>
};
doc_id_iter.map(move |doc_id| self.get_values_for_doc_id(doc_id))
}
/// Returns all values for a given doc_id.
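///
/// Layout sketch: after `add_document_val(b"ab")`, `add_document_val(b"")`
/// and `add_document_val(b"cde")`, `doc_index` is `[0, 2, 2]` and `vals` is
/// `b"abcde"`; doc `i`'s bytes are `vals[doc_index[i]..doc_index[i + 1]]`,
/// falling back to `vals.len()` as the end offset for the last doc.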
fn get_values_for_doc_id(&self, doc_id: u32) -> &[u8] {
let start_pos = self.doc_index[doc_id as usize] as usize;
let end_pos = self
.doc_index
.get(doc_id as usize + 1)
.cloned()
.unwrap_or(self.vals.len() as u64) as usize; // special case, last doc_id has no offset information
&self.vals[start_pos..end_pos]
}
/// Serializes the fast field values by pushing them to the `FastFieldSerializer`.
pub fn serialize(
&self,
serializer: &mut CompositeFastFieldSerializer,
doc_id_map: Option<&DocIdMapping>,
) -> io::Result<()> {
// writing the offset index
let mut doc_index_serializer =
serializer.new_u64_fast_field_with_idx(self.field, 0, self.vals.len() as u64, 0)?;
let mut offset = 0;
for vals in self.get_ordered_values(doc_id_map) {
doc_index_serializer.add_val(offset)?;
offset += vals.len() as u64;
}
doc_index_serializer.add_val(self.vals.len() as u64)?;
doc_index_serializer.close_field()?;
// writing the values themselves
let mut value_serializer = serializer.new_bytes_fast_field_with_idx(self.field, 1);
// The `else` branch could be removed, but writing the whole buffer directly is faster (difference not benchmarked)
if let Some(doc_id_map) = doc_id_map {
for vals in self.get_ordered_values(Some(doc_id_map)) {
// sort values in case of remapped doc_ids?
value_serializer.write_all(vals)?;
}
} else {
value_serializer.write_all(&self.vals)?;
}
Ok(())
}
}
|
/// Creates a new `BytesFastFieldWriter`
pub fn new(field: Field) -> Self {
|
ratings-counter.py
|
from pyspark import SparkConf, SparkContext
import collections
conf = SparkConf().setMaster("local").setAppName("RatingsHistogram")
sc = SparkContext(conf=conf)
|
lines = sc.textFile("D:/celebal/resources/ml-100k/u.data")
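# Each line of the MovieLens 100k u.data file is tab-separated:
# userID, movieID, rating, timestamp — so split()[2] below is the rating.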
ratings = lines.map(lambda x: x.split()[2])
result = ratings.countByValue()
sortedResults = collections.OrderedDict(sorted(result.items()))
for key, value in sortedResults.items():
print("%s %i" % (key, value))
| |
script.ts
|
declare var Chart: typeof import('chart.js');
import { animateNumber, formatNumber, getRank } from './utils/Utils';
import {
getPlayerArtifacts,
getPlayerMoves,
getPlayerPlanets,
getLeaderBoard,
} from './utils/GraphQueries';
import type { Planet, Artifact } from './utils/GraphQueries';
const createPlanetLevelsGraph = (playerPlanets: Planet[]) => {
const planetTypesByLevel: { [key: string]: number[] } = {};
const planetTypes = ['PLANET', 'SILVER_MINE', 'RUINS', 'TRADING_POST', 'SILVER_BANK'];
for (const planetType of planetTypes) {
const planetsWithType = playerPlanets.filter((p) => p.planetType === planetType);
const byLevel: { [key: number]: number } = {};
for (let i = 0; i <= 9; i++) {
byLevel[i] = planetsWithType.filter((p) => p.planetLevel === i).length;
}
planetTypesByLevel[planetType] = Object.values(byLevel);
}
const canvas = <HTMLCanvasElement>document.querySelector('#planets-by-level canvas');
const ctx = canvas.getContext('2d');
if (!ctx) throw new Error('Failed to get ctx');
return new Chart.Chart(ctx, {
type: 'bar',
data: {
labels: [...Array(10).keys()].map((i) => `Level ${i}`),
datasets: Object.values(planetTypesByLevel).map((planets, i) => ({
data: planets,
backgroundColor: [
'#ff5100',
'rgb(255, 221, 48)',
'rgb(193, 60, 255)',
'rgb(20, 70, 200)',
'#f30304',
][i],
label: ['Planet', 'Asteroid Field', 'Foundry', 'Spacetime Rip', 'Quasar'][i],
})),
},
options: {
scales: { x: { stacked: true }, y: { stacked: true } },
plugins: {
tooltip: {
// @ts-ignore
position: 'middle',
},
},
},
});
};
const createArtifactsGraph = (artifacts: Artifact[]) => {
const canvas = <HTMLCanvasElement>document.querySelector('#artifacts-by-rarity canvas');
const artifactRarities = ['COMMON', 'RARE', 'EPIC', 'LEGENDARY', 'MYTHIC'];
const bgColors: { [type: string]: string } = {
MONOLITH: '#d611ea',
COLOSSUS: '#ee6194',
SPACESHIP: '#ff5200',
PYRAMID: '#ff8145',
WORMHOLE: '#28939b',
PLANETARYSHIELD: '#9c81ee',
PHOTOIDCANNON: '#ff7dff',
BLOOMFILTER: '#4baf37',
BLACKDOMAIN: '#473f38',
};
const artifactTypes = Object.keys(bgColors);
const ctx = canvas.getContext('2d');
if (!ctx) throw new Error('Failed to get ctx');
return new Chart.Chart(ctx, {
type: 'bar',
data: {
labels: ['Common', 'Rare', 'Epic', 'Legendary', 'Mythic'],
datasets: artifactTypes.map((artifactType) => ({
|
artifacts.filter((a) => a.artifactType === artifactType && a.rarity === r)
.length
),
backgroundColor: bgColors[artifactType],
})),
},
options: {
scales: { x: { stacked: true }, y: { stacked: true } },
plugins: {
tooltip: {
// @ts-ignore
position: 'middle',
},
},
},
});
};
// populating the data in the grid
const calculateEnergyCap = (playerPlanets: Planet[]) => {
const totalEnergyCapContainer = document.getElementById('total-energy-cap');
if (!totalEnergyCapContainer) return;
const totalEnergyCap = playerPlanets.reduce((a, b) => a + b.milliEnergyCap / 1000, 0);
animateNumber(totalEnergyCapContainer, totalEnergyCap, (i) => formatNumber(Math.round(i)));
};
const calculateAllArtifacts = async (address: string) => {
const artifactsAmountContainer = document.getElementById('total-artifacts-amount');
if (!artifactsAmountContainer) return;
const playerArtifacts = await getPlayerArtifacts(address);
animateNumber(artifactsAmountContainer, playerArtifacts.length, Math.round);
};
const calculateAmountOfMoves = async (address: string) => {
const movesAmountContainer = document.getElementById('total-moves-made');
if (!movesAmountContainer) return;
const playerMoves = await getPlayerMoves(address);
animateNumber(movesAmountContainer, playerMoves.length, Math.round);
};
const calculateRank = async (address: string) => {
const rankContainer = document.getElementById('rank');
if (!rankContainer) return;
const leaderBoard = await getLeaderBoard({ major: 6, minor: 4 });
const { rank } = getRank(address, leaderBoard);
rankContainer.innerText = rank !== -1 ? rank.toString() : 'none';
};
// Main Script
// Property 'family' does not exist on type
// '(ctx: ScriptableContext<keyof ChartTypeRegistry>, options: AnyObject) => Partial<FontSpec> | undefined'
// ts(2339)
// @ts-ignore ???
Chart.Chart.defaults.font.family = "'IBM Plex Mono', monospace";
Chart.Chart.defaults.aspectRatio = 3;
Chart.Chart.defaults.borderColor = 'rgb(232, 230, 227, 0.1)';
Chart.Chart.defaults.color = 'rgb(232, 230, 227)';
Chart.Chart.defaults.plugins.legend.display = false;
Chart.Tooltip.positioners.middle = (items, eventPosition) => {
if (items.length !== 1) {
// For more than 1 item, just show at the nearest
return Chart.Tooltip.positioners.average(items, eventPosition);
}
const el = <any>items[0].element;
let xPos = 0;
let yPos = 0;
if (el && el.hasValue()) {
const props = el.getProps(['x', 'y', 'horizontal', 'base']);
const { base, horizontal, x, y } = props;
if (horizontal) {
xPos = (base + x) / 2;
yPos = y;
} else {
xPos = x;
yPos = (base + y) / 2;
}
}
return {
x: xPos,
y: yPos,
};
};
const urlParams = new URLSearchParams(window.location.search);
const player =
urlParams.get('player')?.toLowerCase() || '0x8459c6bebe2d53b4dcaa71499a1ae4274c0e4df9';
const mainInput = <HTMLInputElement>document.getElementById('player-input');
mainInput.addEventListener('input', () => {
// `customError` is set when a custom validity error is active on the input.
const { customError: hasCustomError } = mainInput.validity;
if (hasCustomError) {
mainInput.title = 'Invalid address!';
}
});
mainInput.value = player;
(async () => {
const playerPlanets = await getPlayerPlanets(mainInput.value);
const playerArtifacts = await getPlayerArtifacts(player);
calculateAllArtifacts(mainInput.value);
calculateEnergyCap(playerPlanets);
calculateAmountOfMoves(mainInput.value);
calculateRank(mainInput.value);
createPlanetLevelsGraph(playerPlanets);
createArtifactsGraph(playerArtifacts);
})();
|
label: artifactType,
data: artifactRarities.map(
(r) =>
|
server_client_test.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package handlers
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"reflect"
"sort"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/test-infra/boskos/client"
"k8s.io/test-infra/boskos/common"
"k8s.io/test-infra/boskos/crds"
"k8s.io/test-infra/boskos/ranch"
)
func makeTestBoskos(t *testing.T, r *ranch.Ranch) *httptest.Server
|
type testMuxWrapper struct {
t *testing.T
*http.ServeMux
requestCount int
}
func (tmw *testMuxWrapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
tmw.requestCount++
requestBody, err := ioutil.ReadAll(r.Body)
if err != nil {
tmw.t.Fatalf("failed to read request body: %v", err)
}
r.Body = ioutil.NopCloser(bytes.NewBuffer(requestBody))
if err := compareWithFixture(fmt.Sprintf("%s-request-%d", tmw.t.Name(), tmw.requestCount), requestBody); err != nil {
tmw.t.Errorf("data differs from fixture: %v", err)
}
tmw.ServeMux.ServeHTTP(&bodyLoggingHTTPWriter{t: tmw.t, ResponseWriter: w}, r)
}
type bodyLoggingHTTPWriter struct {
t *testing.T
http.ResponseWriter
}
func (blhw *bodyLoggingHTTPWriter) Write(data []byte) (int, error) {
if err := compareWithFixture(blhw.t.Name()+"-response", data); err != nil {
blhw.t.Errorf("data differs from fixture: %v", err)
}
return blhw.ResponseWriter.Write(data)
}
func TestAcquireUpdate(t *testing.T) {
var testcases = []struct {
name string
resource *crds.ResourceObject
}{
{
name: "noInfo",
resource: newResource("test", "type", common.Dirty, "", time.Time{}),
},
{
name: "existingInfo",
resource: &crds.ResourceObject{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
Spec: crds.ResourceSpec{
Type: "type",
},
Status: crds.ResourceStatus{
State: common.Dirty,
UserData: common.UserDataFromMap(common.UserDataMap{"test": "old"}),
},
},
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
r := MakeTestRanch([]runtime.Object{tc.resource})
boskos := makeTestBoskos(t, r)
owner := "owner"
client, err := client.NewClient(owner, boskos.URL, "", "")
if err != nil {
t.Fatalf("failed to create the Boskos client")
}
userData := common.UserDataFromMap(common.UserDataMap{"test": "new"})
newState := "acquired"
receivedRes, err := client.Acquire(tc.resource.Spec.Type, tc.resource.Status.State, newState)
if err != nil {
t.Fatalf("unable to acquire resource: %v", err)
}
if receivedRes.State != newState || receivedRes.Owner != owner {
t.Errorf("resource should match. Expected \n%v, received \n%v", tc.resource, receivedRes)
}
if err = client.UpdateOne(receivedRes.Name, receivedRes.State, userData); err != nil {
t.Errorf("unable to update resource. %v", err)
}
boskos.Close()
updatedResource, err := r.Storage.GetResource(tc.resource.Name)
if err != nil {
t.Error("unable to list resources")
}
if !reflect.DeepEqual(updatedResource.UserData.ToMap(), userData.ToMap()) {
t.Errorf("info should match. Expected \n%v, received \n%v", userData.ToMap(), updatedResource.UserData.ToMap())
}
})
}
}
func TestAcquireByState(t *testing.T) {
newState := "newState"
owner := "owner"
var testcases = []struct {
name, state string
resources []runtime.Object
expected []common.Resource
err error
names []string
}{
{
name: "noNames",
state: "state1",
resources: []runtime.Object{
newResource("test", "type", common.Dirty, "", time.Time{}),
},
err: fmt.Errorf("status 400 Bad Request, status code 400"),
},
{
name: "noState",
names: []string{"test"},
state: "state1",
resources: []runtime.Object{
newResource("test", "type", common.Dirty, "", time.Time{}),
},
err: fmt.Errorf("resources not found"),
},
{
name: "existing",
names: []string{"test2", "test3"},
state: "state1",
resources: []runtime.Object{
newResource("test1", "type1", common.Dirty, "", time.Time{}),
newResource("test2", "type2", "state1", "", time.Time{}),
newResource("test3", "type3", "state1", "", time.Time{}),
newResource("test4", "type4", common.Dirty, "", time.Time{}),
},
expected: []common.Resource{
common.NewResource("test2", "type2", newState, owner, fakeNow),
common.NewResource("test3", "type3", newState, owner, fakeNow),
},
},
{
name: "alreadyOwned",
names: []string{"test2", "test3"},
state: "state1",
resources: []runtime.Object{
newResource("test1", "type1", common.Dirty, "", time.Time{}),
newResource("test2", "type2", "state1", "foo", time.Time{}),
newResource("test3", "type3", "state1", "foo", time.Time{}),
newResource("test4", "type4", common.Dirty, "", time.Time{}),
},
err: fmt.Errorf("resources not found"),
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
r := MakeTestRanch(tc.resources)
boskos := makeTestBoskos(t, r)
client, err := client.NewClient(owner, boskos.URL, "", "")
if err != nil {
t.Fatalf("failed to create the Boskos client")
}
receivedRes, err := client.AcquireByState(tc.state, newState, tc.names)
boskos.Close()
if !reflect.DeepEqual(err, tc.err) {
t.Fatalf("tc: %s - errors don't match, expected %v, received\n %v", tc.name, tc.err, err)
}
sort.Sort(common.ResourceByName(receivedRes))
if !reflect.DeepEqual(receivedRes, tc.expected) {
t.Errorf("tc: %s - resources should match. Expected \n%v, received \n%v", tc.name, tc.expected, receivedRes)
}
})
}
}
func TestClientServerUpdate(t *testing.T) {
owner := "owner"
newResourceWithUD := func(name, rtype, state, owner string, t time.Time, ud common.UserDataMap) common.Resource {
res := common.NewResource(name, rtype, state, owner, t)
res.UserData = common.UserDataFromMap(ud)
return res
}
newCRDResourceWithUD := func(name, rtype, state, owner string, t time.Time, ud common.UserDataMap) *crds.ResourceObject {
res := newResource(name, rtype, state, owner, t)
res.Status.UserData = common.UserDataFromMap(ud)
return res
}
initialState := "state1"
finalState := "state2"
rType := "type"
resourceName := "test"
var testcases = []struct {
name string
resource *crds.ResourceObject
expected common.Resource
err error
names []string
ud common.UserDataMap
}{
{
name: "noUserData",
resource: newResource(resourceName, rType, initialState, "", time.Time{}),
expected: common.NewResource(resourceName, rType, finalState, owner, fakeNow),
},
{
name: "userData",
resource: newResource(resourceName, rType, initialState, "", time.Time{}),
expected: newResourceWithUD(resourceName, rType, finalState, owner, fakeNow, common.UserDataMap{"custom": "custom"}),
ud: common.UserDataMap{"custom": "custom"},
},
{
name: "newUserData",
resource: newCRDResourceWithUD(resourceName, rType, initialState, "", fakeNow, common.UserDataMap{"1": "1"}),
expected: newResourceWithUD(resourceName, rType, finalState, owner, fakeNow, common.UserDataMap{"1": "1", "2": "2"}),
ud: common.UserDataMap{"2": "2"},
},
{
name: "OverRideUserData",
resource: newCRDResourceWithUD(resourceName, rType, initialState, "", fakeNow, common.UserDataMap{"1": "1"}),
expected: newResourceWithUD(resourceName, rType, finalState, owner, fakeNow, common.UserDataMap{"1": "2"}),
ud: common.UserDataMap{"1": "2"},
},
{
name: "DeleteUserData",
resource: newCRDResourceWithUD(resourceName, rType, initialState, "", fakeNow, common.UserDataMap{"1": "1", "2": "2"}),
expected: newResourceWithUD(resourceName, rType, finalState, owner, fakeNow, common.UserDataMap{"2": "2"}),
ud: common.UserDataMap{"1": ""},
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
r := MakeTestRanch([]runtime.Object{tc.resource})
boskos := makeTestBoskos(t, r)
client, err := client.NewClient(owner, boskos.URL, "", "")
if err != nil {
t.Fatalf("failed to create the Boskos client")
}
_, err = client.Acquire(rType, initialState, finalState)
if err != nil {
t.Errorf("failed to acquire resource")
}
err = client.UpdateOne(resourceName, finalState, common.UserDataFromMap(tc.ud))
boskos.Close()
if !reflect.DeepEqual(err, tc.err) {
t.Fatalf("tc: %s - errors don't match, expected %v, received\n %v", tc.name, tc.err, err)
}
receivedRes, _ := r.Storage.GetResource(tc.resource.Name)
if !reflect.DeepEqual(receivedRes.UserData.ToMap(), tc.expected.UserData.ToMap()) {
t.Errorf("tc: %s - resources user data should match. Expected \n%v, received \n%v", tc.name, tc.expected.UserData.ToMap(), receivedRes.UserData.ToMap())
}
// Hack: remove UserData to be able to compare since we already compared it before.
receivedRes.UserData = nil
tc.expected.UserData = nil
if !reflect.DeepEqual(receivedRes, tc.expected) {
t.Errorf("tc: %s - resources should match. Expected \n%v, received \n%v", tc.name, tc.expected, receivedRes)
}
})
}
}
func newResource(name, rtype, state, owner string, t time.Time) *crds.ResourceObject {
if state == "" {
state = common.Free
}
return &crds.ResourceObject{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: crds.ResourceSpec{
Type: rtype,
},
Status: crds.ResourceStatus{
State: state,
Owner: owner,
LastUpdate: t,
UserData: &common.UserData{},
},
}
}
|
{
handler := &testMuxWrapper{t: t, ServeMux: NewBoskosHandler(r)}
return httptest.NewServer(handler)
}
|
alias.go
|
package kms
import (
"fmt"
awskms "github.com/aws/aws-sdk-go/service/kms"
)
type Alias struct {
client aliasesClient
name *string
identifier string
rtype string
}
func
|
(client aliasesClient, name *string) Alias {
return Alias{
client: client,
name: name,
identifier: *name,
rtype: "KMS Alias",
}
}
func (a Alias) Delete() error {
_, err := a.client.DeleteAlias(&awskms.DeleteAliasInput{AliasName: a.name})
if err != nil {
return fmt.Errorf("Delete: %s", err)
}
return nil
}
func (a Alias) Name() string {
return a.identifier
}
func (a Alias) Type() string {
return a.rtype
}
|
NewAlias
|
logrus_config.go
|
package logger
import (
"os"
"path"
"runtime"
"strconv"
"github.com/sirupsen/logrus"
)
// NewLogrus - initializes the package-level logger with JSON output and caller reporting.
func
|
() {
l = logrus.New()
l.SetOutput(os.Stdout)
l.SetReportCaller(true)
l.SetFormatter(&logrus.JSONFormatter{
CallerPrettyfier: func(f *runtime.Frame) (string, string) {
filename := path.Base(f.File)
//return fmt.Sprintf("%s()", f.Function), fmt.Sprintf("%s:%d", filename, f.Line)
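// Yields e.g. "main.main()" for the caller and "main.go:42" for the
// file:line reference in each JSON log entry.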
return f.Function + "()", filename + ":" + strconv.Itoa(f.Line)
},
})
}
// SetServiceInfo - attaches the service name and hostname to every log entry.
func SetServiceInfo(service string) {
l.AddHook(&fieldHook{
Service: service,
Host: func() string {
name, err := os.Hostname()
if err != nil {
return ""
}
return name
}(),
})
}
// SetLevel - sets the minimum level that will be logged.
func SetLevel(level logrus.Level) {
switch level {
case TraceLevel:
l.SetLevel(logrus.TraceLevel)
case DebugLevel:
l.SetLevel(logrus.DebugLevel)
case InfoLevel:
l.SetLevel(logrus.InfoLevel)
case ErrorLevel:
l.SetLevel(logrus.ErrorLevel)
case WarnLevel:
l.SetLevel(logrus.WarnLevel)
}
}
// SetSlackHook - registers a Slack webhook for the given level (not yet implemented).
func SetSlackHook(webhook string, level logrus.Level) {
}
|
NewLogrus
|
gaussian.py
|
#
# Plasma
# Copyright (c) 2021 Yusuf Olokoba.
#
from torch import arange, exp, Tensor
from torch.nn.functional import conv2d, conv3d
from typing import Tuple
def gaussian_kernel (kernel_size: int, sigma: float = -1.) -> Tensor:
"""
Normalized 1D Gaussian kernel.
This operation is NOT differentiable w.r.t. its arguments.
Parameters:
kernel_size (int): Kernel size, should be odd.
sigma (float): Gaussian standard deviation. If negative, it is automatically computed from the kernel size.
Returns:
Tensor: Normalized Gaussian kernel with shape (K,).
"""
sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8 if sigma < 0 else sigma # From OpenCV's cv::getGaussianKernel
x = arange(kernel_size).float() - kernel_size // 2
x = x + 0.5 if kernel_size % 2 == 0 else x
kernel = exp((-x.pow(2.) / (2. * sigma ** 2)))
return kernel / kernel.sum()
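# Example (sketch): gaussian_kernel(5) returns a length-5 tensor that sums
# to 1.0, with its peak at the center element.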
def gaussian_filter (input: Tensor, kernel_size: Tuple[int, int]) -> Tensor:
|
def gaussian_filter_3d (input: Tensor, kernel_size: Tuple[int, int, int]) -> Tensor:
"""
Apply a Gaussian filter to a volume.
Parameters:
input (Tensor): Input volume with shape (N,C,D,H,W).
kernel_size (tuple): Kernel size in each dimension (Kz,Ky,Kx).
Returns:
Tensor: Filtered volume with shape (N,C,D,H,W).
"""
_,channels,_,_,_ = input.shape
kernel_size_z, kernel_size_y, kernel_size_x = kernel_size
# Compute kernels
kernel_x = gaussian_kernel(kernel_size_x).to(input.device)
kernel_y = gaussian_kernel(kernel_size_y).to(input.device)
kernel_z = gaussian_kernel(kernel_size_z).to(input.device)
# Reshape
kernel_x = kernel_x.expand(channels, 1, 1, 1, -1)
kernel_y = kernel_y.expand(channels, 1, 1, 1, -1).permute(0, 1, 2, 4, 3).contiguous()
kernel_z = kernel_z.expand(channels, 1, 1, 1, -1).permute(0, 1, 4, 2, 3).contiguous()
# Separable convolution
result = conv3d(input, kernel_x, padding=(0, 0, kernel_size_x // 2), groups=channels)
result = conv3d(result, kernel_y, padding=(0, kernel_size_y // 2, 0), groups=channels)
result = conv3d(result, kernel_z, padding=(kernel_size_z // 2, 0, 0), groups=channels)
return result
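# Example usage (sketch): smooth a random volume with a 5x5x5 Gaussian;
# odd kernel sizes preserve the input shape.
# volume = torch.rand(1, 3, 16, 32, 32)
# smoothed = gaussian_filter_3d(volume, (5, 5, 5))  # -> (1, 3, 16, 32, 32)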
|
"""
Apply a Gaussian filter to an image.
Parameters:
input (Tensor): Input image with shape (N,C,H,W).
kernel_size (tuple): Kernel size in each dimension (Ky,Kx).
Returns:
Tensor: Filtered image with shape (N,C,H,W).
"""
_,channels,_,_ = input.shape
kernel_size_y, kernel_size_x = kernel_size
# Compute kernels
kernel_x = gaussian_kernel(kernel_size_x).to(input.device)
kernel_y = gaussian_kernel(kernel_size_y).to(input.device)
# Reshape
kernel_x = kernel_x.expand(channels, 1, 1, -1)
kernel_y = kernel_y.expand(channels, 1, 1, -1).permute(0, 1, 3, 2).contiguous()
# Separable convolution
result = conv2d(input, kernel_x, padding=(0, kernel_size_x // 2), groups=channels)
result = conv2d(result, kernel_y, padding=(kernel_size_y // 2, 0), groups=channels)
return result
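# Example usage (sketch): blur a batch of RGB images with a 5x5 Gaussian.
# images = torch.rand(8, 3, 64, 64)
# blurred = gaussian_filter(images, (5, 5))  # -> (8, 3, 64, 64)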
|
widget_text.rs
|
use std::sync::Arc;
use crate::{
style::WidgetVisuals, text::LayoutJob, Align, Color32, FontFamily, FontSelection, Galley, Pos2,
Style, TextStyle, Ui, Visuals,
};
/// Text and optional style choices for it.
///
/// The style choices (font, color) are applied to the entire text.
/// For more detailed control, use [`crate::text::LayoutJob`] instead.
///
/// A [`RichText`] can be used in most widgets and helper functions, e.g. [`Ui::label`] and [`Ui::button`].
///
/// ### Example
/// ```
/// use egui::{RichText, Color32};
///
/// RichText::new("Plain");
/// RichText::new("colored").color(Color32::RED);
/// RichText::new("Large and underlined").size(20.0).underline();
/// ```
#[derive(Clone, Default, PartialEq)]
pub struct RichText {
text: String,
size: Option<f32>,
family: Option<FontFamily>,
text_style: Option<TextStyle>,
background_color: Color32,
text_color: Option<Color32>,
code: bool,
strong: bool,
weak: bool,
strikethrough: bool,
underline: bool,
italics: bool,
raised: bool,
}
impl From<&str> for RichText {
#[inline]
fn from(text: &str) -> Self {
RichText::new(text)
}
}
impl From<&String> for RichText {
#[inline]
fn from(text: &String) -> Self {
RichText::new(text)
}
}
impl From<&mut String> for RichText {
#[inline]
fn from(text: &mut String) -> Self {
RichText::new(text.clone())
}
}
impl From<String> for RichText {
#[inline]
fn from(text: String) -> Self {
RichText::new(text)
}
}
impl RichText {
#[inline]
pub fn new(text: impl Into<String>) -> Self {
Self {
text: text.into(),
..Default::default()
}
}
#[inline]
pub fn is_empty(&self) -> bool {
self.text.is_empty()
}
#[inline]
pub fn text(&self) -> &str {
&self.text
}
/// Select the font size (in points).
/// This overrides the value from [`Self::text_style`].
#[inline]
pub fn size(mut self, size: f32) -> Self {
self.size = Some(size);
self
}
/// Select the font family.
///
/// This overrides the value from [`Self::text_style`].
///
/// Only the families available in [`crate::FontDefinitions::families`] may be used.
#[inline]
pub fn family(mut self, family: FontFamily) -> Self {
self.family = Some(family);
self
}
/// Select the font and size.
/// This overrides the value from [`Self::text_style`].
#[inline]
pub fn font(mut self, font_id: crate::FontId) -> Self {
let crate::FontId { size, family } = font_id;
self.size = Some(size);
self.family = Some(family);
self
}
/// Override the [`TextStyle`].
#[inline]
pub fn text_style(mut self, text_style: TextStyle) -> Self {
self.text_style = Some(text_style);
self
}
/// Set the [`TextStyle`] unless it has already been set
#[inline]
pub fn fallback_text_style(mut self, text_style: TextStyle) -> Self {
self.text_style.get_or_insert(text_style);
self
}
/// Use [`TextStyle::Heading`].
#[inline]
pub fn heading(self) -> Self {
self.text_style(TextStyle::Heading)
}
/// Use [`TextStyle::Monospace`].
#[inline]
pub fn monospace(self) -> Self {
self.text_style(TextStyle::Monospace)
}
/// Monospace label with different background color.
#[inline]
pub fn code(mut self) -> Self {
self.code = true;
self.text_style(TextStyle::Monospace)
}
/// Extra strong text (stronger color).
#[inline]
pub fn strong(mut self) -> Self {
self.strong = true;
self
}
/// Extra weak text (fainter color).
#[inline]
pub fn weak(mut self) -> Self {
self.weak = true;
self
}
/// Draw a line under the text.
///
/// If you want to control the line color, use [`LayoutJob`] instead.
#[inline]
pub fn underline(mut self) -> Self {
self.underline = true;
self
}
/// Draw a line through the text, crossing it out.
///
/// If you want to control the strikethrough line color, use [`LayoutJob`] instead.
#[inline]
pub fn strikethrough(mut self) -> Self {
self.strikethrough = true;
self
}
/// Tilt the characters to the right.
#[inline]
pub fn italics(mut self) -> Self {
self.italics = true;
self
}
/// Smaller text.
#[inline]
pub fn small(self) -> Self {
self.text_style(TextStyle::Small)
}
/// For e.g. exponents.
#[inline]
pub fn small_raised(self) -> Self {
self.text_style(TextStyle::Small).raised()
}
/// Align text to top. Only applicable together with [`Self::small()`].
#[inline]
pub fn raised(mut self) -> Self {
self.raised = true;
self
}
/// Fill-color behind the text.
#[inline]
pub fn background_color(mut self, background_color: impl Into<Color32>) -> Self {
self.background_color = background_color.into();
self
}
/// Override text color.
#[inline]
pub fn color(mut self, color: impl Into<Color32>) -> Self {
self.text_color = Some(color.into());
self
}
/// Read the font height of the selected text style.
pub fn font_height(&self, fonts: &epaint::Fonts, style: &Style) -> f32 {
let mut font_id = self.text_style.as_ref().map_or_else(
|| FontSelection::Default.resolve(style),
|text_style| text_style.resolve(style),
);
if let Some(size) = self.size {
font_id.size = size;
}
if let Some(family) = &self.family {
font_id.family = family.clone();
}
fonts.row_height(&font_id)
}
fn into_text_job(
self,
style: &Style,
fallback_font: FontSelection,
default_valign: Align,
) -> WidgetTextJob {
let text_color = self.get_text_color(&style.visuals);
let Self {
text,
size,
family,
text_style,
background_color,
text_color: _, // already used by `get_text_color`
code,
strong: _, // already used by `get_text_color`
weak: _, // already used by `get_text_color`
strikethrough,
underline,
italics,
raised,
} = self;
let job_has_color = text_color.is_some();
let line_color = text_color.unwrap_or_else(|| style.visuals.text_color());
let text_color = text_color.unwrap_or(crate::Color32::TEMPORARY_COLOR);
let font_id = {
let mut font_id = text_style
.or_else(|| style.override_text_style.clone())
.map_or_else(
|| fallback_font.resolve(style),
|text_style| text_style.resolve(style),
);
if let Some(size) = size {
font_id.size = size;
}
if let Some(family) = family {
font_id.family = family;
}
font_id
};
let mut background_color = background_color;
if code {
background_color = style.visuals.code_bg_color;
}
let underline = if underline {
crate::Stroke::new(1.0, line_color)
} else {
crate::Stroke::none()
};
let strikethrough = if strikethrough {
crate::Stroke::new(1.0, line_color)
} else {
crate::Stroke::none()
};
let valign = if raised {
crate::Align::TOP
} else {
default_valign
};
let text_format = crate::text::TextFormat {
font_id,
color: text_color,
background: background_color,
italics,
underline,
strikethrough,
valign,
};
let job = LayoutJob::single_section(text, text_format);
WidgetTextJob { job, job_has_color }
}
fn get_text_color(&self, visuals: &Visuals) -> Option<Color32> {
if let Some(text_color) = self.text_color {
Some(text_color)
} else if self.strong {
Some(visuals.strong_text_color())
} else if self.weak {
Some(visuals.weak_text_color())
} else {
visuals.override_text_color
}
}
}
// ----------------------------------------------------------------------------
/// This is how you specify text for a widget.
///
/// A lot of widgets use `impl Into<WidgetText>` as an argument,
/// allowing you to pass in [`String`], [`RichText`], [`LayoutJob`], and more.
///
/// Often a [`WidgetText`] is just a simple [`String`],
/// but it can be a [`RichText`] (text with color, style, etc),
/// a [`LayoutJob`] (for when you want full control of how the text looks)
/// or text that has already been laid out in a [`Galley`].
#[derive(Clone)]
pub enum WidgetText {
RichText(RichText),
/// Use this [`LayoutJob`] when laying out the text.
///
/// Only [`LayoutJob::text`] and [`LayoutJob::sections`] are guaranteed to be respected.
///
/// [`TextWrapping::max_width`](epaint::text::TextWrapping::max_width), [`LayoutJob::halign`], [`LayoutJob::justify`]
/// and [`LayoutJob::first_row_min_height`] will likely be determined by the [`crate::Layout`]
/// of the [`Ui`] the widget is placed in.
/// If you want all parts of the [`LayoutJob`] respected, then convert it to a
/// [`Galley`] and use [`Self::Galley`] instead.
LayoutJob(LayoutJob),
/// Use exactly this galley when painting the text.
Galley(Arc<Galley>),
}
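// A sketch of typical usage: thanks to the `From` impls below, all of these
// coerce into a `WidgetText`:
//   ui.label("plain &str");
//   ui.label(RichText::new("styled").strong().underline());
//   ui.label(LayoutJob::default());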
impl Default for WidgetText {
fn default() -> Self {
Self::RichText(RichText::default())
}
}
impl WidgetText {
#[inline]
pub fn is_empty(&self) -> bool {
match self {
Self::RichText(text) => text.is_empty(),
Self::LayoutJob(job) => job.is_empty(),
Self::Galley(galley) => galley.is_empty(),
}
}
#[inline]
pub fn text(&self) -> &str {
match self {
Self::RichText(text) => text.text(),
Self::LayoutJob(job) => &job.text,
Self::Galley(galley) => galley.text(),
}
}
/// Override the [`TextStyle`] if, and only if, this is a [`RichText`].
///
/// Prefer using [`RichText`] directly!
#[inline]
pub fn text_style(self, text_style: TextStyle) -> Self {
match self {
Self::RichText(text) => Self::RichText(text.text_style(text_style)),
Self::LayoutJob(_) | Self::Galley(_) => self,
}
}
/// Set the [`TextStyle`] unless it has already been set
///
/// Prefer using [`RichText`] directly!
#[inline]
pub fn fallback_text_style(self, text_style: TextStyle) -> Self {
match self {
Self::RichText(text) => Self::RichText(text.fallback_text_style(text_style)),
Self::LayoutJob(_) | Self::Galley(_) => self,
}
}
/// Override text color if, and only if, this is a [`RichText`].
///
/// Prefer using [`RichText`] directly!
#[inline]
pub fn color(self, color: impl Into<Color32>) -> Self {
match self {
Self::RichText(text) => Self::RichText(text.color(color)),
Self::LayoutJob(_) | Self::Galley(_) => self,
}
}
/// Prefer using [`RichText`] directly!
pub fn heading(self) -> Self {
match self {
Self::RichText(text) => Self::RichText(text.heading()),
Self::LayoutJob(_) | Self::Galley(_) => self,
}
}
/// Prefer using [`RichText`] directly!
pub fn monospace(self) -> Self {
match self {
Self::RichText(text) => Self::RichText(text.monospace()),
Self::LayoutJob(_) | Self::Galley(_) => self,
}
}
/// Prefer using [`RichText`] directly!
pub fn code(self) -> Self {
match self {
Self::RichText(text) => Self::RichText(text.code()),
Self::LayoutJob(_) | Self::Galley(_) => self,
}
}
/// Prefer using [`RichText`] directly!
pub fn strong(self) -> Self {
match self {
Self::RichText(text) => Self::RichText(text.strong()),
Self::LayoutJob(_) | Self::Galley(_) => self,
}
}
/// Prefer using [`RichText`] directly!
pub fn weak(self) -> Self {
match self {
Self::RichText(text) => Self::RichText(text.weak()),
Self::LayoutJob(_) | Self::Galley(_) => self,
}
}
/// Prefer using [`RichText`] directly!
pub fn underline(self) -> Self {
match self {
Self::RichText(text) => Self::RichText(text.underline()),
Self::LayoutJob(_) | Self::Galley(_) => self,
}
}
/// Prefer using [`RichText`] directly!
pub fn strikethrough(self) -> Self
|
/// Prefer using [`RichText`] directly!
pub fn italics(self) -> Self {
match self {
Self::RichText(text) => Self::RichText(text.italics()),
Self::LayoutJob(_) | Self::Galley(_) => self,
}
}
/// Prefer using [`RichText`] directly!
pub fn small(self) -> Self {
match self {
Self::RichText(text) => Self::RichText(text.small()),
Self::LayoutJob(_) | Self::Galley(_) => self,
}
}
/// Prefer using [`RichText`] directly!
pub fn small_raised(self) -> Self {
match self {
Self::RichText(text) => Self::RichText(text.small_raised()),
Self::LayoutJob(_) | Self::Galley(_) => self,
}
}
/// Prefer using [`RichText`] directly!
pub fn raised(self) -> Self {
match self {
Self::RichText(text) => Self::RichText(text.raised()),
Self::LayoutJob(_) | Self::Galley(_) => self,
}
}
/// Prefer using [`RichText`] directly!
pub fn background_color(self, background_color: impl Into<Color32>) -> Self {
match self {
Self::RichText(text) => Self::RichText(text.background_color(background_color)),
Self::LayoutJob(_) | Self::Galley(_) => self,
}
}
pub(crate) fn font_height(&self, fonts: &epaint::Fonts, style: &Style) -> f32 {
match self {
Self::RichText(text) => text.font_height(fonts, style),
Self::LayoutJob(job) => job.font_height(fonts),
Self::Galley(galley) => {
if let Some(row) = galley.rows.first() {
row.height()
} else {
galley.size().y
}
}
}
}
pub fn into_text_job(
self,
style: &Style,
fallback_font: FontSelection,
default_valign: Align,
) -> WidgetTextJob {
match self {
Self::RichText(text) => text.into_text_job(style, fallback_font, default_valign),
Self::LayoutJob(job) => WidgetTextJob {
job,
job_has_color: true,
},
Self::Galley(galley) => {
let job: LayoutJob = (*galley.job).clone();
WidgetTextJob {
job,
job_has_color: true,
}
}
}
}
/// Layout with wrap mode based on the containing [`Ui`].
///
/// wrap: override for [`Ui::wrap_text`].
pub fn into_galley(
self,
ui: &Ui,
wrap: Option<bool>,
available_width: f32,
fallback_font: impl Into<FontSelection>,
) -> WidgetTextGalley {
let wrap = wrap.unwrap_or_else(|| ui.wrap_text());
let wrap_width = if wrap { available_width } else { f32::INFINITY };
match self {
Self::RichText(text) => {
let valign = ui.layout().vertical_align();
let mut text_job = text.into_text_job(ui.style(), fallback_font.into(), valign);
text_job.job.wrap.max_width = wrap_width;
WidgetTextGalley {
galley: ui.fonts().layout_job(text_job.job),
galley_has_color: text_job.job_has_color,
}
}
Self::LayoutJob(mut job) => {
job.wrap.max_width = wrap_width;
WidgetTextGalley {
galley: ui.fonts().layout_job(job),
galley_has_color: true,
}
}
Self::Galley(galley) => WidgetTextGalley {
galley,
galley_has_color: true,
},
}
}
}
impl From<&str> for WidgetText {
#[inline]
fn from(text: &str) -> Self {
Self::RichText(RichText::new(text))
}
}
impl From<&String> for WidgetText {
#[inline]
fn from(text: &String) -> Self {
Self::RichText(RichText::new(text))
}
}
impl From<String> for WidgetText {
#[inline]
fn from(text: String) -> Self {
Self::RichText(RichText::new(text))
}
}
impl From<RichText> for WidgetText {
#[inline]
fn from(rich_text: RichText) -> Self {
Self::RichText(rich_text)
}
}
impl From<LayoutJob> for WidgetText {
#[inline]
fn from(layout_job: LayoutJob) -> Self {
Self::LayoutJob(layout_job)
}
}
impl From<Arc<Galley>> for WidgetText {
#[inline]
fn from(galley: Arc<Galley>) -> Self {
Self::Galley(galley)
}
}
// ----------------------------------------------------------------------------
#[derive(Clone, PartialEq)]
pub struct WidgetTextJob {
pub job: LayoutJob,
pub job_has_color: bool,
}
impl WidgetTextJob {
pub fn into_galley(self, fonts: &crate::text::Fonts) -> WidgetTextGalley {
let Self { job, job_has_color } = self;
let galley = fonts.layout_job(job);
WidgetTextGalley {
galley,
galley_has_color: job_has_color,
}
}
}
// ----------------------------------------------------------------------------
/// Text that has been laid out and is ready to be painted.
#[derive(Clone, PartialEq)]
pub struct WidgetTextGalley {
pub galley: Arc<Galley>,
pub galley_has_color: bool,
}
impl WidgetTextGalley {
/// Size of the laid out text.
#[inline]
pub fn size(&self) -> crate::Vec2 {
self.galley.size()
}
/// The underlying text of the laid out galley.
#[inline]
pub fn text(&self) -> &str {
self.galley.text()
}
#[inline]
pub fn galley(&self) -> &Arc<Galley> {
&self.galley
}
/// Use the colors in the original [`WidgetText`] if any,
/// else fall back to the one specified by the [`WidgetVisuals`].
pub fn paint_with_visuals(
self,
painter: &crate::Painter,
text_pos: Pos2,
visuals: &WidgetVisuals,
) {
self.paint_with_fallback_color(painter, text_pos, visuals.text_color());
}
/// Use the colors in the original [`WidgetText`] if any,
/// else fall back to the given color.
pub fn paint_with_fallback_color(
self,
painter: &crate::Painter,
text_pos: Pos2,
text_color: Color32,
) {
if self.galley_has_color {
painter.galley(text_pos, self.galley);
} else {
painter.galley_with_color(text_pos, self.galley, text_color);
}
}
/// Paint with this specific color.
pub fn paint_with_color_override(
self,
painter: &crate::Painter,
text_pos: Pos2,
text_color: Color32,
) {
painter.galley_with_color(text_pos, self.galley, text_color);
}
}
|
{
match self {
Self::RichText(text) => Self::RichText(text.strikethrough()),
Self::LayoutJob(_) | Self::Galley(_) => self,
}
}
|
app-root.tsx
|
import '@stencil/router';
import { LocationSegments, RouterHistory } from '@stencil/router';
import { Component, Element, Listen, State, h } from '@stencil/core';
import SiteProviderConsumer, { SiteState } from '../../global/site-provider-consumer';
import { ResponsiveContainer } from '@ionic-internal/sites-shared';
@Component({
tag: 'app-root',
styleUrl: 'app-root.css'
})
export class AppRoot {
private scrollY = 0;
private history?: RouterHistory;
elements = [
'site-header',
'site-menu',
'app-burger',
'main'
];
@Element() el!: HTMLElement;
@State() isLeftSidebarIn: boolean = false;
@Listen('resize', { target: 'window' })
handleResize() {
requestAnimationFrame(() => {
if (window.innerWidth > 768 && this.isLeftSidebarIn) {
this.isLeftSidebarIn = false;
document.body.classList.remove('no-scroll');
this.elements.forEach((el) => {
this.el.querySelector(el)!.classList.remove('left-sidebar-in');
});
}
});
}
private setHistory = ({ history }: { history: RouterHistory }) => {
if (!this.history) {
this.history = history;
this.history.listen((_location: LocationSegments) => {
// Hubspot
// (window as any)._hsq.push(['setPath', location.pathname + location.search ]);
// (window as any)._hsq.push(['trackPageView']);
});
}
}
componentDidLoad() {
this.isLeftSidebarIn = false;
}
private toggleLeftSidebar = () => {
if (window.innerWidth > 768) {
return;
}
const elements = this.elements
.map(el => this.el.querySelector(el))
.filter(el => !!el) as Element[];
this.scrollY = window.scrollY;
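// Remember the scroll position: toggling adds/removes `no-scroll` on the
// body, and we restore the position on the next animation frame below.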
if (this.isLeftSidebarIn) {
this.isLeftSidebarIn = false;
document.body.classList.remove('no-scroll');
elements.forEach(el => {
el.classList.remove('left-sidebar-in');
el.classList.add('left-sidebar-out');
});
window.requestAnimationFrame(() => window.scrollTo(0, this.scrollY));
} else {
this.isLeftSidebarIn = true;
document.body.classList.add('no-scroll');
elements.forEach(el => {
el.classList.add('left-sidebar-in');
el.classList.remove('left-sidebar-out');
});
window.requestAnimationFrame(() => window.scrollTo(0, this.scrollY));
}
}
render() {
const siteState: SiteState = {
isLeftSidebarIn: this.isLeftSidebarIn,
toggleLeftSidebar: this.toggleLeftSidebar
};
return (
<SiteProviderConsumer.Provider state={siteState}>
<site-root>
<site-platform-bar productName='Stencil'/>
<site-header />
<main>
<stencil-router scrollTopOffset={0}>
<stencil-route style={{ display: 'none' }} routeRender={this.setHistory}/>
<stencil-route-switch>
<stencil-route url="/" component="landing-page" exact={true}/>
<stencil-route url="/docs/:pageName" routeRender={({ match }) => (
<doc-component page={match!.url}></doc-component>
)}/>
<stencil-route url="/blog" component="blog-list" exact={true}/>
<stencil-route url="/blog/:pageName" routeRender={({ match }) => (
<blog-component page={match!.url}></blog-component>
|
<stencil-route url="/pwa" component="pwas-page" />
<stencil-route url="/resources" component="resources-page" />
<stencil-route component='notfound-page'></stencil-route>
</stencil-route-switch>
</stencil-router>
<footer>
<ResponsiveContainer>
<div class="footer-col">
<app-icon name="logo"/>
<p>© 2020 StencilJS. Released under MIT License</p>
<ul class="external-links list--unstyled">
<li>
<a rel="noopener" class="link--external" target="_blank" href="https://twitter.com/stenciljs" aria-label="Twitter">
<app-icon name="twitter"></app-icon>
</a>
</li>
<li>
<a rel="noopener" class="link--external" target="_blank" href="https://stencil-worldwide.herokuapp.com" aria-label="Slack">
<app-icon name="slack"></app-icon>
</a>
</li>
<li>
<a rel="noopener" class="link--external" target="_blank" href="https://github.com/ionic-team/stencil" aria-label="Github">
<app-icon name="github"></app-icon>
</a>
</li>
</ul>
</div>
</ResponsiveContainer>
</footer>
</main>
</site-root>
</SiteProviderConsumer.Provider>
);
}
}
|
)}/>
|
lib.rs
|
/*!
# juniper_iron
This repository contains the [Iron][Iron] web framework integration for
[Juniper][Juniper], a [GraphQL][GraphQL] implementation for Rust.
For documentation, including guides and examples, check out [Juniper][Juniper].
A basic usage example can also be found in the [Api documentation][documentation].
## Links
* [Juniper][Juniper]
* [Api Reference][documentation]
* [Iron framework][Iron]
## Integrating with Iron
For example, continuing from the schema created above and using Iron to expose
the schema on an HTTP endpoint supporting both GET and POST requests:
```rust,no_run
extern crate iron;
# extern crate juniper;
# extern crate juniper_iron;
# use std::collections::HashMap;
use iron::prelude::*;
use juniper_iron::GraphQLHandler;
use juniper::{Context, EmptyMutation};
# use juniper::FieldResult;
#
# struct User { id: String, name: String, friend_ids: Vec<String> }
# struct QueryRoot;
# struct Database { users: HashMap<String, User> }
#
# #[juniper::graphql_object( Context = Database )]
# impl User {
# fn id(&self) -> FieldResult<&String> {
# Ok(&self.id)
# }
#
# fn name(&self) -> FieldResult<&String> {
# Ok(&self.name)
# }
#
# fn friends(&self, context: &Database) -> FieldResult<Vec<&User>> {
# Ok(self.friend_ids.iter()
# .filter_map(|id| context.users.get(id))
# .collect())
# }
# }
#
# #[juniper::graphql_object( Context = Database )]
# impl QueryRoot {
# fn user(context: &Database, id: String) -> FieldResult<Option<&User>> {
# Ok(context.users.get(&id))
# }
# }
// This function is executed for every request. Here, we would realistically
// provide a database connection or similar. For this example, we'll be
// creating the database from scratch.
fn context_factory(_: &mut Request) -> IronResult<Database> {
Ok(Database {
users: vec![
( "1000".to_owned(), User {
id: "1000".to_owned(), name: "Robin".to_owned(),
friend_ids: vec!["1001".to_owned()] } ),
( "1001".to_owned(), User {
id: "1001".to_owned(), name: "Max".to_owned(),
friend_ids: vec!["1000".to_owned()] } ),
].into_iter().collect()
})
}
impl Context for Database {}
fn main() {
// GraphQLHandler takes a context factory function, the root object,
// and the mutation object. If we don't have any mutations to expose, we
// can use the empty tuple () to indicate absence.
let graphql_endpoint = GraphQLHandler::new(
context_factory, QueryRoot, EmptyMutation::<Database>::new());
// Start serving the schema at the root on port 8080.
Iron::new(graphql_endpoint).http("localhost:8080").unwrap();
}
```
See the [`GraphQLHandler`][3] documentation for more information on what request methods are
supported.
[3]: ./struct.GraphQLHandler.html
[Iron]: https://github.com/iron/iron
[Juniper]: https://github.com/graphql-rust/juniper
[GraphQL]: http://graphql.org
[documentation]: https://docs.rs/juniper_iron
*/
#![doc(html_root_url = "https://docs.rs/juniper_iron/0.3.0")]
#[cfg(test)]
extern crate iron_test;
#[cfg(test)]
extern crate url;
use iron::{itry, method, middleware::Handler, mime::Mime, prelude::*, status};
use urlencoded::{UrlDecodingError, UrlEncodedQuery};
use std::{error::Error, fmt, io::Read};
use serde_json::error::Error as SerdeError;
use juniper::{
http, serde::Deserialize, DefaultScalarValue, GraphQLType, InputValue, RootNode, ScalarValue,
};
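// Apollo-style query batching: an incoming body may be either a single
// GraphQL request object or a JSON array of them. `#[serde(untagged)]`
// tries each variant in turn, so both shapes deserialize transparently.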
#[derive(serde_derive::Deserialize)]
#[serde(untagged)]
#[serde(bound = "InputValue<S>: Deserialize<'de>")]
enum GraphQLBatchRequest<S = DefaultScalarValue>
where
S: ScalarValue,
{
Single(http::GraphQLRequest<S>),
Batch(Vec<http::GraphQLRequest<S>>),
}
#[derive(serde_derive::Serialize)]
#[serde(untagged)]
enum GraphQLBatchResponse<'a, S = DefaultScalarValue>
where
S: ScalarValue,
{
Single(http::GraphQLResponse<'a, S>),
Batch(Vec<http::GraphQLResponse<'a, S>>),
}
impl<S> GraphQLBatchRequest<S>
where
S: ScalarValue,
{
pub fn execute_sync<'a, CtxT, QueryT, MutationT>(
&'a self,
root_node: &'a RootNode<QueryT, MutationT, S>,
context: &CtxT,
) -> GraphQLBatchResponse<'a, S>
where
QueryT: GraphQLType<S, Context = CtxT>,
MutationT: GraphQLType<S, Context = CtxT>,
{
match *self {
GraphQLBatchRequest::Single(ref request) => {
GraphQLBatchResponse::Single(request.execute_sync(root_node, context))
}
GraphQLBatchRequest::Batch(ref requests) => GraphQLBatchResponse::Batch(
requests
.iter()
.map(|request| request.execute_sync(root_node, context))
.collect(),
),
}
}
}
impl<'a, S> GraphQLBatchResponse<'a, S>
where
S: ScalarValue,
{
fn is_ok(&self) -> bool {
match *self {
GraphQLBatchResponse::Single(ref response) => response.is_ok(),
GraphQLBatchResponse::Batch(ref responses) => {
responses.iter().all(|response| response.is_ok())
}
}
}
}
/// Handler that executes `GraphQL` queries in the given schema
///
/// The handler responds to GET requests and POST requests only. In GET
/// requests, the query should be supplied in the `query` URL parameter, e.g.
/// `http://localhost:3000/graphql?query={hero{name}}`.
///
/// POST requests support both queries and variables. POST a JSON document to
/// this endpoint containing the field `"query"` and optionally `"variables"`.
/// The variables should be a JSON object mapping variable names to values.
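///
/// For illustration (the values are hypothetical), a POST body could look
/// like `{"query": "{hero{name}}", "variables": {"episode": "EMPIRE"}}`.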
pub struct GraphQLHandler<'a, CtxFactory, Query, Mutation, CtxT, S = DefaultScalarValue>
where
S: ScalarValue,
CtxFactory: Fn(&mut Request) -> IronResult<CtxT> + Send + Sync + 'static,
CtxT: 'static,
Query: GraphQLType<S, Context = CtxT> + Send + Sync + 'static,
Mutation: GraphQLType<S, Context = CtxT> + Send + Sync + 'static,
{
context_factory: CtxFactory,
root_node: RootNode<'a, Query, Mutation, S>,
}
/// Handler that renders `GraphiQL` - a graphical query editor interface
pub struct GraphiQLHandler {
graphql_url: String,
}
/// Handler that renders `GraphQL Playground` - a graphical query editor interface
pub struct PlaygroundHandler {
graphql_url: String,
}
fn get_single_value<T>(mut values: Vec<T>) -> IronResult<T> {
if values.len() == 1 {
Ok(values.remove(0))
} else {
Err(GraphQLIronError::InvalidData("Duplicate URL query parameter").into())
}
}
fn parse_url_param(params: Option<Vec<String>>) -> IronResult<Option<String>> {
if let Some(values) = params {
get_single_value(values).map(Some)
} else {
Ok(None)
}
}
fn parse_variable_param<S>(params: Option<Vec<String>>) -> IronResult<Option<InputValue<S>>>
where
S: ScalarValue,
{
if let Some(values) = params {
Ok(
serde_json::from_str::<InputValue<S>>(get_single_value(values)?.as_ref())
.map(Some)
.map_err(GraphQLIronError::Serde)?,
)
} else {
Ok(None)
}
}
impl<'a, CtxFactory, Query, Mutation, CtxT, S>
GraphQLHandler<'a, CtxFactory, Query, Mutation, CtxT, S>
where
S: ScalarValue + 'a,
CtxFactory: Fn(&mut Request) -> IronResult<CtxT> + Send + Sync + 'static,
CtxT: 'static,
Query: GraphQLType<S, Context = CtxT, TypeInfo = ()> + Send + Sync + 'static,
Mutation: GraphQLType<S, Context = CtxT, TypeInfo = ()> + Send + Sync + 'static,
{
/// Build a new GraphQL handler
///
/// The context factory will receive the Iron request object and is
/// expected to construct a context object for the given schema. This can
/// be used to construct e.g. database connections or similar data that
/// the schema needs to execute the query.
pub fn new(context_factory: CtxFactory, query: Query, mutation: Mutation) -> Self {
GraphQLHandler {
context_factory,
root_node: RootNode::new(query, mutation),
}
}
fn handle_get(&self, req: &mut Request) -> IronResult<GraphQLBatchRequest<S>> {
let url_query_string = req
.get_mut::<UrlEncodedQuery>()
.map_err(GraphQLIronError::Url)?;
let input_query = parse_url_param(url_query_string.remove("query"))?
.ok_or_else(|| GraphQLIronError::InvalidData("No query provided"))?;
let operation_name = parse_url_param(url_query_string.remove("operationName"))?;
let variables = parse_variable_param(url_query_string.remove("variables"))?;
Ok(GraphQLBatchRequest::Single(http::GraphQLRequest::new(
input_query,
operation_name,
variables,
)))
}
fn handle_post(&self, req: &mut Request) -> IronResult<GraphQLBatchRequest<S>> {
let mut request_payload = String::new();
itry!(req.body.read_to_string(&mut request_payload));
Ok(
serde_json::from_str::<GraphQLBatchRequest<S>>(request_payload.as_str())
.map_err(GraphQLIronError::Serde)?,
)
}
fn execute_sync(
&self,
context: &CtxT,
request: GraphQLBatchRequest<S>,
) -> IronResult<Response> {
let response = request.execute_sync(&self.root_node, context);
let content_type = "application/json".parse::<Mime>().unwrap();
let json = serde_json::to_string_pretty(&response).unwrap();
let status = if response.is_ok() {
status::Ok
} else {
status::BadRequest
};
Ok(Response::with((content_type, status, json)))
}
}
impl GraphiQLHandler {
/// Build a new GraphiQL handler targeting the specified URL.
///
/// The provided URL should point to the URL of the attached `GraphQLHandler`. It can be
/// relative, so a common value could be `"/graphql"`.
pub fn new(graphql_url: &str) -> GraphiQLHandler {
GraphiQLHandler {
graphql_url: graphql_url.to_owned(),
}
}
}
impl PlaygroundHandler {
/// Build a new GraphQL Playground handler targeting the specified URL.
///
/// The provided URL should point to the URL of the attached `GraphQLHandler`. It can be
/// relative, so a common value could be `"/graphql"`.
pub fn new(graphql_url: &str) -> PlaygroundHandler {
PlaygroundHandler {
graphql_url: graphql_url.to_owned(),
}
}
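// A minimal sketch of wiring the handlers together (assumes the `mount`
// crate; the paths and variable names are illustrative, not part of this
// crate's API):
//
//     let mut mount = mount::Mount::new();
//     mount.mount("/graphql", graphql_endpoint);
//     mount.mount("/graphiql", GraphiQLHandler::new("/graphql"));
//     mount.mount("/playground", PlaygroundHandler::new("/graphql"));
//     Iron::new(mount).http("localhost:8080").unwrap();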
}
impl<'a, CtxFactory, Query, Mutation, CtxT, S> Handler
for GraphQLHandler<'a, CtxFactory, Query, Mutation, CtxT, S>
where
S: ScalarValue + Sync + Send + 'static,
CtxFactory: Fn(&mut Request) -> IronResult<CtxT> + Send + Sync + 'static,
CtxT: 'static,
Query: GraphQLType<S, Context = CtxT, TypeInfo = ()> + Send + Sync + 'static,
Mutation: GraphQLType<S, Context = CtxT, TypeInfo = ()> + Send + Sync + 'static,
'a: 'static,
{
fn handle(&self, mut req: &mut Request) -> IronResult<Response> {
let context = (self.context_factory)(req)?;
let graphql_request = match req.method {
method::Get => self.handle_get(&mut req)?,
method::Post => self.handle_post(&mut req)?,
_ => return Ok(Response::with(status::MethodNotAllowed)),
};
self.execute_sync(&context, graphql_request)
}
}
impl Handler for GraphiQLHandler {
fn handle(&self, _: &mut Request) -> IronResult<Response> {
let content_type = "text/html; charset=utf-8".parse::<Mime>().unwrap();
Ok(Response::with((
content_type,
status::Ok,
juniper::graphiql::graphiql_source(&self.graphql_url),
)))
}
}
impl Handler for PlaygroundHandler {
fn handle(&self, _: &mut Request) -> IronResult<Response> {
let content_type = "text/html; charset=utf-8".parse::<Mime>().unwrap();
Ok(Response::with((
content_type,
status::Ok,
juniper::http::playground::playground_source(&self.graphql_url),
)))
}
}
#[derive(Debug)]
enum GraphQLIronError {
Serde(SerdeError),
Url(UrlDecodingError),
InvalidData(&'static str),
}
impl fmt::Display for GraphQLIronError {
fn fmt(&self, mut f: &mut fmt::Formatter) -> fmt::Result {
match *self {
GraphQLIronError::Serde(ref err) => fmt::Display::fmt(err, &mut f),
GraphQLIronError::Url(ref err) => fmt::Display::fmt(err, &mut f),
GraphQLIronError::InvalidData(err) => fmt::Display::fmt(err, &mut f),
}
}
}
impl Error for GraphQLIronError {
fn description(&self) -> &str {
match *self {
GraphQLIronError::Serde(ref err) => err.description(),
GraphQLIronError::Url(ref err) => err.description(),
GraphQLIronError::InvalidData(err) => err,
}
}
fn cause(&self) -> Option<&dyn Error> {
match *self {
GraphQLIronError::Serde(ref err) => Some(err),
GraphQLIronError::Url(ref err) => Some(err),
GraphQLIronError::InvalidData(_) => None,
}
}
}
impl From<GraphQLIronError> for IronError {
fn from(err: GraphQLIronError) -> IronError {
let message = format!("{}", err);
IronError::new(err, (status::BadRequest, message))
}
}
#[cfg(test)]
mod tests {
use super::*;
use iron::{Handler, Headers, Url};
use iron_test::{request, response};
use percent_encoding::{utf8_percent_encode, AsciiSet, CONTROLS};
use juniper::{
http::tests as http_tests,
tests::{model::Database, schema::Query},
EmptyMutation,
};
use super::GraphQLHandler;
/// https://url.spec.whatwg.org/#query-state
const QUERY_ENCODE_SET: &AsciiSet = &CONTROLS.add(b' ').add(b'"').add(b'#').add(b'<').add(b'>');
// This is ugly but it works. `iron_test` just dumps the path/url in headers
// and newer `hyper` doesn't allow unescaped "{" or "}".
fn fixup_url(url: &str) -> String {
let url = Url::parse(&format!("http://localhost:3000{}", url)).expect("url to parse");
let path: String = url
.path()
.iter()
.map(|x| x.to_string())
.collect::<Vec<String>>()
.join("/");
format!(
"http://localhost:3000{}?{}",
path,
utf8_percent_encode(url.query().unwrap_or(""), QUERY_ENCODE_SET)
)
}
struct TestIronIntegration;
impl http_tests::HTTPIntegration for TestIronIntegration {
fn get(&self, url: &str) -> http_tests::TestResponse {
let result = request::get(&fixup_url(url), Headers::new(), &make_handler());
match result {
Ok(response) => make_test_response(response),
Err(e) => make_test_error_response(e),
}
}
fn post(&self, url: &str, body: &str) -> http_tests::TestResponse {
let result = request::post(&fixup_url(url), Headers::new(), body, &make_handler());
match result {
Ok(response) => make_test_response(response),
Err(e) => make_test_error_response(e),
}
}
}
#[test]
fn test_iron_integration() {
let integration = TestIronIntegration;
http_tests::run_http_test_suite(&integration);
}
fn context_factory(_: &mut Request) -> IronResult<Database> {
Ok(Database::new())
}
fn make_test_error_response(_: IronError) -> http_tests::TestResponse {
// For now all errors return the same status code.
// `juniper_iron` users can choose to do something different if desired.
http_tests::TestResponse {
status_code: 400,
body: None,
content_type: "application/json".to_string(),
}
}
fn make_test_response(response: Response) -> http_tests::TestResponse {
let status_code = response
.status
.expect("No status code returned from handler")
.to_u16() as i32;
let content_type = String::from_utf8(
response
.headers
.get_raw("content-type")
.expect("No content type header from handler")[0]
.clone(),
)
.expect("Content-type header invalid UTF-8");
let body = response::extract_body_to_string(response);
http_tests::TestResponse {
status_code,
|
}
fn make_handler() -> Box<dyn Handler> {
Box::new(GraphQLHandler::new(
context_factory,
Query,
EmptyMutation::<Database>::new(),
))
}
}
|
body: Some(body),
content_type,
}
|
init.py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Workflow to set up gcloud environment."""
import os
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exc
from googlecloudsdk.calliope import usage_text
from googlecloudsdk.command_lib import init_util
from googlecloudsdk.core import config
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import yaml
from googlecloudsdk.core.configurations import named_configs
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.credentials import store as c_store
from googlecloudsdk.core.diagnostics import network_diagnostics
from googlecloudsdk.core.resource import resource_projector
from googlecloudsdk.core.util import platforms
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
base.ReleaseTrack.GA)
class
|
(base.Command):
"""Initialize or reinitialize gcloud.
{command} launches an interactive Getting Started workflow for gcloud.
It performs the following setup steps:
- Authorizes gcloud and other SDK tools to access Google Cloud Platform using
your user account credentials, or lets you select from accounts whose
credentials are already available.
- Sets properties in a gcloud configuration, including the current project and
the default Google Compute Engine region and zone.
{command} can be used for initial setup of gcloud and to create new or
reinitialize gcloud configurations. More information can be found by
running `gcloud topic configurations`.
Properties set by {command} are local and persistent, and are not affected by
remote changes to the project. For example, the default Compute Engine zone in
your configuration remains stable, even if you or another user changes the
project-level default zone in the Cloud Platform Console.
To sync the configuration, re-run {command}.
"""
@staticmethod
def Args(parser):
parser.add_argument(
'obsolete_project_arg',
nargs='?',
hidden=True,
help='THIS ARGUMENT NEEDS HELP TEXT.')
parser.add_argument(
'--console-only',
action='store_true',
help=('Prevent the command from launching a browser for '
'authorization.'))
parser.add_argument(
'--skip-diagnostics',
action='store_true',
help='Do not run diagnostics.')
def Run(self, args):
"""Allows user to select configuration, and initialize it."""
if args.obsolete_project_arg:
raise c_exc.InvalidArgumentException(
args.obsolete_project_arg,
'`gcloud init` has changed and no longer takes a PROJECT argument. '
'Please use `gcloud source repos clone` to clone this '
'project\'s source repositories.')
log.status.write('Welcome! This command will take you through '
'the configuration of gcloud.\n\n')
if properties.VALUES.core.disable_prompts.GetBool():
raise c_exc.InvalidArgumentException(
'disable_prompts/--quiet',
'gcloud init command cannot run with disabled prompts.')
configuration_name = self._PickConfiguration()
if not configuration_name:
return
log.status.write('Your current configuration has been set to: [{0}]\n\n'
.format(configuration_name))
if not args.skip_diagnostics:
log.status.write('You can skip diagnostics next time by using the '
'following flag:\n')
log.status.write(' gcloud init --skip-diagnostics\n\n')
network_passed = network_diagnostics.NetworkDiagnostic().RunChecks()
if not network_passed:
if not console_io.PromptContinue(
message='Network errors detected.',
prompt_string='Would you like to continue anyway',
default=False):
log.status.write('You can re-run diagnostics with the following '
'command:\n')
log.status.write(' gcloud info --run-diagnostics\n\n')
return
# User project quota is now the global default, but this command calls
# legacy APIs where it should be disabled. It must happen after the config
# settings are persisted so this temporary value doesn't get persisted as
# well.
base.DisableUserProjectQuota()
if not self._PickAccount(args.console_only, preselected=args.account):
return
if not self._PickProject(preselected=args.project):
return
self._PickDefaultRegionAndZone()
self._CreateBotoConfig()
self._Summarize(configuration_name)
def _PickAccount(self, console_only, preselected=None):
"""Checks if current credentials are valid, if not runs auth login.
Args:
console_only: bool, True if the auth flow shouldn't use the browser
preselected: str, disable prompts and use this value if not None
Returns:
bool, True if valid credentials are setup.
"""
new_credentials = False
accounts = c_store.AvailableAccounts()
if accounts:
# There is at least one credentialed account.
if preselected:
# Try to use the preselected account. Fail if it's not credentialed.
account = preselected
if account not in accounts:
log.status.write('\n[{0}] is not one of your credentialed accounts '
'[{1}].\n'.format(account, ','.join(accounts)))
return False
# Fall through to set the account property.
else:
# Prompt for the account to use.
idx = console_io.PromptChoice(
accounts + ['Log in with a new account'],
message='Choose the account you would like to use to perform '
'operations for this configuration:',
prompt_string=None)
if idx is None:
return False
if idx < len(accounts):
account = accounts[idx]
else:
new_credentials = True
elif preselected:
# Preselected account specified but there are no credentialed accounts.
log.status.write('\n[{0}] is not a credentialed account.\n'.format(
preselected))
return False
else:
# Must log in with new credentials.
answer = console_io.PromptContinue(
prompt_string='You must log in to continue. Would you like to log in')
if not answer:
return False
new_credentials = True
if new_credentials:
# Call `gcloud auth login` to get new credentials.
# `gcloud auth login` may have user interaction, do not suppress it.
browser_args = ['--no-launch-browser'] if console_only else []
if not self._RunCmd(['auth', 'login'],
['--force', '--brief'] + browser_args,
disable_user_output=False):
return False
# `gcloud auth login` already did `gcloud config set account`.
else:
# Set the config account to the already credentialed account.
properties.PersistProperty(properties.VALUES.core.account, account)
log.status.write('You are logged in as: [{0}].\n\n'
.format(properties.VALUES.core.account.Get()))
return True
def _PickConfiguration(self):
"""Allows user to re-initialize, create or pick new configuration.
Returns:
Configuration name or None.
"""
configs = named_configs.ConfigurationStore.AllConfigs()
active_config = named_configs.ConfigurationStore.ActiveConfig()
if not configs or active_config.name not in configs:
# Listing the configs will automatically create the default config. The
# only way configs could be empty here is if there are no configurations
# and the --configuration flag or env var is set to something that does
# not exist. If configs has items, but the active config is not in there,
# that similarly means that they are using the flag or the env var and that
# config does not exist. In either case, just create it and go with that
# one, since they have already selected it.
named_configs.ConfigurationStore.CreateConfig(active_config.name)
# Need to activate it in the file, not just the environment.
active_config.Activate()
return active_config.name
# If there is only one config, it is the default, and there are no
# properties set, assume it was auto created and that it should be
# initialized as if it didn't exist.
if len(configs) == 1:
default_config = configs.get(named_configs.DEFAULT_CONFIG_NAME, None)
if default_config and not default_config.GetProperties():
default_config.Activate()
return default_config.name
choices = []
log.status.write('Settings from your current configuration [{0}] are:\n'
.format(active_config.name))
log.status.flush()
log.status.write(yaml.dump(properties.VALUES.AllValues()))
log.out.flush()
log.status.write('\n')
log.status.flush()
choices.append(
'Re-initialize this configuration [{0}] with new settings '.format(
active_config.name))
choices.append('Create a new configuration')
config_choices = [name for name, c in sorted(configs.items())
if not c.is_active]
choices.extend('Switch to and re-initialize '
'existing configuration: [{0}]'.format(name)
for name in config_choices)
idx = console_io.PromptChoice(choices, message='Pick configuration to use:')
if idx is None:
return None
if idx == 0: # If reinitialize was selected.
self._CleanCurrentConfiguration()
return active_config.name
if idx == 1: # Second option is to create new configuration.
return self._CreateConfiguration()
config_name = config_choices[idx - 2]
named_configs.ConfigurationStore.ActivateConfig(config_name)
return config_name
def _PickProject(self, preselected=None):
"""Allows user to select a project.
Args:
preselected: str, use this value if not None
Returns:
str, project_id or None if was not selected.
"""
project_id = init_util.PickProject(preselected=preselected)
if project_id is not None:
properties.PersistProperty(properties.VALUES.core.project, project_id)
log.status.write('Your current project has been set to: [{0}].\n\n'
.format(project_id))
return project_id
def _PickDefaultRegionAndZone(self):
"""Pulls metadata properties for region and zone and sets them in gcloud."""
try:
# Use --quiet flag to skip the enable api prompt.
project_info = self._RunCmd(['compute', 'project-info', 'describe'],
params=['--quiet'])
except Exception: # pylint:disable=broad-except
log.status.write("""\
Not setting default zone/region (this feature makes it easier to use
[gcloud compute] by setting an appropriate default value for the
--zone and --region flags).
See https://cloud.google.com/compute/docs/gcloud-compute section on how to set
default compute region and zone manually. If you would like [gcloud init] to be
able to do this for you the next time you run it, make sure the
Compute Engine API is enabled for your project on the
https://console.developers.google.com/apis page.
""")
return None
default_zone = None
default_region = None
if project_info is not None:
project_info = resource_projector.MakeSerializable(project_info)
metadata = project_info.get('commonInstanceMetadata', {})
for item in metadata.get('items', []):
if item['key'] == 'google-compute-default-zone':
default_zone = item['value']
elif item['key'] == 'google-compute-default-region':
default_region = item['value']
# We could not determine zone automatically. Before offering choices for
# zone and/or region, ask the user whether to proceed.
if not default_zone:
answer = console_io.PromptContinue(
prompt_string=('Do you want to configure a default Compute '
'Region and Zone?'))
if not answer:
return
# Same logic applies to region and zone properties.
def SetProperty(name, default_value, list_command):
"""Set named compute property to default_value or get via list command."""
if not default_value:
values = self._RunCmd(list_command)
if values is None:
return
values = list(values)
message = (
'Which Google Compute Engine {0} would you like to use as project '
'default?\n'
'If you do not specify a {0} via a command line flag while working '
'with Compute Engine resources, the default is assumed.').format(
name)
idx = console_io.PromptChoice(
[value['name'] for value in values]
+ ['Do not set default {0}'.format(name)],
message=message, prompt_string=None, allow_freeform=True,
freeform_suggester=usage_text.TextChoiceSuggester())
if idx is None or idx == len(values):
return
default_value = values[idx]
properties.PersistProperty(properties.VALUES.compute.Property(name),
default_value['name'])
log.status.write('Your project default Compute Engine {0} has been set '
'to [{1}].\nYou can change it by running '
'[gcloud config set compute/{0} NAME].\n\n'
.format(name, default_value['name']))
return default_value
if default_zone:
default_zone = self._RunCmd(['compute', 'zones', 'describe'],
[default_zone])
zone = SetProperty('zone', default_zone, ['compute', 'zones', 'list'])
if zone and not default_region:
default_region = zone['region']
if default_region:
default_region = self._RunCmd(['compute', 'regions', 'describe'],
[default_region])
SetProperty('region', default_region, ['compute', 'regions', 'list'])
def _Summarize(self, configuration_name):
log.status.Print('Your Google Cloud SDK is configured and ready to use!\n')
log.status.Print(
'* Commands that require authentication will use {0} by default'
.format(properties.VALUES.core.account.Get()))
project = properties.VALUES.core.project.Get()
if project:
log.status.Print(
'* Commands will reference project `{0}` by default'
.format(project))
region = properties.VALUES.compute.region.Get()
if region:
log.status.Print(
'* Compute Engine commands will use region `{0}` by default'
.format(region))
zone = properties.VALUES.compute.zone.Get()
if zone:
log.status.Print(
'* Compute Engine commands will use zone `{0}` by default\n'
.format(zone))
log.status.Print(
'Run `gcloud help config` to learn how to change individual settings\n')
log.status.Print(
'This gcloud configuration is called [{config}]. You can create '
'additional configurations if you work with multiple accounts and/or '
'projects.'.format(config=configuration_name))
log.status.Print('Run `gcloud topic configurations` to learn more.\n')
log.status.Print('Some things to try next:\n')
log.status.Print(
'* Run `gcloud --help` to see the Cloud Platform services you can '
'interact with. And run `gcloud help COMMAND` to get help on any '
'gcloud command.')
log.status.Print(
'* Run `gcloud topic -h` to learn about advanced features of the SDK '
'like arg files and output formatting')
def _CreateConfiguration(self):
configuration_name = console_io.PromptResponse(
'Enter configuration name. Names start with a lower case letter and '
'contain only lower case letters a-z, digits 0-9, and hyphens \'-\': ')
configuration_name = configuration_name.strip()
named_configs.ConfigurationStore.CreateConfig(configuration_name)
named_configs.ConfigurationStore.ActivateConfig(configuration_name)
named_configs.ActivePropertiesFile.Invalidate()
return configuration_name
def _CreateBotoConfig(self):
gsutil_path = _FindGsutil()
if not gsutil_path:
log.debug('Unable to find [gsutil]. Not configuring default .boto '
'file')
return
boto_path = platforms.ExpandHomePath(os.path.join('~', '.boto'))
if os.path.exists(boto_path):
log.debug('Not configuring default .boto file. File already '
'exists at [{boto_path}].'.format(boto_path=boto_path))
return
# 'gsutil config -n' creates a default .boto file that the user can read and
# modify.
command_args = ['config', '-n', '-o', boto_path]
if platforms.OperatingSystem.Current() == platforms.OperatingSystem.WINDOWS:
gsutil_args = execution_utils.ArgsForCMDTool(gsutil_path,
*command_args)
else:
gsutil_args = execution_utils.ArgsForExecutableTool(gsutil_path,
*command_args)
return_code = execution_utils.Exec(gsutil_args, no_exit=True,
out_func=log.file_only_logger.debug,
err_func=log.file_only_logger.debug)
if return_code == 0:
log.status.write("""\
Created a default .boto configuration file at [{boto_path}]. See this file and
[https://cloud.google.com/storage/docs/gsutil/commands/config] for more
information about configuring Google Cloud Storage.
""".format(boto_path=boto_path))
else:
log.status.write('Error creating a default .boto configuration file. '
'Please run [gsutil config -n] if you would like to '
'create this file.\n')
def _CleanCurrentConfiguration(self):
properties.PersistProperty(properties.VALUES.core.account, None)
properties.PersistProperty(properties.VALUES.core.project, None)
properties.PersistProperty(properties.VALUES.compute.region, None)
properties.PersistProperty(properties.VALUES.compute.zone, None)
named_configs.ActivePropertiesFile.Invalidate()
def _RunCmd(self, cmd, params=None, disable_user_output=True):
if not self._cli_power_users_only.IsValidCommand(cmd):
log.info('Command %s does not exist.', cmd)
return None
if params is None:
params = []
args = cmd + params
log.info('Executing: [gcloud %s]', ' '.join(args))
try:
# Disable output from individual commands, so that we get
# command run results, and don't clutter output of init.
if disable_user_output:
args.append('--no-user-output-enabled')
if (properties.VALUES.core.verbosity.Get() is None and
disable_user_output):
# Unless user explicitly set verbosity, suppress from subcommands.
args.append('--verbosity=none')
if properties.VALUES.core.log_http.GetBool():
args.append('--log-http')
# TODO(b/38338044): Remove usage of ExecuteCommandDoNotUse
return resource_projector.MakeSerializable(
self.ExecuteCommandDoNotUse(args))
except SystemExit as exc:
log.info('[%s] has failed\n', ' '.join(cmd + params))
raise c_exc.FailedSubCommand(cmd + params, exc.code)
except BaseException:
log.info('Failed to run [%s]\n', ' '.join(cmd + params))
raise
def _FindGsutil():
"""Finds the bundled gsutil wrapper.
Returns:
The path to gsutil.
"""
sdk_bin_path = config.Paths().sdk_bin_path
if not sdk_bin_path:
return
if platforms.OperatingSystem.Current() == platforms.OperatingSystem.WINDOWS:
gsutil = 'gsutil.cmd'
else:
gsutil = 'gsutil'
return os.path.join(sdk_bin_path, gsutil)
|
Init
|
style.ts
|
import styled, { css } from 'styled-components';
|
const toastTypeValidations = {
info: css`
background: #ebfaff;
color: #3172b7;
`,
success: css`
background: #e6fffa;
color: #2e656a;
`,
error: css`
background: #fddede;
color: #c53030;
`,
};
interface ToastProps {
type?: 'success' | 'info' | 'error';
}
export const Container = styled(animated.div)<ToastProps>`
width: 300px;
display: flex;
position: relative;
margin: 20px;
padding: 16px;
border-radius: 5px;
box-shadow: 2px 2px 0 rgba(0, 0, 0, 0.2);
${props => toastTypeValidations[props.type || 'info']}
& + div {
margin-top: 10px;
}
> svg {
margin: 4px 12px 0 0;
}
div {
flex: 1;
p {
margin-top: 4px;
font-size: 14px;
opacity: 0.8;
line-height: 20px;
}
}
button {
position: absolute;
right: 16px;
color: inherit;
background: transparent;
border: 0;
opacity: 0.6;
}
`;
|
import { animated } from 'react-spring';
|
convert.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# modified from:
# https://gist.github.com/rotemtam/88d9a4efae243fc77ed4a0f9917c8f6c
import os
import glob
import click
import pandas as pd
import xml.etree.ElementTree as ET
def xml_to_csv(path: str) -> pd.DataFrame:
|
def xml_to_csv_file(infile: str, outfile: str):
xml_df = xml_to_csv(infile)
print(xml_df)
xml_df.to_csv(outfile, index=False)
@click.group(help='Converts Pascal VOC XML annotations to CSV')
def cli():
pass
@cli.command()
@click.option('--dir', type=str,
help='Name of source directory,' +
' will convert all xml files in it')
@click.option('--out_dir', type=str, help='Name of the destination directory')
def directory(dir, out_dir):
files_convert = [x for x in os.listdir(dir) if x.endswith("xml")]
for xml_file in files_convert:
base = os.path.basename(xml_file)
filename = os.path.splitext(base)[0]
out_filename = filename + ".csv"
out_path = os.path.join(out_dir, out_filename)
xml_to_csv_file(os.path.join(dir, xml_file), out_path)
@cli.command()
@click.option('--file', type=str, help='File to be converted to csv')
@click.option('--out', type=str, help='Name of the destination file')
def xml(file, out):
xml_to_csv_file(file, out)
if __name__ == '__main__':
cli()
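# Example invocations (the paths are illustrative):
#   python convert.py directory --dir ./annotations --out_dir ./csv
#   python convert.py xml --file ./annotations/img1.xml --out ./csv/img1.csv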
|
xml_list = []
for xml_file in glob.glob(path):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
bbx = member.find('bndbox')
xmin = int(bbx.find('xmin').text)
ymin = int(bbx.find('ymin').text)
xmax = int(bbx.find('xmax').text)
ymax = int(bbx.find('ymax').text)
label = member.find('name').text
# The columns are organized as the csv required by keras-retinanet
# https://github.com/fizyr/keras-retinanet#csv-datasets
# path/to/image.jpg,x1,y1,x2,y2,class_name
value = (root.find('filename').text,
# int(root.find('size')[0].text),
# int(root.find('size')[1].text),
xmin, ymin,
xmax, ymax,
label)
xml_list.append(value)
column_name = ['filename',
# 'width',
# 'height',
'xmin',
'ymin',
'xmax',
'ymax',
'class']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
|
test_del_contact.py
|
from models.contact import Contact
import random
import allure
|
if len(db.get_contact_list()) == 0:
app.contact.creation(Contact(first_name="test"))
with allure.step('Given a contact list and contact to delete'):
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
with allure.step('When I delete a contact %s from the list' % contact):
app.contact.delete_contact_by_id(contact.id)
with allure.step('Then the new contact list is equal to the old list without the deleted contact'):
new_contacts = db.get_contact_list()
assert len(old_contacts) - 1 == len(new_contacts)
old_contacts.remove(contact)
assert old_contacts == new_contacts
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.group.get_contact_list(), key=Contact.id_or_max)
|
def test_delete_some_contact(app, db, check_ui):
with allure.step('Check contact'):
|
sys-project.js
|
import Mock from 'mockjs'
// Generate the project data list
var dataList = []
for (let i = 0; i < Math.floor(Math.random() * 10 + 1); i++) {
dataList.push(Mock.mock({
'id': '@increment',
'name': '@name',
'description': 'this is a test program',
'positionName': '北京C区',
'positionCoordinate': '12N, 23S',
'owner': 'cb84bc5a-5d38-4711-8e28-8a4298da1050',
    'createTimestamp': '@datetime'
}))
}
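// Mock.js placeholders: '@increment' yields an auto-incrementing id,
// '@name' a random English name, and '@datetime' a random datetime string;
// the remaining fields are fixed sample values.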
// Get the project list
export function list () {
return {
// isOpen: false,
url: '/device/get/program',
type: 'get',
data: {
'msg': 'success',
'code': 200,
'data': {
'totalCount': dataList.length,
'pageSize': 10,
'totalPage': 1,
'currPage': 1,
'data': dataList
}
}
}
}
// Get project info
export function info () {
return {
// isOpen: false,
url: '/sys/project/info',
type: 'get',
data: {
'msg': 'success',
'code': 200,
'user': dataList[0]
}
}
}
// Add a project
export function add () {
return {
// isOpen: false,
url: '/sys/project/save',
type: 'post',
data: {
'msg': 'success',
'code': 200
}
}
}
// Update a project
export function update () {
return {
// isOpen: false,
url: '/sys/project/update',
type: 'post',
|
}
}
}
// Delete a project
export function del () {
return {
// isOpen: false,
url: '/sys/project/delete',
type: 'post',
data: {
'msg': 'success',
'code': 200
}
}
}
|
data: {
'msg': 'success',
'code': 200
|
check_installed_cabal.rs
|
use crate::{async_command_pipe, check_env, file_exists, install_cabal};
use anyhow::Result;
use async_recursion::async_recursion;
#[async_recursion]
pub async fn check_installed_cabal() -> Result<String> {
let cabal = check_env("CABAL_BIN")?;
if file_exists(&cabal) {
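        // Builds `cabal -V | head -n1 | awk '{print $3}'`, which prints just
        // the version number from the first line of `cabal -V` output.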
let cmd = format!("{} -V | head -n1 | awk {}", cabal, "'{print $3}'");
let installed_cabal = async_command_pipe(&cmd).await?;
let installed_cabal = installed_cabal.trim().to_string();
Ok(installed_cabal)
} else {
install_cabal().await?;
check_installed_cabal().await
}
}
|
#[cfg(test)]
mod test {
// use crate::check_installed_cabal;
#[tokio::test]
#[ignore]
async fn test_check_installed_cabal() {
unimplemented!();
}
}
| |
tunnel.go
|
package gateway
import (
"io"
"sync"
"golang.org/x/crypto/ssh"
)
// a tunnel within a ssh connection
type tunnel struct {
connection *connection
channel ssh.Channel
channelType string
extraData []byte
active bool
done chan struct{}
closeOnce sync.Once
metadata map[string]interface{}
}
func newTunnel(connection *connection, channel ssh.Channel, channelType string, extraData []byte, metadata map[string]interface{}) *tunnel {
log.Infof("%s: new tunnel: type = %s, metadata = %v", connection, channelType, metadata)
self := &tunnel{
connection: connection,
channel: channel,
channelType: channelType,
extraData: extraData,
done: make(chan struct{}),
metadata: metadata,
}
connection.addTunnel(self)
return self
}
|
close(self.done)
})
}
func (self *tunnel) handleRequests(requests <-chan *ssh.Request) {
defer func() {
self.connection.deleteTunnel(self)
if err := self.channel.Close(); err != nil {
log.Warningf("%s: failed to close tunnel: %s", self.connection, err)
}
log.Infof("%s: tunnel closed: type = %s, metadata = %v", self.connection, self.channelType, self.metadata)
}()
for {
select {
case <-self.done:
return
case request, ok := <-requests:
if !ok {
return
}
// log.Debugf("%s: request received: type = %s, want_reply = %v, payload = %d", self.connection, request.Type, request.WantReply, len(request.Payload))
// reply to client
if request.WantReply {
if err := request.Reply(false, nil); err != nil {
log.Errorf("%s: failed to reply to request: %s", self.connection, err)
return
}
}
}
}
}
func (self *tunnel) handleTunnel(otherTunnel *tunnel) {
defer otherTunnel.close()
defer self.close()
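	// copy data in both directions between the two channels; each direction
	// runs in its own goroutine and signals completion via its done channel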
done1 := make(chan struct{})
go func() {
defer close(done1)
io.Copy(self.channel, otherTunnel.channel)
}()
done2 := make(chan struct{})
go func() {
defer close(done2)
io.Copy(otherTunnel.channel, self.channel)
}()
// wait until one of them is done
select {
case <-done1:
case <-done2:
}
}
type tunnelStatus struct {
Type string `json:"type,omitempty"`
Metadata map[string]interface{} `json:"metadata,omitempty"`
}
func (self *tunnel) gatherStatus() *tunnelStatus {
return &tunnelStatus{
Type: self.channelType,
Metadata: self.metadata,
}
}
|
// close the tunnel
func (self *tunnel) close() {
self.closeOnce.Do(func() {
|
sort.rs
|
use crate::{filter::select_from_json_object, join::OrderedValue};
use itertools::Itertools;
use ndjson_common::{
error::NdJsonSpatialError, json_selector_parser::Selector, ndjson::NdjsonReader,
};
use std::{
cmp::Ordering,
io::{BufRead, Write},
};
pub fn sort<IN: BufRead, OUT: Write>(
input: IN,
output: &mut OUT,
selectors: Vec<(Vec<Selector>, bool)>,
) -> Result<(), NdJsonSpatialError> {
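    // Records are compared selector by selector; `Ordering::then` keeps the
    // first non-equal result, giving a lexicographic multi-key sort where
    // earlier selectors take precedence over later ones.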
for item in NdjsonReader::new(input).flatten().sorted_by(|left, right| {
selectors
.iter()
.map(|(selector, ascending)| {
if *ascending {
Ord::cmp(
&select_from_json_object(left.clone(), selector).map(OrderedValue::from),
&select_from_json_object(right.clone(), selector).map(OrderedValue::from),
)
} else {
Ord::cmp(
&select_from_json_object(right.clone(), selector).map(OrderedValue::from),
&select_from_json_object(left.clone(), selector).map(OrderedValue::from),
)
}
})
.fold(Ordering::Equal, |acc, item| acc.then(item))
}) {
writeln!(output, "{}", item).expect("Unable to write to output");
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_sort_already_sorted() {
let input = "{\"bar\":3,\"foo\":4}\n{\"bar\":4,\"foo\":7}\n";
let mut output = vec![];
sort(
input.as_bytes(),
&mut output,
vec![(vec![Selector::Identifier("bar".into())], true)],
)
.unwrap();
assert_eq!(output, input.as_bytes());
let mut output = vec![];
sort(
input.as_bytes(),
&mut output,
vec![
(vec![Selector::Identifier("foo".into())], true),
(vec![Selector::Identifier("bar".into())], true),
],
)
.unwrap();
assert_eq!(output, input.as_bytes());
}
#[test]
fn test_sort_descending()
|
}
|
{
let input = "{\"bar\":3,\"foo\":4}\n{\"bar\":4,\"foo\":7}\n";
let mut output = vec![];
sort(
input.as_bytes(),
&mut output,
vec![(vec![Selector::Identifier("bar".into())], false)],
)
.unwrap();
assert_eq!(
output,
"{\"bar\":4,\"foo\":7}\n{\"bar\":3,\"foo\":4}\n".as_bytes()
);
}
|
fetch.rs
|
use crate::utils::client::StudioClientConfig;
use crate::{command::RoverOutput, Result};
use rover_client::operations::supergraph::fetch::{self, SupergraphFetchInput};
use rover_client::shared::GraphRef;
use ansi_term::Colour::{Cyan, Yellow};
use serde::Serialize;
use structopt::StructOpt;
#[derive(Debug, Serialize, StructOpt)]
pub struct Fetch {
/// <NAME>@<VARIANT> of graph in Apollo Studio to fetch from.
/// @<VARIANT> may be left off, defaulting to @current
#[structopt(name = "GRAPH_REF")]
#[serde(skip_serializing)]
graph: GraphRef,
/// Name of configuration profile to use
#[structopt(long = "profile", default_value = "default")]
#[serde(skip_serializing)]
profile_name: String,
}
impl Fetch {
pub fn run(&self, client_config: StudioClientConfig) -> Result<RoverOutput> {
let client = client_config.get_authenticated_client(&self.profile_name)?;
let graph_ref = self.graph.to_string();
eprintln!(
"Fetching supergraph SDL from {} using credentials from the {} profile.",
Cyan.normal().paint(&graph_ref),
Yellow.normal().paint(&self.profile_name)
);
let fetch_response = fetch::run(
SupergraphFetchInput {
graph_ref: self.graph.clone(),
},
&client,
)?;
Ok(RoverOutput::FetchResponse(fetch_response))
|
}
}
|
|
UserController.js
|
// Code responsible for handling the req/res passed in by the route
/* yup (a schema validation framework) has no default export,
so we import everything from it as follows: */
import * as Yup from 'yup'
import User from '../models/User'
class Us
|
  // store: responsible for creating a user
async store(req, res) {
    // FIRST OF ALL, VALIDATIONS:
    /* Yup expects an object (req.body) with this shape: */
const schema = Yup.object().shape({
name: Yup.string().required(),
email: Yup.string().email().required(),
      password: Yup.string().required().min(6), // min length of 6
})
    // if validation fails, bail out:
if (!(await schema.isValid(req.body))) {
return res.status(400).json({ error: 'Validations fails.' })
}
    // CHECK: does the email in the request already exist?
const userExists = await User.findOne({ where: { email: req.body.email } })
if (userExists) {
return res.status(400).json({ error: 'User already exists.' })
}
const { id, name, email, provider } = await User.create(req.body)
    // only send a subset of the user's data back to the front end:
return res.json({
id,
name,
email,
provider,
})
}
  // update: responsible for editing user data
async update(req, res) {
    // FIRST OF ALL, VALIDATIONS (copied from store above):
    /* Yup expects an object (req.body) with this shape: */
const schema = Yup.object().shape({
      /* no required(): the user does not edit the name every time */
      name: Yup.string(),
      /* no required() here either: the email is not edited every time */
      email: Yup.string().email(),
      /* password only needs to be required when the user provides
      oldPassword (providing it means they want to change it) */
      oldPassword: Yup.string().min(6),
      /* the field below depends on oldPassword to become required */
      password: Yup.string().min(6).when( // when..
        'oldPassword', (oldPassword, field) =>
          oldPassword ? field.required() : field
        /* about the conditional above: if oldPassword was provided [?],
        the field (password) is required; otherwise [:] it stays optional */
      ),
      /* the field below handles password confirmation */
      confirmPassword: Yup.string().when( // when..
        'password', (password, field) =>
          password ? field.required().oneOf([Yup.ref('password')]) : field
        /* about the conditional above: if password was provided [?],
        confirmPassword is required and .oneOf ties it to password via
        Yup.ref, so both must match; otherwise [:] it stays optional */
      )
})
    // if validation fails, bail out:
if (!(await schema.isValid(req.body))) {
return res.status(400).json({ error: 'Validations fails.' })
}
    // grab what we need from the JSON body:
const { email, oldPassword } = req.body
    // fetch the user to be modified from the database:
const user = await User.findByPk(req.userId)
    // CHECK: does the request email differ from the stored one (if so, the user wants to change it)?
if (email !== user.email) {
const userExists = await User.findOne({ where: { email } })
if (userExists) {
return res.status(400).json({ error: 'User already exists.' })
}
}
    // CHECK:
    /* if oldPassword was sent (the user wants to change the password) AND
    it does not match the current password, then.. */
if (oldPassword && !(await user.checkPassword(oldPassword))) {
return res.status(401).json({ error: 'Password does not match' })
}
    // With everything validated, perform the update based on req.body
const { id, name, provider } = await user.update(req.body)
return res.json({
id,
name,
email,
provider
})
}
  /* REMEMBER: checkPassword() uses bcrypt under the hood */
}
export default new UserController();
|
erController {
|
[...nextauth].ts
|
// https://github.com/nextauthjs/next-auth/issues/1162
// Google Interfaces: https://github.com/Marcus-Rise/BuyList/tree/master/src/server/google
import type { User, NextAuthOptions } from 'next-auth'
import NextAuth from 'next-auth'
// @ts-ignore
import GoogleProvider from 'next-auth/providers/google'
import type { NextApiHandler } from 'next'
type GenericObject<T = unknown> = T & {
[key: string]: any // eslint-disable-line @typescript-eslint/no-explicit-any
}
interface AuthToken {
user: User
accessToken: string
accessTokenExpires?: number
expires_at?: number
refreshToken: string
error?: string
}
interface JwtInterface {
token: AuthToken
user: User
account: GenericObject
}
/**
* Takes a token, and returns a new token with updated
* `accessToken` and `accessTokenExpires`. If an error occurs,
* returns the old token and an error property
*/
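// Implements the OAuth 2.0 refresh_token grant against Google's token
// endpoint; the new expiry is computed from the `expires_in` field with a
// small safety buffer (see below).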
const refreshAccessToken = async (
payload: AuthToken,
clientId: string,
clientSecret: string,
|
url.searchParams.set('client_secret', clientSecret)
url.searchParams.set('grant_type', 'refresh_token')
url.searchParams.set('refresh_token', payload.refreshToken)
const response = await fetch(url.toString(), {
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
},
method: 'POST',
})
const refreshToken = await response.json()
if (!response.ok) {
throw refreshToken
}
// Give a 10 sec buffer
const now = new Date()
const accessTokenExpires = now.setSeconds(
now.getSeconds() + parseInt(refreshToken.expires_in) - 10,
)
return {
...payload,
accessToken: refreshToken.access_token,
accessTokenExpires,
refreshToken: payload.refreshToken,
}
} catch (error) {
console.error('ERR', error)
return {
...payload,
error: 'RefreshAccessTokenError',
}
}
}
const AuthHandler: NextApiHandler = (req, res) => {
const scopes = [
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/userinfo.profile',
'https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/drive.appdata',
'https://www.googleapis.com/auth/drive.activity',
'https://www.googleapis.com/auth/directory.readonly',
]
const JWT_SECRET = String(process.env.NEXTAUTH_JWT_SECRET)
const options: NextAuthOptions = {
providers: [
GoogleProvider({
clientId: String(process.env.GOOGLE_ID),
clientSecret: String(process.env.GOOGLE_SECRET),
authorization: {
url: 'https://accounts.google.com/o/oauth2/v2/auth',
params: {
response_type: 'code',
login_hint: '@newtelco.de',
// prompt: 'consent',
include_granted_scopes: 'true',
access_type: 'offline',
scope: scopes.join(' '),
},
},
}),
],
secret: JWT_SECRET,
jwt: {
// encryption: true,
secret: JWT_SECRET,
},
debug: process.env.NODE_ENV === 'development',
callbacks: {
// @ts-ignore
async jwt({ token, user, account }: JwtInterface): Promise<AuthToken> {
let res: AuthToken
const now = Date.now()
// Signing in
if (account && user) {
const accessToken = account.access_token
const refreshToken = account.refresh_token
res = {
accessToken,
accessTokenExpires: account.expires_at,
refreshToken,
user,
}
      } else if (token.accessTokenExpires == null || now < token.accessTokenExpires) {
// Subsequent use of JWT, the user has been logged in before
// access token has not expired yet
res = token
} else {
// access token has expired, try to update it
res = await refreshAccessToken(
token,
String(process.env.GOOGLE_ID),
String(process.env.GOOGLE_SECRET),
)
}
return res
},
// @ts-ignore
async session({
token,
}: {
token: GenericObject
}): Promise<GenericObject> {
return Promise.resolve(token)
},
},
}
return NextAuth(req, res, options)
}
export default AuthHandler
|
): Promise<AuthToken> => {
try {
const url = new URL('https://accounts.google.com/o/oauth2/token')
url.searchParams.set('client_id', clientId)
|
test_netmiko_commit.py
|
#!/usr/bin/env python
"""
test_ssh_connect: verify ssh connectivity
test_config_mode: verify enter config mode
test_commit_base: test std .commit()
test_commit_confirm: test commit with confirm
test_confirm_delay: test commit-confirm with non-std delay
test_no_confirm: test commit-confirm with no confirm
test_commit_check: test commit check
test_commit_comment: test commit with comment
test_commit_andquit: test commit andquit
test_exit_config_mode: verify exit config mode
test_disconnect: cleanly disconnect the SSH session
"""
import time
import re
import random
import string
def gen_random(N=6):
return "".join(
[
random.choice(
string.ascii_lowercase + string.ascii_uppercase + string.digits
)
for x in range(N)
]
)
def retrieve_commands(commands):
"""
Retrieve context needed for a set of commit actions
"""
config_commands = commands["config"]
support_commit = commands.get("support_commit")
config_verify = commands["config_verification"]
return (config_commands, support_commit, config_verify)
def setup_initial_state(net_connect, commands, expected_responses):
"""
Setup initial configuration prior to change so that config change can be verified
"""
# Setup initial state
config_commands, support_commit, config_verify = retrieve_commands(commands)
setup_base_config(net_connect, config_commands[0:1])
cmd_response = expected_responses.get("cmd_response_init", config_commands[0])
initial_state = config_change_verify(net_connect, config_verify, cmd_response)
assert initial_state is True
return config_commands, support_commit, config_verify
def setup_base_config(net_connect, config_changes):
"""
Send set of config commands and commit
"""
net_connect.send_config_set(config_changes)
net_connect.commit()
def config_change_verify(net_connect, verify_cmd, cmd_response):
"""
Send verify_cmd down channel, verify cmd_response in output
"""
config_commands_output = net_connect.send_command_expect(verify_cmd)
if cmd_response in config_commands_output:
return True
else:
return False
def test_ssh_connect(net_connect, commands, expected_responses):
"""
Verify the connection was established successfully
"""
show_version = net_connect.send_command_expect(commands["version"])
assert expected_responses["version_banner"] in show_version
def test_config_mode(net_connect, commands, expected_responses):
"""
Test enter config mode
"""
net_connect.config_mode()
    assert net_connect.check_config_mode() is True
def test_commit_base(net_connect, commands, expected_responses):
"""
Test .commit() with no options
"""
# Setup the initial config state
config_commands, support_commit, config_verify = setup_initial_state(
net_connect, commands, expected_responses
)
# Perform commit test
net_connect.send_config_set(config_commands)
net_connect.commit()
cmd_response = expected_responses.get("cmd_response_final", config_commands[-1])
final_state = config_change_verify(net_connect, config_verify, cmd_response)
assert final_state is True
def test_commit_confirm(net_connect, commands, expected_responses):
"""
    Test commit with confirm
"""
# Setup the initial config state
config_commands, support_commit, config_verify = setup_initial_state(
net_connect, commands, expected_responses
)
# Perform commit-confirm test
net_connect.send_config_set(config_commands)
if net_connect.device_type == "cisco_xr":
net_connect.commit(confirm=True, confirm_delay=60)
else:
net_connect.commit(confirm=True)
cmd_response = expected_responses.get("cmd_response_final", config_commands[-1])
final_state = config_change_verify(net_connect, config_verify, cmd_response)
assert final_state is True
# Perform confirm
net_connect.commit()
def test_confirm_delay(net_connect, commands, expected_responses):
"""
Test commit with confirm and non-default delay
"""
# Setup the initial config state
config_commands, support_commit, config_verify = setup_initial_state(
net_connect, commands, expected_responses
)
# Perform commit-confirm test
net_connect.send_config_set(config_commands)
if net_connect.device_type == "cisco_xr":
net_connect.commit(confirm=True, confirm_delay=60)
else:
net_connect.commit(confirm=True, confirm_delay=5)
cmd_response = expected_responses.get("cmd_response_final", config_commands[-1])
final_state = config_change_verify(net_connect, config_verify, cmd_response)
assert final_state is True
# Perform confirm
net_connect.commit()
def test_no_confirm(net_connect, commands, expected_responses):
"""
Perform commit-confirm, but don't confirm (verify rollback)
"""
# Setup the initial config state
config_commands, support_commit, config_verify = setup_initial_state(
net_connect, commands, expected_responses
)
# Perform commit-confirm test
net_connect.send_config_set(config_commands)
if net_connect.device_type == "cisco_xr":
net_connect.commit(confirm=True, confirm_delay=30)
else:
net_connect.commit(confirm=True, confirm_delay=1)
cmd_response = expected_responses.get("cmd_response_final", config_commands[-1])
final_state = config_change_verify(net_connect, config_verify, cmd_response)
assert final_state is True
time.sleep(130)
# Verify rolled back to initial state
cmd_response = expected_responses.get("cmd_response_init", config_commands[0])
init_state = config_change_verify(net_connect, config_verify, cmd_response)
assert init_state is True
def test_clear_msg(net_connect, commands, expected_responses):
"""
IOS-XR generates the following message upon a failed commit
One or more commits have occurred from other
configuration sessions since this session started
or since the last commit was made from this session.
You can use the 'show configuration commit changes'
command to browse the changes.
Do you wish to proceed with this commit anyway? [no]: yes
Clear it
"""
# Setup the initial config state
config_commands, support_commit, config_verify = retrieve_commands(commands)
if net_connect.device_type == "cisco_xr":
output = net_connect.send_config_set(config_commands)
output += net_connect.send_command_expect(
"commit", expect_string=r"Do you wish to"
)
output += net_connect.send_command_expect("yes", auto_find_prompt=False)
assert True is True
def test_commit_check(net_connect, commands, expected_responses):
"""
Test commit check
"""
# IOS-XR does not support commit check
if net_connect.device_type == "cisco_xr":
assert True is True
else:
# Setup the initial config state
config_commands, support_commit, config_verify = setup_initial_state(
net_connect, commands, expected_responses
)
# Perform commit-confirm test
net_connect.send_config_set(config_commands)
net_connect.commit(check=True)
# Verify at initial state
cmd_response = expected_responses.get("cmd_response_init", config_commands[0])
init_state = config_change_verify(net_connect, config_verify, cmd_response)
assert init_state is True
rollback = commands.get("rollback")
net_connect.send_config_set([rollback])
def test_commit_comment(net_connect, commands, expected_responses):
"""
Test commit with comment
"""
# Setup the initial config state
config_commands, support_commit, config_verify = setup_initial_state(
net_connect, commands, expected_responses
)
# Perform commit with comment
net_connect.send_config_set(config_commands)
net_connect.commit(comment="Unit test on commit with comment")
# Verify change was committed
cmd_response = expected_responses.get("cmd_response_final", config_commands[-1])
final_state = config_change_verify(net_connect, config_verify, cmd_response)
assert final_state is True
# Verify commit comment
commit_verification = commands.get("commit_verification")
tmp_output = net_connect.send_command(commit_verification)
if net_connect.device_type == "cisco_xr":
commit_comment = tmp_output
else:
commit_comment = tmp_output.split("\n")[2]
assert expected_responses.get("commit_comment") in commit_comment.strip()
def test_commit_andquit(net_connect, commands, expected_responses):
"""
Test commit and immediately quit configure mode
"""
# IOS-XR does not support commit and quit
if net_connect.device_type == "cisco_xr":
assert True is True
else:
# Setup the initial config state
config_commands, support_commit, config_verify = setup_initial_state(
net_connect, commands, expected_responses
)
# Execute change and commit
net_connect.send_config_set(config_commands)
net_connect.commit(and_quit=True)
# Verify change was committed
cmd_response = expected_responses.get("cmd_response_final", config_commands[-1])
config_verify = "show configuration | match archive"
final_state = config_change_verify(net_connect, config_verify, cmd_response)
assert final_state is True
def test_commit_label(net_connect, commands, expected_responses):
"""Test commit label for IOS-XR."""
if net_connect.device_type != "cisco_xr":
assert True is True
else:
# Setup the initial config state
config_commands, support_commit, config_verify = setup_initial_state(
net_connect, commands, expected_responses
)
# Execute change and commit
net_connect.send_config_set(config_commands)
label = "test_lbl_" + gen_random()
net_connect.commit(label=label)
# Verify commit label
commit_verification = commands.get("commit_verification")
tmp_output = net_connect.send_command(commit_verification)
match = re.search(r"Label: (.*)", tmp_output)
response_label = match.group(1)
response_label = response_label.strip()
assert label == response_label
def test_commit_label_comment(net_connect, commands, expected_responses):
"""
Test commit label for IOS-XR with comment
"""
# IOS-XR only test
if net_connect.device_type != "cisco_xr":
assert True is True
else:
# Setup the initial config state
config_commands, support_commit, config_verify = setup_initial_state(
net_connect, commands, expected_responses
)
# Execute change and commit
net_connect.send_config_set(config_commands)
label = "test_lbl_" + gen_random()
comment = "Test with comment and label"
net_connect.commit(label=label, comment=comment)
# Verify commit label
commit_verification = commands.get("commit_verification")
tmp_output = net_connect.send_command(commit_verification)
match = re.search(r"Label: (.*)", tmp_output)
response_label = match.group(1)
response_label = response_label.strip()
assert label == response_label
match = re.search(r"Comment: (.*)", tmp_output)
response_comment = match.group(1)
response_comment = response_comment.strip()
response_comment = response_comment.strip('"')
assert comment == response_comment
def test_commit_label_confirm(net_connect, commands, expected_responses):
"""
Test commit label for IOS-XR with confirm
"""
# IOS-XR only test
if net_connect.device_type != "cisco_xr":
assert True is True
else:
# Setup the initial config state
config_commands, support_commit, config_verify = setup_initial_state(
net_connect, commands, expected_responses
)
# Execute change and commit
net_connect.send_config_set(config_commands)
label = "test_lbl_" + gen_random()
net_connect.commit(label=label, confirm=True, confirm_delay=120)
cmd_response = expected_responses.get("cmd_response_final", config_commands[-1])
final_state = config_change_verify(net_connect, config_verify, cmd_response)
assert final_state is True
# Verify commit label
commit_verification = commands.get("commit_verification")
tmp_output = net_connect.send_command(commit_verification)
match = re.search(r"Label: (.*)", tmp_output)
response_label = match.group(1)
response_label = response_label.strip()
assert label == response_label
net_connect.commit()
def test_exit_config_mode(net_connect, commands, expected_responses):
"""
Test exit config mode
"""
net_connect.exit_config_mode()
time.sleep(1)
    assert net_connect.check_config_mode() is False
def test_disconnect(net_connect, commands, expected_responses):
    """
    Terminate the SSH session
    """
    net_connect.disconnect()
|
ex052.py
|
n = int(input('Enter an integer: '))
cont = 0
for c in range(1, n + 1):
    if n % c == 0:
        print('\033[34m{}\033[m'.format(c), end=' ')
        cont += 1
    else:
        print('\033[31m{}\033[m'.format(c), end=' ')
print('\nThe number {} was divisible {} times'.format(n, cont))
if cont == 2:
    print('Therefore, the number {} is PRIME'.format(n))
else:
    print('Therefore, the number {} is NOT PRIME'.format(n))
|
|
index.js
|
module.exports = require("./lib/redirect");
|
||
project_resource.go
|
package handlers
import (
"encoding/base64"
"fmt"
"io/ioutil"
"strings"
"github.com/go-openapi/runtime/middleware"
"github.com/go-openapi/swag"
utils "github.com/keptn/go-utils/pkg/lib"
"github.com/keptn/keptn/configuration-service/common"
"github.com/keptn/keptn/configuration-service/config"
"github.com/keptn/keptn/configuration-service/models"
"github.com/keptn/keptn/configuration-service/restapi/operations/project_resource"
)
// GetProjectProjectNameResourceHandlerFunc gets the list of project resources
func GetProjectProjectNameResourceHandlerFunc(params project_resource.GetProjectProjectNameResourceParams) middleware.Responder {
logger := utils.NewLogger("", "", "configuration-service")
if !common.ProjectExists(params.ProjectName) {
return project_resource.NewGetProjectProjectNameResourceNotFound().WithPayload(&models.Error{Code: 404, Message: swag.String("Project does not exist")})
}
common.LockProject(params.ProjectName)
defer common.UnlockProject(params.ProjectName)
err := common.CheckoutBranch(params.ProjectName, "master", *params.DisableUpstreamSync)
if err != nil {
logger.Error(fmt.Sprintf("Could not check out master branch of project %s", params.ProjectName))
logger.Error(err.Error())
return project_resource.NewGetProjectProjectNameResourceDefault(500).WithPayload(&models.Error{Code: 500, Message: swag.String("Could not check out branch")})
}
projectConfigPath := config.ConfigDir + "/" + params.ProjectName
result := common.GetPaginatedResources(projectConfigPath, params.PageSize, params.NextPageKey)
return project_resource.NewGetProjectProjectNameResourceOK().WithPayload(result)
}
// PutProjectProjectNameResourceHandlerFunc updates the list of project resources
func PutProjectProjectNameResourceHandlerFunc(params project_resource.PutProjectProjectNameResourceParams) middleware.Responder {
logger := utils.NewLogger("", "", "configuration-service")
if !common.ProjectExists(params.ProjectName) {
		return project_resource.NewPutProjectProjectNameResourceBadRequest().WithPayload(&models.Error{Code: 400, Message: swag.String("Project does not exist")})
}
common.LockProject(params.ProjectName)
defer common.UnlockProject(params.ProjectName)
projectConfigPath := config.ConfigDir + "/" + params.ProjectName
logger.Debug("Updating resource(s) in: " + projectConfigPath)
logger.Debug("Checking out master branch")
err := common.CheckoutBranch(params.ProjectName, "master", false)
if err != nil {
logger.Error(fmt.Sprintf("Could not check out master branch of project %s", params.ProjectName))
logger.Error(err.Error())
return project_resource.NewPutProjectProjectNameResourceBadRequest().WithPayload(&models.Error{Code: 400, Message: swag.String("Could not check out branch")})
}
for _, res := range params.Resources.Resources {
filePath := projectConfigPath + "/" + *res.ResourceURI
logger.Debug("Updating resource: " + filePath)
		common.WriteBase64EncodedFile(filePath, res.ResourceContent)
if strings.ToLower(*res.ResourceURI) == "shipyard.yaml" {
mv := common.GetProjectsMaterializedView()
logger.Debug("updating shipyard.yaml content for project " + params.ProjectName + " in mongoDB table")
err := mv.UpdateShipyard(params.ProjectName, res.ResourceContent)
if err != nil {
logger.Error("Could not update shipyard.yaml content for project " + params.ProjectName + ": " + err.Error())
return project_resource.NewPutProjectProjectNameResourceBadRequest().WithPayload(&models.Error{Code: 500, Message: swag.String(err.Error())})
}
}
}
logger.Debug("Staging Changes")
err = common.StageAndCommitAll(params.ProjectName, "Updated resources", true)
if err != nil {
logger.Error(fmt.Sprintf("Could not commit to master branch of project %s", params.ProjectName))
logger.Error(err.Error())
return project_resource.NewPutProjectProjectNameResourceBadRequest().WithPayload(&models.Error{Code: 400, Message: swag.String("Could not commit changes")})
}
logger.Debug("Successfully updated resources")
newVersion, err := common.GetCurrentVersion(params.ProjectName)
if err != nil {
logger.Error(err.Error())
		return project_resource.NewPutProjectProjectNameResourceBadRequest().WithPayload(&models.Error{Code: 400, Message: swag.String("Could not retrieve latest version")})
}
return project_resource.NewPutProjectProjectNameResourceCreated().WithPayload(&models.Version{
Version: newVersion,
})
}
// PostProjectProjectNameResourceHandlerFunc creates a list of new resources
func PostProjectProjectNameResourceHandlerFunc(params project_resource.PostProjectProjectNameResourceParams) middleware.Responder {
logger := utils.NewLogger("", "", "configuration-service")
if !common.ProjectExists(params.ProjectName) {
return project_resource.NewPostProjectProjectNameResourceBadRequest().WithPayload(&models.Error{Code: 400, Message: swag.String("Project does not exist")})
}
common.LockProject(params.ProjectName)
defer common.UnlockProject(params.ProjectName)
projectConfigPath := config.ConfigDir + "/" + params.ProjectName
logger.Debug("Creating new resource(s) in: " + projectConfigPath)
logger.Debug("Checking out master branch")
err := common.CheckoutBranch(params.ProjectName, "master", false)
if err != nil {
logger.Error(fmt.Sprintf("Could not check out master branch of project %s", params.ProjectName))
logger.Error(err.Error())
return project_resource.NewPostProjectProjectNameResourceBadRequest().WithPayload(&models.Error{Code: 400, Message: swag.String("Could not check out branch")})
}
for _, res := range params.Resources.Resources {
filePath := projectConfigPath + "/" + *res.ResourceURI
logger.Debug("Adding resource: " + filePath)
		common.WriteBase64EncodedFile(filePath, res.ResourceContent)
}
logger.Debug("Staging Changes")
err = common.StageAndCommitAll(params.ProjectName, "Added resources", true)
if err != nil {
logger.Error(fmt.Sprintf("Could not commit to master branch of project %s", params.ProjectName))
logger.Error(err.Error())
return project_resource.NewPostProjectProjectNameResourceBadRequest().WithPayload(&models.Error{Code: 400, Message: swag.String("Could not commit changes")})
}
logger.Debug("Successfully added resources")
newVersion, err := common.GetCurrentVersion(params.ProjectName)
if err != nil {
logger.Error(err.Error())
return project_resource.NewPostProjectProjectNameResourceBadRequest().WithPayload(&models.Error{Code: 400, Message: swag.String("Could not retrieve latest version")})
}
return project_resource.NewPostProjectProjectNameResourceCreated().WithPayload(&models.Version{
Version: newVersion,
})
}
// GetProjectProjectNameResourceResourceURIHandlerFunc gets the specified resource
func GetProjectProjectNameResourceResourceURIHandlerFunc(params project_resource.GetProjectProjectNameResourceResourceURIParams) middleware.Responder {
logger := utils.NewLogger("", "", "configuration-service")
if !common.ProjectExists(params.ProjectName) {
return project_resource.NewGetProjectProjectNameResourceResourceURINotFound().WithPayload(&models.Error{Code: 404, Message: swag.String("Project not found")})
}
common.LockProject(params.ProjectName)
defer common.UnlockProject(params.ProjectName)
logger.Debug("Checking out master branch")
err := common.CheckoutBranch(params.ProjectName, "master", *params.DisableUpstreamSync)
if err != nil {
logger.Error(fmt.Sprintf("Could check out master branch of project %s", params.ProjectName))
logger.Error(err.Error())
return project_resource.NewGetProjectProjectNameResourceResourceURIDefault(500).WithPayload(&models.Error{Code: 500, Message: swag.String("Could not check out branch")})
}
projectConfigPath := config.ConfigDir + "/" + params.ProjectName
resourcePath := projectConfigPath + "/" + params.ResourceURI
if !common.FileExists(resourcePath) {
return project_resource.NewGetProjectProjectNameResourceResourceURINotFound().WithPayload(&models.Error{Code: 404, Message: swag.String("Project resource not found")})
}
dat, err := ioutil.ReadFile(resourcePath)
if err != nil {
logger.Error(err.Error())
return project_resource.NewGetProjectProjectNameResourceResourceURIDefault(500).WithPayload(&models.Error{Code: 500, Message: swag.String("Could not read file")})
}
resourceContent := base64.StdEncoding.EncodeToString(dat)
return project_resource.NewGetProjectProjectNameResourceResourceURIOK().WithPayload(
&models.Resource{
ResourceURI: ¶ms.ResourceURI,
ResourceContent: resourceContent,
})
}
// PutProjectProjectNameResourceResourceURIHandlerFunc updates a resource
func PutProjectProjectNameResourceResourceURIHandlerFunc(params project_resource.PutProjectProjectNameResourceResourceURIParams) middleware.Responder {
logger := utils.NewLogger("", "", "configuration-service")
if !common.ProjectExists(params.ProjectName) {
return project_resource.NewPutProjectProjectNameResourceResourceURIBadRequest().WithPayload(&models.Error{Code: 400, Message: swag.String("Project does not exist")})
}
common.LockProject(params.ProjectName)
defer common.UnlockProject(params.ProjectName)
projectConfigPath := config.ConfigDir + "/" + params.ProjectName
logger.Debug("Creating new resource(s) in: " + projectConfigPath)
logger.Debug("Checking out branch: master")
err := common.CheckoutBranch(params.ProjectName, "master", false)
if err != nil {
logger.Error(fmt.Sprintf("Could not check out master branch of project %s", params.ProjectName))
logger.Error(err.Error())
return project_resource.NewPutProjectProjectNameResourceResourceURIBadRequest().WithPayload(&models.Error{Code: 400, Message: swag.String("Could not check out branch")})
}
filePath := projectConfigPath + "/" + params.ResourceURI
common.WriteBase64EncodedFile(filePath, params.Resource.ResourceContent)
logger.Debug("Staging Changes")
err = common.StageAndCommitAll(params.ProjectName, "Updated resource: "+params.ResourceURI, true)
if err != nil {
logger.Error(fmt.Sprintf("Could not commit to master branch of project %s", params.ProjectName))
logger.Error(err.Error())
return project_resource.NewPutProjectProjectNameResourceResourceURIBadRequest().WithPayload(&models.Error{Code: 400, Message: swag.String("Could not commit changes")})
}
logger.Debug("Successfully updated resource: " + params.ResourceURI)
newVersion, err := common.GetCurrentVersion(params.ProjectName)
if err != nil {
logger.Error(err.Error())
return project_resource.NewPutProjectProjectNameResourceResourceURIBadRequest().WithPayload(&models.Error{Code: 400, Message: swag.String("Could not retrieve latest version")})
}
return project_resource.NewPutProjectProjectNameResourceResourceURICreated().WithPayload(&models.Version{
Version: newVersion,
})
}
// DeleteProjectProjectNameResourceResourceURIHandlerFunc deletes a project resource
func DeleteProjectProjectNameResourceResourceURIHandlerFunc(params project_resource.DeleteProjectProjectNameResourceResourceURIParams) middleware.Responder {
logger := utils.NewLogger("", "", "configuration-service")
if !common.ProjectExists(params.ProjectName) {
return project_resource.NewDeleteProjectProjectNameResourceResourceURIBadRequest().WithPayload(&models.Error{Code: 400, Message: swag.String("Project does not exist")})
}
common.LockProject(params.ProjectName)
defer common.UnlockProject(params.ProjectName)
err := common.CheckoutBranch(params.ProjectName, "master", false)
if err != nil {
logger.Error(fmt.Sprintf("Could not check out master branch of project %s", params.ProjectName))
logger.Error(err.Error())
return project_resource.NewDeleteProjectProjectNameResourceResourceURIDefault(500).WithPayload(&models.Error{Code: 500, Message: swag.String("Could not check out branch")})
}
projectConfigPath := config.ConfigDir + "/" + params.ProjectName
resourcePath := projectConfigPath + "/" + params.ResourceURI
err = common.DeleteFile(resourcePath)
if err != nil {
logger.Error(err.Error())
return project_resource.NewDeleteProjectProjectNameResourceResourceURIDefault(500).WithPayload(&models.Error{Code: 500, Message: swag.String("Could not delete file")})
}
logger.Debug("Staging Changes")
err = common.StageAndCommitAll(params.ProjectName, "Deleted resources", true)
if err != nil {
logger.Error(fmt.Sprintf("Could not commit to master branch of project %s", params.ProjectName))
logger.Error(err.Error())
return project_resource.NewDeleteProjectProjectNameResourceResourceURIDefault(500).WithPayload(&models.Error{Code: 500, Message: swag.String("Could not commit changes")})
}
logger.Debug("Successfully deleted resources")
return project_resource.NewDeleteProjectProjectNameResourceResourceURINoContent()
}
| |
lib.rs
|
/*
* Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#[macro_use]
extern crate capnp_rpc;
use actix::prelude::*;
use capnp_rpc::{rpc_twoparty_capnp, twoparty, RpcSystem};
use futures::{FutureExt, StreamExt};
use log::*;
pub mod external_capnp {
include!(concat!(env!("OUT_DIR"), "/external_capnp.rs"));
}
pub mod rpc;
pub async fn start_uds_policy_service<S: rpc::Dispatcher + 'static>(
service: S,
socket: std::path::PathBuf,
) -> std::io::Result<Addr<PolicyService>> {
let listener = Box::new(async_std::os::unix::net::UnixListener::bind(&socket).await?);
let external = capnp_rpc::new_client(service);
log::info!(r#"starting UDS policy service at "{}""#, socket.display());
Ok(PolicyService::create(|ctx| {
ctx.add_message_stream(
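            // Leak the listener so its `incoming()` stream is 'static and can
            // be attached to the actor as a message stream.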
Box::leak(listener)
.incoming()
.map(|st| Connect(st.unwrap())),
);
PolicyService {
socket: Some(socket),
external,
}
}))
}
pub async fn start_tcp_policy_service<S: rpc::Dispatcher + 'static, A: std::net::ToSocketAddrs>(
service: S,
socket: A,
) -> std::io::Result<Addr<PolicyService>> {
let addr = socket
.to_socket_addrs()?
.next()
.ok_or_else(|| std::io::Error::new(std::io::ErrorKind::Other, "bad socket address"))?;
let listener = Box::new(async_std::net::TcpListener::bind(&addr).await?);
let external = capnp_rpc::new_client(service);
log::info!(r#"starting TCP policy service at "{}""#, addr);
Ok(PolicyService::create(|ctx| {
ctx.add_message_stream(
Box::leak(listener)
.incoming()
.map(|st| Connect(st.unwrap())),
);
PolicyService {
socket: None,
external,
}
}))
}
pub struct PolicyService {
socket: Option<std::path::PathBuf>,
external: external_capnp::external::Client,
}
impl Actor for PolicyService {
type Context = Context<Self>;
fn stopped(&mut self, _ctx: &mut Context<Self>) {
if let Some(socket) = &self.socket {
info!("removing: {}", socket.display());
std::fs::remove_file(socket).unwrap_or_else(|e| warn!("failed to remove: {}", e))
}
}
}
#[derive(Message)]
#[rtype("")]
pub struct Quit;
impl Handler<Quit> for PolicyService {
type Result = ();
fn handle(&mut self, _msg: Quit, _ctx: &mut Context<Self>) -> Self::Result {
System::current().stop()
}
}
// struct Connect(tokio_uds::UnixStream);
struct Connect<T: futures::io::AsyncReadExt + futures::io::AsyncWrite + 'static + Unpin>(T);
impl<T: futures::io::AsyncReadExt + futures::io::AsyncWrite + 'static + Unpin> Message
for Connect<T>
{
type Result = ();
}
impl<T: futures::io::AsyncReadExt + futures::io::AsyncWrite + 'static + Unpin> Handler<Connect<T>>
for PolicyService
{
type Result = ();
    fn handle(&mut self, msg: Connect<T>, _: &mut Context<Self>) {
        // For each incoming connection we create a `PolicyServiceInstance` actor
        let (reader, writer) = msg.0.split();
        let network = twoparty::VatNetwork::new(
            reader,
            writer,
            rpc_twoparty_capnp::Side::Server,
            Default::default(),
        );
        let rpc_system = RpcSystem::new(Box::new(network), Some(self.external.clone().client));
        debug!("starting RPC connection");
        actix::spawn(rpc_system.map(|_| ()))
    }
}
|
FinUserInfo.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class FinUserInfo(object):
def __init__(self):
self._user_id = None
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
        o = FinUserInfo()
        if 'user_id' in d:
            o.user_id = d['user_id']
        return o
|
GameState.ts
|
export interface GameState {
time: number
shoppers: ShopperState[]
frankens: FrankenState[]
money: number
rearInventory: number
frontInventory: number
bodyParts: number
price: number
}
export interface ShopperState {
cash: number
x: number
y: number
}
export interface FrankenState {
intelligence: number
x: number
y: number
assignment: FrankenAssignment
}
export enum FrankenAssignment {
MANNEQUIN,
RESTOCK,
JANITOR,
REGISTER,
SECURITY,
MANAGER,
IDLE
}
|
|
routers.go
|
/*
 * OpenSource Issue Tracking System
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* API version: 1.0.0
* Contact: [email protected]
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package router
import (
"encoding/json"
"fmt"
"github.com/DevBoxFanBoy/opists/pkg/api/v1/model"
"github.com/DevBoxFanBoy/opists/pkg/logger"
"github.com/gorilla/mux"
"io/ioutil"
"net/http"
"net/http/pprof"
"os"
"strconv"
)
// A Route defines the parameters for an api endpoint
type Route struct {
Name string
Method string
Pattern string
HandlerFunc http.HandlerFunc
}
// Routes are a collection of defined api endpoints
type Routes []Route
// Router defines the required methods for retrieving api routes
type Router interface {
Routes() Routes
}
// NewRouter creates a new router for any number of api routers
func NewRouter(routers ...Router) *mux.Router {
router := mux.NewRouter().StrictSlash(true)
for _, api := range routers {
for _, route := range api.Routes() {
var handler http.Handler
handler = route.HandlerFunc
handler = logger.Logger(handler, route.Name)
router.PathPrefix("/rest").
Methods(route.Method).
Path(route.Pattern).
Name(route.Name).
Handler(handler)
}
}
return router
}
func AddRoutes(router *mux.Router, prefix string, routers ...Router) {
for _, api := range routers {
for _, route := range api.Routes() {
var handler http.Handler
handler = route.HandlerFunc
handler = logger.Logger(handler, route.Name)
router.PathPrefix(prefix).
Methods(route.Method).
Path(route.Pattern).
Name(route.Name).
Handler(handler)
}
}
}
func AddFaviconRoute(router *mux.Router) {
router.Methods("GET").Path("/favicon.ico").HandlerFunc(GetFaviconResource)
}
func GetFaviconResource(w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadFile("favicon.ico")
if err != nil {
http.Error(w, http.StatusText(404), 404)
return
}
fmt.Fprintf(w, "%s", body)
}
func AddPProf(router *mux.Router) {
router.HandleFunc("/debug/pprof/", pprof.Index)
router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
router.HandleFunc("/debug/pprof/profile", pprof.Profile)
router.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
router.Handle("/debug/pprof/allocs", pprof.Handler("allocs"))
router.Handle("/debug/pprof/block", pprof.Handler("block"))
router.Handle("/debug/pprof/goroutine", pprof.Handler("goroutine"))
router.Handle("/debug/pprof/heap", pprof.Handler("heap"))
router.Handle("/debug/pprof/mutex", pprof.Handler("mutex"))
router.Handle("/debug/pprof/threadcreate", pprof.Handler("threadcreate"))
}
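// Example wiring (a hypothetical main package, not part of the generated
// code; the import path of this router package is assumed from the model
// import above):
//
//	package main
//
//	import (
//		"log"
//		"net/http"
//
//		router "github.com/DevBoxFanBoy/opists/pkg/api/v1/router"
//	)
//
//	func main() {
//		r := router.NewRouter() // api routers can be passed in variadically
//		router.AddFaviconRoute(r)
//		router.AddPProf(r)
//		log.Fatal(http.ListenAndServe(":8080", r))
//	}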
// EncodeJSONResponse uses the json encoder to write an interface to the http response with an optional status code
func EncodeJSONResponse(i interface{}, status *int, w http.ResponseWriter) error {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
if status != nil {
w.WriteHeader(*status)
} else {
w.WriteHeader(http.StatusOK)
}
if i == nil {
return json.NewEncoder(w).Encode("")
}
return json.NewEncoder(w).Encode(i)
}
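// Usage sketch (illustrative only; GetIssueHandler and findIssue are
// hypothetical, not part of this package):
//
//	func GetIssueHandler(w http.ResponseWriter, r *http.Request) {
//		issue, err := findIssue(mux.Vars(r)["issueKey"])
//		if err != nil {
//			NotFoundError(w, err)
//			return
//		}
//		status := http.StatusOK
//		EncodeJSONResponse(issue, &status, w)
//	}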
func NotFoundError(w http.ResponseWriter, err error) {
errorResponse := model.ErrorResponse{
Code: 404,
Message: err.Error(),
}
status := http.StatusNotFound
EncodeJSONResponse(errorResponse, &status, w)
}
func InternalError(w http.ResponseWriter, err error) {
errorResponse := model.ErrorResponse{
Code: 500,
Message: err.Error(),
}
status := http.StatusInternalServerError
EncodeJSONResponse(errorResponse, &status, w)
}
func BadRequest(w http.ResponseWriter, err error) {
errorResponse := model.ErrorResponse{
Code: 400,
Message: err.Error(),
}
status := http.StatusBadRequest
EncodeJSONResponse(errorResponse, &status, w)
}
func BadRequestErrorResponse(w http.ResponseWriter, errorResponse model.ErrorResponse) {
status := http.StatusBadRequest
EncodeJSONResponse(errorResponse, &status, w)
}
func HandleErrorResponses(w http.ResponseWriter, errorResponse model.ErrorResponse) {
switch errorResponse.Code {
case 400:
BadRequestErrorResponse(w, errorResponse)
return
case 404:
NotFoundErrorResponse(w, errorResponse)
return
case 409:
		ConflictErrorResponse(w, errorResponse)
		return
default:
InternalErrorResponse(w, errorResponse)
return
}
}
func NotFoundErrorResponse(w http.ResponseWriter, errorResponse model.ErrorResponse) {
status := http.StatusNotFound
EncodeJSONResponse(errorResponse, &status, w)
}
func ConflictErrorResponse(w http.ResponseWriter, errorResponse model.ErrorResponse) {
status := http.StatusConflict
EncodeJSONResponse(errorResponse, &status, w)
}
func InternalErrorResponse(w http.ResponseWriter, errorResponse model.ErrorResponse) {
status := http.StatusInternalServerError
EncodeJSONResponse(errorResponse, &status, w)
}
// ReadFormFileToTempFile reads file data from a request form and writes it to a temporary file
func ReadFormFileToTempFile(r *http.Request, key string) (*os.File, error) {
r.ParseForm()
formFile, _, err := r.FormFile(key)
if err != nil {
return nil, err
}
defer formFile.Close()
file, err := ioutil.TempFile("tmp", key)
if err != nil {
return nil, err
}
defer file.Close()
fileBytes, err := ioutil.ReadAll(formFile)
if err != nil {
return nil, err
}
file.Write(fileBytes)
return file, nil
}
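// Note: the deferred file.Close() above runs before ReadFormFileToTempFile
// returns, so the returned *os.File handle is already closed; callers that
// need the contents should reopen the file by name. Sketch (hypothetical
// handler code):
//
//	tmp, err := ReadFormFileToTempFile(r, "attachment")
//	if err != nil {
//		BadRequest(w, err)
//		return
//	}
//	data, err := ioutil.ReadFile(tmp.Name()) // reopen, since tmp is closed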
// ParseIntParameter parses a string parameter to an int64
func ParseIntParameter(param string) (int64, error) {
	return strconv.ParseInt(param, 10, 64)
}
|
build.rs
|
extern crate cmake;
extern crate cc;
use std::env;
fn main() {
let dst = cmake::Config::new("wabt")
        // Turn off building tests and tools. This not only speeds up the build but
        // also avoids building executables that would otherwise be placed into the
        // `wabt` directory, which is now treated as an error.
        //
        // Since those were the only remaining targets, don't specify a build target
        // either (by default it is set to `--target install`); otherwise the build
        // fails with "No rule to make target `install'".
.define("BUILD_TESTS", "OFF")
.define("BUILD_TOOLS", "OFF")
.no_build_target(true)
.build();
let mut out_build_dir = dst;
out_build_dir.push("build");
println!("cargo:rustc-link-search=native={}", out_build_dir.display());
#[cfg(windows)]
println!("cargo:rustc-link-search=native={}\\Debug", out_build_dir.display());
println!("cargo:rustc-link-lib=static=wabt");
// We need to link against C++ std lib
if let Some(cpp_stdlib) = get_cpp_stdlib() {
println!("cargo:rustc-link-lib={}", cpp_stdlib);
}
let mut cfg = cc::Build::new();
cfg.file("wabt/src/emscripten-helpers.cc")
.file("wabt_shim.cc")
.include("wabt")
// This is needed for config.h generated by cmake.
.include(out_build_dir)
// We link to stdlib above.
.cpp_link_stdlib(None)
.warnings(false)
.cpp(true)
.flag("-std=c++11")
.compile("wabt_shim");
}
// See https://github.com/alexcrichton/gcc-rs/blob/88ac58e25/src/lib.rs#L1197
fn get_cpp_stdlib() -> Option<String> {
env::var("TARGET").ok().and_then(|target| {
if target.contains("msvc") {
None
} else if target.contains("darwin") {
Some("c++".to_string())
} else if target.contains("freebsd") {
Some("c++".to_string())
} else if target.contains("musl") {
Some("static=stdc++".to_string())
} else {
Some("stdc++".to_string())
}
})
}
|
|
group_test.py
|
import _initpath
import pyradox
#result = pyradox.txt.parse_file('D:/Steam/steamapps/common/Europa Universalis IV/common/prices/00_prices.txt')
#print(result)
result = pyradox.parse("""
regular_group = { 1 2 3 }
empty_tree = {}
mixed_group = {
10
{}
{ a = 1 b = 2 }
20
}
player_countries={
ITA={
user="Evil4Zerggin"
country_leader=yes
pinned_theatres={
{
id=16
type=67
}
}
    }
}
""")
print(result)
|
open_callback.go
|
package bss
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// OpenCallback invokes the bss.OpenCallback API synchronously
// api document: https://help.aliyun.com/api/bss/opencallback.html
func (client *Client) OpenCallback(request *OpenCallbackRequest) (response *OpenCallbackResponse, err error) {
response = CreateOpenCallbackResponse()
err = client.DoAction(request, response)
return
}
// OpenCallbackWithChan invokes the bss.OpenCallback API asynchronously
// api document: https://help.aliyun.com/api/bss/opencallback.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) OpenCallbackWithChan(request *OpenCallbackRequest) (<-chan *OpenCallbackResponse, <-chan error) {
responseChan := make(chan *OpenCallbackResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.OpenCallback(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
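// Usage sketch (illustrative only; constructing the client, e.g. via the
// SDK's NewClientWithAccessKey, is elided):
//
//	request := CreateOpenCallbackRequest()
//	request.ParamStr = "..."
//	respChan, errChan := client.OpenCallbackWithChan(request)
//	if response, ok := <-respChan; ok {
//		fmt.Println(response.RequestId)
//	} else {
//		fmt.Println(<-errChan)
//	}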
// OpenCallbackWithCallback invokes the bss.OpenCallback API asynchronously
// api document: https://help.aliyun.com/api/bss/opencallback.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) OpenCallbackWithCallback(request *OpenCallbackRequest, callback func(response *OpenCallbackResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *OpenCallbackResponse
var err error
defer close(result)
response, err = client.OpenCallback(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// OpenCallbackRequest is the request struct for api OpenCallback
type OpenCallbackRequest struct {
*requests.RpcRequest
ParamStr string `position:"Query" name:"paramStr"`
}
// OpenCallbackResponse is the response struct for api OpenCallback
type OpenCallbackResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
Success bool `json:"Success" xml:"Success"`
Code string `json:"Code" xml:"Code"`
Message string `json:"Message" xml:"Message"`
Data string `json:"Data" xml:"Data"`
}
// CreateOpenCallbackRequest creates a request to invoke OpenCallback API
func CreateOpenCallbackRequest() (request *OpenCallbackRequest) {
request = &OpenCallbackRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("Bss", "2014-07-14", "OpenCallback", "bss", "openAPI")
return
}
// CreateOpenCallbackResponse creates a response to parse from OpenCallback response
func CreateOpenCallbackResponse() (response *OpenCallbackResponse) {
response = &OpenCallbackResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
|
|
cppunit.py
|
import os
from schema import Or
from testplan.common.config import ConfigOption
from ..base import ProcessRunnerTest, ProcessRunnerTestConfig
from ...importers.cppunit import CPPUnitResultImporter, CPPUnitImportedResult
class CppunitConfig(ProcessRunnerTestConfig):
"""
Configuration object for :py:class:`~testplan.testing.cpp.cppunit.Cppunit`.
"""
@classmethod
def get_options(cls):
return {
ConfigOption("file_output_flag", default="-y"): Or(
None, lambda x: x.startswith("-")
),
ConfigOption("output_path", default=""): str,
ConfigOption("filtering_flag", default=None): Or(
None, lambda x: x.startswith("-")
),
ConfigOption("cppunit_filter", default=""): str,
ConfigOption("listing_flag", default=None): Or(
None, lambda x: x.startswith("-")
),
ConfigOption("parse_test_context", default=None): Or(
None, lambda x: callable(x)
),
}
class Cppunit(ProcessRunnerTest):
"""
Subprocess test runner for Cppunit: https://sourceforge.net/projects/cppunit
For original docs please see:
http://cppunit.sourceforge.net/doc/1.8.0/
http://cppunit.sourceforge.net/doc/cvs/cppunit_cookbook.html
Please note that the binary (either native binary or script) should output
in XML format so that Testplan is able to parse the result. By default
Testplan reads from stdout, but if `file_output_flag` is set (e.g. "-y"),
the binary should accept a file path and write the result to that file,
which will be loaded and parsed by Testplan. For example:
.. code-block:: bash
./cppunit_bin -y /path/to/test/result
:param name: Test instance name, often used as uid of test entity.
:type name: ``str``
:param binary: Path to the application binary or script.
:type binary: ``str``
:param description: Description of test instance.
:type description: ``str``
:param file_output_flag: Customized command line flag for specifying path
of output file, default to -y
:type file_output_flag: ``NoneType`` or ``str``
:param output_path: Where to save the test report, should work with
`file_output_flag`, if not provided a default path can be generated.
:type output_path: ``str``
:param filtering_flag: Customized command line flag for filtering testcases,
"-t" is suggested, for example: ./cppunit_bin -t *.some_text.*
:type filtering_flag: ``NoneType`` or ``str``
:param cppunit_filter: Native test filter pattern that will be used by
Cppunit internally.
:type cppunit_filter: ``str``
:param listing_flag: Customized command line flag for listing all testcases,
"-l" is suggested, for example: ./cppunit_bin -l
:type listing_flag: ``NoneType`` or ``str``
:param parse_test_context: Function to parse the output which contains
        listed test suites and testcases. Refer to the default implementation
:py:meth:`~testplan.testing.cpp.cppunit.Cppunit.parse_test_context`.
:type parse_test_context: ``NoneType`` or ``callable``
Also inherits all
:py:class:`~testplan.testing.base.ProcessRunnerTest` options.
"""
CONFIG = CppunitConfig
def __init__(
self,
name,
binary,
description=None,
file_output_flag="-y",
output_path="",
filtering_flag=None,
cppunit_filter="",
listing_flag=None,
parse_test_context=None,
**options
):
options.update(self.filter_locals(locals()))
super(Cppunit, self).__init__(**options)
@property
def report_path(self):
if self.cfg.file_output_flag and self.cfg.output_path:
return self.cfg.output_path
else:
return os.path.join(self._runpath, "report.xml")
def _test_command(self):
cmd = [self.cfg.binary]
if self.cfg.filtering_flag and self.cfg.cppunit_filter:
cmd.extend([self.cfg.filtering_flag, self.cfg.cppunit_filter])
if self.cfg.file_output_flag:
cmd.extend([self.cfg.file_output_flag, self.report_path])
return cmd
def _list_command(self):
if self.cfg.listing_flag:
return [self.cfg.binary, self.cfg.listing_flag]
else:
return super(Cppunit, self)._list_command()
def read_test_data(self):
importer = CPPUnitResultImporter(
self.report_path if self.cfg.file_output_flag else self.stdout
)
return importer.import_result()
def process_test_data(self, test_data: CPPUnitImportedResult):
"""
XML output contains entries for skipped testcases
as well, which are not included in the report.
"""
return test_data.results()
def parse_test_context(self, test_list_output):
"""
Default implementation of parsing Cppunit test listing from stdout.
Assume the format of output is like that of GTest listing. If the
Cppunit test lists the test suites and testcases in other format,
then this function needs to be re-implemented.
"""
# Sample command line output:
#
# Comparison.
# testNotEqual
# testGreater
# testLess
# testMisc
# LogicalOp.
# testOr
# testAnd
# testNot
# testXor
#
#
# Sample Result:
#
# [
# ['Comparison',
# ['testNotEqual', 'testGreater', 'testLess', 'testMisc']],
# ['LogicalOp', ['testOr', 'testAnd', 'testNot', 'testXor']],
# ]
if self.cfg.parse_test_context:
return self.cfg.parse_test_context(test_list_output)
# Default implementation: suppose that the output of
        # listing testcases is the same as that of GTest.
result = []
for line in test_list_output.splitlines():
line = line.rstrip()
if line.endswith(".") and len(line.lstrip()) > 1:
result.append([line.lstrip()[:-1], []])
elif result and (line.startswith(" ") or line.startswith("\t")):
result[-1][1].append(line.lstrip())
return result
def update_test_report(self):
"""
Attach XML report contents to the report, which can be
used by XML exporters, but will be discarded by serializers.
"""
super(Cppunit, self).update_test_report()
try:
with open(
self.report_path if self.cfg.file_output_flag else self.stdout
) as report_xml:
self.result.report.xml_string = report_xml.read()
except Exception:
self.result.report.xml_string = ""
def test_command_filter(self, testsuite_pattern, testcase_pattern):
"""
Return the base test command with additional filtering to run a
specific set of testcases.
"""
if testsuite_pattern not in (
"*",
self._DEFAULT_SUITE_NAME,
self._VERIFICATION_SUITE_NAME,
):
raise RuntimeError(
"Cannot run individual test suite {}".format(testsuite_pattern)
)
if testcase_pattern not in ("*", self._VERIFICATION_TESTCASE_NAME):
self.logger.debug(
'Should run testcases in pattern "%s", but cannot run'
" individual testcases thus will run the whole test suite",
testcase_pattern,
)
return self.test_command()
def list_command_filter(self, testsuite_pattern, testcase_pattern):
"""
Return the base list command with additional filtering to list a
specific set of testcases.
"""
return None # Cppunit does not support listing by filter
| |
transpose.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::*;
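/// Adapter that presents `base_graph` with every edge reversed: its
/// `predecessors` delegate to the base graph's `successors` and vice versa,
/// while the start node can be overridden for the transposed traversal.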
pub struct TransposedGraph<G: ControlFlowGraph> {
base_graph: G,
start_node: G::Node,
}
impl<G: ControlFlowGraph> TransposedGraph<G> {
pub fn new(base_graph: G) -> Self {
let start_node = base_graph.start_node();
Self::with_start(base_graph, start_node)
}
pub fn with_start(base_graph: G, start_node: G::Node) -> Self {
        TransposedGraph { base_graph, start_node }
}
}
impl<G: ControlFlowGraph> ControlFlowGraph for TransposedGraph<G> {
type Node = G::Node;
fn num_nodes(&self) -> usize {
self.base_graph.num_nodes()
}
    fn start_node(&self) -> Self::Node {
        self.start_node
    }
fn predecessors<'graph>(&'graph self, node: Self::Node)
-> <Self as GraphPredecessors<'graph>>::Iter {
self.base_graph.successors(node)
}
fn successors<'graph>(&'graph self, node: Self::Node)
-> <Self as GraphSuccessors<'graph>>::Iter {
self.base_graph.predecessors(node)
}
}
impl<'graph, G: ControlFlowGraph> GraphPredecessors<'graph> for TransposedGraph<G> {
type Item = G::Node;
type Iter = <G as GraphSuccessors<'graph>>::Iter;
}
impl<'graph, G: ControlFlowGraph> GraphSuccessors<'graph> for TransposedGraph<G> {
type Item = G::Node;
type Iter = <G as GraphPredecessors<'graph>>::Iter;
}
|
|
API.ts
|
/* tslint:disable */
/* eslint-disable */
// This file was automatically generated and should not be edited.
export type CreateConversationInput = {
id?: string | null,
name: string,
createdAt?: string | null,
};
export type ModelConversationConditionInput = {
name?: ModelStringInput | null,
createdAt?: ModelStringInput | null,
and?: Array< ModelConversationConditionInput | null > | null,
or?: Array< ModelConversationConditionInput | null > | null,
not?: ModelConversationConditionInput | null,
};
export type ModelStringInput = {
ne?: string | null,
eq?: string | null,
le?: string | null,
lt?: string | null,
ge?: string | null,
gt?: string | null,
contains?: string | null,
notContains?: string | null,
between?: Array< string | null > | null,
beginsWith?: string | null,
attributeExists?: boolean | null,
attributeType?: ModelAttributeTypes | null,
size?: ModelSizeInput | null,
};
export enum ModelAttributeTypes {
binary = "binary",
binarySet = "binarySet",
bool = "bool",
list = "list",
map = "map",
number = "number",
numberSet = "numberSet",
string = "string",
stringSet = "stringSet",
_null = "_null",
}
export type ModelSizeInput = {
ne?: number | null,
eq?: number | null,
le?: number | null,
lt?: number | null,
ge?: number | null,
gt?: number | null,
between?: Array< number | null > | null,
};
export type CreateMessageInput = {
id?: string | null,
content?: string | null,
createdAt?: string | null,
owner?: string | null,
chatbot?: boolean | null,
isSent?: boolean | null,
file?: S3ObjectInput | null,
messageConversationId?: string | null,
};
export type S3ObjectInput = {
bucket: string,
region: string,
key: string,
};
export type ModelMessageConditionInput = {
content?: ModelStringInput | null,
createdAt?: ModelStringInput | null,
chatbot?: ModelBooleanInput | null,
isSent?: ModelBooleanInput | null,
messageConversationId?: ModelIDInput | null,
and?: Array< ModelMessageConditionInput | null > | null,
or?: Array< ModelMessageConditionInput | null > | null,
not?: ModelMessageConditionInput | null,
};
export type ModelBooleanInput = {
ne?: boolean | null,
eq?: boolean | null,
attributeExists?: boolean | null,
attributeType?: ModelAttributeTypes | null,
};
export type ModelIDInput = {
ne?: string | null,
eq?: string | null,
le?: string | null,
lt?: string | null,
ge?: string | null,
gt?: string | null,
contains?: string | null,
notContains?: string | null,
between?: Array< string | null > | null,
beginsWith?: string | null,
attributeExists?: boolean | null,
attributeType?: ModelAttributeTypes | null,
size?: ModelSizeInput | null,
};
export type CreateUserInput = {
id?: string | null,
username: string,
registered?: boolean | null,
};
export type ModelUserConditionInput = {
username?: ModelStringInput | null,
registered?: ModelBooleanInput | null,
and?: Array< ModelUserConditionInput | null > | null,
or?: Array< ModelUserConditionInput | null > | null,
not?: ModelUserConditionInput | null,
};
export type CreateConvoLinkInput = {
id?: string | null,
name?: string | null,
status?: string | null,
convoLinkUserId?: string | null,
convoLinkConversationId?: string | null,
};
export type ModelConvoLinkConditionInput = {
name?: ModelStringInput | null,
status?: ModelStringInput | null,
convoLinkUserId?: ModelIDInput | null,
and?: Array< ModelConvoLinkConditionInput | null > | null,
or?: Array< ModelConvoLinkConditionInput | null > | null,
not?: ModelConvoLinkConditionInput | null,
};
export type UpdateConvoLinkInput = {
  id: string,
  name?: string | null,
  status?: string | null,
  convoLinkUserId?: string | null,
convoLinkConversationId?: string | null,
};
export type SearchableMessageFilterInput = {
id?: SearchableIDFilterInput | null,
content?: SearchableStringFilterInput | null,
createdAt?: SearchableStringFilterInput | null,
owner?: SearchableStringFilterInput | null,
chatbot?: SearchableBooleanFilterInput | null,
isSent?: SearchableBooleanFilterInput | null,
messageConversationId?: SearchableIDFilterInput | null,
and?: Array< SearchableMessageFilterInput | null > | null,
or?: Array< SearchableMessageFilterInput | null > | null,
not?: SearchableMessageFilterInput | null,
};
export type SearchableIDFilterInput = {
ne?: string | null,
gt?: string | null,
lt?: string | null,
gte?: string | null,
lte?: string | null,
eq?: string | null,
match?: string | null,
matchPhrase?: string | null,
matchPhrasePrefix?: string | null,
multiMatch?: string | null,
exists?: boolean | null,
wildcard?: string | null,
regexp?: string | null,
range?: Array< string | null > | null,
};
export type SearchableStringFilterInput = {
ne?: string | null,
gt?: string | null,
lt?: string | null,
gte?: string | null,
lte?: string | null,
eq?: string | null,
match?: string | null,
matchPhrase?: string | null,
matchPhrasePrefix?: string | null,
multiMatch?: string | null,
exists?: boolean | null,
wildcard?: string | null,
regexp?: string | null,
range?: Array< string | null > | null,
};
export type SearchableBooleanFilterInput = {
eq?: boolean | null,
ne?: boolean | null,
};
export type SearchableMessageSortInput = {
field?: SearchableMessageSortableFields | null,
direction?: SearchableSortDirection | null,
};
export enum SearchableMessageSortableFields {
id = "id",
content = "content",
createdAt = "createdAt",
owner = "owner",
chatbot = "chatbot",
isSent = "isSent",
messageConversationId = "messageConversationId",
}
export enum SearchableSortDirection {
asc = "asc",
desc = "desc",
}
export type SearchableUserFilterInput = {
id?: SearchableIDFilterInput | null,
username?: SearchableStringFilterInput | null,
registered?: SearchableBooleanFilterInput | null,
and?: Array< SearchableUserFilterInput | null > | null,
or?: Array< SearchableUserFilterInput | null > | null,
not?: SearchableUserFilterInput | null,
};
export type SearchableUserSortInput = {
field?: SearchableUserSortableFields | null,
direction?: SearchableSortDirection | null,
};
export enum SearchableUserSortableFields {
id = "id",
username = "username",
registered = "registered",
}
export type SearchableConvoLinkFilterInput = {
id?: SearchableIDFilterInput | null,
name?: SearchableStringFilterInput | null,
status?: SearchableStringFilterInput | null,
convoLinkUserId?: SearchableIDFilterInput | null,
and?: Array< SearchableConvoLinkFilterInput | null > | null,
or?: Array< SearchableConvoLinkFilterInput | null > | null,
not?: SearchableConvoLinkFilterInput | null,
};
export type SearchableConvoLinkSortInput = {
field?: SearchableConvoLinkSortableFields | null,
direction?: SearchableSortDirection | null,
};
export enum SearchableConvoLinkSortableFields {
id = "id",
name = "name",
status = "status",
convoLinkUserId = "convoLinkUserId",
}
export type CreateConvoMutationVariables = {
input: CreateConversationInput,
condition?: ModelConversationConditionInput | null,
};
export type CreateConvoMutation = {
createConvo: {
__typename: "Conversation",
id: string,
name: string,
createdAt: string | null,
messages: {
__typename: "ModelMessageConnection",
nextToken: string | null,
} | null,
associated: {
__typename: "ModelConvoLinkConnection",
nextToken: string | null,
} | null,
updatedAt: string,
} | null,
};
export type CreateMessageMutationVariables = {
input: CreateMessageInput,
condition?: ModelMessageConditionInput | null,
};
export type CreateMessageMutation = {
createMessage: {
__typename: "Message",
id: string,
content: string | null,
createdAt: string | null,
owner: string | null,
chatbot: boolean | null,
isSent: boolean | null,
file: {
__typename: "S3Object",
bucket: string,
region: string,
key: string,
} | null,
messageConversationId: string | null,
conversation: {
__typename: "Conversation",
id: string,
name: string,
createdAt: string | null,
updatedAt: string,
} | null,
updatedAt: string,
} | null,
};
export type RegisterUserMutationVariables = {
input: CreateUserInput,
condition?: ModelUserConditionInput | null,
};
export type RegisterUserMutation = {
registerUser: {
__typename: "User",
id: string,
username: string,
registered: boolean | null,
userConversations: {
__typename: "ModelConvoLinkConnection",
nextToken: string | null,
} | null,
createdAt: string,
updatedAt: string,
owner: string | null,
} | null,
};
export type CreateConvoLinkMutationVariables = {
input: CreateConvoLinkInput,
condition?: ModelConvoLinkConditionInput | null,
};
export type CreateConvoLinkMutation = {
createConvoLink: {
__typename: "ConvoLink",
id: string,
name: string | null,
status: string | null,
convoLinkUserId: string | null,
user: {
__typename: "User",
id: string,
username: string,
registered: boolean | null,
createdAt: string,
updatedAt: string,
owner: string | null,
} | null,
conversation: {
__typename: "Conversation",
id: string,
name: string,
createdAt: string | null,
updatedAt: string,
} | null,
createdAt: string,
updatedAt: string,
} | null,
};
export type UpdateConvoLinkMutationVariables = {
input: UpdateConvoLinkInput,
condition?: ModelConvoLinkConditionInput | null,
};
export type UpdateConvoLinkMutation = {
updateConvoLink: {
__typename: "ConvoLink",
id: string,
name: string | null,
status: string | null,
convoLinkUserId: string | null,
user: {
__typename: "User",
id: string,
username: string,
registered: boolean | null,
createdAt: string,
updatedAt: string,
owner: string | null,
} | null,
conversation: {
__typename: "Conversation",
id: string,
name: string,
createdAt: string | null,
updatedAt: string,
} | null,
createdAt: string,
updatedAt: string,
} | null,
};
export type DetectCelebsQueryVariables = {
bucket?: string | null,
key?: string | null,
};
export type DetectCelebsQuery = {
detectCelebs: {
__typename: "AI",
bucket: string | null,
key: string | null,
bot: string | null,
text: string | null,
language: string | null,
voice: string | null,
response: string | null,
} | null,
};
export type DetectLabelsQueryVariables = {
bucket?: string | null,
key?: string | null,
};
export type DetectLabelsQuery = {
detectLabels: {
__typename: "AI",
bucket: string | null,
key: string | null,
bot: string | null,
text: string | null,
language: string | null,
voice: string | null,
response: string | null,
} | null,
};
export type DetectLanguageQueryVariables = {
text?: string | null,
};
export type DetectLanguageQuery = {
detectLanguage: {
__typename: "AI",
bucket: string | null,
key: string | null,
bot: string | null,
text: string | null,
language: string | null,
voice: string | null,
response: string | null,
} | null,
};
export type DetectEntitiesQueryVariables = {
language?: string | null,
text?: string | null,
};
export type DetectEntitiesQuery = {
detectEntities: {
__typename: "AI",
bucket: string | null,
key: string | null,
bot: string | null,
text: string | null,
language: string | null,
voice: string | null,
response: string | null,
} | null,
};
export type DetectSentimentQueryVariables = {
language?: string | null,
text?: string | null,
};
export type DetectSentimentQuery = {
detectSentiment: {
__typename: "AI",
bucket: string | null,
key: string | null,
bot: string | null,
text: string | null,
language: string | null,
voice: string | null,
response: string | null,
} | null,
};
export type InvokeBotQueryVariables = {
bot?: string | null,
text?: string | null,
};
export type InvokeBotQuery = {
invokeBot: {
__typename: "AI",
bucket: string | null,
key: string | null,
bot: string | null,
text: string | null,
language: string | null,
voice: string | null,
response: string | null,
} | null,
};
export type DictateQueryVariables = {
bucket?: string | null,
key?: string | null,
voice?: string | null,
text?: string | null,
};
export type DictateQuery = {
dictate: {
__typename: "AI",
bucket: string | null,
key: string | null,
bot: string | null,
text: string | null,
language: string | null,
voice: string | null,
response: string | null,
} | null,
};
export type TranslateQueryVariables = {
language?: string | null,
text?: string | null,
};
export type TranslateQuery = {
translate: {
__typename: "AI",
bucket: string | null,
key: string | null,
bot: string | null,
text: string | null,
language: string | null,
voice: string | null,
response: string | null,
} | null,
};
export type GetConvoQueryVariables = {
id: string,
};
export type GetConvoQuery = {
getConvo: {
__typename: "Conversation",
id: string,
name: string,
createdAt: string | null,
messages: {
__typename: "ModelMessageConnection",
nextToken: string | null,
} | null,
associated: {
__typename: "ModelConvoLinkConnection",
nextToken: string | null,
} | null,
updatedAt: string,
} | null,
};
export type GetUserQueryVariables = {
id: string,
};
export type GetUserQuery = {
getUser: {
__typename: "User",
id: string,
username: string,
registered: boolean | null,
userConversations: {
__typename: "ModelConvoLinkConnection",
nextToken: string | null,
} | null,
createdAt: string,
updatedAt: string,
owner: string | null,
} | null,
};
export type SearchMessagesQueryVariables = {
filter?: SearchableMessageFilterInput | null,
sort?: SearchableMessageSortInput | null,
limit?: number | null,
nextToken?: string | null,
from?: number | null,
};
export type SearchMessagesQuery = {
searchMessages: {
__typename: "SearchableMessageConnection",
items: Array< {
__typename: "Message",
id: string,
content: string | null,
createdAt: string | null,
owner: string | null,
chatbot: boolean | null,
isSent: boolean | null,
messageConversationId: string | null,
updatedAt: string,
} | null > | null,
nextToken: string | null,
total: number | null,
} | null,
};
export type SearchUsersQueryVariables = {
filter?: SearchableUserFilterInput | null,
sort?: SearchableUserSortInput | null,
limit?: number | null,
nextToken?: string | null,
from?: number | null,
};
export type SearchUsersQuery = {
searchUsers: {
__typename: "SearchableUserConnection",
items: Array< {
__typename: "User",
id: string,
username: string,
registered: boolean | null,
createdAt: string,
updatedAt: string,
owner: string | null,
} | null > | null,
nextToken: string | null,
total: number | null,
} | null,
};
export type SearchConvoLinksQueryVariables = {
filter?: SearchableConvoLinkFilterInput | null,
sort?: SearchableConvoLinkSortInput | null,
limit?: number | null,
nextToken?: string | null,
from?: number | null,
};
export type SearchConvoLinksQuery = {
searchConvoLinks: {
__typename: "SearchableConvoLinkConnection",
items: Array< {
__typename: "ConvoLink",
id: string,
name: string | null,
status: string | null,
convoLinkUserId: string | null,
createdAt: string,
updatedAt: string,
} | null > | null,
nextToken: string | null,
total: number | null,
} | null,
};
export type OnCreateMessageSubscriptionVariables = {
messageConversationId: string,
};
export type OnCreateMessageSubscription = {
onCreateMessage: {
__typename: "Message",
id: string,
content: string | null,
createdAt: string | null,
owner: string | null,
chatbot: boolean | null,
isSent: boolean | null,
file: {
__typename: "S3Object",
bucket: string,
region: string,
key: string,
} | null,
messageConversationId: string | null,
conversation: {
__typename: "Conversation",
id: string,
name: string,
createdAt: string | null,
updatedAt: string,
} | null,
updatedAt: string,
} | null,
};
export type OnUpdateConvoLinkSubscriptionVariables = {
convoLinkUserId?: string | null,
status?: string | null,
};
export type OnUpdateConvoLinkSubscription = {
onUpdateConvoLink: {
__typename: "ConvoLink",
id: string,
name: string | null,
status: string | null,
convoLinkUserId: string | null,
user: {
__typename: "User",
id: string,
username: string,
registered: boolean | null,
createdAt: string,
updatedAt: string,
owner: string | null,
} | null,
conversation: {
__typename: "Conversation",
id: string,
name: string,
createdAt: string | null,
updatedAt: string,
} | null,
createdAt: string,
updatedAt: string,
} | null,
};
|
id: string,
name?: string | null,
status?: string | null,
convoLinkUserId?: string | null,
|
service.go
|
// Copyright (c) nano Authors. All Rights Reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package component
import (
"errors"
"reflect"
)
type (
// Handler represents a message.Message handler's meta information.
Handler struct {
Receiver reflect.Value // receiver of method
Method reflect.Method // method stub
Type reflect.Type // low-level type of method
IsRawArg bool // whether the data is passed raw ([]byte) without serialization
}
// Service implements a specific service; some of its methods will be
// called when the corresponding events occur.
Service struct {
Name string // name of service
Type reflect.Type // type of the receiver
Receiver reflect.Value // receiver of methods for the service
Handlers map[string]*Handler // registered methods
SchedName string // name of scheduler variable in session data
Options options // options
}
)
func NewService(comp Component, opts []Option) *Service {
s := &Service{
Type: reflect.TypeOf(comp),
Receiver: reflect.ValueOf(comp),
}
// apply options
for i := range opts {
opt := opts[i]
opt(&s.Options)
}
if name := s.Options.name; name != "" {
s.Name = name
} else {
s.Name = reflect.Indirect(s.Receiver).Type().Name()
}
s.SchedName = s.Options.schedName
return s
}
// suitableHandlerMethods returns the suitable handler methods of typ
func (s *Service) suitableHandlerMethods(typ reflect.Type) map[string]*Handler {
methods := make(map[string]*Handler)
for m := 0; m < typ.NumMethod(); m++ {
method := typ.Method(m)
mt := method.Type
mn := method.Name
if isHandlerMethod(method)
|
}
return methods
}
// ExtractHandler extracts the set of methods on the
// receiver value that satisfy the following conditions
// (a usage sketch follows this file):
// - exported method of exported type
// - two arguments, both of exported type
// - the first argument is *session.Session
// - the second argument is []byte or a pointer
func (s *Service) ExtractHandler() error {
typeName := reflect.Indirect(s.Receiver).Type().Name()
if typeName == "" {
return errors.New("no service name for type " + s.Type.String())
}
if !isExported(typeName) {
return errors.New("type " + typeName + " is not exported")
}
// Install the methods
s.Handlers = s.suitableHandlerMethods(s.Type)
if len(s.Handlers) == 0 {
str := ""
// To help the user, see if a pointer receiver would work.
method := s.suitableHandlerMethods(reflect.PtrTo(s.Type))
if len(method) != 0 {
str = "type " + s.Name + " has no exported methods of suitable type (hint: pass a pointer to value of that type)"
} else {
str = "type " + s.Name + " has no exported methods of suitable type"
}
return errors.New(str)
}
for i := range s.Handlers {
s.Handlers[i].Receiver = s.Receiver
}
return nil
}
|
{
raw := false
if mt.In(2) == typeOfBytes {
raw = true
}
// rewrite handler name
if s.Options.nameFunc != nil {
mn = s.Options.nameFunc(mn)
}
methods[mn] = &Handler{Method: method, Type: mt.In(2), IsRawArg: raw}
}
|
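A minimal usage sketch (not part of the dataset row above) of a component whose method satisfies the handler conditions documented in ExtractHandler. Room, JoinRequest, and the nano import paths are assumptions, not taken from the original file.
package example
import (
	"github.com/lonng/nano/component"
	"github.com/lonng/nano/session"
)
// JoinRequest is a hypothetical message payload.
type JoinRequest struct {
	Nickname string `json:"nickname"`
}
// Room is a hypothetical component; embedding component.Base satisfies the
// Component interface expected by NewService.
type Room struct {
	component.Base
}
// Join is an exported method with two arguments, *session.Session first and a
// pointer second, so suitableHandlerMethods registers it with IsRawArg=false
// and the payload is deserialized before the call.
func (r *Room) Join(s *session.Session, req *JoinRequest) error {
	return s.Response(req)
}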
registry.go
|
/*
Copyright 2018 The Kubepack Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package registry
import (
"fmt"
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
"k8s.io/apiserver/pkg/registry/rest"
)
// REST implements a RESTStorage for API services against etcd
type REST struct {
*genericregistry.Store
}
// RESTInPeace is a simple wrapper that panics on error and otherwise
// returns the given storage object. It is meant to wrap app registries.
func RESTInPeace(storage rest.StandardStorage, err error) rest.StandardStorage
|
{
if err != nil {
err = fmt.Errorf("unable to create REST storage for a resource due to %v, will die", err)
panic(err)
}
return storage
}
|
|
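A usage sketch (assumed, not from the dataset): because RESTInPeace takes (rest.StandardStorage, error), a constructor returning exactly that pair can feed it directly. NewFooREST and the module path of the registry package are hypothetical.
package example
import (
	"k8s.io/apiserver/pkg/registry/generic"
	"k8s.io/apiserver/pkg/registry/rest"

	"example.com/apiserver/pkg/registry" // hypothetical import path for the package above
)
// NewFooREST is a hypothetical storage constructor for a "Foo" resource; a
// real implementation would build a *genericregistry.Store here.
func NewFooREST(optsGetter generic.RESTOptionsGetter) (rest.StandardStorage, error) {
	return nil, nil
}
// fooStorage panics at startup if storage creation fails, which is the
// failure mode RESTInPeace is designed for.
func fooStorage(optsGetter generic.RESTOptionsGetter) rest.StandardStorage {
	return registry.RESTInPeace(NewFooREST(optsGetter))
}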
user-manager-form.service.ts
|
// Copyright (c) 2020 Benjamin La Madrid
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
import { Inject, Injectable } from '@angular/core';
import { Observable } from 'rxjs';
import { DataManagerFormService } from '../../data-manager-form.aservice';
import { Person } from 'src/app/data/models/entities/Person';
import { User } from 'src/app/data/models/entities/User';
import { DATA_INJECTION_TOKENS } from 'src/app/data/data-injection-tokens';
import { EntityCrudIService } from 'src/app/data/entity.crud.iservice';
@Injectable()
export class
|
extends DataManagerFormService<User> {
constructor(
@Inject(DATA_INJECTION_TOKENS.usersCrud) public dataService: EntityCrudIService<User>,
@Inject(DATA_INJECTION_TOKENS.people) protected peopleDataService: Partial<EntityCrudIService<Person>>
) {
super();
}
public getPeople(): Observable<Person[]> {
return this.peopleDataService.readAll();
}
}
|
UserManagerFormService
|
updatepwdhandler.go
|
package handler
import (
"net/http"
"backend/service/bookstore/cmd/api/internal/logic/reader"
"backend/service/bookstore/cmd/api/internal/svc"
"backend/service/bookstore/cmd/api/internal/types"
"github.com/tal-tech/go-zero/rest/httpx"
)
func
|
(ctx *svc.ServiceContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var req types.UpdatePwdReq
if err := httpx.Parse(r, &req); err != nil {
httpx.Error(w, err)
return
}
l := logic.NewUpdatePwdLogic(r.Context(), ctx)
resp, err := l.UpdatePwd(req)
if err != nil {
httpx.Error(w, err)
} else {
httpx.OkJson(w, resp)
}
}
}
|
UpdatePwdHandler
|
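httpx.Parse fills types.UpdatePwdReq from the request according to its struct tags. A plausible shape for that generated type, where the field names and tags are guesses and not taken from the dataset:
package types
// UpdatePwdReq is a sketch of what goctl might generate for this handler;
// every field here is an assumption.
type UpdatePwdReq struct {
	OldPassword string `json:"oldPassword"`
	NewPassword string `json:"newPassword"`
}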