mod.rs
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::I2scfgr {
#[doc = r" Modifies the contents of the register"]
#[inline(always)]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline(always)]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline(always)]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
}
#[doc = r" Value of the field"]
pub struct I2smodR {
bits: u8,
}
impl I2smodR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct I2seR {
bits: u8,
}
impl I2seR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct I2scfgR {
bits: u8,
}
impl I2scfgR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct PcmsyncR {
bits: u8,
}
impl PcmsyncR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct I2sstdR {
bits: u8,
}
impl I2sstdR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct CkpolR {
bits: u8,
}
impl CkpolR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct DatlenR {
bits: u8,
}
impl DatlenR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Value of the field"]
pub struct ChlenR {
bits: u8,
}
impl ChlenR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u8 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _I2smodW<'a> {
w: &'a mut W,
}
impl<'a> _I2smodW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 1;
const OFFSET: u8 = 11;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _I2seW<'a> {
w: &'a mut W,
}
impl<'a> _I2seW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 1;
const OFFSET: u8 = 10;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _I2scfgW<'a> {
w: &'a mut W,
}
impl<'a> _I2scfgW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 3;
const OFFSET: u8 = 8;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _PcmsyncW<'a> {
w: &'a mut W,
}
impl<'a> _PcmsyncW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 1;
const OFFSET: u8 = 7;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _I2sstdW<'a> {
w: &'a mut W,
}
impl<'a> _I2sstdW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 3;
const OFFSET: u8 = 4;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CkpolW<'a> {
w: &'a mut W,
}
impl<'a> _CkpolW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 1;
const OFFSET: u8 = 3;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _DatlenW<'a> {
w: &'a mut W,
}
impl<'a> _DatlenW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 3;
const OFFSET: u8 = 1;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _ChlenW<'a> {
w: &'a mut W,
}
impl<'a> _ChlenW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, bits: u8) -> &'a mut W {
const MASK: u8 = 1;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((bits & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 11 - I2S mode selection"]
#[inline(always)]
pub fn i2smod(&self) -> I2smodR {
let bits = {
const MASK: u8 = 1;
const OFFSET: u8 = 11;
((self.bits >> OFFSET) & MASK as u32) as u8
};
I2smodR { bits }
}
#[doc = "Bit 10 - I2S Enable"]
#[inline(always)]
pub fn i2se(&self) -> I2seR {
let bits = {
const MASK: u8 = 1;
const OFFSET: u8 = 10;
((self.bits >> OFFSET) & MASK as u32) as u8
};
I2seR { bits }
}
#[doc = "Bits 8:9 - I2S configuration mode"]
#[inline(always)]
pub fn i2scfg(&self) -> I2scfgR {
let bits = {
const MASK: u8 = 3;
const OFFSET: u8 = 8;
((self.bits >> OFFSET) & MASK as u32) as u8
};
I2scfgR { bits }
}
#[doc = "Bit 7 - PCM frame synchronization"]
#[inline(always)]
pub fn pcmsync(&self) -> PcmsyncR {
let bits = {
const MASK: u8 = 1;
const OFFSET: u8 = 7;
((self.bits >> OFFSET) & MASK as u32) as u8
};
PcmsyncR { bits }
}
#[doc = "Bits 4:5 - I2S standard selection"]
#[inline(always)]
pub fn i2sstd(&self) -> I2sstdR {
let bits = {
const MASK: u8 = 3;
const OFFSET: u8 = 4;
((self.bits >> OFFSET) & MASK as u32) as u8
};
I2sstdR { bits }
}
#[doc = "Bit 3 - Steady state clock polarity"]
#[inline(always)]
pub fn ckpol(&self) -> CkpolR {
let bits = {
const MASK: u8 = 1;
const OFFSET: u8 = 3;
((self.bits >> OFFSET) & MASK as u32) as u8
};
CkpolR { bits }
}
#[doc = "Bits 1:2 - Data length to be transferred"]
#[inline(always)]
pub fn datlen(&self) -> DatlenR {
let bits = {
const MASK: u8 = 3;
const OFFSET: u8 = 1;
((self.bits >> OFFSET) & MASK as u32) as u8
};
DatlenR { bits }
}
#[doc = "Bit 0 - Channel length (number of bits per audio channel)"]
#[inline(always)]
pub fn chlen(&self) -> ChlenR {
let bits = {
const MASK: u8 = 1;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u8
};
ChlenR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline(always)]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 11 - I2S mode selection"]
#[inline(always)]
pub fn i2smod(&mut self) -> _I2smodW {
_I2smodW { w: self }
}
#[doc = "Bit 10 - I2S Enable"]
#[inline(always)]
pub fn i2se(&mut self) -> _I2seW {
_I2seW { w: self }
}
#[doc = "Bits 8:9 - I2S configuration mode"]
#[inline(always)]
pub fn i2scfg(&mut self) -> _I2scfgW {
_I2scfgW { w: self }
}
#[doc = "Bit 7 - PCM frame synchronization"]
#[inline(always)]
pub fn pcmsync(&mut self) -> _PcmsyncW {
_PcmsyncW { w: self }
}
#[doc = "Bits 4:5 - I2S standard selection"]
#[inline(always)]
pub fn i2sstd(&mut self) -> _I2sstdW {
_I2sstdW { w: self }
}
#[doc = "Bit 3 - Steady state clock polarity"]
#[inline(always)]
pub fn ckpol(&mut self) -> _CkpolW {
_CkpolW { w: self }
}
#[doc = "Bits 1:2 - Data length to be transferred"]
#[inline(always)]
pub fn datlen(&mut self) -> _DatlenW {
_DatlenW { w: self }
}
#[doc = "Bit 0 - Channel length (number of bits per audio channel)"]
#[inline(always)]
pub fn chlen(&mut self) -> _ChlenW {
_ChlenW { w: self }
}
}
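// Usage sketch (editorial addition, not generated code): svd2rust normally
// exposes this register as a field on the peripheral's register block. The
// `super::RegisterBlock` and `i2scfgr` names below are assumptions made for
// illustration; adjust them to the enclosing crate.
pub fn example_configure(periph: &super::RegisterBlock) {
// read-modify-write: select I2S mode and the master-transmit configuration (0b10)
periph.i2scfgr.modify(|_, w| unsafe { w.i2smod().bits(1).i2scfg().bits(0b10) });
// read a single field back out of the register
let _i2s_enabled = periph.i2scfgr.read().i2se().bits() == 1;
// whole-register write, starting from the reset value
periph.i2scfgr.write(|w| unsafe { w.datlen().bits(0b00).chlen().bits(0) });
}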
test_tf_funcs.py
#!/usr/bin/env python3
# -*-coding:utf-8-*-
# @file: test_tf_funcs.py
# @brief:
# @author: Changjiang Cai, [email protected], [email protected]
# @version: 0.0.1
# @creation date: 13-08-2019
# @last modified: Tue 13 Aug 2019 05:38:05 PM EDT
import tensorflow as tf
import numpy as np
if __name__ == "__main__":
    """ test tf.gather_nd """
    # data is [[[ 0  1]
    #           [ 2  3]
    #           [ 4  5]]
    #
    #          [[ 6  7]
    #           [ 8  9]
    #           [10 11]]]
    data = np.reshape(np.arange(12), [2, 3, 2])
    x = tf.constant(data)
    idx_1 = [[[0, 0, 0], [0, 1, 1]], [[1, 0, 1], [1, 1, 0]]]  # 2 x 2 x 3
    result1 = tf.gather_nd(x, idx_1)
    idx_2 = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]  # 4 x 3
    result2 = tf.gather_nd(x, idx_2)
    # Construct a 'Session' to execute the graph.
    sess = tf.Session()
    # Execute the graph and store the evaluated tensors in `x`, `res1` and `res2`.
    x, res1, res2 = sess.run([x, result1, result2])
    print('x = {}'.format(x))
    print('res1 = {}'.format(res1))
    print('res2 = {}'.format(res2))
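    # Expected values (gather_nd indexes along the leading dims of `data`):
    #   res1 = [[0 3]     idx_1 has shape 2x2x3, so the result is 2x2
    #           [7 8]]
    #   res2 = [0 3 7 8]  idx_2 has shape 4x3, so the result has shape (4,)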
context.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, StringType
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext", "UDFRegistration"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession(sparkContext)
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=None):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is not None, return
defaultValue. If the key is not set and defaultValue is None, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return UDFRegistration(self)
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@ignore_unicode_prefix
@since(1.2)
def registerFunction(self, name, f, returnType=StringType()):
"""Registers a python function (including lambda function) as a UDF
so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not given it defaults to a string and conversion will automatically
be done. For any other return type, the produced object must match the specified type.
:param name: name of the UDF
:param f: python function
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerFunction("stringLengthString", lambda x: len(x))
>>> sqlContext.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
"""
self.sparkSession.catalog.registerFunction(name, f, returnType)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or
:class:`pyspark.sql.types.StringType`, it must match the
real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a
:class:`pyspark.sql.types.StringType` or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
:class:`pyspark.sql.types.StringType` after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temp table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
if jhiveContext is None:
sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
you may end up launching multiple derby instances and encounter incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
class UDFRegistration(object):
"""Wrapper for user-defined function registration."""
def __init__(self, sqlContext):
self.sqlContext = sqlContext
def register(self, name, f, returnType=StringType()):
return self.sqlContext.registerFunction(name, f, returnType)
register.__doc__ = SQLContext.registerFunction.__doc__
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
'"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", '
'"field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
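# Usage sketch (editorial addition): the SparkSession-first way to obtain the
# same entry point today; the master/appName values here are placeholders.
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.master("local[2]").appName("demo").getOrCreate()
#   sqlContext = SQLContext(spark.sparkContext, spark)
#   sqlContext.createDataFrame([('Alice', 1)], ['name', 'age']).show()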
exec_handler.go
package component
import (
"bufio"
"fmt"
"io"
"os"
"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
"github.com/redhat-developer/odo/pkg/kclient"
"github.com/redhat-developer/odo/pkg/log"
"github.com/redhat-developer/odo/pkg/machineoutput"
"github.com/redhat-developer/odo/pkg/util"
"k8s.io/klog"
)
type execHandler struct {
kubeClient kclient.ClientInterface
podName string
show bool
}
const ShellExecutable string = "/bin/sh"
func NewExecHandler(kubeClient kclient.ClientInterface, podName string, show bool) *execHandler {
return &execHandler{
kubeClient: kubeClient,
podName: podName,
show: show,
}
}
func (o *execHandler) ApplyImage(image v1alpha2.Component) error {
return nil
}
func (o *execHandler) ApplyKubernetes(kubernetes v1alpha2.Component) error {
return nil
}
func (o *execHandler) Execute(command v1alpha2.Command) error {
msg := fmt.Sprintf("Executing %s command %q on container %q", command.Id, command.Exec.CommandLine, command.Exec.Component)
spinner := log.Spinner(msg)
defer spinner.End(false)
logger := machineoutput.NewMachineEventLoggingClient()
stdoutWriter, stdoutChannel, stderrWriter, stderrChannel := logger.CreateContainerOutputWriter()
cmdline := getCmdline(command)
err := executeCommand(o.kubeClient, command.Exec.Component, o.podName, cmdline, o.show, stdoutWriter, stderrWriter)
closeWriterAndWaitForAck(stdoutWriter, stdoutChannel, stderrWriter, stderrChannel)
spinner.End(true)
return err
}
func getCmdline(command v1alpha2.Command) []string {
// deal with environment variables
var cmdLine string
setEnvVariable := util.GetCommandStringFromEnvs(command.Exec.Env)
if setEnvVariable == "" {
cmdLine = command.Exec.CommandLine
} else {
cmdLine = setEnvVariable + " && " + command.Exec.CommandLine
}
// Change to the workdir and execute the command
var cmd []string
if command.Exec.WorkingDir != "" {
// since we are using /bin/sh -c, the command needs to be within a single double quote instance, for example "cd /tmp && pwd"
cmd = []string{ShellExecutable, "-c", "cd " + command.Exec.WorkingDir + " && " + cmdLine}
} else {
cmd = []string{ShellExecutable, "-c", cmdLine}
}
return cmd
}
func closeWriterAndWaitForAck(stdoutWriter *io.PipeWriter, stdoutChannel chan interface{}, stderrWriter *io.PipeWriter, stderrChannel chan interface{}) {
if stdoutWriter != nil {
_ = stdoutWriter.Close()
<-stdoutChannel
}
if stderrWriter != nil {
_ = stderrWriter.Close()
<-stderrChannel
}
}
// ExecuteCommand executes the given command in the pod's container
func executeCommand(client kclient.ClientInterface, containerName string, podName string, command []string, show bool, consoleOutputStdout *io.PipeWriter, consoleOutputStderr *io.PipeWriter) (err error) {
stdoutReader, stdoutWriter := io.Pipe()
stderrReader, stderrWriter := io.Pipe()
var cmdOutput string
klog.V(2).Infof("Executing command %v for pod: %v in container: %v", command, podName, containerName)
// Read stdout and stderr, store their output in cmdOutput, and also pass output to consoleOutput Writers (if non-nil)
stdoutCompleteChannel := startReaderGoroutine(stdoutReader, show, &cmdOutput, consoleOutputStdout)
stderrCompleteChannel := startReaderGoroutine(stderrReader, show, &cmdOutput, consoleOutputStderr)
err = client.ExecCMDInContainer(containerName, podName, command, stdoutWriter, stderrWriter, nil, false)
// Block until we have received all the container output from each stream
_ = stdoutWriter.Close()
<-stdoutCompleteChannel
_ = stderrWriter.Close()
<-stderrCompleteChannel
if err != nil {
// It is safe to read from cmdOutput here, as the goroutines are guaranteed to have terminated at this point.
klog.V(2).Infof("ExecuteCommand returned an an err: %v. for command '%v'. output: %v", err, command, cmdOutput)
return fmt.Errorf("unable to exec command %v: \n%v: %w", command, cmdOutput, err)
}
return
}
// This goroutine will automatically pipe the output from the writer (passed into ExecCMDInContainer) to
// the loggers.
// The returned channel will contain a single nil entry once the reader has closed.
func startReaderGoroutine(reader io.Reader, show bool, cmdOutput *string, consoleOutput *io.PipeWriter) chan interface{} {
result := make(chan interface{})
go func() {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
line := scanner.Text()
if log.IsDebug() || show {
_, err := fmt.Fprintln(os.Stdout, line)
if err != nil {
log.Errorf("Unable to print to stdout: %s", err.Error())
}
}
*cmdOutput += fmt.Sprintln(line)
if consoleOutput != nil {
_, err := consoleOutput.Write([]byte(line + "\n"))
if err != nil {
log.Errorf("Error occurred on writing string to consoleOutput writer: %s", err.Error())
}
}
}
result <- nil
}()
return result
}
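// Illustrative sketch (editorial addition): the reader-goroutine pattern above,
// driven by an in-memory pipe instead of a pod exec stream.
func exampleReaderPattern() {
r, w := io.Pipe()
var out string
done := startReaderGoroutine(r, false, &out, nil)
go func() {
fmt.Fprintln(w, "hello from the container")
_ = w.Close()
}()
// startReaderGoroutine sends a single nil once the scanner has drained the pipe
<-done
klog.V(2).Infof("captured: %q", out)
}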
python_deploy.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# brief Example code to load and run a TVM module.
# file python_deploy.py
import tvm
import numpy as np
def verify(mod, fname):
# Get the function from the module
f = mod.get_function(fname)
# Use tvm.nd.array to convert numpy ndarray to tvm
# NDArray type, so that function can be invoked normally
N = 10
x = tvm.nd.array(np.arange(N, dtype=np.float32))
y = tvm.nd.array(np.zeros(N, dtype=np.float32))
# Invoke the function
f(x, y)
np_x = x.asnumpy()
np_y = y.asnumpy()
# Verify correctness of function
assert(np.all([xi+1 == yi for xi, yi in zip(np_x, np_y)]))
print("Finish verification...")
if __name__ == "__main__":
# The normal dynamic loading method for deployment
mod_dylib = tvm.module.load("lib/test_addone_dll.so")
print("Verify dynamic loading from test_addone_dll.so")
verify(mod_dylib, "addone")
# There might be methods to use the system lib way in
# python, but dynamic loading is good enough for now.
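# For reference (editorial addition; the build step is assumed, not shown in
# this repository): a library like lib/test_addone_dll.so is typically
# produced along these lines:
#   mod = tvm.build(schedule, [A, B], target="llvm", name="addone")
#   mod.export_library("lib/test_addone_dll.so")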
gifroulette.go
package handler
import (
"fmt"
"strings"
"github.com/morganonbass/celexacreams/celexacreams"
"github.com/bwmarrin/discordgo"
)
// GifRoulette responds to "GifRoulette"
type GifRoulette struct{
R, D bool
}
// Should I reply to the invoking message?
func (h *GifRoulette) Reply() bool {
return h.R
}
// Should I delete the invoking message?
func (h *GifRoulette) DeleteInvocation() bool {
return h.D
}
// Help returns a brief help string
func (h *GifRoulette) Help(short bool) string {
if short {
return "Returns a random giphy result for the supplied search term"
} else {
return fmt.Sprintf("Usage: `%vgifroulette Jason Momoa`\n\nReturn: Probably a pretty hot gif", celexacreams.Prefix)
}
}
// Handle shows a random gif returned by the supplied search string
func (h *GifRoulette) Handle(m *discordgo.MessageCreate, c *discordgo.Channel, s *discordgo.Session) (string, string, []byte, error) {
command, err := celexacreams.ExtractCommand(m.ContentWithMentionsReplaced())
if err != nil {
return "", "", make([]byte, 0), err
}
if len(command) <= 1 {
return "You should supply a search string, what do you think I am, a mind reader " + m.Author.Mention() + "?", "", make([]byte, 0), nil
}
searchString := strings.Join(command[1:], " ")
url, err := celexacreams.GetRandomGIF(searchString)
if err != nil {
return "", "", make([]byte, 0), &celexacreams.CelexaError{
"GIF error: " + err.Error(),
}
}
return url, "", make([]byte, 0), nil
}
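// Usage sketch (editorial addition): a command router elsewhere in the bot is
// assumed to construct and invoke this handler roughly as follows:
//
//	h := &GifRoulette{R: true, D: false}
//	reply, _, _, err := h.Handle(m, c, s)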
elasticsearch_integration_test.go
package elasticsearch_test
import (
"bytes"
"errors"
"strings"
"testing"
"github.com/fastly/cli/pkg/app"
"github.com/fastly/cli/pkg/mock"
"github.com/fastly/cli/pkg/testutil"
"github.com/fastly/go-fastly/v3/fastly"
)
func TestElasticsearchCreate(t *testing.T) {
args := testutil.Args
for _, testcase := range []struct {
args []string
api mock.API
wantError string
wantOutput string
}{
{
args: args("logging elasticsearch create --service-id 123 --version 1 --name log --index logs --autoclone"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
CloneVersionFn: testutil.CloneVersionResult(4),
},
wantError: "error parsing arguments: required flag --url not provided",
},
{
args: args("logging elasticsearch create --service-id 123 --version 1 --name log --url example.com --autoclone"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
CloneVersionFn: testutil.CloneVersionResult(4),
},
wantError: "error parsing arguments: required flag --index not provided",
},
{
args: args("logging elasticsearch create --service-id 123 --version 1 --name log --index logs --url example.com --autoclone"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
CloneVersionFn: testutil.CloneVersionResult(4),
CreateElasticsearchFn: createElasticsearchOK,
},
wantOutput: "Created Elasticsearch logging endpoint log (service 123 version 4)",
},
{
args: args("logging elasticsearch create --service-id 123 --version 1 --name log --index logs --url example.com --autoclone"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
CloneVersionFn: testutil.CloneVersionResult(4),
CreateElasticsearchFn: createElasticsearchError,
},
wantError: errTest.Error(),
},
} {
t.Run(strings.Join(testcase.args, " "), func(t *testing.T) {
var stdout bytes.Buffer
opts := testutil.NewRunOpts(testcase.args, &stdout)
opts.APIClient = mock.APIClient(testcase.api)
err := app.Run(opts)
testutil.AssertErrorContains(t, err, testcase.wantError)
testutil.AssertStringContains(t, stdout.String(), testcase.wantOutput)
})
}
}
func TestElasticsearchList(t *testing.T) {
args := testutil.Args
for _, testcase := range []struct {
args []string
api mock.API
wantError string
wantOutput string
}{
{
args: args("logging elasticsearch list --service-id 123 --version 1"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
ListElasticsearchFn: listElasticsearchsOK,
},
wantOutput: listElasticsearchsShortOutput,
},
{
args: args("logging elasticsearch list --service-id 123 --version 1 --verbose"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
ListElasticsearchFn: listElasticsearchsOK,
},
wantOutput: listElasticsearchsVerboseOutput,
},
{
args: args("logging elasticsearch list --service-id 123 --version 1 -v"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
ListElasticsearchFn: listElasticsearchsOK,
},
wantOutput: listElasticsearchsVerboseOutput,
},
{
args: args("logging elasticsearch --verbose list --service-id 123 --version 1"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
ListElasticsearchFn: listElasticsearchsOK,
},
wantOutput: listElasticsearchsVerboseOutput,
},
{
args: args("logging -v elasticsearch list --service-id 123 --version 1"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
ListElasticsearchFn: listElasticsearchsOK,
},
wantOutput: listElasticsearchsVerboseOutput,
},
{
args: args("logging elasticsearch list --service-id 123 --version 1"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
ListElasticsearchFn: listElasticsearchsError,
},
wantError: errTest.Error(),
},
} {
t.Run(strings.Join(testcase.args, " "), func(t *testing.T) {
var stdout bytes.Buffer
opts := testutil.NewRunOpts(testcase.args, &stdout)
opts.APIClient = mock.APIClient(testcase.api)
err := app.Run(opts)
testutil.AssertErrorContains(t, err, testcase.wantError)
testutil.AssertString(t, testcase.wantOutput, stdout.String())
})
}
}
func TestElasticsearchDescribe(t *testing.T) {
args := testutil.Args
for _, testcase := range []struct {
args []string
api mock.API
wantError string
wantOutput string
}{
{
args: args("logging elasticsearch describe --service-id 123 --version 1"),
wantError: "error parsing arguments: required flag --name not provided",
},
{
args: args("logging elasticsearch describe --service-id 123 --version 1 --name logs"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
GetElasticsearchFn: getElasticsearchError,
},
wantError: errTest.Error(),
},
{
args: args("logging elasticsearch describe --service-id 123 --version 1 --name logs"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
GetElasticsearchFn: getElasticsearchOK,
},
wantOutput: describeElasticsearchOutput,
},
} {
t.Run(strings.Join(testcase.args, " "), func(t *testing.T) {
var stdout bytes.Buffer
opts := testutil.NewRunOpts(testcase.args, &stdout)
opts.APIClient = mock.APIClient(testcase.api)
err := app.Run(opts)
testutil.AssertErrorContains(t, err, testcase.wantError)
testutil.AssertString(t, testcase.wantOutput, stdout.String())
})
}
}
func TestElasticsearchUpdate(t *testing.T) {
args := testutil.Args
for _, testcase := range []struct {
args []string
api mock.API
wantError string
wantOutput string
}{
{
args: args("logging elasticsearch update --service-id 123 --version 1 --new-name log"),
wantError: "error parsing arguments: required flag --name not provided",
},
{
args: args("logging elasticsearch update --service-id 123 --version 1 --name logs --new-name log --autoclone"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
CloneVersionFn: testutil.CloneVersionResult(4),
UpdateElasticsearchFn: updateElasticsearchError,
},
wantError: errTest.Error(),
},
{
args: args("logging elasticsearch update --service-id 123 --version 1 --name logs --new-name log --autoclone"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
CloneVersionFn: testutil.CloneVersionResult(4),
UpdateElasticsearchFn: updateElasticsearchOK,
},
wantOutput: "Updated Elasticsearch logging endpoint log (service 123 version 4)",
},
} {
t.Run(strings.Join(testcase.args, " "), func(t *testing.T) {
var stdout bytes.Buffer
opts := testutil.NewRunOpts(testcase.args, &stdout)
opts.APIClient = mock.APIClient(testcase.api)
err := app.Run(opts)
testutil.AssertErrorContains(t, err, testcase.wantError)
testutil.AssertStringContains(t, stdout.String(), testcase.wantOutput)
})
}
}
func TestElasticsearchDelete(t *testing.T) {
args := testutil.Args
for _, testcase := range []struct {
args []string
api mock.API
wantError string
wantOutput string
}{
{
args: args("logging elasticsearch delete --service-id 123 --version 1"),
wantError: "error parsing arguments: required flag --name not provided",
},
{
args: args("logging elasticsearch delete --service-id 123 --version 1 --name logs --autoclone"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
CloneVersionFn: testutil.CloneVersionResult(4),
DeleteElasticsearchFn: deleteElasticsearchError,
},
wantError: errTest.Error(),
},
{
args: args("logging elasticsearch delete --service-id 123 --version 1 --name logs --autoclone"),
api: mock.API{
ListVersionsFn: testutil.ListVersions,
CloneVersionFn: testutil.CloneVersionResult(4),
DeleteElasticsearchFn: deleteElasticsearchOK,
},
wantOutput: "Deleted Elasticsearch logging endpoint logs (service 123 version 4)",
},
} {
t.Run(strings.Join(testcase.args, " "), func(t *testing.T) {
var stdout bytes.Buffer
opts := testutil.NewRunOpts(testcase.args, &stdout)
opts.APIClient = mock.APIClient(testcase.api)
err := app.Run(opts)
testutil.AssertErrorContains(t, err, testcase.wantError)
testutil.AssertStringContains(t, stdout.String(), testcase.wantOutput)
})
}
}
var errTest = errors.New("fixture error")
func createElasticsearchOK(i *fastly.CreateElasticsearchInput) (*fastly.Elasticsearch, error) {
return &fastly.Elasticsearch{
ServiceID: i.ServiceID,
ServiceVersion: i.ServiceVersion,
Name: "log",
ResponseCondition: "Prevent default logging",
Format: `%h %l %u %t "%r" %>s %b`,
Index: "logs",
URL: "example.com",
Pipeline: "logs",
User: "user",
Password: "password",
RequestMaxEntries: 2,
RequestMaxBytes: 2,
Placement: "none",
TLSCACert: "-----BEGIN CERTIFICATE-----foo",
TLSHostname: "example.com",
TLSClientCert: "-----BEGIN CERTIFICATE-----bar",
TLSClientKey: "-----BEGIN PRIVATE KEY-----bar",
FormatVersion: 2,
}, nil
}
func createElasticsearchError(i *fastly.CreateElasticsearchInput) (*fastly.Elasticsearch, error) {
return nil, errTest
}
func listElasticsearchsOK(i *fastly.ListElasticsearchInput) ([]*fastly.Elasticsearch, error) {
return []*fastly.Elasticsearch{
{
ServiceID: i.ServiceID,
ServiceVersion: i.ServiceVersion,
Name: "logs",
ResponseCondition: "Prevent default logging",
Format: `%h %l %u %t "%r" %>s %b`,
Index: "logs",
URL: "example.com",
Pipeline: "logs",
User: "user",
Password: "password",
RequestMaxEntries: 2,
RequestMaxBytes: 2,
Placement: "none",
TLSCACert: "-----BEGIN CERTIFICATE-----foo",
TLSHostname: "example.com",
TLSClientCert: "-----BEGIN CERTIFICATE-----bar",
TLSClientKey: "-----BEGIN PRIVATE KEY-----bar",
FormatVersion: 2,
},
{
ServiceID: i.ServiceID,
ServiceVersion: i.ServiceVersion,
Name: "analytics",
Index: "analytics",
URL: "example.com",
Pipeline: "analytics",
User: "user",
Password: "password",
RequestMaxEntries: 2,
RequestMaxBytes: 2,
Placement: "none",
TLSCACert: "-----BEGIN CERTIFICATE-----foo",
TLSHostname: "example.com",
TLSClientCert: "-----BEGIN CERTIFICATE-----bar",
TLSClientKey: "-----BEGIN PRIVATE KEY-----bar",
ResponseCondition: "Prevent default logging",
Format: `%h %l %u %t "%r" %>s %b`,
FormatVersion: 2,
},
}, nil
}
func listElasticsearchsError(i *fastly.ListElasticsearchInput) ([]*fastly.Elasticsearch, error) {
return nil, errTest
}
var listElasticsearchsShortOutput = strings.TrimSpace(`
SERVICE VERSION NAME
123 1 logs
123 1 analytics
`) + "\n"
var listElasticsearchsVerboseOutput = strings.TrimSpace(`
Fastly API token not provided
Fastly API endpoint: https://api.fastly.com
Service ID: 123
Version: 1
Elasticsearch 1/2
Service ID: 123
Version: 1
Name: logs
Index: logs
URL: example.com
Pipeline: logs
TLS CA certificate: -----BEGIN CERTIFICATE-----foo
TLS client certificate: -----BEGIN CERTIFICATE-----bar
TLS client key: -----BEGIN PRIVATE KEY-----bar
TLS hostname: example.com
User: user
Password: password
Format: %h %l %u %t "%r" %>s %b
Format version: 2
Response condition: Prevent default logging
Placement: none
Elasticsearch 2/2
Service ID: 123
Version: 1
Name: analytics
Index: analytics
URL: example.com
Pipeline: analytics
TLS CA certificate: -----BEGIN CERTIFICATE-----foo
TLS client certificate: -----BEGIN CERTIFICATE-----bar
TLS client key: -----BEGIN PRIVATE KEY-----bar
TLS hostname: example.com
User: user
Password: password
Format: %h %l %u %t "%r" %>s %b
Format version: 2
Response condition: Prevent default logging
Placement: none
`) + "\n\n"
func getElasticsearchOK(i *fastly.GetElasticsearchInput) (*fastly.Elasticsearch, error) {
return &fastly.Elasticsearch{
ServiceID: i.ServiceID,
ServiceVersion: i.ServiceVersion,
Name: "logs",
ResponseCondition: "Prevent default logging",
Format: `%h %l %u %t "%r" %>s %b`,
Index: "logs",
URL: "example.com",
Pipeline: "logs",
User: "user",
Password: "password",
RequestMaxEntries: 2,
RequestMaxBytes: 2,
Placement: "none",
TLSCACert: "-----BEGIN CERTIFICATE-----foo",
TLSHostname: "example.com",
TLSClientCert: "-----BEGIN CERTIFICATE-----bar",
TLSClientKey: "-----BEGIN PRIVATE KEY-----bar",
FormatVersion: 2,
}, nil
}
func getElasticsearchError(i *fastly.GetElasticsearchInput) (*fastly.Elasticsearch, error) {
return nil, errTest
}
var describeElasticsearchOutput = strings.TrimSpace(`
Service ID: 123
Version: 1
Name: logs
Index: logs
URL: example.com
Pipeline: logs
TLS CA certificate: -----BEGIN CERTIFICATE-----foo
TLS client certificate: -----BEGIN CERTIFICATE-----bar
TLS client key: -----BEGIN PRIVATE KEY-----bar
TLS hostname: example.com
User: user
Password: password
Format: %h %l %u %t "%r" %>s %b
Format version: 2
Response condition: Prevent default logging
Placement: none
`) + "\n"
func updateElasticsearchOK(i *fastly.UpdateElasticsearchInput) (*fastly.Elasticsearch, error) {
return &fastly.Elasticsearch{
ServiceID: i.ServiceID,
ServiceVersion: i.ServiceVersion,
Name: "log",
ResponseCondition: "Prevent default logging",
Format: `%h %l %u %t "%r" %>s %b`,
Index: "logs",
URL: "example.com",
Pipeline: "logs",
User: "user",
Password: "password",
RequestMaxEntries: 2,
RequestMaxBytes: 2,
Placement: "none",
TLSCACert: "-----BEGIN CERTIFICATE-----foo",
TLSHostname: "example.com",
TLSClientCert: "-----BEGIN CERTIFICATE-----bar",
TLSClientKey: "-----BEGIN PRIVATE KEY-----bar",
FormatVersion: 2,
}, nil
}
func updateElasticsearchError(i *fastly.UpdateElasticsearchInput) (*fastly.Elasticsearch, error) {
return nil, errTest
}
func deleteElasticsearchOK(i *fastly.DeleteElasticsearchInput) error {
return nil
}
func deleteElasticsearchError(i *fastly.DeleteElasticsearchInput) error {
return errTest
}
io-uring.rs
//! An HTTP/1 server that reads a temporary file filled with a dummy string
//! and returns its content as the response.
/*
Need io-uring feature flag to start. e.g:
cargo run --example io-uring --features io-uring
*/
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
use std::{io::Write, rc::Rc};
use tempfile::NamedTempFile;
use tokio_uring::fs::File;
use xitca_web::{
dev::fn_service,
http::{const_header_value::TEXT_UTF8, header::CONTENT_TYPE},
request::WebRequest,
response::WebResponse,
App, HttpServer,
};
const HELLO: &[u8] = b"hello world!";
fn main() -> std::io::Result<()> {
tokio_uring::start(async {
HttpServer::new(move || {
// a temporary file with 64 hello world string.
let mut file = NamedTempFile::new().unwrap();
for _ in 0..64 {
file.write_all(HELLO).unwrap();
}
App::with_current_thread_state(Rc::new(file)).service(fn_service(handler))
})
.bind("127.0.0.1:8080")?
.run()
.await
})
}
async fn handler(req: &mut WebRequest<'_, Rc<NamedTempFile>>) -> Result<WebResponse, Box<dyn std::error::Error>> {
let file = File::open(req.state().path()).await?;
let res = read(&file).await;
file.close().await?;
let buf = res?;
let mut res = req.as_response(buf);
res.headers_mut().append(CONTENT_TYPE, TEXT_UTF8);
Ok(res)
}
async fn read(file: &File) -> std::io::Result<Vec<u8>> {
let cap = 64 * HELLO.len();
let mut buf = Vec::with_capacity(cap);
let mut current = 0;
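// tokio-uring takes ownership of the buffer and hands it back with each
// completion, so the loop re-binds `buf` and advances `current` until the
// whole 64 * HELLO.len() payload written at startup has been read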
while current < cap {
let (res, b) = file.read_at(buf, current as u64).await;
let n = res?;
buf = b;
current += n;
}
Ok(buf)
}
bitfield_test.go
package bitfield
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestFields(t *testing.T) {
a := New(128)
b := New(128)
a.Set(10)
b.Set(10)
a.Set(42)
b.Set(42)
a.Set(100)
b.Set(100)
require.True(t, a.IsSet(42))
require.False(t, b.IsSet(43))
require.True(t, a.IsSubset(b))
v := uint64(1<<10 | 1<<42)
require.Equal(t, v, a[0])
require.Equal(t, v, b[0])
require.True(t, a.Equals(b))
c := a.Copy()
require.True(t, c.Equals(b))
z := New(128)
require.True(t, z.IsSubset(c))
c.And(a)
require.True(t, c.Equals(b))
c.And(z)
require.True(t, c.Equals(z))
c = New(64)
require.False(t, z.IsSubset(c))
c[0] = a[0]
require.False(t, c.Equals(a))
require.True(t, c.IsSubset(a))
b.And(c)
require.False(t, b.Equals(a))
}
gemfile_parser_test.go
package bundleinstall_test
import (
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
bundleinstall "github.com/paketo-community/bundle-install"
"github.com/sclevine/spec"
. "github.com/onsi/gomega"
)
const GEMFILE_TEMPLATE = `source 'https://rubygems.org'
ruby %s`
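// Rendered with a version literal such as `'~> 2.7.0'`, the template yields:
//
//	source 'https://rubygems.org'
//	ruby '~> 2.7.0'
//
// from which ParseVersion is expected to return the quote-stripped "~> 2.7.0".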
func testGemfileParser(t *testing.T, context spec.G, it spec.S) {
var (
Expect = NewWithT(t).Expect
path string
parser bundleinstall.GemfileParser
)
it.Before(func() {
file, err := ioutil.TempFile("", "Gemfile")
Expect(err).NotTo(HaveOccurred())
defer file.Close()
path = file.Name()
parser = bundleinstall.NewGemfileParser()
})
it.After(func() {
Expect(os.RemoveAll(path)).To(Succeed())
})
context("ParseVersion", func() {
context("when given different types of versions", func() {
it("parses the versions correctly", func() {
versions := []string{
`"2.6.0"`,
`"~> 2.6.0"`,
`"~> 2.7.0"`,
`'~> 2.7.0'`,
`'~> 2.10.0'`,
`'~> 2.10.10'`,
`'~> 10.0.0'`,
`'~>10.0.0'`,
`'~> 10.0.0'`,
`'~> 10.0.0'`,
`"< 2.10.10"`,
`"> 2.10.10"`,
`"<= 2.10.10"`,
`">= 2.10.10"`,
`"= 2.10.10"`,
}
for _, v := range versions {
expectedVersion := strings.Trim(v, `"'`)
Expect(ioutil.WriteFile(path, []byte(fmt.Sprintf(GEMFILE_TEMPLATE, v)), 0644)).To(Succeed())
version, err := parser.ParseVersion(path)
Expect(err).NotTo(HaveOccurred())
Expect(version).To(Equal(expectedVersion))
}
})
})
context("when the Gemfile file does not exist", func() {
it.Before(func() {
Expect(os.Remove(path)).To(Succeed())
})
it("returns an empty version", func() {
version, err := parser.ParseVersion(path)
Expect(err).NotTo(HaveOccurred())
Expect(version).To(BeEmpty())
})
})
context("failure cases", func() {
context("when the Gemfile cannot be opened", func() {
it.Before(func() {
Expect(os.Chmod(path, 0000)).To(Succeed())
})
it("returns an error", func() {
_, err := parser.ParseVersion(path)
Expect(err).To(MatchError(ContainSubstring("failed to parse Gemfile:")))
Expect(err).To(MatchError(ContainSubstring("permission denied")))
})
})
})
})
}
elanaspantry_feedspider.py
from scrapy.spider import BaseSpider
from scrapy.http import Request
from scrapy.selector import XmlXPathSelector
from openrecipes.spiders.elanaspantry_spider import ElanaspantryMixin
class ElanaspantryfeedSpider(BaseSpider, ElanaspantryMixin):
    name = "elanaspantry.feed"
    allowed_domains = [
        "www.elanaspantry.com",
        "feeds.feedburner.com",
        "feedproxy.google.com",
    ]
    start_urls = [
        "http://feeds.feedburner.com/elanaspantry",
    ]

    def parse(self, response):
        xxs = XmlXPathSelector(response)
        links = xxs.select("//item/*[local-name()='origLink']/text()").extract()
        return [Request(x, callback=self.parse_item) for x in links]
_Redux.ts
/**
* Curried function meant to replicate experience of 'connect' method of 'react-redux'
* @param {function} mapStateToProps maps store state to individual props
* @return {function} a function that receives a class to use as super, to return a subclass that has can connect to the redux store
*/
export function connectToStore(mapStateToProps: Function): Function {
const windowObj = window as any
return function (Wrapped: any): Function {
return class ConnectedToRedux extends Wrapped {
private readonly getState: Function = windowObj.reduxStore.getState // TODO: any better way to do this?
private unsubscribe: Function = () => {}
constructor(props: any) {
super(props)
this.watchProps = this.watchProps.bind(this)
this.connect()
}
private connect(): void {
this.unsubscribe = windowObj.reduxStore.subscribe(this.watchProps)
}
public disconnectFromStore() {
this.unsubscribe()
}
public watchProps(): void {
const state = this.getState()
const mappedProps = mapStateToProps(state)
if (Wrapped.prototype.hasOwnProperty('componentWillReceiveProps')) {
super.componentWillReceiveProps(mappedProps)
}
}
}
}
}
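// Usage sketch (editorial addition): `BaseWidget` and the state shape are
// hypothetical, and window.reduxStore must have been created beforehand:
//
//   const mapState = (state: any) => ({ todos: state.todos })
//   class ConnectedWidget extends (connectToStore(mapState)(BaseWidget) as any) {}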
main.rs
use anyhow::Result;
use lazy_static::lazy_static;
use regex::Regex;
use serenity::{
async_trait,
model::{channel::Message, gateway::Ready},
prelude::*,
};
use std::{collections::HashMap, env, sync::Arc};
mod puppystonk;
mod puppyweather;
mod puppywhy;
use shakmaty::Position;
struct Handler {
openweather_token: String,
google_maps_token: String,
}
struct ChessGame;
impl TypeMapKey for ChessGame {
type Value = Arc<RwLock<HashMap<String, Box<(shakmaty::Chess, Option<String>)>>>>;
}
async fn chess(ctx: &Context, msg: &Message) -> Result<String> {
let san_str = &msg.content[12..];
let san: shakmaty::san::San = san_str.parse()?;
let game_lock = {
let data_read = ctx.data.read().await;
data_read
.get::<ChessGame>()
.expect("Expected ChessGame")
.clone()
};
{
let current_player = msg.author.id.to_string();
let mut map = game_lock.write().await;
let entry = map
.entry(msg.channel_id.to_string())
.or_insert_with(|| Box::new((shakmaty::Chess::default(), None)));
if let Some(previous_player) = &(entry.1) {
if previous_player == &current_player {
return Ok("Someone else has to make a move first!!!!!".to_string());
}
}
let pos = &entry.0;
let mov = san.to_move(pos)?;
if let Ok(p) = pos.clone().play(&mov) {
let fen = shakmaty::fen::epd(&p);
if let Some(f) = fen.split(' ').next() {
let status: &str;
match p.outcome() {
None => {
**entry = (p, Some(current_player));
status = "";
}
Some(outcome) => {
match outcome {
shakmaty::Outcome::Decisive { winner: w } => match w {
shakmaty::Color::White => status = "White wins!",
shakmaty::Color::Black => status = "Black wins!",
},
shakmaty::Outcome::Draw => status = "Draw!",
}
**entry = (shakmaty::Chess::default(), None);
}
}
return Ok(format!("{} https://chess.dllu.net/{}.png", status, f));
}
}
}
Ok("Illegal move!!!!!".to_string())
}
#[async_trait]
impl EventHandler for Handler {
// Set a handler for the `message` event - so that whenever a new message
// is received - the closure (or function) passed will be called.
//
// Event handlers are dispatched through a threadpool, and so multiple
// events can be dispatched simultaneously.
async fn message(&self, ctx: Context, msg: Message) {
if msg.is_own(&ctx.cache).await {
return;
}
lazy_static! {
static ref WOOF_RE: Regex = Regex::new(
r"^((oua+f+\s*)+|(w(a|o|0|u|🌕)+r*f\s*)+|(aw+(o|0|🌕)+\s*)+|(b(a|o)+rk\s*)+|(汪\s*)+|(ワン\s*)+|(わん\s*)+|(гав\s*)+|(uowhf\s*)+)+(!|!)*$"
)
.unwrap();
static ref WEATHER_RE: Regex = Regex::new(r"^puppy weather\s\w+").unwrap();
static ref STONK_RE: Regex = Regex::new(r"^puppy stonk\s\w+").unwrap();
static ref CHESS_RE: Regex = Regex::new(r"^puppy chess\s\w*").unwrap();
}
let content = &msg.content;
let lower = content.to_lowercase();
if WOOF_RE.is_match(&lower) {
if let Err(why) = msg.reply(&ctx.http, content).await {
println!("Error sending message: {:?}", why);
}
} else if lower == "puppy why" {
if let Err(why) = msg.reply(&ctx.http, puppywhy::why()).await {
println!("Error sending message: {:?}", why);
}
} else if lower == "puppy how" {
if let Err(why) = msg
.reply(&ctx.http, "https://github.com/dllu/discord-woofer-rust")
.await
{
println!("Error sending message: {:?}", why);
}
} else if STONK_RE.is_match(&lower) {
let ticker = &lower[12..];
let stonk = puppystonk::stonk(ticker).await.unwrap();
if let Err(why) = msg.reply(&ctx.http, &stonk).await {
println!("Error sending message: {:?}", why);
}
} else if WEATHER_RE.is_match(&lower) {
let address = &lower[14..];
            // TODO: error handling
let location = puppyweather::geocode(address.to_string(), &self.google_maps_token)
.await
.unwrap();
let weather = puppyweather::weather(&location, &self.openweather_token)
.await
.unwrap();
let response = puppyweather::weather_string(address.to_string(), &location, weather);
if let Err(why) = msg.reply(&ctx.http, response).await {
println!("Error sending message: {:?}", why); | } else if CHESS_RE.is_match(&lower) {
let res = chess(&ctx, &msg).await;
match res {
Ok(s) => {
if let Err(why) = msg.reply(&ctx.http, s).await {
println!("Error sending message: {:?}", why);
}
}
Err(_) => {
if let Err(why) = msg.reply(&ctx.http, "Illegal move!!!!!").await {
println!("Error sending message: {:?}", why);
}
}
}
}
}
// Set a handler to be called on the `ready` event. This is called when a
// shard is booted, and a READY payload is sent by Discord. This payload
// contains data like the current user's guild Ids, current user data,
// private channels, and more.
//
// In this case, just print what the current user's username is.
async fn ready(&self, _: Context, ready: Ready) {
println!("{} is connected!", ready.user.name);
}
}
#[tokio::main]
async fn main() {
let discord_token =
env::var("DISCORD_TOKEN").expect("Expected $DISCORD_TOKEN in the environment");
let openweather_token =
env::var("FORECAST_TOKEN").expect("Expected $FORECAST_TOKEN in the environment");
let google_maps_token =
env::var("GOOGLE_MAPS_TOKEN").expect("Expected $GOOGLE_MAPS_TOKEN in the environment");
// Create a new instance of the Client, logging in as a bot. This will
// automatically prepend your bot token with "Bot ", which is a requirement
// by Discord for bot users.
let handler = Handler {
openweather_token,
google_maps_token,
};
let mut client = Client::builder(&discord_token)
.event_handler(handler)
.await
.expect("Err creating client");
{
let mut data = client.data.write().await;
data.insert::<ChessGame>(Arc::new(RwLock::new(HashMap::default())));
}
// Finally, start a single shard, and start listening to events.
//
// Shards will automatically attempt to reconnect, and will perform
// exponential backoff until it reconnects.
if let Err(why) = client.start().await {
println!("Client error: {:?}", why);
}
} | } |
midi.rs | use fraction::ToPrimitive;
use crate::{io::*, gp::*};
//MIDI channels
pub const CHANNEL_DEFAULT_NAMES: [&str; 128] = ["Piano", "Bright Piano", "Electric Grand", "Honky Tonk Piano", "Electric Piano 1", "Electric Piano 2",
"Harpsichord", "Clavinet", "Celesta",
"Glockenspiel",
"Music Box",
"Vibraphone", "Marimba", "Xylophone", "Tubular Bell",
"Dulcimer",
"Hammond Organ", "Perc Organ", "Rock Organ", "Church Organ", "Reed Organ",
"Accordion",
"Harmonica",
"Tango Accordion",
"Nylon Str Guitar", "Steel String Guitar", "Jazz Electric Gtr", "Clean Guitar", "Muted Guitar", "Overdrive Guitar", "Distortion Guitar", "Guitar Harmonics",
"Acoustic Bass", "Fingered Bass", "Picked Bass", "Fretless Bass", "Slap Bass 1", "Slap Bass 2", "Syn Bass 1", "Syn Bass 2",
"Violin", "Viola", "Cello", "Contrabass",
"Tremolo Strings", "Pizzicato Strings",
"Orchestral Harp",
"Timpani",
"Ensemble Strings", "Slow Strings", "Synth Strings 1", "Synth Strings 2",
"Choir Aahs", "Voice Oohs", "Syn Choir",
"Orchestra Hit",
"Trumpet", "Trombone", "Tuba", "Muted Trumpet", "French Horn", "Brass Ensemble", "Syn Brass 1", "Syn Brass 2",
"Soprano Sax", "Alto Sax", "Tenor Sax", "Baritone Sax",
"Oboe", "English Horn", "Bassoon", "Clarinet", "Piccolo", "Flute", "Recorder", "Pan Flute", "Bottle Blow", "Shakuhachi", "Whistle", "Ocarina",
"Syn Square Wave", "Syn Saw Wave", "Syn Calliope", "Syn Chiff", "Syn Charang", "Syn Voice", "Syn Fifths Saw", "Syn Brass and Lead",
"Fantasia", "Warm Pad", "Polysynth", "Space Vox", "Bowed Glass", "Metal Pad", "Halo Pad", "Sweep Pad", "Ice Rain", "Soundtrack", "Crystal", "Atmosphere",
"Brightness", "Goblins", "Echo Drops", "Sci Fi",
"Sitar", "Banjo", "Shamisen", "Koto", "Kalimba",
"Bag Pipe",
"Fiddle",
"Shanai",
"Tinkle Bell",
"Agogo",
"Steel Drums", "Woodblock", "Taiko Drum", "Melodic Tom", "Syn Drum", "Reverse Cymbal",
"Guitar Fret Noise", "Breath Noise",
"Seashore", "Bird", "Telephone", "Helicopter", "Applause", "Gunshot"];
pub const DEFAULT_PERCUSSION_CHANNEL: u8 = 9;
/// A MIDI channel describes playing data for a track.
#[derive(Debug,Copy,Clone)]
pub struct MidiChannel {
pub channel: u8,
pub effect_channel: u8,
instrument: i32,
pub volume: i8,
pub balance: i8,
pub chorus: i8,
pub reverb: i8,
pub phaser: i8,
pub tremolo: i8,
pub bank: u8,
}
impl Default for MidiChannel {
fn default() -> Self { MidiChannel { channel: 0, effect_channel: 1, instrument: 25, volume: 104, balance: 64, chorus: 0, reverb: 0, phaser: 0, tremolo: 0, bank: 0, }}
}
impl MidiChannel {
pub(crate) fn is_percussion_channel(self) -> bool {
(self.channel % 16) == DEFAULT_PERCUSSION_CHANNEL
}
    pub(crate) fn set_instrument(&mut self, instrument: i32) { // &mut self: MidiChannel is Copy, so a by-value receiver would silently discard the change
if instrument == -1 && self.is_percussion_channel() { self.instrument = 0; }
else {self.instrument = instrument;}
}
pub(crate) fn get_instrument(self) -> i32 {self.instrument}
    pub(crate) fn get_instrument_name(&self) -> String {String::from(CHANNEL_DEFAULT_NAMES[self.instrument.to_usize().unwrap()])} //TODO: FIXME: does not seem OK
}
impl Song{
/// Read all the MIDI channels
pub(crate) fn read_midi_channels(&mut self, data: &[u8], seek: &mut usize) { for i in 0u8..64u8 { self.channels.push(self.read_midi_channel(data, seek, i)); } }
    /// Read MIDI channels. Guitar Pro format provides 64 channels (4 MIDI ports by 16 channels), the channels are stored in this order:
///`port1/channel1`, `port1/channel2`, ..., `port1/channel16`, `port2/channel1`, ..., `port4/channel16`.
///
/// Each channel has the following form:
///
/// * **Instrument**: `int`
/// * **Volume**: `byte`
/// * **Balance**: `byte`
/// * **Chorus**: `byte`
/// * **Reverb**: `byte`
/// * **Phaser**: `byte`
/// * **Tremolo**: `byte`
/// * **blank1**: `byte` => Backward compatibility with version 3.0
/// * **blank2**: `byte` => Backward compatibility with version 3.0
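    /// (With a 4-byte int, each channel record is thus 4 + 6 + 2 = 12 bytes.)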
pub(crate) fn read_midi_channel(&self, data: &[u8], seek: &mut usize, channel: u8) -> MidiChannel {
let instrument = read_int(data, seek);
let mut c = MidiChannel{channel, effect_channel: channel, ..Default::default()};
c.volume = read_signed_byte(data, seek); c.balance = read_signed_byte(data, seek);
c.chorus = read_signed_byte(data, seek); c.reverb = read_signed_byte(data, seek); c.phaser = read_signed_byte(data, seek); c.tremolo = read_signed_byte(data, seek);
c.set_instrument(instrument);
//println!("Channel: {}\t Volume: {}\tBalance: {}\tInstrument={}, {}, {}", c.channel, c.volume, c.balance, instrument, c.get_instrument(), c.get_instrument_name());
*seek += 2; //Backward compatibility with version 3.0
c
}
/// Read MIDI channel. MIDI channel in Guitar Pro is represented by two integers. First is zero-based number of channel, second is zero-based number of channel used for effects.
pub(crate) fn read_channel(&mut self, data: &[u8], seek: &mut usize) -> usize { //TODO: fixme for writing
let index = read_int(data, seek) - 1;
let effect_channel = read_int(data, seek) - 1;
if 0 <= index && index < self.channels.len().to_i32().unwrap() {
if self.channels[index.to_usize().unwrap()].instrument < 0 {self.channels[index.to_usize().unwrap()].instrument = 0;}
if !self.channels[index.to_usize().unwrap()].is_percussion_channel() {self.channels[index.to_usize().unwrap()].effect_channel = effect_channel.to_u8().unwrap();}
}
index.to_usize().unwrap()
}
pub(crate) fn write_midi_channels(&self, data: &mut Vec<u8>) {
for i in 0..self.channels.len() {
println!("writing channel: {:?}", self.channels[i]);
if self.channels[i].is_percussion_channel() && self.channels[i].instrument == 0 {write_i32(data, -1);}
else {write_i32(data, self.channels[i].instrument);}
write_signed_byte(data, self.from_channel_short(self.channels[i].volume));
write_signed_byte(data, self.from_channel_short(self.channels[i].balance));
write_signed_byte(data, self.from_channel_short(self.channels[i].chorus));
write_signed_byte(data, self.from_channel_short(self.channels[i].reverb));
write_signed_byte(data, self.from_channel_short(self.channels[i].phaser));
write_signed_byte(data, self.from_channel_short(self.channels[i].tremolo));
write_placeholder_default(data, 2); //Backward compatibility with version 3.0
}
}
fn from_channel_short(&self, data: i8) -> i8 { std::cmp::max(-128, std::cmp::min(127, (data >> 3) - 1)) + 1 } | } |
|
helloworld.py | from flask import Flask, request
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
class Greeting (Resource):
def | (self):
return 'Hello Python World! from Indrani'
api.add_resource(Greeting, '/') # Route_1
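# e.g. `curl http://localhost:3333/` should return the greeting above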
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=3333)
| get |
opentelemetry_tracing.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from contextlib import contextmanager
from google.api_core.exceptions import GoogleAPICallError
logger = logging.getLogger(__name__)
try:
from opentelemetry import trace
from opentelemetry.instrumentation.utils import http_status_to_canonical_code
from opentelemetry.trace.status import Status
HAS_OPENTELEMETRY = True
except ImportError:
logger.info(
"This service is instrumented using OpenTelemetry."
"OpenTelemetry could not be imported; please"
"add opentelemetry-api and opentelemetry-instrumentation"
"packages in order to get BigQuery Tracing data."
)
HAS_OPENTELEMETRY = False
_default_attributes = {
"db.system": "BigQuery"
} # static, default values assigned to all spans
@contextmanager
def | (name, attributes=None, client=None, job_ref=None):
"""Creates a ContextManager for a Span to be exported to the configured exporter.
    If no configuration exists, yields None.
Args:
name (str): Name that will be set for the span being created
attributes (Optional[dict]):
Additional attributes that pertain to
the specific API call (i.e. not a default attribute)
client (Optional[google.cloud.bigquery.client.Client]):
Pass in a Client object to extract any attributes that may be
relevant to it and add them to the created spans.
        job_ref (Optional[google.cloud.bigquery.job._AsyncJob]):
Pass in a _AsyncJob object to extract any attributes that may be
relevant to it and add them to the created spans.
Yields:
opentelemetry.trace.Span: Yields the newly created Span.
Raises:
google.api_core.exceptions.GoogleAPICallError:
Raised if a span could not be yielded or issue with call to
OpenTelemetry.
"""
final_attributes = _get_final_span_attributes(attributes, client, job_ref)
if not HAS_OPENTELEMETRY:
yield None
return
tracer = trace.get_tracer(__name__)
# yield new span value
with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:
try:
yield span
except GoogleAPICallError as error:
if error.code is not None:
span.set_status(Status(http_status_to_canonical_code(error.code)))
raise
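# A minimal usage sketch (the span name and attribute key are illustrative):
#
#     with create_span("BigQuery.query", attributes={"query_id": "abc"}):
#         ...  # make the API call; a GoogleAPICallError sets the span status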
def _get_final_span_attributes(attributes=None, client=None, job_ref=None):
final_attributes = {}
final_attributes.update(_default_attributes.copy())
if client:
client_attributes = _set_client_attributes(client)
final_attributes.update(client_attributes)
if job_ref:
job_attributes = _set_job_attributes(job_ref)
final_attributes.update(job_attributes)
if attributes:
final_attributes.update(attributes)
return final_attributes
def _set_client_attributes(client):
return {"db.name": client.project, "location": client.location}
def _set_job_attributes(job_ref):
job_attributes = {
"db.name": job_ref.project,
"location": job_ref.location,
"num_child_jobs": job_ref.num_child_jobs,
"job_id": job_ref.job_id,
"parent_job_id": job_ref.parent_job_id,
"state": job_ref.state,
}
job_attributes["hasErrors"] = job_ref.error_result is not None
if job_ref.created is not None:
job_attributes["timeCreated"] = job_ref.created.isoformat()
if job_ref.started is not None:
job_attributes["timeStarted"] = job_ref.started.isoformat()
if job_ref.ended is not None:
job_attributes["timeEnded"] = job_ref.ended.isoformat()
return job_attributes
| create_span |
openapi3gen.go | // Package openapi3gen generates OpenAPIv3 JSON schemas from Go types.
package openapi3gen
import (
"encoding/json"
"math"
"reflect"
"strings"
"time"
"github.com/getkin/kin-openapi/jsoninfo"
"github.com/getkin/kin-openapi/openapi3"
)
// CycleError indicates that a type graph has one or more possible cycles.
type CycleError struct{}
func (err *CycleError) Error() string { return "detected cycle" }
// Option allows tweaking SchemaRef generation
type Option func(*generatorOpt)
type generatorOpt struct {
useAllExportedFields bool
}
// UseAllExportedFields changes the default behavior of only
// generating schemas for struct fields with a JSON tag.
func UseAllExportedFields() Option {
return func(x *generatorOpt) { x.useAllExportedFields = true }
}
// NewSchemaRefForValue uses reflection on the given value to produce a SchemaRef.
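// A sketch of typical use (the struct type here is hypothetical):
//
//	type Spec struct {
//		Name string `json:"name"`
//	}
//	ref, refs, err := NewSchemaRefForValue(&Spec{})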
func NewSchemaRefForValue(value interface{}, opts ...Option) (*openapi3.SchemaRef, map[*openapi3.SchemaRef]int, error) |
type Generator struct {
opts generatorOpt
Types map[reflect.Type]*openapi3.SchemaRef
// SchemaRefs contains all references and their counts.
	// If count is 1, it's not referenced anywhere else.
// An OpenAPI identifier has been assigned to each.
SchemaRefs map[*openapi3.SchemaRef]int
}
func NewGenerator(opts ...Option) *Generator {
gOpt := &generatorOpt{}
for _, f := range opts {
f(gOpt)
}
return &Generator{
Types: make(map[reflect.Type]*openapi3.SchemaRef),
SchemaRefs: make(map[*openapi3.SchemaRef]int),
opts: *gOpt,
}
}
func (g *Generator) GenerateSchemaRef(t reflect.Type) (*openapi3.SchemaRef, error) {
//check generatorOpt consistency here
return g.generateSchemaRefFor(nil, t)
}
func (g *Generator) generateSchemaRefFor(parents []*jsoninfo.TypeInfo, t reflect.Type) (*openapi3.SchemaRef, error) {
if ref := g.Types[t]; ref != nil {
g.SchemaRefs[ref]++
return ref, nil
}
ref, err := g.generateWithoutSaving(parents, t)
if ref != nil {
g.Types[t] = ref
g.SchemaRefs[ref]++
}
return ref, err
}
func (g *Generator) generateWithoutSaving(parents []*jsoninfo.TypeInfo, t reflect.Type) (*openapi3.SchemaRef, error) {
typeInfo := jsoninfo.GetTypeInfo(t)
for _, parent := range parents {
if parent == typeInfo {
return nil, &CycleError{}
}
}
if cap(parents) == 0 {
parents = make([]*jsoninfo.TypeInfo, 0, 4)
}
parents = append(parents, typeInfo)
for t.Kind() == reflect.Ptr {
t = t.Elem()
}
if strings.HasSuffix(t.Name(), "Ref") {
_, a := t.FieldByName("Ref")
v, b := t.FieldByName("Value")
if a && b {
vs, err := g.generateSchemaRefFor(parents, v.Type)
if err != nil {
return nil, err
}
refSchemaRef := RefSchemaRef
g.SchemaRefs[refSchemaRef]++
ref := openapi3.NewSchemaRef(t.Name(), &openapi3.Schema{
OneOf: []*openapi3.SchemaRef{
refSchemaRef,
vs,
},
})
g.SchemaRefs[ref]++
return ref, nil
}
}
schema := &openapi3.Schema{}
switch t.Kind() {
case reflect.Func, reflect.Chan:
return nil, nil // ignore
case reflect.Bool:
schema.Type = "boolean"
case reflect.Int:
schema.Type = "integer"
case reflect.Int8:
schema.Type = "integer"
schema.Min = &minInt8
schema.Max = &maxInt8
case reflect.Int16:
schema.Type = "integer"
schema.Min = &minInt16
schema.Max = &maxInt16
case reflect.Int32:
schema.Type = "integer"
schema.Format = "int32"
case reflect.Int64:
schema.Type = "integer"
schema.Format = "int64"
case reflect.Uint8:
schema.Type = "integer"
schema.Min = &zeroInt
schema.Max = &maxUint8
case reflect.Uint16:
schema.Type = "integer"
schema.Min = &zeroInt
schema.Max = &maxUint16
case reflect.Uint32:
schema.Type = "integer"
schema.Min = &zeroInt
schema.Max = &maxUint32
case reflect.Uint64:
schema.Type = "integer"
schema.Min = &zeroInt
schema.Max = &maxUint64
case reflect.Float32:
schema.Type = "number"
schema.Format = "float"
case reflect.Float64:
schema.Type = "number"
schema.Format = "double"
case reflect.String:
schema.Type = "string"
case reflect.Slice:
if t.Elem().Kind() == reflect.Uint8 {
if t == rawMessageType {
return &openapi3.SchemaRef{Value: schema}, nil
}
schema.Type = "string"
schema.Format = "byte"
} else {
schema.Type = "array"
items, err := g.generateSchemaRefFor(parents, t.Elem())
if err != nil {
return nil, err
}
if items != nil {
g.SchemaRefs[items]++
schema.Items = items
}
}
case reflect.Map:
schema.Type = "object"
additionalProperties, err := g.generateSchemaRefFor(parents, t.Elem())
if err != nil {
return nil, err
}
if additionalProperties != nil {
g.SchemaRefs[additionalProperties]++
schema.AdditionalProperties = additionalProperties
}
case reflect.Struct:
if t == timeType {
schema.Type = "string"
schema.Format = "date-time"
} else {
for _, fieldInfo := range typeInfo.Fields {
// Only fields with JSON tag are considered (by default)
if !fieldInfo.HasJSONTag && !g.opts.useAllExportedFields {
continue
}
// If asked, try to use yaml tag
name, fType := fieldInfo.JSONName, fieldInfo.Type
if !fieldInfo.HasJSONTag && g.opts.useAllExportedFields {
ff := t.Field(fieldInfo.Index[len(fieldInfo.Index)-1])
if tag, ok := ff.Tag.Lookup("yaml"); ok && tag != "-" {
name, fType = tag, ff.Type
}
}
ref, err := g.generateSchemaRefFor(parents, fType)
if err != nil {
return nil, err
}
if ref != nil {
g.SchemaRefs[ref]++
schema.WithPropertyRef(name, ref)
}
}
// Object only if it has properties
if schema.Properties != nil {
schema.Type = "object"
}
}
}
return openapi3.NewSchemaRef(t.Name(), schema), nil
}
var RefSchemaRef = openapi3.NewSchemaRef("Ref",
openapi3.NewObjectSchema().WithProperty("$ref", openapi3.NewStringSchema().WithMinLength(1)))
var (
timeType = reflect.TypeOf(time.Time{})
rawMessageType = reflect.TypeOf(json.RawMessage{})
zeroInt = float64(0)
maxInt8 = float64(math.MaxInt8)
minInt8 = float64(math.MinInt8)
maxInt16 = float64(math.MaxInt16)
minInt16 = float64(math.MinInt16)
maxUint8 = float64(math.MaxUint8)
maxUint16 = float64(math.MaxUint16)
maxUint32 = float64(math.MaxUint32)
maxUint64 = float64(math.MaxUint64)
)
| {
g := NewGenerator(opts...)
ref, err := g.GenerateSchemaRef(reflect.TypeOf(value))
for ref := range g.SchemaRefs {
ref.Ref = ""
}
return ref, g.SchemaRefs, err
} |
fetcher.go | // Package utl (fetcher.go) :
// These methods are for retrieving data from a URL.
package utl
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"strconv"
"time"
)
// RequestParams : Parameters for FetchAPI
type RequestParams struct {
Method string
APIURL string
Data io.Reader
Contenttype string
Accesstoken string
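	// Dtime is the HTTP client timeout in seconds (see FetchAPI).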
Dtime int64
}
// FetchAPI : For fetching data from a URL.
func (r *RequestParams) FetchAPI() ([]byte, error) {
req, err := http.NewRequest(
r.Method,
r.APIURL,
r.Data,
)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", r.Contenttype)
req.Header.Set("Authorization", "Bearer "+r.Accesstoken)
client := &http.Client{
Timeout: time.Duration(r.Dtime) * time.Second,
}
res, err := client.Do(req)
	if err != nil || res.StatusCode >= 300 {
var msg []byte
var er string
| msg = []byte(err.Error())
er = err.Error()
} else {
errmsg, err := ioutil.ReadAll(res.Body)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v. ", err)
os.Exit(1)
}
msg = errmsg
er = "Status Code: " + strconv.Itoa(res.StatusCode)
}
return msg, errors.New(er)
}
	// Close the body even if reading fails part-way through.
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	return body, nil
} | if res == nil {
|
Estado.js | import React, {Component} from "react";
import PropTypes from "prop-types";
class Estado extends Component{
constructor(props){
super(props);
}
render(){
console.log(this.props);
console.log(this.props.recursos);
return(<div>
<div className="nombre">{this.props.recursos.nombre_recurso} </div>
<div className="cantidad">{this.props.recursos.cantidad_recurso}</div>
</div>
);
}
}
Estado.propTypes={
recursos:PropTypes.object.isRequired
}; |
export default Estado; | |
layer.py | """ $lic$
Copyright (C) 2016-2019 by The Board of Trustees of Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
from . import data_category_enum as de
from . import loop_enum as le
from .. import util
from .data_dim_loops import DataDimLoops
class Layer(util.ContentHashClass):
'''
Base NN layer.
Includes only the output neuron parameters.
nofm: # ofmap channels
hofm, wofm: ofmap height/width
htrd, wtrd: stride height/width
'''
def __init__(self, nofm, sofm, strd=1):
if isinstance(sofm, int):
hofm = sofm
wofm = sofm
elif len(sofm) == 2:
hofm = sofm[0]
wofm = sofm[1]
else:
raise ValueError('Layer: sofm is invalid ({}), '
'needs to be either one integer or '
'a pair of integers'.format(sofm))
assert hofm > 0 and wofm > 0
if isinstance(strd, int):
htrd = strd
wtrd = strd
elif len(strd) == 2:
htrd = strd[0]
wtrd = strd[1]
else:
raise ValueError('Layer: strd is invalid ({}), '
'needs to be either one integer or '
'a pair of integers'.format(strd))
assert htrd > 0 and wtrd > 0
self.nofm = nofm
self.hofm = hofm
self.wofm = wofm
self.htrd = htrd
self.wtrd = wtrd
@staticmethod
def data_loops():
''' Dimension loops of the data. '''
raise NotImplementedError
def input_layer(self):
''' Get the input layer parameters. '''
raise NotImplementedError(self.__class__.__name__)
@property
def nifm(self):
''' Number of fmap channels of input layer. '''
return self.input_layer().nofm
@property
def hifm(self):
''' Fmap height of input layer. '''
return self.input_layer().hofm
@property
def wifm(self):
''' Fmap width of input layer. '''
return self.input_layer().wofm
def ofmap_size(self, batch_size=1, word_size=1):
'''
Get size of one output fmap with `batch_size`.
If `word_size` is set to word byte size, return size in bytes.
'''
return self.hofm * self.wofm * batch_size * word_size
def total_ofmap_size(self, batch_size=1, word_size=1):
'''
Get total size of all output fmaps with `batch_size`.
If `word_size` is set to word byte size, return size in bytes.
'''
return self.nofm * self.ofmap_size(batch_size, word_size)
def ifmap_size(self, batch_size=1, word_size=1):
'''
Get size of one input fmap with `batch_size`.
If `word_size` is set to word byte size, return size in bytes.
'''
return self.input_layer().ofmap_size(batch_size, word_size)
def total_ifmap_size(self, batch_size=1, word_size=1):
'''
Get total size of all input fmaps with `batch_size`.
If `word_size` is set to word byte size, return size in bytes.
'''
return self.input_layer().total_ofmap_size(batch_size, word_size)
def ops_per_neuron(self):
|
def total_ops(self, batch_size=1):
''' Get total number of operations. '''
return self.total_ofmap_size() * self.ops_per_neuron() * batch_size
def is_valid_padding_sifm(self, sifm):
''' Whether the given `sifm` is valid when allowing padding. '''
if isinstance(sifm, int):
hifm = sifm
wifm = sifm
elif len(sifm) == 2:
hifm = sifm[0]
wifm = sifm[1]
else:
raise ValueError('Layer: sifm is invalid ({}), '
'needs to be either one integer or '
'a pair of integers'.format(sifm))
h_padding_rng = sorted((self.hofm * self.htrd, self.hifm))
w_padding_rng = sorted((self.wofm * self.wtrd, self.wifm))
return (h_padding_rng[0] <= hifm <= h_padding_rng[1]
and w_padding_rng[0] <= wifm <= w_padding_rng[1])
def __repr__(self):
return '{}({})'.format(
self.__class__.__name__,
', '.join([
'nofm={}'.format(repr(self.nofm)),
'sofm={}'.format(repr((self.hofm, self.wofm))),
'strd={}'.format(repr((self.htrd, self.wtrd)))]))
class InputLayer(Layer):
'''
NN input layer parameters.
'''
@staticmethod
def data_loops():
dls = [None] * de.NUM
dls[de.FIL] = DataDimLoops()
dls[de.IFM] = DataDimLoops()
dls[de.OFM] = DataDimLoops(le.OFM, le.BAT)
return tuple(dls)
def input_layer(self):
return None
def ops_per_neuron(self):
return 0
class ConvLayer(Layer):
'''
NN convolutional layer parameters.
nifm (C): # ifmap channels
nofm (M): # ofmap channels
hifm, wifm (H): ifmap height/width
hofm, wofm (E): ofmap height/width
hfil, wfil (R): weight filter width/height
htrd, wtrd (U): stride height/width
'''
def __init__(self, nifm, nofm, sofm, sfil, strd=1):
super(ConvLayer, self).__init__(nofm, sofm, strd=strd)
if isinstance(sfil, int):
hfil = sfil
wfil = sfil
elif len(sfil) == 2:
hfil = sfil[0]
wfil = sfil[1]
else:
raise ValueError('ConvLayer: sfil is invalid ({}), '
'needs to be either one integer or '
'a pair of integers'.format(sfil))
self.hfil = hfil
self.wfil = wfil
hifm = self.hfil + (self.hofm - 1) * self.htrd
wifm = self.wfil + (self.wofm - 1) * self.wtrd
self.inlayer = Layer(nifm, (hifm, wifm))
@staticmethod
def data_loops():
dls = [None] * de.NUM
dls[de.FIL] = DataDimLoops(le.IFM, le.OFM)
dls[de.IFM] = DataDimLoops(le.IFM, le.BAT)
dls[de.OFM] = DataDimLoops(le.OFM, le.BAT)
return tuple(dls)
def input_layer(self):
return self.inlayer
def ops_per_neuron(self):
# 2D convolution across all ifmap channels.
return self.hfil * self.wfil * self.nifm
def filter_size(self, word_size=1):
'''
Get size of one weight filter.
If `word_size` is set to word byte size, return size in bytes.
'''
return self.hfil * self.wfil * word_size
def total_filter_size(self, word_size=1):
'''
Get total size of all weight filters.
If `word_size` is set to word byte size, return size in bytes.
'''
return self.nifm * self.nofm * self.filter_size(word_size)
def __repr__(self):
return '{}({})'.format(
self.__class__.__name__,
', '.join([
'nifm={}'.format(repr(self.nifm)),
'nofm={}'.format(repr(self.nofm)),
'sofm={}'.format(repr((self.hofm, self.wofm))),
'sfil={}'.format(repr((self.hfil, self.wfil))),
'strd={}'.format(repr((self.htrd, self.wtrd)))]))
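# A usage sketch (numbers are illustrative): a 3x3 conv mapping 64 to 128
# channels over 56x56 output fmaps with stride 1:
#   conv = ConvLayer(nifm=64, nofm=128, sofm=56, sfil=3)
#   conv.total_ops()  # == (56 * 56 * 128) * (3 * 3 * 64)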
class FCLayer(ConvLayer):
'''
NN fully-connected layer parameters.
As a special case of CONVLayer.
hifm = hfil, wifm = wfil, strd = 1, hofm = wofm = 1
'''
def __init__(self, nifm, nofm, sfil=1):
super(FCLayer, self).__init__(nifm, nofm, 1, sfil)
assert self.hofm == 1 and self.wofm == 1
def __repr__(self):
return '{}({})'.format(
self.__class__.__name__,
', '.join([
'nifm={}'.format(repr(self.nifm)),
'nofm={}'.format(repr(self.nofm)),
'sfil={}'.format(repr((self.hfil, self.wfil)))]))
class LocalRegionLayer(Layer):
'''
NN layer which computes on a local region. The layer has no or limited
shared weights, whose impact can be ignored during scheduling.
Includes pooling layer, normalization layer, and element-wise layer.
'''
def __init__(self, nofm, sofm, nreg, sreg, ntrd=1, strd=1):
super(LocalRegionLayer, self).__init__(nofm, sofm, strd=strd)
if isinstance(sreg, int):
hreg = sreg
wreg = sreg
elif len(sreg) == 2:
hreg = sreg[0]
wreg = sreg[1]
else:
raise ValueError('LocalRegionLayer: sreg is invalid ({}), '
'needs to be either one integer or '
'a pair of integers'.format(sreg))
if nreg > 1 and (hreg * wreg) > 1:
raise ValueError('LocalRegionLayer: local region cannot be a mix '
'of both n ({}) and h & w ({}, {})'
.format(nreg, hreg, wreg))
self.nreg = nreg
self.hreg = hreg
self.wreg = wreg
self.ntrd = ntrd
nifm = self.nofm * self.ntrd # ignore all-zero padding channels.
hifm = self.hreg + (self.hofm - 1) * self.htrd
wifm = self.wreg + (self.wofm - 1) * self.wtrd
self.inlayer = Layer(nifm, (hifm, wifm))
@staticmethod
def data_loops():
dls = [None] * de.NUM
dls[de.FIL] = DataDimLoops()
dls[de.IFM] = DataDimLoops(le.OFM, le.BAT)
dls[de.OFM] = DataDimLoops(le.OFM, le.BAT)
return tuple(dls)
def input_layer(self):
return self.inlayer
def ops_per_neuron(self):
# Each output point corresponds to merging a local region.
return self.region_size()
def region_size(self):
''' The size of the local region corresponding to one output point. '''
return self.nreg * self.hreg * self.wreg
def __repr__(self):
return '{}({})'.format(
self.__class__.__name__,
', '.join([
'nofm={}'.format(repr(self.nofm)),
'sofm={}'.format(repr((self.hofm, self.wofm))),
'nreg={}'.format(repr(self.nreg)),
'sreg={}'.format(repr((self.hreg, self.wreg))),
'ntrd={}'.format(repr(self.ntrd)),
'strd={}'.format(repr((self.htrd, self.wtrd)))]))
class PoolingLayer(LocalRegionLayer):
'''
NN pooling layer parameters.
As a special case of LocalRegionLayer.
nreg = ntrd = 1
'''
def __init__(self, nofm, sofm, sreg, strd=None):
if strd is None:
strd = sreg
super(PoolingLayer, self).__init__(nofm, sofm, 1, sreg,
ntrd=1, strd=strd)
assert self.nreg == 1
assert self.ntrd == 1
def __repr__(self):
return '{}({})'.format(
self.__class__.__name__,
', '.join([
'nofm={}'.format(repr(self.nofm)),
'sofm={}'.format(repr((self.hofm, self.wofm))),
'sreg={}'.format(repr((self.hreg, self.wreg))),
'strd={}'.format(repr((self.htrd, self.wtrd)))]))
class EltwiseLayer(LocalRegionLayer):
'''
NN element-wise layer parameters.
As a special case of LocalRegionLayer.
nreg = ntrd, sreg = 1
'''
def __init__(self, nofm, sofm, nreg):
super(EltwiseLayer, self).__init__(nofm, sofm, nreg, 1,
ntrd=nreg, strd=1)
assert self.hreg == self.wreg == 1
def __repr__(self):
return '{}({})'.format(
self.__class__.__name__,
', '.join([
'nofm={}'.format(repr(self.nofm)),
'sofm={}'.format(repr((self.hofm, self.wofm))),
'nreg={}'.format(repr(self.nreg))]))
| ''' Number of operations per neuron. '''
raise NotImplementedError(self.__class__.__name__) |
map.go | package ebpf
import (
"fmt"
"unsafe"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
// MapSpec defines a Map.
type MapSpec struct {
// Name is passed to the kernel as a debug aid. Must only contain
// alpha numeric and '_' characters.
Name string
Type MapType
KeySize uint32
ValueSize uint32
MaxEntries uint32
Flags uint32
// InnerMap is used as a template for ArrayOfMaps and HashOfMaps
InnerMap *MapSpec
}
func (ms *MapSpec) String() string {
return fmt.Sprintf("%s(keySize=%d, valueSize=%d, maxEntries=%d, flags=%d)", ms.Type, ms.KeySize, ms.ValueSize, ms.MaxEntries, ms.Flags)
}
// Copy returns a copy of the spec.
func (ms *MapSpec) Copy() *MapSpec {
if ms == nil {
return nil
}
cpy := *ms
cpy.InnerMap = ms.InnerMap.Copy()
return &cpy
}
// Map represents a Map file descriptor.
//
// It is not safe to close a map which is used by other goroutines.
//
// Methods which take interface{} arguments by default encode
// them using binary.Read/Write in the machine's native endianness.
//
// Implement Marshaler on the arguments if you need custom encoding.
type Map struct {
fd *bpfFD
abi MapABI
// Per CPU maps return values larger than the size in the spec
fullValueSize int
}
// NewMap creates a new Map.
//
// Creating a map for the first time will perform feature detection
// by creating small, temporary maps.
func NewMap(spec *MapSpec) (*Map, error) {
if spec.Type != ArrayOfMaps && spec.Type != HashOfMaps {
return createMap(spec, nil)
}
if spec.InnerMap == nil {
return nil, errors.Errorf("%s requires InnerMap", spec.Type)
}
template, err := createMap(spec.InnerMap, nil)
if err != nil {
return nil, err
}
defer template.Close()
return createMap(spec, template.fd)
}
func createMap(spec *MapSpec, inner *bpfFD) (*Map, error) {
cpy := *spec
switch spec.Type {
case ArrayOfMaps:
fallthrough
case HashOfMaps:
if spec.ValueSize != 0 {
return nil, errors.Errorf("ValueSize must be zero for map of map")
}
cpy.ValueSize = 4
case PerfEventArray:
if spec.KeySize != 0 {
return nil, errors.Errorf("KeySize must be zero for perf event array")
}
if spec.ValueSize != 0 {
return nil, errors.Errorf("ValueSize must be zero for perf event array")
}
if spec.MaxEntries != 0 {
return nil, errors.Errorf("MaxEntries must be zero for perf event array")
}
n, err := possibleCPUs()
if err != nil {
return nil, errors.Wrap(err, "perf event array")
}
cpy.KeySize = 4
cpy.ValueSize = 4
cpy.MaxEntries = uint32(n)
}
attr := bpfMapCreateAttr{
mapType: cpy.Type,
keySize: cpy.KeySize,
valueSize: cpy.ValueSize,
maxEntries: cpy.MaxEntries,
flags: cpy.Flags,
}
if inner != nil {
var err error
attr.innerMapFd, err = inner.value()
if err != nil {
return nil, errors.Wrap(err, "map create")
}
}
name, err := newBPFObjName(spec.Name)
if err != nil {
return nil, errors.Wrap(err, "map create")
}
if haveObjName.Result() {
attr.mapName = name
}
fd, err := bpfMapCreate(&attr)
if err != nil {
return nil, errors.Wrap(err, "map create")
}
return newMap(fd, newMapABIFromSpec(&cpy))
}
func newMap(fd *bpfFD, abi *MapABI) (*Map, error) {
m := &Map{
fd,
*abi,
int(abi.ValueSize),
}
if !abi.Type.hasPerCPUValue() {
return m, nil
}
possibleCPUs, err := possibleCPUs()
if err != nil {
return nil, err
}
m.fullValueSize = align(int(abi.ValueSize), 8) * possibleCPUs
return m, nil
}
func (m *Map) String() string {
return fmt.Sprintf("%s#%d", m.abi.Type, m.fd)
}
// ABI gets the ABI of the Map
func (m *Map) ABI() MapABI {
return m.abi
}
// Get retrieves a value from a Map.
//
// Calls Close() on valueOut if it is of type **Map or **Program,
// and *valueOut is not nil.
func (m *Map) Get(key, valueOut interface{}) (bool, error) {
valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
err := m.lookup(key, valuePtr)
if errors.Cause(err) == unix.ENOENT {
return false, nil
}
if err != nil {
return false, err
}
if valueBytes == nil {
return true, nil
}
if m.abi.Type.hasPerCPUValue() {
return true, unmarshalPerCPUValue(valueOut, int(m.abi.ValueSize), valueBytes)
}
switch value := valueOut.(type) {
case **Map:
m, err := unmarshalMap(valueBytes)
if err != nil {
return true, err
}
(*value).Close()
*value = m
return true, nil
case *Map:
return true, errors.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil))
case Map:
return true, errors.Errorf("can't unmarshal into %T, need %T", value, (**Map)(nil))
case **Program:
p, err := unmarshalProgram(valueBytes)
if err != nil {
return true, err
}
(*value).Close()
*value = p
return true, nil
case *Program:
return true, errors.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil))
case Program:
return true, errors.Errorf("can't unmarshal into %T, need %T", value, (**Program)(nil))
default:
return true, unmarshalBytes(valueOut, valueBytes)
}
}
// GetBytes gets a value from Map
func (m *Map) GetBytes(key interface{}) ([]byte, error) {
valueBytes := make([]byte, m.fullValueSize)
valuePtr := newPtr(unsafe.Pointer(&valueBytes[0]))
err := m.lookup(key, valuePtr)
if errors.Cause(err) == unix.ENOENT {
return nil, nil
}
return valueBytes, err
}
func (m *Map) lookup(key interface{}, valueOut syscallPtr) error {
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
if err != nil {
return errors.Wrap(err, "key")
}
return bpfMapLookupElem(m.fd, keyPtr, valueOut)
}
// Create creates a new value in a map, failing if the key exists already
func (m *Map) Create(key, value interface{}) error {
return m.update(key, value, _NoExist)
}
// Put replaces or creates a value in map
func (m *Map) Put(key, value interface{}) error {
return m.update(key, value, _Any)
}
// Replace replaces a value in a map, failing if the value did not exist
func (m *Map) Replace(key, value interface{}) error {
return m.update(key, value, _Exist)
}
// Delete removes a value.
//
// Use DeleteStrict if you desire an error if key does not exist.
func (m *Map) Delete(key interface{}) error {
err := m.DeleteStrict(key)
if err == unix.ENOENT {
return nil
}
return err
}
// DeleteStrict removes a key and returns an error if the
// key doesn't exist.
func (m *Map) DeleteStrict(key interface{}) error {
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
if err != nil {
return err
}
return bpfMapDeleteElem(m.fd, keyPtr)
}
// NextKey finds the key following an initial key.
//
// See NextKeyBytes for details.
func (m *Map) NextKey(key, nextKeyOut interface{}) (bool, error) {
nextKeyPtr, nextKeyBytes := makeBuffer(nextKeyOut, int(m.abi.KeySize))
err := m.nextKey(key, nextKeyPtr)
if errors.Cause(err) == unix.ENOENT {
return false, nil
}
if err != nil {
return false, err
}
if nextKeyBytes == nil {
return true, nil
}
err = unmarshalBytes(nextKeyOut, nextKeyBytes)
if err != nil {
return false, err
}
return true, nil
}
// NextKeyBytes returns the key following an initial key as a byte slice.
//
// Passing nil will return the first key.
//
// Use Iterate if you want to traverse all entries in the map.
func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) {
nextKey := make([]byte, m.abi.KeySize)
nextKeyPtr := newPtr(unsafe.Pointer(&nextKey[0]))
err := m.nextKey(key, nextKeyPtr)
if errors.Cause(err) == unix.ENOENT {
return nil, nil
}
return nextKey, err
}
func (m *Map) nextKey(key interface{}, nextKeyOut syscallPtr) error {
var (
keyPtr syscallPtr
err error
)
if key != nil {
keyPtr, err = marshalPtr(key, int(m.abi.KeySize))
if err != nil {
return err
}
}
return bpfMapGetNextKey(m.fd, keyPtr, nextKeyOut)
}
// Iterate traverses a map.
//
// It's safe to create multiple iterators at the same time.
//
// It's not possible to guarantee that all keys in a map will be
// returned if there are concurrent modifications to the map.
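//
// A sketch of a full traversal (key/value types must match the map's ABI):
//
//	var key uint32
//	var value uint64
//	iter := m.Iterate()
//	for iter.Next(&key, &value) {
//		// use key and value
//	}
//	if err := iter.Err(); err != nil {
//		// handle iteration failure
//	}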
func (m *Map) Iterate() *MapIterator {
return newMapIterator(m)
}
// Close removes a Map
func (m *Map) Close() error {
if m == nil {
// This makes it easier to clean up when iterating maps
// of maps / programs.
return nil
}
return m.fd.close()
}
// FD gets the file descriptor of the Map.
//
// Calling this function is invalid after Close has been called.
func (m *Map) FD() int {
fd, err := m.fd.value()
if err != nil {
// Best effort: -1 is the number most likely to be an
// invalid file descriptor.
return -1
}
return int(fd)
}
// Clone creates a duplicate of the Map.
//
// Closing the duplicate does not affect the original, and vice versa.
// Changes made to the map are reflected by both instances however.
//
// Cloning a nil Map returns nil.
func (m *Map) Clone() (*Map, error) {
if m == nil {
return nil, nil
}
dup, err := m.fd.dup()
if err != nil {
return nil, errors.Wrap(err, "can't clone map")
}
return newMap(dup, &m.abi)
}
// Pin persists the map past the lifetime of the process that created it.
//
// This requires bpffs to be mounted above fileName. See http://cilium.readthedocs.io/en/doc-1.0/kubernetes/install/#mounting-the-bpf-fs-optional
func (m *Map) Pin(fileName string) error {
return bpfPinObject(fileName, m.fd)
}
// LoadPinnedMap load a Map from a BPF file.
//
// Requires at least Linux 4.13, and is not compatible with
// nested maps. Use LoadPinnedMapExplicit in these situations.
func LoadPinnedMap(fileName string) (*Map, error) {
fd, err := bpfGetObject(fileName)
if err != nil {
return nil, err
}
abi, err := newMapABIFromFd(fd)
if err != nil {
_ = fd.close()
return nil, err
}
return newMap(fd, abi)
}
// LoadPinnedMapExplicit loads a map with explicit parameters.
func LoadPinnedMapExplicit(fileName string, abi *MapABI) (*Map, error) {
fd, err := bpfGetObject(fileName)
if err != nil {
return nil, err
}
return newMap(fd, abi)
}
func (m *Map) update(key, value interface{}, putType uint64) error {
keyPtr, err := marshalPtr(key, int(m.abi.KeySize))
if err != nil {
return err
}
var valuePtr syscallPtr
if m.abi.Type.hasPerCPUValue() {
valuePtr, err = marshalPerCPUValue(value, int(m.abi.ValueSize))
} else {
valuePtr, err = marshalPtr(value, int(m.abi.ValueSize))
}
if err != nil {
return err
}
return bpfMapUpdateElem(m.fd, keyPtr, valuePtr, putType)
}
func unmarshalMap(buf []byte) (*Map, error) {
if len(buf) != 4 {
return nil, errors.New("map id requires 4 byte value")
}
// Looking up an entry in a nested map or prog array returns an id,
// not an fd.
id := nativeEndian.Uint32(buf)
fd, err := bpfGetMapFDByID(id)
if err != nil {
return nil, err
}
abi, err := newMapABIFromFd(fd)
if err != nil {
_ = fd.close()
return nil, err
}
return newMap(fd, abi)
}
// MarshalBinary implements BinaryMarshaler.
func (m *Map) MarshalBinary() ([]byte, error) {
fd, err := m.fd.value()
if err != nil {
return nil, err
}
buf := make([]byte, 4)
nativeEndian.PutUint32(buf, fd)
return buf, nil
}
// MapIterator iterates a Map.
//
// See Map.Iterate.
type MapIterator struct {
target *Map
prevKey interface{}
prevBytes []byte
done bool
err error
}
func | (target *Map) *MapIterator {
return &MapIterator{
target: target,
prevBytes: make([]byte, int(target.abi.KeySize)),
}
}
// Next decodes the next key and value.
//
// Returns false if there are no more entries.
//
// See Map.Get for further caveats around valueOut.
func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
if mi.err != nil || mi.done {
return false
}
for {
var nextBytes []byte
nextBytes, mi.err = mi.target.NextKeyBytes(mi.prevKey)
if mi.err != nil {
return false
}
if nextBytes == nil {
mi.done = true
return false
}
// The user can get access to nextBytes since unmarshalBytes
// does not copy when unmarshaling into a []byte.
// Make a copy to prevent accidental corruption of
// iterator state.
copy(mi.prevBytes, nextBytes)
mi.prevKey = mi.prevBytes
var ok bool
ok, mi.err = mi.target.Get(nextBytes, valueOut)
if mi.err != nil {
return false
}
if !ok {
// Even though the key should be valid, we couldn't look up
// its value. If we're iterating a hash map this is probably
// because a concurrent delete removed the value before we
// could get it. If we're iterating one of the fd maps like
// ProgramArray it means that a given slot doesn't have
// a valid fd associated.
// In either case there isn't much we can do, so just
// continue to the next key.
continue
}
mi.err = unmarshalBytes(keyOut, nextBytes)
return mi.err == nil
}
}
// Err returns any encountered error.
//
// The method must be called after Next returns nil.
func (mi *MapIterator) Err() error {
return mi.err
}
| newMapIterator |
lib.rs | #[cfg(feature = "rayon")]
extern crate rayon;
#[cfg(feature = "rayon")]
use rayon::prelude::*;
#[cfg(feature = "c_api")]
pub mod c_api;
const ROTATE: u32 = 'z' as u32 - 'a' as u32 + 1;
/// Encrypt the input using rot26.
#[inline(always)]
pub fn encrypt(input: &str) -> String {
encrypt_any(input, 26)
}
/// Decrypt the input using rot26.
#[inline(always)]
pub fn decrypt(input: &str) -> String {
decrypt_any(input, 26)
}
/// Encrypt the input using rot13.
/// Warning: Security researchers have managed to crack rot13.
/// New users are recommended to use rot26 for the best security.
#[inline(always)]
pub fn encrypt_rot13(input: &str) -> String {
encrypt_any(input, 13)
}
/// Decrypt the input using rot13.
/// Warning: Security researchers have managed to crack rot13.
/// New users are recommended to use rot26 for the best security.
#[inline(always)]
pub fn decrypt_rot13(input: &str) -> String {
decrypt_any(input, 13)
}
/// Encrypt using any amount.
/// Warning: Please carefully choose the right amount.
/// New users are recommended to use rot26 for the best security.
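/// e.g. `encrypt_any("hello", 1)` yields `"ifmmp"` (see `test_rot_any` below).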
pub fn encrypt_any(input: &str, amount: u32) -> String |
/// Decrypt using any amount.
/// Warning: Please carefully choose the right amount.
/// New users are recommended to use rot26 for the best security.
pub fn decrypt_any(input: &str, amount: u32) -> String {
let closure = |c| {
let base = match c {
'a'...'z' => 'a' as u32,
'A'...'Z' => 'A' as u32,
_ => return c
};
std::char::from_u32(((c as u32 - base + ROTATE - amount) % ROTATE) + base).unwrap()
};
#[cfg(not(feature = "rayon"))]
{ input.chars().map(closure).collect() }
#[cfg(feature = "rayon")]
{ input.par_chars().map(closure).collect() }
}
#[cfg(test)]
mod tests {
use ::*;
#[test]
fn test_rot26() {
let plain = "hello";
let encrypted = encrypt(plain);
assert_eq!(encrypted, "hello");
let decrypted = decrypt(&encrypted);
assert_eq!(plain, decrypted);
}
#[test]
fn test_rot13() {
let plain = "hello";
let encrypted = encrypt_rot13(plain);
assert_eq!(encrypted, "uryyb");
let decrypted = decrypt_rot13(&encrypted);
assert_eq!(plain, decrypted);
}
#[test]
fn test_rot13_all() {
let plain = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
let encrypted = encrypt_rot13(plain);
assert_eq!(encrypted, "nopqrstuvwxyzabcdefghijklmNOPQRSTUVWXYZABCDEFGHIJKLM");
let decrypted = decrypt_rot13(&encrypted);
assert_eq!(plain, decrypted);
}
#[test]
fn test_rot_any() {
let amount = 1;
let plain = "hello";
let encrypted = encrypt_any(plain, amount);
assert_eq!(encrypted, "ifmmp");
let decrypted = decrypt_any(&encrypted, amount);
assert_eq!(plain, decrypted);
}
}
| {
let closure = |c| {
let base = match c {
'a'...'z' => 'a' as u32,
'A'...'Z' => 'A' as u32,
_ => return c
};
std::char::from_u32(((c as u32 - base + amount) % ROTATE) + base).unwrap()
};
#[cfg(not(feature = "rayon"))]
{ input.chars().map(closure).collect() }
#[cfg(feature = "rayon")]
{ input.par_chars().map(closure).collect() }
} |
parse_test.go | package serverinfo
import (
"os"
"testing"
)
func TestParseServerInfo(t *testing.T) {
inputFiles := []string{
"negative-space.xml",
"na-values.xml",
}
for _, inputFile := range inputFiles {
inputFile := inputFile
t.Run(inputFile, func(t *testing.T) {
t.Parallel()
reader, err := os.Open("testdata/" + inputFile)
if err != nil { |
if _, err := Parse(reader); err != nil {
t.Errorf("got error %q", err)
}
})
}
} | t.Fatalf("error opening test data: %s", err)
} |
statement.go | /*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package policy
import (
"encoding/json"
"fmt"
"strings"
"github.com/minio/minio/pkg/policy/condition"
)
// Statement - policy statement.
type Statement struct {
SID ID `json:"Sid,omitempty"`
Effect Effect `json:"Effect"`
Principal Principal `json:"Principal"`
Actions ActionSet `json:"Action"`
Resources ResourceSet `json:"Resource"`
Conditions condition.Functions `json:"Condition,omitempty"`
}
// IsAllowed - checks whether the given policy args allow the REST API call to continue.
func (statement Statement) IsAllowed(args Args) bool {
check := func() bool {
if !statement.Principal.Match(args.AccountName) {
return false
}
if !statement.Actions.Contains(args.Action) {
return false
}
resource := args.BucketName
if args.ObjectName != "" {
if !strings.HasPrefix(args.ObjectName, "/") {
resource += "/"
}
resource += args.ObjectName
}
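		// e.g. bucket "photos" + object "2020/cat.jpg" -> "photos/2020/cat.jpg"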
if !statement.Resources.Match(resource, args.ConditionValues) {
return false
}
return statement.Conditions.Evaluate(args.ConditionValues)
}
return statement.Effect.IsAllowed(check())
}
// isValid - checks whether statement is valid or not.
func (statement Statement) isValid() error {
if !statement.Effect.IsValid() {
return fmt.Errorf("invalid Effect %v", statement.Effect)
}
if !statement.Principal.IsValid() {
return fmt.Errorf("invalid Principal %v", statement.Principal)
}
if len(statement.Actions) == 0 {
return fmt.Errorf("Action must not be empty")
}
if len(statement.Resources) == 0 {
return fmt.Errorf("Resource must not be empty")
}
for action := range statement.Actions {
if action.isObjectAction() {
if !statement.Resources.objectResourceExists() {
return fmt.Errorf("unsupported Resource found %v for action %v", statement.Resources, action)
}
} else {
if !statement.Resources.bucketResourceExists() {
return fmt.Errorf("unsupported Resource found %v for action %v", statement.Resources, action)
}
}
keys := statement.Conditions.Keys()
keyDiff := keys.Difference(actionConditionKeyMap[action])
if !keyDiff.IsEmpty() {
return fmt.Errorf("unsupported condition keys '%v' used for action '%v'", keyDiff, action)
}
}
return nil
}
// MarshalJSON - encodes Statement to JSON data.
func (statement Statement) MarshalJSON() ([]byte, error) {
if err := statement.isValid(); err != nil {
return nil, err
}
// subtype to avoid recursive call to MarshalJSON()
type subStatement Statement
ss := subStatement(statement)
return json.Marshal(ss)
}
// UnmarshalJSON - decodes JSON data to Statement.
func (statement *Statement) UnmarshalJSON(data []byte) error {
// subtype to avoid recursive call to UnmarshalJSON()
type subStatement Statement
var ss subStatement
if err := json.Unmarshal(data, &ss); err != nil {
return err
}
s := Statement(ss)
if err := s.isValid(); err != nil {
return err
} | *statement = s
return nil
}
// Validate - validates Statement is for given bucket or not.
func (statement Statement) Validate(bucketName string) error {
if err := statement.isValid(); err != nil {
return err
}
return statement.Resources.Validate(bucketName)
}
// NewStatement - creates new statement.
func NewStatement(effect Effect, principal Principal, actionSet ActionSet, resourceSet ResourceSet, conditions condition.Functions) Statement {
return Statement{
Effect: effect,
Principal: principal,
Actions: actionSet,
Resources: resourceSet,
Conditions: conditions,
}
} | |
ws_model.rs | use crate::rest_model::{string_or_float, Asks, Bids, OrderBook, OrderSide, OrderStatus, OrderType, TimeInForce};
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "e")]
pub enum WebsocketEvent {
#[serde(alias = "aggTrade")]
AggTrade(Box<TradesEvent>),
#[serde(alias = "trade")]
Trade(Box<TradeEvent>),
#[serde(alias = "kline")]
Kline(Box<KlineEvent>),
#[serde(alias = "24hrMiniTicker")]
DayMiniTicker(Box<MiniDayTickerEvent>),
#[serde(alias = "24hrTicker")]
DayTicker(Box<DayTickerEvent>),
#[serde(alias = "depthUpdate")]
DepthOrderBook(Box<DepthOrderBookEvent>),
#[serde(alias = "outboundAccountPosition")]
AccountPositionUpdate(Box<AccountPositionUpdate>),
#[serde(alias = "balanceUpdate")]
BalanceUpdate(Box<BalanceUpdate>),
#[serde(alias = "executionReport")]
OrderUpdate(Box<OrderUpdate>),
#[serde(alias = "listStatus")]
ListOrderUpdate(Box<OrderListUpdate>),
}
#[derive(Serialize, Deserialize, Debug)]
pub struct QueryResult {
pub result: Option<String>,
pub id: i64,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct TradesEvent {
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "a")]
pub aggregated_trade_id: u64,
#[serde(rename = "p")]
pub price: String,
#[serde(rename = "q")]
pub qty: String,
#[serde(rename = "f")]
pub first_break_trade_id: u64,
#[serde(rename = "l")]
pub last_break_trade_id: u64,
#[serde(rename = "T")]
pub trade_order_time: u64,
#[serde(rename = "m")]
pub is_buyer_maker: bool,
#[serde(skip, rename = "M")]
pub m_ignore: bool,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct TradeEvent {
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "t")]
pub trade_id: u64,
#[serde(rename = "p")]
pub price: String,
#[serde(rename = "q")]
pub qty: String,
#[serde(rename = "b")]
pub buyer_order_id: u64,
#[serde(rename = "a")]
pub seller_order_id: u64,
#[serde(rename = "T")]
pub trade_order_time: u64,
#[serde(rename = "m")]
pub is_buyer_maker: bool,
#[serde(skip, rename = "M")]
pub m_ignore: bool,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct DayTickerEvent {
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "p")]
pub price_change: String,
#[serde(rename = "P")]
pub price_change_percent: String,
#[serde(rename = "w")]
pub average_price: String,
#[serde(rename = "x")]
pub prev_close: String,
#[serde(rename = "c")]
pub current_close: String,
#[serde(rename = "Q")]
pub current_close_qty: String,
#[serde(rename = "b")]
pub best_bid: String,
#[serde(rename = "B")]
pub best_bid_qty: String,
#[serde(rename = "a")]
pub best_ask: String,
#[serde(rename = "A")]
pub best_ask_qty: String,
#[serde(rename = "o")]
pub open: String,
#[serde(rename = "h")]
pub high: String,
#[serde(rename = "l")]
pub low: String,
#[serde(rename = "v")]
pub volume: String,
#[serde(rename = "q")]
pub quote_volume: String,
#[serde(rename = "O")]
pub open_time: u64,
#[serde(rename = "C")]
pub close_time: u64,
#[serde(rename = "F")]
pub first_trade_id: i64,
#[serde(rename = "L")]
pub last_trade_id: i64,
#[serde(rename = "n")]
pub num_trades: u64,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct MiniDayTickerEvent {
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "c")]
pub current_close: String,
#[serde(rename = "o")]
pub open: String,
#[serde(rename = "h")]
pub high: String,
#[serde(rename = "l")]
pub low: String,
#[serde(rename = "v")]
pub volume: String,
#[serde(rename = "q")]
pub quote_volume: String,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct KlineEvent {
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "k")]
pub kline: Kline,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Kline {
#[serde(rename = "t")]
pub start_time: i64,
#[serde(rename = "T")]
pub end_time: i64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "i")]
pub interval: String,
#[serde(rename = "f")]
pub first_trade_id: i64,
#[serde(rename = "L")]
pub last_trade_id: i64,
#[serde(rename = "o")]
pub open: String,
#[serde(rename = "c")]
pub close: String,
#[serde(rename = "h")]
pub high: String,
#[serde(rename = "l")]
pub low: String,
#[serde(rename = "v")]
pub volume: String,
#[serde(rename = "n")]
pub number_of_trades: i64,
#[serde(rename = "x")]
pub is_final_bar: bool,
#[serde(rename = "q")]
pub quote_volume: String,
#[serde(rename = "V")]
pub active_buy_volume: String,
#[serde(rename = "Q")]
pub active_volume_buy_quote: String,
#[serde(skip, rename = "B")]
pub ignore_me: String,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct DepthOrderBookEvent {
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "U")]
pub first_update_id: u64,
#[serde(rename = "u")]
pub final_update_id: u64,
#[serde(rename = "b")]
pub bids: Vec<Bids>,
#[serde(rename = "a")]
pub asks: Vec<Asks>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct BookTickerEvent {
#[serde(rename = "u")]
pub update_id: u64,
#[serde(rename = "s")]
pub symbol: String, |
#[serde(rename = "B")]
pub best_bid_qty: String,
#[serde(rename = "a")]
pub best_ask: String,
#[serde(rename = "A")]
pub best_ask_qty: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct StreamEvent {
stream: String,
pub data: WebsocketEventAlt,
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(untagged)]
pub enum WebsocketEventAlt {
WebsocketEvent(WebsocketEvent),
Orderbook(Box<OrderBook>),
}
impl StreamEvent {
/// Returns (stream_name, channel)
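    /// e.g. "btcusdt@depth" -> ("btcusdt", "depth"); "!ticker@arr" -> ("ticker", "arr").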
pub fn parse_stream(&self) -> (String, String) {
let mut parsed = self.stream.clone();
        if parsed.starts_with('!') {
            parsed.remove(0);
        }
let split = parsed.split_once("@").unwrap_or((&parsed, ""));
(split.0.to_string(), split.1.to_string())
}
}
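// Example (sketch, not part of the original source): "btcusdt@depth" parses to
// ("btcusdt", "depth"); for combined-stream names such as "!miniTicker@arr" the
// leading '!' is stripped first, yielding ("miniTicker", "arr"); a name without
// an '@' yields (name, "").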
/// User Stream related events
/// Account position update
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct AccountPositionUpdate {
#[serde(alias = "E")]
pub event_time: u64,
#[serde(alias = "u")]
pub last_update_time: u64,
#[serde(alias = "B")]
pub balances: Vec<EventBalance>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct AccountUpdate {
#[serde(alias = "E")]
pub event_time: u64,
/// Maker commission rate (bips)
#[serde(alias = "m")]
maker_commission_rate: u64,
/// Taker commission rate (bips)
#[serde(alias = "t")]
taker_commission_rate: u64,
/// Buyer commission rate (bips)
#[serde(alias = "b")]
buyer_commission_rate: u64,
/// Seller commission rate (bips)
#[serde(alias = "s")]
seller_commission_rate: u64,
#[serde(alias = "T")]
can_trade: bool,
#[serde(alias = "W")]
can_withdraw: bool,
#[serde(alias = "D")]
can_deposit: bool,
#[serde(alias = "B")]
pub balances: Vec<EventBalance>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct EventBalance {
#[serde(rename = "a")]
pub asset: String,
#[serde(rename = "f")]
pub free: String,
#[serde(rename = "l")]
pub locked: String,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct BalanceUpdate {
#[serde(alias = "E")]
pub event_time: u64,
#[serde(rename = "a")]
pub asset: String,
#[serde(rename = "d")]
#[serde(with = "string_or_float")]
pub delta: f64,
#[serde(alias = "T")]
pub clear_time: u64,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct OrderUpdate {
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "s")]
pub symbol: String,
#[serde(rename = "c")]
pub client_order_id: String,
#[serde(rename = "S")]
pub side: OrderSide,
#[serde(rename = "o")]
pub order_type: OrderType,
#[serde(rename = "f")]
pub time_in_force: TimeInForce,
#[serde(rename = "q")]
#[serde(with = "string_or_float")]
pub qty: f64,
#[serde(rename = "p")]
#[serde(with = "string_or_float")]
pub price: f64,
#[serde(rename = "P")]
#[serde(with = "string_or_float")]
pub stop_price: f64,
#[serde(rename = "F")]
#[serde(with = "string_or_float")]
pub iceberg_qty: f64,
#[serde(rename = "g")]
pub order_list_id: i64,
#[serde(rename = "C")]
pub origin_client_id: Option<String>,
#[serde(rename = "x")]
pub execution_type: OrderStatus,
#[serde(rename = "X")]
pub current_order_status: OrderStatus,
#[serde(rename = "r")]
pub order_reject_reason: String,
#[serde(rename = "i")]
pub order_id: u64,
#[serde(rename = "l")]
#[serde(with = "string_or_float")]
pub qty_last_executed: f64,
#[serde(rename = "z")]
#[serde(with = "string_or_float")]
pub cumulative_filled_qty: f64,
#[serde(rename = "L")]
#[serde(with = "string_or_float")]
pub last_executed_price: f64,
#[serde(rename = "n")]
#[serde(with = "string_or_float")]
pub commission: f64,
#[serde(rename = "N")]
pub commission_asset: Option<String>,
#[serde(rename = "T")]
pub trade_order_time: u64,
#[serde(rename = "t")]
pub trade_id: i64,
#[serde(skip, rename = "I")]
pub i_ignore: u64,
#[serde(rename = "w")]
pub is_order_on_the_book: bool,
#[serde(rename = "m")]
pub is_buyer_maker: bool,
#[serde(skip, rename = "M")]
pub m_ignore: bool,
#[serde(rename = "O")]
pub order_creation_time: u64,
#[serde(rename = "Z")]
#[serde(with = "string_or_float")]
pub cumulative_quote_asset_transacted_qty: f64,
/// (i.e. lastPrice * lastQty)
#[serde(rename = "Y")]
#[serde(with = "string_or_float")]
pub last_quote_asset_transacted_qty: f64,
#[serde(rename = "Q")]
#[serde(with = "string_or_float")]
pub quote_order_qty: f64,
}
/// For OCO Events
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct OrderListUpdate {
#[serde(rename = "E")]
pub event_time: u64,
#[serde(rename = "S")]
pub symbol: String,
#[serde(rename = "g")]
order_list_id: i64,
#[serde(rename = "c")]
contingency_type: String,
#[serde(rename = "l")]
list_status_type: String,
#[serde(rename = "L")]
list_order_status: String,
#[serde(rename = "r")]
list_reject_reason: String,
#[serde(rename = "C")]
list_client_order_id: String,
#[serde(rename = "T")]
pub transaction_time: u64,
#[serde(rename = "O")]
pub objects: Vec<OrderListTransaction>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct OrderListTransaction {
#[serde(rename = "S")]
pub symbol: String,
#[serde(rename = "i")]
pub order_id: i64,
#[serde(rename = "c")]
pub client_order_id: i64,
} |
#[serde(rename = "b")]
pub best_bid: String, |
ws_dual_camera.py | # ws_dual_camera.py
# WSmith 12/23/20
# utilize modified module ws_csi_camera for the camera class
import cv2
import numpy as np
import ws_csi_camera as ws
from importlib import reload
reload(ws) # ws is under development
def display(sensor_mode=ws.S_MODE_3_1280_720_60,
dispW=ws.DISP_W_M3_M4_one_half,
dispH=ws.DISP_H_M3_M4_one_half,
display_fps=True):
    # at present, display the picam and a webcam; in the future, display two picams
picam = ws.CSI_Camera(display_fps=display_fps)
webcam = ws.CSI_Camera(display_fps=display_fps)
# this only needed for the picam
picam.create_gstreamer_pipeline(sensor_id=0, sensor_mode=sensor_mode, flip_method=0,
display_height=dispH, display_width=dispW)
picam.open(picam.gstreamer_pipeline)
webcam.open(1)
picam.start() | txt = "Picam on left: Sensor Mode {}, Display {} x {}".format(sensor_mode, dispW, dispH)
cv2.namedWindow(txt, cv2.WINDOW_AUTOSIZE)
while True:
        grabbed_l, imgL = picam.read()
        grabbed_r, imgR = webcam.read()
        if not (grabbed_l and grabbed_r):
            continue  # skip any frame that failed to arrive
        imgR = cv2.resize(imgR, (imgL.shape[1], imgL.shape[0]))
img = np.hstack((imgL, imgR))
cv2.imshow(txt, img)
keyCode = cv2.waitKey(5) & 0xFF
if keyCode == ord('q'):
break
picam.stop()
webcam.stop()
picam.release()
webcam.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
display(sensor_mode=ws.S_MODE_2_1920_1080_30,
dispW=ws.DISP_W_M2_one_quarter, dispH=ws.DISP_H_M2_one_quarter) | webcam.start()
|
lib.rs | use std::fmt::{Display, Formatter};
#[derive(Debug, Clone)]
pub enum EGError {
Owned(String),
StaticBorrow(&'static str)
}
impl Display for EGError {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
match self {
EGError::Owned(s) => write!(f, "{}", s),
&EGError::StaticBorrow(s) => write!(f, "{}", s)
}
}
}
#[derive(Debug, Clone)]
pub enum Value {
Int(i64),
Float(f64),
String(std::string::String),
List(Vec<Value>)
}
impl Value {
pub fn expect_int(&self) -> Result<i64, EGError> {
if let Value::Int(i) = self {
Ok(*i)
} else {
Err(EGError::StaticBorrow("value is not an integer"))
}
}
pub fn expect_float(&self) -> Result<f64, EGError> {
if let Value::Float(f) = self {
Ok(*f)
} else {
Err(EGError::StaticBorrow("value is not a float number"))
}
}
pub fn expect_str(&self) -> Result<&String, EGError> {
if let Value::String(s) = self {
Ok(s)
} else {
Err(EGError::StaticBorrow("value is not a string"))
}
}
pub fn expect_list(&self) -> Result<&Vec<Value>, EGError> {
if let Value::List(ls) = self {
Ok(ls)
} else {
Err(EGError::StaticBorrow("value is not a list"))
}
}
pub fn maybe_as_bool(&self) -> Option<bool> {
match self {
Value::Int(i) => Some(*i != 0),
Value::String(s) => Some(s == "true" || s == "True" || s == "1"),
_ => None
}
}
}
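// Example (sketch): Value::Int(0).maybe_as_bool() == Some(false),
// Value::String("true".into()).maybe_as_bool() == Some(true), and
// Value::Float(1.0).maybe_as_bool() == None, since floats are given no
// boolean interpretation here.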
impl Display for Value {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
match self {
Value::Int(i) => write!(f, "{}", i),
Value::Float(lf) => write!(f, "{}", lf),
Value::String(s) => write!(f, "{}", s),
            Value::List(ls) => {
                write!(f, "[")?;
                // all but the last element get a trailing ", " separator
                for v in &ls[..ls.len().saturating_sub(1)] {
                    write!(f, "{}, ", v)?;
                }
                if let Some(last) = ls.last() {
                    write!(f, "{}", last)?;
                }
                write!(f, "]")
            }
}
}
}
pub enum TreeNode {
Leaf { key: String, value: Value },
Branch { key: Option<String>, children: Vec<TreeNode> },
}
impl TreeNode {
fn chain_to_string(chain: &[String]) -> String {
debug_assert!(chain.len() != 0);
let mut ret = String::new();
for s in &chain[0..chain.len() - 1] {
ret.push_str(s);
ret.push('.');
}
ret.push_str(&chain[chain.len() - 1]);
ret
}
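    // Example (sketch): chain_to_string(&["a".into(), "b".into(), "c".into()])
    // renders the key chain as "a.b.c".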
pub fn root_node() -> TreeNode {
TreeNode::Branch { key: None, children: Vec::new() }
}
pub fn find(&self, chain: &[String]) -> Result<&Value, EGError> {
if chain.len() == 0 {
Err(EGError::StaticBorrow("empty input chain"))
} else {
self.find_impl(chain, chain)
}
}
pub fn insert(&mut self, chain: &[String], value: Value) -> Result<(), EGError> {
if chain.len() == 0 {
Err(EGError::StaticBorrow("empty input chain"))
} else {
self.insert_impl(chain, chain, value)
}
}
fn find_impl(&self, chain: &[String], orig_chain: &[String]) -> Result<&Value, EGError> {
match self {
            TreeNode::Leaf { value, .. } => {
if chain.len() == 0 {
Ok(value)
} else {
Err(EGError::Owned(format!("{} is a leaf node and has no children",
TreeNode::chain_to_string(orig_chain))))
}
},
            TreeNode::Branch { children, .. } => {
if chain.len() != 0 {
if let Some(child) =
children.iter().find(|&node| *node.key() == chain[0]) {
child.find_impl(&chain[1..], orig_chain)
} else {
Err(EGError::Owned(format!("{} does not exist", TreeNode::chain_to_string(chain))))
}
} else {
Err(EGError::Owned(format!("{} is a branch node and has no value",
TreeNode::chain_to_string(orig_chain))))
}
}
}
}
fn insert_impl(&mut self, chain: &[String], orig_chain: &[String], value: Value) -> Result<(), EGError> {
match self {
            TreeNode::Leaf { .. } => {
                Err(EGError::Owned(format!("{} is already a leaf node",
                    TreeNode::chain_to_string(orig_chain))))
            },
            TreeNode::Branch { children, .. } => {
match chain.len() {
0 => Err(EGError::Owned(format!("{} is already a branch node",
TreeNode::chain_to_string(orig_chain)))),
1 => {
                        if children.iter().any(|child| *child.key() == chain[0]) {
Err(EGError::Owned(format!("{} already exists", TreeNode::chain_to_string(orig_chain))))
} else {
children.push(TreeNode::Leaf { key: chain[0].to_string(), value });
Ok(())
}
},
_ => {
if let Some(child) =
children.iter_mut().find(|child| *child.key() == chain[0]) {
child.insert_impl(&chain[1..], orig_chain, value)
} else {
let mut new_child =
TreeNode::Branch { key: Some(chain[0].to_string()), children: Vec::new() };
assert!(new_child.insert_impl(&chain[1..], orig_chain, value).is_ok());
children.push(new_child);
Ok(())
}
}
}
}
}
}
fn key(&self) -> &String {
match self {
            TreeNode::Leaf { key, .. } => key,
            TreeNode::Branch { key, .. } =>
key.as_ref().expect("should not fetch the key of root node")
}
}
}
#[derive(Debug)]
pub struct ParseResult(Vec<String>, Value);
pub struct LineParser<'a> {
line: &'a [u8],
cur: usize,
chain: Vec<String>
}
impl<'a> LineParser<'a> {
fn new(line: &'a [u8]) -> Self {
LineParser { line, cur: 0, chain: Vec::new(), }
}
fn skip_whitespace(&mut self) {
while self.cur < self.line.len()
&& self.line[self.cur].is_ascii_whitespace() {
self.cur += 1;
}
}
fn parse_id_part(&mut self) -> Result<String, EGError> {
let mut buffer = Vec::new();
while self.cur < self.line.len()
            && (self.line[self.cur].is_ascii_alphanumeric()
                || [b'-', b'_', b'?', b'!', b'$', b'@'].contains(&self.line[self.cur])) {
buffer.push(self.line[self.cur]);
self.cur += 1;
}
if buffer.len() == 0 {
return Err(EGError::StaticBorrow("empty identifier"));
}
Ok(String::from_utf8_lossy(&buffer[..]).to_string())
}
fn parse_number(&mut self) -> Result<Value, EGError> {
debug_assert!(self.line[self.cur].is_ascii_digit());
let mut buffer = Vec::new();
while self.cur < self.line.len() && self.line[self.cur].is_ascii_digit() {
buffer.push(self.line[self.cur]);
self.cur += 1;
}
if self.cur < self.line.len() && self.line[self.cur] == b'.' {
self.cur += 1;
let mut buffer2 = Vec::new();
while self.cur < self.line.len() && self.line[self.cur].is_ascii_digit() {
buffer2.push(self.line[self.cur]);
self.cur += 1;
}
if buffer2.len() == 0 {
return Err(EGError::StaticBorrow("no digit after floating point"));
}
buffer.push(b'.');
buffer.append(&mut buffer2);
Ok(Value::Float(String::from_utf8_lossy(&buffer).parse().unwrap()))
} else { |
fn parse_string(&mut self) -> Result<Value, EGError> {
debug_assert!(self.line[self.cur] == b'"');
self.cur += 1;
let mut buffer = Vec::new();
while self.cur < self.line.len() && self.line[self.cur] != b'"' {
if self.line[self.cur] == b'\\' {
let peek = self.cur + 1;
if peek >= self.line.len() {
return Err(EGError::StaticBorrow("unexpected end of file"))
}
match self.line[peek] {
b'\\' => buffer.push(b'\\'),
b'"' => buffer.push(b'"'),
b'n' => buffer.push(b'\n'),
_ => return Err(EGError::Owned(format!("incorrect conversion sequence \\{}", self.line[peek])))
}
self.cur += 2;
} else {
buffer.push(self.line[self.cur]);
self.cur += 1;
}
}
if self.cur >= self.line.len() || self.line[self.cur] != b'"' {
return Err(EGError::StaticBorrow("unclosed string"))
}
self.cur += 1;
Ok(Value::String(String::from_utf8_lossy(&buffer).to_string()))
}
fn parse_list(&mut self) -> Result<Value, EGError> {
debug_assert!(self.line[self.cur] == b'[');
self.cur += 1;
self.skip_whitespace();
let mut values = Vec::new();
while self.cur < self.line.len() && self.line[self.cur] != b']' {
let value = self.parse_value()?;
values.push(value);
self.skip_whitespace();
if self.cur < self.line.len()
&& self.line[self.cur] != b','
&& self.line[self.cur] != b']' {
return Err(EGError::StaticBorrow("missing ',' as value separator"))
}
            if self.cur < self.line.len() && self.line[self.cur] == b',' {
                self.cur += 1;
            }
self.skip_whitespace();
}
if self.cur >= self.line.len() || self.line[self.cur] != b']' {
return Err(EGError::StaticBorrow("unclosed list"))
}
self.cur += 1;
Ok(Value::List(values))
}
fn parse_value(&mut self) -> Result<Value, EGError> {
if self.cur >= self.line.len() {
return Err(EGError::StaticBorrow("unexpected end of file"))
}
if self.line[self.cur].is_ascii_digit() {
self.parse_number()
} else if self.line[self.cur] == b'"' {
self.parse_string()
} else if self.line[self.cur] == b'[' {
self.parse_list()
} else {
Err(EGError::StaticBorrow("wrong value type"))
}
}
pub fn parse(mut self) -> Result<ParseResult, EGError> {
self.skip_whitespace();
let first_part = self.parse_id_part()?;
self.chain.push(first_part);
while self.cur < self.line.len() && self.line[self.cur] == b'.'{
self.cur += 1;
let part = self.parse_id_part()?;
self.chain.push(part)
}
self.skip_whitespace();
if self.cur >= self.line.len() || self.line[self.cur] != b'=' {
return Err(EGError::StaticBorrow("expected '='"))
}
self.cur += 1;
self.skip_whitespace();
let value = self.parse_value()?;
Ok(ParseResult(self.chain, value))
}
}
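// Grammar accepted by LineParser (sketch, inferred from the code above):
//   IDENT ("." IDENT)* "=" VALUE
// where IDENT is alphanumeric plus the characters -_?!$@, and VALUE is an
// unsigned integer, a float, a double-quoted string with \\, \" and \n
// escapes, or a bracketed, comma-separated list of VALUEs.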
#[cfg(test)]
mod tests {
use crate::{TreeNode, Value, LineParser, ParseResult};
#[test]
fn test_insert_and_find() {
let mut root_node = TreeNode::Branch{ key: None, children: Vec::new() };
let chain_a = ["a".to_string()];
let chain_b_c = ["b".to_string(), "c".to_string()];
let chain_b_d_e = ["b".to_string(), "d".to_string(), "e".to_string()];
let chain_b_d_f = ["b".to_string(), "d".to_string(), "f".to_string()];
assert!(root_node.insert(&chain_a, Value::Int(42)).is_ok());
assert!(root_node.insert(&chain_b_c, Value::Float(3.14)).is_ok());
assert!(root_node.insert(&chain_b_d_e,
Value::String("x".to_string())).is_ok());
assert!(root_node.insert(&chain_b_d_f,
Value::String("testing".to_string())).is_ok());
assert_eq!(root_node.find(&chain_a).unwrap().expect_int().unwrap(), 42);
assert_eq!(root_node.find(&chain_b_c).unwrap().expect_float().unwrap(), 3.14);
assert_eq!(root_node.find(&chain_b_d_e).unwrap().expect_str().unwrap(), "x");
assert_eq!(root_node.find(&chain_b_d_f).unwrap().expect_str().unwrap(), "testing");
}
#[test]
fn test_insert_conflict() {
let mut root_node = TreeNode::Branch { key: None, children: Vec::new() };
let ok_chains = [
vec!["a".to_string()],
vec!["b".to_string(), "c".to_string()],
vec!["b".to_string(), "d".to_string(), "e".to_string()],
vec!["b".to_string(), "d".to_string(), "f".to_string()]
];
let err_chains = [
vec!["a".to_string()],
vec!["a".to_string(), "b".to_string()],
vec!["b".to_string(), "d".to_string()],
vec!["b".to_string(), "d".to_string(), "f".to_string(), "nmsl".to_string()]
];
for chain in ok_chains.iter() {
root_node.insert(&chain[..], Value::Int(42)).unwrap();
}
for chain in err_chains.iter() {
assert!(root_node.insert(&chain[..], Value::Int(42)).is_err());
}
}
#[test]
fn test_parse_line() {
let result =
LineParser::new(b"a.b.c.d = [ 101 , \"string\", [\"91\\n\", 91], 324.5] ").parse();
let ParseResult(chain, value) = result.unwrap();
assert_eq!(chain.len(), 4);
assert_eq!(chain[0], "a");
assert_eq!(chain[1], "b");
assert_eq!(chain[2], "c");
assert_eq!(chain[3], "d");
if let Value::List(ls) = &value {
if let Value::Int(101) = ls[0] {} else { panic!() }
if let Value::String(s) = &ls[1] {
if s != "string" { panic!() }
} else {
panic!()
}
if let Value::List(_) = ls[2] {} else { panic!() }
if let Value::Float(f) = ls[3] {
if f != 324.5 { panic!() }
} else {
panic!()
}
} else {
panic!()
}
}
} | Ok(Value::Int(String::from_utf8_lossy(&buffer).parse().unwrap()))
}
} |
cosmos.base.tendermint.v1beta1.rs | /// GetValidatorSetByHeightRequest is the request type for the Query/GetValidatorSetByHeight RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetValidatorSetByHeightRequest {
#[prost(int64, tag = "1")]
pub height: i64,
    /// pagination defines the pagination for the request.
#[prost(message, optional, tag = "2")]
pub pagination: ::core::option::Option<super::super::query::v1beta1::PageRequest>,
}
/// GetValidatorSetByHeightResponse is the response type for the Query/GetValidatorSetByHeight RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetValidatorSetByHeightResponse {
#[prost(int64, tag = "1")]
pub block_height: i64,
#[prost(message, repeated, tag = "2")]
pub validators: ::prost::alloc::vec::Vec<Validator>,
    /// pagination defines the pagination for the response.
#[prost(message, optional, tag = "3")]
pub pagination: ::core::option::Option<super::super::query::v1beta1::PageResponse>,
}
/// GetLatestValidatorSetRequest is the request type for the Query/GetLatestValidatorSet RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetLatestValidatorSetRequest {
    /// pagination defines the pagination for the request.
#[prost(message, optional, tag = "1")]
pub pagination: ::core::option::Option<super::super::query::v1beta1::PageRequest>,
}
/// GetLatestValidatorSetResponse is the response type for the Query/GetLatestValidatorSet RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetLatestValidatorSetResponse {
#[prost(int64, tag = "1")]
pub block_height: i64,
#[prost(message, repeated, tag = "2")]
pub validators: ::prost::alloc::vec::Vec<Validator>,
    /// pagination defines the pagination for the response.
#[prost(message, optional, tag = "3")]
pub pagination: ::core::option::Option<super::super::query::v1beta1::PageResponse>,
}
/// Validator is the type for the validator-set.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Validator {
#[prost(string, tag = "1")]
pub address: ::prost::alloc::string::String,
#[prost(message, optional, tag = "2")]
pub pub_key: ::core::option::Option<::prost_types::Any>,
#[prost(int64, tag = "3")]
pub voting_power: i64,
#[prost(int64, tag = "4")]
pub proposer_priority: i64,
}
/// GetBlockByHeightRequest is the request type for the Query/GetBlockByHeight RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBlockByHeightRequest {
#[prost(int64, tag = "1")]
pub height: i64,
}
/// GetBlockByHeightResponse is the response type for the Query/GetBlockByHeight RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBlockByHeightResponse {
#[prost(message, optional, tag = "1")]
pub block_id: ::core::option::Option<::tendermint_proto::types::BlockId>,
#[prost(message, optional, tag = "2")]
pub block: ::core::option::Option<::tendermint_proto::types::Block>,
}
/// GetLatestBlockRequest is the request type for the Query/GetLatestBlock RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetLatestBlockRequest {}
/// GetLatestBlockResponse is the response type for the Query/GetLatestBlock RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetLatestBlockResponse {
#[prost(message, optional, tag = "1")]
pub block_id: ::core::option::Option<::tendermint_proto::types::BlockId>,
#[prost(message, optional, tag = "2")]
pub block: ::core::option::Option<::tendermint_proto::types::Block>,
}
/// GetSyncingRequest is the request type for the Query/GetSyncing RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSyncingRequest {}
/// GetSyncingResponse is the response type for the Query/GetSyncing RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSyncingResponse {
#[prost(bool, tag = "1")]
pub syncing: bool,
}
/// GetNodeInfoRequest is the request type for the Query/GetNodeInfo RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetNodeInfoRequest {}
/// GetNodeInfoResponse is the response type for the Query/GetNodeInfo RPC method.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetNodeInfoResponse {
#[prost(message, optional, tag = "1")]
pub default_node_info: ::core::option::Option<::tendermint_proto::p2p::DefaultNodeInfo>,
#[prost(message, optional, tag = "2")]
pub application_version: ::core::option::Option<VersionInfo>,
}
/// VersionInfo is the type for the GetNodeInfoResponse message.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct VersionInfo {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
#[prost(string, tag = "2")]
pub app_name: ::prost::alloc::string::String,
#[prost(string, tag = "3")]
pub version: ::prost::alloc::string::String,
#[prost(string, tag = "4")]
pub git_commit: ::prost::alloc::string::String,
#[prost(string, tag = "5")]
pub build_tags: ::prost::alloc::string::String,
#[prost(string, tag = "6")]
pub go_version: ::prost::alloc::string::String,
#[prost(message, repeated, tag = "7")]
pub build_deps: ::prost::alloc::vec::Vec<Module>,
#[prost(string, tag = "8")]
pub cosmos_sdk_version: ::prost::alloc::string::String,
}
/// Module is the type for VersionInfo
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Module {
/// module path
#[prost(string, tag = "1")]
pub path: ::prost::alloc::string::String,
/// module version
#[prost(string, tag = "2")]
pub version: ::prost::alloc::string::String,
/// checksum
#[prost(string, tag = "3")]
pub sum: ::prost::alloc::string::String,
}
#[doc = r" Generated client implementations."]
pub mod service_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
#[doc = " Service defines the gRPC querier service for tendermint queries."]
#[derive(Debug, Clone)]
pub struct ServiceClient<T> {
inner: tonic::client::Grpc<T>,
}
impl ServiceClient<tonic::transport::Channel> {
#[doc = r" Attempt to create a new client by connecting to a given endpoint."]
pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
where
D: std::convert::TryInto<tonic::transport::Endpoint>,
D::Error: Into<StdError>,
{
let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
Ok(Self::new(conn))
}
}
impl<T> ServiceClient<T>
where
T: tonic::client::GrpcService<tonic::body::BoxBody>,
T::ResponseBody: Body + Send + 'static,
T::Error: Into<StdError>,
<T::ResponseBody as Body>::Error: Into<StdError> + Send,
{
pub fn new(inner: T) -> Self {
let inner = tonic::client::Grpc::new(inner);
Self { inner }
}
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> ServiceClient<InterceptedService<T, F>>
where
F: tonic::service::Interceptor,
T: tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
Response = http::Response<
<T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
>,
>,
<T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error:
Into<StdError> + Send + Sync,
{
ServiceClient::new(InterceptedService::new(inner, interceptor))
}
#[doc = r" Compress requests with `gzip`."]
#[doc = r""]
#[doc = r" This requires the server to support it otherwise it might respond with an"]
#[doc = r" error."]
pub fn send_gzip(mut self) -> Self {
self.inner = self.inner.send_gzip();
self
}
#[doc = r" Enable decompressing responses with `gzip`."]
pub fn accept_gzip(mut self) -> Self {
self.inner = self.inner.accept_gzip();
self
}
#[doc = " GetNodeInfo queries the current node info."]
pub async fn get_node_info(
&mut self,
request: impl tonic::IntoRequest<super::GetNodeInfoRequest>,
) -> Result<tonic::Response<super::GetNodeInfoResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/cosmos.base.tendermint.v1beta1.Service/GetNodeInfo",
);
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " GetSyncing queries node syncing."]
pub async fn get_syncing(
&mut self,
request: impl tonic::IntoRequest<super::GetSyncingRequest>,
) -> Result<tonic::Response<super::GetSyncingResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/cosmos.base.tendermint.v1beta1.Service/GetSyncing",
);
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " GetLatestBlock returns the latest block."]
pub async fn | (
&mut self,
request: impl tonic::IntoRequest<super::GetLatestBlockRequest>,
) -> Result<tonic::Response<super::GetLatestBlockResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/cosmos.base.tendermint.v1beta1.Service/GetLatestBlock",
);
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " GetBlockByHeight queries block for given height."]
pub async fn get_block_by_height(
&mut self,
request: impl tonic::IntoRequest<super::GetBlockByHeightRequest>,
) -> Result<tonic::Response<super::GetBlockByHeightResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/cosmos.base.tendermint.v1beta1.Service/GetBlockByHeight",
);
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " GetLatestValidatorSet queries latest validator-set."]
pub async fn get_latest_validator_set(
&mut self,
request: impl tonic::IntoRequest<super::GetLatestValidatorSetRequest>,
) -> Result<tonic::Response<super::GetLatestValidatorSetResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/cosmos.base.tendermint.v1beta1.Service/GetLatestValidatorSet",
);
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " GetValidatorSetByHeight queries validator-set at a given height."]
pub async fn get_validator_set_by_height(
&mut self,
request: impl tonic::IntoRequest<super::GetValidatorSetByHeightRequest>,
) -> Result<tonic::Response<super::GetValidatorSetByHeightResponse>, tonic::Status>
{
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
let path = http::uri::PathAndQuery::from_static(
"/cosmos.base.tendermint.v1beta1.Service/GetValidatorSetByHeight",
);
self.inner.unary(request.into_request(), path, codec).await
}
}
}
| get_latest_block |
box.py | from pygame import Surface
from pygame.sprite import Sprite
class Box(Sprite):
| def __init__(self, color, x=0, y=0):
super().__init__()
self.image = Surface((50, 50))
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
self.color = color
self.image.fill(self.color) |
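# Example usage (sketch; `screen` is an assumed display surface):
#     group = pygame.sprite.Group(Box((255, 0, 0), x=100, y=50))
#     group.draw(screen)  # blits each box's image at its rect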
|
issue.go | /*
Copyright 2017, RadiantBlue Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package issue
import (
"fmt"
)
type IssuesMap map[string]*Issues
type Issues []string
type Issue string
func (i *Issue) String() string {
return string(*i)
}
func NewIssue(format string, a ...interface{}) *Issue {
temp := Issue(fmt.Sprintf(format, a...))
return &temp
}
func NewUnusedVariable(varName, value string) *Issue {
temp := Issue(fmt.Sprintf("Unused variable [${%s}] with value [%s]", varName, value))
return &temp
}
func NewVersionMismatch(packag, verA, verB string) *Issue {
if verA == "" |
if verB == "" {
verB = "NONE"
}
temp := Issue(fmt.Sprintf("Version mismatch on package [%s]: [%s] [%s]", packag, verA, verB))
return &temp
}
func NewUnknownSha(name, sha string) *Issue {
temp := Issue(fmt.Sprintf("Unknown sha [%s] for package [%s]", sha, name))
return &temp
}
func NewWeakVersion(name, version, tag string) *Issue {
temp := Issue(fmt.Sprintf("Version [%s] on package [%s] is not definite. Tag: [%s]", version, name, tag))
return &temp
}
| {
verA = "NONE"
} |
main.rs | mod data;
fn compute_bit_histogram(numbers: &Vec<Vec<u32>>) -> Vec<u32> {
numbers
.clone()
.into_iter()
.reduce(|total_bits, bits| {
total_bits
.iter()
.zip(bits.iter())
.map(|(x, y)| x + y)
.collect::<Vec<u32>>()
})
.unwrap()
}
fn find_most_common_bits(histogram: &Vec<u32>, numbers: &Vec<Vec<u32>>) -> Vec<u32> {
histogram
.iter()
.map(|&bit_count| {
if is_majority_of(&numbers, bit_count) {
1
} else {
0
}
})
.collect::<Vec<u32>>()
}
fn as_number(bits: &Vec<u32>) -> u32 {
bits.iter().fold(0, |result, bit| (result << 1) | bit)
}
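// Example (sketch): as_number(&vec![1, 0, 1, 1]) == 0b1011 == 11.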
fn | (numbers: &Vec<Vec<u32>>, bit_count: u32) -> bool {
let number_count = numbers.len() as u32;
let is_even = number_count & 1 == 0;
if is_even {
bit_count >= number_count / 2
} else {
bit_count >= number_count / 2 + 1
}
}
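// Example (sketch): with 7 numbers a bit is the majority only when set in at
// least 4 of them (7 / 2 + 1); with 8 numbers a 4-4 tie counts as a majority,
// matching the puzzle's "most common bit, ties favor 1" rule.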
fn count_bits(numbers: &Vec<Vec<u32>>, bit: usize) -> u32 {
    numbers.iter().map(|bits| bits[bit]).sum()
}
fn calculate_solution(report: &str) -> (u32, u32) {
let numbers = report
.lines()
.map(|number| {
number
.chars()
.map(|c| if c == '1' { 1 } else { 0 })
.collect::<Vec<u32>>()
})
.collect::<Vec<Vec<u32>>>();
let histogram = compute_bit_histogram(&numbers);
let most_common_bits = find_most_common_bits(&histogram, &numbers);
let bit_width = histogram.len();
let gamma_rate = as_number(&most_common_bits);
let epsilon_rate = (!gamma_rate) & ((1u32 << bit_width) - 1);
println!("g {} e {} h {:?}", gamma_rate, epsilon_rate, histogram);
let mut oxygen: Option<Vec<u32>> = None;
let mut filtered_o2 = numbers.clone();
for bit in 0..bit_width {
let number_count = filtered_o2.len() as u32;
let most_common_bit = if is_majority_of(&filtered_o2, count_bits(&filtered_o2, bit)) {
1
} else {
0
};
println!(
"bit: {} number_count: {} most_common_bit o2: {}",
bit, number_count, most_common_bit
);
filtered_o2 = filtered_o2
.into_iter()
.filter(|n| n[bit] == most_common_bit)
.collect::<Vec<Vec<u32>>>();
println!("filtered o2: {:?}", filtered_o2);
if filtered_o2.len() == 1 {
oxygen = Some(filtered_o2[0].clone());
break;
}
}
let oxygen_rate = as_number(&oxygen.unwrap());
let mut co2: Option<Vec<u32>> = None;
let mut filtered_co2 = numbers;
for bit in 0..bit_width {
let number_count = filtered_co2.len() as u32;
let least_common_bit = if is_majority_of(&filtered_co2, count_bits(&filtered_co2, bit)) {
0
} else {
1
};
println!(
"bit: {} number_count: {} least_common_bit co2: {}",
bit, number_count, least_common_bit
);
filtered_co2 = filtered_co2
.into_iter()
.filter(|n| n[bit] == least_common_bit)
.collect::<Vec<Vec<u32>>>();
println!("filtered co2: {:?}", filtered_co2);
if filtered_co2.len() == 1 {
co2 = Some(filtered_co2[0].clone());
break;
}
}
let co2_rate = as_number(&co2.unwrap());
println!("co2_rate {} oxygen_rate {}", co2_rate, oxygen_rate);
(gamma_rate * epsilon_rate, oxygen_rate * co2_rate)
}
fn main() {
let (power_consumption, life_support_rating) = calculate_solution(data::REPORT);
println!("Solution {}, {}", power_consumption, life_support_rating);
}
| is_majority_of |
Gruntfile.js | module.exports = function ( grunt ) {
| 'use strict';
/**
* Tasks and configuration
*
* @link https://github.com/firstandthird/load-grunt-config
*/
require('load-grunt-config')(grunt, {
configPath: require('path').join( process.cwd(), 'grunt-tasks' ),
init: true,
data: {
config: {
"theme" : {
"name" : "_frc",
"description" : "Theme for <code>_frc</code>",
"theme_uri" : "",
"author" : "Frantic",
"author_uri" : "http://www.frantic.com/",
"text_domain" : "_frc",
"version" : "1.0.0",
"location" : "../../.",
"browser_support" : "last 3 versions",
"assets" : {
"css" : {
"location" : "../css",
"admin" : "admin.css",
"editor" : "editor-style.css",
"login" : "login.css",
"main" : "main.min.css",
"print" : "print.css"
},
"js" : {
"location" : "../js",
"main" : "main.min.js"
}
},
"source" : {
"js" : {
"location" : "js",
"main" : "main.js"
},
"lang_files" : "../../languages",
"php_files" : "../../.",
"sass" : {
"location" : "sass",
"admin" : "admin.scss",
"editor" : "editor-style.scss",
"login" : "login.scss",
"main" : "main.scss",
"print" : "print.scss"
}
}
}
},
},
// Can optionally pass options to load-grunt-tasks.
    // If you set it to false, it will disable auto-loading of tasks.
loadGruntTasks: {
pattern: 'grunt-*',
config: require('./package.json'),
scope: 'devDependencies'
},
// Can post process config object before it gets passed to grunt
postProcess: function( config ) {},
// Allows to manipulate the config object before it gets merged with the data object
preMerge: function( config, data ) {}
});
require('time-grunt')( grunt );
}; | |
clients.go | package kube
import (
"os"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/jenkins-x/jx-kube-client/v3/pkg/kubeclient"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic"
fakedyn "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/rest"
)
// LazyCreateDynamicClient lazily creates the dynamic client if it's not defined
func LazyCreateDynamicClient(client dynamic.Interface) (dynamic.Interface, error) {
if client != nil {
return client, nil
}
if IsNoKubernetes() {
scheme := runtime.NewScheme()
return fakedyn.NewSimpleDynamicClient(scheme), nil
}
f := kubeclient.NewFactory()
cfg, err := f.CreateKubeConfig()
if err != nil {
return nil, errors.Wrap(err, "failed to get kubernetes config")
}
client, err = dynamic.NewForConfig(cfg)
if err != nil {
return nil, errors.Wrap(err, "error building dynamic clientset")
}
return client, nil
}
// LazyCreateKubeClient lazily creates the kube client if it's not defined
func LazyCreateKubeClient(client kubernetes.Interface) (kubernetes.Interface, error) {
return LazyCreateKubeClientWithMandatory(client, false)
}
// LazyCreateKubeClientWithMandatory lazily creates the kube client; if mandatory is true, the env vars
// that normally disable kubernetes are ignored
func LazyCreateKubeClientWithMandatory(client kubernetes.Interface, mandatory bool) (kubernetes.Interface, error) {
if client != nil {
return client, nil
}
if !mandatory && IsNoKubernetes() {
return NewFakeKubernetesClient("default"), nil
}
f := kubeclient.NewFactory()
cfg, err := f.CreateKubeConfig()
if err != nil |
client, err = kubernetes.NewForConfig(cfg)
if err != nil {
return client, errors.Wrap(err, "error building kubernetes clientset")
}
return client, nil
}
// LazyCreateKubeClientAndNamespace lazily creates the kube client and/or the current namespace if not already defined
func LazyCreateKubeClientAndNamespace(client kubernetes.Interface, ns string) (kubernetes.Interface, string, error) {
if client != nil && ns != "" {
return client, ns, nil
}
if IsNoKubernetes() {
if ns == "" {
ns = "default"
}
if client == nil {
client = NewFakeKubernetesClient(ns)
}
return client, ns, nil
}
if client == nil {
f := kubeclient.NewFactory()
cfg, err := f.CreateKubeConfig()
if err != nil {
return client, ns, errors.Wrap(err, "failed to get kubernetes config")
}
client, err = kubernetes.NewForConfig(cfg)
if err != nil {
return client, ns, errors.Wrap(err, "error building kubernetes clientset")
}
}
if ns == "" {
var err error
ns, err = kubeclient.CurrentNamespace()
if err != nil {
return client, ns, errors.Wrap(err, "failed to get current kubernetes namespace")
}
}
return client, ns, nil
}
// IsInCluster tells if we are running in-cluster
func IsInCluster() bool {
_, err := rest.InClusterConfig()
return err == nil
}
// IsNoKubernetes returns true if we are inside a GitHub Action or not using kubernetes
func IsNoKubernetes() bool {
// disable k8s by default if inside a github action
if strings.ToLower(os.Getenv("GITHUB_ACTIONS")) == "true" {
return strings.ToLower(os.Getenv("JX_KUBERNETES")) != "true"
}
return strings.ToLower(os.Getenv("JX_NO_KUBERNETES")) == "true"
}
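// Example (sketch): inside a GitHub Action (GITHUB_ACTIONS=true) with
// JX_KUBERNETES unset, IsNoKubernetes returns true, so the Lazy* helpers above
// hand back fake in-memory clients instead of contacting a real cluster.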
// NewFakeKubernetesClient creates a fake k8s client if we have disabled kubernetes
func NewFakeKubernetesClient(ns string) *fake.Clientset {
return fake.NewSimpleClientset(&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: ns,
},
})
}
| {
return client, errors.Wrap(err, "failed to get kubernetes config")
} |
webpack.config.js | var path = require('path')
var babelrc = JSON.parse(require('fs').readFileSync('.babelrc').toString())
var pkg = require('./package.json')
var webpack = require('webpack')
var ignore = [
new webpack.IgnorePlugin(/react\/addons/),
new webpack.IgnorePlugin(/react\/lib\/ReactContext/),
new webpack.IgnorePlugin(/react\/lib\/ExecutionEnvironment/)
]
module.exports = {
entry: pkg.main,
output: { path: path.resolve(__dirname, 'dist'), filename: 'bundle.js' },
devtool: 'source-map',
stats: { colors: true },
node: { fs: 'empty'},
plugins: ignore,
module: {
loaders: [{
test: /.jsx?$/,
loader: 'babel-loader',
include: path.resolve(__dirname, 'src'),
query: babelrc
}, {
test: /\.json$/,
loader: 'json'
}, {
test: /\.css$/, | test: /\.woff(\?v=\d+\.\d+\.\d+)?$/,
loader: "url?limit=10000&mimetype=application/font-woff"
}, {
test: /\.woff2(\?v=\d+\.\d+\.\d+)?$/,
loader: "url?limit=10000&mimetype=application/font-woff"
}, {
test: /\.ttf(\?v=\d+\.\d+\.\d+)?$/,
loader: "url?limit=10000&mimetype=application/octet-stream"
}, {
test: /\.eot(\?v=\d+\.\d+\.\d+)?$/,
loader: "file"
}, {
test: /\.svg(\?v=\d+\.\d+\.\d+)?$/,
loader: "url?limit=10000&mimetype=image/svg+xml"
}
]
}
} | loader: 'style!css?sourceMap'
}, { |
curve.rs | use crate::payout_curve::basis::BSplineBasis;
use crate::payout_curve::splineobject::SplineObject;
use crate::payout_curve::utils::*;
use crate::payout_curve::Error;
use ndarray::prelude::*;
use ndarray::s;
use std::cmp::max;
fn default_basis() -> Result<Vec<BSplineBasis>, Error> {
let out = vec![BSplineBasis::new(None, None, None)?];
Ok(out)
}
/// Represents a curve: an object with a one-dimensional parameter space.
#[derive(Clone, Debug)]
pub struct | {
pub spline: SplineObject,
}
impl Curve {
/// Construct a curve with the given basis and control points. In theory,
/// the curve could be defined in some Euclidean space of dimension N, but
/// for the moment only curves in E^2 are supported via hard-coding. That is,
/// any valid basis set can be passed in when instantiating this object,
/// but only the first one is ever considered in the methods provided.
///
/// The default is to create a linear one-element mapping from (0,1) to the
/// unit interval.
///
/// ### parameters
/// * basis: The underlying B-Spline basis
/// * controlpoints: An *n* × *d* matrix of control points
/// * rational: Whether the curve is rational (in which case the
/// control points are interpreted as pre-multiplied with the weight,
/// which is the last coordinate)
pub fn new(
bases: Option<Vec<BSplineBasis>>,
controlpoints: Option<Array2<f64>>,
rational: Option<bool>,
) -> Result<Self, Error> {
        let bases = match bases {
            Some(bases) => bases,
            None => default_basis()?,
        };
let spline = SplineObject::new(bases, controlpoints, rational)?;
Ok(Curve { spline })
}
/// Evaluate the object at given parametric values.
///
/// ### parameters
/// * t: collection of parametric coordinates in which to evaluate
/// Realistically, this should actually be an Array1 object, but for
/// consistency with the underlying SplineObject methods the collection
/// is used instead.
///
/// ### returns
/// * 2D array
// pub fn evaluate(&self, t: &mut &[Array1<f64>], tensor: bool) -> Result<ArrayD<f64>, Error> {
pub fn evaluate(&self, t: &mut &[Array1<f64>]) -> Result<Array2<f64>, Error> {
self.spline.validate_domain(t)?;
let mut tx = t[0].clone().to_owned();
let n_csr = self.spline.bases[0].evaluate(&mut tx, 0, true)?;
// kludge...
let n0 = self.spline.controlpoints.shape()[0];
let n1 = self.spline.controlpoints.shape()[1];
let flat_controlpoints = self
.ravel(&self.spline.controlpoints)
.into_raw_vec()
.to_vec();
let arr = Array2::<f64>::from_shape_vec((n0, n1), flat_controlpoints)?;
let mut result = n_csr.todense().dot(&arr);
// if the spline is rational, we apply the weights and omit the weights column
if self.spline.rational {
let wpos = result.shape()[1] - 1;
            let weights = &result.slice(s![.., wpos]);
let mut temp = Array2::<f64>::zeros((result.shape()[0], wpos));
for i in 0..self.spline.dimension {
{
let mut slice = temp.slice_mut(s![.., i]);
let update = result.slice(s![.., i]).to_owned() / weights.to_owned();
slice.assign(&update.view());
}
}
result = temp;
}
Ok(result)
}
/// Extend the curve by merging another curve to the end of it.
///
/// The curves are glued together in a C0 fashion with enough repeated
/// knots. The function assumes that the end of this curve perfectly
/// matches the start of the input curve.
///
/// Obviously, neither curve can be periodic, which enables us
/// to assume all knot vectors are open.
///
/// ### parameters
    /// * othercurve: Another curve
    ///
    /// ### returns
    /// * `Ok(())` on success, or an error if either curve is periodic
pub fn append(&mut self, othercurve: Curve) -> Result<(), Error> {
if self.spline.bases[0].periodic > -1 || othercurve.spline.bases[0].periodic > -1 {
return Result::Err(Error::CannotConnectPeriodicCurves);
};
let mut extending_curve = othercurve;
// make sure both are in the same space, and (if needed) have rational weights
self.spline
.make_splines_compatible(&mut extending_curve.spline)?;
let p1 = self.spline.order(0)?[0];
let p2 = extending_curve.spline.order(0)?[0];
if p1 < p2 {
self.raise_order(p2 - p1)?;
} else {
extending_curve.raise_order(p1 - p2)?;
}
let p = max(p1, p2);
let old_knot = self.spline.knots(0, Some(true))[0].clone();
let mut add_knot = extending_curve.spline.knots(0, Some(true))[0].clone();
add_knot -= add_knot[0];
add_knot += old_knot[old_knot.len() - 1];
let mut new_knot = Array1::<f64>::zeros(add_knot.len() + old_knot.len() - p - 1);
{
let mut slice = new_knot.slice_mut(s![..old_knot.len() - 1]);
let update = old_knot.slice(s![..old_knot.len() - 1]);
slice.assign(&update.view());
}
{
let mut slice = new_knot.slice_mut(s![old_knot.len() - 1..]);
let update = add_knot.slice(s![p..]);
slice.assign(&update.view());
}
let rational = self.spline.rational as usize;
let n1 = self.spline.controlpoints.shape()[0];
let n2 = extending_curve.spline.controlpoints.shape()[0];
let n3 = self.spline.dimension + rational;
let mut new_controlpoints = Array2::<f64>::zeros((n1 + n2 - 1, n3));
{
let mut slice = new_controlpoints.slice_mut(s![..n1, ..]);
let update = self.spline.controlpoints.slice(s![.., ..]);
slice.assign(&update.view());
}
{
let mut slice = new_controlpoints.slice_mut(s![n1.., ..]);
let update = extending_curve.spline.controlpoints.slice(s![1.., ..]);
slice.assign(&update.view());
}
self.spline.bases = vec![BSplineBasis::new(Some(p), Some(new_knot), None)?];
self.spline.controlpoints = new_controlpoints.into_dyn().to_owned();
Ok(())
}
/// Raise the polynomial order of the curve.
///
/// ### parameters
/// * amount: Number of times to raise the order
pub fn raise_order(&mut self, amount: usize) -> Result<(), Error> {
if amount == 0 {
return Ok(());
}
// work outside of self, copy back in at the end
let mut new_basis = self.spline.bases[0].clone();
new_basis.raise_order(amount);
// set up an interpolation problem. This is in projective space,
// so no problems for rational cases
let mut interpolation_pts_t = new_basis.greville();
let n_old = self.spline.bases[0].evaluate(&mut interpolation_pts_t, 0, true)?;
let n_new = new_basis.evaluate(&mut interpolation_pts_t, 0, true)?;
// Some kludge required to enable .dot(), which doesn't work on dynamic
// arrays. Surely a better way to do this, but this is quick and dirty
// and valid since we're in curve land
let n0 = self.spline.controlpoints.shape()[0];
let n1 = self.spline.controlpoints.shape()[1];
let flat_controlpoints = self
.ravel(&self.spline.controlpoints)
.into_raw_vec()
.to_vec();
let arr = Array2::<f64>::from_shape_vec((n0, n1), flat_controlpoints)?;
let interpolation_pts_x = n_old.todense().dot(&arr);
let res = n_new.matrix_solve(&interpolation_pts_x)?;
self.spline.controlpoints = res.into_dyn().to_owned();
self.spline.bases = vec![new_basis];
Ok(())
}
    /// Computes the Euclidean length of the curve in geometric space
///
/// .. math:: \\int_{t_0}^{t_1}\\sqrt{x(t)^2 + y(t)^2 + z(t)^2} dt
///
/// ### parameters
/// * t0: lower integration limit
/// * t1: upper integration limit
pub fn length(&self, t0: Option<f64>, t1: Option<f64>) -> Result<f64, Error> {
let mut knots = &self.spline.knots(0, Some(false))[0];
// keep only integration boundaries within given start (t0) and stop (t1) interval
let new_knots_0 = t0
.map(|t0| {
let i = bisect_left(knots, &t0, knots.len());
let mut vec = Vec::<f64>::with_capacity(&knots.to_vec()[i..].len() + 1);
vec.push(t0);
for elem in knots.to_vec()[i..].iter() {
vec.push(*elem);
}
Array1::<f64>::from_vec(vec)
})
.unwrap_or_else(|| knots.to_owned());
knots = &new_knots_0;
let new_knots_1 = t1
.map(|t1| {
let i = bisect_right(knots, &t1, knots.len());
let mut vec = Vec::<f64>::with_capacity(&knots.to_vec()[..i].len() + 1);
for elem in knots.to_vec()[..i].iter() {
vec.push(*elem);
}
vec.push(t1);
Array1::<f64>::from_vec(vec)
})
.unwrap_or_else(|| knots.to_owned());
knots = &new_knots_1;
let klen = knots.len();
let gleg = GaussLegendreQuadrature::new(self.spline.order(0)?[0] + 1)?;
let t = &knots.to_vec()[..klen - 1]
.iter()
.zip(knots.to_vec()[1..].iter())
.map(|(t0, t1)| (&gleg.sample_points + 1.) / 2. * (t1 - t0) + *t0)
.collect::<Vec<_>>();
let w = &knots.to_vec()[..klen - 1]
.iter()
.zip(knots.to_vec()[1..].iter())
.map(|(t0, t1)| &gleg.weights / 2. * (t1 - t0))
.collect::<Vec<_>>();
let t_flat = self.flattened(&t[..]);
let w_flat = self.flattened(&w[..]);
let dx = self.derivative(&t_flat, 1, Some(true))?;
let det_j = Array1::<f64>::from_vec(
dx.mapv(|e| e.powi(2))
.sum_axis(Axis(1))
.mapv(f64::sqrt)
.iter()
.copied()
.collect::<Vec<_>>(),
);
let out = det_j.dot(&w_flat);
Ok(out)
}
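    // Example (sketch): for a straight line from (0,0) to (3,4) parametrized
    // over (0,1), the integrand |x'(t)| is the constant 5, so
    // length(None, None) returns 5.0 up to quadrature accuracy.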
fn flattened(&self, vec_arr: &[Array1<f64>]) -> Array1<f64> {
let alloc = vec_arr.iter().fold(0, |sum, e| sum + e.len());
let mut vec_out = Vec::<f64>::with_capacity(alloc);
for arr in vec_arr.iter() {
for e in arr.to_vec().iter() {
vec_out.push(*e);
}
}
Array1::<f64>::from_vec(vec_out)
}
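    // Example (sketch): flattening [array![0., 1.], array![2.]] yields
    // array![0., 1., 2.]; used above to merge the per-knot-span quadrature
    // points and weights into single arrays.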
/// left here as a private method as it assumes C-contiguous ordering,
/// which is fine for where we use it here.
fn ravel(&self, arr: &ArrayD<f64>) -> Array1<f64> {
let alloc = arr.shape().iter().product();
let mut vec = Vec::<f64>::with_capacity(alloc);
for e in arr.iter() {
vec.push(*e)
}
Array1::<f64>::from_vec(vec)
}
/// Evaluate the derivative of the curve at the given parametric values.
///
/// This function returns an *n* × *dim* array, where *n* is the number of
/// evaluation points, and *dim* is the physical dimension of the curve.
/// **At this point in time, only `dim == 1` works, owing to the provisional
/// constraints on the struct `Curve` itself.**
///
/// ### parameters
/// * t: Parametric coordinates in which to evaluate
/// * d: Number of derivatives to compute
/// * from_right: Evaluation in the limit from right side
pub fn derivative(
&self,
t: &Array1<f64>,
d: usize,
from_right: Option<bool>,
) -> Result<ArrayD<f64>, Error> {
let from_right = from_right.unwrap_or(true);
if !self.spline.rational || d < 2 || d > 3 {
let mut tx = &vec![t.clone().to_owned()][..];
let res = self.spline.derivative(&mut tx, &[d], &[from_right], true)?;
return Ok(res);
}
        // init result array
let mut res = Array2::<f64>::zeros((t.len(), self.spline.dimension));
// annoying fix to make the controlpoints not dynamic--implicit
// assumption of 2D curve only!
let n0 = self.spline.controlpoints.shape()[0];
let n1 = self.spline.controlpoints.shape()[1];
let flat_controlpoints = self
.ravel(&self.spline.controlpoints)
.into_raw_vec()
.to_vec();
let static_controlpoints = Array2::<f64>::from_shape_vec((n0, n1), flat_controlpoints)?;
let mut t_eval = t.clone();
let d2 = self.spline.bases[0]
.evaluate(&mut t_eval, 2, from_right)?
.todense()
.dot(&static_controlpoints);
let d1 = self.spline.bases[0]
.evaluate(&mut t_eval, 1, from_right)?
.todense()
.dot(&static_controlpoints);
let d0 = self.spline.bases[0]
.evaluate(&mut t_eval, 0, true)?
.todense()
.dot(&static_controlpoints);
let w0 = &d0.slice(s![.., d0.shape()[1] - 1]).to_owned();
let w1 = &d1.slice(s![.., d1.shape()[1] - 1]).to_owned();
let w2 = &d2.slice(s![.., d2.shape()[1] - 1]).to_owned();
if d == 2 {
let w0_cube = &w0.mapv(|e| e.powi(3)).to_owned();
for i in 0..self.spline.dimension {
{
let update = &((d2.slice(s![.., i]).to_owned() * w0 * w0
- 2. * w1
* (d1.slice(s![.., i]).to_owned() * w0
- d0.slice(s![.., i]).to_owned() * w1)
- d0.slice(s![.., i]).to_owned() * w2 * w1)
/ w0_cube);
let mut slice = res.slice_mut(s![.., i]);
slice.assign(update);
}
}
}
if d == 3 {
let d3 = self.spline.bases[0]
.evaluate(&mut t_eval, 3, from_right)?
.todense()
.dot(&static_controlpoints);
let w3 = &d3.slice(s![.., d3.shape()[1] - 1]);
let w0_four = w0.mapv(|e| e.powi(6));
for i in 0..self.spline.dimension {
{
let h0 = &(d1.slice(s![.., i]).to_owned() * w0
- d0.slice(s![.., i]).to_owned() * w1);
let h1 = &(d2.slice(s![.., i]).to_owned() * w0
- d0.slice(s![.., i]).to_owned() * w2);
let h2 = &(d3.slice(s![.., i]).to_owned() * w0
+ d2.slice(s![.., i]).to_owned() * w1
- d1.slice(s![.., i]).to_owned() * w2
- d0.slice(s![.., i]).to_owned() * w3);
let g0 = &(h1 * w0 - 2. * h0 * w1);
let g1 = &(h2 * w0 - 2. * h0 * w2 - h1 * w1);
let update = (g1 * w0 - 3. * g0 * w1) / &w0_four;
let mut slice = res.slice_mut(s![.., i]);
slice.assign(&update);
}
}
}
Ok(res.into_dyn().to_owned())
}
/// Computes the L2 (squared and per knot span) between this
/// curve and a target curve as well as the L_infinity error:
///
/// .. math:: ||\\boldsymbol{x_h}(t)-\\boldsymbol{x}(t)||_{L^2(t_1,t_2)}^2 = \\int_{t_1}^{t_2}
/// |\\boldsymbol{x_h}(t)-\\boldsymbol{x}(t)|^2 dt, \\quad \\forall \\;\\text{knots}\\;t_1 <
/// t_2
///
/// .. math:: ||\\boldsymbol{x_h}(t)-\\boldsymbol{x}(t)||_{L^\\infty} = \\max_t
/// |\\boldsymbol{x_h}(t)-\\boldsymbol{x}(t)|
///
/// ### parameters
/// * function target: callable function which takes as input a vector
/// of evaluation points t and gives as output a matrix x where
/// x[i,j] is component j evaluated at point t[i]
///
/// ### returns
/// * L2 error per knot-span
pub fn error(
&self,
target: impl Fn(&Array1<f64>) -> Array2<f64>,
) -> Result<(Array1<f64>, f64), Error> {
let knots = &self.spline.knots(0, Some(false))[0];
let n = self.spline.order(0)?[0];
let gleg = GaussLegendreQuadrature::new(n + 1)?;
let mut error_l2 = Vec::with_capacity(knots.len() - 1);
let mut error_linf = Vec::with_capacity(knots.len() - 1);
for (t0, t1) in knots.to_vec()[..knots.len() - 1]
.iter()
.zip(&mut knots.to_vec()[1..].iter())
{
let tg = (&gleg.sample_points + 1.) / 2. * (t1 - t0) + *t0;
let eval = vec![tg.clone()];
let wg = &gleg.weights / 2. * (t1 - t0);
let exact = target(&tg);
let error = self.evaluate(&mut &eval[..])? - exact;
let error_2 = &error.mapv(|e| e.powi(2)).sum_axis(Axis(1));
let error_abs = &error_2.mapv(|e| e.sqrt());
let l2_val = error_2.dot(&wg);
let linf_val = error_abs.iter().copied().fold(f64::NEG_INFINITY, f64::max);
error_l2.push(l2_val);
error_linf.push(linf_val);
}
let out_inf = error_linf.iter().copied().fold(f64::NEG_INFINITY, f64::max);
Ok((Array1::<f64>::from_vec(error_l2), out_inf))
}
}
| Curve |
test_comments.rs | use crate::workdir::Workdir;
#[test]
fn comments() |
#[test]
fn comments_long() {
let wrk = Workdir::new("comments");
wrk.create(
"comments.csv",
vec![
svec!["# test file to see how comments work", ""],
svec!["# this is another comment before the header", ""],
svec!["# DATA DICTIONARY", ""],
svec!["# column1 - alphabetic; id of the column", ""],
svec!["# column2 - numeric; just a number", ""],
svec!["column1", "column2"],
svec!["a", "1"],
svec!["#b", "2"],
svec!["c", "3"],
svec!["#d - this row is corrupted skip", "extra col2"],
svec!["e", "5"],
],
);
let mut cmd = wrk.command("input");
cmd.env("QSV_COMMENTS", "# is the comment character");
cmd.arg("comments.csv");
let got: Vec<Vec<String>> = wrk.read_stdout(&mut cmd);
let expected = vec![
svec!["column1", "column2"],
svec!["a", "1"],
svec!["c", "3"],
svec!["e", "5"],
];
assert_eq!(got, expected);
}
#[test]
fn comments_unicode_supported() {
let wrk = Workdir::new("comments");
wrk.create(
"comments.csv",
vec![
svec!["Ǽ test file to see how comments work", ""],
svec!["Ǽ yet another comment", ""],
svec!["column1", "column2"],
svec!["a", "1"],
svec!["Ǽb", "2"],
svec!["c", "3"],
],
);
let mut cmd = wrk.command("input");
cmd.env("QSV_COMMENTS", "Ǽ");
cmd.arg("comments.csv");
let got: Vec<Vec<String>> = wrk.read_stdout(&mut cmd);
let expected = vec![
svec!["column1", "column2"],
svec!["a", "1"],
svec!["c", "3"],
];
assert_eq!(got, expected);
}
#[test]
fn comments_count() {
let wrk = Workdir::new("comments");
wrk.create(
"comments.csv",
vec![
svec!["Ǽ test file to see how comments work", ""],
svec!["Ǽ yet another comment", ""],
svec!["column1", "column2"],
svec!["a", "1"],
svec!["Ǽb", "2"],
svec!["c", "3"],
],
);
let mut cmd = wrk.command("count");
cmd.env("QSV_COMMENTS", "Ǽ");
cmd.arg("comments.csv");
let got_count: usize = wrk.stdout(&mut cmd);
rassert_eq!(got_count, 2);
}
#[test]
fn comments_headers() {
let wrk = Workdir::new("comments");
wrk.create(
"comments.csv",
vec![
svec!["// test file to see how comments work", ""],
svec!["// yet another comment", ""],
svec!["/ still a comment", ""],
svec!["column1", "column2"],
svec!["a", "1"],
svec!["//b", "2"],
svec!["c", "3"],
],
);
let mut cmd = wrk.command("headers");
cmd.env("QSV_COMMENTS", "/");
cmd.arg("comments.csv");
let got: String = wrk.stdout(&mut cmd);
let expected = "\
1 column1
2 column2";
assert_eq!(got, expected);
}
| {
let wrk = Workdir::new("comments");
wrk.create(
"comments.csv",
vec![
svec!["# test file to see how comments work", ""],
svec!["# this is another comment before the header", ""],
svec!["# DATA DICTIONARY", ""],
svec!["# column1 - alphabetic; id of the column", ""],
svec!["# column2 - numeric; just a number", ""],
svec!["column1", "column2"],
svec!["a", "1"],
svec!["#b", "2"],
svec!["c", "3"],
svec!["#d - this row is corrupted skip", "extra col2"],
svec!["e", "5"],
],
);
let mut cmd = wrk.command("input");
cmd.env("QSV_COMMENTS", "#");
cmd.arg("comments.csv");
let got: Vec<Vec<String>> = wrk.read_stdout(&mut cmd);
let expected = vec![
svec!["column1", "column2"],
svec!["a", "1"],
svec!["c", "3"],
svec!["e", "5"],
];
assert_eq!(got, expected);
} |
kicad_to_digikey_bom.py | import csv
import sys
bom = {}
reader = csv.reader(sys.stdin)
header = next(reader)
columns = {}
for (column_name, column_number) in zip(header, range(0, len(header))):
columns[column_name.strip()] = column_number
# Digikey ignores the headers and makes one map columns, so this
# is unnecessary and also annoying.
# print ",".join(("Digi-Key Part Number", "Manufacturer Name", "Manufacturer Part Number", "Customer Reference", "Quantity 1", "Quantity 2", "Quantity 3"))
for row in reader:
entry = {}
    for (column_name, column_number) in columns.items():
if column_number < len(row):
entry[column_name] = row[column_number].strip()
else:
entry[column_name] = ""
dist = entry.get('Distributor', '')
distpn = entry.get('Distributor PN', '')
mfg = entry.get('Manufacturer', '')
pn = entry.get('PN', '')
value = entry.get('Value', '')
if dist != 'Digikey':
print >>sys.stderr, "no digikey part number for reference %s, value %s footprint %s"% (entry['Reference'], entry['Value'], entry['Footprint'])
else:
bom.setdefault(dist + distpn + mfg + pn, []).append(entry)
for (ref, entries) in bom.iteritems():
    dist = entries[0].get('Distributor', '')
    distpn = entries[0].get('Distributor PN', '')
    mfg = entries[0].get('Manufacturer', '')
    pn = entries[0].get('PN', '')
    refs = " ".join([ref['Reference'] for ref in entries])
    print ",".join((distpn, mfg, pn, refs, str(len(entries)), "10", "100"))
menu.ts | import * as path from 'path';
import {existsSync, writeFileSync} from 'fs';
import {app, shell, Menu, MenuItemConstructorOptions, BrowserWindow, dialog} from 'electron';
import {
is,
appMenu,
openUrlMenuItem,
aboutMenuItem,
openNewGitHubIssue,
debugInfo
} from 'electron-util';
import config from './config';
import {sendAction, showRestartDialog} from './util';
import {generateSubmenu as generateEmojiSubmenu} from './emoji';
import {toggleMenuBarMode} from './menu-bar-mode';
export default async function updateMenu(): Promise<Menu> {
const newConversationItem: MenuItemConstructorOptions = {
label: 'New Conversation',
accelerator: 'CommandOrControl+N',
click() {
sendAction('new-conversation');
}
};
const switchItems: MenuItemConstructorOptions[] = [
{
label: 'Switch to Work Chat…',
accelerator: 'CommandOrControl+Shift+2',
visible: !config.get('useWorkChat'),
click() {
config.set('useWorkChat', true);
app.relaunch();
app.quit();
}
},
{
label: 'Switch to Messenger…',
accelerator: 'CommandOrControl+Shift+1',
visible: config.get('useWorkChat'),
click() {
config.set('useWorkChat', false);
app.relaunch();
app.quit();
}
},
{
label: 'Log Out',
click() {
sendAction('log-out');
}
}
];
const vibrancySubmenu: MenuItemConstructorOptions[] = [
{
label: 'No Vibrancy',
type: 'checkbox',
checked: config.get('vibrancy') === 'none',
async click() {
config.set('vibrancy', 'none');
sendAction('update-vibrancy');
await updateMenu();
}
},
{
label: 'Sidebar-only Vibrancy',
type: 'checkbox',
checked: config.get('vibrancy') === 'sidebar',
async click() {
config.set('vibrancy', 'sidebar');
sendAction('update-vibrancy');
await updateMenu();
}
},
{
label: 'Full-window Vibrancy',
type: 'checkbox',
checked: config.get('vibrancy') === 'full',
async click() {
config.set('vibrancy', 'full');
sendAction('update-vibrancy');
await updateMenu();
}
}
];
const privacySubmenu: MenuItemConstructorOptions[] = [
{
label: 'Block Seen Indicator',
type: 'checkbox',
checked: config.get('block.chatSeen' as any),
click(menuItem) {
config.set('block.chatSeen' as any, menuItem.checked);
}
},
{
label: 'Block Typing Indicator',
type: 'checkbox',
checked: config.get('block.typingIndicator' as any),
click(menuItem) {
config.set('block.typingIndicator' as any, menuItem.checked);
}
},
{
label: 'Block Delivery Receipts',
type: 'checkbox',
checked: config.get('block.deliveryReceipt' as any),
click(menuItem) {
config.set('block.deliveryReceipt' as any, menuItem.checked);
}
}
];
const advancedSubmenu: MenuItemConstructorOptions[] = [
{
label: 'Custom Styles',
click() {
const filePath = path.join(app.getPath('userData'), 'custom.css');
const defaultCustomStyle = `/*
This is the custom styles file where you can add anything you want.
The styles here will be injected into Messenger and will override default styles.
If you want to disable styles but keep the config, just comment the lines that you don't want to be used.
Here are some dark mode color variables to get you started.
Edit them to change color scheme of Messenger.
Press Command/Ctrl+R in Messenger to see your changes.
*/
:root {
--base: #000;
--base-ninety: rgba(255, 255, 255, 0.9);
--base-seventy-five: rgba(255, 255, 255, 0.75);
--base-seventy: rgba(255, 255, 255, 0.7);
--base-fifty: rgba(255, 255, 255, 0.5);
--base-fourty: rgba(255, 255, 255, 0.4);
--base-thirty: rgba(255, 255, 255, 0.3);
--base-twenty: rgba(255, 255, 255, 0.2);
--base-five: rgba(255, 255, 255, 0.05);
--base-ten: rgba(255, 255, 255, 0.1);
--base-nine: rgba(255, 255, 255, 0.09);
--container-color: #323232;
--container-dark-color: #1e1e1e;
--list-header-color: #222;
--blue: #0084ff;
--selected-conversation-background: linear-gradient(hsla(209, 110%, 45%, 0.9), hsla(209, 110%, 42%, 0.9));
}
`;
if (!existsSync(filePath)) {
writeFileSync(filePath, defaultCustomStyle, 'utf8');
}
shell.openItem(filePath);
}
}
];
const preferencesSubmenu: MenuItemConstructorOptions[] = [
{
label: 'Privacy',
submenu: privacySubmenu
},
{
label: 'Emoji Style',
submenu: await generateEmojiSubmenu(updateMenu)
},
{
label: 'Bounce Dock on Message',
type: 'checkbox',
visible: is.macos,
checked: config.get('bounceDockOnMessage'),
click() {
config.set('bounceDockOnMessage', !config.get('bounceDockOnMessage'));
}
},
{
label: 'Autoplay Videos',
id: 'video-autoplay',
type: 'checkbox',
checked: config.get('autoplayVideos'),
click() {
config.set('autoplayVideos', !config.get('autoplayVideos'));
sendAction('toggle-video-autoplay');
}
},
{
label: 'Show Message Preview in Notifications',
type: 'checkbox',
checked: config.get('notificationMessagePreview'),
click(menuItem) {
config.set('notificationMessagePreview', menuItem.checked);
}
},
{
label: 'Mute Notifications',
id: 'mute-notifications',
type: 'checkbox',
checked: config.get('notificationsMuted'),
click() {
sendAction('toggle-mute-notifications');
}
},
{
label: 'Show Unread Badge',
type: 'checkbox',
checked: config.get('showUnreadBadge'),
click() {
config.set('showUnreadBadge', !config.get('showUnreadBadge'));
}
},
{
label: 'Hardware Acceleration',
type: 'checkbox',
checked: config.get('hardwareAcceleration'),
click() {
config.set('hardwareAcceleration', !config.get('hardwareAcceleration'));
showRestartDialog('Messenger needs to be restarted to change hardware acceleration.');
}
},
{
label: 'Show Menu Bar Icon',
id: 'menuBarMode',
type: 'checkbox',
visible: is.macos,
checked: config.get('menuBarMode'),
click() {
config.set('menuBarMode', !config.get('menuBarMode'));
const [win] = BrowserWindow.getAllWindows();
toggleMenuBarMode(win);
}
},
{
label: 'Always on Top',
type: 'checkbox',
accelerator: 'CommandOrControl+Shift+T',
checked: config.get('alwaysOnTop'),
click(menuItem, focusedWindow) {
config.set('alwaysOnTop', menuItem.checked);
focusedWindow.setAlwaysOnTop(menuItem.checked);
}
},
{
label: 'Launch at Login',
visible: is.macos || is.windows,
type: 'checkbox',
checked: app.getLoginItemSettings().openAtLogin,
click(menuItem) {
app.setLoginItemSettings({
openAtLogin: menuItem.checked,
openAsHidden: menuItem.checked
});
}
},
{
label: 'Auto Hide Menu Bar',
type: 'checkbox',
visible: !is.macos,
checked: config.get('autoHideMenuBar'),
click(menuItem, focusedWindow) {
config.set('autoHideMenuBar', menuItem.checked);
focusedWindow.setAutoHideMenuBar(menuItem.checked);
focusedWindow.setMenuBarVisibility(!menuItem.checked);
if (menuItem.checked) {
dialog.showMessageBox({
type: 'info',
message: 'Press the Alt key to toggle the menu bar.',
buttons: ['OK']
});
}
}
},
{
label: 'Flash Window on Message',
type: 'checkbox',
visible: !is.macos,
checked: config.get('flashWindowOnMessage'),
click(menuItem) {
config.set('flashWindowOnMessage', menuItem.checked);
}
},
{
label: 'Show Tray Icon',
type: 'checkbox',
enabled: is.linux || is.windows,
checked: config.get('showTrayIcon'),
click() {
config.set('showTrayIcon', !config.get('showTrayIcon'));
sendAction('toggle-tray-icon');
}
},
{
label: 'Launch Minimized',
type: 'checkbox',
visible: !is.macos,
checked: config.get('launchMinimized'),
click() {
config.set('launchMinimized', !config.get('launchMinimized'));
sendAction('toggle-tray-icon');
}
},
{
label: 'Quit on Window Close',
type: 'checkbox',
checked: config.get('quitOnWindowClose'),
click() {
config.set('quitOnWindowClose', !config.get('quitOnWindowClose'));
}
},
{
type: 'separator'
},
{
label: 'Advanced',
submenu: advancedSubmenu
}
];
const viewSubmenu: MenuItemConstructorOptions[] = [
{
label: 'Reset Text Size',
accelerator: 'CommandOrControl+0',
click() {
sendAction('zoom-reset');
}
},
{
label: 'Increase Text Size',
accelerator: 'CommandOrControl+Plus',
click() {
sendAction('zoom-in');
}
},
{
label: 'Decrease Text Size',
accelerator: 'CommandOrControl+-',
click() {
sendAction('zoom-out');
}
},
{
type: 'separator'
},
{
label: 'Follow System Appearance',
type: 'checkbox',
visible: is.macos,
checked: config.get('followSystemAppearance'),
async click() {
config.set('followSystemAppearance', !config.get('followSystemAppearance'));
sendAction('set-dark-mode');
await updateMenu();
}
},
{
label: 'Dark Mode',
id: 'darkMode',
type: 'checkbox',
checked: config.get('darkMode'),
enabled: !is.macos || !config.get('followSystemAppearance'),
accelerator: 'CommandOrControl+D',
click() {
config.set('darkMode', !config.get('darkMode'));
sendAction('set-dark-mode');
}
},
{
label: 'Vibrancy',
visible: is.macos,
submenu: vibrancySubmenu
},
{
type: 'separator'
},
{
label: 'Hide Names and Avatars',
id: 'privateMode',
type: 'checkbox',
checked: config.get('privateMode'),
accelerator: 'CommandOrControl+Shift+N',
click() {
config.set('privateMode', !config.get('privateMode'));
sendAction('set-private-mode');
}
},
{
type: 'separator'
},
{
label: 'Show Sidebar',
type: 'checkbox',
checked: !config.get('sidebarHidden'),
accelerator: 'CommandOrControl+Shift+S',
click() {
sendAction('toggle-sidebar');
}
},
{
label: 'Show Message Buttons',
type: 'checkbox',
checked: config.get('showMessageButtons'),
click() {
config.set('showMessageButtons', !config.get('showMessageButtons'));
sendAction('toggle-message-buttons');
}
},
{
type: 'separator'
},
{
label: 'Show Active Contacts',
click() {
sendAction('show-active-contacts-view');
}
},
{
label: 'Show Message Requests',
click() {
sendAction('show-message-requests-view');
}
},
{
label: 'Show Hidden Threads',
click() {
sendAction('show-hidden-threads-view');
}
},
{
label: 'Toggle Unread Threads',
click() {
sendAction('toggle-unread-threads-view');
}
}
];
const conversationSubmenu: MenuItemConstructorOptions[] = [
{
label: 'Mute Conversation',
accelerator: 'CommandOrControl+Shift+M',
click() {
sendAction('mute-conversation');
}
},
{
label: 'Hide Conversation',
accelerator: 'CommandOrControl+Shift+H',
click() {
sendAction('hide-conversation');
}
},
{
label: 'Delete Conversation',
accelerator: 'CommandOrControl+Shift+D',
click() {
sendAction('delete-conversation');
}
},
{
type: 'separator'
},
{
label: 'Select Next Conversation',
accelerator: 'Control+Tab',
click() {
sendAction('next-conversation');
}
},
{
label: 'Select Previous Conversation',
accelerator: 'Control+Shift+Tab',
click() {
sendAction('previous-conversation');
}
},
{
type: 'separator'
},
{
label: 'Find Conversation',
accelerator: 'CommandOrControl+K',
click() {
sendAction('find');
}
},
{
label: 'Search in Conversation',
accelerator: 'CommandOrControl+F',
click() {
sendAction('search');
}
},
{
type: 'separator'
},
{
label: 'Insert GIF',
accelerator: 'CommandOrControl+G',
click() {
sendAction('insert-gif');
}
},
{
label: 'Insert Sticker',
accelerator: 'CommandOrControl+S',
click() {
sendAction('insert-sticker');
}
},
{
label: 'Insert Emoji',
accelerator: 'CommandOrControl+E',
click() {
sendAction('insert-emoji');
}
},
{
label: 'Attach Files',
accelerator: 'CommandOrControl+Shift+A',
click() {
sendAction('attach-files');
}
},
{
label: 'Focus Text Input',
accelerator: 'CommandOrControl+I',
click() {
sendAction('focus-text-input');
}
}
];
const helpSubmenu: MenuItemConstructorOptions[] = [
openUrlMenuItem({
label: 'Website',
url: 'https://sindresorhus.com/caprine'
}),
openUrlMenuItem({
label: 'Source Code',
url: 'https://github.com/sindresorhus/caprine'
}),
openUrlMenuItem({
label: 'Donate…',
url: 'https://github.com/sindresorhus/caprine?sponsor=1'
}),
{
label: 'Report an Issue…',
click() {
const body = `
<!-- Please succinctly describe your issue and steps to reproduce it. -->
---
${debugInfo()}`;
openNewGitHubIssue({
user: 'sindresorhus',
repo: 'caprine',
body
});
}
}
];
if (!is.macos) {
helpSubmenu.push(
{
type: 'separator'
},
aboutMenuItem({
icon: path.join(__dirname, '..', 'static', 'Icon.png'),
text: 'Created by Sindre Sorhus'
})
);
}
const debugSubmenu: MenuItemConstructorOptions[] = [
{
label: 'Show Settings',
click() {
config.openInEditor();
}
},
{
label: 'Show App Data',
click() {
shell.openItem(app.getPath('userData'));
}
},
{
type: 'separator'
},
{
label: 'Delete Settings',
click() {
config.clear();
app.relaunch();
app.quit();
}
},
{
label: 'Delete App Data',
click() {
shell.moveItemToTrash(app.getPath('userData'));
app.relaunch();
app.quit();
}
}
];
const macosTemplate: MenuItemConstructorOptions[] = [
appMenu([
{
label: 'Preferences',
submenu: preferencesSubmenu
},
{
label: 'Facebook Settings…',
accelerator: 'Command+,',
click() {
sendAction('show-preferences');
}
},
{
type: 'separator'
},
...switchItems
]),
{
// @ts-ignore Buggy Electron types
role: 'fileMenu',
submenu: [
newConversationItem,
{
type: 'separator'
},
{
role: 'close'
}
]
},
{
// @ts-ignore Buggy Electron types
role: 'editMenu'
},
{
// @ts-ignore Buggy Electron types
role: 'viewMenu',
submenu: viewSubmenu
},
{
label: 'Conversation',
submenu: conversationSubmenu
},
{
// @ts-ignore Buggy Electron types
role: 'windowMenu'
},
{
role: 'help',
submenu: helpSubmenu
}
];
const linuxWindowsTemplate: MenuItemConstructorOptions[] = [
{
// @ts-ignore Buggy Electron types
role: 'fileMenu',
submenu: [
newConversationItem,
{
type: 'separator'
},
{
				label: 'Preferences',
submenu: preferencesSubmenu
},
{
label: 'Messenger Settings',
accelerator: 'Control+,',
click() {
sendAction('show-preferences');
}
},
{
type: 'separator'
},
...switchItems,
{
type: 'separator'
},
{
role: 'quit'
}
]
},
{
// @ts-ignore Buggy Electron types
role: 'editMenu'
},
{
// @ts-ignore Buggy Electron types
role: 'viewMenu',
submenu: viewSubmenu
},
{
label: 'Conversation',
submenu: conversationSubmenu
},
{
role: 'help',
submenu: helpSubmenu
}
];
const template = is.macos ? macosTemplate : linuxWindowsTemplate;
if (is.development) {
template.push({
label: 'Debug',
submenu: debugSubmenu
});
}
const menu = Menu.buildFromTemplate(template);
	Menu.setApplicationMenu(menu);
	return menu;
}
new_ecdsa.go | // Original file is from the go src code. Modified by the Baidu blockchain team HawkJing.
//
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ecdsa implements the Elliptic Curve Digital Signature Algorithm, as
// defined in FIPS 186-3.
//
// This implementation derives the nonce from an AES-CTR CSPRNG keyed by
// ChopMD(256, SHA2-512(priv.D || entropy || hash)). The CSPRNG key is IRO by
// a result of Coron; the AES-CTR stream is IRO under standard assumptions.
package sign
// References:
// [NSA]: Suite B implementer's guide to FIPS 186-3,
// http://www.nsa.gov/ia/_files/ecdsa.pdf
// [SECG]: SECG, SEC1
// http://www.secg.org/sec1-v2.pdf
import (
// "crypto"
"crypto/ecdsa"
"crypto/elliptic"
"math/big"
)
// An invertible implements fast inverse mod Curve.Params().N
type invertible interface {
// Inverse returns the inverse of k in GF(P)
Inverse(k *big.Int) *big.Int
}
// combinedMult implements fast multiplication S1*g + S2*p (g - generator, p - arbitrary point)
type combinedMult interface {
CombinedMult(bigX, bigY *big.Int, baseScalar, scalar []byte) (x, y *big.Int)
}
const (
aesIV = "IV for ECDSA CTR"
)
// PublicKey represents an ECDSA public key.
//type PublicKey struct {
// elliptic.Curve
// X, Y *big.Int
//}
// PrivateKey represents an ECDSA private key.
//type PrivateKey struct {
// PublicKey
// D *big.Int
//}
var one = new(big.Int).SetInt64(1)
// randFieldElement returns a random element of the field underlying the given
// curve using the procedure given in [NSA] A.2.1.
func randFieldElement(c elliptic.Curve, entropy []byte) (k *big.Int, err error) {
params := c.Params()
// b := make([]byte, params.BitSize/8+8)
// _, err = io.ReadFull(rand, b)
// if err != nil {
// return
// }
// k = new(big.Int).SetBytes(b)
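	// Deterministic variant: derive k directly from the caller-supplied
	// entropy, then map it into [1, N-1] so it is a valid non-zero scalar
	// modulo the group order.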
k = new(big.Int).SetBytes(entropy)
n := new(big.Int).Sub(params.N, one)
k.Mod(k, n)
k.Add(k, one)
return
}
// GenerateKeyBySeed deterministically generates a public and private key pair from the given seed.
func GenerateKeyBySeed(c elliptic.Curve, seed []byte) (*ecdsa.PrivateKey, error) {
k, err := randFieldElement(c, seed)
if err != nil {
return nil, err
}
priv := new(ecdsa.PrivateKey)
priv.PublicKey.Curve = c
priv.D = k
priv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(k.Bytes())
return priv, nil
}
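// Illustrative usage sketch (not part of the original file). The seed
// derivation below is an assumption and requires "crypto/sha256"; the input
// must be a genuinely secret, high-entropy value, and reusing a seed
// reproduces the same private key.
//
//	seed := sha256.Sum256([]byte("secret, high-entropy input"))
//	priv, err := GenerateKeyBySeed(elliptic.P256(), seed[:])
//	if err != nil {
//		// handle error
//	}
//	_ = priv.PublicKey // derived public key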
main.rs | // organizing crabs, as one does
// https://adventofcode.com/2021/day/7
use std::vec::Vec;
use std::fs::File;
use std::io::{BufRead, BufReader};
// const FILENAME: &str = "example.txt";
const FILENAME: &str = "positions.txt";
// parse the input file to get starting positions
fn parse_input(filename: &str) -> Vec<u64> {
// initialize result
let mut result: Vec<u64> = Vec::new();
// set up a reader and file object
let file = File::open(filename).expect("Unable to open file.");
let reader = BufReader::new(file);
// there should only be one line
let line = reader.lines().next().expect("Unable to read first line.").expect("Same");
    for pos in line.split(',') {
        result.push(pos.parse().expect("Got a non-digit input."))
}
return result;
}
fn calculate_cost(positions: &[u64], dest: u64) -> u64 {
// returns the cost it would take to move to the given destination
let mut cost = 0;
for pos in positions {
let steps = ((dest as i64) - (*pos as i64)).abs() as u64;
        // each of the n steps costs 1, 2, ..., n fuel, so the total is the
        // n-th triangular number
        cost += steps * (steps + 1) / 2;
}
cost
}
fn main() {
// get starting positions
let mut positions = parse_input(FILENAME);
// get the median
positions.sort();
    // the median minimizes the sum of absolute distances, which is exactly
    // the Part A fuel cost
    let median = positions[positions.len() / 2];
    // calculate costs to get to median
let mut cost = 0;
for pos in &positions {
cost += ((median as i64) - (*pos as i64)).abs();
}
println!("Optimum position for Part A is {} (costs {})", median, cost);
// for part B we brute force things
let mut min_cost = u64::MAX;
let mut target = 0;
for tgt in positions[0]..(positions[positions.len()-1]+1) {
let cost = calculate_cost(&positions, tgt);
if cost < min_cost {
min_cost = cost;
target = tgt;
}
}
println!("Optimum position for Part B is {} (costs {})", target, min_cost);
} | |
sencebgan.py | import tensorflow as tf
from base.base_model import BaseModel
from utils.alad_utils import get_getter
import utils.alad_utils as sn
class SENCEBGAN(BaseModel):
def __init__(self, config):
super(SENCEBGAN, self).__init__(config)
self.build_model()
self.init_saver()
def build_model(self):
############################################################################################
# INIT
############################################################################################
# Kernel initialization for the convolutions
if self.config.trainer.init_type == "normal":
self.init_kernel = tf.random_normal_initializer(mean=0.0, stddev=0.02)
elif self.config.trainer.init_type == "xavier":
self.init_kernel = tf.contrib.layers.xavier_initializer(
uniform=False, seed=None, dtype=tf.float32
)
# Placeholders
self.is_training_gen = tf.placeholder(tf.bool)
self.is_training_dis = tf.placeholder(tf.bool)
self.is_training_enc_g = tf.placeholder(tf.bool)
self.is_training_enc_r = tf.placeholder(tf.bool)
self.feature_match1 = tf.placeholder(tf.float32)
self.feature_match2 = tf.placeholder(tf.float32)
self.image_input = tf.placeholder(
tf.float32, shape=[None] + self.config.trainer.image_dims, name="x"
)
self.noise_tensor = tf.placeholder(
tf.float32, shape=[None, self.config.trainer.noise_dim], name="noise"
)
############################################################################################
# MODEL
############################################################################################
self.logger.info("Building training graph...")
with tf.variable_scope("SENCEBGAN"):
# First training part
# G(z) ==> x'
with tf.variable_scope("Generator_Model"):
self.image_gen = self.generator(self.noise_tensor)
# Discriminator outputs
with tf.variable_scope("Discriminator_Model"):
self.embedding_real, self.decoded_real = self.discriminator(
self.image_input, do_spectral_norm=self.config.trainer.do_spectral_norm
)
self.embedding_fake, self.decoded_fake = self.discriminator(
self.image_gen, do_spectral_norm=self.config.trainer.do_spectral_norm
)
# Second training part
# E(x) ==> z'
with tf.variable_scope("Encoder_G_Model"):
self.image_encoded = self.encoder_g(self.image_input)
# G(z') ==> G(E(x)) ==> x''
with tf.variable_scope("Generator_Model"):
self.image_gen_enc = self.generator(self.image_encoded)
# Discriminator outputs
with tf.variable_scope("Discriminator_Model"):
self.embedding_enc_fake, self.decoded_enc_fake = self.discriminator(
self.image_gen_enc, do_spectral_norm=self.config.trainer.do_spectral_norm
)
self.embedding_enc_real, self.decoded_enc_real = self.discriminator(
self.image_input, do_spectral_norm=self.config.trainer.do_spectral_norm
)
with tf.variable_scope("Discriminator_Model_XX"):
self.im_logit_real, self.im_f_real = self.discriminator_xx(
self.image_input,
self.image_input,
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.im_logit_fake, self.im_f_fake = self.discriminator_xx(
self.image_input,
self.image_gen_enc,
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
# Third training part
with tf.variable_scope("Encoder_G_Model"):
self.image_encoded_r = self.encoder_g(self.image_input)
with tf.variable_scope("Generator_Model"):
self.image_gen_enc_r = self.generator(self.image_encoded_r)
with tf.variable_scope("Encoder_R_Model"):
self.image_ege = self.encoder_r(self.image_gen_enc_r)
with tf.variable_scope("Discriminator_Model_ZZ"):
self.z_logit_real, self.z_f_real = self.discriminator_zz(
self.image_encoded_r,
self.image_encoded_r,
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.z_logit_fake, self.z_f_fake = self.discriminator_zz(
self.image_encoded_r,
self.image_ege,
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
############################################################################################
# LOSS FUNCTIONS
############################################################################################
with tf.name_scope("Loss_Functions"):
with tf.name_scope("Generator_Discriminator"):
# Discriminator Loss
if self.config.trainer.mse_mode == "norm":
self.disc_loss_real = tf.reduce_mean(
self.mse_loss(
self.decoded_real,
self.image_input,
mode="norm",
order=self.config.trainer.order,
)
)
self.disc_loss_fake = tf.reduce_mean(
self.mse_loss(
self.decoded_fake,
self.image_gen,
mode="norm",
order=self.config.trainer.order,
)
)
elif self.config.trainer.mse_mode == "mse":
self.disc_loss_real = self.mse_loss(
self.decoded_real,
self.image_input,
mode="mse",
order=self.config.trainer.order,
)
self.disc_loss_fake = self.mse_loss(
self.decoded_fake,
self.image_gen,
mode="mse",
order=self.config.trainer.order,
)
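                # EBGAN-style hinge: minimize the reconstruction energy of
                # real samples while pushing fake samples' energy up to at
                # least disc_margin.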
self.loss_discriminator = (
tf.math.maximum(self.config.trainer.disc_margin - self.disc_loss_fake, 0)
+ self.disc_loss_real
)
# Generator Loss
pt_loss = 0
if self.config.trainer.pullaway:
pt_loss = self.pullaway_loss(self.embedding_fake)
self.loss_generator = self.disc_loss_fake + self.config.trainer.pt_weight * pt_loss
# New addition to enforce visual similarity
delta_noise = self.embedding_real - self.embedding_fake
delta_flat = tf.layers.Flatten()(delta_noise)
loss_noise_gen = tf.reduce_mean(tf.norm(delta_flat, ord=2, axis=1, keepdims=False))
self.loss_generator += 0.1 * loss_noise_gen
with tf.name_scope("Encoder_G"):
if self.config.trainer.mse_mode == "norm":
self.loss_enc_rec = tf.reduce_mean(
self.mse_loss(
self.image_gen_enc,
self.image_input,
mode="norm",
order=self.config.trainer.order,
)
)
self.loss_enc_f = tf.reduce_mean(
self.mse_loss(
self.decoded_enc_real,
self.decoded_enc_fake,
mode="norm",
order=self.config.trainer.order,
)
)
elif self.config.trainer.mse_mode == "mse":
self.loss_enc_rec = tf.reduce_mean(
self.mse_loss(
self.image_gen_enc,
self.image_input,
mode="mse",
order=self.config.trainer.order,
)
)
self.loss_enc_f = tf.reduce_mean(
self.mse_loss(
self.embedding_enc_real,
self.embedding_enc_fake,
mode="mse",
order=self.config.trainer.order,
)
)
self.loss_encoder_g = (
self.loss_enc_rec + self.config.trainer.encoder_f_factor * self.loss_enc_f
)
if self.config.trainer.enable_disc_xx:
self.enc_xx_real = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.im_logit_real, labels=tf.zeros_like(self.im_logit_real)
)
self.enc_xx_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.im_logit_fake, labels=tf.ones_like(self.im_logit_fake)
)
self.enc_loss_xx = tf.reduce_mean(self.enc_xx_real + self.enc_xx_fake)
self.loss_encoder_g += self.enc_loss_xx
with tf.name_scope("Encoder_R"):
if self.config.trainer.mse_mode == "norm":
self.loss_encoder_r = tf.reduce_mean(
self.mse_loss(
self.image_ege,
self.image_encoded_r,
mode="norm",
order=self.config.trainer.order,
)
)
elif self.config.trainer.mse_mode == "mse":
self.loss_encoder_r = tf.reduce_mean(
self.mse_loss(
self.image_ege,
self.image_encoded_r,
mode="mse",
order=self.config.trainer.order,
)
)
if self.config.trainer.enable_disc_zz:
self.enc_zz_real = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.z_logit_real, labels=tf.zeros_like(self.z_logit_real)
)
self.enc_zz_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.z_logit_fake, labels=tf.ones_like(self.z_logit_fake)
)
self.enc_loss_zz = tf.reduce_mean(self.enc_zz_real + self.enc_zz_fake)
self.loss_encoder_r += self.enc_loss_zz
if self.config.trainer.enable_disc_xx:
with tf.name_scope("Discriminator_XX"):
self.loss_xx_real = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.im_logit_real, labels=tf.ones_like(self.im_logit_real)
)
self.loss_xx_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.im_logit_fake, labels=tf.zeros_like(self.im_logit_fake)
)
self.dis_loss_xx = tf.reduce_mean(self.loss_xx_real + self.loss_xx_fake)
if self.config.trainer.enable_disc_zz:
with tf.name_scope("Discriminator_ZZ"):
self.loss_zz_real = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.z_logit_real, labels=tf.ones_like(self.z_logit_real)
)
self.loss_zz_fake = tf.nn.sigmoid_cross_entropy_with_logits(
logits=self.z_logit_fake, labels=tf.zeros_like(self.z_logit_fake)
)
self.dis_loss_zz = tf.reduce_mean(self.loss_zz_real + self.loss_zz_fake)
############################################################################################
# OPTIMIZERS
############################################################################################
with tf.name_scope("Optimizers"):
self.generator_optimizer = tf.train.AdamOptimizer(
self.config.trainer.standard_lr_gen,
beta1=self.config.trainer.optimizer_adam_beta1,
beta2=self.config.trainer.optimizer_adam_beta2,
)
self.encoder_g_optimizer = tf.train.AdamOptimizer(
self.config.trainer.standard_lr_enc,
beta1=self.config.trainer.optimizer_adam_beta1,
beta2=self.config.trainer.optimizer_adam_beta2,
)
self.encoder_r_optimizer = tf.train.AdamOptimizer(
self.config.trainer.standard_lr_enc,
beta1=self.config.trainer.optimizer_adam_beta1,
beta2=self.config.trainer.optimizer_adam_beta2,
)
self.discriminator_optimizer = tf.train.AdamOptimizer(
self.config.trainer.standard_lr_dis,
beta1=self.config.trainer.optimizer_adam_beta1,
beta2=self.config.trainer.optimizer_adam_beta2,
)
# Collect all the variables
all_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
# Generator Network Variables
self.generator_vars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Generator_Model")
]
# Discriminator Network Variables
self.discriminator_vars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Discriminator_Model")
]
# Discriminator Network Variables
self.encoder_g_vars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Encoder_G_Model")
]
self.encoder_r_vars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Encoder_R_Model")
]
self.dxxvars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Discriminator_Model_XX")
]
self.dzzvars = [
v for v in all_variables if v.name.startswith("SENCEBGAN/Discriminator_Model_ZZ")
]
# Generator Network Operations
self.gen_update_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Generator_Model"
)
# Discriminator Network Operations
self.disc_update_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Discriminator_Model"
)
self.encg_update_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Encoder_G_Model"
)
self.encr_update_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Encoder_R_Model"
)
self.update_ops_dis_xx = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Discriminator_Model_XX"
)
self.update_ops_dis_zz = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, scope="SENCEBGAN/Discriminator_Model_ZZ"
)
with tf.control_dependencies(self.gen_update_ops):
self.gen_op = self.generator_optimizer.minimize(
self.loss_generator,
var_list=self.generator_vars,
global_step=self.global_step_tensor,
)
with tf.control_dependencies(self.disc_update_ops):
self.disc_op = self.discriminator_optimizer.minimize(
self.loss_discriminator, var_list=self.discriminator_vars
)
with tf.control_dependencies(self.encg_update_ops):
self.encg_op = self.encoder_g_optimizer.minimize(
self.loss_encoder_g,
var_list=self.encoder_g_vars,
global_step=self.global_step_tensor,
)
with tf.control_dependencies(self.encr_update_ops):
self.encr_op = self.encoder_r_optimizer.minimize(
self.loss_encoder_r,
var_list=self.encoder_r_vars,
global_step=self.global_step_tensor,
)
if self.config.trainer.enable_disc_xx:
with tf.control_dependencies(self.update_ops_dis_xx):
self.disc_op_xx = self.discriminator_optimizer.minimize(
self.dis_loss_xx, var_list=self.dxxvars
)
if self.config.trainer.enable_disc_zz:
with tf.control_dependencies(self.update_ops_dis_zz):
self.disc_op_zz = self.discriminator_optimizer.minimize(
self.dis_loss_zz, var_list=self.dzzvars
)
# Exponential Moving Average for Estimation
self.dis_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_dis = self.dis_ema.apply(self.discriminator_vars)
self.gen_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_gen = self.gen_ema.apply(self.generator_vars)
self.encg_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_encg = self.encg_ema.apply(self.encoder_g_vars)
self.encr_ema = tf.train.ExponentialMovingAverage(decay=self.config.trainer.ema_decay)
maintain_averages_op_encr = self.encr_ema.apply(self.encoder_r_vars)
if self.config.trainer.enable_disc_xx:
self.dis_xx_ema = tf.train.ExponentialMovingAverage(
decay=self.config.trainer.ema_decay
)
maintain_averages_op_dis_xx = self.dis_xx_ema.apply(self.dxxvars)
if self.config.trainer.enable_disc_zz:
self.dis_zz_ema = tf.train.ExponentialMovingAverage(
decay=self.config.trainer.ema_decay
)
maintain_averages_op_dis_zz = self.dis_zz_ema.apply(self.dzzvars)
with tf.control_dependencies([self.disc_op]):
self.train_dis_op = tf.group(maintain_averages_op_dis)
with tf.control_dependencies([self.gen_op]):
self.train_gen_op = tf.group(maintain_averages_op_gen)
with tf.control_dependencies([self.encg_op]):
self.train_enc_g_op = tf.group(maintain_averages_op_encg)
with tf.control_dependencies([self.encr_op]):
self.train_enc_r_op = tf.group(maintain_averages_op_encr)
if self.config.trainer.enable_disc_xx:
with tf.control_dependencies([self.disc_op_xx]):
self.train_dis_op_xx = tf.group(maintain_averages_op_dis_xx)
if self.config.trainer.enable_disc_zz:
with tf.control_dependencies([self.disc_op_zz]):
self.train_dis_op_zz = tf.group(maintain_averages_op_dis_zz)
############################################################################################
# TESTING
############################################################################################
self.logger.info("Building Testing Graph...")
with tf.variable_scope("SENCEBGAN"):
with tf.variable_scope("Discriminator_Model"):
self.embedding_q_ema, self.decoded_q_ema = self.discriminator(
self.image_input,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
with tf.variable_scope("Generator_Model"):
self.image_gen_ema = self.generator(
self.embedding_q_ema, getter=get_getter(self.gen_ema)
)
with tf.variable_scope("Discriminator_Model"):
self.embedding_rec_ema, self.decoded_rec_ema = self.discriminator(
self.image_gen_ema,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
# Second Training Part
with tf.variable_scope("Encoder_G_Model"):
self.image_encoded_ema = self.encoder_g(
self.image_input, getter=get_getter(self.encg_ema)
)
with tf.variable_scope("Generator_Model"):
self.image_gen_enc_ema = self.generator(
self.image_encoded_ema, getter=get_getter(self.gen_ema)
)
with tf.variable_scope("Discriminator_Model"):
self.embedding_enc_fake_ema, self.decoded_enc_fake_ema = self.discriminator(
self.image_gen_enc_ema,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.embedding_enc_real_ema, self.decoded_enc_real_ema = self.discriminator(
self.image_input,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
if self.config.trainer.enable_disc_xx:
with tf.variable_scope("Discriminator_Model_XX"):
self.im_logit_real_ema, self.im_f_real_ema = self.discriminator_xx(
self.image_input,
self.image_input,
getter=get_getter(self.dis_xx_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.im_logit_fake_ema, self.im_f_fake_ema = self.discriminator_xx(
self.image_input,
self.image_gen_enc_ema,
getter=get_getter(self.dis_xx_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
# Third training part
with tf.variable_scope("Encoder_G_Model"):
self.image_encoded_r_ema = self.encoder_g(self.image_input)
with tf.variable_scope("Generator_Model"):
self.image_gen_enc_r_ema = self.generator(self.image_encoded_r_ema)
with tf.variable_scope("Encoder_R_Model"):
self.image_ege_ema = self.encoder_r(self.image_gen_enc_r_ema)
with tf.variable_scope("Discriminator_Model"):
self.embedding_encr_fake_ema, self.decoded_encr_fake_ema = self.discriminator(
self.image_gen_enc_r_ema,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.embedding_encr_real_ema, self.decoded_encr_real_ema = self.discriminator(
self.image_input,
getter=get_getter(self.dis_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
if self.config.trainer.enable_disc_zz:
with tf.variable_scope("Discriminator_Model_ZZ"):
self.z_logit_real_ema, self.z_f_real_ema = self.discriminator_zz(
self.image_encoded_r_ema,
self.image_encoded_r_ema,
getter=get_getter(self.dis_zz_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
self.z_logit_fake_ema, self.z_f_fake_ema = self.discriminator_zz(
self.image_encoded_r_ema,
self.image_ege_ema,
getter=get_getter(self.dis_zz_ema),
do_spectral_norm=self.config.trainer.do_spectral_norm,
)
with tf.name_scope("Testing"):
with tf.name_scope("Image_Based"):
delta = self.image_input - self.image_gen_enc_ema
self.rec_residual = -delta
delta_flat = tf.layers.Flatten()(delta)
img_score_l1 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="img_loss__1"
)
self.img_score_l1 = tf.squeeze(img_score_l1)
delta = self.decoded_enc_fake_ema - self.decoded_enc_real_ema
delta_flat = tf.layers.Flatten()(delta)
img_score_l2 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="img_loss__2"
)
self.img_score_l2 = tf.squeeze(img_score_l2)
with tf.name_scope("Noise_Based"):
delta = self.image_encoded_r_ema - self.image_ege_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_1 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="final_score_1"
)
self.final_score_1 = tf.squeeze(final_score_1)
self.score_comb_im = (
1 * self.img_score_l1
+ self.feature_match1 * self.final_score_1
)
delta = self.image_encoded_r_ema - self.embedding_enc_fake_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_2 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="final_score_2"
)
self.final_score_2 = tf.squeeze(final_score_2)
delta = self.embedding_encr_real_ema - self.embedding_encr_fake_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_3 = tf.norm(
delta_flat, ord=2, axis=1, keepdims=False, name="final_score_3"
)
self.final_score_3 = tf.squeeze(final_score_3)
# Combo 1
self.score_comb_z = (
(1 - self.feature_match2) * self.final_score_2
+ self.feature_match2 * self.final_score_3
)
# Combo 2
if self.config.trainer.enable_disc_xx:
delta = self.im_f_real_ema - self.im_f_fake_ema
delta_flat = tf.layers.Flatten()(delta)
final_score_4 = tf.norm(
delta_flat, ord=1, axis=1, keepdims=False, name="final_score_4"
)
self.final_score_4 = tf.squeeze(final_score_4)
                if self.config.trainer.enable_disc_zz:
                    delta = self.z_f_real_ema - self.z_f_fake_ema
                    delta_flat = tf.layers.Flatten()(delta)
                    final_score_6 = tf.norm(
                        delta_flat, ord=1, axis=1, keepdims=False, name="final_score_6"
                    )
                    self.final_score_6 = tf.squeeze(final_score_6)
############################################################################################
# TENSORBOARD
############################################################################################
if self.config.log.enable_summary:
with tf.name_scope("train_summary"):
with tf.name_scope("dis_summary"):
tf.summary.scalar("loss_disc", self.loss_discriminator, ["dis"])
tf.summary.scalar("loss_disc_real", self.disc_loss_real, ["dis"])
tf.summary.scalar("loss_disc_fake", self.disc_loss_fake, ["dis"])
if self.config.trainer.enable_disc_xx:
tf.summary.scalar("loss_dis_xx", self.dis_loss_xx, ["enc_g"])
if self.config.trainer.enable_disc_zz:
tf.summary.scalar("loss_dis_zz", self.dis_loss_zz, ["enc_r"])
with tf.name_scope("gen_summary"):
tf.summary.scalar("loss_generator", self.loss_generator, ["gen"])
with tf.name_scope("enc_summary"):
tf.summary.scalar("loss_encoder_g", self.loss_encoder_g, ["enc_g"])
tf.summary.scalar("loss_encoder_r", self.loss_encoder_r, ["enc_r"])
with tf.name_scope("img_summary"):
tf.summary.image("input_image", self.image_input, 1, ["img_1"])
tf.summary.image("reconstructed", self.image_gen, 1, ["img_1"])
# From discriminator in part 1
tf.summary.image("decoded_real", self.decoded_real, 1, ["img_1"])
tf.summary.image("decoded_fake", self.decoded_fake, 1, ["img_1"])
# Second Stage of Training
tf.summary.image("input_enc", self.image_input, 1, ["img_2"])
tf.summary.image("reconstructed", self.image_gen_enc, 1, ["img_2"])
# From discriminator in part 2
tf.summary.image("decoded_enc_real", self.decoded_enc_real, 1, ["img_2"])
tf.summary.image("decoded_enc_fake", self.decoded_enc_fake, 1, ["img_2"])
# Testing
tf.summary.image("input_image", self.image_input, 1, ["test"])
tf.summary.image("reconstructed", self.image_gen_enc_r_ema, 1, ["test"])
tf.summary.image("residual", self.rec_residual, 1, ["test"])
self.sum_op_dis = tf.summary.merge_all("dis")
self.sum_op_gen = tf.summary.merge_all("gen")
self.sum_op_enc_g = tf.summary.merge_all("enc_g")
self.sum_op_enc_r = tf.summary.merge_all("enc_r")
self.sum_op_im_1 = tf.summary.merge_all("img_1")
self.sum_op_im_2 = tf.summary.merge_all("img_2")
self.sum_op_im_test = tf.summary.merge_all("test")
self.sum_op = tf.summary.merge([self.sum_op_dis, self.sum_op_gen])
###############################################################################################
# MODULES
###############################################################################################
def generator(self, noise_input, getter=None):
with tf.variable_scope("Generator", custom_getter=getter, reuse=tf.AUTO_REUSE):
net_name = "Layer_1"
with tf.variable_scope(net_name):
x_g = tf.layers.Dense(
units=2 * 2 * 256, kernel_initializer=self.init_kernel, name="fc"
)(noise_input)
x_g = tf.layers.batch_normalization(
x_g,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_gen,
name="batch_normalization",
)
x_g = tf.nn.leaky_relu(
features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
)
x_g = tf.reshape(x_g, [-1, 2, 2, 256])
net_name = "Layer_2"
with tf.variable_scope(net_name):
x_g = tf.layers.Conv2DTranspose(
filters=128,
kernel_size=5,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv2t",
)(x_g)
x_g = tf.layers.batch_normalization(
x_g,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_gen,
name="batch_normalization",
)
x_g = tf.nn.leaky_relu(
features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
)
net_name = "Layer_3"
with tf.variable_scope(net_name):
x_g = tf.layers.Conv2DTranspose(
filters=64,
kernel_size=5,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv2t",
)(x_g)
x_g = tf.layers.batch_normalization(
x_g,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_gen,
name="batch_normalization",
)
x_g = tf.nn.leaky_relu(
features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
)
net_name = "Layer_4"
with tf.variable_scope(net_name):
x_g = tf.layers.Conv2DTranspose(
filters=32,
kernel_size=5,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv2t",
)(x_g)
x_g = tf.layers.batch_normalization(
x_g,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_gen,
name="batch_normalization",
)
x_g = tf.nn.leaky_relu(
features=x_g, alpha=self.config.trainer.leakyReLU_alpha, name="relu"
)
net_name = "Layer_5"
with tf.variable_scope(net_name):
x_g = tf.layers.Conv2DTranspose(
filters=1,
kernel_size=5,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv2t",
)(x_g)
x_g = tf.tanh(x_g, name="tanh")
return x_g
def discriminator(self, image_input, getter=None, do_spectral_norm=False):
layers = sn if do_spectral_norm else tf.layers
with tf.variable_scope("Discriminator", custom_getter=getter, reuse=tf.AUTO_REUSE):
with tf.variable_scope("Encoder"):
x_e = tf.reshape(
image_input,
[-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
)
net_name = "Layer_1"
with tf.variable_scope(net_name):
x_e = layers.conv2d(
x_e,
filters=32,
kernel_size=5,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv",
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
# 14 x 14 x 64
net_name = "Layer_2"
with tf.variable_scope(net_name):
x_e = layers.conv2d(
x_e,
filters=64,
kernel_size=5,
padding="same",
strides=2,
kernel_initializer=self.init_kernel,
name="conv",
)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
# 7 x 7 x 128
net_name = "Layer_3"
with tf.variable_scope(net_name):
x_e = layers.conv2d(
x_e,
filters=128,
kernel_size=5,
padding="same",
strides=2,
kernel_initializer=self.init_kernel,
name="conv",
)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
# 4 x 4 x 256
x_e = tf.layers.Flatten()(x_e)
net_name = "Layer_4"
with tf.variable_scope(net_name):
x_e = layers.dense(
x_e,
units=self.config.trainer.noise_dim,
kernel_initializer=self.init_kernel,
name="fc",
)
embedding = x_e
with tf.variable_scope("Decoder"):
net = tf.reshape(embedding, [-1, 1, 1, self.config.trainer.noise_dim])
net_name = "layer_1"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=256,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv1",
)(net)
net = tf.layers.batch_normalization(
inputs=net,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
name="tconv1/bn",
)
net = tf.nn.relu(features=net, name="tconv1/relu")
net_name = "layer_2"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=128,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv2",
)(net)
net = tf.layers.batch_normalization(
inputs=net,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
name="tconv2/bn",
)
net = tf.nn.relu(features=net, name="tconv2/relu")
net_name = "layer_3"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=64,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv3",
)(net)
net = tf.layers.batch_normalization(
inputs=net,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
name="tconv3/bn",
)
net = tf.nn.relu(features=net, name="tconv3/relu")
net_name = "layer_4"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=32,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv4",
)(net)
net = tf.layers.batch_normalization(
inputs=net,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_dis,
name="tconv4/bn",
)
net = tf.nn.relu(features=net, name="tconv4/relu")
net_name = "layer_5"
with tf.variable_scope(net_name):
net = tf.layers.Conv2DTranspose(
filters=1,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="tconv5",
)(net)
decoded = tf.nn.tanh(net, name="tconv5/tanh")
return embedding, decoded
def encoder_g(self, image_input, getter=None):
with tf.variable_scope("Encoder_G", custom_getter=getter, reuse=tf.AUTO_REUSE):
x_e = tf.reshape(
image_input,
[-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
)
net_name = "Layer_1"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=64,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_g,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
net_name = "Layer_2"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=128,
kernel_size=5,
padding="same",
strides=(2, 2),
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_g,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
net_name = "Layer_3"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=256,
kernel_size=5,
padding="same",
strides=(2, 2),
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_g,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
x_e = tf.layers.Flatten()(x_e)
net_name = "Layer_4"
with tf.variable_scope(net_name):
x_e = tf.layers.Dense(
units=self.config.trainer.noise_dim,
kernel_initializer=self.init_kernel,
name="fc",
)(x_e)
return x_e
def encoder_r(self, image_input, getter=None):
        with tf.variable_scope("Encoder_R", custom_getter=getter, reuse=tf.AUTO_REUSE):
            x_e = tf.reshape(
                image_input,
                [-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
            )
            net_name = "Layer_1"
            with tf.variable_scope(net_name):
                x_e = tf.layers.Conv2D(
                    filters=64,
                    kernel_size=5,
                    strides=(2, 2),
                    padding="same",
                    kernel_initializer=self.init_kernel,
                    name="conv",
                )(x_e)
                x_e = tf.layers.batch_normalization(
                    x_e,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_enc_r,
                )
                x_e = tf.nn.leaky_relu(
                    features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
                )
            net_name = "Layer_2"
            with tf.variable_scope(net_name):
                x_e = tf.layers.Conv2D(
                    filters=128,
                    kernel_size=5,
                    padding="same",
                    strides=(2, 2),
                    kernel_initializer=self.init_kernel,
                    name="conv",
                )(x_e)
                x_e = tf.layers.batch_normalization(
                    x_e,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_enc_r,
                )
                x_e = tf.nn.leaky_relu(
                    features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
                )
            net_name = "Layer_3"
            with tf.variable_scope(net_name):
                x_e = tf.layers.Conv2D(
                    filters=256,
                    kernel_size=5,
                    padding="same",
                    strides=(2, 2),
                    kernel_initializer=self.init_kernel,
                    name="conv",
                )(x_e)
                x_e = tf.layers.batch_normalization(
                    x_e,
                    momentum=self.config.trainer.batch_momentum,
                    training=self.is_training_enc_r,
                )
                x_e = tf.nn.leaky_relu(
                    features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
                )
            x_e = tf.layers.Flatten()(x_e)
            net_name = "Layer_4"
            with tf.variable_scope(net_name):
                x_e = tf.layers.Dense(
                    units=self.config.trainer.noise_dim,
                    kernel_initializer=self.init_kernel,
                    name="fc",
                )(x_e)
            return x_e
# Regularizer discriminator for the Generator Encoder
def discriminator_xx(self, img_tensor, recreated_img, getter=None, do_spectral_norm=False):
""" Discriminator architecture in tensorflow
Discriminates between (x, x) and (x, rec_x)
Args:
img_tensor:
recreated_img:
getter: for exponential moving average during inference
reuse: sharing variables or not
do_spectral_norm:
"""
layers = sn if do_spectral_norm else tf.layers
with tf.variable_scope("Discriminator_xx", reuse=tf.AUTO_REUSE, custom_getter=getter):
net = tf.concat([img_tensor, recreated_img], axis=1)
net_name = "layer_1"
with tf.variable_scope(net_name):
net = layers.conv2d(
net,
filters=64,
kernel_size=4,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv1",
)
net = tf.nn.leaky_relu(
features=net, alpha=self.config.trainer.leakyReLU_alpha, name="conv2/leaky_relu"
)
net = tf.layers.dropout(
net,
rate=self.config.trainer.dropout_rate,
training=self.is_training_enc_g,
name="dropout",
)
with tf.variable_scope(net_name, reuse=True):
weights = tf.get_variable("conv1/kernel")
net_name = "layer_2"
with tf.variable_scope(net_name):
net = layers.conv2d(
net,
filters=128,
kernel_size=4,
strides=2,
padding="same",
kernel_initializer=self.init_kernel,
name="conv2",
)
net = tf.nn.leaky_relu(
features=net, alpha=self.config.trainer.leakyReLU_alpha, name="conv2/leaky_relu"
)
net = tf.layers.dropout(
net,
rate=self.config.trainer.dropout_rate,
training=self.is_training_enc_g,
name="dropout",
)
net = tf.layers.Flatten()(net)
intermediate_layer = net
net_name = "layer_3"
with tf.variable_scope(net_name):
net = tf.layers.dense(net, units=1, kernel_initializer=self.init_kernel, name="fc")
logits = tf.squeeze(net)
return logits, intermediate_layer
# Regularizer discriminator for the Reconstruction Encoder
def discriminator_zz(self, noise_tensor, recreated_noise, getter=None, do_spectral_norm=False):
""" Discriminator architecture in tensorflow
Discriminates between (z, z) and (z, rec_z)
Args:
noise_tensor:
recreated_noise:
getter: for exponential moving average during inference
reuse: sharing variables or not
do_spectral_norm:
"""
layers = sn if do_spectral_norm else tf.layers
with tf.variable_scope("Discriminator_zz", reuse=tf.AUTO_REUSE, custom_getter=getter):
y = tf.concat([noise_tensor, recreated_noise], axis=-1)
net_name = "y_layer_1"
with tf.variable_scope(net_name):
y = layers.dense(y, units=64, kernel_initializer=self.init_kernel, name="fc")
y = tf.nn.leaky_relu(features=y, alpha=self.config.trainer.leakyReLU_alpha)
y = tf.layers.dropout(
y,
rate=self.config.trainer.dropout_rate,
training=self.is_training_enc_r,
name="dropout",
)
net_name = "y_layer_2"
with tf.variable_scope(net_name):
y = layers.dense(y, units=32, kernel_initializer=self.init_kernel, name="fc")
y = tf.nn.leaky_relu(features=y, alpha=self.config.trainer.leakyReLU_alpha)
y = tf.layers.dropout(
y,
rate=self.config.trainer.dropout_rate,
training=self.is_training_enc_r,
name="dropout",
)
intermediate_layer = y
net_name = "y_layer_3"
with tf.variable_scope(net_name):
y = layers.dense(y, units=1, kernel_initializer=self.init_kernel, name="fc")
logits = tf.squeeze(y)
return logits, intermediate_layer
###############################################################################################
# CUSTOM LOSSES
###############################################################################################
def mse_loss(self, pred, data, mode="norm", order=2):
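        # "norm" returns a per-sample vector of L-`order` norms over the
        # flattened difference; "mse" returns a single scalar mean squared
        # error over the whole batch.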
if mode == "norm":
delta = pred - data
delta = tf.layers.Flatten()(delta)
loss_val = tf.norm(delta, ord=order, axis=1, keepdims=False)
elif mode == "mse":
loss_val = tf.reduce_mean(tf.squared_difference(pred, data))
return loss_val
def pullaway_loss(self, embeddings):
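        # Pulling-away term in the spirit of EBGAN (Zhao et al., 2016): mean
        # pairwise cosine similarity between distinct embeddings in the batch,
        # encouraging diverse generator outputs. Unlike the original
        # formulation, the similarities are not squared here.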
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
normalized_embeddings = embeddings / norm
similarity = tf.matmul(normalized_embeddings, normalized_embeddings, transpose_b=True)
batch_size = tf.cast(tf.shape(embeddings)[0], tf.float32)
pt_loss = (tf.reduce_sum(similarity) - batch_size) / (batch_size * (batch_size - 1))
return pt_loss
def init_saver(self):
self.saver = tf.train.Saver(max_to_keep=self.config.log.max_to_keep)
| with tf.variable_scope("Encoder_R", custom_getter=getter, reuse=tf.AUTO_REUSE):
x_e = tf.reshape(
image_input,
[-1, self.config.data_loader.image_size, self.config.data_loader.image_size, 1],
)
net_name = "Layer_1"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=64,
kernel_size=5,
strides=(2, 2),
padding="same",
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_r,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
net_name = "Layer_2"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=128,
kernel_size=5,
padding="same",
strides=(2, 2),
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_r,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
net_name = "Layer_3"
with tf.variable_scope(net_name):
x_e = tf.layers.Conv2D(
filters=256,
kernel_size=5,
padding="same",
strides=(2, 2),
kernel_initializer=self.init_kernel,
name="conv",
)(x_e)
x_e = tf.layers.batch_normalization(
x_e,
momentum=self.config.trainer.batch_momentum,
training=self.is_training_enc_r,
)
x_e = tf.nn.leaky_relu(
features=x_e, alpha=self.config.trainer.leakyReLU_alpha, name="leaky_relu"
)
x_e = tf.layers.Flatten()(x_e)
net_name = "Layer_4"
with tf.variable_scope(net_name):
x_e = tf.layers.Dense(
units=self.config.trainer.noise_dim,
kernel_initializer=self.init_kernel,
name="fc",
)(x_e)
return x_e |
geographic_view_service.pb.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.13.0
// source: google/ads/googleads/v2/services/geographic_view_service.proto
package services
import (
context "context"
reflect "reflect"
sync "sync"
proto "github.com/golang/protobuf/proto"
resources "google.golang.org/genproto/googleapis/ads/googleads/v2/resources"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// Request message for [GeographicViewService.GetGeographicView][google.ads.googleads.v2.services.GeographicViewService.GetGeographicView].
type GetGeographicViewRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. The resource name of the geographic view to fetch.
ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
}
func (x *GetGeographicViewRequest) Reset() {
*x = GetGeographicViewRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v2_services_geographic_view_service_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *GetGeographicViewRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetGeographicViewRequest) ProtoMessage() {}
func (x *GetGeographicViewRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v2_services_geographic_view_service_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetGeographicViewRequest.ProtoReflect.Descriptor instead.
func (*GetGeographicViewRequest) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v2_services_geographic_view_service_proto_rawDescGZIP(), []int{0}
}
func (x *GetGeographicViewRequest) GetResourceName() string {
if x != nil {
return x.ResourceName
}
return ""
}
var File_google_ads_googleads_v2_services_geographic_view_service_proto protoreflect.FileDescriptor
var file_google_ads_googleads_v2_services_geographic_view_service_proto_rawDesc = []byte{
0x0a, 0x3e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x73, 0x2f, 0x67, 0x65, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x76, 0x69,
0x65, 0x77, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x12, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x73, 0x1a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x65, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x73, 0x2f, 0x67, 0x65, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63,
0x5f, 0x76, 0x69, 0x65, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66,
0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f,
0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x70,
0x0a, 0x18, 0x47, 0x65, 0x74, 0x47, 0x65, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x56,
0x69, 0x65, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x54, 0x0a, 0x0d, 0x72, 0x65,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x42, 0x2f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x29, 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x65, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x56, 0x69,
0x65, 0x77, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65,
0x32, 0x84, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x56,
0x69, 0x65, 0x77, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xcd, 0x01, 0x0a, 0x11, 0x47,
0x65, 0x74, 0x47, 0x65, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x56, 0x69, 0x65, 0x77,
0x12, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x65, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69,
0x63, 0x56, 0x69, 0x65, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x61, 0x64, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
0x2e, 0x47, 0x65, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x56, 0x69, 0x65, 0x77, 0x22,
0x49, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x12, 0x31, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x63, 0x75, 0x73, 0x74,
0x6f, 0x6d, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x67, 0x65, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68,
0x69, 0x63, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x72, 0x65, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0x1b, 0xca, 0x41, 0x18, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x81, 0x02, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
0x42, 0x1a, 0x47, 0x65, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x56, 0x69, 0x65, 0x77,
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72,
0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x61, 0x64, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x3b,
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0xa2, 0x02, 0x03, 0x47, 0x41, 0x41, 0xaa, 0x02,
0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x41, 0x64, 0x73, 0x2e, 0x56, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x73, 0xca, 0x02, 0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x73, 0x5c, 0x47,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x5c, 0x56, 0x32, 0x5c, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x73, 0xea, 0x02, 0x24, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x41,
0x64, 0x73, 0x3a, 0x3a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x3a, 0x3a, 0x56,
0x32, 0x3a, 0x3a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
}
var (
file_google_ads_googleads_v2_services_geographic_view_service_proto_rawDescOnce sync.Once
file_google_ads_googleads_v2_services_geographic_view_service_proto_rawDescData = file_google_ads_googleads_v2_services_geographic_view_service_proto_rawDesc
)
func file_google_ads_googleads_v2_services_geographic_view_service_proto_rawDescGZIP() []byte {
file_google_ads_googleads_v2_services_geographic_view_service_proto_rawDescOnce.Do(func() {
file_google_ads_googleads_v2_services_geographic_view_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_ads_googleads_v2_services_geographic_view_service_proto_rawDescData)
})
return file_google_ads_googleads_v2_services_geographic_view_service_proto_rawDescData
}
var file_google_ads_googleads_v2_services_geographic_view_service_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_google_ads_googleads_v2_services_geographic_view_service_proto_goTypes = []interface{}{
(*GetGeographicViewRequest)(nil), // 0: google.ads.googleads.v2.services.GetGeographicViewRequest
(*resources.GeographicView)(nil), // 1: google.ads.googleads.v2.resources.GeographicView
}
var file_google_ads_googleads_v2_services_geographic_view_service_proto_depIdxs = []int32{
0, // 0: google.ads.googleads.v2.services.GeographicViewService.GetGeographicView:input_type -> google.ads.googleads.v2.services.GetGeographicViewRequest
1, // 1: google.ads.googleads.v2.services.GeographicViewService.GetGeographicView:output_type -> google.ads.googleads.v2.resources.GeographicView
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_google_ads_googleads_v2_services_geographic_view_service_proto_init() }
func file_google_ads_googleads_v2_services_geographic_view_service_proto_init() {
if File_google_ads_googleads_v2_services_geographic_view_service_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_google_ads_googleads_v2_services_geographic_view_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetGeographicViewRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_ads_googleads_v2_services_geographic_view_service_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_google_ads_googleads_v2_services_geographic_view_service_proto_goTypes,
DependencyIndexes: file_google_ads_googleads_v2_services_geographic_view_service_proto_depIdxs,
MessageInfos: file_google_ads_googleads_v2_services_geographic_view_service_proto_msgTypes,
}.Build()
File_google_ads_googleads_v2_services_geographic_view_service_proto = out.File
file_google_ads_googleads_v2_services_geographic_view_service_proto_rawDesc = nil
file_google_ads_googleads_v2_services_geographic_view_service_proto_goTypes = nil
file_google_ads_googleads_v2_services_geographic_view_service_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// GeographicViewServiceClient is the client API for GeographicViewService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type GeographicViewServiceClient interface {
// Returns the requested geographic view in full detail.
GetGeographicView(ctx context.Context, in *GetGeographicViewRequest, opts ...grpc.CallOption) (*resources.GeographicView, error)
}
type geographicViewServiceClient struct {
cc grpc.ClientConnInterface
}
func NewGeographicViewServiceClient(cc grpc.ClientConnInterface) GeographicViewServiceClient {
return &geographicViewServiceClient{cc}
}
func (c *geographicViewServiceClient) GetGeographicView(ctx context.Context, in *GetGeographicViewRequest, opts ...grpc.CallOption) (*resources.GeographicView, error) {
out := new(resources.GeographicView)
err := c.cc.Invoke(ctx, "/google.ads.googleads.v2.services.GeographicViewService/GetGeographicView", in, out, opts...)
if err != nil |
return out, nil
}
// GeographicViewServiceServer is the server API for GeographicViewService service.
type GeographicViewServiceServer interface {
// Returns the requested geographic view in full detail.
GetGeographicView(context.Context, *GetGeographicViewRequest) (*resources.GeographicView, error)
}
// UnimplementedGeographicViewServiceServer can be embedded to have forward compatible implementations.
type UnimplementedGeographicViewServiceServer struct {
}
func (*UnimplementedGeographicViewServiceServer) GetGeographicView(context.Context, *GetGeographicViewRequest) (*resources.GeographicView, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetGeographicView not implemented")
}
func RegisterGeographicViewServiceServer(s *grpc.Server, srv GeographicViewServiceServer) {
s.RegisterService(&_GeographicViewService_serviceDesc, srv)
}
func _GeographicViewService_GetGeographicView_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetGeographicViewRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GeographicViewServiceServer).GetGeographicView(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.ads.googleads.v2.services.GeographicViewService/GetGeographicView",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GeographicViewServiceServer).GetGeographicView(ctx, req.(*GetGeographicViewRequest))
}
return interceptor(ctx, in, info, handler)
}
var _GeographicViewService_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.ads.googleads.v2.services.GeographicViewService",
HandlerType: (*GeographicViewServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetGeographicView",
Handler: _GeographicViewService_GetGeographicView_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/ads/googleads/v2/services/geographic_view_service.proto",
}
| {
return nil, err
} |
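// Usage sketch, not part of the generated file: conn is assumed to be an
// established *grpc.ClientConn, ctx a context.Context, and the resource name
// is illustrative; only NewGeographicViewServiceClient, GetGeographicViewRequest
// and GetGeographicView come from this file.
//
//	client := NewGeographicViewServiceClient(conn)
//	view, err := client.GetGeographicView(ctx, &GetGeographicViewRequest{
//		ResourceName: "customers/1234567890/geographicViews/2840~LOCATION_OF_PRESENCE",
//	})
//	if err != nil {
//		log.Fatalf("GetGeographicView failed: %v", err)
//	}
//	_ = view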
parent_notebook_request_builder.go | package parentnotebook
import (
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go"
iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242 "github.com/microsoftgraph/msgraph-sdk-go/models"
ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a "github.com/microsoftgraph/msgraph-sdk-go/models/odataerrors"
)
// ParentNotebookRequestBuilder provides operations to manage the parentNotebook property of the microsoft.graph.onenotePage entity.
type ParentNotebookRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string
// The request adapter to use to execute the requests.
requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter
// Url template to use to build the URL for the current request builder
urlTemplate string
}
// ParentNotebookRequestBuilderGetQueryParameters the notebook that contains the page. Read-only.
type ParentNotebookRequestBuilderGetQueryParameters struct {
// Expand related entities
Expand []string `uriparametername:"%24expand"`
// Select properties to be returned
Select []string `uriparametername:"%24select"`
}
// ParentNotebookRequestBuilderGetRequestConfiguration configuration for the request such as headers, query parameters, and middleware options.
type ParentNotebookRequestBuilderGetRequestConfiguration struct {
// Request headers
Headers map[string]string
// Request options
Options []i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestOption
// Request query parameters
QueryParameters *ParentNotebookRequestBuilderGetQueryParameters
}
// NewParentNotebookRequestBuilderInternal instantiates a new ParentNotebookRequestBuilder and sets the default values.
func NewParentNotebookRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*ParentNotebookRequestBuilder) {
m := &ParentNotebookRequestBuilder{
}
m.urlTemplate = "{+baseurl}/users/{user%2Did}/onenote/notebooks/{notebook%2Did}/sections/{onenoteSection%2Did}/pages/{onenotePage%2Did}/parentNotebook{?%24select,%24expand}";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
}
m.pathParameters = urlTplParams;
m.requestAdapter = requestAdapter;
return m
}
// NewParentNotebookRequestBuilder instantiates a new ParentNotebookRequestBuilder and sets the default values.
func NewParentNotebookRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*ParentNotebookRequestBuilder) |
// CreateGetRequestInformation the notebook that contains the page. Read-only.
func (m *ParentNotebookRequestBuilder) CreateGetRequestInformation()(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
return m.CreateGetRequestInformationWithRequestConfiguration(nil);
}
// CreateGetRequestInformationWithRequestConfiguration the notebook that contains the page. Read-only.
func (m *ParentNotebookRequestBuilder) CreateGetRequestInformationWithRequestConfiguration(requestConfiguration *ParentNotebookRequestBuilderGetRequestConfiguration)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
requestInfo := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.GET
if requestConfiguration != nil {
if requestConfiguration.QueryParameters != nil {
requestInfo.AddQueryParameters(*(requestConfiguration.QueryParameters))
}
requestInfo.AddRequestHeaders(requestConfiguration.Headers)
requestInfo.AddRequestOptions(requestConfiguration.Options)
}
return requestInfo, nil
}
// Get the notebook that contains the page. Read-only.
func (m *ParentNotebookRequestBuilder) Get()(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.Notebookable, error) {
return m.GetWithRequestConfigurationAndResponseHandler(nil, nil);
}
// GetWithRequestConfigurationAndResponseHandler the notebook that contains the page. Read-only.
func (m *ParentNotebookRequestBuilder) GetWithRequestConfigurationAndResponseHandler(requestConfiguration *ParentNotebookRequestBuilderGetRequestConfiguration, responseHandler i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ResponseHandler)(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.Notebookable, error) {
requestInfo, err := m.CreateGetRequestInformationWithRequestConfiguration(requestConfiguration);
if err != nil {
return nil, err
}
errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {
"4XX": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,
"5XX": ia572726a95efa92ddd544552cd950653dc691023836923576b2f4bf716cf204a.CreateODataErrorFromDiscriminatorValue,
}
res, err := m.requestAdapter.SendAsync(requestInfo, iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.CreateNotebookFromDiscriminatorValue, responseHandler, errorMapping)
if err != nil {
return nil, err
}
return res.(iadcd81124412c61e647227ecfc4449d8bba17de0380ddda76f641a29edf2b242.Notebookable), nil
}
| {
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewParentNotebookRequestBuilderInternal(urlParams, requestAdapter)
} |
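// Usage sketch, not part of the generated file: adapter is assumed to be a
// configured Kiota request adapter from the msgraph-sdk-go client; only the
// builder constructors and Get come from this file.
//
//	builder := NewParentNotebookRequestBuilder(rawUrl, adapter)
//	notebook, err := builder.Get() // issues the GET for the page's parent notebook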
user.entity.ts | import {
Entity,
BaseEntity,
PrimaryGeneratedColumn, | } from 'typeorm';
import { Exclude } from 'class-transformer';
@Entity({ name: 'users' })
export class User extends BaseEntity {
@PrimaryGeneratedColumn()
id: number;
@Column({
type: 'varchar',
length: 50,
unique: true,
})
username: string;
@Column({
type: 'varchar',
length: 100,
})
@Exclude()
password: string;
@Column({
type: 'varchar',
length: 100,
unique: true,
})
email: string;
@CreateDateColumn({
default: 'now()',
nullable: true,
})
@Exclude()
createdAt: string;
@UpdateDateColumn({
default: 'now()',
nullable: true,
})
@Exclude()
updatedAt: string;
} | Column,
CreateDateColumn,
UpdateDateColumn, |
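// Usage sketch (assumes an initialized TypeORM connection; hashedPassword is
// a placeholder, hash real passwords before persisting). BaseEntity provides
// the active-record helpers used below:
//   const user = User.create({ username: 'alice', password: hashedPassword, email: '[email protected]' });
//   await user.save();
//   const found = await User.findOne({ where: { username: 'alice' } });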
main.rs | use std::io::prelude::*;
use std::net::TcpListener;
use std::net::TcpStream;
fn | () {
// 1. Listen to 127.0.0.1 port 8888
let listener = TcpListener::bind("127.0.0.1:8888").unwrap();
println!("listening started, ready to accept");
// 2. For each incoming connection, match on Err if an error occurs; otherwise
// call the handle_connection function
for raw_stream in listener.incoming() {
match raw_stream {
// 2.1 Panic and log the error if accepting the client fails
Err(error) => panic!("Error happened while accepting client: {}", error),
// 2.2 Otherwise print welcome message and call handler
Ok(mut stream) => {
stream.write(b"Welcome to rust server!\r\n").unwrap();
handle_connection(stream);
}
}
}
}
// 3. Connection handler
fn handle_connection(mut stream: TcpStream) {
// 3.1 Define a buffer to hold client msg
let mut buffer = [0; 1024];
// 3.2 Read from client
stream.read(&mut buffer).unwrap();
// 3.3 Print message sent from client side
println!("Client message: {}", String::from_utf8_lossy(&buffer[..]));
// 3.4 Echo message to client
stream.write(b"Your message has been received!\r\n").unwrap();
} | main |
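// Quick manual test for the echo server above: run it, then in another
// terminal connect with `nc 127.0.0.1 8888`. A minimal Rust client sketch
// (assumes the server is listening):
//
//     let mut s = TcpStream::connect("127.0.0.1:8888").unwrap();
//     s.write(b"hello\r\n").unwrap();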
exercise040.py | nota1 = float(input('Digite a primeira nota: '))
nota2 = float(input('Digite a segunda nota: '))
media = (nota1+nota2)/2
if media >= 7:
print('Aprovado! Com média {:.2f}'.format(media))
elif media >= 5 and media < 7: | print('Recuperação! Com média {:.2f}'.format(media))
else:
print('Reprovado! Com média {:.2f}'.format(media)) |
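# Worked example: grades 6.0 and 7.0 average to 6.5, so the elif branch
# fires and the script prints "In recovery! With average 6.50".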
|
context.js | import $http from '@/libs/axios'
/* Query
* @id menuType
*/
export const contextMenu = (data) => {
return $http.request({
api: 'user',
url: '/context/enums/'+ data,
method: 'get'
})
};
/* Query | export const contextEnums = async (api) => {
let data;
await $http.request({
api: api,
url: '/context/enums',
method: 'get'
}).then(res => {
data = res
});
return data
};
/* Query
* @id menuType
*/
export const contextEnumsByApiType = async (api,type) => {
return $http.request({
api: api,
url: '/context/enums/' + type,
method: 'get'
})
}; | * @id menuType
*/ |
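// Usage sketch (assumes the $http axios wrapper resolves with the response
// body; 'user' is just the api key already used above):
//   const menu = await contextMenu('someMenuType');
//   const enums = await contextEnums('user');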
transaction_metadata.go | package hornet
import (
"encoding/binary"
"fmt"
"time"
"github.com/iotaledger/hive.go/bitmask"
"github.com/iotaledger/hive.go/objectstorage"
"github.com/iotaledger/hive.go/syncutils"
"github.com/gohornet/hornet/pkg/model/milestone"
)
const (
TransactionMetadataSolid = 0
TransactionMetadataConfirmed = 1
TransactionMetadataConflicting = 2
TransactionMetadataIsHead = 3
TransactionMetadataIsTail = 4
TransactionMetadataIsValue = 5
)
type TransactionMetadata struct {
objectstorage.StorableObjectFlags
syncutils.RWMutex
txHash Hash
// Metadata
metadata bitmask.BitMask
// Unix time when the Tx became solid (needed for local modifiers for tip selection)
solidificationTimestamp int32
// The index of the milestone which confirmed this tx
confirmationIndex milestone.Index
// youngestRootSnapshotIndex is the highest confirmed index of the past cone of this transaction
youngestRootSnapshotIndex milestone.Index
// oldestRootSnapshotIndex is the lowest confirmed index of the past cone of this transaction
oldestRootSnapshotIndex milestone.Index
// rootSnapshotCalculationIndex is the solid index yrtsi and ortsi were calculated at
rootSnapshotCalculationIndex milestone.Index
// trunkHash is the trunk of the transaction
trunkHash Hash
// branchHash is the branch of the transaction
branchHash Hash
// bundleHash is the bundle of the transaction
bundleHash Hash
}
func NewTransactionMetadata(txHash Hash) *TransactionMetadata |
func (m *TransactionMetadata) GetTxHash() Hash {
return m.txHash
}
func (m *TransactionMetadata) GetTrunkHash() Hash {
return m.trunkHash
}
func (m *TransactionMetadata) GetBranchHash() Hash {
return m.branchHash
}
func (m *TransactionMetadata) GetBundleHash() Hash {
return m.bundleHash
}
func (m *TransactionMetadata) IsTail() bool {
m.RLock()
defer m.RUnlock()
return m.metadata.HasFlag(TransactionMetadataIsTail)
}
func (m *TransactionMetadata) IsHead() bool {
m.RLock()
defer m.RUnlock()
return m.metadata.HasFlag(TransactionMetadataIsHead)
}
func (m *TransactionMetadata) IsValue() bool {
m.RLock()
defer m.RUnlock()
return m.metadata.HasFlag(TransactionMetadataIsValue)
}
func (m *TransactionMetadata) GetSolidificationTimestamp() int32 {
m.RLock()
defer m.RUnlock()
return m.solidificationTimestamp
}
func (m *TransactionMetadata) IsSolid() bool {
m.RLock()
defer m.RUnlock()
return m.metadata.HasFlag(TransactionMetadataSolid)
}
func (m *TransactionMetadata) SetSolid(solid bool) {
m.Lock()
defer m.Unlock()
if solid != m.metadata.HasFlag(TransactionMetadataSolid) {
if solid {
m.solidificationTimestamp = int32(time.Now().Unix())
} else {
m.solidificationTimestamp = 0
}
m.metadata = m.metadata.ModifyFlag(TransactionMetadataSolid, solid)
m.SetModified(true)
}
}
func (m *TransactionMetadata) IsConfirmed() bool {
m.RLock()
defer m.RUnlock()
return m.metadata.HasFlag(TransactionMetadataConfirmed)
}
func (m *TransactionMetadata) GetConfirmed() (bool, milestone.Index) {
m.RLock()
defer m.RUnlock()
return m.metadata.HasFlag(TransactionMetadataConfirmed), m.confirmationIndex
}
func (m *TransactionMetadata) SetConfirmed(confirmed bool, confirmationIndex milestone.Index) {
m.Lock()
defer m.Unlock()
if confirmed != m.metadata.HasFlag(TransactionMetadataConfirmed) {
if confirmed {
m.confirmationIndex = confirmationIndex
} else {
m.confirmationIndex = 0
}
m.metadata = m.metadata.ModifyFlag(TransactionMetadataConfirmed, confirmed)
m.SetModified(true)
}
}
func (m *TransactionMetadata) IsConflicting() bool {
m.RLock()
defer m.RUnlock()
return m.metadata.HasFlag(TransactionMetadataConflicting)
}
func (m *TransactionMetadata) SetConflicting(conflicting bool) {
m.Lock()
defer m.Unlock()
if conflicting != m.metadata.HasFlag(TransactionMetadataConflicting) {
m.metadata = m.metadata.ModifyFlag(TransactionMetadataConflicting, conflicting)
m.SetModified(true)
}
}
func (m *TransactionMetadata) SetRootSnapshotIndexes(yrtsi milestone.Index, ortsi milestone.Index, rtsci milestone.Index) {
m.Lock()
defer m.Unlock()
m.youngestRootSnapshotIndex = yrtsi
m.oldestRootSnapshotIndex = ortsi
m.rootSnapshotCalculationIndex = rtsci
m.SetModified(true)
}
func (m *TransactionMetadata) GetRootSnapshotIndexes() (yrtsi milestone.Index, ortsi milestone.Index, rtsci milestone.Index) {
m.RLock()
defer m.RUnlock()
return m.youngestRootSnapshotIndex, m.oldestRootSnapshotIndex, m.rootSnapshotCalculationIndex
}
func (m *TransactionMetadata) SetAdditionalTxInfo(trunkHash Hash, branchHash Hash, bundleHash Hash, isHead bool, isTail bool, isValue bool) {
m.Lock()
defer m.Unlock()
m.trunkHash = trunkHash
m.branchHash = branchHash
m.bundleHash = bundleHash
m.metadata = m.metadata.ModifyFlag(TransactionMetadataIsHead, isHead).ModifyFlag(TransactionMetadataIsTail, isTail).ModifyFlag(TransactionMetadataIsValue, isValue)
m.SetModified(true)
}
func (m *TransactionMetadata) GetMetadata() byte {
m.RLock()
defer m.RUnlock()
return byte(m.metadata)
}
// ObjectStorage interface
func (m *TransactionMetadata) Update(_ objectstorage.StorableObject) {
panic(fmt.Sprintf("TransactionMetadata should never be updated: %v", m.txHash.Trytes()))
}
func (m *TransactionMetadata) ObjectStorageKey() []byte {
return m.txHash
}
func (m *TransactionMetadata) ObjectStorageValue() (data []byte) {
m.Lock()
defer m.Unlock()
/*
1 byte metadata bitmask
4 bytes uint32 solidificationTimestamp
4 bytes uint32 confirmationIndex
4 bytes uint32 youngestRootSnapshotIndex
4 bytes uint32 oldestRootSnapshotIndex
4 bytes uint32 rootSnapshotCalculationIndex
49 bytes hash trunk
49 bytes hash branch
49 bytes hash bundle
*/
value := make([]byte, 21)
value[0] = byte(m.metadata)
binary.LittleEndian.PutUint32(value[1:], uint32(m.solidificationTimestamp))
binary.LittleEndian.PutUint32(value[5:], uint32(m.confirmationIndex))
binary.LittleEndian.PutUint32(value[9:], uint32(m.youngestRootSnapshotIndex))
binary.LittleEndian.PutUint32(value[13:], uint32(m.oldestRootSnapshotIndex))
binary.LittleEndian.PutUint32(value[17:], uint32(m.rootSnapshotCalculationIndex))
value = append(value, m.trunkHash...)
value = append(value, m.branchHash...)
value = append(value, m.bundleHash...)
return value
}
func (m *TransactionMetadata) UnmarshalObjectStorageValue(data []byte) (consumedBytes int, err error) {
m.Lock()
defer m.Unlock()
/*
1 byte metadata bitmask
4 bytes uint32 solidificationTimestamp
4 bytes uint32 confirmationIndex
4 bytes uint32 youngestRootSnapshotIndex
4 bytes uint32 oldestRootSnapshotIndex
4 bytes uint32 rootSnapshotCalculationIndex
49 bytes hash trunk
49 bytes hash branch
49 bytes hash bundle
*/
m.metadata = bitmask.BitMask(data[0])
m.solidificationTimestamp = int32(binary.LittleEndian.Uint32(data[1:5]))
m.confirmationIndex = milestone.Index(binary.LittleEndian.Uint32(data[5:9]))
m.youngestRootSnapshotIndex = milestone.Index(binary.LittleEndian.Uint32(data[9:13]))
m.oldestRootSnapshotIndex = milestone.Index(binary.LittleEndian.Uint32(data[13:17]))
m.rootSnapshotCalculationIndex = 0
if len(data) > 17 {
// ToDo: Remove at next DbVersion update
m.rootSnapshotCalculationIndex = milestone.Index(binary.LittleEndian.Uint32(data[17:21]))
if len(data) == 21+49+49+49 {
m.trunkHash = Hash(data[21 : 21+49])
m.branchHash = Hash(data[21+49 : 21+49+49])
m.bundleHash = Hash(data[21+49+49 : 21+49+49+49])
}
}
return len(data), nil
}
| {
return &TransactionMetadata{
txHash: txHash,
}
} |
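// Serialization sketch matching the layout documented in ObjectStorageValue:
// the fixed fields take 21 bytes, and each of the three hashes adds 49 bytes
// once SetAdditionalTxInfo has been called (168 bytes in total).
//
//	m := NewTransactionMetadata(txHash)
//	m.SetSolid(true)
//	data := m.ObjectStorageValue() // len(data) == 21 while the hashes are unset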
dataReader.py | import os
import config
import json
import tensorflow as tf
import numpy as np
from collections import defaultdict
class Reader:
def __init__(self, mode, data_dir, anchors_path, num_classes, tfrecord_num = 12, input_shape = 416, max_boxes = 20):
"""
Introduction
------------
Constructor.
Parameters
----------
data_dir: path to the data files
mode: dataset mode (train or val)
anchors: anchors obtained by clustering the dataset
num_classes: number of image classes in the dataset
input_shape: size of the images fed into the model
max_boxes: maximum number of boxes per image
jitter: random aspect-ratio factor
hue: HSV hue adjustment factor
sat: saturation adjustment factor
cont: contrast adjustment factor
bri: brightness adjustment factor
"""
self.data_dir = data_dir
self.input_shape = input_shape
self.max_boxes = max_boxes
self.mode = mode
self.annotations_file = {'train' : config.train_annotations_file, 'val' : config.val_annotations_file}
self.data_file = {'train': config.train_data_file, 'val': config.val_data_file}
self.anchors_path = anchors_path | self.class_names = self._get_class(config.classes_path)
if len(self.TfrecordFile) == 0:
self.convert_to_tfrecord(self.data_dir, tfrecord_num)
def _get_anchors(self):
"""
Introduction
------------
Load the anchors.
Returns
-------
anchors: array of anchors
"""
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def _get_class(self, classes_path):
"""
Introduction
------------
Load the class names.
Returns
-------
class_names: the names of the COCO dataset categories
"""
classes_path = os.path.expanduser(classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def Preprocess_true_boxes(self, true_boxes):
"""
Introduction
------------
Preprocess the ground-truth boxes of the training data.
Parameters
----------
true_boxes: ground-truth boxes with shape [boxes, 5]: x_min, y_min, x_max, y_max, class_id
"""
num_layers = len(self.anchors) // 3
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
true_boxes = np.array(true_boxes, dtype='float32')
input_shape = np.array([self.input_shape, self.input_shape], dtype='int32')
boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2.
boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]
true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]
grid_shapes = [input_shape // 32, input_shape // 16, input_shape // 8]
y_true = [np.zeros((grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 5 + self.num_classes), dtype='float32') for l in range(num_layers)]
# Expand dims here so that broadcasting can later compute the IoU between every box and every anchor in each image
anchors = np.expand_dims(self.anchors, 0)
anchors_max = anchors / 2.
anchors_min = -anchors_max
# The boxes were padded earlier, so the all-zero rows must be removed
valid_mask = boxes_wh[..., 0] > 0
wh = boxes_wh[valid_mask]
# Expand dims to enable broadcasting
wh = np.expand_dims(wh, -2)
# wh has shape [box_num, 1, 2]
boxes_max = wh / 2.
boxes_min = -boxes_max
intersect_min = np.maximum(boxes_min, anchors_min)
intersect_max = np.minimum(boxes_max, anchors_max)
intersect_wh = np.maximum(intersect_max - intersect_min, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchors[..., 0] * anchors[..., 1]
iou = intersect_area / (box_area + anchor_area - intersect_area)
# Find the anchor box with the highest IoU for each ground-truth box, then write the ground-truth box coordinates into the cell responsible for it at the matching scale
best_anchor = np.argmax(iou, axis = -1)
for t, n in enumerate(best_anchor):
for l in range(num_layers):
if n in anchor_mask[l]:
i = np.floor(true_boxes[t, 0] * grid_shapes[l][1]).astype('int32')
j = np.floor(true_boxes[t, 1] * grid_shapes[l][0]).astype('int32')
k = anchor_mask[l].index(n)
c = true_boxes[t, 4].astype('int32')
y_true[l][j, i, k, 0:4] = true_boxes[t, 0:4]
y_true[l][j, i, k, 4] = 1.
y_true[l][j, i, k, 5 + c] = 1.
return y_true[0], y_true[1], y_true[2]
def read_annotations(self):
"""
Introduction
------------
Read the COCO image paths and their corresponding annotations.
Parameters
----------
data_file: path to the data files
"""
image_data = []
boxes_data = []
name_box_id = defaultdict(list)
with open(self.annotations_file[self.mode], encoding='utf-8') as file:
data = json.load(file)
annotations = data['annotations']
for ant in annotations:
id = ant['image_id']
name = os.path.join(self.data_file[self.mode], '%012d.jpg' % id)
cat = ant['category_id']
if cat >= 1 and cat <= 11:
cat = cat - 1
elif cat >= 13 and cat <= 25:
cat = cat - 2
elif cat >= 27 and cat <= 28:
cat = cat - 3
elif cat >= 31 and cat <= 44:
cat = cat - 5
elif cat >= 46 and cat <= 65:
cat = cat - 6
elif cat == 67:
cat = cat - 7
elif cat == 70:
cat = cat - 9
elif cat >= 72 and cat <= 82:
cat = cat - 10
elif cat >= 84 and cat <= 90:
cat = cat - 11
name_box_id[name].append([ant['bbox'], cat])
for key in name_box_id.keys():
boxes = []
image_data.append(key)
box_infos = name_box_id[key]
for info in box_infos:
x_min = info[0][0]
y_min = info[0][1]
x_max = x_min + info[0][2]
y_max = y_min + info[0][3]
boxes.append(np.array([x_min, y_min, x_max, y_max, info[1]]))
boxes_data.append(np.array(boxes))
return image_data, boxes_data
def convert_to_tfrecord(self, tfrecord_path, num_tfrecords):
"""
Introduction
------------
Store the images and box data as TFRecords.
Parameters
----------
tfrecord_path: directory where the TFRecord files are stored
num_tfrecords: how many TFRecord shards to split the data into
"""
image_data, boxes_data = self.read_annotations()
images_num = int(len(image_data) / num_tfrecords)
for index_records in range(num_tfrecords):
output_file = os.path.join(tfrecord_path, str(index_records) + '_' + self.mode + '.tfrecords')
with tf.python_io.TFRecordWriter(output_file) as record_writer:
for index in range(index_records * images_num, (index_records + 1) * images_num):
with tf.gfile.FastGFile(image_data[index], 'rb') as file:
image = file.read()
xmin, xmax, ymin, ymax, label = [], [], [], [], []
for box in boxes_data[index]:
xmin.append(box[0])
ymin.append(box[1])
xmax.append(box[2])
ymax.append(box[3])
label.append(box[4])
example = tf.train.Example(features = tf.train.Features(
feature = {
'image/encoded' : tf.train.Feature(bytes_list = tf.train.BytesList(value = [image])),
'image/object/bbox/xmin' : tf.train.Feature(float_list = tf.train.FloatList(value = xmin)),
'image/object/bbox/xmax': tf.train.Feature(float_list = tf.train.FloatList(value = xmax)),
'image/object/bbox/ymin': tf.train.Feature(float_list = tf.train.FloatList(value = ymin)),
'image/object/bbox/ymax': tf.train.Feature(float_list = tf.train.FloatList(value = ymax)),
'image/object/bbox/label': tf.train.Feature(float_list = tf.train.FloatList(value = label)),
}
))
record_writer.write(example.SerializeToString())
if index % 1000 == 0:
print('Processed {} of {} images'.format(index + 1, len(image_data)))
def parser(self, serialized_example):
"""
Introduction
------------
Parse a serialized TFRecord example.
Parameters
----------
serialized_example: one serialized record
"""
features = tf.parse_single_example(
serialized_example,
features = {
'image/encoded' : tf.FixedLenFeature([], dtype = tf.string),
'image/object/bbox/xmin' : tf.VarLenFeature(dtype = tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype = tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype = tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype = tf.float32),
'image/object/bbox/label': tf.VarLenFeature(dtype = tf.float32)
}
)
image = tf.image.decode_jpeg(features['image/encoded'], channels = 3)
image = tf.image.convert_image_dtype(image, tf.uint8)
xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, axis = 0)
ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, axis = 0)
xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, axis = 0)
ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, axis = 0)
label = tf.expand_dims(features['image/object/bbox/label'].values, axis = 0)
bbox = tf.concat(axis = 0, values = [xmin, ymin, xmax, ymax, label])
bbox = tf.transpose(bbox, [1, 0])
image, bbox = self.Preprocess(image, bbox)
bbox_true_13, bbox_true_26, bbox_true_52 = tf.py_func(self.Preprocess_true_boxes, [bbox], [tf.float32, tf.float32, tf.float32])
return image, bbox, bbox_true_13, bbox_true_26, bbox_true_52
def Preprocess(self, image, bbox):
"""
Introduction
------------
Preprocess the image and augment the dataset.
Parameters
----------
image: the image decoded by TensorFlow
bbox: the box coordinates in the image
"""
image_width, image_high = tf.cast(tf.shape(image)[1], tf.float32), tf.cast(tf.shape(image)[0], tf.float32)
input_width = tf.cast(self.input_shape, tf.float32)
input_high = tf.cast(self.input_shape, tf.float32)
new_high = image_high * tf.minimum(input_width / image_width, input_high / image_high)
new_width = image_width * tf.minimum(input_width / image_width, input_high / image_high)
# Scale the image with a fixed aspect ratio and pad the rest
dx = (input_width - new_width) / 2
dy = (input_high - new_high) / 2
image = tf.image.resize_images(image, [tf.cast(new_high, tf.int32), tf.cast(new_width, tf.int32)], method = tf.image.ResizeMethod.BICUBIC)
new_image = tf.image.pad_to_bounding_box(image, tf.cast(dy, tf.int32), tf.cast(dx, tf.int32), tf.cast(input_high, tf.int32), tf.cast(input_width, tf.int32))
image_ones = tf.ones_like(image)
image_ones_padded = tf.image.pad_to_bounding_box(image_ones, tf.cast(dy, tf.int32), tf.cast(dx, tf.int32), tf.cast(input_high, tf.int32), tf.cast(input_width, tf.int32))
image_color_padded = (1 - image_ones_padded) * 128
image = image_color_padded + new_image
# Correct the bbox coordinates
xmin, ymin, xmax, ymax, label = tf.split(value = bbox, num_or_size_splits=5, axis = 1)
xmin = xmin * new_width / image_width + dx
xmax = xmax * new_width / image_width + dx
ymin = ymin * new_high / image_high + dy
ymax = ymax * new_high / image_high + dy
bbox = tf.concat([xmin, ymin, xmax, ymax, label], 1)
if self.mode == 'train':
# Randomly flip the image horizontally
def _flip_left_right_boxes(boxes):
xmin, ymin, xmax, ymax, label = tf.split(value = boxes, num_or_size_splits = 5, axis = 1)
flipped_xmin = tf.subtract(input_width, xmax)
flipped_xmax = tf.subtract(input_width, xmin)
flipped_boxes = tf.concat([flipped_xmin, ymin, flipped_xmax, ymax, label], 1)
return flipped_boxes
flip_left_right = tf.greater(tf.random_uniform([], dtype = tf.float32, minval = 0, maxval = 1), 0.5)
image = tf.cond(flip_left_right, lambda : tf.image.flip_left_right(image), lambda : image)
bbox = tf.cond(flip_left_right, lambda: _flip_left_right_boxes(bbox), lambda: bbox)
# Normalize the image to the range [0, 1]
image = image / 255.
image = tf.clip_by_value(image, clip_value_min = 0.0, clip_value_max = 1.0)
bbox = tf.clip_by_value(bbox, clip_value_min = 0, clip_value_max = input_width - 1)
bbox = tf.cond(tf.greater(tf.shape(bbox)[0], config.max_boxes), lambda: bbox[:config.max_boxes], lambda: tf.pad(bbox, paddings = [[0, config.max_boxes - tf.shape(bbox)[0]], [0, 0]], mode = 'CONSTANT'))
return image, bbox
def build_dataset(self, batch_size):
"""
Introduction
------------
Build the tf.data dataset.
Parameters
----------
batch_size: the batch size
Return
------
dataset: the TensorFlow dataset
"""
dataset = tf.data.TFRecordDataset(filenames = self.TfrecordFile)
dataset = dataset.map(self.parser, num_parallel_calls = 10)
if self.mode == 'train':
dataset = dataset.repeat().shuffle(9000).batch(batch_size).prefetch(batch_size)
else:
dataset = dataset.repeat().batch(batch_size).prefetch(batch_size)
return dataset | self.anchors = self._get_anchors()
self.num_classes = num_classes
file_pattern = self.data_dir + "/*" + self.mode + '.tfrecords'
self.TfrecordFile = tf.gfile.Glob(file_pattern) |
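# Usage sketch (assumes config.py provides the referenced paths; TF1-era API):
#   reader = Reader('train', config.data_dir, config.anchors_path, num_classes=80)
#   dataset = reader.build_dataset(batch_size=8)
#   image, bbox, y13, y26, y52 = dataset.make_one_shot_iterator().get_next()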
import.rs | use crate::object::PyObject;
use std::os::raw::{c_char, c_int, c_long};
extern "C" {
pub fn PyImport_GetMagicNumber() -> c_long;
pub fn PyImport_GetMagicTag() -> *const c_char;
#[cfg_attr(PyPy, link_name = "PyPyImport_ExecCodeModule")]
pub fn PyImport_ExecCodeModule(name: *const c_char, co: *mut PyObject) -> *mut PyObject;
#[cfg_attr(PyPy, link_name = "PyPyImport_ExecCodeModuleEx")]
pub fn PyImport_ExecCodeModuleEx(
name: *const c_char,
co: *mut PyObject,
pathname: *const c_char,
) -> *mut PyObject;
pub fn PyImport_ExecCodeModuleWithPathnames(
name: *const c_char,
co: *mut PyObject,
pathname: *const c_char,
cpathname: *const c_char,
) -> *mut PyObject;
pub fn PyImport_ExecCodeModuleObject(
name: *mut PyObject,
co: *mut PyObject,
pathname: *mut PyObject,
cpathname: *mut PyObject,
) -> *mut PyObject;
#[cfg_attr(PyPy, link_name = "PyPyImport_GetModuleDict")]
pub fn PyImport_GetModuleDict() -> *mut PyObject;
// skipped Python 3.7 / ex-non-limited PyImport_GetModule
pub fn PyImport_AddModuleObject(name: *mut PyObject) -> *mut PyObject;
#[cfg_attr(PyPy, link_name = "PyPyImport_AddModule")]
pub fn PyImport_AddModule(name: *const c_char) -> *mut PyObject;
#[cfg_attr(PyPy, link_name = "PyPyImport_ImportModule")]
pub fn PyImport_ImportModule(name: *const c_char) -> *mut PyObject;
#[cfg_attr(PyPy, link_name = "PyPyImport_ImportModuleNoBlock")]
pub fn PyImport_ImportModuleNoBlock(name: *const c_char) -> *mut PyObject;
#[cfg_attr(PyPy, link_name = "PyPyImport_ImportModuleLevel")]
pub fn PyImport_ImportModuleLevel(
name: *const c_char,
globals: *mut PyObject,
locals: *mut PyObject,
fromlist: *mut PyObject,
level: c_int,
) -> *mut PyObject;
#[cfg_attr(PyPy, link_name = "PyPyImport_ImportModuleLevelObject")]
pub fn PyImport_ImportModuleLevelObject(
name: *mut PyObject,
globals: *mut PyObject,
locals: *mut PyObject,
fromlist: *mut PyObject,
level: c_int,
) -> *mut PyObject;
}
#[inline]
pub unsafe fn PyImport_ImportModuleEx(
name: *const c_char,
globals: *mut PyObject,
locals: *mut PyObject,
fromlist: *mut PyObject,
) -> *mut PyObject |
extern "C" {
pub fn PyImport_GetImporter(path: *mut PyObject) -> *mut PyObject;
#[cfg_attr(PyPy, link_name = "PyPyImport_Import")]
pub fn PyImport_Import(name: *mut PyObject) -> *mut PyObject;
#[cfg_attr(PyPy, link_name = "PyPyImport_ReloadModule")]
pub fn PyImport_ReloadModule(m: *mut PyObject) -> *mut PyObject;
#[cfg(not(Py_3_9))]
#[deprecated(note = "Removed in Python 3.9 as it was \"For internal use only\".")]
pub fn PyImport_Cleanup();
pub fn PyImport_ImportFrozenModuleObject(name: *mut PyObject) -> c_int;
pub fn PyImport_ImportFrozenModule(name: *const c_char) -> c_int;
pub fn PyImport_AppendInittab(
name: *const c_char,
initfunc: Option<extern "C" fn() -> *mut PyObject>,
) -> c_int;
}
| {
PyImport_ImportModuleLevel(name, globals, locals, fromlist, 0)
} |
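// Usage sketch (assumes the interpreter is initialized and the GIL is held;
// these raw CPython calls are inherently unsafe):
//
//     let name = std::ffi::CString::new("math").unwrap();
//     let module = unsafe { PyImport_ImportModule(name.as_ptr()) };
//     assert!(!module.is_null());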
encrypt.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Name: encrypt.py
Author: Wesley Lee - [email protected]
Assignment: Introduction to Cryptography Project
Date: 04-20-2018
Description:
Encrypts a file using a symmetric-key algorithm based on a special class of graphs.
"""
from time import *
from getA import *
# Initializes the matrix L_1
# plaintext: contents of the plaintext file
# password: password used to encrypt the file
def encrypt_init(plaintext, password):
plaintextASCII = [ord(a) for a in plaintext]
asciiPW = [ord(b) for b in password]
A = getA(asciiPW, plaintextASCII)
L_1 = matmul(A, plaintextASCII)
newL_1 = [int(a) % 127 for a in L_1]
return newL_1, asciiPW
# Encrypts the plaintext
# plaintext: contents of the plaintext file
# password: password used to encrypt the file
def | (plaintext, password):
# Starts Timer
startTime = time()
# Initializes
p, t= encrypt_init(plaintext, password)
n = len(p)
l = list(range(n))
prime = 127
r = 0
# Symmetric-key algorithm based on the central map created
# F(p) = ciphertext
for i in range(0, len(password)):
if r == 0:
l[0] = (p[0] + t[i]) % prime
l[1] = (p[1] + p[0] * l[0]) % prime
l[2] = (p[2] + p[0] * l[1]) % prime
for j in range(3, n):
if (j % 4) == 1 or (j % 4) == 2:
l[j] = (p[j] + p[0] * l[j - 2]) % prime
else:
l[j] = (p[j] + p[j - 2] * l[0]) % prime
r = 1
else:
p[0] = (l[0] + t[i]) % prime
p[1] = (l[1] - p[0] * l[0]) % prime
p[2] = (l[2] - p[0] * l[1]) % prime
for j in range(3, n):
if (j % 4) == 1 or (j % 4) == 2:
p[j] = (l[j] - p[0] * l[j - 2]) % prime
else:
p[j] = (l[j] - p[j - 2] * l[0]) % prime
r = 0
# Stops Timer
finishTime = time()
timeTaken = '%f' % (finishTime - startTime)
if r == 0:
return p, str(timeTaken)
elif r == 1:
return l, str(timeTaken)
| encrypt |
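# Usage sketch (getA() and matmul() are assumed to come from the sibling getA
# module imported above; the inputs are illustrative):
#   ciphertext, elapsed = encrypt('attack at dawn', 'hunter2')
#   print(ciphertext, 'in', elapsed, 'seconds')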
multi_fetch_tags_result.go | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package m3
import (
"sync"
"github.com/m3db/m3/src/dbnode/client"
xerrors "github.com/m3db/m3x/errors"
)
const initSize = 10
type multiSearchResult struct {
sync.Mutex
seenIters []client.TaggedIDsIterator // track known iterators to avoid leaking
dedupeMap map[string]MultiTagResult
err xerrors.MultiError
}
// NewMultiFetchTagsResult builds a new multi fetch tags result
func NewMultiFetchTagsResult() MultiFetchTagsResult |
func (r *multiSearchResult) Close() error {
r.Lock()
defer r.Unlock()
for _, iters := range r.seenIters {
iters.Finalize()
}
r.seenIters = nil
r.dedupeMap = nil
r.err = xerrors.NewMultiError()
return nil
}
func (r *multiSearchResult) FinalResult() ([]MultiTagResult, error) {
r.Lock()
defer r.Unlock()
err := r.err.FinalError()
if err != nil {
return nil, err
}
result := make([]MultiTagResult, 0, len(r.dedupeMap))
for _, it := range r.dedupeMap {
result = append(result, it)
}
return result, nil
}
func (r *multiSearchResult) Add(
newIterator client.TaggedIDsIterator,
err error,
) {
r.Lock()
defer r.Unlock()
if err != nil {
r.err = r.err.Add(err)
return
}
r.seenIters = append(r.seenIters, newIterator)
// Need to check the error to bail early only after accumulating the iterators;
// otherwise, when we close the multi fetch result, untracked iterators would leak.
if !r.err.Empty() {
// don't need to do anything if the final result is going to be an error
return
}
for newIterator.Next() {
_, ident, tagIter := newIterator.Current()
id := ident.String()
_, exists := r.dedupeMap[id]
if !exists {
r.dedupeMap[id] = MultiTagResult{
ID: ident,
Iter: tagIter.Duplicate(),
}
}
}
if err := newIterator.Err(); err != nil {
r.err = r.err.Add(err)
}
}
| {
return &multiSearchResult{
dedupeMap: make(map[string]MultiTagResult, initSize),
seenIters: make([]client.TaggedIDsIterator, 0, initSize),
}
} |
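// Usage sketch: the iterators are assumed to come from m3 client fetch-tagged
// calls; only the methods defined in this file are used.
//
//	r := NewMultiFetchTagsResult()
//	r.Add(iter, err) // once per completed fetch
//	results, err := r.FinalResult()
//	defer r.Close() // finalizes every accumulated iterator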
1.py | # Question - http://www.pythonchallenge.com/pc/def/map.html
# Thought Process - | # ROT2 Shift answer - i hope you didnt translate it by hand. thats what computers are for. doing it in by hand is inefficient and that's why this text is so long. using string.maketrans() is recommended. now apply on the url.
# ROT2 on map - ocr
# used https://planetcalc.com/1434/ for directly calculating the ROT
# solution - http://www.pythonchallenge.com/pc/def/ocr.html | # It's a Caesar cipher with ROT2, as the image shows the
# alphabet shifting
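# The hinted str.maketrans route (Python 3 replacement for the
# string.maketrans mentioned in the answer text):
#   import string
#   lower = string.ascii_lowercase
#   table = str.maketrans(lower, lower[2:] + lower[:2])
#   print('map'.translate(table))  # -> 'ocr'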
|
pythonExe16.py | #Explicit function
def digitSum(n):
|
#Initializing list
List=[367,111,562,945,6726,873]
#Using the function on the odd elements of the list
newList=[digitSum(i) for i in List if i & 1]
print(newList) | dsum=0
for ele in str(n):
dsum += int(ele)
return dsum |
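# Worked example: i & 1 keeps the odd elements 367, 111, 945 and 873,
# whose digit sums are 16, 3, 18 and 18, so newList == [16, 3, 18, 18].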
machines.go | package api
import (
"context"
"crypto/rand"
"github.com/manifoldco/go-base64"
"github.com/manifoldco/torus-cli/apitypes"
"github.com/manifoldco/torus-cli/identity"
"github.com/manifoldco/torus-cli/registry"
)
const tokenSecretSize = 18
// MachinesClient makes requests to the Daemon on behalf of the user to
// manipulate Machine resources.
type MachinesClient struct {
*registry.MachinesClient
client *apiRoundTripper
}
func newMachinesClient(upstream *registry.MachinesClient, rt *apiRoundTripper) *MachinesClient {
return &MachinesClient{upstream, rt}
}
// Create a new machine in the given org
func (m *MachinesClient) Create(ctx context.Context, orgID, teamID *identity.ID,
name string, output ProgressFunc) (*apitypes.MachineSegment, *base64.Value, error) {
secret, err := createTokenSecret()
if err != nil {
return nil, nil, err
}
mcr := apitypes.MachinesCreateRequest{
Name: name,
OrgID: orgID,
TeamID: teamID,
Secret: secret,
}
result := &apitypes.MachineSegment{}
err = m.client.DaemonRoundTrip(ctx, "POST", "/machines", nil, &mcr, &result, output)
return result, secret, err
}
func createTokenSecret() (*base64.Value, error) {
value := make([]byte, tokenSecretSize)
_, err := rand.Read(value)
if err != nil |
return base64.New(value), nil
}
| {
return nil, err
} |
lib.rs | extern crate proc_macro;
use proc_macro::TokenStream;
#[proc_macro]
pub fn get_enabled_features(_cargo_file: TokenStream) -> TokenStream {
//https://www.reddit.com/r/rust/comments/834g53/list_of_conditional_compilation_features_at/
let features_gen_code = r#"
let enabled_features = (|| -> Vec<&'static str>{
macro_rules! make_feat_list {
($($feat:expr),*) => {
vec![
$(
#[cfg(feature = $feat)]
$feat,
)*
]
}
}
make_feat_list!(FEATURES)
})();
"#;
let mut args = std::env::args().skip_while(|val| !val.starts_with("--manifest-path"));
let mut cmd = cargo_metadata::MetadataCommand::new();
match args.next() {
Some(ref p) if p == "--manifest-path" => {
cmd.manifest_path(args.next().unwrap());
}
Some(p) => {
cmd.manifest_path(p.trim_start_matches("--manifest-path="));
}
None => {}
};
let metadata = cmd
.features(cargo_metadata::CargoOpt::AllFeatures)
.exec()
.unwrap(); | metadata
.root_package()
.unwrap()
.features
.keys()
.cloned()
.collect::<Vec<String>>()
);
// Without the leading '[' and the trailing ']'
let slice_features = &root_package_features[1..root_package_features.len() - 1];
let features_gen_code = features_gen_code.replace("FEATURES", slice_features);
features_gen_code.parse().unwrap()
} |
//Get string of features
let root_package_features = format!(
"{:?}", |
interface.go | /*
Copyright 2022 Rancher Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by main. DO NOT EDIT.
package v3
import (
"github.com/rancher/lasso/pkg/controller"
v3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
"github.com/rancher/wrangler/pkg/schemes"
"k8s.io/apimachinery/pkg/runtime/schema"
)
func init() |
type Interface interface {
APIService() APIServiceController
ActiveDirectoryProvider() ActiveDirectoryProviderController
AuthConfig() AuthConfigController
AuthProvider() AuthProviderController
AuthToken() AuthTokenController
AzureADProvider() AzureADProviderController
Catalog() CatalogController
CatalogTemplate() CatalogTemplateController
CatalogTemplateVersion() CatalogTemplateVersionController
CisBenchmarkVersion() CisBenchmarkVersionController
CisConfig() CisConfigController
CloudCredential() CloudCredentialController
Cluster() ClusterController
ClusterAlert() ClusterAlertController
ClusterAlertGroup() ClusterAlertGroupController
ClusterAlertRule() ClusterAlertRuleController
ClusterCatalog() ClusterCatalogController
ClusterLogging() ClusterLoggingController
ClusterMonitorGraph() ClusterMonitorGraphController
ClusterRegistrationToken() ClusterRegistrationTokenController
ClusterRoleTemplateBinding() ClusterRoleTemplateBindingController
ClusterScan() ClusterScanController
ClusterTemplate() ClusterTemplateController
ClusterTemplateRevision() ClusterTemplateRevisionController
ComposeConfig() ComposeConfigController
DynamicSchema() DynamicSchemaController
EtcdBackup() EtcdBackupController
Feature() FeatureController
FleetWorkspace() FleetWorkspaceController
FreeIpaProvider() FreeIpaProviderController
GithubProvider() GithubProviderController
GlobalDns() GlobalDnsController
GlobalDnsProvider() GlobalDnsProviderController
GlobalRole() GlobalRoleController
GlobalRoleBinding() GlobalRoleBindingController
GoogleOAuthProvider() GoogleOAuthProviderController
Group() GroupController
GroupMember() GroupMemberController
KontainerDriver() KontainerDriverController
LocalProvider() LocalProviderController
ManagedChart() ManagedChartController
MonitorMetric() MonitorMetricController
MultiClusterApp() MultiClusterAppController
MultiClusterAppRevision() MultiClusterAppRevisionController
Node() NodeController
NodeDriver() NodeDriverController
NodePool() NodePoolController
NodeTemplate() NodeTemplateController
Notifier() NotifierController
OIDCProvider() OIDCProviderController
OpenLdapProvider() OpenLdapProviderController
PodSecurityPolicyTemplate() PodSecurityPolicyTemplateController
PodSecurityPolicyTemplateProjectBinding() PodSecurityPolicyTemplateProjectBindingController
Preference() PreferenceController
Principal() PrincipalController
Project() ProjectController
ProjectAlert() ProjectAlertController
ProjectAlertGroup() ProjectAlertGroupController
ProjectAlertRule() ProjectAlertRuleController
ProjectCatalog() ProjectCatalogController
ProjectLogging() ProjectLoggingController
ProjectMonitorGraph() ProjectMonitorGraphController
ProjectNetworkPolicy() ProjectNetworkPolicyController
ProjectRoleTemplateBinding() ProjectRoleTemplateBindingController
RancherUserNotification() RancherUserNotificationController
RkeAddon() RkeAddonController
RkeK8sServiceOption() RkeK8sServiceOptionController
RkeK8sSystemImage() RkeK8sSystemImageController
RoleTemplate() RoleTemplateController
SamlProvider() SamlProviderController
SamlToken() SamlTokenController
Setting() SettingController
Template() TemplateController
TemplateContent() TemplateContentController
TemplateVersion() TemplateVersionController
Token() TokenController
User() UserController
UserAttribute() UserAttributeController
}
func New(controllerFactory controller.SharedControllerFactory) Interface {
return &version{
controllerFactory: controllerFactory,
}
}
type version struct {
controllerFactory controller.SharedControllerFactory
}
func (c *version) APIService() APIServiceController {
return NewAPIServiceController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "APIService"}, "apiservices", false, c.controllerFactory)
}
func (c *version) ActiveDirectoryProvider() ActiveDirectoryProviderController {
return NewActiveDirectoryProviderController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ActiveDirectoryProvider"}, "activedirectoryproviders", false, c.controllerFactory)
}
func (c *version) AuthConfig() AuthConfigController {
return NewAuthConfigController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "AuthConfig"}, "authconfigs", false, c.controllerFactory)
}
func (c *version) AuthProvider() AuthProviderController {
return NewAuthProviderController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "AuthProvider"}, "authproviders", false, c.controllerFactory)
}
func (c *version) AuthToken() AuthTokenController {
return NewAuthTokenController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "AuthToken"}, "authtokens", false, c.controllerFactory)
}
func (c *version) AzureADProvider() AzureADProviderController {
return NewAzureADProviderController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "AzureADProvider"}, "azureadproviders", false, c.controllerFactory)
}
func (c *version) Catalog() CatalogController {
return NewCatalogController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "Catalog"}, "catalogs", false, c.controllerFactory)
}
func (c *version) CatalogTemplate() CatalogTemplateController {
return NewCatalogTemplateController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "CatalogTemplate"}, "catalogtemplates", true, c.controllerFactory)
}
func (c *version) CatalogTemplateVersion() CatalogTemplateVersionController {
return NewCatalogTemplateVersionController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "CatalogTemplateVersion"}, "catalogtemplateversions", true, c.controllerFactory)
}
func (c *version) CisBenchmarkVersion() CisBenchmarkVersionController {
return NewCisBenchmarkVersionController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "CisBenchmarkVersion"}, "cisbenchmarkversions", true, c.controllerFactory)
}
func (c *version) CisConfig() CisConfigController {
return NewCisConfigController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "CisConfig"}, "cisconfigs", true, c.controllerFactory)
}
func (c *version) CloudCredential() CloudCredentialController {
return NewCloudCredentialController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "CloudCredential"}, "cloudcredentials", true, c.controllerFactory)
}
func (c *version) Cluster() ClusterController {
return NewClusterController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "Cluster"}, "clusters", false, c.controllerFactory)
}
func (c *version) ClusterAlert() ClusterAlertController {
return NewClusterAlertController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ClusterAlert"}, "clusteralerts", true, c.controllerFactory)
}
func (c *version) ClusterAlertGroup() ClusterAlertGroupController {
return NewClusterAlertGroupController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ClusterAlertGroup"}, "clusteralertgroups", true, c.controllerFactory)
}
func (c *version) ClusterAlertRule() ClusterAlertRuleController {
return NewClusterAlertRuleController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ClusterAlertRule"}, "clusteralertrules", true, c.controllerFactory)
}
func (c *version) ClusterCatalog() ClusterCatalogController {
return NewClusterCatalogController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ClusterCatalog"}, "clustercatalogs", true, c.controllerFactory)
}
func (c *version) ClusterLogging() ClusterLoggingController {
return NewClusterLoggingController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ClusterLogging"}, "clusterloggings", true, c.controllerFactory)
}
func (c *version) ClusterMonitorGraph() ClusterMonitorGraphController {
return NewClusterMonitorGraphController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ClusterMonitorGraph"}, "clustermonitorgraphs", true, c.controllerFactory)
}
func (c *version) ClusterRegistrationToken() ClusterRegistrationTokenController {
return NewClusterRegistrationTokenController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ClusterRegistrationToken"}, "clusterregistrationtokens", true, c.controllerFactory)
}
func (c *version) ClusterRoleTemplateBinding() ClusterRoleTemplateBindingController {
return NewClusterRoleTemplateBindingController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ClusterRoleTemplateBinding"}, "clusterroletemplatebindings", true, c.controllerFactory)
}
func (c *version) ClusterScan() ClusterScanController {
return NewClusterScanController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ClusterScan"}, "clusterscans", true, c.controllerFactory)
}
func (c *version) ClusterTemplate() ClusterTemplateController {
return NewClusterTemplateController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ClusterTemplate"}, "clustertemplates", true, c.controllerFactory)
}
func (c *version) ClusterTemplateRevision() ClusterTemplateRevisionController {
return NewClusterTemplateRevisionController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ClusterTemplateRevision"}, "clustertemplaterevisions", true, c.controllerFactory)
}
func (c *version) ComposeConfig() ComposeConfigController {
return NewComposeConfigController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ComposeConfig"}, "composeconfigs", false, c.controllerFactory)
}
func (c *version) DynamicSchema() DynamicSchemaController {
return NewDynamicSchemaController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "DynamicSchema"}, "dynamicschemas", false, c.controllerFactory)
}
func (c *version) EtcdBackup() EtcdBackupController {
return NewEtcdBackupController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "EtcdBackup"}, "etcdbackups", true, c.controllerFactory)
}
func (c *version) Feature() FeatureController {
return NewFeatureController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "Feature"}, "features", false, c.controllerFactory)
}
func (c *version) FleetWorkspace() FleetWorkspaceController {
return NewFleetWorkspaceController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "FleetWorkspace"}, "fleetworkspaces", false, c.controllerFactory)
}
func (c *version) FreeIpaProvider() FreeIpaProviderController {
return NewFreeIpaProviderController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "FreeIpaProvider"}, "freeipaproviders", false, c.controllerFactory)
}
func (c *version) GithubProvider() GithubProviderController {
return NewGithubProviderController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "GithubProvider"}, "githubproviders", false, c.controllerFactory)
}
func (c *version) GlobalDns() GlobalDnsController {
return NewGlobalDnsController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "GlobalDns"}, "globaldnses", true, c.controllerFactory)
}
func (c *version) GlobalDnsProvider() GlobalDnsProviderController {
return NewGlobalDnsProviderController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "GlobalDnsProvider"}, "globaldnsproviders", true, c.controllerFactory)
}
func (c *version) GlobalRole() GlobalRoleController {
return NewGlobalRoleController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "GlobalRole"}, "globalroles", false, c.controllerFactory)
}
func (c *version) GlobalRoleBinding() GlobalRoleBindingController {
return NewGlobalRoleBindingController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "GlobalRoleBinding"}, "globalrolebindings", false, c.controllerFactory)
}
func (c *version) GoogleOAuthProvider() GoogleOAuthProviderController {
return NewGoogleOAuthProviderController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "GoogleOAuthProvider"}, "googleoauthproviders", false, c.controllerFactory)
}
func (c *version) Group() GroupController {
return NewGroupController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "Group"}, "groups", false, c.controllerFactory)
}
func (c *version) GroupMember() GroupMemberController {
return NewGroupMemberController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "GroupMember"}, "groupmembers", false, c.controllerFactory)
}
func (c *version) KontainerDriver() KontainerDriverController {
return NewKontainerDriverController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "KontainerDriver"}, "kontainerdrivers", false, c.controllerFactory)
}
func (c *version) LocalProvider() LocalProviderController {
return NewLocalProviderController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "LocalProvider"}, "localproviders", false, c.controllerFactory)
}
func (c *version) ManagedChart() ManagedChartController {
return NewManagedChartController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ManagedChart"}, "managedcharts", true, c.controllerFactory)
}
func (c *version) MonitorMetric() MonitorMetricController {
return NewMonitorMetricController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "MonitorMetric"}, "monitormetrics", true, c.controllerFactory)
}
func (c *version) MultiClusterApp() MultiClusterAppController {
return NewMultiClusterAppController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "MultiClusterApp"}, "multiclusterapps", true, c.controllerFactory)
}
func (c *version) MultiClusterAppRevision() MultiClusterAppRevisionController {
return NewMultiClusterAppRevisionController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "MultiClusterAppRevision"}, "multiclusterapprevisions", true, c.controllerFactory)
}
func (c *version) Node() NodeController {
return NewNodeController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "Node"}, "nodes", true, c.controllerFactory)
}
func (c *version) NodeDriver() NodeDriverController {
return NewNodeDriverController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "NodeDriver"}, "nodedrivers", false, c.controllerFactory)
}
func (c *version) NodePool() NodePoolController {
return NewNodePoolController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "NodePool"}, "nodepools", true, c.controllerFactory)
}
func (c *version) NodeTemplate() NodeTemplateController {
return NewNodeTemplateController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "NodeTemplate"}, "nodetemplates", true, c.controllerFactory)
}
func (c *version) Notifier() NotifierController {
return NewNotifierController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "Notifier"}, "notifiers", true, c.controllerFactory)
}
func (c *version) OIDCProvider() OIDCProviderController {
return NewOIDCProviderController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "OIDCProvider"}, "oidcproviders", false, c.controllerFactory)
}
func (c *version) OpenLdapProvider() OpenLdapProviderController {
return NewOpenLdapProviderController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "OpenLdapProvider"}, "openldapproviders", false, c.controllerFactory)
}
func (c *version) PodSecurityPolicyTemplate() PodSecurityPolicyTemplateController {
return NewPodSecurityPolicyTemplateController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "PodSecurityPolicyTemplate"}, "podsecuritypolicytemplates", false, c.controllerFactory)
}
func (c *version) PodSecurityPolicyTemplateProjectBinding() PodSecurityPolicyTemplateProjectBindingController {
return NewPodSecurityPolicyTemplateProjectBindingController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "PodSecurityPolicyTemplateProjectBinding"}, "podsecuritypolicytemplateprojectbindings", true, c.controllerFactory)
}
func (c *version) Preference() PreferenceController {
return NewPreferenceController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "Preference"}, "preferences", true, c.controllerFactory)
}
func (c *version) Principal() PrincipalController {
return NewPrincipalController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "Principal"}, "principals", false, c.controllerFactory)
}
func (c *version) Project() ProjectController {
return NewProjectController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "Project"}, "projects", true, c.controllerFactory)
}
func (c *version) ProjectAlert() ProjectAlertController {
return NewProjectAlertController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ProjectAlert"}, "projectalerts", true, c.controllerFactory)
}
func (c *version) ProjectAlertGroup() ProjectAlertGroupController {
return NewProjectAlertGroupController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ProjectAlertGroup"}, "projectalertgroups", true, c.controllerFactory)
}
func (c *version) ProjectAlertRule() ProjectAlertRuleController {
return NewProjectAlertRuleController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ProjectAlertRule"}, "projectalertrules", true, c.controllerFactory)
}
func (c *version) ProjectCatalog() ProjectCatalogController {
return NewProjectCatalogController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ProjectCatalog"}, "projectcatalogs", true, c.controllerFactory)
}
func (c *version) ProjectLogging() ProjectLoggingController {
return NewProjectLoggingController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ProjectLogging"}, "projectloggings", true, c.controllerFactory)
}
func (c *version) ProjectMonitorGraph() ProjectMonitorGraphController {
return NewProjectMonitorGraphController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ProjectMonitorGraph"}, "projectmonitorgraphs", true, c.controllerFactory)
}
func (c *version) ProjectNetworkPolicy() ProjectNetworkPolicyController {
return NewProjectNetworkPolicyController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ProjectNetworkPolicy"}, "projectnetworkpolicies", true, c.controllerFactory)
}
func (c *version) ProjectRoleTemplateBinding() ProjectRoleTemplateBindingController {
return NewProjectRoleTemplateBindingController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "ProjectRoleTemplateBinding"}, "projectroletemplatebindings", true, c.controllerFactory)
}
func (c *version) RancherUserNotification() RancherUserNotificationController {
return NewRancherUserNotificationController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "RancherUserNotification"}, "rancherusernotifications", false, c.controllerFactory)
}
func (c *version) RkeAddon() RkeAddonController {
return NewRkeAddonController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "RkeAddon"}, "rkeaddons", true, c.controllerFactory)
}
func (c *version) RkeK8sServiceOption() RkeK8sServiceOptionController {
return NewRkeK8sServiceOptionController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "RkeK8sServiceOption"}, "rkek8sserviceoptions", true, c.controllerFactory)
}
func (c *version) RkeK8sSystemImage() RkeK8sSystemImageController {
return NewRkeK8sSystemImageController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "RkeK8sSystemImage"}, "rkek8ssystemimages", true, c.controllerFactory)
}
func (c *version) RoleTemplate() RoleTemplateController {
return NewRoleTemplateController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "RoleTemplate"}, "roletemplates", false, c.controllerFactory)
}
func (c *version) SamlProvider() SamlProviderController {
return NewSamlProviderController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "SamlProvider"}, "samlproviders", false, c.controllerFactory)
}
func (c *version) SamlToken() SamlTokenController {
return NewSamlTokenController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "SamlToken"}, "samltokens", false, c.controllerFactory)
}
func (c *version) Setting() SettingController {
return NewSettingController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "Setting"}, "settings", false, c.controllerFactory)
}
func (c *version) Template() TemplateController {
return NewTemplateController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "Template"}, "templates", false, c.controllerFactory)
}
func (c *version) TemplateContent() TemplateContentController {
return NewTemplateContentController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "TemplateContent"}, "templatecontents", false, c.controllerFactory)
}
func (c *version) TemplateVersion() TemplateVersionController {
return NewTemplateVersionController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "TemplateVersion"}, "templateversions", false, c.controllerFactory)
}
func (c *version) Token() TokenController {
return NewTokenController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "Token"}, "tokens", false, c.controllerFactory)
}
func (c *version) User() UserController {
return NewUserController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "User"}, "users", false, c.controllerFactory)
}
func (c *version) UserAttribute() UserAttributeController {
return NewUserAttributeController(schema.GroupVersionKind{Group: "management.cattle.io", Version: "v3", Kind: "UserAttribute"}, "userattributes", false, c.controllerFactory)
}
func init() {
	schemes.Register(v3.AddToScheme)
}
walkers.rs
//! Functions and types for conveniently traversing and querying a SqlSchema.
#![deny(missing_docs)]
use crate::{
ids::EnumId, Column, ColumnArity, ColumnId, ColumnType, ColumnTypeFamily, DefaultValue, Enum, ForeignKey,
ForeignKeyAction, Index, IndexColumn, IndexFieldId, IndexId, IndexType, PrimaryKey, PrimaryKeyColumn, SQLSortOrder,
SqlSchema, Table, TableId, UserDefinedType, View,
};
use serde::de::DeserializeOwned;
use std::fmt;
/// Traverse all the columns in the schema.
pub fn walk_columns(schema: &SqlSchema) -> impl Iterator<Item = ColumnWalker<'_>> {
schema.iter_tables().flat_map(move |(table_id, table)| {
(0..table.columns.len()).map(move |column_id| ColumnWalker {
schema,
column_id: ColumnId(column_id as u32),
table_id,
})
})
}
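// A minimal usage sketch (illustrative; assumes a `schema: SqlSchema` in scope):
//
//     for column in walk_columns(&schema) {
//         println!("{}.{}", column.table().name(), column.name());
//     }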
/// Traverse a table column.
#[derive(Clone, Copy)]
pub struct ColumnWalker<'a> {
/// The schema the column is contained in.
schema: &'a SqlSchema,
column_id: ColumnId,
table_id: TableId,
}
impl<'a> fmt::Debug for ColumnWalker<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ColumnWalker")
.field("column_id", &self.column_id)
.field("table_id", &self.table_id)
.finish()
}
}
impl<'a> ColumnWalker<'a> {
/// The nullability and arity of the column.
pub fn arity(&self) -> &ColumnArity {
&self.column().tpe.arity
}
/// A reference to the underlying Column struct.
pub fn column(&self) -> &'a Column {
&self.table().table()[self.column_id]
}
/// The index of the column in the parent table.
pub fn column_id(&self) -> ColumnId {
self.column_id
}
/// Returns whether the column has the enum default value of the given enum type.
pub fn column_has_enum_default_value(&self, enum_name: &str, value: &str) -> bool {
self.column_type_family_as_enum().map(|enm| enm.name.as_str()) == Some(enum_name)
&& self
.default()
.and_then(|default| default.as_value())
.and_then(|value| value.as_enum_value())
== Some(value)
}
/// Returns whether the type of the column matches the provided enum name.
pub fn column_type_is_enum(&self, enum_name: &str) -> bool {
self.column_type_family_as_enum()
.map(|enm| enm.name == enum_name)
.unwrap_or(false)
}
/// The type family.
pub fn column_type_family(&self) -> &'a ColumnTypeFamily {
&self.column().tpe.family
}
/// Extract an `Enum` column type family, or `None` if the family is something else.
pub fn column_type_family_as_enum(&self) -> Option<&'a Enum> {
        self.column_type_family().as_enum().map(|enum_name| {
            self.schema()
                .get_enum(enum_name)
                .unwrap_or_else(|| panic!("Cannot find enum referenced in ColumnTypeFamily (`{}`)", enum_name))
        })
}
/// The column name.
pub fn name(&self) -> &'a str {
&self.column().name
}
/// The default value for the column.
pub fn default(&self) -> Option<&'a DefaultValue> {
self.column().default.as_ref()
}
/// The full column type.
pub fn column_type(self) -> &'a ColumnType {
&self.column().tpe
}
/// The column native type.
pub fn column_native_type<T>(&self) -> Option<T>
where
T: DeserializeOwned,
{
self.column()
.tpe
.native_type
.as_ref()
.map(|val| serde_json::from_value(val.clone()).unwrap())
}
/// Is this column an auto-incrementing integer?
pub fn is_autoincrement(&self) -> bool {
self.column().auto_increment
}
    /// Is this column indexed by a secondary index?
pub fn is_part_of_secondary_index(&self) -> bool {
let table = self.table();
let name = self.name();
table.indexes().any(|idx| idx.contains_column(name))
}
/// Is this column a part of the table's primary key?
pub fn is_part_of_primary_key(&self) -> bool {
self.table().table().is_part_of_primary_key(self.name())
}
    /// Is this column a part of one of the table's foreign keys?
pub fn is_part_of_foreign_key(&self) -> bool {
self.table().table().is_part_of_foreign_key(self.name())
}
/// Returns whether two columns are named the same and belong to the same table.
pub fn is_same_column(&self, other: &ColumnWalker<'_>) -> bool {
self.name() == other.name() && self.table().name() == other.table().name()
}
/// Returns whether this column is the primary key. If it is only part of the primary key, this will return false.
pub fn is_single_primary_key(&self) -> bool {
self.table()
.primary_key()
.map(|pk| pk.columns.len() == 1 && pk.columns.first().map(|c| c.name() == self.name()).unwrap_or(false))
.unwrap_or(false)
}
/// Traverse to the column's table.
pub fn table(&self) -> TableWalker<'a> {
TableWalker {
schema: self.schema,
table_id: self.table_id,
}
}
/// Get a reference to the SQL schema the column is part of.
pub fn schema(&self) -> &'a SqlSchema {
self.schema
}
}
/// Traverse a view
#[derive(Clone, Copy)]
pub struct ViewWalker<'a> {
/// The schema the view is contained in.
schema: &'a SqlSchema,
/// The index of the view in the schema.
view_index: usize,
}
impl<'a> ViewWalker<'a> {
/// Create a ViewWalker from a schema and a reference to one of its views.
pub fn new(schema: &'a SqlSchema, view_index: usize) -> Self {
Self { schema, view_index }
}
/// The name of the view
pub fn name(&self) -> &'a str {
&self.view().name
}
/// The SQL definition of the view
pub fn definition(&self) -> Option<&'a str> {
self.view().definition.as_deref()
}
/// The index of the view in the schema.
pub fn view_index(&self) -> usize {
self.view_index
}
fn view(&self) -> &'a View {
&self.schema.views[self.view_index]
}
}
/// Traverse a user-defined type
#[derive(Clone, Copy)]
pub struct UserDefinedTypeWalker<'a> {
schema: &'a SqlSchema,
udt_index: usize,
}
impl<'a> UserDefinedTypeWalker<'a> {
/// Create a UserDefinedTypeWalker from a schema and a reference to one of its udts.
pub fn new(schema: &'a SqlSchema, udt_index: usize) -> Self {
Self { schema, udt_index }
}
/// The name of the type
pub fn name(&self) -> &'a str {
&self.udt().name
}
/// The SQL definition of the type
pub fn definition(&self) -> Option<&'a str> {
self.udt().definition.as_deref()
}
/// The index of the user-defined type in the schema.
pub fn udt_index(&self) -> usize {
self.udt_index
}
fn udt(&self) -> &'a UserDefinedType {
&self.schema.user_defined_types[self.udt_index]
}
}
/// Traverse a table.
#[derive(Clone, Copy)]
pub struct TableWalker<'a> {
/// The schema the table is contained in.
schema: &'a SqlSchema,
table_id: TableId,
}
impl<'a> fmt::Debug for TableWalker<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TableWalker").field("table_id", &self.table_id).finish()
}
}
impl<'a> TableWalker<'a> {
/// Create a TableWalker from a schema and a reference to one of its tables. This should stay private.
pub(crate) fn new(schema: &'a SqlSchema, table_id: TableId) -> Self {
Self { schema, table_id }
}
/// Get a column in the table, by name.
pub fn column(&self, column_name: &str) -> Option<ColumnWalker<'a>> {
self.columns().find(|column| column.name() == column_name)
}
/// Get a column in the table by index.
pub fn column_at(&self, column_id: ColumnId) -> ColumnWalker<'a> {
ColumnWalker {
schema: self.schema,
column_id,
table_id: self.table_id,
}
}
/// Traverse the table's columns.
pub fn columns(&self) -> impl Iterator<Item = ColumnWalker<'a>> {
let schema = self.schema;
let table_id = self.table_id;
(0..self.table().columns.len()).map(move |column_id| ColumnWalker {
schema,
column_id: ColumnId(column_id as u32),
table_id,
})
}
/// The number of foreign key constraints on the table.
pub fn foreign_key_count(&self) -> usize {
self.table().foreign_keys.len()
}
/// Traverse to an index by index.
pub fn index_at(&self, index_index: usize) -> IndexWalker<'a> {
IndexWalker {
schema: self.schema,
table_id: self.table_id,
index_index,
}
}
/// Traverse the indexes on the table.
pub fn indexes(&self) -> impl Iterator<Item = IndexWalker<'a>> {
let schema = self.schema;
let table_id = self.table_id;
(0..self.table().indices.len()).map(move |index_index| IndexWalker {
schema,
table_id,
index_index,
})
}
/// The number of indexes on the table.
pub fn indexes_count(&self) -> usize {
self.table().indices.len()
}
/// Traverse the foreign keys on the table.
pub fn foreign_keys(&self) -> impl Iterator<Item = ForeignKeyWalker<'a>> {
let table_id = self.table_id;
let schema = self.schema;
(0..self.table().foreign_keys.len()).map(move |foreign_key_index| ForeignKeyWalker {
foreign_key_index,
table_id,
schema,
})
}
/// Traverse foreign keys from other tables, referencing current table.
pub fn referencing_foreign_keys(&self) -> impl Iterator<Item = ForeignKeyWalker<'a>> {
let table_id = self.table_id;
self.schema
.table_walkers()
.filter(move |t| t.table_id() != table_id)
.flat_map(|t| t.foreign_keys())
.filter(move |fk| fk.referenced_table().table_id() == table_id)
}
/// Get a foreign key by index.
pub fn foreign_key_at(&self, index: usize) -> ForeignKeyWalker<'a> {
ForeignKeyWalker {
schema: self.schema,
table_id: self.table_id,
foreign_key_index: index,
}
}
/// The table name.
pub fn name(&self) -> &'a str {
&self.table().name
}
/// Try to traverse a foreign key for a single column.
pub fn foreign_key_for_column(&self, column: &str) -> Option<&'a ForeignKey> {
self.table().foreign_key_for_column(column)
}
/// Traverse to the primary key of the table.
pub fn primary_key(&self) -> Option<&'a PrimaryKey> {
self.table().primary_key.as_ref()
}
/// The columns that are part of the primary keys.
pub fn primary_key_columns(&'a self) -> Box<dyn ExactSizeIterator<Item = PrimaryKeyColumnWalker<'a>> + 'a> {
let as_walker = move |primary_key_column_id: usize, c: &PrimaryKeyColumn| {
let column_id = self.column(c.name()).map(|c| c.column_id).unwrap();
PrimaryKeyColumnWalker {
schema: self.schema,
primary_key_column_id,
table_id: self.table_id,
column_id,
}
};
match self.table().primary_key.as_ref() {
Some(pk) => Box::new(pk.columns.iter().enumerate().map(move |(i, c)| as_walker(i, c))),
None => Box::new(std::iter::empty()),
}
}
/// The names of the columns that are part of the primary key.
pub fn primary_key_column_names(&self) -> Option<Vec<String>> {
self.table()
.primary_key
.as_ref()
.map(|pk| pk.columns.iter().map(|c| c.name().to_string()).collect())
}
/// The SQLSchema the table belongs to.
pub fn schema(self) -> &'a SqlSchema {
self.schema
}
/// Reference to the underlying `Table` struct.
pub fn table(&self) -> &'a Table {
&self.schema[self.table_id]
}
/// The index of the table in the schema.
pub fn table_id(&self) -> TableId {
self.table_id
}
}
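// Sketch: walking the relations that point at a table (illustrative; the
// table name "User" is hypothetical):
//
//     let table = schema.table_walker("User").unwrap();
//     for fk in table.referencing_foreign_keys() {
//         println!("{} -> {}", fk.table().name(), fk.referenced_table().name());
//     }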
/// A walker of a column in a primary key.
#[derive(Clone, Copy)]
pub struct PrimaryKeyColumnWalker<'a> {
schema: &'a SqlSchema,
primary_key_column_id: usize,
table_id: TableId,
column_id: ColumnId,
}
impl<'a> PrimaryKeyColumnWalker<'a> {
/// Conversion to a normal column walker.
pub fn as_column(self) -> ColumnWalker<'a> {
ColumnWalker {
schema: self.schema,
column_id: self.column_id,
table_id: self.table_id,
}
}
/// The length limit of the (text) column. Matters on MySQL only.
pub fn length(self) -> Option<u32> {
self.get().length
}
/// The BTree ordering. Matters on SQL Server only.
pub fn sort_order(self) -> Option<SQLSortOrder> {
self.get().sort_order
}
fn table(self) -> TableWalker<'a> {
TableWalker {
schema: self.schema,
table_id: self.table_id,
}
}
fn get(self) -> &'a PrimaryKeyColumn {
self.table()
.table()
.primary_key_columns()
.nth(self.primary_key_column_id)
.unwrap()
}
}
/// Traverse a foreign key.
#[derive(Clone, Copy)]
pub struct ForeignKeyWalker<'schema> {
/// The index of the foreign key in the table.
foreign_key_index: usize,
table_id: TableId,
schema: &'schema SqlSchema,
}
impl<'a> fmt::Debug for ForeignKeyWalker<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ForeignKeyWalker")
.field("foreign_key_index", &self.foreign_key_index)
.field("table_id", &self.table_id)
.field("__table_name", &self.table().name())
.field("__referenced_table", &self.foreign_key().referenced_table)
.field("__constrained_columns", &self.foreign_key().columns)
.field("__referenced_columns", &self.foreign_key().referenced_columns)
.finish()
}
}
impl<'schema> ForeignKeyWalker<'schema> {
/// The names of the foreign key columns on the referencing table.
pub fn constrained_column_names(&self) -> &'schema [String] {
&self.foreign_key().columns
}
/// The foreign key columns on the referencing table.
pub fn constrained_columns<'b>(&'b self) -> impl Iterator<Item = ColumnWalker<'schema>> + 'b {
self.foreign_key()
.columns
.iter()
.filter_map(move |colname| self.table().columns().find(|column| colname == column.name()))
}
/// The name of the foreign key constraint.
pub fn constraint_name(&self) -> Option<&'schema str> {
self.foreign_key().constraint_name.as_deref()
}
/// The underlying ForeignKey struct.
pub fn foreign_key(&self) -> &'schema ForeignKey {
&self.table().table().foreign_keys[self.foreign_key_index]
}
/// The index of the foreign key in the parent table.
pub fn foreign_key_index(&self) -> usize {
self.foreign_key_index
}
/// Access the underlying ForeignKey struct.
pub fn inner(&self) -> &'schema ForeignKey {
self.foreign_key()
}
/// The `ON DELETE` behaviour of the foreign key.
pub fn on_delete_action(&self) -> &ForeignKeyAction {
&self.foreign_key().on_delete_action
}
/// The `ON UPDATE` behaviour of the foreign key.
pub fn on_update_action(&self) -> &ForeignKeyAction {
&self.foreign_key().on_update_action
}
/// The names of the columns referenced by the foreign key on the referenced table.
pub fn referenced_column_names(&self) -> &'schema [String] {
&self.foreign_key().referenced_columns
}
/// The number of columns referenced by the constraint.
pub fn referenced_columns_count(&self) -> usize {
self.foreign_key().referenced_columns.len()
}
/// The table the foreign key "points to".
pub fn referenced_table(&self) -> TableWalker<'schema> {
        TableWalker {
            schema: self.schema,
            table_id: self
                .schema
                .table_walker(&self.foreign_key().referenced_table)
                .unwrap_or_else(|| panic!("Foreign key references unknown table. {:?}", self))
                .table_id,
        }
}
/// Traverse to the referencing/constrained table.
pub fn table(&self) -> TableWalker<'schema> {
TableWalker {
schema: self.schema,
table_id: self.table_id,
}
}
/// True if relation is back to the same table.
pub fn is_self_relation(&self) -> bool {
self.table().name() == self.referenced_table().name()
}
}
/// Traverse an index column.
#[derive(Clone, Copy)]
pub struct IndexColumnWalker<'a> {
schema: &'a SqlSchema,
index_column_id: usize,
table_id: TableId,
index_index: usize,
}
impl<'a> IndexColumnWalker<'a> {
/// Get the index column data.
pub fn get(&self) -> &'a IndexColumn {
&self.index().get().columns[self.index_column_id]
}
/// The length limit of the (text) column. Matters on MySQL only.
pub fn length(self) -> Option<u32> {
self.get().length
}
/// The BTree ordering.
pub fn sort_order(self) -> Option<SQLSortOrder> {
self.get().sort_order
}
/// The table where the column is located.
pub fn table(&self) -> TableWalker<'a> {
TableWalker {
table_id: self.table_id,
schema: self.schema,
}
}
/// The index of the column.
pub fn index(&self) -> IndexWalker<'a> {
IndexWalker {
schema: self.schema,
table_id: self.table_id,
index_index: self.index_index,
}
}
/// Convert to a normal column walker, losing the possible index arguments.
pub fn as_column(&self) -> ColumnWalker<'a> {
let column_id = self
.table()
.columns()
.enumerate()
.find_map(|(i, c)| {
if c.column().name == self.get().name() {
Some(i)
                } else {
                    None
                }
})
            .expect("index column not found among the table's columns");
ColumnWalker {
schema: self.schema,
column_id: ColumnId(column_id as u32),
table_id: self.table_id,
}
}
/// The identifier of the index column.
pub fn index_field_id(&self) -> IndexFieldId {
IndexFieldId(self.index().index_id(), self.index_column_id as u32)
}
}
/// Traverse an index.
#[derive(Clone, Copy)]
pub struct IndexWalker<'a> {
schema: &'a SqlSchema,
/// The index of the table in the schema.
table_id: TableId,
/// The index of the database index in the table.
index_index: usize,
}
impl<'a> fmt::Debug for IndexWalker<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("IndexWalker")
.field("index_index", &self.index_index)
.field("table_id", &self.table_id)
.finish()
}
}
impl<'a> IndexWalker<'a> {
/// The names of the indexed columns.
pub fn column_names(&'a self) -> impl ExactSizeIterator<Item = &'a str> + 'a {
self.get().columns.iter().map(|c| c.name())
}
/// Traverse the indexed columns.
pub fn columns<'b>(&'b self) -> impl ExactSizeIterator<Item = IndexColumnWalker<'a>> + 'b {
self.get()
.columns
.iter()
.enumerate()
.map(move |(index_column_id, _)| IndexColumnWalker {
schema: self.schema,
index_column_id,
table_id: self.table_id,
index_index: self.index_index,
})
}
/// True if index contains the given column.
pub fn contains_column(&self, column_name: &str) -> bool {
self.get().columns.iter().any(|column| column.name() == column_name)
}
fn get(&self) -> &'a Index {
&self.table().table().indices[self.index_index]
}
/// The index of the index in the parent table.
pub fn index(&self) -> usize {
self.index_index
}
/// The identifier of the index.
pub fn index_id(&self) -> IndexId {
IndexId(self.table_id, self.index_index as u32)
}
/// The IndexType
pub fn index_type(&self) -> IndexType {
self.get().tpe
}
/// The name of the index.
pub fn name(&self) -> &'a str {
&self.get().name
}
/// The SqlSchema the index belongs to
pub fn schema(self) -> &'a SqlSchema {
self.schema
}
/// Traverse to the table of the index.
pub fn table(&self) -> TableWalker<'a> {
TableWalker {
table_id: self.table_id,
schema: self.schema,
}
}
}
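// Sketch: inspecting a table's indexes and their columns (illustrative;
// assumes a `table: TableWalker` in scope):
//
//     for index in table.indexes() {
//         println!("{} ({:?})", index.name(), index.index_type());
//         for column in index.columns() {
//             println!("  {}", column.as_column().name());
//         }
//     }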
/// Traverse an enum.
#[derive(Clone, Copy)]
pub struct EnumWalker<'a> {
pub(crate) schema: &'a SqlSchema,
pub(crate) enum_id: EnumId,
}
impl<'a> fmt::Debug for EnumWalker<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("EnumWalker").field("enum_id", &self.enum_id).finish()
}
}
impl<'a> EnumWalker<'a> {
/// The unique identifier of the enum in the parent schema.
pub fn enum_id(self) -> EnumId {
self.enum_id
}
fn get(&self) -> &'a Enum {
&self.schema[self.enum_id]
}
/// The name of the enum. This is a made up name on MySQL.
pub fn name(&self) -> &'a str {
&self.get().name
}
/// The values of the enum
pub fn values(&self) -> &'a [String] {
&self.get().values
}
}
/// Extension methods for the traversal of a SqlSchema.
pub trait SqlSchemaExt {
/// Find a table by name.
fn table_walker<'a>(&'a self, name: &str) -> Option<TableWalker<'a>>;
/// Find a table by id.
fn table_walker_at(&self, table_id: TableId) -> TableWalker<'_>;
/// Find a view by index.
fn view_walker_at(&self, index: usize) -> ViewWalker<'_>;
/// Find a user-defined type by index.
fn udt_walker_at(&self, index: usize) -> UserDefinedTypeWalker<'_>;
/// Walk an enum by ID.
fn walk_enum(&self, enum_id: EnumId) -> EnumWalker<'_>;
}
impl SqlSchemaExt for SqlSchema {
fn table_walker<'a>(&'a self, name: &str) -> Option<TableWalker<'a>> {
Some(TableWalker {
table_id: TableId(self.tables.iter().position(|table| table.name == name)? as u32),
schema: self,
})
}
fn table_walker_at(&self, table_id: TableId) -> TableWalker<'_> {
TableWalker { table_id, schema: self }
}
fn view_walker_at(&self, index: usize) -> ViewWalker<'_> {
ViewWalker {
view_index: index,
schema: self,
}
}
fn udt_walker_at(&self, index: usize) -> UserDefinedTypeWalker<'_> {
UserDefinedTypeWalker {
udt_index: index,
schema: self,
}
}
fn walk_enum(&self, enum_id: EnumId) -> EnumWalker<'_> {
EnumWalker { schema: self, enum_id }
}
}
kbible.py
"""
kbible.py - base bible object and commands
"""
import pandas as pd
import yaml
import os
import subprocess
__author__ = "Sungcheol Kim <[email protected]>"
__docformat__ = "restructuredtext en"
class KBible(object):
""" Bible text object """
def __init__(self, version="개역한글판성경", debug=False, **kwargs):
""" read or parse bible text """
self._biblelist = []
self._versionlist = {}
this_dir, this_filename = os.path.split(__file__)
listname = os.path.join(this_dir, "data", u"book_names.csv")
self._table = pd.read_csv(listname, index_col=0)
self.add(version, **kwargs)
def add(self, version, **kwargs):
""" add different version """
b = read_full_bible(version_name=version, **kwargs)
self._biblelist.append(b)
self._versionlist[version] = len(self._biblelist) - 1
    def delete(self, version):
""" remove version """
if (version in self._versionlist) and (len(self._versionlist) > 1):
i = self._versionlist[version]
del self._versionlist[version]
del self._biblelist[i]
else:
print('... not found or only have one bible version: {}'.format(version))
def save(self, version="개역한글판성경"):
""" save bible text as compressed csv """
if version in self._versionlist:
this_dir, this_filename = os.path.split(__file__)
filename = os.path.join(this_dir, "data", version + ".csv.gz")
b = self._biblelist[self._versionlist[version]]
b.to_csv(filename, compression='gzip')
print('... save file: {}'.format(filename))
def get(self, version=""):
""" return bible as pandas """
if version == "":
return self._biblelist[0]
try:
return self._biblelist[self._versionlist[version]]
        except KeyError:
print('... no bible version: {}'.format(version))
return []
def bystr(self, sstr, form="md"):
""" extract bible verse """
if form == "pd":
res = pd.DataFrame()
for b in self._biblelist:
res = pd.concat([res, extract_bystr(b, sstr, form="pd")], axis=0)
return res
else:
msg = ""
for b in self._biblelist:
msg = msg + extract_bystr(b, sstr, form=form) + '\n'
return msg
def search(self, sstr, form="md", regex=False):
""" search string in bible """
res = pd.DataFrame()
for b in self._biblelist:
b_res_idx = b.text.str.contains(sstr, regex=regex)
if sum(b_res_idx) > 0:
res = pd.concat([res, b[b_res_idx]], axis=0)
if len(res) > 0:
return get_texts(res, form=form)
else:
print('... no matched results')
return []
def read(self, sstr, form='mdlines'):
""" search by short index string and show in markdown file """
msg = self.bystr(sstr, form=form)
with open('.temp.md', 'w') as f:
f.writelines(msg)
cmd = ['open', '.temp.md']
subprocess.call(cmd)
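# Usage sketch (illustrative):
#   b = KBible(version="개역한글판성경")
#   print(b.bystr("창3:16", form="md"))   # single verse as markdown
#   hits = b.search("사랑", form="md")     # full-text search across loaded versions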
def bible_parser(version_name="개역한글판성경"):
""" read bible text and return panda database
inputs:
version_name : available versions 개역한글판성경, 개역개정판성경
output:
bible panda dataframe as short version
"""
# read bible txt file
this_dir, this_filename = os.path.split(__file__)
filename = os.path.join(this_dir, "data", version_name + ".txt")
    with open(filename, "r", encoding="utf-8") as f:
lines = f.readlines()
# prepare data container
books = []
chapters = []
verses = []
texts = []
for i, line in enumerate(lines):
line = line.strip('\n\r')
# check comment line
if len(line) == 0:
continue
if line[0] == "#":
continue
if line.find(':') == -1:
continue
# find header
hp = line.find(' ')
if hp > 1 and hp < 25:
header = line[:hp]
text = line[hp+1:]
# find book, chapter, verse, text
try:
tmp = header.split(':')[0]
if tmp.find('.') > 0: # english bible short name
book = tmp.split('.')[0]
chapter = tmp.split('.')[1]
elif tmp[:2] in ["1_", "2_", "3_"]: # english bible long name
book = tmp[:2] + ''.join(filter(str.isalpha, tmp[2:]))
chapter = ''.join(filter(str.isdigit, tmp[2:]))
else: # korean bible
book = ''.join(filter(str.isalpha, tmp))
chapter = ''.join(filter(str.isdigit, tmp))
verse = header.split(':')[1]
except:
print('... header error: ({}) {}'.format(i, header))
continue
# convert data
try:
verse = int(verse)
chapter = int(chapter)
except:
print("... conversion error: ({}) {} {}".format(i, verse, chapter))
continue
# collect
books.append(book)
chapters.append(chapter)
verses.append(verse)
texts.append(text)
else:
print("... unrecognized line: ({}) {}".format(i, line))
df_bible = {'book':books, 'chapter':chapters, 'verse':verses, 'text':texts}
idx = range(len(books))
bible = pd.DataFrame(data=df_bible, index=idx)
return bible
def read_full_bible(version_name="개역한글판성경", save=False):
""" read bible version and combine book data
inputs:
version_name: bible version
save: [True|False]
output:
bible panda dataframe
"""
try:
this_dir, this_filename = os.path.split(__file__)
filename = os.path.join(this_dir, "data", version_name + ".csv.gz")
        full_bible = pd.read_csv(filename, index_col=0, compression="gzip")
return full_bible
except FileNotFoundError:
print('... generate bible database: {}'.format(filename))
bible = bible_parser(version_name=version_name)
listname = os.path.join(this_dir, "data", u"book_names.csv")
table = pd.read_csv(listname, index_col=0)
if bible['book'][0] == 'Gen':
table['book'] = table['eng_short']
elif bible['book'][0] == 'Genesis':
table['book'] = table['eng']
else:
table['book'] = table['kor_short']
full_bible = pd.merge(bible, table, on='book', how='left')
if save:
full_bible.to_csv(filename, compression='gzip')
return full_bible
def find_id(bible, book=[], chapter=[], verse=[], verb=False):
""" find index on full bible database
inputs:
bible: bible panda database
book: book names as list
chapter: chapters as list
verse: verses as list
verb: [True|False] show details
output:
panda dataframe filtered by all combination of book, chapter, verses
"""
isfullbible = False
# check books
books = set(bible['book'])
if "code" in bible.columns:
isfullbible = True
if len(book) == 0:
        book = list(books)[0]  # sets are not indexable; pick an arbitrary book
if isinstance(book, str):
book = [book]
if verb: print('... search book:{}'.format(book))
if isfullbible:
result = bible.loc[bible.kor.isin(book) | bible.kor_short.isin(book) | bible.eng.isin(book) | bible.eng_short.isin(book) | bible.book.isin(book)]
else:
result = bible.loc[bible.book.isin(book)]
# check chapter
if isinstance(chapter, int):
chapter = [chapter]
if len(chapter) == 0:
return result
if verb: print('... search chapter: {}'.format(chapter))
    result = result.loc[result.chapter.isin(chapter)]
# check verse
if isinstance(verse, int):
verse = [verse]
if len(verse) == 0:
return result
if verb: print('... search verse: {}'.format(verse))
    result = result.loc[result.verse.isin(verse)]
if len(result) > 0:
return result
else:
print("... not found: {}, {}, {}".format(book, chapter, verse))
return []
def extract_bystr(bible, sstr, form="pd"):
""" extract verse by short search string
inputs:
bible: panda database of bible version
sstr: search pattern
- example "창3:16", "고후5:3", '요일1:1', "창세기1:1"
- no spaces
- one separator :
- ',' and '-' is possible (창3:16,17) (창1:1-5)
form: output format
- "md" one line sentence with markdowm
- "string" text string
- "pd" panda dataframe
output:
object determined by form variable
"""
# remove all spaces
sstr = sstr.replace(" ", "")
# find components
if sstr.find(":") > 0:
head = sstr.split(':')[0]
verses = sstr.split(':')[1]
else:
head = sstr
verses = []
book = ''.join(filter(str.isalpha, head))
chapter = ''.join(filter(str.isdigit, head))
# if there is no chapter
if len(chapter) == 0:
chapter = []
else:
chapter = int(chapter)
# check , in verse
if len(verses) > 0:
if verses.find(',') > 0:
verses = verses.split(',')
# check - in verse
elif verses.find('-') > 0:
start = verses.split('-')[0]
end = verses.split('-')[1]
try:
verses = list(range(int(start), int(end)+1))
except:
print('... wrong format: {}'.format(sstr))
return 0
else:
verses = [int(verses)]
verses = [int(v) for v in verses]
#print(book, chapter, verses)
# return verses
res = find_id(bible, book=book, chapter=chapter, verse=verses)
if len(res) == 0:
return []
return get_texts(res, form=form, sstr=sstr)
def get_texts(bible_pd, form="md", sstr="", sep="", text_width=0):
""" print verses using different format """
if form == "pd":
return bible_pd
if len(bible_pd["book"]) == 0:
return ""
# show id as kor_short
bible_pd.loc[:, "id"] = bible_pd.loc[:, "kor_short"] + sep + bible_pd["chapter"].astype(str) + ":" + bible_pd["verse"].astype(str)
bible_pd = tidy_footnote(bible_pd)
if (len(set(bible_pd["book"])) == 1) and (sstr.find(":") == -1):
min_v = bible_pd["verse"].min()
max_v = bible_pd["verse"].max()
sstr = "{}:{}-{}".format(sstr, min_v, max_v)
if form == "string":
if sstr == "":
bible_pd[form] = bible_pd["id"] + " - " + bible_pd[form].astype(str)
msg = '\n'.join(bible_pd[form].values)
else:
msg = sstr + ' ' + ' '.join(bible_pd[form].values)
return msg
if form == "md":
if sstr == "":
bible_pd[form] = "`" + bible_pd["id"] + "` " + bible_pd[form].astype(str)
msg = '\n'.join(bible_pd[form].values)
else:
verse_string_list = [ '<sup>{}</sup> {}'.format(v, l) for v,l in zip(bible_pd['verse'], bible_pd[form]) ]
msg = '`{}` '.format(sstr) + ' '.join(verse_string_list)
if sum(bible_pd["footnote"] != "") > 0:
return msg + '\n' + ''.join(bible_pd["footnote"].values)
else:
return msg
if form == "mdlines":
bible_pd["md"] = "`" + bible_pd["id"] + "` " + bible_pd["md"].astype(str)
msg = '\n'.join(bible_pd["md"].values)
if sum(bible_pd["footnote"] != "") > 0:
return msg + '\n' + ''.join(bible_pd["footnote"].values)
else:
return msg
    print('... {} format is not implemented: ["pd", "md", "mdlines", "string"]'.format(form))
return []
def tidy_footnote(bible_pd, keyword="FOOTNOTE"):
""" remove footnote """
bible_pd["md"] = bible_pd["text"]
bible_pd["string"] = bible_pd["text"]
bible_pd["footnote"] = ""
start_word = "__a__{}__a__".format(keyword)
end_word = "__b__{}__b__".format(keyword)
fn_idx = ["a", "b", "c", "d", "e", "f"]
# search different verses
for i in bible_pd.index[bible_pd.text.str.contains(start_word)]:
# search in one verse
text = bible_pd.at[i, "text"]
tmp = text.replace("_b_", "_a_").split(start_word)
bible_pd.at[i, "string"] = tmp[0] + ''.join(tmp[2::2])
# check multiple footnotes
md = tmp[0]
fn = ""
for j in range(int(len(tmp)/2)):
md = md + "[^{}{}]".format(bible_pd.at[i, "id"], fn_idx[j]) + tmp[j*2 + 2]
fn = fn + "[^{}{}]:".format(bible_pd.at[i, "id"], fn_idx[j]) + tmp[j*2 + 1].replace("TR","") + '\n'
bible_pd.at[i, "md"] = md
bible_pd.at[i, "footnote"] = fn
return bible_pd
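# Illustrative before/after for tidy_footnote (verse id "창1:1" assumed):
#   text:     "본문__a__FOOTNOTE__a__주석__b__FOOTNOTE__b__ 끝"
#   string:   "본문 끝"
#   md:       "본문[^창1:1a] 끝"
#   footnote: "[^창1:1a]:주석"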
def make_mdpage(bible, day_info, save_dir=None):
""" print all verses in list using markdown format
inputs:
bible: name of version or panda dataframe
day_info: name of day information file or yaml data
        save_dir: directory to write the markdown page, or None to skip saving
output:
text strings of markdown page
"""
# check day_info.yml file
if isinstance(day_info, str):
try:
with open(day_info, "r") as f:
day_info = yaml.load(f, yaml.BaseLoader)
except:
print("... file: {} parser error!".format(day_info))
return 0
bible_version = ""
# check bible version
if isinstance(bible, str):
try:
bible_version = "-" + bible
bible = read_full_bible(bible)
except:
print("... read bible error: {}".format(bible_version[1:]))
return 0
msg = "# {}일차 - {}\n\n".format(day_info["day"],day_info["title"])
msg = msg + "찬양 : {}\n\n".format(day_info["song"])
msg = msg + "기도 : {}\n\n".format(day_info["prayer"])
msg = msg + "요약 : {}\n\n".format(day_info["summary"])
msg = msg + "성경 버전 : {}\n\n".format(bible_version[1:])
for v in day_info["verses"]:
msg = msg + '- {}\n\n'.format(extract_bystr(bible, v, form="md"))
msg = msg + "### info\n\n"
msg = msg + "- 성경 구절 갯수 : {}".format(len(day_info["verses"]))
if save_dir is not None:
filename = '{}/day{}-{}{}.md'.format(save_dir, day_info["day"], day_info["title"].replace(" ", ""), bible_version)
with open(filename, "w") as f:
f.write(msg)
print('... save to {}'.format(filename))
return msg
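# Usage sketch (illustrative; the file and directory names are hypothetical):
#   page = make_mdpage("개역한글판성경", "day1.yml", save_dir="pages")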
filterable-list-control.js
const _oFilterableListControls =
element( by.css( '.o-filterable-list-controls' ) );
const multiselect = require( '../shared_objects/multiselect' );
const EC = protractor.ExpectedConditions;
function _getFilterableElement( selector ) {
return _oFilterableListControls.element( by.css( selector ) );
}
async function open() {
const expandable = this.mExpandable;
await expandable.click();
return browser.wait( EC.elementToBeClickable( expandable ) );
}
function close() {
return this.mExpandable.click();
}
const oFilterableListControls = {
mExpandable: _getFilterableElement( '.o-expandable' ),
mNotification: _getFilterableElement( '.m-notification' ),
oPostPreview: _getFilterableElement( '.o-post-preview' ),
open: open,
close: close
};
Object.assign( oFilterableListControls, multiselect );
module.exports = oFilterableListControls;
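// Usage sketch in a spec (illustrative; the require path is hypothetical):
// const controls = require( '../shared_objects/filterable-list-control' );
// await controls.open();
// await controls.close();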
test_policy_v1beta1_api.py
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.api.policy_v1beta1_api import PolicyV1beta1Api # noqa: E501
from kubernetes.client.rest import ApiException
class TestPolicyV1beta1Api(unittest.TestCase):
"""PolicyV1beta1Api unit test stubs"""
def setUp(self):
self.api = kubernetes.client.api.policy_v1beta1_api.PolicyV1beta1Api() # noqa: E501
def tearDown(self):
pass
def test_create_namespaced_pod_disruption_budget(self):
"""Test case for create_namespaced_pod_disruption_budget
"""
pass
def test_create_pod_security_policy(self):
"""Test case for create_pod_security_policy
"""
pass
def test_delete_collection_namespaced_pod_disruption_budget(self):
"""Test case for delete_collection_namespaced_pod_disruption_budget
"""
pass
def test_delete_collection_pod_security_policy(self):
"""Test case for delete_collection_pod_security_policy
"""
pass
def test_delete_namespaced_pod_disruption_budget(self):
"""Test case for delete_namespaced_pod_disruption_budget
"""
pass
def test_delete_pod_security_policy(self):
"""Test case for delete_pod_security_policy
"""
pass
def test_get_api_resources(self):
"""Test case for get_api_resources
"""
pass
def test_list_namespaced_pod_disruption_budget(self):
"""Test case for list_namespaced_pod_disruption_budget
"""
pass
    def test_list_pod_disruption_budget_for_all_namespaces(self):
        """Test case for list_pod_disruption_budget_for_all_namespaces
        """
pass
def test_list_pod_security_policy(self):
"""Test case for list_pod_security_policy
"""
pass
def test_patch_namespaced_pod_disruption_budget(self):
"""Test case for patch_namespaced_pod_disruption_budget
"""
pass
def test_patch_namespaced_pod_disruption_budget_status(self):
"""Test case for patch_namespaced_pod_disruption_budget_status
"""
pass
def test_patch_pod_security_policy(self):
"""Test case for patch_pod_security_policy
"""
pass
def test_read_namespaced_pod_disruption_budget(self):
"""Test case for read_namespaced_pod_disruption_budget
"""
pass
def test_read_namespaced_pod_disruption_budget_status(self):
"""Test case for read_namespaced_pod_disruption_budget_status
"""
pass
def test_read_pod_security_policy(self):
"""Test case for read_pod_security_policy
"""
pass
def test_replace_namespaced_pod_disruption_budget(self):
"""Test case for replace_namespaced_pod_disruption_budget
"""
pass
def test_replace_namespaced_pod_disruption_budget_status(self):
"""Test case for replace_namespaced_pod_disruption_budget_status
"""
pass
def test_replace_pod_security_policy(self):
"""Test case for replace_pod_security_policy
"""
pass
if __name__ == '__main__':
    unittest.main()
support_test.go
/*
Copyright hechain. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package config_test
import (
"crypto/rand"
"encoding/hex"
"fmt"
"os"
"os/exec"
"path/filepath"
"testing"
"github.com/golang/protobuf/proto"
"github.com/hechain20/hechain/common/channelconfig"
"github.com/hechain20/hechain/common/configtx/test"
"github.com/hechain20/hechain/discovery/support/config"
"github.com/hechain20/hechain/discovery/support/mocks"
"github.com/hechain20/hechain/internal/configtxgen/encoder"
"github.com/hechain20/hechain/internal/configtxgen/genesisconfig"
"github.com/hechain20/hechain/protoutil"
"github.com/hyperledger/fabric-protos-go/common"
"github.com/hyperledger/fabric-protos-go/discovery"
"github.com/hyperledger/fabric-protos-go/msp"
"github.com/onsi/gomega/gexec"
"github.com/stretchr/testify/require"
)
func TestMSPIDMapping(t *testing.T) {
randString := func() string {
buff := make([]byte, 10)
rand.Read(buff)
return hex.EncodeToString(buff)
}
dir := filepath.Join(os.TempDir(), fmt.Sprintf("TestMSPIDMapping_%s", randString()))
os.Mkdir(dir, 0o700)
defer os.RemoveAll(dir)
cryptogen, err := gexec.Build("github.com/hechain20/hechain/cmd/cryptogen")
require.NoError(t, err)
defer os.Remove(cryptogen)
idemixgen, err := gexec.Build("github.com/IBM/idemix/tools/idemixgen", "-mod=mod")
require.NoError(t, err)
defer os.Remove(idemixgen)
cryptoConfigDir := filepath.Join(dir, "crypto-config")
b, err := exec.Command(cryptogen, "generate", fmt.Sprintf("--output=%s", cryptoConfigDir)).CombinedOutput()
require.NoError(t, err, string(b))
idemixConfigDir := filepath.Join(dir, "crypto-config", "idemix")
b, err = exec.Command(idemixgen, "ca-keygen", fmt.Sprintf("--output=%s", idemixConfigDir)).CombinedOutput()
require.NoError(t, err, string(b))
profileConfig := genesisconfig.Load("TwoOrgsChannel", "testdata/")
ordererConfig := genesisconfig.Load("TwoOrgsOrdererGenesis", "testdata/")
profileConfig.Orderer = ordererConfig.Orderer
// Override the MSP directory with our randomly generated and populated path
for _, org := range ordererConfig.Orderer.Organizations {
org.MSPDir = filepath.Join(cryptoConfigDir, "ordererOrganizations", "example.com", "msp")
org.Name = randString()
}
// Randomize organization names
for _, org := range profileConfig.Application.Organizations {
org.Name = randString()
// Non bccsp-msp orgs don't have the crypto material produced by cryptogen,
// we need to use the idemix crypto folder instead.
if org.MSPType != "bccsp" {
org.MSPDir = filepath.Join(idemixConfigDir)
continue
}
org.MSPDir = filepath.Join(cryptoConfigDir, "peerOrganizations", "org1.example.com", "msp")
}
channelGroup, err := encoder.NewChannelGroup(profileConfig)
require.NoError(t, err)
fakeConfigGetter := &mocks.ConfigGetter{}
fakeConfigGetter.GetCurrConfigReturnsOnCall(
0,
&common.Config{
ChannelGroup: channelGroup,
},
)
cs := config.NewDiscoverySupport(fakeConfigGetter)
res, err := cs.Config("mychannel")
require.NoError(t, err)
actualKeys := make(map[string]struct{})
for key := range res.Orderers {
actualKeys[key] = struct{}{}
}
for key := range res.Msps {
actualKeys[key] = struct{}{}
}
// Note that Org3MSP is an idemix org, but it shouldn't be listed here
// because peers can't have idemix credentials
expected := map[string]struct{}{
"OrdererMSP": {},
"Org1MSP": {},
"Org2MSP": {},
}
require.Equal(t, expected, actualKeys)
}
func TestSupportGreenPath(t *testing.T) {
fakeConfigGetter := &mocks.ConfigGetter{}
fakeConfigGetter.GetCurrConfigReturnsOnCall(0, nil)
cs := config.NewDiscoverySupport(fakeConfigGetter)
res, err := cs.Config("test")
require.Nil(t, res)
require.Equal(t, "could not get last config for channel test", err.Error())
config, err := test.MakeChannelConfig("test")
require.NoError(t, err)
require.NotNil(t, config)
fakeConfigGetter.GetCurrConfigReturnsOnCall(1, config)
res, err = cs.Config("test")
require.NoError(t, err)
require.NotNil(t, res)
}
func TestValidateConfig(t *testing.T) {
tests := []struct {
name string
config *common.Config
containsError string
}{
{
name: "nil Config field",
config: &common.Config{},
containsError: "field Config.ChannelGroup is nil",
},
{
name: "nil Groups field",
config: &common.Config{
ChannelGroup: &common.ConfigGroup{},
},
containsError: "field Config.ChannelGroup.Groups is nil",
},
{
name: "no orderer group key",
config: &common.Config{
ChannelGroup: &common.ConfigGroup{
Groups: map[string]*common.ConfigGroup{
channelconfig.ApplicationGroupKey: {},
},
},
},
containsError: "key Config.ChannelGroup.Groups[Orderer] is missing",
},
{
name: "no application group key",
config: &common.Config{
ChannelGroup: &common.ConfigGroup{
Groups: map[string]*common.ConfigGroup{
channelconfig.OrdererGroupKey: {
Groups: map[string]*common.ConfigGroup{},
},
},
},
},
containsError: "key Config.ChannelGroup.Groups[Application] is missing",
},
{
name: "no groups key in orderer group",
config: &common.Config{
ChannelGroup: &common.ConfigGroup{
Groups: map[string]*common.ConfigGroup{
channelconfig.ApplicationGroupKey: {
Groups: map[string]*common.ConfigGroup{},
},
channelconfig.OrdererGroupKey: {},
},
},
},
containsError: "key Config.ChannelGroup.Groups[Orderer].Groups is nil",
},
{
name: "no groups key in application group",
config: &common.Config{
ChannelGroup: &common.ConfigGroup{
Groups: map[string]*common.ConfigGroup{
channelconfig.ApplicationGroupKey: {},
channelconfig.OrdererGroupKey: {
Groups: map[string]*common.ConfigGroup{},
},
},
},
},
containsError: "key Config.ChannelGroup.Groups[Application].Groups is nil",
},
{
name: "no Values in ChannelGroup",
config: &common.Config{
ChannelGroup: &common.ConfigGroup{
Groups: map[string]*common.ConfigGroup{
channelconfig.ApplicationGroupKey: {
Groups: map[string]*common.ConfigGroup{},
},
channelconfig.OrdererGroupKey: {
Groups: map[string]*common.ConfigGroup{},
},
},
},
},
containsError: "field Config.ChannelGroup.Values is nil",
},
}
for _, test := range tests {
test := test
t.Run(test.name, func(t *testing.T) {
err := config.ValidateConfig(test.config)
			require.Contains(t, err.Error(), test.containsError)
})
}
}
func TestOrdererEndpoints(t *testing.T) {
t.Run("Global endpoints", func(t *testing.T) {
channelConfig, err := test.MakeChannelConfig("mychannel")
require.NoError(t, err)
fakeConfigGetter := &mocks.ConfigGetter{}
cs := config.NewDiscoverySupport(fakeConfigGetter)
fakeConfigGetter.GetCurrConfigReturnsOnCall(0, channelConfig)
injectGlobalOrdererEndpoint(t, channelConfig, "globalEndpoint:7050")
res, err := cs.Config("test")
require.NoError(t, err)
require.Equal(t, map[string]*discovery.Endpoints{
"SampleOrg": {Endpoint: []*discovery.Endpoint{{Host: "globalEndpoint", Port: 7050}}},
}, res.Orderers)
})
t.Run("Per org endpoints alongside global endpoints", func(t *testing.T) {
channelConfig, err := test.MakeChannelConfig("mychannel")
require.NoError(t, err)
fakeConfigGetter := &mocks.ConfigGetter{}
cs := config.NewDiscoverySupport(fakeConfigGetter)
fakeConfigGetter.GetCurrConfigReturnsOnCall(0, channelConfig)
injectAdditionalEndpointPair(t, channelConfig, "perOrgEndpoint:7050", "anotherOrg")
injectAdditionalEndpointPair(t, channelConfig, "endpointWithoutAPortName", "aBadOrg")
res, err := cs.Config("test")
require.NoError(t, err)
require.Equal(t, map[string]*discovery.Endpoints{
"SampleOrg": {Endpoint: []*discovery.Endpoint{{Host: "127.0.0.1", Port: 7050}}},
"anotherOrg": {Endpoint: []*discovery.Endpoint{{Host: "perOrgEndpoint", Port: 7050}}},
"aBadOrg": {},
}, res.Orderers)
})
t.Run("Per org endpoints without global endpoints", func(t *testing.T) {
channelConfig, err := test.MakeChannelConfig("mychannel")
require.NoError(t, err)
fakeConfigGetter := &mocks.ConfigGetter{}
cs := config.NewDiscoverySupport(fakeConfigGetter)
fakeConfigGetter.GetCurrConfigReturnsOnCall(0, channelConfig)
removeGlobalEndpoints(t, channelConfig)
injectAdditionalEndpointPair(t, channelConfig, "perOrgEndpoint:7050", "SampleOrg")
injectAdditionalEndpointPair(t, channelConfig, "endpointWithoutAPortName", "aBadOrg")
res, err := cs.Config("test")
require.NoError(t, err)
require.Equal(t, map[string]*discovery.Endpoints{
"SampleOrg": {Endpoint: []*discovery.Endpoint{{Host: "perOrgEndpoint", Port: 7050}}},
"aBadOrg": {},
}, res.Orderers)
})
}
func removeGlobalEndpoints(t *testing.T, config *common.Config) {
// Remove the orderer addresses
delete(config.ChannelGroup.Values, channelconfig.OrdererAddressesKey)
}
func injectGlobalOrdererEndpoint(t *testing.T, config *common.Config, endpoint string) {
ordererAddresses := channelconfig.OrdererAddressesValue([]string{endpoint})
// Replace the orderer addresses
config.ChannelGroup.Values[ordererAddresses.Key()] = &common.ConfigValue{
Value: protoutil.MarshalOrPanic(ordererAddresses.Value()),
ModPolicy: "/Channel/Orderer/Admins",
}
// Remove the per org addresses, if applicable
ordererGrps := config.ChannelGroup.Groups[channelconfig.OrdererGroupKey].Groups
for _, grp := range ordererGrps {
if grp.Values[channelconfig.EndpointsKey] == nil {
continue
}
grp.Values[channelconfig.EndpointsKey].Value = nil
}
}
func | (t *testing.T, config *common.Config, endpoint string, orgName string) {
ordererGrp := config.ChannelGroup.Groups[channelconfig.OrdererGroupKey].Groups
// Get the first orderer org config
var firstOrdererConfig *common.ConfigGroup
for _, grp := range ordererGrp {
firstOrdererConfig = grp
break
}
// Duplicate it.
secondOrdererConfig := proto.Clone(firstOrdererConfig).(*common.ConfigGroup)
ordererGrp[orgName] = secondOrdererConfig
// Reach the FabricMSPConfig buried in it.
mspConfig := &msp.MSPConfig{}
err := proto.Unmarshal(secondOrdererConfig.Values[channelconfig.MSPKey].Value, mspConfig)
require.NoError(t, err)
fabricConfig := &msp.FabricMSPConfig{}
err = proto.Unmarshal(mspConfig.Config, fabricConfig)
require.NoError(t, err)
// Rename it.
fabricConfig.Name = orgName
// Pack the MSP config back into the config
secondOrdererConfig.Values[channelconfig.MSPKey].Value = protoutil.MarshalOrPanic(&msp.MSPConfig{
Config: protoutil.MarshalOrPanic(fabricConfig),
Type: mspConfig.Type,
})
// Inject the endpoint
ordererOrgProtos := &common.OrdererAddresses{
Addresses: []string{endpoint},
}
secondOrdererConfig.Values[channelconfig.EndpointsKey].Value = protoutil.MarshalOrPanic(ordererOrgProtos)
}
| injectAdditionalEndpointPair |
test.rs | use proptest::prop_assert_eq;
use proptest::test_runner::{Config, TestRunner};
use crate::erlang::is_binary_1::result;
use crate::test::strategy;
use crate::test::with_process_arc;
#[test]
fn without_binary_returns_false() {
run!(
|arc_process| strategy::term::is_not_binary(arc_process.clone()),
|term| {
prop_assert_eq!(result(term), false.into());
Ok(())
},
);
}
#[test]
fn with_binary_returns_true() {
with_process_arc(|arc_process| { | .run(&strategy::term::is_binary(arc_process.clone()), |term| {
prop_assert_eq!(result(term), true.into());
Ok(())
})
.unwrap();
});
} | TestRunner::new(Config::with_source_file(file!())) |
system_contracts_access.rs | use lazy_static::lazy_static;
use engine_core::execution;
use engine_shared::transform::TypeMismatch;
use engine_test_support::{
internal::{
ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_GENESIS_CONFIG, DEFAULT_PAYMENT,
},
DEFAULT_ACCOUNT_ADDR,
};
use types::{URef, U512};
const CONTRACT_SYSTEM_CONTRACTS_ACCESS: &str = "system_contracts_access.wasm";
const CONTRACT_OVERWRITE_UREF_CONTENT: &str = "overwrite_uref_content.wasm";
const CONTRACT_TRANSFER_TO_ACCOUNT_01: &str = "transfer_to_account_01.wasm";
const SYSTEM_ADDR: [u8; 32] = [0u8; 32];
const ACCOUNT_1_ADDR: [u8; 32] = [1u8; 32];
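// By convention in these tests, the all-zero address acts as the system account,
// while [1u8; 32] is an arbitrary user account.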
lazy_static! {
static ref ACCOUNT_1_INITIAL_BALANCE: U512 = *DEFAULT_PAYMENT * 10;
static ref SYSTEM_INITIAL_BALANCE: U512 = *DEFAULT_PAYMENT * 10;
}
fn | (builder: &mut InMemoryWasmTestBuilder, address: [u8; 32]) {
let exec_request =
ExecuteRequestBuilder::standard(address, CONTRACT_SYSTEM_CONTRACTS_ACCESS, ()).build();
builder.exec(exec_request).expect_success().commit();
}
#[ignore]
#[test]
fn should_verify_system_contracts_access_rights_default() {
let mut builder = InMemoryWasmTestBuilder::default();
let exec_request_1 = ExecuteRequestBuilder::standard(
DEFAULT_ACCOUNT_ADDR,
CONTRACT_TRANSFER_TO_ACCOUNT_01,
(ACCOUNT_1_ADDR, *ACCOUNT_1_INITIAL_BALANCE),
)
.build();
builder
.run_genesis(&DEFAULT_GENESIS_CONFIG)
.exec(exec_request_1)
.expect_success()
.commit();
run_test_with_address(&mut builder, DEFAULT_ACCOUNT_ADDR);
run_test_with_address(&mut builder, ACCOUNT_1_ADDR);
}
fn overwrite_as_account(builder: &mut InMemoryWasmTestBuilder, uref: URef, address: [u8; 32]) {
let exec_request =
ExecuteRequestBuilder::standard(address, CONTRACT_OVERWRITE_UREF_CONTENT, (uref,)).build();
let result = builder.exec(exec_request).commit().finish();
let error_msg = result
.builder()
.exec_error_message(0)
.expect("should execute with error");
let err = format!(
"{}",
execution::Error::ForgedReference(uref.into_read_add_write())
);
assert!(error_msg.contains(&err), "error_msg: {:?}", error_msg);
}
#[ignore]
#[test]
fn should_not_overwrite_system_contract_uref_as_user() {
let mut builder = InMemoryWasmTestBuilder::default();
let exec_request_1 = ExecuteRequestBuilder::standard(
DEFAULT_ACCOUNT_ADDR,
CONTRACT_TRANSFER_TO_ACCOUNT_01,
(ACCOUNT_1_ADDR, *ACCOUNT_1_INITIAL_BALANCE),
)
.build();
builder.run_genesis(&DEFAULT_GENESIS_CONFIG);
let mint_uref = builder.get_mint_contract_uref().into_read();
let pos_uref = builder.get_pos_contract_uref().into_read();
let result = builder
.exec(exec_request_1)
.expect_success()
.commit()
.finish();
let mut builder = InMemoryWasmTestBuilder::from_result(result);
// Try to break system contracts as user created through transfer process
overwrite_as_account(&mut builder, mint_uref, ACCOUNT_1_ADDR);
overwrite_as_account(&mut builder, pos_uref, ACCOUNT_1_ADDR);
// Try to break system contracts as user created through genesis process
overwrite_as_account(&mut builder, mint_uref, DEFAULT_ACCOUNT_ADDR);
overwrite_as_account(&mut builder, pos_uref, DEFAULT_ACCOUNT_ADDR);
}
#[ignore]
#[test]
fn should_overwrite_system_contract_uref_as_system() {
let exec_request_1 = ExecuteRequestBuilder::standard(
DEFAULT_ACCOUNT_ADDR,
CONTRACT_TRANSFER_TO_ACCOUNT_01,
(SYSTEM_ADDR, *SYSTEM_INITIAL_BALANCE),
)
.build();
let result = InMemoryWasmTestBuilder::default()
.run_genesis(&DEFAULT_GENESIS_CONFIG)
.exec(exec_request_1)
.expect_success()
.commit()
.finish();
let mint_uref = result.builder().get_mint_contract_uref();
let pos_uref = result.builder().get_pos_contract_uref();
let exec_request_2 =
ExecuteRequestBuilder::standard(SYSTEM_ADDR, CONTRACT_OVERWRITE_UREF_CONTENT, (mint_uref,))
.build();
let mut new_builder = InMemoryWasmTestBuilder::from_result(result);
let result_mint = new_builder.clone().exec(exec_request_2).commit().finish();
let error_msg = result_mint
.builder()
.exec_error_message(0)
.expect("should execute mint overwrite with error");
assert!(
error_msg.contains("FinalizationError"),
"Expected FinalizationError, got {}",
error_msg
);
let exec_request_3 =
ExecuteRequestBuilder::standard(SYSTEM_ADDR, CONTRACT_OVERWRITE_UREF_CONTENT, (pos_uref,))
.build();
let result_pos = new_builder.exec(exec_request_3).commit().finish();
let error_msg = result_pos
.builder()
.exec_error_message(0)
.expect("should execute pos overwrite with error");
let type_mismatch = TypeMismatch::new("Contract".to_string(), "String".to_string());
let expected_error = execution::Error::TypeMismatch(type_mismatch);
assert!(error_msg.contains(&expected_error.to_string()));
}
| run_test_with_address |
mock.go | package apimock
import (
"context"
"fmt"
"github.com/gopasspw/gopass/internal/store/mockstore"
"github.com/gopasspw/gopass/pkg/gopass"
)
// Secret is a mock secret for writing
type Secret struct {
Buf []byte
}
// Bytes returns the underlying bytes
func (m *Secret) Bytes() []byte {
return m.Buf
}
// MockAPI is a gopass API mock
type MockAPI struct {
store *mockstore.MockStore
}
// New creates a new gopass API mock
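// Usage sketch (hypothetical): api := New(); _ = api.Set(ctx, "foo", &Secret{Buf: []byte("bar")})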
func New() *MockAPI {
return &MockAPI{
store: mockstore.New(""),
}
}
// String returns mockapi
func (a *MockAPI) String() string {
return "mockapi"
} | return a.store.List(ctx, "")
}
// Get returns a secret from the mock store
func (a *MockAPI) Get(ctx context.Context, name, _ string) (gopass.Secret, error) {
return a.store.Get(ctx, name)
}
// Revisions lists the revision hashes of a secret in the mock store
func (a *MockAPI) Revisions(ctx context.Context, name string) ([]string, error) {
rs, err := a.store.ListRevisions(ctx, name)
if err != nil {
return nil, err
}
revs := make([]string, 0, len(rs))
for _, r := range rs {
revs = append(revs, r.Hash)
}
return revs, nil
}
// Set writes a secret to the mock store
func (a *MockAPI) Set(ctx context.Context, name string, sec gopass.Byter) error {
return a.store.Set(ctx, name, sec)
}
// Remove deletes a secret from the mock store
func (a *MockAPI) Remove(ctx context.Context, name string) error {
return a.store.Delete(ctx, name)
}
// RemoveAll deletes all secrets under the given prefix
func (a *MockAPI) RemoveAll(ctx context.Context, prefix string) error {
return a.store.Prune(ctx, prefix)
}
// Rename moves a secret to a new location in the mock store
func (a *MockAPI) Rename(ctx context.Context, src, dest string) error {
return a.store.Move(ctx, src, dest)
}
// Sync is not implemented and always returns an error
func (a *MockAPI) Sync(ctx context.Context) error {
return fmt.Errorf("not yet implemented")
}
// Close does nothing
func (a *MockAPI) Close(ctx context.Context) error {
return nil
} |
// List returns the names of all secrets in the mock store
func (a *MockAPI) List(ctx context.Context) ([]string, error) { |
lior1.js | const http = require('http');
const fs = require('fs')
http.createServer((req, res) => {
console.log(req.url);
if (req.url === '/') {
| res.end();
})
}
else if (req.url === '/style.css') {
fs.readFile('style.css', 'utf-8', (err, data) => {
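// NOTE: err is ignored in these handlers; a robust server would check it and return a 500.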
res.writeHead(200, { 'Content-Type': 'text/css' })
res.write(data);
res.end();
})
}
})
.listen(8083, () => {
console.log('server listening on port 8083')
}); | fs.readFile('index.html', 'utf-8', (err, data) => {
res.writeHead(200, { 'Content-Type': 'text/html' })
res.write(data);
|
metadata_easyjson.go | // Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT.
package hardware
import (
json "encoding/json"
easyjson "github.com/mailru/easyjson"
jlexer "github.com/mailru/easyjson/jlexer"
jwriter "github.com/mailru/easyjson/jwriter"
)
// suppress unused package warning
var (
_ *json.RawMessage
_ *jlexer.Lexer
_ *jwriter.Writer
_ easyjson.Marshaler
)
func easyjsonBa0ee0e3DecodeGithubComSkydiveProjectSkydiveTopologyProbesHardware(in *jlexer.Lexer, out *CPUInfos) {
isTopLevel := in.IsStart()
if in.IsNull() {
in.Skip()
*out = nil
} else {
in.Delim('[')
if *out == nil {
if !in.IsDelim(']') {
*out = make(CPUInfos, 0, 8)
} else {
*out = CPUInfos{}
}
} else {
*out = (*out)[:0]
}
for !in.IsDelim(']') {
var v1 *CPUInfo
if in.IsNull() {
in.Skip()
v1 = nil
} else {
if v1 == nil {
v1 = new(CPUInfo)
}
(*v1).UnmarshalEasyJSON(in)
}
*out = append(*out, v1)
in.WantComma()
}
in.Delim(']')
}
if isTopLevel {
in.Consumed()
}
}
func | (out *jwriter.Writer, in CPUInfos) {
if in == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 {
out.RawString("null")
} else {
out.RawByte('[')
for v2, v3 := range in {
if v2 > 0 {
out.RawByte(',')
}
if v3 == nil {
out.RawString("null")
} else {
(*v3).MarshalEasyJSON(out)
}
}
out.RawByte(']')
}
}
// MarshalJSON supports json.Marshaler interface
func (v CPUInfos) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjsonBa0ee0e3EncodeGithubComSkydiveProjectSkydiveTopologyProbesHardware(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
// MarshalEasyJSON supports easyjson.Marshaler interface
func (v CPUInfos) MarshalEasyJSON(w *jwriter.Writer) {
easyjsonBa0ee0e3EncodeGithubComSkydiveProjectSkydiveTopologyProbesHardware(w, v)
}
// UnmarshalJSON supports json.Unmarshaler interface
func (v *CPUInfos) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjsonBa0ee0e3DecodeGithubComSkydiveProjectSkydiveTopologyProbesHardware(&r, v)
return r.Error()
}
// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *CPUInfos) UnmarshalEasyJSON(l *jlexer.Lexer) {
easyjsonBa0ee0e3DecodeGithubComSkydiveProjectSkydiveTopologyProbesHardware(l, v)
}
func easyjsonBa0ee0e3DecodeGithubComSkydiveProjectSkydiveTopologyProbesHardware1(in *jlexer.Lexer, out *CPUInfo) {
isTopLevel := in.IsStart()
if in.IsNull() {
if isTopLevel {
in.Consumed()
}
in.Skip()
return
}
in.Delim('{')
for !in.IsDelim('}') {
key := in.UnsafeFieldName(false)
in.WantColon()
if in.IsNull() {
in.Skip()
in.WantComma()
continue
}
switch key {
case "CPU":
out.CPU = int64(in.Int64())
case "VendorID":
out.VendorID = string(in.String())
case "Family":
out.Family = string(in.String())
case "Model":
out.Model = string(in.String())
case "Stepping":
out.Stepping = int64(in.Int64())
case "PhysicalID":
out.PhysicalID = string(in.String())
case "CoreID":
out.CoreID = string(in.String())
case "Cores":
out.Cores = int64(in.Int64())
case "ModelName":
out.ModelName = string(in.String())
case "Mhz":
out.Mhz = int64(in.Int64())
case "CacheSize":
out.CacheSize = int64(in.Int64())
case "Microcode":
out.Microcode = string(in.String())
default:
in.SkipRecursive()
}
in.WantComma()
}
in.Delim('}')
if isTopLevel {
in.Consumed()
}
}
func easyjsonBa0ee0e3EncodeGithubComSkydiveProjectSkydiveTopologyProbesHardware1(out *jwriter.Writer, in CPUInfo) {
out.RawByte('{')
first := true
_ = first
if in.CPU != 0 {
const prefix string = ",\"CPU\":"
first = false
out.RawString(prefix[1:])
out.Int64(int64(in.CPU))
}
if in.VendorID != "" {
const prefix string = ",\"VendorID\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(in.VendorID))
}
if in.Family != "" {
const prefix string = ",\"Family\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(in.Family))
}
if in.Model != "" {
const prefix string = ",\"Model\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(in.Model))
}
if in.Stepping != 0 {
const prefix string = ",\"Stepping\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Int64(int64(in.Stepping))
}
if in.PhysicalID != "" {
const prefix string = ",\"PhysicalID\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(in.PhysicalID))
}
if in.CoreID != "" {
const prefix string = ",\"CoreID\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(in.CoreID))
}
if in.Cores != 0 {
const prefix string = ",\"Cores\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Int64(int64(in.Cores))
}
if in.ModelName != "" {
const prefix string = ",\"ModelName\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(in.ModelName))
}
if in.Mhz != 0 {
const prefix string = ",\"Mhz\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Int64(int64(in.Mhz))
}
if in.CacheSize != 0 {
const prefix string = ",\"CacheSize\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Int64(int64(in.CacheSize))
}
if in.Microcode != "" {
const prefix string = ",\"Microcode\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(in.Microcode))
}
out.RawByte('}')
}
// MarshalJSON supports json.Marshaler interface
func (v CPUInfo) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{}
easyjsonBa0ee0e3EncodeGithubComSkydiveProjectSkydiveTopologyProbesHardware1(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
// MarshalEasyJSON supports easyjson.Marshaler interface
func (v CPUInfo) MarshalEasyJSON(w *jwriter.Writer) {
easyjsonBa0ee0e3EncodeGithubComSkydiveProjectSkydiveTopologyProbesHardware1(w, v)
}
// UnmarshalJSON supports json.Unmarshaler interface
func (v *CPUInfo) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjsonBa0ee0e3DecodeGithubComSkydiveProjectSkydiveTopologyProbesHardware1(&r, v)
return r.Error()
}
// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *CPUInfo) UnmarshalEasyJSON(l *jlexer.Lexer) {
easyjsonBa0ee0e3DecodeGithubComSkydiveProjectSkydiveTopologyProbesHardware1(l, v)
}
| easyjsonBa0ee0e3EncodeGithubComSkydiveProjectSkydiveTopologyProbesHardware |
useSubscription.ts | import { ReadShape, ParamsFromShape } from '@rest-hooks/core/endpoint/index';
import { useMemo } from 'react';
import { EndpointInterface, Schema, FetchFunction } from '@rest-hooks/endpoint';
import useSubscriptionNew from '@rest-hooks/core/react-integration/newhooks/useSubscription';
import shapeToEndpoint from '@rest-hooks/core/endpoint/adapter';
/**
* Keeps a resource fresh by subscribing to updates.
* @see https://resthooks.io/docs/api/useSubscription
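* @example
* // sketch with a hypothetical endpoint:
* //   useSubscription(ArticleResource.detail(), { id: 5 });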
*/
export default function | <
E extends
| EndpointInterface<FetchFunction, Schema | undefined, undefined>
| ReadShape<any, any>,
Args extends
| (E extends (...args: any) => any
? readonly [...Parameters<E>]
: readonly [ParamsFromShape<E>])
| readonly [null],
>(endpoint: E, ...args: Args) {
const adaptedEndpoint: any = useMemo(() => {
return shapeToEndpoint(endpoint);
// we currently don't support shape changes
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
return useSubscriptionNew(adaptedEndpoint, ...args);
}
| useSubscription |
function.rs | #[cfg(feature = "jit")]
mod jitfunc;
use super::code::PyCodeRef;
use super::dict::PyDictRef;
use super::pystr::PyStrRef;
use super::pytype::PyTypeRef;
use super::tuple::{PyTupleRef, PyTupleTyped};
use crate::builtins::asyncgenerator::PyAsyncGen;
use crate::builtins::coroutine::PyCoroutine;
use crate::builtins::generator::PyGenerator;
use crate::bytecode;
use crate::common::lock::PyMutex;
use crate::frame::Frame;
use crate::function::{FuncArgs, OptionalArg};
#[cfg(feature = "jit")]
use crate::pyobject::IntoPyObject;
use crate::pyobject::{
BorrowValue, IdProtocol, ItemProtocol, PyClassImpl, PyComparisonValue, PyContext, PyObjectRef,
PyRef, PyResult, PyValue, TypeProtocol,
};
use crate::scope::Scope;
use crate::slots::{Callable, Comparable, PyComparisonOp, SlotDescriptor, SlotGetattro};
use crate::VirtualMachine;
use itertools::Itertools;
#[cfg(feature = "jit")]
use rustpython_common::lock::OnceCell;
#[cfg(feature = "jit")]
use rustpython_jit::CompiledCode;
pub type PyFunctionRef = PyRef<PyFunction>;
#[pyclass(module = false, name = "function")]
#[derive(Debug)]
pub struct PyFunction {
code: PyCodeRef,
#[cfg(feature = "jit")]
jitted_code: OnceCell<CompiledCode>,
globals: PyDictRef,
closure: Option<PyTupleTyped<PyCellRef>>,
defaults_and_kwdefaults: PyMutex<(Option<PyTupleRef>, Option<PyDictRef>)>,
name: PyMutex<PyStrRef>,
}
impl PyFunction {
pub(crate) fn new(
code: PyCodeRef,
globals: PyDictRef,
closure: Option<PyTupleTyped<PyCellRef>>,
defaults: Option<PyTupleRef>,
kw_only_defaults: Option<PyDictRef>,
) -> Self {
let name = PyMutex::new(code.obj_name.clone());
PyFunction {
code,
#[cfg(feature = "jit")]
jitted_code: OnceCell::new(),
globals,
closure,
defaults_and_kwdefaults: PyMutex::new((defaults, kw_only_defaults)),
name,
}
}
fn fill_locals_from_args(
&self,
frame: &Frame,
func_args: FuncArgs,
vm: &VirtualMachine,
) -> PyResult<()> {
let code = &*self.code;
let nargs = func_args.args.len();
let nexpected_args = code.arg_count;
let total_args = code.arg_count + code.kwonlyarg_count;
// let arg_names = self.code.arg_names();
// This parses the arguments from args and kwargs into
// the proper variables keeping into account default values
// and starargs and kwargs.
// See also: PyEval_EvalCodeWithName in cpython:
// https://github.com/python/cpython/blob/master/Python/ceval.c#L3681
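// Illustration (hypothetical call): for `def f(a, b, *args, c=1, **kw)`,
// `f(1, 2, 3, x=4)` fills a=1 and b=2 positionally, packs (3,) into the
// vararg slot, takes c=1 from the kw-only defaults, and collects kw={'x': 4}.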
let mut fastlocals = frame.fastlocals.lock();
let mut args_iter = func_args.args.into_iter();
// Copy positional arguments into local variables
// zip short-circuits if either iterator returns None, which is the behavior we want --
// only fill as much as there is to fill with as much as we have
for (local, arg) in Iterator::zip(
fastlocals.iter_mut().take(nexpected_args),
args_iter.by_ref().take(nargs),
) {
*local = Some(arg);
}
let mut vararg_offset = total_args;
// Pack other positional arguments in to *args:
if code.flags.contains(bytecode::CodeFlags::HAS_VARARGS) {
let vararg_value = vm.ctx.new_tuple(args_iter.collect());
fastlocals[vararg_offset] = Some(vararg_value);
vararg_offset += 1;
} else {
// Check the number of positional arguments
if nargs > nexpected_args {
return Err(vm.new_type_error(format!(
"Expected {} arguments (got: {})",
nexpected_args, nargs
)));
}
}
// Do we support `**kwargs` ?
let kwargs = if code.flags.contains(bytecode::CodeFlags::HAS_VARKEYWORDS) {
let d = vm.ctx.new_dict();
fastlocals[vararg_offset] = Some(d.clone().into_object());
Some(d)
} else {
None
};
let argpos = |range: std::ops::Range<_>, name: &str| {
code.varnames
.iter()
.enumerate()
.skip(range.start)
.take(range.end - range.start)
.find(|(_, s)| s.borrow_value() == name)
.map(|(p, _)| p)
};
let mut posonly_passed_as_kwarg = Vec::new();
// Handle keyword arguments
for (name, value) in func_args.kwargs {
// Check if we have a parameter with this name:
if let Some(pos) = argpos(code.posonlyarg_count..total_args, &name) {
let slot = &mut fastlocals[pos];
if slot.is_some() {
return Err(
vm.new_type_error(format!("Got multiple values for argument '{}'", name))
);
}
*slot = Some(value);
} else if argpos(0..code.posonlyarg_count, &name).is_some() {
posonly_passed_as_kwarg.push(name);
} else if let Some(kwargs) = kwargs.as_ref() {
kwargs.set_item(name, value, vm)?;
} else {
return Err(
vm.new_type_error(format!("got an unexpected keyword argument '{}'", name))
);
}
}
if !posonly_passed_as_kwarg.is_empty() {
return Err(vm.new_type_error(format!(
"{}() got some positional-only arguments passed as keyword arguments: '{}'",
&self.code.obj_name,
posonly_passed_as_kwarg.into_iter().format(", "),
)));
}
let mut defaults_and_kwdefaults = None;
// can't be a closure because it returns a reference to a captured variable
macro_rules! get_defaults {
() => {{
defaults_and_kwdefaults
.get_or_insert_with(|| self.defaults_and_kwdefaults.lock().clone())
}};
}
// Add missing positional arguments, if we have fewer positional arguments than the
// function definition calls for
if nargs < nexpected_args {
let defaults = get_defaults!().0.as_ref().map(|tup| tup.borrow_value());
let ndefs = defaults.map_or(0, |d| d.len());
let nrequired = code.arg_count - ndefs;
// Given the number of defaults available, check all the arguments for which we
// _don't_ have defaults; if any are missing, raise an exception
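// e.g. (hypothetical) `def f(a, b, c=3)` called as `f(1)`: nrequired is 2,
// so `b` is reported missing while `c` falls back to its default.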
let mut missing = vec![];
for i in nargs..nrequired {
if fastlocals[i].is_none() {
missing.push(&code.varnames[i]);
}
}
if !missing.is_empty() {
return Err(vm.new_type_error(format!(
"Missing {} required positional arguments: {}",
missing.len(),
missing.iter().format(", ")
)));
}
if let Some(defaults) = defaults {
let n = std::cmp::min(nargs, nexpected_args);
let i = n.saturating_sub(nrequired);
// We have sufficient defaults, so iterate over the corresponding names and use
// the default if we don't already have a value
for i in i..defaults.len() {
let slot = &mut fastlocals[nrequired + i];
if slot.is_none() {
*slot = Some(defaults[i].clone());
}
}
}
};
if code.kwonlyarg_count > 0 {
// TODO: compile a list of missing arguments
// let mut missing = vec![];
// Check if kw only arguments are all present:
for (slot, kwarg) in fastlocals
.iter_mut()
.zip(&*code.varnames)
.skip(code.arg_count)
.take(code.kwonlyarg_count)
{
if slot.is_none() {
if let Some(defaults) = &get_defaults!().1 {
if let Some(default) = defaults.get_item_option(kwarg.clone(), vm)? {
*slot = Some(default);
continue;
}
}
// No default value and not specified.
return Err(vm.new_type_error(format!(
"Missing required kw only argument: '{}'",
kwarg
)));
}
}
}
if let Some(cell2arg) = code.cell2arg.as_deref() {
for (cell_idx, arg_idx) in cell2arg.iter().enumerate().filter(|(_, i)| **i != -1) {
let x = fastlocals[*arg_idx as usize].take();
frame.cells_frees[cell_idx].set(x);
}
}
Ok(())
}
pub fn invoke_with_locals(
&self,
func_args: FuncArgs,
locals: Option<PyDictRef>,
vm: &VirtualMachine,
) -> PyResult {
#[cfg(feature = "jit")]
if let Some(jitted_code) = self.jitted_code.get() {
match jitfunc::get_jit_args(self, &func_args, jitted_code, vm) {
Ok(args) => {
return Ok(args.invoke().into_pyobject(vm));
}
Err(err) => info!(
"jit: function `{}` is falling back to being interpreted because of the \
error: {}",
self.code.obj_name, err
),
}
}
let code = &self.code;
let locals = if self.code.flags.contains(bytecode::CodeFlags::NEW_LOCALS) {
vm.ctx.new_dict()
} else {
locals.unwrap_or_else(|| self.globals.clone())
};
// Construct frame:
let frame = Frame::new(
code.clone(),
Scope::new(Some(locals), self.globals.clone()),
vm.builtins.dict().unwrap(),
self.closure.as_ref().map_or(&[], |c| c.borrow_value()),
vm,
)
.into_ref(vm);
self.fill_locals_from_args(&frame, func_args, vm)?;
// If we have a generator, create a new generator
let is_gen = code.flags.contains(bytecode::CodeFlags::IS_GENERATOR);
let is_coro = code.flags.contains(bytecode::CodeFlags::IS_COROUTINE);
match (is_gen, is_coro) {
(true, false) => Ok(PyGenerator::new(frame, self.name()).into_object(vm)),
(false, true) => Ok(PyCoroutine::new(frame, self.name()).into_object(vm)),
(true, true) => Ok(PyAsyncGen::new(frame, self.name()).into_object(vm)),
(false, false) => vm.run_frame_full(frame),
}
}
pub fn invoke(&self, func_args: FuncArgs, vm: &VirtualMachine) -> PyResult {
self.invoke_with_locals(func_args, None, vm)
}
}
impl PyValue for PyFunction {
fn class(vm: &VirtualMachine) -> &PyTypeRef {
&vm.ctx.types.function_type
}
}
#[pyimpl(with(SlotDescriptor, Callable), flags(HAS_DICT, METHOD_DESCR))]
impl PyFunction {
#[pyproperty(magic)]
fn code(&self) -> PyCodeRef {
self.code.clone()
}
#[pyproperty(magic)]
fn defaults(&self) -> Option<PyTupleRef> {
self.defaults_and_kwdefaults.lock().0.clone()
}
#[pyproperty(magic, setter)]
fn set_defaults(&self, defaults: Option<PyTupleRef>) {
self.defaults_and_kwdefaults.lock().0 = defaults
}
#[pyproperty(magic)]
fn kwdefaults(&self) -> Option<PyDictRef> {
self.defaults_and_kwdefaults.lock().1.clone()
}
#[pyproperty(magic, setter)]
fn set_kwdefaults(&self, kwdefaults: Option<PyDictRef>) {
self.defaults_and_kwdefaults.lock().1 = kwdefaults
}
#[pyproperty(magic)]
fn globals(&self) -> PyDictRef {
self.globals.clone()
}
#[pyproperty(magic)]
fn closure(&self) -> Option<PyTupleTyped<PyCellRef>> {
self.closure.clone()
}
#[pyproperty(magic)]
fn name(&self) -> PyStrRef {
self.name.lock().clone()
}
#[pyproperty(magic, setter)]
fn set_name(&self, name: PyStrRef) {
*self.name.lock() = name;
}
#[pymethod(magic)]
fn repr(zelf: PyRef<Self>) -> String {
format!("<function {} at {:#x}>", zelf.name.lock(), zelf.get_id())
}
#[cfg(feature = "jit")]
#[pymethod(magic)]
fn jit(zelf: PyRef<Self>, vm: &VirtualMachine) -> PyResult<()> {
zelf.jitted_code
.get_or_try_init(|| {
let arg_types = jitfunc::get_jit_arg_types(&zelf, vm)?;
rustpython_jit::compile(&zelf.code.code, &arg_types)
.map_err(|err| jitfunc::new_jit_error(err.to_string(), vm))
})
.map(drop)
}
}
impl SlotDescriptor for PyFunction {
fn descr_get(
zelf: PyObjectRef,
obj: Option<PyObjectRef>,
cls: Option<PyObjectRef>,
vm: &VirtualMachine,
) -> PyResult {
let (zelf, obj) = Self::_unwrap(zelf, obj, vm)?;
if vm.is_none(&obj) && !Self::_cls_is(&cls, &obj.class()) {
Ok(zelf.into_object())
} else {
Ok(vm.ctx.new_bound_method(zelf.into_object(), obj))
}
}
}
impl Callable for PyFunction {
fn call(zelf: &PyRef<Self>, args: FuncArgs, vm: &VirtualMachine) -> PyResult {
zelf.invoke(args, vm)
}
}
#[pyclass(module = false, name = "method")]
#[derive(Debug)]
pub struct PyBoundMethod {
// TODO: these shouldn't be public
object: PyObjectRef,
pub function: PyObjectRef,
}
impl Callable for PyBoundMethod {
fn call(zelf: &PyRef<Self>, mut args: FuncArgs, vm: &VirtualMachine) -> PyResult {
args.prepend_arg(zelf.object.clone());
vm.invoke(&zelf.function, args)
}
}
impl Comparable for PyBoundMethod {
fn cmp(
zelf: &PyRef<Self>,
other: &PyObjectRef,
op: PyComparisonOp,
_vm: &VirtualMachine,
) -> PyResult<PyComparisonValue> {
op.eq_only(|| {
let other = class_or_notimplemented!(Self, other);
Ok(PyComparisonValue::Implemented(
zelf.function.is(&other.function) && zelf.object.is(&other.object),
))
})
}
}
impl SlotGetattro for PyBoundMethod {
fn getattro(zelf: PyRef<Self>, name: PyStrRef, vm: &VirtualMachine) -> PyResult {
if let Some(obj) = zelf.get_class_attr(name.borrow_value()) {
return vm.call_if_get_descriptor(obj, zelf.into_object());
}
vm.get_attribute(zelf.function.clone(), name)
}
}
#[pyimpl(with(Callable, Comparable, SlotGetattro), flags(HAS_DICT))]
impl PyBoundMethod {
pub fn new(object: PyObjectRef, function: PyObjectRef) -> Self {
PyBoundMethod { object, function }
}
#[pyslot] | cls: PyTypeRef,
function: PyObjectRef,
object: PyObjectRef,
vm: &VirtualMachine,
) -> PyResult<PyRef<Self>> {
PyBoundMethod::new(object, function).into_ref_with_type(vm, cls)
}
#[pymethod(magic)]
fn repr(&self, vm: &VirtualMachine) -> PyResult<String> {
let funcname = if let Some(qname) =
vm.get_attribute_opt(self.function.clone(), "__qualname__")?
{
Some(qname)
} else if let Some(name) = vm.get_attribute_opt(self.function.clone(), "__name__")? {
Some(name)
} else {
None
};
let funcname: Option<PyStrRef> = funcname.and_then(|o| o.downcast().ok());
Ok(format!(
"<bound method {} of {}>",
funcname.as_ref().map_or("?", |s| s.borrow_value()),
vm.to_repr(&self.object)?.borrow_value(),
))
}
#[pyproperty(magic)]
fn doc(&self, vm: &VirtualMachine) -> PyResult {
vm.get_attribute(self.function.clone(), "__doc__")
}
#[pyproperty(magic)]
fn func(&self) -> PyObjectRef {
self.function.clone()
}
#[pyproperty(magic)]
fn module(&self, vm: &VirtualMachine) -> Option<PyObjectRef> {
vm.get_attribute(self.function.clone(), "__module__").ok()
}
}
impl PyValue for PyBoundMethod {
fn class(vm: &VirtualMachine) -> &PyTypeRef {
&vm.ctx.types.bound_method_type
}
}
#[pyclass(module = false, name = "cell")]
#[derive(Debug, Default)]
pub(crate) struct PyCell {
contents: PyMutex<Option<PyObjectRef>>,
}
pub(crate) type PyCellRef = PyRef<PyCell>;
impl PyValue for PyCell {
fn class(vm: &VirtualMachine) -> &PyTypeRef {
&vm.ctx.types.cell_type
}
}
#[pyimpl]
impl PyCell {
#[pyslot]
fn tp_new(cls: PyTypeRef, value: OptionalArg, vm: &VirtualMachine) -> PyResult<PyRef<Self>> {
Self::new(value.into_option()).into_ref_with_type(vm, cls)
}
pub fn new(contents: Option<PyObjectRef>) -> Self {
Self {
contents: PyMutex::new(contents),
}
}
pub fn get(&self) -> Option<PyObjectRef> {
self.contents.lock().clone()
}
pub fn set(&self, x: Option<PyObjectRef>) {
*self.contents.lock() = x;
}
#[pyproperty]
fn cell_contents(&self, vm: &VirtualMachine) -> PyResult {
self.get()
.ok_or_else(|| vm.new_value_error("Cell is empty".to_owned()))
}
#[pyproperty(setter)]
fn set_cell_contents(&self, x: PyObjectRef) {
self.set(Some(x))
}
#[pyproperty(deleter)]
fn del_cell_contents(&self) {
self.set(None)
}
}
pub fn init(context: &PyContext) {
PyFunction::extend_class(context, &context.types.function_type);
PyBoundMethod::extend_class(context, &context.types.bound_method_type);
PyCell::extend_class(context, &context.types.cell_type);
} | fn tp_new( |
actor.test.ts | import {
Machine,
spawn,
interpret,
ActorRef,
ActorRefFrom,
Behavior,
createMachine,
EventObject
} from '../src';
import {
assign,
send,
sendParent,
raise,
doneInvoke,
sendUpdate,
respond,
forwardTo,
error
} from '../src/actions';
import { interval } from 'rxjs';
import { map } from 'rxjs/operators';
import { fromPromise } from '../src/behaviors';
describe('spawning machines', () => {
const todoMachine = Machine({
id: 'todo',
initial: 'incomplete',
states: {
incomplete: {
on: { SET_COMPLETE: 'complete' }
},
complete: {
onEntry: sendParent({ type: 'TODO_COMPLETED' })
}
}
});
const context = {
todoRefs: {} as Record<string, ActorRef<any>>
};
type TodoEvent =
| {
type: 'ADD';
id: number;
}
| {
type: 'SET_COMPLETE';
id: number;
}
| {
type: 'TODO_COMPLETED';
};
const todosMachine = Machine<any, TodoEvent>({
id: 'todos',
context: context,
initial: 'active',
states: {
active: {
on: {
TODO_COMPLETED: 'success'
}
},
success: {
type: 'final'
}
},
on: {
ADD: {
actions: assign({
todoRefs: (ctx, e) => ({
...ctx.todoRefs,
[e.id]: spawn(todoMachine)
})
})
},
SET_COMPLETE: {
actions: send('SET_COMPLETE', {
to: (ctx, e: Extract<TodoEvent, { type: 'SET_COMPLETE' }>) => {
return ctx.todoRefs[e.id];
}
})
}
}
});
// Adaptation: https://github.com/p-org/P/wiki/PingPong-program
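// Protocol sketch: the client spawns the server, sends PING, and reaches its
// final state once the server replies with PONG.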
type PingPongEvent =
| { type: 'PING' }
| { type: 'PONG' }
| { type: 'SUCCESS' };
const serverMachine = Machine<any, PingPongEvent>({
id: 'server',
initial: 'waitPing',
states: {
waitPing: {
on: {
PING: 'sendPong'
}
},
sendPong: {
entry: [sendParent('PONG'), raise('SUCCESS')],
on: {
SUCCESS: 'waitPing'
}
}
}
});
interface ClientContext {
server?: ActorRef<PingPongEvent>;
}
const clientMachine = Machine<ClientContext, PingPongEvent>({
id: 'client',
initial: 'init',
context: {
server: undefined
},
states: {
init: {
entry: [
assign({
server: () => spawn(serverMachine)
}),
raise('SUCCESS')
],
on: {
SUCCESS: 'sendPing'
}
},
sendPing: {
entry: [send('PING', { to: (ctx) => ctx.server! }), raise('SUCCESS')],
on: {
SUCCESS: 'waitPong'
}
},
waitPong: {
on: {
PONG: 'complete'
}
},
complete: {
type: 'final'
}
}
});
it('should invoke actors', (done) => {
const service = interpret(todosMachine)
.onDone(() => {
done();
})
.start();
service.send('ADD', { id: 42 });
service.send('SET_COMPLETE', { id: 42 });
});
it('should invoke actors (when sending batch)', (done) => {
const service = interpret(todosMachine)
.onDone(() => {
done();
})
.start();
service.send([{ type: 'ADD', id: 42 }]);
service.send('SET_COMPLETE', { id: 42 });
});
it('should invoke a null actor if spawned outside of a service', () => {
expect(spawn(todoMachine)).toBeTruthy();
});
it('should allow bidirectional communication between parent/child actors', (done) => {
interpret(clientMachine)
.onDone(() => {
done();
})
.start();
});
});
describe('spawning promises', () => {
const promiseMachine = Machine<any>({
id: 'promise',
initial: 'idle',
context: {
promiseRef: undefined
},
states: {
idle: {
entry: assign({
promiseRef: () => {
const ref = spawn(
new Promise((res) => {
res('response');
}),
'my-promise'
);
return ref;
}
}),
on: {
[doneInvoke('my-promise')]: {
target: 'success',
cond: (_, e) => e.data === 'response'
}
}
},
success: {
type: 'final'
}
}
});
it('should be able to spawn a promise', (done) => {
const promiseService = interpret(promiseMachine).onDone(() => {
done();
});
promiseService.start();
});
});
describe('spawning callbacks', () => {
const callbackMachine = Machine<any>({
id: 'callback',
initial: 'idle',
context: {
callbackRef: undefined
},
states: {
idle: {
entry: assign({
callbackRef: () =>
spawn((cb, receive) => {
receive((event) => {
if (event.type === 'START') {
setTimeout(() => {
cb('SEND_BACK');
}, 10);
}
});
})
}),
on: {
START_CB: {
actions: send('START', { to: (ctx) => ctx.callbackRef })
},
SEND_BACK: 'success'
}
},
success: {
type: 'final'
}
}
});
it('should be able to spawn an actor from a callback', (done) => {
const callbackService = interpret(callbackMachine).onDone(() => {
done();
});
callbackService.start();
callbackService.send('START_CB');
});
});
describe('spawning observables', () => {
interface Events {
type: 'INT';
value: number;
}
const observableMachine = Machine<any, Events>({
id: 'observable',
initial: 'idle',
context: {
observableRef: undefined
},
states: {
idle: {
entry: assign({
observableRef: () => {
const ref = spawn(
interval(10).pipe(
map((n) => ({
type: 'INT',
value: n
}))
)
);
return ref;
}
}),
on: {
INT: {
target: 'success',
cond: (_, e) => e.value === 5
}
}
},
success: {
type: 'final'
}
}
});
it('should be able to spawn an observable', (done) => {
const observableService = interpret(observableMachine).onDone(() => {
done();
});
observableService.start();
});
});
describe('communicating with spawned actors', () => {
it('should treat an interpreter as an actor', (done) => {
const existingMachine = Machine({
initial: 'inactive',
states: {
inactive: {
on: { ACTIVATE: 'active' }
},
active: {
entry: respond('EXISTING.DONE')
}
}
});
const existingService = interpret(existingMachine).start();
const parentMachine = Machine<any>({
initial: 'pending',
context: {
existingRef: undefined as any
},
states: {
pending: {
entry: assign({
// No need to spawn an existing service:
// existingRef: () => spawn(existingService)
existingRef: existingService
}),
on: {
'EXISTING.DONE': 'success'
},
after: {
100: {
actions: send('ACTIVATE', { to: (ctx) => ctx.existingRef })
}
}
},
success: {
type: 'final'
}
}
});
const parentService = interpret(parentMachine).onDone(() => {
done();
});
parentService.start();
});
it('should be able to communicate with arbitrary actors if sessionId is known', (done) => {
const existingMachine = Machine({
initial: 'inactive',
states: {
inactive: {
on: { ACTIVATE: 'active' }
},
active: {
entry: respond('EXISTING.DONE')
}
}
});
const existingService = interpret(existingMachine).start();
const parentMachine = Machine<any>({
initial: 'pending',
states: {
pending: {
entry: send('ACTIVATE', { to: existingService.sessionId }),
on: {
'EXISTING.DONE': 'success'
},
after: {
100: {
actions: send('ACTIVATE', { to: (ctx) => ctx.existingRef })
}
}
},
success: {
type: 'final'
}
}
});
const parentService = interpret(parentMachine).onDone(() => {
done();
});
parentService.start();
});
});
describe('actors', () => {
it('should only spawn actors defined on initial state once', () => {
let count = 0;
const startMachine = Machine<any>({
id: 'start',
initial: 'start',
context: {
items: [0, 1, 2, 3],
refs: []
},
states: {
start: {
entry: assign({
refs: (ctx) => {
count++;
const c = ctx.items.map((item) =>
spawn(new Promise((res) => res(item)))
);
return c;
}
})
}
}
});
interpret(startMachine)
.onTransition(() => {
expect(count).toEqual(1);
})
.start();
});
it('should spawn null actors if not used within a service', () => {
interface TestContext {
ref?: ActorRef<any>;
}
const nullActorMachine = Machine<TestContext>({
initial: 'foo',
context: { ref: undefined },
states: {
foo: {
entry: assign<TestContext>({
ref: () => spawn(Promise.resolve(42))
})
}
}
});
const { initialState } = nullActorMachine;
// expect(initialState.context.ref!.id).toBe('null'); // TODO: identify null actors
expect(initialState.context.ref!.send).toBeDefined();
});
describe('autoForward option', () => {
const pongActorMachine = Machine({
id: 'server',
initial: 'waitPing',
states: {
waitPing: {
on: {
PING: 'sendPong'
}
},
sendPong: {
entry: [sendParent('PONG'), raise('SUCCESS')],
on: {
SUCCESS: 'waitPing'
}
}
}
});
it('should not forward events to a spawned actor by default', () => {
let pongCounter = 0;
const machine = Machine<any>({
id: 'client',
context: { counter: 0, serverRef: undefined },
initial: 'initial',
states: {
initial: {
entry: assign(() => ({
serverRef: spawn(pongActorMachine)
})),
on: {
PONG: {
actions: () => ++pongCounter
}
}
}
}
});
const service = interpret(machine);
service.start();
service.send('PING');
service.send('PING');
expect(pongCounter).toEqual(0);
});
it('should not forward events to a spawned actor when { autoForward: false }', () => {
let pongCounter = 0;
const machine = Machine<{ counter: number; serverRef?: ActorRef<any> }>({
id: 'client',
context: { counter: 0, serverRef: undefined },
initial: 'initial',
states: {
initial: {
entry: assign((ctx) => ({
...ctx,
serverRef: spawn(pongActorMachine, { autoForward: false })
})),
on: {
PONG: {
actions: () => ++pongCounter
}
}
}
}
});
const service = interpret(machine);
service.start();
service.send('PING');
service.send('PING');
expect(pongCounter).toEqual(0);
});
it('should forward events to a spawned actor when { autoForward: true }', () => {
let pongCounter = 0;
const machine = Machine<any>({
id: 'client',
context: { counter: 0, serverRef: undefined },
initial: 'initial',
states: {
initial: {
entry: assign(() => ({
serverRef: spawn(pongActorMachine, { autoForward: true })
})),
on: {
PONG: {
actions: () => ++pongCounter
}
}
}
}
});
const service = interpret(machine);
service.start();
service.send('PING');
service.send('PING');
expect(pongCounter).toEqual(2);
});
});
describe('sync option', () => {
const childMachine = Machine({
id: 'child',
context: { value: 0 },
initial: 'active',
states: {
active: {
after: {
10: { actions: assign({ value: 42 }), internal: true }
}
}
}
});
interface TestContext {
ref?: ActorRefFrom<typeof childMachine>;
refNoSync?: ActorRefFrom<typeof childMachine>;
refNoSyncDefault?: ActorRefFrom<typeof childMachine>;
}
const parentMachine = Machine<TestContext>({
id: 'parent',
context: {
ref: undefined,
refNoSync: undefined,
refNoSyncDefault: undefined
},
initial: 'foo',
states: {
foo: {
entry: assign<TestContext>({
ref: () => spawn(childMachine, { sync: true }),
refNoSync: () => spawn(childMachine, { sync: false }),
refNoSyncDefault: () => spawn(childMachine)
})
},
success: {
type: 'final'
}
}
});
it('should sync spawned actor state when { sync: true }', () => {
return new Promise<void>((res) => {
const service = interpret(parentMachine, {
id: 'a-service'
}).onTransition((s) => {
if (s.context.ref?.getSnapshot()?.context.value === 42) {
res();
}
});
service.start();
});
});
it('should not sync spawned actor state when { sync: false }', () => {
return new Promise<void>((res, rej) => {
const service = interpret(parentMachine, {
id: 'b-service'
}).onTransition((s) => {
if (s.context.refNoSync?.getSnapshot()?.context.value === 42) {
rej(new Error('value change caused transition'));
}
});
service.start();
setTimeout(() => {
expect(
service.state.context.refNoSync?.getSnapshot()?.context.value
).toBe(42);
res();
}, 30);
});
});
it('should not sync spawned actor state (default)', () => {
return new Promise<void>((res, rej) => {
const service = interpret(parentMachine, {
id: 'c-service'
}).onTransition((s) => {
if (s.context.refNoSyncDefault?.getSnapshot()?.context.value === 42) {
rej(new Error('value change caused transition'));
}
});
service.start();
setTimeout(() => {
expect(
service.state.context.refNoSyncDefault?.getSnapshot()?.context.value
).toBe(42);
res();
}, 30);
});
});
it('parent state should be changed if synced child actor update occurs', (done) => {
const syncChildMachine = Machine({
initial: 'active',
states: {
active: {
after: { 500: 'inactive' }
},
inactive: {}
}
});
interface SyncMachineContext {
ref?: ActorRefFrom<typeof syncChildMachine>;
}
const syncMachine = Machine<SyncMachineContext>({
initial: 'same',
context: {},
states: {
same: {
entry: assign<SyncMachineContext>({
ref: () => spawn(syncChildMachine, { sync: true })
})
}
}
});
interpret(syncMachine)
.onTransition((state) => {
if (state.context.ref?.getSnapshot()?.matches('inactive')) {
expect(state.changed).toBe(true);
done();
}
})
.start();
});
const falseSyncOptions = [{}, { sync: false }];
falseSyncOptions.forEach((falseSyncOption) => {
it(`parent state should NOT be changed regardless of unsynced child actor update (options: ${JSON.stringify(
falseSyncOption
)})`, (done) => {
const syncChildMachine = Machine({
initial: 'active',
states: {
active: {
after: { 10: 'inactive' }
},
inactive: {}
}
});
interface SyncMachineContext {
ref?: ActorRefFrom<typeof syncChildMachine>;
}
const syncMachine = Machine<SyncMachineContext>({
initial: 'same',
context: {},
states: {
same: {
entry: assign<SyncMachineContext>({
ref: () => spawn(syncChildMachine, falseSyncOption)
})
}
}
});
const service = interpret(syncMachine)
.onTransition((state) => {
if (
state.context.ref &&
state.context.ref.getSnapshot()?.matches('inactive')
) {
expect(state.changed).toBe(false);
}
})
.start();
setTimeout(() => {
expect(
service.state.context.ref?.getSnapshot()?.matches('inactive')
).toBe(true);
done();
}, 20);
});
it(`parent state should be changed if unsynced child actor manually sends update event (options: ${JSON.stringify(
falseSyncOption
)})`, (done) => {
const syncChildMachine = Machine({
initial: 'active',
states: {
active: {
after: { 10: 'inactive' }
},
inactive: {
entry: sendUpdate()
}
}
});
interface SyncMachineContext {
ref?: ActorRefFrom<typeof syncChildMachine>;
}
const syncMachine = Machine<SyncMachineContext>({
initial: 'same',
context: {},
states: {
same: {
entry: assign<SyncMachineContext>({
ref: () => spawn(syncChildMachine, falseSyncOption)
})
}
}
});
interpret(syncMachine)
.onTransition((state) => {
if (state.context.ref?.getSnapshot()?.matches('inactive')) {
expect(state.changed).toBe(true);
done();
}
})
.start();
});
it('should only spawn an actor in an initial state of a child that gets invoked in the initial state of a parent when the parent gets started', () => {
let spawnCounter = 0;
interface TestContext {
promise?: ActorRefFrom<Promise<string>>;
}
const child = Machine<TestContext>({
initial: 'bar',
context: {},
states: {
bar: {
entry: assign<TestContext>({
promise: () => {
return spawn(() => {
spawnCounter++;
return Promise.resolve('answer');
});
}
})
}
}
});
const parent = Machine({
initial: 'foo',
states: {
foo: {
invoke: {
src: child,
onDone: 'end'
}
},
end: { type: 'final' }
}
});
interpret(parent).start();
expect(spawnCounter).toBe(1);
});
});
});
describe('with behaviors', () => {
it('should work with a reducer behavior', (done) => {
const countBehavior: Behavior<EventObject, number> = {
transition: (count, event) => {
if (event.type === 'INC') {
return count + 1;
} else {
return count - 1;
}
},
initialState: 0
};
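// A Behavior is just a reducer plus an initial state; the machine below
// forwards INC events to the spawned behavior and reads the running count
// back through getSnapshot().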
const countMachine = createMachine<{
count: ActorRefFrom<typeof countBehavior> | undefined;
}>({
context: {
count: undefined
},
entry: assign({
count: () => spawn(countBehavior)
}),
on: {
INC: {
actions: forwardTo((ctx) => ctx.count!)
}
}
});
const countService = interpret(countMachine)
.onTransition((state) => {
if (state.context.count?.getSnapshot() === 2) {
done();
}
})
.start();
countService.send('INC');
countService.send('INC');
});
it('should work with a promise behavior (fulfill)', (done) => {
const promiseBehavior = fromPromise( | })
);
const countMachine = createMachine<{
count: ActorRefFrom<typeof promiseBehavior> | undefined;
}>({
context: {
count: undefined
},
entry: assign({
count: () => spawn(promiseBehavior, 'test')
}),
initial: 'pending',
states: {
pending: {
on: {
'done.invoke.test': {
target: 'success',
cond: (_, e) => e.data === 42
}
}
},
success: {
type: 'final'
}
}
});
const countService = interpret(countMachine).onDone(() => {
done();
});
countService.start();
});
it('should work with a promise behavior (reject)', (done) => {
const errorMessage = 'An error occurred';
const promiseBehavior = fromPromise(
() =>
new Promise<number>((_, rej) => {
setTimeout(() => rej(errorMessage), 1000);
})
);
const countMachine = createMachine<{
count: ActorRefFrom<typeof promiseBehavior>;
}>({
context: () => ({
count: spawn(promiseBehavior, 'test')
}),
initial: 'pending',
states: {
pending: {
on: {
[error('test')]: {
target: 'success',
cond: (_, e) => {
return e.data === errorMessage;
}
}
}
},
success: {
type: 'final'
}
}
});
const countService = interpret(countMachine).onDone(() => {
done();
});
countService.start();
});
it('behaviors should have reference to the parent', (done) => {
const pongBehavior: Behavior<EventObject, undefined> = {
transition: (_, event, { parent }) => {
if (event.type === 'PING') {
parent?.send({ type: 'PONG' });
}
return undefined;
},
initialState: undefined
};
const pingMachine = createMachine<{
ponger: ActorRefFrom<typeof pongBehavior> | undefined;
}>({
initial: 'waiting',
context: {
ponger: undefined
},
entry: assign({
ponger: () => spawn(pongBehavior)
}),
states: {
waiting: {
entry: send('PING', { to: (ctx) => ctx.ponger! }),
invoke: {
id: 'ponger',
src: () => pongBehavior
},
on: {
PONG: 'success'
}
},
success: {
type: 'final'
}
}
});
const pingService = interpret(pingMachine).onDone(() => {
done();
});
pingService.start();
});
});
it('should be able to spawn actors in (lazy) initial context', (done) => {
const machine = createMachine<{ ref: ActorRef<any> }>({
context: () => ({
ref: spawn((sendBack) => {
sendBack('TEST');
})
}),
initial: 'waiting',
states: {
waiting: {
on: { TEST: 'success' }
},
success: {
type: 'final'
}
}
});
interpret(machine)
.onDone(() => {
done();
})
.start();
});
}); | () =>
new Promise<number>((res) => {
setTimeout(() => res(42));
admin.py | from django.contrib import admin
from .models import Order, OrderItem
class OrderItemInline(admin.TabularInline):
|
class OrderAdmin(admin.ModelAdmin):
list_display = ['id', 'first_name', 'last_name', 'email', 'address', 'postal_code', 'city', 'paid', 'created',
'updated']
list_filter = ['paid', 'created', 'updated']
inlines = [OrderItemInline]
admin.site.register(Order, OrderAdmin)
| model = OrderItem
raw_id_fields = ['product'] |
create_index.py | from analizer.abstract import instruction
from analizer.reports import Nodo
from analizer.typechecker.Metadata import File
from analizer.typechecker.Metadata import Struct
class CreateIndex(instruction.Instruction):
def __init__(self, unique, idIndex, idTable, usingMethod, whereCl, optList=[]):
self.unique = unique
self.idIndex = idIndex
self.idTable = idTable
self.optList = optList
self.whereCl = whereCl
self.usingMethod = usingMethod
if not idIndex: |
def execute(self, environment):
name = self.idIndex
if self.existIndex(name):
return "Error: ya existe un index con el nombre " + name
table = Struct.extractTable(instruction.dbtemp,self.idTable)
print(table)
if table == 1 or table == 0:
return "Error: no existe la tabla " + self.idTable + " en la base de datos " + instruction.dbtemp
try:
Index = File.importFile("Index")
indexBody = {}
indexBody["Table"] = self.idTable
indexBody["Unique"] = self.unique
indexBody["Method"] = self.usingMethod
indexBody["Columns"] = []
for c in self.optList:
col = {}
col["Name"] = c[0]
col["Order"] = c[1]
if c[2]:
nulls = c[2][0]
if c[2][1]:
nulls += " " + c[2][1]
else:
if col["Order"] == "DESC":
nulls = "NULLS FIRST"
else:
nulls = "NULLS LAST"
col["Nulls"] = nulls
indexBody["Columns"].append(col)
Index[name] = indexBody
File.exportFile(Index, "Index")
return "Index " + name + " creado"
except Exception:
return "Error fatal"
def existIndex(self, name):
Index = File.importFile("Index")
exists = Index.get(name)
return exists is not None
def dot(self):
new = Nodo.Nodo("CREATE_INDEX")
if self.unique:
uniqueNode = Nodo.Nodo("UNIQUE")
new.addNode(uniqueNode)
if self.usingMethod:
uhNode = Nodo.Nodo("USING_HASH")
new.addNode(uhNode)
id1 = Nodo.Nodo(str(self.idIndex))
id2 = Nodo.Nodo(str(self.idTable))
new.addNode(id1)
new.addNode(id2)
listNode = Nodo.Nodo("INDEX_LIST")
new.addNode(listNode)
for l in self.optList:
if l[0] is not None:
l1 = Nodo.Nodo(str(l[0]))
listNode.addNode(l1)
if l[1] is not None:
l2 = Nodo.Nodo(str(l[1]))
listNode.addNode(l2)
if l[2]:
l3 = Nodo.Nodo("NULLS")
listNode.addNode(l3)
if l[2][1] is not None:
l4 = Nodo.Nodo(str(l[2][1]))
listNode.addNode(l4)
if self.whereCl is not None:
new.addNode(self.whereCl.dot())
return new | idIndex = "index_" + idTable
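# e.g. (hypothetical) a table "t" indexed on columns (a, b) gets the default name "index_t_a_b"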
for l in optList:
idIndex += "_" + l[0]
self.idIndex = idIndex |
conf.go | package common
import "net"
| ListenAddrs []net.Addr
ConnAddrs []net.Addr
} | type CaptureConf struct {
DeviceName string |
admin.py | from django.contrib import admin
from .models import MovieList, Movie
class MoviesInline(admin.TabularInline):
model = Movie
extra = 1
fieldsets = [
('Movies', {'fields': ['title']}),
(None, {'fields': ['movielist']})
]
| ('General', {'fields': ['name']}),
('Date information', {'fields': ['created_date'], 'classes': ['collapse']}),
]
list_display = ('name', 'created_date')
inlines = [MoviesInline]
list_filter = ['created_date']
admin.site.register(MovieList, MovieListAdmin)
admin.site.register(Movie) |
class MovieListAdmin(admin.ModelAdmin):
fieldsets = [ |
service.rs | //! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
use fc_consensus::FrontierBlockImport;
use fc_mapping_sync::MappingSyncWorker;
use fc_rpc::EthTask;
use fc_rpc_core::types::{FilterPool, PendingTransactions};
use findora_defi_runtime::{self, opaque::Block, RuntimeApi, SLOT_DURATION};
use futures::StreamExt;
use sc_cli::SubstrateCli;
use sc_client_api::{BlockchainEvents, ExecutorProvider, RemoteBackend};
use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
#[cfg(feature = "manual-seal")]
use sc_consensus_manual_seal::{self as manual_seal};
use sc_executor::native_executor_instance;
pub use sc_executor::NativeExecutor;
use sc_finality_grandpa::SharedVoterState;
use sc_service::{error::Error as ServiceError, BasePath, Configuration, TaskManager};
use sc_telemetry::{Telemetry, TelemetryWorker};
use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
use sp_core::U256;
use sp_inherents::{InherentData, InherentDataProviders, InherentIdentifier, ProvideInherentData};
use sp_timestamp::InherentError;
use std::{
cell::RefCell,
collections::{BTreeMap, HashMap},
sync::{Arc, Mutex},
time::Duration,
};
use crate::cli::Cli;
#[cfg(feature = "manual-seal")]
use crate::cli::Sealing;
// Our native executor instance.
native_executor_instance!(
pub Executor,
findora_defi_runtime::api::dispatch,
findora_defi_runtime::native_version,
);
type FullClient = sc_service::TFullClient<Block, RuntimeApi, Executor>;
type FullBackend = sc_service::TFullBackend<Block>;
type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
#[cfg(feature = "aura")]
pub type ConsensusResult = (
sc_consensus_aura::AuraBlockImport<
Block,
FullClient,
FrontierBlockImport<
Block,
sc_finality_grandpa::GrandpaBlockImport<
FullBackend,
Block,
FullClient,
FullSelectChain,
>,
FullClient,
>,
AuraPair,
>,
sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
);
#[cfg(feature = "manual-seal")]
pub type ConsensusResult = (
FrontierBlockImport<Block, Arc<FullClient>, FullClient>,
Sealing,
);
/// Provide a mock timestamp, starting at 0 and expressed in milliseconds, for the timestamp inherent.
/// Each call increments the timestamp by `SLOT_DURATION`, making Aura think time has passed.
pub struct MockTimestampInherentDataProvider;
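// Inherent identifiers are fixed 8-byte tags; `b"timstap0"` matches the identifier
// used by `sp_timestamp`, so this mock transparently replaces the real timestamp inherent.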
pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"timstap0";
thread_local!(static TIMESTAMP: RefCell<u64> = RefCell::new(0));
impl ProvideInherentData for MockTimestampInherentDataProvider {
fn inherent_identifier(&self) -> &'static InherentIdentifier {
&INHERENT_IDENTIFIER
}
fn provide_inherent_data(
&self,
inherent_data: &mut InherentData,
) -> Result<(), sp_inherents::Error> {
TIMESTAMP.with(|x| {
*x.borrow_mut() += SLOT_DURATION;
inherent_data.put_data(INHERENT_IDENTIFIER, &*x.borrow())
})
}
fn error_to_string(&self, error: &[u8]) -> Option<String> {
InherentError::try_from(&INHERENT_IDENTIFIER, error).map(|e| format!("{:?}", e))
}
}
pub fn open_frontier_backend(config: &Configuration) -> Result<Arc<fc_db::Backend<Block>>, String> {
let config_dir = config
.base_path
.as_ref()
.map(|base_path| base_path.config_dir(config.chain_spec.id()))
.unwrap_or_else(|| {
BasePath::from_project("", "", &crate::cli::Cli::executable_name())
.config_dir(config.chain_spec.id())
});
let database_dir = config_dir.join("frontier").join("db");
Ok(Arc::new(fc_db::Backend::<Block>::new(
&fc_db::DatabaseSettings {
source: fc_db::DatabaseSettingsSrc::RocksDb {
path: database_dir,
cache_size: 0,
},
},
)?))
}
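// For illustration (hypothetical paths): with a base path of
// `/home/alice/.local/share/node` and chain id `dev`, the Frontier RocksDB
// above would typically land at
// `/home/alice/.local/share/node/chains/dev/frontier/db`, since
// `config_dir` nests per-chain data under `chains/<chain_id>`.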
pub fn new_partial(
config: &Configuration,
#[allow(unused_variables)] cli: &Cli,
) -> Result<
sc_service::PartialComponents<
FullClient,
FullBackend,
FullSelectChain,
sp_consensus::import_queue::BasicQueue<Block, sp_api::TransactionFor<FullClient, Block>>,
sc_transaction_pool::FullPool<Block, FullClient>,
(
ConsensusResult,
PendingTransactions,
Option<FilterPool>,
Arc<fc_db::Backend<Block>>,
Option<Telemetry>,
),
>,
ServiceError,
> {
let inherent_data_providers = sp_inherents::InherentDataProviders::new();
let telemetry = config
.telemetry_endpoints
.clone()
.filter(|x| !x.is_empty())
.map(|endpoints| -> Result<_, sc_telemetry::Error> {
let worker = TelemetryWorker::new(16)?;
let telemetry = worker.handle().new_telemetry(endpoints);
Ok((worker, telemetry))
})
.transpose()?;
let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
&config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
)?;
let client = Arc::new(client);
let telemetry = telemetry.map(|(worker, telemetry)| {
task_manager.spawn_handle().spawn("telemetry", worker.run());
telemetry
});
let select_chain = sc_consensus::LongestChain::new(backend.clone());
let transaction_pool = sc_transaction_pool::BasicPool::new_full(
config.transaction_pool.clone(),
config.role.is_authority().into(),
config.prometheus_registry(),
task_manager.spawn_handle(),
client.clone(),
);
let pending_transactions: PendingTransactions = Some(Arc::new(Mutex::new(HashMap::new())));
let filter_pool: Option<FilterPool> = Some(Arc::new(Mutex::new(BTreeMap::new())));
let frontier_backend = open_frontier_backend(config)?;
#[cfg(feature = "manual-seal")]
{
let sealing = cli.run.sealing;
inherent_data_providers
.register_provider(MockTimestampInherentDataProvider)
.map_err(Into::into)
.map_err(sp_consensus::error::Error::InherentData)?;
inherent_data_providers
.register_provider(pallet_dynamic_fee::InherentDataProvider(U256::from(
cli.run.target_gas_price,
)))
.map_err(Into::into)
.map_err(sp_consensus::Error::InherentData)?;
let frontier_block_import =
FrontierBlockImport::new(client.clone(), client.clone(), frontier_backend.clone());
let import_queue = sc_consensus_manual_seal::import_queue(
Box::new(frontier_block_import.clone()),
&task_manager.spawn_essential_handle(),
config.prometheus_registry(),
);
Ok(sc_service::PartialComponents {
client,
backend,
task_manager,
import_queue,
keystore_container,
select_chain,
transaction_pool,
inherent_data_providers,
other: (
(frontier_block_import, sealing),
pending_transactions,
filter_pool,
frontier_backend,
telemetry,
),
})
}
#[cfg(feature = "aura")]
{
inherent_data_providers
.register_provider(pallet_dynamic_fee::InherentDataProvider(U256::from(
cli.run.target_gas_price,
)))
.map_err(Into::into)
.map_err(sp_consensus::Error::InherentData)?;
let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import(
client.clone(),
&(client.clone() as Arc<_>),
select_chain.clone(),
telemetry.as_ref().map(|x| x.handle()),
)?;
let frontier_block_import = FrontierBlockImport::new(
grandpa_block_import.clone(),
client.clone(),
frontier_backend.clone(),
);
let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(
frontier_block_import,
client.clone(),
);
let import_queue =
sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(ImportQueueParams {
slot_duration: sc_consensus_aura::slot_duration(&*client)?,
block_import: aura_block_import.clone(),
justification_import: Some(Box::new(grandpa_block_import.clone())),
client: client.clone(),
inherent_data_providers: inherent_data_providers.clone(),
spawner: &task_manager.spawn_essential_handle(),
registry: config.prometheus_registry(),
can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(
client.executor().clone(),
),
check_for_equivocation: Default::default(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
})?;
Ok(sc_service::PartialComponents {
client,
backend,
task_manager,
import_queue,
keystore_container,
select_chain,
transaction_pool,
inherent_data_providers,
other: (
(aura_block_import, grandpa_link),
pending_transactions,
filter_pool,
frontier_backend,
telemetry,
),
})
}
}
/// Builds a new service for a full client.
pub fn new_full(config: Configuration, cli: &Cli) -> Result<TaskManager, ServiceError> |
#[cfg(feature = "aura")]
pub fn new_light(config: Configuration) -> Result<TaskManager, ServiceError> {
let telemetry = config
.telemetry_endpoints
.clone()
.filter(|x| !x.is_empty())
.map(|endpoints| -> Result<_, sc_telemetry::Error> {
let worker = TelemetryWorker::new(16)?;
let telemetry = worker.handle().new_telemetry(endpoints);
Ok((worker, telemetry))
})
.transpose()?;
let (client, backend, keystore_container, mut task_manager, on_demand) =
sc_service::new_light_parts::<Block, RuntimeApi, Executor>(
&config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
)?;
let mut telemetry = telemetry.map(|(worker, telemetry)| {
task_manager.spawn_handle().spawn("telemetry", worker.run());
telemetry
});
let select_chain = sc_consensus::LongestChain::new(backend.clone());
let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light(
config.transaction_pool.clone(),
config.prometheus_registry(),
task_manager.spawn_handle(),
client.clone(),
on_demand.clone(),
));
let (grandpa_block_import, _) = sc_finality_grandpa::block_import(
client.clone(),
&(client.clone() as Arc<_>),
select_chain.clone(),
telemetry.as_ref().map(|x| x.handle()),
)?;
let import_queue =
sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(ImportQueueParams {
slot_duration: sc_consensus_aura::slot_duration(&*client)?,
block_import: grandpa_block_import.clone(),
justification_import: Some(Box::new(grandpa_block_import)),
client: client.clone(),
inherent_data_providers: InherentDataProviders::new(),
spawner: &task_manager.spawn_essential_handle(),
registry: config.prometheus_registry(),
can_author_with: sp_consensus::NeverCanAuthor,
check_for_equivocation: Default::default(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
})?;
let light_deps = crate::rpc::LightDeps {
remote_blockchain: backend.remote_blockchain(),
fetcher: on_demand.clone(),
client: client.clone(),
pool: transaction_pool.clone(),
};
let rpc_extensions = crate::rpc::create_light(light_deps);
let (network, network_status_sinks, system_rpc_tx, network_starter) =
sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue,
on_demand: Some(on_demand.clone()),
block_announce_validator_builder: None,
})?;
if config.offchain_worker.enabled {
sc_service::build_offchain_workers(
&config,
task_manager.spawn_handle(),
client.clone(),
network.clone(),
);
}
sc_service::spawn_tasks(sc_service::SpawnTasksParams {
remote_blockchain: Some(backend.remote_blockchain()),
transaction_pool,
task_manager: &mut task_manager,
on_demand: Some(on_demand),
rpc_extensions_builder: Box::new(sc_service::NoopRpcExtensionBuilder(rpc_extensions)),
config,
client,
keystore: keystore_container.sync_keystore(),
backend,
network,
network_status_sinks,
system_rpc_tx,
telemetry: telemetry.as_mut(),
})?;
network_starter.start_network();
Ok(task_manager)
}
#[cfg(feature = "manual-seal")]
pub fn new_light(config: Configuration) -> Result<TaskManager, ServiceError> {
return Err(ServiceError::Other(
"Manual seal does not support light client".to_string(),
));
}
| {
let enable_dev_signer = cli.run.enable_dev_signer;
let sc_service::PartialComponents {
client,
backend,
mut task_manager,
import_queue,
keystore_container,
select_chain,
transaction_pool,
inherent_data_providers,
other:
(consensus_result, pending_transactions, filter_pool, frontier_backend, mut telemetry),
} = new_partial(&config, cli)?;
let (network, network_status_sinks, system_rpc_tx, network_starter) =
sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue,
on_demand: None,
block_announce_validator_builder: None,
})?;
// Channel for the rpc handler to communicate with the authorship task.
let (command_sink, commands_stream) = futures::channel::mpsc::channel(1000);
if config.offchain_worker.enabled {
sc_service::build_offchain_workers(
&config,
task_manager.spawn_handle(),
client.clone(),
network.clone(),
);
}
let role = config.role.clone();
let force_authoring = config.force_authoring;
let backoff_authoring_blocks: Option<()> = None;
let name = config.network.node_name.clone();
let enable_grandpa = !config.disable_grandpa;
let prometheus_registry = config.prometheus_registry().cloned();
let is_authority = role.is_authority();
let subscription_task_executor =
sc_rpc::SubscriptionTaskExecutor::new(task_manager.spawn_handle());
let rpc_extensions_builder = {
let client = client.clone();
let pool = transaction_pool.clone();
let network = network.clone();
let pending = pending_transactions.clone();
let filter_pool = filter_pool.clone();
let frontier_backend = frontier_backend.clone();
let max_past_logs = cli.run.max_past_logs;
Box::new(move |deny_unsafe, _| {
let deps = crate::rpc::FullDeps {
client: client.clone(),
pool: pool.clone(),
deny_unsafe,
is_authority,
enable_dev_signer,
network: network.clone(),
pending_transactions: pending.clone(),
filter_pool: filter_pool.clone(),
backend: frontier_backend.clone(),
max_past_logs,
command_sink: Some(command_sink.clone()),
};
crate::rpc::create_full(deps, subscription_task_executor.clone())
})
};
task_manager.spawn_essential_handle().spawn(
"frontier-mapping-sync-worker",
MappingSyncWorker::new(
client.import_notification_stream(),
Duration::new(6, 0),
client.clone(),
backend.clone(),
frontier_backend.clone(),
)
.for_each(|()| futures::future::ready(())),
);
let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
network: network.clone(),
client: client.clone(),
keystore: keystore_container.sync_keystore(),
task_manager: &mut task_manager,
transaction_pool: transaction_pool.clone(),
rpc_extensions_builder,
on_demand: None,
remote_blockchain: None,
backend,
network_status_sinks,
system_rpc_tx,
config,
telemetry: telemetry.as_mut(),
})?;
// Spawn Frontier EthFilterApi maintenance task.
if let Some(filter_pool) = filter_pool {
// Each filter is allowed to stay in the pool for 100 blocks.
const FILTER_RETAIN_THRESHOLD: u64 = 100;
task_manager.spawn_essential_handle().spawn(
"frontier-filter-pool",
EthTask::filter_pool_task(Arc::clone(&client), filter_pool, FILTER_RETAIN_THRESHOLD),
);
}
// Spawn Frontier pending transactions maintenance task (as essential, otherwise we leak).
if let Some(pending_transactions) = pending_transactions {
const TRANSACTION_RETAIN_THRESHOLD: u64 = 5;
task_manager.spawn_essential_handle().spawn(
"frontier-pending-transactions",
EthTask::pending_transaction_task(
Arc::clone(&client),
pending_transactions,
TRANSACTION_RETAIN_THRESHOLD,
),
);
}
#[cfg(feature = "manual-seal")]
{
let (block_import, sealing) = consensus_result;
if role.is_authority() {
let env = sc_basic_authorship::ProposerFactory::new(
task_manager.spawn_handle(),
client.clone(),
transaction_pool.clone(),
prometheus_registry.as_ref(),
telemetry.as_ref().map(|x| x.handle()),
);
// Background authorship future
match sealing {
Sealing::Manual => {
let authorship_future =
manual_seal::run_manual_seal(manual_seal::ManualSealParams {
block_import,
env,
client,
pool: transaction_pool.pool().clone(),
commands_stream,
select_chain,
consensus_data_provider: None,
inherent_data_providers,
});
// We spawn the future on a background thread managed by the service.
task_manager
.spawn_essential_handle()
.spawn_blocking("manual-seal", authorship_future);
}
Sealing::Instant => {
let authorship_future =
manual_seal::run_instant_seal(manual_seal::InstantSealParams {
block_import,
env,
client: client.clone(),
pool: transaction_pool.pool().clone(),
select_chain,
consensus_data_provider: None,
inherent_data_providers,
});
// We spawn the future on a background thread managed by the service.
task_manager
.spawn_essential_handle()
.spawn_blocking("instant-seal", authorship_future);
}
};
}
log::info!("Manual Seal Ready");
}
#[cfg(feature = "aura")]
{
let (aura_block_import, grandpa_link) = consensus_result;
if role.is_authority() {
let proposer = sc_basic_authorship::ProposerFactory::new(
task_manager.spawn_handle(),
client.clone(),
transaction_pool.clone(),
prometheus_registry.as_ref(),
telemetry.as_ref().map(|x| x.handle()),
);
let can_author_with =
sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone());
let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _>(
StartAuraParams {
slot_duration: sc_consensus_aura::slot_duration(&*client)?,
client: client.clone(),
select_chain,
block_import: aura_block_import,
proposer_factory: proposer,
sync_oracle: network.clone(),
inherent_data_providers: inherent_data_providers.clone(),
force_authoring,
backoff_authoring_blocks,
keystore: keystore_container.sync_keystore(),
can_author_with,
block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
telemetry: telemetry.as_ref().map(|x| x.handle()),
},
)?;
// the AURA authoring task is considered essential, i.e. if it
// fails we take down the service with it.
task_manager
.spawn_essential_handle()
.spawn_blocking("aura", aura);
// if the node isn't actively participating in consensus then it doesn't
// need a keystore, regardless of which protocol we use below.
let keystore = if role.is_authority() {
Some(keystore_container.sync_keystore())
} else {
None
};
let grandpa_config = sc_finality_grandpa::Config {
// FIXME #1578 make this available through chainspec
gossip_duration: Duration::from_millis(333),
justification_period: 512,
name: Some(name),
observer_enabled: false,
keystore,
is_authority: role.is_authority(),
telemetry: telemetry.as_ref().map(|x| x.handle()),
};
if enable_grandpa {
// start the full GRANDPA voter
// NOTE: non-authorities could run the GRANDPA observer protocol, but at
// this point the full voter should provide better guarantees of block
// and vote data availability than the observer. The observer has not
// been tested extensively yet and having most nodes in a network run it
// could lead to finality stalls.
let grandpa_config = sc_finality_grandpa::GrandpaParams {
config: grandpa_config,
link: grandpa_link,
network,
telemetry: telemetry.as_ref().map(|x| x.handle()),
voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(),
prometheus_registry,
shared_voter_state: SharedVoterState::empty(),
};
// the GRANDPA voter task is considered infallible, i.e.
// if it fails we take down the service with it.
task_manager.spawn_essential_handle().spawn_blocking(
"grandpa-voter",
sc_finality_grandpa::run_grandpa_voter(grandpa_config)?,
);
}
}
}
network_starter.start_network();
Ok(task_manager)
} |
find_cell_hosts.go | // Code generated by go-swagger; DO NOT EDIT.
// Copyright (c) 2016, 2017, 2018 Alexandre Biancalana <[email protected]>.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
package host
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the generate command
import (
"net/http"
middleware "github.com/go-openapi/runtime/middleware"
"configManager/models"
)
// FindCellHostsHandlerFunc turns a function with the right signature into a find cell hosts handler
type FindCellHostsHandlerFunc func(FindCellHostsParams, *models.Customer) middleware.Responder
// Handle executing the request and returning a response
func (fn FindCellHostsHandlerFunc) Handle(params FindCellHostsParams, principal *models.Customer) middleware.Responder {
return fn(params, principal)
}
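// A hedged wiring sketch (not generated code): server setup typically assigns
// a handler through this adapter. The `api` value and the `NewFindCellHostsOK`
// responder are hypothetical names following the usual go-swagger conventions
// for this operation.
//
//	api.HostFindCellHostsHandler = FindCellHostsHandlerFunc(
//		func(params FindCellHostsParams, principal *models.Customer) middleware.Responder {
//			// Look up the hosts of params.CellID on behalf of principal here.
//			return NewFindCellHostsOK()
//		})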
// FindCellHostsHandler is an interface that can handle valid find cell hosts params
type FindCellHostsHandler interface {
Handle(FindCellHostsParams, *models.Customer) middleware.Responder
}
// NewFindCellHosts creates a new http.Handler for the find cell hosts operation
func NewFindCellHosts(ctx *middleware.Context, handler FindCellHostsHandler) *FindCellHosts {
return &FindCellHosts{Context: ctx, Handler: handler}
}
/*FindCellHosts swagger:route GET /cell/{cell_id}/hosts host findCellHosts
Find Hosts by Cell
*/
type FindCellHosts struct {
Context *middleware.Context
Handler FindCellHostsHandler
}
func (o *FindCellHosts) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
route, rCtx, _ := o.Context.RouteInfo(r)
if rCtx != nil {
r = rCtx
}
var Params = NewFindCellHostsParams()
| o.Context.Respond(rw, r, route.Produces, route, err)
return
}
if aCtx != nil {
r = aCtx
}
var principal *models.Customer
if uprinc != nil {
principal = uprinc.(*models.Customer) // this is really a models.Customer, I promise
}
if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
o.Context.Respond(rw, r, route.Produces, route, err)
return
}
res := o.Handler.Handle(Params, principal) // actually handle the request
o.Context.Respond(rw, r, route.Produces, route, res)
} | uprinc, aCtx, err := o.Context.Authorize(r, route)
if err != nil { |
racks.go | // Copyright 2018 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ui
import (
"github.com/TriggerMail/luci-go/server/router"
"github.com/TriggerMail/luci-go/server/templates"
"github.com/TriggerMail/luci-go/machine-db/api/crimson/v1"
)
func racksPage(c *router.Context) | {
resp, err := server(c.Context).ListRacks(c.Context, &crimson.ListRacksRequest{})
if err != nil {
renderErr(c, err)
return
}
templates.MustRender(c.Context, c.Writer, "pages/racks.html", map[string]interface{}{
"Racks": resp.Racks,
})
} |
|
component.ts | import { Component, Input, AfterContentInit, ChangeDetectorRef, SimpleChanges, OnChanges } from '@angular/core';
@Component({
selector: 'lib-primitive-dropdownlist',
templateUrl: './template.html',
styleUrls: ['./styles.less']
})
export class | implements AfterContentInit, OnChanges {
public _ng_value: any;
public _ng_error: string | undefined;
@Input() public items: Array<{ caption: string, value: any, }> = [];
@Input() public defaults: any;
@Input() public placeholder: string = '';
@Input() public disabled: boolean = false;
@Input() public onFocus: (...args: any[]) => any = () => void 0;
@Input() public onBlur: (...args: any[]) => any = () => void 0;
@Input() public onChange: (value: string) => any = () => void 0;
constructor(private _cdRef: ChangeDetectorRef) {
}
public ngAfterContentInit() {
if (!(this.items instanceof Array) || this.items.length === 0) {
return;
}
this._ng_value = this.defaults;
this._cdRef.detectChanges();
}
ngOnChanges(changes: SimpleChanges) {
if (changes.defaults !== undefined) {
this.defaults = changes.defaults.currentValue;
this._ng_value = this.defaults;
}
if (changes.items !== undefined) {
this.items = changes.items.currentValue;
}
this._cdRef.detectChanges();
}
public _ng_onChange(value: any) {
this.onChange(value);
this._ng_value = value;
this._cdRef.detectChanges();
}
public getValue(): any {
return this._ng_value;
}
}
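// A hedged usage sketch (not part of the original file): a host template could
// bind the dropdown like this, using the selector declared in the decorator
// above. The handler name `onLevelChange` is hypothetical.
//
//   <lib-primitive-dropdownlist
//       [items]="[{ caption: 'Debug', value: 0 }, { caption: 'Info', value: 1 }]"
//       [defaults]="0"
//       [onChange]="onLevelChange">
//   </lib-primitive-dropdownlist>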
| DDListStandardComponent |
vfio.rs | /* automatically generated by rust-bindgen */
#[repr(C)]
#[derive(Default)]
pub struct __IncompleteArrayField<T>(::std::marker::PhantomData<T>, [T; 0]);
impl<T> __IncompleteArrayField<T> {
#[inline]
pub fn new() -> Self {
__IncompleteArrayField(::std::marker::PhantomData, [])
}
#[inline]
pub unsafe fn as_ptr(&self) -> *const T {
::std::mem::transmute(self)
}
#[inline]
pub unsafe fn as_mut_ptr(&mut self) -> *mut T {
::std::mem::transmute(self)
}
#[inline]
pub unsafe fn as_slice(&self, len: usize) -> &[T] {
::std::slice::from_raw_parts(self.as_ptr(), len)
}
#[inline]
pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
}
}
impl<T> ::std::fmt::Debug for __IncompleteArrayField<T> {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
fmt.write_str("__IncompleteArrayField")
}
}
impl<T> ::std::clone::Clone for __IncompleteArrayField<T> {
#[inline]
fn clone(&self) -> Self {
Self::new()
}
}
pub const __BITS_PER_LONG: u32 = 64;
pub const __FD_SETSIZE: u32 = 1024;
pub const _IOC_NRBITS: u32 = 8;
pub const _IOC_TYPEBITS: u32 = 8;
pub const _IOC_SIZEBITS: u32 = 14;
pub const _IOC_DIRBITS: u32 = 2;
pub const _IOC_NRMASK: u32 = 255;
pub const _IOC_TYPEMASK: u32 = 255;
pub const _IOC_SIZEMASK: u32 = 16383;
pub const _IOC_DIRMASK: u32 = 3;
pub const _IOC_NRSHIFT: u32 = 0;
pub const _IOC_TYPESHIFT: u32 = 8;
pub const _IOC_SIZESHIFT: u32 = 16;
pub const _IOC_DIRSHIFT: u32 = 30;
pub const _IOC_NONE: u32 = 0;
pub const _IOC_WRITE: u32 = 1;
pub const _IOC_READ: u32 = 2;
pub const IOC_IN: u32 = 1073741824;
pub const IOC_OUT: u32 = 2147483648;
pub const IOC_INOUT: u32 = 3221225472;
pub const IOCSIZE_MASK: u32 = 1073676288;
pub const IOCSIZE_SHIFT: u32 = 16;
pub const VFIO_API_VERSION: u32 = 0;
pub const VFIO_TYPE1_IOMMU: u32 = 1;
pub const VFIO_SPAPR_TCE_IOMMU: u32 = 2;
pub const VFIO_TYPE1v2_IOMMU: u32 = 3;
pub const VFIO_DMA_CC_IOMMU: u32 = 4;
pub const VFIO_EEH: u32 = 5;
pub const VFIO_TYPE1_NESTING_IOMMU: u32 = 6;
pub const VFIO_SPAPR_TCE_v2_IOMMU: u32 = 7;
pub const VFIO_NOIOMMU_IOMMU: u32 = 8;
pub const VFIO_TYPE: u32 = 59;
pub const VFIO_BASE: u32 = 100;
pub const VFIO_GROUP_FLAGS_VIABLE: u32 = 1;
pub const VFIO_GROUP_FLAGS_CONTAINER_SET: u32 = 2;
pub const VFIO_DEVICE_FLAGS_RESET: u32 = 1;
pub const VFIO_DEVICE_FLAGS_PCI: u32 = 2;
pub const VFIO_DEVICE_FLAGS_PLATFORM: u32 = 4;
pub const VFIO_DEVICE_FLAGS_AMBA: u32 = 8;
pub const VFIO_DEVICE_FLAGS_CCW: u32 = 16;
pub const VFIO_DEVICE_API_PCI_STRING: &'static [u8; 9usize] = b"vfio-pci\0";
pub const VFIO_DEVICE_API_PLATFORM_STRING: &'static [u8; 14usize] = b"vfio-platform\0";
pub const VFIO_DEVICE_API_AMBA_STRING: &'static [u8; 10usize] = b"vfio-amba\0";
pub const VFIO_DEVICE_API_CCW_STRING: &'static [u8; 9usize] = b"vfio-ccw\0";
pub const VFIO_REGION_INFO_FLAG_READ: u32 = 1;
pub const VFIO_REGION_INFO_FLAG_WRITE: u32 = 2;
pub const VFIO_REGION_INFO_FLAG_MMAP: u32 = 4;
pub const VFIO_REGION_INFO_FLAG_CAPS: u32 = 8;
pub const VFIO_REGION_INFO_CAP_SPARSE_MMAP: u32 = 1;
pub const VFIO_REGION_INFO_CAP_TYPE: u32 = 2;
pub const VFIO_REGION_TYPE_PCI_VENDOR_TYPE: u32 = 2147483648;
pub const VFIO_REGION_TYPE_PCI_VENDOR_MASK: u32 = 65535;
pub const VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION: u32 = 1;
pub const VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG: u32 = 2;
pub const VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG: u32 = 3;
pub const VFIO_REGION_INFO_CAP_MSIX_MAPPABLE: u32 = 3;
pub const VFIO_IRQ_INFO_EVENTFD: u32 = 1;
pub const VFIO_IRQ_INFO_MASKABLE: u32 = 2;
pub const VFIO_IRQ_INFO_AUTOMASKED: u32 = 4;
pub const VFIO_IRQ_INFO_NORESIZE: u32 = 8;
pub const VFIO_IRQ_SET_DATA_NONE: u32 = 1;
pub const VFIO_IRQ_SET_DATA_BOOL: u32 = 2;
pub const VFIO_IRQ_SET_DATA_EVENTFD: u32 = 4;
pub const VFIO_IRQ_SET_ACTION_MASK: u32 = 8;
pub const VFIO_IRQ_SET_ACTION_UNMASK: u32 = 16;
pub const VFIO_IRQ_SET_ACTION_TRIGGER: u32 = 32;
pub const VFIO_IRQ_SET_DATA_TYPE_MASK: u32 = 7;
pub const VFIO_IRQ_SET_ACTION_TYPE_MASK: u32 = 56;
pub const VFIO_GFX_PLANE_TYPE_PROBE: u32 = 1;
pub const VFIO_GFX_PLANE_TYPE_DMABUF: u32 = 2;
pub const VFIO_GFX_PLANE_TYPE_REGION: u32 = 4;
pub const VFIO_DEVICE_IOEVENTFD_8: u32 = 1;
pub const VFIO_DEVICE_IOEVENTFD_16: u32 = 2;
pub const VFIO_DEVICE_IOEVENTFD_32: u32 = 4;
pub const VFIO_DEVICE_IOEVENTFD_64: u32 = 8;
pub const VFIO_DEVICE_IOEVENTFD_SIZE_MASK: u32 = 15;
pub const VFIO_IOMMU_INFO_PGSIZES: u32 = 1;
pub const VFIO_DMA_MAP_FLAG_READ: u32 = 1;
pub const VFIO_DMA_MAP_FLAG_WRITE: u32 = 2;
pub const VFIO_IOMMU_SPAPR_INFO_DDW: u32 = 1;
pub const VFIO_EEH_PE_DISABLE: u32 = 0;
pub const VFIO_EEH_PE_ENABLE: u32 = 1;
pub const VFIO_EEH_PE_UNFREEZE_IO: u32 = 2;
pub const VFIO_EEH_PE_UNFREEZE_DMA: u32 = 3;
pub const VFIO_EEH_PE_GET_STATE: u32 = 4;
pub const VFIO_EEH_PE_STATE_NORMAL: u32 = 0;
pub const VFIO_EEH_PE_STATE_RESET: u32 = 1;
pub const VFIO_EEH_PE_STATE_STOPPED: u32 = 2;
pub const VFIO_EEH_PE_STATE_STOPPED_DMA: u32 = 4;
pub const VFIO_EEH_PE_STATE_UNAVAIL: u32 = 5;
pub const VFIO_EEH_PE_RESET_DEACTIVATE: u32 = 5;
pub const VFIO_EEH_PE_RESET_HOT: u32 = 6;
pub const VFIO_EEH_PE_RESET_FUNDAMENTAL: u32 = 7;
pub const VFIO_EEH_PE_CONFIGURE: u32 = 8;
pub const VFIO_EEH_PE_INJECT_ERR: u32 = 9;
pub type __s8 = ::std::os::raw::c_schar;
pub type __u8 = ::std::os::raw::c_uchar;
pub type __s16 = ::std::os::raw::c_short;
pub type __u16 = ::std::os::raw::c_ushort;
pub type __s32 = ::std::os::raw::c_int;
pub type __u32 = ::std::os::raw::c_uint;
pub type __s64 = ::std::os::raw::c_longlong;
pub type __u64 = ::std::os::raw::c_ulonglong;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct __kernel_fd_set {
pub fds_bits: [::std::os::raw::c_ulong; 16usize],
}
#[test]
fn bindgen_test_layout___kernel_fd_set() {
assert_eq!(
::std::mem::size_of::<__kernel_fd_set>(),
128usize,
concat!("Size of: ", stringify!(__kernel_fd_set))
);
assert_eq!(
::std::mem::align_of::<__kernel_fd_set>(),
8usize,
concat!("Alignment of ", stringify!(__kernel_fd_set))
);
}
pub type __kernel_sighandler_t =
::std::option::Option<unsafe extern "C" fn(arg1: ::std::os::raw::c_int)>;
pub type __kernel_key_t = ::std::os::raw::c_int;
pub type __kernel_mqd_t = ::std::os::raw::c_int;
pub type __kernel_old_uid_t = ::std::os::raw::c_ushort;
pub type __kernel_old_gid_t = ::std::os::raw::c_ushort;
pub type __kernel_old_dev_t = ::std::os::raw::c_ulong;
pub type __kernel_long_t = ::std::os::raw::c_long;
pub type __kernel_ulong_t = ::std::os::raw::c_ulong;
pub type __kernel_ino_t = __kernel_ulong_t;
pub type __kernel_mode_t = ::std::os::raw::c_uint;
pub type __kernel_pid_t = ::std::os::raw::c_int;
pub type __kernel_ipc_pid_t = ::std::os::raw::c_int;
pub type __kernel_uid_t = ::std::os::raw::c_uint;
pub type __kernel_gid_t = ::std::os::raw::c_uint;
pub type __kernel_suseconds_t = __kernel_long_t;
pub type __kernel_daddr_t = ::std::os::raw::c_int;
pub type __kernel_uid32_t = ::std::os::raw::c_uint;
pub type __kernel_gid32_t = ::std::os::raw::c_uint;
pub type __kernel_size_t = __kernel_ulong_t;
pub type __kernel_ssize_t = __kernel_long_t;
pub type __kernel_ptrdiff_t = __kernel_long_t;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct __kernel_fsid_t {
pub val: [::std::os::raw::c_int; 2usize],
}
#[test]
fn bindgen_test_layout___kernel_fsid_t() {
assert_eq!(
::std::mem::size_of::<__kernel_fsid_t>(),
8usize,
concat!("Size of: ", stringify!(__kernel_fsid_t))
);
assert_eq!(
::std::mem::align_of::<__kernel_fsid_t>(),
4usize,
concat!("Alignment of ", stringify!(__kernel_fsid_t))
);
}
pub type __kernel_off_t = __kernel_long_t;
pub type __kernel_loff_t = ::std::os::raw::c_longlong;
pub type __kernel_time_t = __kernel_long_t;
pub type __kernel_clock_t = __kernel_long_t;
pub type __kernel_timer_t = ::std::os::raw::c_int;
pub type __kernel_clockid_t = ::std::os::raw::c_int;
pub type __kernel_caddr_t = *mut ::std::os::raw::c_char;
pub type __kernel_uid16_t = ::std::os::raw::c_ushort;
pub type __kernel_gid16_t = ::std::os::raw::c_ushort;
pub type __le16 = __u16;
pub type __be16 = __u16;
pub type __le32 = __u32;
pub type __be32 = __u32;
pub type __le64 = __u64;
pub type __be64 = __u64;
pub type __sum16 = __u16;
pub type __wsum = __u32;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_info_cap_header {
pub id: __u16,
pub version: __u16,
pub next: __u32,
}
#[test]
fn | () {
assert_eq!(
::std::mem::size_of::<vfio_info_cap_header>(),
8usize,
concat!("Size of: ", stringify!(vfio_info_cap_header))
);
assert_eq!(
::std::mem::align_of::<vfio_info_cap_header>(),
4usize,
concat!("Alignment of ", stringify!(vfio_info_cap_header))
);
}
#[doc = " VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3,"]
#[doc = "\t\t\t\t\t\tstruct vfio_group_status)"]
#[doc = ""]
#[doc = " Retrieve information about the group. Fills in provided"]
#[doc = " struct vfio_group_info. Caller sets argsz."]
#[doc = " Return: 0 on succes, -errno on failure."]
#[doc = " Availability: Always"]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_group_status {
pub argsz: __u32,
pub flags: __u32,
}
#[test]
fn bindgen_test_layout_vfio_group_status() {
assert_eq!(
::std::mem::size_of::<vfio_group_status>(),
8usize,
concat!("Size of: ", stringify!(vfio_group_status))
);
assert_eq!(
::std::mem::align_of::<vfio_group_status>(),
4usize,
concat!("Alignment of ", stringify!(vfio_group_status))
);
}
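// A minimal sketch (not in the generated bindings) of preparing the struct
// for VFIO_GROUP_GET_STATUS as the doc above describes: the caller fills
// `argsz`, the kernel fills `flags`, and userspace then checks viability.
// The flags value below merely simulates what the kernel would return.
#[cfg(test)]
mod vfio_group_status_example {
    use super::*;

    #[test]
    fn viable_flag_check() {
        let mut status = vfio_group_status::default();
        status.argsz = ::std::mem::size_of::<vfio_group_status>() as __u32;
        status.flags = VFIO_GROUP_FLAGS_VIABLE; // pretend the ioctl set this
        assert!(status.flags & VFIO_GROUP_FLAGS_VIABLE != 0);
    }
}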
#[doc = " VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7,"]
#[doc = "\t\t\t\t\t\tstruct vfio_device_info)"]
#[doc = ""]
#[doc = " Retrieve information about the device. Fills in provided"]
#[doc = " struct vfio_device_info. Caller sets argsz."]
#[doc = " Return: 0 on success, -errno on failure."]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_device_info {
pub argsz: __u32,
pub flags: __u32,
pub num_regions: __u32,
pub num_irqs: __u32,
}
#[test]
fn bindgen_test_layout_vfio_device_info() {
assert_eq!(
::std::mem::size_of::<vfio_device_info>(),
16usize,
concat!("Size of: ", stringify!(vfio_device_info))
);
assert_eq!(
::std::mem::align_of::<vfio_device_info>(),
4usize,
concat!("Alignment of ", stringify!(vfio_device_info))
);
}
#[doc = " VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,"]
#[doc = "\t\t\t\t struct vfio_region_info)"]
#[doc = ""]
#[doc = " Retrieve information about a device region. Caller provides"]
#[doc = " struct vfio_region_info with index value set. Caller sets argsz."]
#[doc = " Implementation of region mapping is bus driver specific. This is"]
#[doc = " intended to describe MMIO, I/O port, as well as bus specific"]
#[doc = " regions (ex. PCI config space). Zero sized regions may be used"]
#[doc = " to describe unimplemented regions (ex. unimplemented PCI BARs)."]
#[doc = " Return: 0 on success, -errno on failure."]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_region_info {
pub argsz: __u32,
pub flags: __u32,
pub index: __u32,
pub cap_offset: __u32,
pub size: __u64,
pub offset: __u64,
}
#[test]
fn bindgen_test_layout_vfio_region_info() {
assert_eq!(
::std::mem::size_of::<vfio_region_info>(),
32usize,
concat!("Size of: ", stringify!(vfio_region_info))
);
assert_eq!(
::std::mem::align_of::<vfio_region_info>(),
8usize,
concat!("Alignment of ", stringify!(vfio_region_info))
);
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_region_sparse_mmap_area {
pub offset: __u64,
pub size: __u64,
}
#[test]
fn bindgen_test_layout_vfio_region_sparse_mmap_area() {
assert_eq!(
::std::mem::size_of::<vfio_region_sparse_mmap_area>(),
16usize,
concat!("Size of: ", stringify!(vfio_region_sparse_mmap_area))
);
assert_eq!(
::std::mem::align_of::<vfio_region_sparse_mmap_area>(),
8usize,
concat!("Alignment of ", stringify!(vfio_region_sparse_mmap_area))
);
}
#[repr(C)]
#[repr(align(8))]
#[derive(Debug, Default)]
pub struct vfio_region_info_cap_sparse_mmap {
pub header: vfio_info_cap_header,
pub nr_areas: __u32,
pub reserved: __u32,
pub areas: __IncompleteArrayField<vfio_region_sparse_mmap_area>,
}
#[test]
fn bindgen_test_layout_vfio_region_info_cap_sparse_mmap() {
assert_eq!(
::std::mem::size_of::<vfio_region_info_cap_sparse_mmap>(),
16usize,
concat!("Size of: ", stringify!(vfio_region_info_cap_sparse_mmap))
);
assert_eq!(
::std::mem::align_of::<vfio_region_info_cap_sparse_mmap>(),
8usize,
concat!(
"Alignment of ",
stringify!(vfio_region_info_cap_sparse_mmap)
)
);
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_region_info_cap_type {
pub header: vfio_info_cap_header,
pub type_: __u32,
pub subtype: __u32,
}
#[test]
fn bindgen_test_layout_vfio_region_info_cap_type() {
assert_eq!(
::std::mem::size_of::<vfio_region_info_cap_type>(),
16usize,
concat!("Size of: ", stringify!(vfio_region_info_cap_type))
);
assert_eq!(
::std::mem::align_of::<vfio_region_info_cap_type>(),
4usize,
concat!("Alignment of ", stringify!(vfio_region_info_cap_type))
);
}
#[doc = " VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,"]
#[doc = "\t\t\t\t struct vfio_irq_info)"]
#[doc = ""]
#[doc = " Retrieve information about a device IRQ. Caller provides"]
#[doc = " struct vfio_irq_info with index value set. Caller sets argsz."]
#[doc = " Implementation of IRQ mapping is bus driver specific. Indexes"]
#[doc = " using multiple IRQs are primarily intended to support MSI-like"]
#[doc = " interrupt blocks. Zero count irq blocks may be used to describe"]
#[doc = " unimplemented interrupt types."]
#[doc = ""]
#[doc = " The EVENTFD flag indicates the interrupt index supports eventfd based"]
#[doc = " signaling."]
#[doc = ""]
#[doc = " The MASKABLE flags indicates the index supports MASK and UNMASK"]
#[doc = " actions described below."]
#[doc = ""]
#[doc = " AUTOMASKED indicates that after signaling, the interrupt line is"]
#[doc = " automatically masked by VFIO and the user needs to unmask the line"]
#[doc = " to receive new interrupts. This is primarily intended to distinguish"]
#[doc = " level triggered interrupts."]
#[doc = ""]
#[doc = " The NORESIZE flag indicates that the interrupt lines within the index"]
#[doc = " are setup as a set and new subindexes cannot be enabled without first"]
#[doc = " disabling the entire index. This is used for interrupts like PCI MSI"]
#[doc = " and MSI-X where the driver may only use a subset of the available"]
#[doc = " indexes, but VFIO needs to enable a specific number of vectors"]
#[doc = " upfront. In the case of MSI-X, where the user can enable MSI-X and"]
#[doc = " then add and unmask vectors, it's up to userspace to make the decision"]
#[doc = " whether to allocate the maximum supported number of vectors or tear"]
#[doc = " down setup and incrementally increase the vectors as each is enabled."]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_irq_info {
pub argsz: __u32,
pub flags: __u32,
pub index: __u32,
pub count: __u32,
}
#[test]
fn bindgen_test_layout_vfio_irq_info() {
assert_eq!(
::std::mem::size_of::<vfio_irq_info>(),
16usize,
concat!("Size of: ", stringify!(vfio_irq_info))
);
assert_eq!(
::std::mem::align_of::<vfio_irq_info>(),
4usize,
concat!("Alignment of ", stringify!(vfio_irq_info))
);
}
#[doc = " VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)"]
#[doc = ""]
#[doc = " Set signaling, masking, and unmasking of interrupts. Caller provides"]
#[doc = " struct vfio_irq_set with all fields set. 'start' and 'count' indicate"]
#[doc = " the range of subindexes being specified."]
#[doc = ""]
#[doc = " The DATA flags specify the type of data provided. If DATA_NONE, the"]
#[doc = " operation performs the specified action immediately on the specified"]
#[doc = " interrupt(s). For example, to unmask AUTOMASKED interrupt [0,0]:"]
#[doc = " flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1."]
#[doc = ""]
#[doc = " DATA_BOOL allows sparse support for the same on arrays of interrupts."]
#[doc = " For example, to mask interrupts [0,1] and [0,3] (but not [0,2]):"]
#[doc = " flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3,"]
#[doc = " data = {1,0,1}"]
#[doc = ""]
#[doc = " DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd."]
#[doc = " A value of -1 can be used to either de-assign interrupts if already"]
#[doc = " assigned or skip un-assigned interrupts. For example, to set an eventfd"]
#[doc = " to be trigger for interrupts [0,0] and [0,2]:"]
#[doc = " flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3,"]
#[doc = " data = {fd1, -1, fd2}"]
#[doc = " If index [0,1] is previously set, two count = 1 ioctls calls would be"]
#[doc = " required to set [0,0] and [0,2] without changing [0,1]."]
#[doc = ""]
#[doc = " Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used"]
#[doc = " with ACTION_TRIGGER to perform kernel level interrupt loopback testing"]
#[doc = " from userspace (ie. simulate hardware triggering)."]
#[doc = ""]
#[doc = " Setting of an event triggering mechanism to userspace for ACTION_TRIGGER"]
#[doc = " enables the interrupt index for the device. Individual subindex interrupts"]
#[doc = " can be disabled using the -1 value for DATA_EVENTFD or the index can be"]
#[doc = " disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0."]
#[doc = ""]
#[doc = " Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while"]
#[doc = " ACTION_TRIGGER specifies kernel->user signaling."]
#[repr(C)]
#[derive(Debug, Default)]
pub struct vfio_irq_set {
pub argsz: __u32,
pub flags: __u32,
pub index: __u32,
pub start: __u32,
pub count: __u32,
pub data: __IncompleteArrayField<__u8>,
}
#[test]
fn bindgen_test_layout_vfio_irq_set() {
assert_eq!(
::std::mem::size_of::<vfio_irq_set>(),
20usize,
concat!("Size of: ", stringify!(vfio_irq_set))
);
assert_eq!(
::std::mem::align_of::<vfio_irq_set>(),
4usize,
concat!("Alignment of ", stringify!(vfio_irq_set))
);
}
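// A minimal sketch (not in the generated bindings) of the DATA_NONE case
// documented above: trigger interrupt [0,0] immediately, with no trailing
// payload, so the incomplete `data` array stays empty and the plain struct
// with `argsz = size_of::<vfio_irq_set>()` suffices.
#[cfg(test)]
mod vfio_irq_set_example {
    use super::*;

    #[test]
    fn build_data_none_trigger() {
        let mut irq_set = vfio_irq_set::default();
        irq_set.argsz = ::std::mem::size_of::<vfio_irq_set>() as __u32;
        irq_set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
        irq_set.index = 0;
        irq_set.start = 0;
        irq_set.count = 1;
        // The data-type bits decode back to DATA_NONE.
        assert_eq!(
            irq_set.flags & VFIO_IRQ_SET_DATA_TYPE_MASK,
            VFIO_IRQ_SET_DATA_NONE
        );
    }
}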
pub const VFIO_PCI_BAR0_REGION_INDEX: _bindgen_ty_1 = 0;
pub const VFIO_PCI_BAR1_REGION_INDEX: _bindgen_ty_1 = 1;
pub const VFIO_PCI_BAR2_REGION_INDEX: _bindgen_ty_1 = 2;
pub const VFIO_PCI_BAR3_REGION_INDEX: _bindgen_ty_1 = 3;
pub const VFIO_PCI_BAR4_REGION_INDEX: _bindgen_ty_1 = 4;
pub const VFIO_PCI_BAR5_REGION_INDEX: _bindgen_ty_1 = 5;
pub const VFIO_PCI_ROM_REGION_INDEX: _bindgen_ty_1 = 6;
pub const VFIO_PCI_CONFIG_REGION_INDEX: _bindgen_ty_1 = 7;
pub const VFIO_PCI_VGA_REGION_INDEX: _bindgen_ty_1 = 8;
pub const VFIO_PCI_NUM_REGIONS: _bindgen_ty_1 = 9;
pub type _bindgen_ty_1 = u32;
pub const VFIO_PCI_INTX_IRQ_INDEX: _bindgen_ty_2 = 0;
pub const VFIO_PCI_MSI_IRQ_INDEX: _bindgen_ty_2 = 1;
pub const VFIO_PCI_MSIX_IRQ_INDEX: _bindgen_ty_2 = 2;
pub const VFIO_PCI_ERR_IRQ_INDEX: _bindgen_ty_2 = 3;
pub const VFIO_PCI_REQ_IRQ_INDEX: _bindgen_ty_2 = 4;
pub const VFIO_PCI_NUM_IRQS: _bindgen_ty_2 = 5;
pub type _bindgen_ty_2 = u32;
pub const VFIO_CCW_CONFIG_REGION_INDEX: _bindgen_ty_3 = 0;
pub const VFIO_CCW_NUM_REGIONS: _bindgen_ty_3 = 1;
pub type _bindgen_ty_3 = u32;
pub const VFIO_CCW_IO_IRQ_INDEX: _bindgen_ty_4 = 0;
pub const VFIO_CCW_NUM_IRQS: _bindgen_ty_4 = 1;
pub type _bindgen_ty_4 = u32;
#[doc = " VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IORW(VFIO_TYPE, VFIO_BASE + 12,"]
#[doc = "\t\t\t\t\t struct vfio_pci_hot_reset_info)"]
#[doc = ""]
#[doc = " Return: 0 on success, -errno on failure:"]
#[doc = "\t-enospc = insufficient buffer, -enodev = unsupported for device."]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_pci_dependent_device {
pub group_id: __u32,
pub segment: __u16,
pub bus: __u8,
pub devfn: __u8,
}
#[test]
fn bindgen_test_layout_vfio_pci_dependent_device() {
assert_eq!(
::std::mem::size_of::<vfio_pci_dependent_device>(),
8usize,
concat!("Size of: ", stringify!(vfio_pci_dependent_device))
);
assert_eq!(
::std::mem::align_of::<vfio_pci_dependent_device>(),
4usize,
concat!("Alignment of ", stringify!(vfio_pci_dependent_device))
);
}
#[repr(C)]
#[derive(Debug, Default)]
pub struct vfio_pci_hot_reset_info {
pub argsz: __u32,
pub flags: __u32,
pub count: __u32,
pub devices: __IncompleteArrayField<vfio_pci_dependent_device>,
}
#[test]
fn bindgen_test_layout_vfio_pci_hot_reset_info() {
assert_eq!(
::std::mem::size_of::<vfio_pci_hot_reset_info>(),
12usize,
concat!("Size of: ", stringify!(vfio_pci_hot_reset_info))
);
assert_eq!(
::std::mem::align_of::<vfio_pci_hot_reset_info>(),
4usize,
concat!("Alignment of ", stringify!(vfio_pci_hot_reset_info))
);
}
#[doc = " VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,"]
#[doc = "\t\t\t\t struct vfio_pci_hot_reset)"]
#[doc = ""]
#[doc = " Return: 0 on success, -errno on failure."]
#[repr(C)]
#[derive(Debug, Default)]
pub struct vfio_pci_hot_reset {
pub argsz: __u32,
pub flags: __u32,
pub count: __u32,
pub group_fds: __IncompleteArrayField<__s32>,
}
#[test]
fn bindgen_test_layout_vfio_pci_hot_reset() {
assert_eq!(
::std::mem::size_of::<vfio_pci_hot_reset>(),
12usize,
concat!("Size of: ", stringify!(vfio_pci_hot_reset))
);
assert_eq!(
::std::mem::align_of::<vfio_pci_hot_reset>(),
4usize,
concat!("Alignment of ", stringify!(vfio_pci_hot_reset))
);
}
#[doc = " VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14,"]
#[doc = " struct vfio_device_query_gfx_plane)"]
#[doc = ""]
#[doc = " Set the drm_plane_type and flags, then retrieve the gfx plane info."]
#[doc = ""]
#[doc = " flags supported:"]
#[doc = " - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set"]
#[doc = " to ask if the mdev supports dma-buf. 0 on support, -EINVAL on no"]
#[doc = " support for dma-buf."]
#[doc = " - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set"]
#[doc = " to ask if the mdev supports region. 0 on support, -EINVAL on no"]
#[doc = " support for region."]
#[doc = " - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set"]
#[doc = " with each call to query the plane info."]
#[doc = " - Others are invalid and return -EINVAL."]
#[doc = ""]
#[doc = " Note:"]
#[doc = " 1. Plane could be disabled by guest. In that case, success will be"]
#[doc = " returned with zero-initialized drm_format, size, width and height"]
#[doc = " fields."]
#[doc = " 2. x_hot/y_hot is set to 0xFFFFFFFF if no hotspot information available"]
#[doc = ""]
#[doc = " Return: 0 on success, -errno on other failure."]
#[repr(C)]
#[derive(Copy, Clone)]
pub struct vfio_device_gfx_plane_info {
pub argsz: __u32,
pub flags: __u32,
pub drm_plane_type: __u32,
pub drm_format: __u32,
pub drm_format_mod: __u64,
pub width: __u32,
pub height: __u32,
pub stride: __u32,
pub size: __u32,
pub x_pos: __u32,
pub y_pos: __u32,
pub x_hot: __u32,
pub y_hot: __u32,
pub __bindgen_anon_1: vfio_device_gfx_plane_info__bindgen_ty_1,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union vfio_device_gfx_plane_info__bindgen_ty_1 {
pub region_index: __u32,
pub dmabuf_id: __u32,
_bindgen_union_align: u32,
}
#[test]
fn bindgen_test_layout_vfio_device_gfx_plane_info__bindgen_ty_1() {
assert_eq!(
::std::mem::size_of::<vfio_device_gfx_plane_info__bindgen_ty_1>(),
4usize,
concat!(
"Size of: ",
stringify!(vfio_device_gfx_plane_info__bindgen_ty_1)
)
);
assert_eq!(
::std::mem::align_of::<vfio_device_gfx_plane_info__bindgen_ty_1>(),
4usize,
concat!(
"Alignment of ",
stringify!(vfio_device_gfx_plane_info__bindgen_ty_1)
)
);
}
impl Default for vfio_device_gfx_plane_info__bindgen_ty_1 {
fn default() -> Self {
unsafe { ::std::mem::zeroed() }
}
}
#[test]
fn bindgen_test_layout_vfio_device_gfx_plane_info() {
assert_eq!(
::std::mem::size_of::<vfio_device_gfx_plane_info>(),
64usize,
concat!("Size of: ", stringify!(vfio_device_gfx_plane_info))
);
assert_eq!(
::std::mem::align_of::<vfio_device_gfx_plane_info>(),
8usize,
concat!("Alignment of ", stringify!(vfio_device_gfx_plane_info))
);
}
impl Default for vfio_device_gfx_plane_info {
fn default() -> Self {
unsafe { ::std::mem::zeroed() }
}
}
#[doc = " VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16,"]
#[doc = " struct vfio_device_ioeventfd)"]
#[doc = ""]
#[doc = " Perform a write to the device at the specified device fd offset, with"]
#[doc = " the specified data and width when the provided eventfd is triggered."]
#[doc = " vfio bus drivers may not support this for all regions, for all widths,"]
#[doc = " or at all. vfio-pci currently only enables support for BAR regions,"]
#[doc = " excluding the MSI-X vector table."]
#[doc = ""]
#[doc = " Return: 0 on success, -errno on failure."]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_device_ioeventfd {
pub argsz: __u32,
pub flags: __u32,
pub offset: __u64,
pub data: __u64,
pub fd: __s32,
}
#[test]
fn bindgen_test_layout_vfio_device_ioeventfd() {
assert_eq!(
::std::mem::size_of::<vfio_device_ioeventfd>(),
32usize,
concat!("Size of: ", stringify!(vfio_device_ioeventfd))
);
assert_eq!(
::std::mem::align_of::<vfio_device_ioeventfd>(),
8usize,
concat!("Alignment of ", stringify!(vfio_device_ioeventfd))
);
}
#[doc = " VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12, struct vfio_iommu_info)"]
#[doc = ""]
#[doc = " Retrieve information about the IOMMU object. Fills in provided"]
#[doc = " struct vfio_iommu_info. Caller sets argsz."]
#[doc = ""]
#[doc = " XXX Should we do these by CHECK_EXTENSION too?"]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_iommu_type1_info {
pub argsz: __u32,
pub flags: __u32,
pub iova_pgsizes: __u64,
}
#[test]
fn bindgen_test_layout_vfio_iommu_type1_info() {
assert_eq!(
::std::mem::size_of::<vfio_iommu_type1_info>(),
16usize,
concat!("Size of: ", stringify!(vfio_iommu_type1_info))
);
assert_eq!(
::std::mem::align_of::<vfio_iommu_type1_info>(),
8usize,
concat!("Alignment of ", stringify!(vfio_iommu_type1_info))
);
}
#[doc = " VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13, struct vfio_dma_map)"]
#[doc = ""]
#[doc = " Map process virtual addresses to IO virtual addresses using the"]
#[doc = " provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required."]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_iommu_type1_dma_map {
pub argsz: __u32,
pub flags: __u32,
pub vaddr: __u64,
pub iova: __u64,
pub size: __u64,
}
#[test]
fn bindgen_test_layout_vfio_iommu_type1_dma_map() {
assert_eq!(
::std::mem::size_of::<vfio_iommu_type1_dma_map>(),
32usize,
concat!("Size of: ", stringify!(vfio_iommu_type1_dma_map))
);
assert_eq!(
::std::mem::align_of::<vfio_iommu_type1_dma_map>(),
8usize,
concat!("Alignment of ", stringify!(vfio_iommu_type1_dma_map))
);
}
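// A minimal sketch (not in the generated bindings) of filling a type1 DMA
// map request per the doc above: caller sets argsz, READ/WRITE flags, and
// the vaddr/iova/size triple. Values are illustrative only; issuing the
// VFIO_IOMMU_MAP_DMA ioctl itself is out of scope here.
#[cfg(test)]
mod vfio_dma_map_example {
    use super::*;

    #[test]
    fn build_dma_map_request() {
        let dma_map = vfio_iommu_type1_dma_map {
            argsz: ::std::mem::size_of::<vfio_iommu_type1_dma_map>() as __u32,
            flags: VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
            vaddr: 0x7f00_0000_0000, // hypothetical process virtual address
            iova: 0,                 // IO virtual address seen by the device
            size: 2 * 1024 * 1024,   // map one 2 MiB chunk
        };
        // Matches the layout asserted in the test above.
        assert_eq!(dma_map.argsz, 32);
    }
}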
#[doc = " VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,"]
#[doc = "\t\t\t\t\t\t\tstruct vfio_dma_unmap)"]
#[doc = ""]
#[doc = " Unmap IO virtual addresses using the provided struct vfio_dma_unmap."]
#[doc = " Caller sets argsz. The actual unmapped size is returned in the size"]
#[doc = " field. No guarantee is made to the user that arbitrary unmaps of iova"]
#[doc = " or size different from those used in the original mapping call will"]
#[doc = " succeed."]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_iommu_type1_dma_unmap {
pub argsz: __u32,
pub flags: __u32,
pub iova: __u64,
pub size: __u64,
}
#[test]
fn bindgen_test_layout_vfio_iommu_type1_dma_unmap() {
assert_eq!(
::std::mem::size_of::<vfio_iommu_type1_dma_unmap>(),
24usize,
concat!("Size of: ", stringify!(vfio_iommu_type1_dma_unmap))
);
assert_eq!(
::std::mem::align_of::<vfio_iommu_type1_dma_unmap>(),
8usize,
concat!("Alignment of ", stringify!(vfio_iommu_type1_dma_unmap))
);
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_iommu_spapr_tce_ddw_info {
pub pgsizes: __u64,
pub max_dynamic_windows_supported: __u32,
pub levels: __u32,
}
#[test]
fn bindgen_test_layout_vfio_iommu_spapr_tce_ddw_info() {
assert_eq!(
::std::mem::size_of::<vfio_iommu_spapr_tce_ddw_info>(),
16usize,
concat!("Size of: ", stringify!(vfio_iommu_spapr_tce_ddw_info))
);
assert_eq!(
::std::mem::align_of::<vfio_iommu_spapr_tce_ddw_info>(),
8usize,
concat!("Alignment of ", stringify!(vfio_iommu_spapr_tce_ddw_info))
);
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_iommu_spapr_tce_info {
pub argsz: __u32,
pub flags: __u32,
pub dma32_window_start: __u32,
pub dma32_window_size: __u32,
pub ddw: vfio_iommu_spapr_tce_ddw_info,
}
#[test]
fn bindgen_test_layout_vfio_iommu_spapr_tce_info() {
assert_eq!(
::std::mem::size_of::<vfio_iommu_spapr_tce_info>(),
32usize,
concat!("Size of: ", stringify!(vfio_iommu_spapr_tce_info))
);
assert_eq!(
::std::mem::align_of::<vfio_iommu_spapr_tce_info>(),
8usize,
concat!("Alignment of ", stringify!(vfio_iommu_spapr_tce_info))
);
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_eeh_pe_err {
pub type_: __u32,
pub func: __u32,
pub addr: __u64,
pub mask: __u64,
}
#[test]
fn bindgen_test_layout_vfio_eeh_pe_err() {
assert_eq!(
::std::mem::size_of::<vfio_eeh_pe_err>(),
24usize,
concat!("Size of: ", stringify!(vfio_eeh_pe_err))
);
assert_eq!(
::std::mem::align_of::<vfio_eeh_pe_err>(),
8usize,
concat!("Alignment of ", stringify!(vfio_eeh_pe_err))
);
}
#[repr(C)]
#[derive(Copy, Clone)]
pub struct vfio_eeh_pe_op {
pub argsz: __u32,
pub flags: __u32,
pub op: __u32,
pub __bindgen_anon_1: vfio_eeh_pe_op__bindgen_ty_1,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union vfio_eeh_pe_op__bindgen_ty_1 {
pub err: vfio_eeh_pe_err,
_bindgen_union_align: [u64; 3usize],
}
#[test]
fn bindgen_test_layout_vfio_eeh_pe_op__bindgen_ty_1() {
assert_eq!(
::std::mem::size_of::<vfio_eeh_pe_op__bindgen_ty_1>(),
24usize,
concat!("Size of: ", stringify!(vfio_eeh_pe_op__bindgen_ty_1))
);
assert_eq!(
::std::mem::align_of::<vfio_eeh_pe_op__bindgen_ty_1>(),
8usize,
concat!("Alignment of ", stringify!(vfio_eeh_pe_op__bindgen_ty_1))
);
}
impl Default for vfio_eeh_pe_op__bindgen_ty_1 {
fn default() -> Self {
unsafe { ::std::mem::zeroed() }
}
}
#[test]
fn bindgen_test_layout_vfio_eeh_pe_op() {
assert_eq!(
::std::mem::size_of::<vfio_eeh_pe_op>(),
40usize,
concat!("Size of: ", stringify!(vfio_eeh_pe_op))
);
assert_eq!(
::std::mem::align_of::<vfio_eeh_pe_op>(),
8usize,
concat!("Alignment of ", stringify!(vfio_eeh_pe_op))
);
}
impl Default for vfio_eeh_pe_op {
fn default() -> Self {
unsafe { ::std::mem::zeroed() }
}
}
#[doc = " VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)"]
#[doc = ""]
#[doc = " Registers user space memory where DMA is allowed. It pins"]
#[doc = " user pages and does the locked memory accounting so"]
#[doc = " subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls"]
#[doc = " get faster."]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_iommu_spapr_register_memory {
pub argsz: __u32,
pub flags: __u32,
pub vaddr: __u64,
pub size: __u64,
}
#[test]
fn bindgen_test_layout_vfio_iommu_spapr_register_memory() {
assert_eq!(
::std::mem::size_of::<vfio_iommu_spapr_register_memory>(),
24usize,
concat!("Size of: ", stringify!(vfio_iommu_spapr_register_memory))
);
assert_eq!(
::std::mem::align_of::<vfio_iommu_spapr_register_memory>(),
8usize,
concat!(
"Alignment of ",
stringify!(vfio_iommu_spapr_register_memory)
)
);
}
#[doc = " VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)"]
#[doc = ""]
#[doc = " Creates an additional TCE table and programs it (sets a new DMA window)"]
#[doc = " to every IOMMU group in the container. It receives page shift, window"]
#[doc = " size and number of levels in the TCE table being created."]
#[doc = ""]
#[doc = " It allocates and returns an offset on a PCI bus of the new DMA window."]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_iommu_spapr_tce_create {
pub argsz: __u32,
pub flags: __u32,
pub page_shift: __u32,
pub __resv1: __u32,
pub window_size: __u64,
pub levels: __u32,
pub __resv2: __u32,
pub start_addr: __u64,
}
#[test]
fn bindgen_test_layout_vfio_iommu_spapr_tce_create() {
assert_eq!(
::std::mem::size_of::<vfio_iommu_spapr_tce_create>(),
40usize,
concat!("Size of: ", stringify!(vfio_iommu_spapr_tce_create))
);
assert_eq!(
::std::mem::align_of::<vfio_iommu_spapr_tce_create>(),
8usize,
concat!("Alignment of ", stringify!(vfio_iommu_spapr_tce_create))
);
}
#[doc = " VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)"]
#[doc = ""]
#[doc = " Unprograms a TCE table from all groups in the container and destroys it."]
#[doc = " It receives a PCI bus offset as a window id."]
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct vfio_iommu_spapr_tce_remove {
pub argsz: __u32,
pub flags: __u32,
pub start_addr: __u64,
}
#[test]
fn bindgen_test_layout_vfio_iommu_spapr_tce_remove() {
assert_eq!(
::std::mem::size_of::<vfio_iommu_spapr_tce_remove>(),
16usize,
concat!("Size of: ", stringify!(vfio_iommu_spapr_tce_remove))
);
assert_eq!(
::std::mem::align_of::<vfio_iommu_spapr_tce_remove>(),
8usize,
concat!("Alignment of ", stringify!(vfio_iommu_spapr_tce_remove))
);
}
| bindgen_test_layout_vfio_info_cap_header |
shared.rs | //! Some general utility functions.
use std::{iter, str::from_utf8};
use colorsys::Rgb;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use std::{fs, io};
use strip_ansi_escapes::strip;
use unicode_width::UnicodeWidthStr;
use zellij_tile::data::{Palette, PaletteColor, PaletteSource, ThemeHue};
const UNIX_PERMISSIONS: u32 = 0o700;
pub fn set_permissions(path: &Path) -> io::Result<()> {
let mut permissions = fs::metadata(path)?.permissions();
permissions.set_mode(UNIX_PERMISSIONS);
fs::set_permissions(path, permissions)
}
pub fn ansi_len(s: &str) -> usize {
from_utf8(&strip(s).unwrap()).unwrap().width()
}
pub fn adjust_to_size(s: &str, rows: usize, columns: usize) -> String {
s.lines()
.map(|l| {
let actual_len = ansi_len(l);
if actual_len > columns {
let mut line = String::from(l);
line.truncate(columns);
line
} else {
[l, &str::repeat(" ", columns - ansi_len(l))].concat()
}
})
.chain(iter::repeat(str::repeat(" ", columns)))
.take(rows)
.collect::<Vec<_>>()
.join("\n\r")
}
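// A quick sketch (not part of the original file) of the geometry contract:
// short lines are padded to `columns`, missing rows are filled with blanks,
// and rows are joined with "\n\r".
#[cfg(test)]
mod adjust_to_size_example {
    use super::*;

    #[test]
    fn pads_to_requested_geometry() {
        // "hi" is padded to 4 columns; a second all-blank row is appended.
        assert_eq!(adjust_to_size("hi", 2, 4), "hi  \n\r    ");
    }
}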
// Colors
pub mod colors {
pub const WHITE: u8 = 255;
pub const GREEN: u8 = 154;
pub const GRAY: u8 = 238;
pub const BRIGHT_GRAY: u8 = 245;
pub const RED: u8 = 88;
pub const ORANGE: u8 = 166;
pub const BLACK: u8 = 16;
pub const MAGENTA: u8 = 201;
pub const CYAN: u8 = 51;
pub const YELLOW: u8 = 226;
pub const BLUE: u8 = 45;
pub const PURPLE: u8 = 99;
pub const GOLD: u8 = 136;
pub const SILVER: u8 = 245;
pub const PINK: u8 = 207;
pub const BROWN: u8 = 215;
}
pub fn _hex_to_rgb(hex: &str) -> (u8, u8, u8) |
pub fn default_palette() -> Palette {
Palette {
source: PaletteSource::Default,
theme_hue: ThemeHue::Dark,
fg: PaletteColor::EightBit(colors::BRIGHT_GRAY),
bg: PaletteColor::EightBit(colors::GRAY),
black: PaletteColor::EightBit(colors::BLACK),
red: PaletteColor::EightBit(colors::RED),
green: PaletteColor::EightBit(colors::GREEN),
yellow: PaletteColor::EightBit(colors::YELLOW),
blue: PaletteColor::EightBit(colors::BLUE),
magenta: PaletteColor::EightBit(colors::MAGENTA),
cyan: PaletteColor::EightBit(colors::CYAN),
white: PaletteColor::EightBit(colors::WHITE),
orange: PaletteColor::EightBit(colors::ORANGE),
gray: PaletteColor::EightBit(colors::GRAY),
purple: PaletteColor::EightBit(colors::PURPLE),
gold: PaletteColor::EightBit(colors::GOLD),
silver: PaletteColor::EightBit(colors::SILVER),
pink: PaletteColor::EightBit(colors::PINK),
brown: PaletteColor::EightBit(colors::BROWN),
}
}
// Dark magic
pub fn _detect_theme_hue(bg: PaletteColor) -> ThemeHue {
match bg {
PaletteColor::Rgb((r, g, b)) => {
// HSP, P stands for perceived brightness
let hsp: f64 = (0.299 * (r as f64 * r as f64)
+ 0.587 * (g as f64 * g as f64)
+ 0.114 * (b as f64 * b as f64))
.sqrt();
match hsp > 127.5 {
true => ThemeHue::Light,
false => ThemeHue::Dark,
}
}
_ => ThemeHue::Dark,
}
}
// (this was shamelessly copied from alacritty)
//
// This returns the current terminal version as a unique number based on the
// semver version. The different versions are padded to ensure that a higher semver version will
// always report a higher version number.
pub fn version_number(mut version: &str) -> usize {
if let Some(separator) = version.rfind('-') {
version = &version[..separator];
}
let mut version_number = 0;
let semver_versions = version.split('.');
for (i, semver_version) in semver_versions.rev().enumerate() {
let semver_number = semver_version.parse::<usize>().unwrap_or(0);
version_number += usize::pow(100, i as u32) * semver_number;
}
version_number
}
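// A small worked example of the padding (versions chosen for illustration):
// version_number("0.20.1") = 1 + 20 * 100 + 0 * 10_000 = 2_001, while
// version_number("0.21.0") = 0 + 21 * 100 = 2_100, so the newer release
// compares greater. A pre-release suffix such as "-rc.1" is stripped by the
// `rfind('-')` branch before parsing.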
| {
Rgb::from_hex_str(hex)
.expect("The passed argument must be a valid hex color")
.into()
} |
log_unittest.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2016 Twitter. All rights reserved. | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
import unittest
from mock import patch, Mock, call
from io import StringIO
from heron.common.src.python.utils import proc
class LogTest(unittest.TestCase):
def setUp(self):
pass
def test_stream_process_stdout(self):
ret = StringIO(u'hello\nworld\n')
log_fn = Mock()
with patch("subprocess.Popen") as mock_process:
mock_process.stdout = ret
proc.stream_process_stdout(mock_process, log_fn)
log_fn.assert_has_calls([call(u'hello\n'), call(u'world\n')])
def test_async_stream_process_stdout(self):
ret = StringIO(u'hello\nworld\n')
log_fn = Mock()
with patch("subprocess.Popen") as mock_process:
mock_process.stdout = ret
thread = proc.async_stream_process_stdout(mock_process, log_fn)
thread.join()
log_fn.assert_has_calls([call(u'hello\n'), call(u'world\n')]) | # |
main.go | package main
import (
"runtime"
"time"
"github.com/dustin/go-humanize"
"github.com/zephinzer/dev/internal/constants"
"github.com/zephinzer/dev/internal/log"
)
func | () {
timeStarted := time.Now()
	// When trace logging is enabled, periodically log goroutine and memory
	// statistics so long-running sessions can be profiled from the output.
	go func(ticker <-chan time.Time) {
for {
<-ticker
if log.Trace != nil {
var memoryStatistics runtime.MemStats
runtime.ReadMemStats(&memoryStatistics)
log.Tracef("profile as of %s (started: %v)", time.Now().Format(constants.DevTimeFormat), humanize.Time(timeStarted))
log.Tracef("goroutines : %v", runtime.NumGoroutine())
totalAlloc := humanize.Bytes(memoryStatistics.TotalAlloc)
totalFrees := humanize.Bytes(memoryStatistics.Frees)
heapAlloc := humanize.Bytes(memoryStatistics.HeapAlloc)
sys := humanize.Bytes(memoryStatistics.Sys)
log.Tracef("alloc/frees/heap/sys : %s / %s / %s / %s", totalAlloc, totalFrees, heapAlloc, sys)
}
}
}(time.Tick(time.Second * 5))
cmd := GetCommand()
cmd.Execute()
}
| main |
conversation.py | import time
import emoji
from telegram import InlineKeyboardMarkup, ParseMode, InlineKeyboardButton
from telegram.ext import run_async, ConversationHandler
from telegram.error import TelegramError
from django.db.models import Q
from . import constants, authentication, renderers, models
def send_broadcast(admin, broadcast, context):
    """Send `broadcast` to every bot user in their preferred language, then
    report the success/error counts back to `admin`."""
    bot = context.bot
    success = 0
    errors = 0
for user in models.BotUser.objects.all():
try:
if user.language == 'es':
bot.send_message(
chat_id=user.chat_id,
text=broadcast.text_es
)
elif user.language == 'en':
bot.send_message(
chat_id=user.chat_id,
text=broadcast.text_en
)
success += 1
        except Exception:
            # Treat any delivery failure as the user having blocked the bot.
            user.has_blocked_bot = True
            user.save()
            errors += 1
        time.sleep(1)  # throttle sends to stay within Telegram rate limits
broadcast.success = success
broadcast.errors = errors
broadcast.sent = True
broadcast.save()
bot.send_message(
chat_id=admin.chat_id,
text='Enviados: {}\nErrores: {}'.format(
success,
errors
),
)
@run_async
def feedback(update, context):
query = update.callback_query
query.answer()
user = authentication.authenticate(update.effective_user)
text = ''
if user.language == 'es':
text = (
'¿Deseas enviar tu opinión para ayudarme a mejorar el bot?'
'\n\nPuedes reportar errores, solicitar nuevas funcionalidades o mejoras.\n\n'
'Envíame tu opinión o ejecuta /cancel.'
)
elif user.language == 'en':
text = (
'Do you want to send me your feedback to help me improve the bot?'
'\n\nYou can report bugs, request new features or improvements.\n\n'
'Send your feedback or execute /cancel.'
)
query.edit_message_text(
text=text
)
return constants.INPUT_FEEDBACK
@run_async
def input_feedback(update, context):
bot = context.bot
message = update.message.text
user = authentication.authenticate(update.effective_user)
_, keyboard = renderers.main_markup(user)
if str(message).lower() == '/cancel':
if user.language == 'es':
update.message.chat.send_message(
text='✅ Se canceló la acción que estabas llevando a cabo.',
reply_markup=InlineKeyboardMarkup(keyboard)
)
elif user.language == 'en':
update.message.chat.send_message(
text='✅ The action has been canceled.',
reply_markup=InlineKeyboardMarkup(keyboard)
)
else:
name = user.first_name
if user.last_name is not None:
name += ' ' + user.last_name
if user.username is not None:
name += '(@{})'.format(user.username)
text = (
'💬 Feedback from {name}:'
'\n\n{message}'.format(
name=name,
message=message
)
)
# persist feedback
models.Feedback.objects.create(
bot_user=user,
message=message
)
# send feedback to admins
admins = models.BotUser.objects.filter(is_admin=True)
for admin in admins:
bot.send_message(
chat_id=admin.chat_id,
text=text
)
# thanks
text = ''
if user.language == 'es':
text = 'Muchas gracias por tu opinión.'
elif user.language == 'en':
text = 'Thank you for your feedback.'
bot.send_message(
chat_id=user.chat_id,
text=text,
reply_markup=InlineKeyboardMarkup(keyboard)
)
return ConversationHandler.END
@run_async
def input_broadcast_message(update, context):
message = update.message.text
bot = context.bot
user = authentication.authenticate(update.effective_user)
try:
broadcast = models.Broadcast.objects.get(sent=False)
if broadcast.setting_lang == 'es':
broadcast.text_es = message
elif broadcast.setting_lang == 'en':
broadcast.text_en = message
broadcast.setting_lang = None
broadcast.save()
text, keyboard = renderers.broadcast_markup(user, context)
bot.send_message(
chat_id=user.chat_id,
text=text,
reply_markup=InlineKeyboardMarkup(keyboard)
)
return ConversationHandler.END
    except models.Broadcast.DoesNotExist:
        return ConversationHandler.END
@run_async
def input_direct_message(update, context):
bot = context.bot
message = update.message.text
user = authentication.authenticate(update.effective_user)
if user.is_admin:
context.user_data['md_text'] = message
bot.send_message(
chat_id=user.chat_id,
text=message,
reply_markup=InlineKeyboardMarkup([
[InlineKeyboardButton(text='Enviar', callback_data='confirm_md')],
[InlineKeyboardButton(text='Cancelar', callback_data='cancel_md')],
])
)
return ConversationHandler.END
@run_async
def broadcast(update, context):
query = update.callback_query
query.answer()
user = authentication.authenticate(update.effective_user)
params = query.data.split(' ')
operation = params[1]
value = None
    if len(params) > 2:
value = params[2]
try:
broad = models.Broadcast.objects.get(sent=False)
if operation == 'lang':
broad.setting_lang = value
broad.save()
query.edit_message_text(
text='Envíame el mensaje en idioma "{}"'.format(value)
)
return constants.INPUT_BROADCAST_MESSAGE
if operation == 'send':
send_broadcast(
admin=user,
broadcast=broad,
context=context
)
return ConversationHandler.END
except models.Broadcast.DoesNotExist:
query.edit_message_text(
text='No hay ninguna notificación en curso, comienza una nueva.'
)
return ConversationHandler.END
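# Broadcast flow, as wired above: the admin picks a language (the "lang"
# operation stores it on the pending Broadcast row), input_broadcast_message()
# saves the text for that language, and the "send" operation hands the
# broadcast to send_broadcast() for delivery.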
@run_async
def direct_message(update, context):
query = update.callback_query
query.answer()
user = authentication.authenticate(update.effective_user)
params = query.data.split(' ')
    user_id = params[1]
    if user.is_admin:
        context.user_data['md_id'] = user_id
context.bot.send_message(
chat_id=user.chat_id,
text='🤖 Escribe el mensaje para enviar al usuario.'
)
return constants.INPUT_DIRECT_MESSAGE
@run_async
def send_direct_message(update, context):
query = update.callback_query
query.answer()
user = authentication.authenticate(update.effective_user)
| bot = context.bot
        user_id = context.user_data.get('md_id')
        text = context.user_data.get('md_text')
        try:
            destiny_user = models.BotUser.objects.get(pk=user_id)
try:
bot.send_message(
chat_id=destiny_user.chat_id,
text=text
)
query.edit_message_text(
text='✅ Mensaje enviado.'
)
destiny_user.has_blocked_bot = False
destiny_user.save()
            except TelegramError:
destiny_user.has_blocked_bot = True
destiny_user.save()
bot.send_message(
chat_id=user.chat_id,
text='⚠️ No fue posible enviar el mensaje.'
)
except models.BotUser.DoesNotExist:
pass
        context.user_data.pop('md_id', None)
        context.user_data.pop('md_text', None)
@run_async
def cancel_direct_message(update, context):
query = update.callback_query
query.answer()
user = authentication.authenticate(update.effective_user)
if user.is_admin:
bot = context.bot
query.edit_message_text(
text='✅ Envío cancelado.'
)
        context.user_data.pop('md_id', None)
        context.user_data.pop('md_text', None)
@run_async
def input_user_criteria(update, context):
bot = context.bot
message = update.message.text
user = authentication.authenticate(update.effective_user)
if user.is_admin:
criteria = message
users = models.BotUser.objects.filter(
Q(username__icontains=criteria) |
Q(first_name__icontains=criteria) |
Q(last_name__icontains=criteria)
)
bot.send_message(
chat_id=user.chat_id,
text='{} resultados'.format(users.count()),
)
for u in users:
text, keyboard = renderers.user_markup(u)
bot.send_message(
chat_id=user.chat_id,
text=text,
reply_markup=InlineKeyboardMarkup(keyboard)
)
return ConversationHandler.END | if user.is_admin:
|
json.go | // Copyright 2021 Security Scorecard Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package format
import (
"encoding/json"
"fmt"
"io"
_ "net/http/pprof" // nolint:gosec
docs "github.com/ossf/scorecard/v4/docs/checks"
sce "github.com/ossf/scorecard/v4/errors"
"github.com/ossf/scorecard/v4/log"
"github.com/ossf/scorecard/v4/pkg"
)
//nolint
type jsonCheckResult struct {
Name string
Details []string
Confidence int
Pass bool
}
type jsonScorecardResult struct {
Repo string
Date string
Checks []jsonCheckResult
Metadata []string
}
type jsonCheckDocumentationV2 struct {
URL string `json:"url"`
Short string `json:"short"`
// Can be extended if needed.
}
//nolint
type jsonCheckResultV2 struct {
Details []string `json:"details"`
Score int `json:"score"`
Reason string `json:"reason"`
Name string `json:"name"`
Doc jsonCheckDocumentationV2 `json:"documentation"`
}
type jsonRepoV2 struct {
Name string `json:"name"`
Commit string `json:"commit"`
}
type jsonScorecardV2 struct {
Version string `json:"version"`
Commit string `json:"commit"`
}
type jsonFloatScore float64
func (s jsonFloatScore) MarshalJSON() ([]byte, error) {
// Note: for integers, this will show as X.0.
return []byte(fmt.Sprintf("%.1f", s)), nil
}
//nolint:govet
type jsonScorecardResultV2 struct {
Date string `json:"date"`
Repo jsonRepoV2 `json:"repo"`
Scorecard jsonScorecardV2 `json:"scorecard"`
AggregateScore jsonFloatScore `json:"score"`
Checks []jsonCheckResultV2 `json:"checks"`
Metadata []string `json:"metadata"`
}
// AsJSON exports results as JSON for new detail format.
func AsJSON(r *pkg.ScorecardResult, showDetails bool, logLevel log.Level, writer io.Writer) error {
encoder := json.NewEncoder(writer)
out := jsonScorecardResult{
Repo: r.Repo.Name,
Date: r.Date.Format("2006-01-02"),
Metadata: r.Metadata,
}
//nolint
for _, checkResult := range r.Checks {
tmpResult := jsonCheckResult{
Name: checkResult.Name,
Pass: checkResult.Pass,
Confidence: checkResult.Confidence,
}
if showDetails {
for i := range checkResult.Details2 {
d := checkResult.Details2[i]
m := pkg.DetailToString(&d, logLevel)
if m == "" {
continue
}
tmpResult.Details = append(tmpResult.Details, m)
}
}
out.Checks = append(out.Checks, tmpResult)
}
if err := encoder.Encode(out); err != nil {
return sce.WithMessage(sce.ErrScorecardInternal, fmt.Sprintf("encoder.Encode: %v", err))
}
return nil
}
// AsJSON2 exports results as JSON for the cron job and in the new detail format.
func AsJSON2(r *pkg.ScorecardResult, showDetails bool,
logLevel log.Level, checkDocs docs.Doc, writer io.Writer,
) error {
score, err := r.GetAggregateScore(checkDocs)
if err != nil {
//nolint:wrapcheck
return err
}
encoder := json.NewEncoder(writer)
out := jsonScorecardResultV2{
Repo: jsonRepoV2{
Name: r.Repo.Name,
Commit: r.Repo.CommitSHA,
},
Scorecard: jsonScorecardV2{
Version: r.Scorecard.Version,
Commit: r.Scorecard.CommitSHA,
},
Date: r.Date.Format("2006-01-02"),
Metadata: r.Metadata,
AggregateScore: jsonFloatScore(score),
}
//nolint
for _, checkResult := range r.Checks {
doc, e := checkDocs.GetCheck(checkResult.Name)
if e != nil {
return sce.WithMessage(sce.ErrScorecardInternal, fmt.Sprintf("GetCheck: %s: %v", checkResult.Name, e))
}
tmpResult := jsonCheckResultV2{
Name: checkResult.Name,
Doc: jsonCheckDocumentationV2{
URL: doc.GetDocumentationURL(r.Scorecard.CommitSHA),
Short: doc.GetShort(),
},
Reason: checkResult.Reason,
Score: checkResult.Score,
}
if showDetails |
out.Checks = append(out.Checks, tmpResult)
}
if err := encoder.Encode(out); err != nil {
return sce.WithMessage(sce.ErrScorecardInternal, fmt.Sprintf("encoder.Encode: %v", err))
}
return nil
}
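// A minimal usage sketch for the exporters above, assuming `result` is a
// populated *pkg.ScorecardResult and `level` is whatever log.Level the
// caller already uses (bytes.Buffer shown only for illustration):
//
//	var buf bytes.Buffer
//	if err := AsJSON(result, true /* showDetails */, level, &buf); err != nil {
//		// handle the error
//	}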
| {
for i := range checkResult.Details2 {
d := checkResult.Details2[i]
m := pkg.DetailToString(&d, logLevel)
if m == "" {
continue
}
tmpResult.Details = append(tmpResult.Details, m)
}
} |
dir.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use remove_dir_all::remove_dir_all;
use std::path::{self, Path, PathBuf};
use std::{fmt, fs, io};
use Builder;
/// Create a new temporary directory.
///
/// The `tempdir` function creates a directory in the file system
/// and returns a [`TempDir`].
/// The directory will be automatically deleted when the `TempDir`'s
/// destructor is run.
///
/// # Resource Leaking
///
/// See [the resource leaking][resource-leaking] docs on `TempDir`.
///
/// # Errors
///
/// If the directory can not be created, `Err` is returned.
///
/// # Examples
///
/// ```
/// # extern crate tempfile;
/// use tempfile::tempdir;
/// use std::fs::File;
/// use std::io::{self, Write};
///
/// # fn main() {
/// # if let Err(_) = run() {
/// # ::std::process::exit(1);
/// # }
/// # }
/// # fn run() -> Result<(), io::Error> {
/// // Create a directory inside of `std::env::temp_dir()`
/// let dir = tempdir()?;
///
/// let file_path = dir.path().join("my-temporary-note.txt");
/// let mut file = File::create(file_path)?;
/// writeln!(file, "Brian was here. Briefly.")?;
///
/// // `dir` goes out of scope; the directory, as well as
/// // `file`, will be deleted here.
/// drop(file);
/// dir.close()?;
/// # Ok(())
/// # }
/// ```
///
/// [`TempDir`]: struct.TempDir.html
/// [resource-leaking]: struct.TempDir.html#resource-leaking
pub fn tempdir() -> io::Result<TempDir> {
TempDir::new()
}
/// Create a new temporary directory in a specific directory.
///
/// The `tempdir_in` function creates a directory in the provided directory
/// and returns a [`TempDir`].
/// The directory will be automatically deleted when the `TempDir`'s
/// destructor is run.
///
/// # Resource Leaking
///
/// See [the resource leaking][resource-leaking] docs on `TempDir`.
///
/// # Errors
///
/// If the directory can not be created, `Err` is returned.
///
/// # Examples
///
/// ```
/// # extern crate tempfile;
/// use tempfile::tempdir_in;
/// use std::fs::File;
/// use std::io::{self, Write};
///
/// # fn main() {
/// # if let Err(_) = run() {
/// # ::std::process::exit(1);
/// # }
/// # }
/// # fn run() -> Result<(), io::Error> {
/// // Create a directory inside of the current directory,
/// let dir = tempdir_in(".")?;
///
/// let file_path = dir.path().join("my-temporary-note.txt");
/// let mut file = File::create(file_path)?;
/// writeln!(file, "Brian was here. Briefly.")?;
///
/// // `dir` goes out of scope; the directory, as well as
/// // `file`, will be deleted here.
/// drop(file);
/// dir.close()?;
/// # Ok(())
/// # }
/// ```
///
/// [`TempDir`]: struct.TempDir.html
/// [resource-leaking]: struct.TempDir.html#resource-leaking
pub fn tempdir_in<P: AsRef<Path>>(dir: P) -> io::Result<TempDir> {
TempDir::new_in(dir)
}
/// A directory in the filesystem that is automatically deleted when
/// it goes out of scope.
///
/// The [`TempDir`] type creates a directory on the file system that
/// is deleted once it goes out of scope. At construction, the
/// `TempDir` creates a new directory with a randomly generated name.
///
/// The default constructor, [`TempDir::new()`], creates directories in
/// the location returned by [`std::env::temp_dir()`], but `TempDir`
/// can be configured to manage a temporary directory in any location
/// by constructing with a [`Builder`].
///
/// After creating a `TempDir`, work with the file system by doing
/// standard [`std::fs`] file system operations on its [`Path`],
/// which can be retrieved with [`TempDir::path()`]. Once the `TempDir`
/// value is dropped, the directory at the path will be deleted, along
/// with any files and directories it contains. It is your responsibility
/// to ensure that no further file system operations are attempted
/// inside the temporary directory once it has been deleted.
///
/// # Resource Leaking
///
/// Various platform-specific conditions may cause `TempDir` to fail
/// to delete the underlying directory. It's important to ensure that
/// handles (like [`File`] and [`ReadDir`]) to files inside the
/// directory are dropped before the `TempDir` goes out of scope. The
/// `TempDir` destructor will silently ignore any errors in deleting
/// the directory; to instead handle errors call [`TempDir::close()`].
///
/// Note that if the program exits before the `TempDir` destructor is
/// run, such as via [`std::process::exit()`], by segfaulting, or by
/// receiving a signal like `SIGINT`, then the temporary directory
/// will not be deleted.
///
/// # Examples
///
/// Create a temporary directory with a generated name:
///
/// ```
/// use std::fs::File;
/// use std::io::Write;
/// use tempfile::TempDir;
///
/// # use std::io;
/// # fn run() -> Result<(), io::Error> {
/// // Create a directory inside of `std::env::temp_dir()`
/// let tmp_dir = TempDir::new()?;
/// # Ok(())
/// # }
/// ```
///
/// Create a temporary directory with a prefix in its name:
///
/// ```
/// use std::fs::File;
/// use std::io::Write;
/// use tempfile::Builder;
///
/// # use std::io;
/// # fn run() -> Result<(), io::Error> {
/// // Create a directory inside of `std::env::temp_dir()`,
/// // whose name will begin with 'example'.
/// let tmp_dir = Builder::new().prefix("example").tempdir()?;
/// # Ok(())
/// # }
/// ```
///
/// [`File`]: http://doc.rust-lang.org/std/fs/struct.File.html
/// [`Path`]: http://doc.rust-lang.org/std/path/struct.Path.html
/// [`ReadDir`]: http://doc.rust-lang.org/std/fs/struct.ReadDir.html
/// [`Builder`]: struct.Builder.html
/// [`TempDir::close()`]: struct.TempDir.html#method.close
/// [`TempDir::new()`]: struct.TempDir.html#method.new
/// [`TempDir::path()`]: struct.TempDir.html#method.path
/// [`TempDir`]: struct.TempDir.html
/// [`std::env::temp_dir()`]: https://doc.rust-lang.org/std/env/fn.temp_dir.html
/// [`std::fs`]: http://doc.rust-lang.org/std/fs/index.html
/// [`std::process::exit()`]: http://doc.rust-lang.org/std/process/fn.exit.html
pub struct TempDir {
path: Option<PathBuf>,
}
impl TempDir {
/// Attempts to make a temporary directory inside of `env::temp_dir()`.
///
/// See [`Builder`] for more configuration.
///
/// The directory and everything inside it will be automatically deleted
/// once the returned `TempDir` is destroyed.
///
/// # Errors
///
/// If the directory can not be created, `Err` is returned.
///
/// # Examples
///
/// ```
/// use std::fs::File;
/// use std::io::Write;
/// use tempfile::TempDir;
///
/// # use std::io;
/// # fn run() -> Result<(), io::Error> {
/// // Create a directory inside of `std::env::temp_dir()`
/// let tmp_dir = TempDir::new()?;
///
/// let file_path = tmp_dir.path().join("my-temporary-note.txt");
/// let mut tmp_file = File::create(file_path)?;
/// writeln!(tmp_file, "Brian was here. Briefly.")?;
///
/// // `tmp_dir` goes out of scope, the directory as well as
/// // `tmp_file` will be deleted here.
/// # Ok(())
/// # }
/// ```
///
/// [`Builder`]: struct.Builder.html
pub fn new() -> io::Result<TempDir> {
Builder::new().tempdir()
}
/// Attempts to make a temporary directory inside of `dir`.
/// The directory and everything inside it will be automatically
/// deleted once the returned `TempDir` is destroyed.
///
/// # Errors
///
/// If the directory can not be created, `Err` is returned.
///
/// # Examples
///
/// ```
/// use std::fs::{self, File};
/// use std::io::Write;
/// use tempfile::TempDir;
///
/// # use std::io;
/// # fn run() -> Result<(), io::Error> {
/// // Create a directory inside of the current directory
/// let tmp_dir = TempDir::new_in(".")?;
/// let file_path = tmp_dir.path().join("my-temporary-note.txt");
/// let mut tmp_file = File::create(file_path)?;
/// writeln!(tmp_file, "Brian was here. Briefly.")?;
/// # Ok(())
/// # }
/// ```
pub fn new_in<P: AsRef<Path>>(dir: P) -> io::Result<TempDir> {
Builder::new().tempdir_in(dir)
}
/// Accesses the [`Path`] to the temporary directory.
///
/// [`Path`]: http://doc.rust-lang.org/std/path/struct.Path.html
///
/// # Examples
///
/// ```
/// use tempfile::TempDir;
///
/// # use std::io;
/// # fn run() -> Result<(), io::Error> {
/// let tmp_path;
///
/// {
/// let tmp_dir = TempDir::new()?;
/// tmp_path = tmp_dir.path().to_owned();
///
/// // Check that the temp directory actually exists.
/// assert!(tmp_path.exists());
///
/// // End of `tmp_dir` scope, directory will be deleted
/// }
///
/// // Temp directory should be deleted by now
/// assert_eq!(tmp_path.exists(), false);
/// # Ok(())
/// # }
/// ```
pub fn path(&self) -> &path::Path {
self.path.as_ref().unwrap()
}
/// Persist the temporary directory to disk, returning the [`PathBuf`] where it is located.
///
/// This consumes the [`TempDir`] without deleting directory on the filesystem, meaning that
/// the directory will no longer be automatically deleted.
///
/// [`TempDir`]: struct.TempDir.html
/// [`PathBuf`]: http://doc.rust-lang.org/std/path/struct.PathBuf.html
///
/// # Examples
///
/// ```
/// use std::fs;
/// use tempfile::TempDir;
///
/// # use std::io;
/// # fn run() -> Result<(), io::Error> {
/// let tmp_dir = TempDir::new()?;
///
/// // Persist the temporary directory to disk,
/// // getting the path where it is.
/// let tmp_path = tmp_dir.into_path();
///
/// // Delete the temporary directory ourselves.
/// fs::remove_dir_all(tmp_path)?;
/// # Ok(())
/// # }
/// ```
pub fn into_path(mut self) -> PathBuf {
self.path.take().unwrap()
}
    /// Closes and removes the temporary directory, returning a `Result`.
///
/// Although `TempDir` removes the directory on drop, in the destructor
/// any errors are ignored. To detect errors cleaning up the temporary
/// directory, call `close` instead.
///
/// # Errors
///
/// This function may return a variety of [`std::io::Error`]s that result from deleting
/// the files and directories contained with the temporary directory,
/// as well as from deleting the temporary directory itself. These errors
/// may be platform specific.
///
/// [`std::io::Error`]: http://doc.rust-lang.org/std/io/struct.Error.html
///
/// # Examples
///
/// ```
/// use std::fs::File;
/// use std::io::Write;
/// use tempfile::TempDir;
///
/// # use std::io;
/// # fn run() -> Result<(), io::Error> {
/// // Create a directory inside of `std::env::temp_dir()`.
/// let tmp_dir = TempDir::new()?;
/// let file_path = tmp_dir.path().join("my-temporary-note.txt");
/// let mut tmp_file = File::create(file_path)?;
/// writeln!(tmp_file, "Brian was here. Briefly.")?;
///
/// // By closing the `TempDir` explicitly we can check that it has
/// // been deleted successfully. If we don't close it explicitly,
/// // the directory will still be deleted when `tmp_dir` goes out
/// // of scope, but we won't know whether deleting the directory
/// // succeeded.
/// drop(tmp_file);
/// tmp_dir.close()?;
/// # Ok(())
/// # }
/// ```
pub fn close(mut self) -> io::Result<()> {
let result = remove_dir_all(self.path());
// Prevent the Drop impl from removing the dir a second time.
self.path = None;
result
}
}
impl AsRef<Path> for TempDir {
fn as_ref(&self) -> &Path |
}
impl fmt::Debug for TempDir {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("TempDir")
.field("path", &self.path())
.finish()
}
}
impl Drop for TempDir {
fn drop(&mut self) {
// Path is `None` if `close()` or `into_path()` has been called.
if let Some(ref p) = self.path {
let _ = remove_dir_all(p);
}
}
}
// pub(crate)
pub fn create(path: PathBuf) -> io::Result<TempDir> {
fs::create_dir(&path).map(|_| TempDir { path: Some(path) })
}
| {
self.path()
} |
namecoindns.py | #name_scan "d/yourdomain" 1
import sys, os
#sys.path.append('/home/khal/sources/nmcontrol/lib/')
import DNS
import rpcClient
import struct, listdns, base64, types, json, random
#from jsonrpc import ServiceProxy
from utils import *
from common import *
class Source(object):
#def __init__(self):
#self.servers = app['services']['dns'].conf['resolver'].split(',')
#self.reqobj = DNS.Request()
#jsonfile = open("config.json", "r")
#data = json.loads(jsonfile.read())
#jsonfile.close()
#username = str(data[u"username"])
#port = data[u"port"]
#password = str(data[u"password"])
#self.sp = ServiceProxy("http://%(user)s:%(passwd)[email protected]:%(port)d" % dict(user=username, passwd=password, port=port))
#elf.sp = rpcClient.rpcClientNamecoin('127.0.0.1', port, username, password)
#self.sp = app['plugins']['domain']
# def _parse_file(self):
# f = open(self._filename, "r")
# for line in f.readlines():
# line = line.strip()
# if line and line[0] != '#':
# question, type, value = line.split()
# question = question.lower()
# type = type.upper()
# if question == '@':
# question = ''
# if type == 'A':
# answer = struct.pack("!I", ipstr2int(value))
# qtype = 1
# if type == 'NS':
# answer = labels2str(value.split("."))
# qtype = 2
# elif type == 'CNAME':
# answer = labels2str(value.split("."))
# qtype = 5
# elif type == 'TXT':
# answer = label2str(value)
# qtype = 16
# elif type == 'MX':
# preference, domain = value.split(":")
# answer = struct.pack("!H", int(preference))
# answer += labels2str(domain.split("."))
# qtype = 15
# self._answers.setdefault(question, {}).setdefault(qtype, []).append(answer)
# f.close()
    def isIP(self, host):
|
    def get_response(self, query, domain, qtype, qclass, src_addr):
        """Resolve `domain` for the DNS frontend and return (rcode, answers)."""
#print query
#print domain
#print qtype
#print qclass
#print src_addr
if qtype == 1:
#answer = struct.pack("!I", ipstr2int(value))
reqtype = "A"
        elif qtype == 2:
#answer = labels2str(value.split("."))
reqtype = "NS"
elif qtype == 5:
#answer = labels2str(value.split("."))
reqtype = "CNAME"
elif qtype == 16:
#answer = label2str(value)
reqtype = "TXT"
elif qtype == 15:
#preference, domain = value.split(":")
#nswer = struct.pack("!H", int(preference))
#answer += labels2str(domain.split("."))
reqtype = "MX"
elif qtype == 28:
#answer = struct.pack("!I", ipstr2int(value))
reqtype = "AAAA"
elif qtype == 52:
reqtype = "TLSA"
else : reqtype = None
answers = app['services']['dns'].lookup({"query":query, "domain":domain, "qtype":qtype, "qclass":qclass, "src_addr":src_addr})
#print 'domain:', domain
#print 'answers:', answers
if domain.endswith(".bit") or domain.endswith(".tor") :
#response = listdns.lookup(self.sp, {"query":query, "domain":domain, "qtype":qtype, "qclass":qclass, "src_addr":src_addr})
#response = self.sp.lookup({"query":query, "domain":domain, "qtype":qtype, "qclass":qclass, "src_addr":src_addr})
response = answers
results = []
if type(response) == types.DictType :
tempresults = {"qtype":response["type"], "qclass":response["class"], "ttl":response["ttl"]}
if response["type"] == 1 :
#if answers == [] :
# return self.get_response(query, domain, 5, qclass, src_addr)
tempresults["rdata"] = struct.pack("!I", ipstr2int(response["data"]))
elif response["type"] == 2 or response["type"] == 5:
tempresults["rdata"] = labels2str(response["data"].split("."))
elif response["type"] == 16 :
tempresults["rdata"] = labels2str(response["data"])
elif response["type"] == 15 :
tempresult = struct.pack("!H", response["data"][0])
tempresult += labels2str(response["data"][1].split("."))
tempresults["rdata"] = tempresult
elif response["type"] == 28 :
tempresults["rdata"] = response["data"]
elif response["type"] == 52 :
tempresult = '\x03\x00'
tempresult += chr(int(response["data"][0][0]))
tempresult += bytearray.fromhex(response["data"][0][1])
tempresults["rdata"] = tempresult
#else : return 3, []
results.append(tempresults)
return 0, results
if type(response) == types.StringType :
if self.isIP(response) :
return 0, [{"qtype":1, "qclass":qclass, "ttl":300, "rdata":struct.pack("!I", ipstr2int(response))}]
return 3, []
#if query not in self._answers:
#return 3, []
#if qtype in self._answers[query]:
#if domain == "sonicrules.bit":
# results = [{'qtype': 1, 'qclass':qclass, 'ttl': 300, 'rdata': struct.pack("!I", ipstr2int(self.reqobj.req("sonicrules.org", qtype=1).answers[0]["data"]))}]
# return 0, results
#elif qtype == 1:
# if they asked for an A record and we didn't find one, check for a CNAME
#return self.get_response(query, domain, 5, qclass, src_addr)
else:
#server = self.servers[random.randrange(0, len(self.servers)-1)]
#answers = self.reqobj.req(name=domain, qtype=qtype, server=server).answers
results = []
for response in answers :
tempresults = {"qtype":response["type"], "qclass":response["class"], "ttl":response["ttl"]}
if response["type"] == 1 :
if answers == [] :
return self.get_response(query, domain, 5, qclass, src_addr)
tempresults["rdata"] = struct.pack("!I", ipstr2int(response["data"]))
elif response["type"] == 2 or response["type"] == 5:
tempresults["rdata"] = labels2str(response["data"].split("."))
elif response["type"] == 16 :
tempresults["rdata"] = labels2str(response["data"])
elif response["type"] == 15 :
tempresult = struct.pack("!H", response["data"][0])
tempresult += labels2str(response["data"][1].split("."))
tempresults["rdata"] = tempresult
elif response["type"] == 28 :
if answers == [] :
return self.get_response(query, domain, 5, qclass, src_addr)
#tempresults["rdata"] = struct.pack("!I", ipstr2int(response["data"]))
tempresults["rdata"] = response["data"]
elif response["type"] == 52 :
tempresults["rdata"] = response["data"]
#else : return 3, []
results.append(tempresults)
return 0, results
return 3, []
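    # Return convention for get_response above: (rcode, answers), where
    # rcode 0 is NOERROR with a list of answer dicts and rcode 3 is NXDOMAIN
    # with an empty answer list.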
| parts = host.split(".")
        if len(parts) != 4:
            return False
        try:
            for part in parts:
                intpart = int(part)
                if not (0 <= intpart <= 255):
                    return False
            return True
        except ValueError:
            return False
00.24.js | macDetailCallback("00183c000000/24",[{"d":"2006-06-13","t":"add","a":"6F Leo Complex\n44 Residency Cross Road\nBangalore Karnataka 560025\n","c":"INDIA","o":"Encore Software Limited"},{"d":"2015-08-27","t":"change","a":"6F Leo Complex Bangalore Karnataka IN 560025","c":"IN","o":"Encore Software Limited"}]); | ||
cluster.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v2
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Creates a cluster within an instance. Note that exactly one of Cluster.serve_nodes and Cluster.cluster_config.cluster_autoscaling_config can be set. If serve_nodes is set to non-zero, then the cluster is manually scaled. If cluster_config.cluster_autoscaling_config is non-empty, then autoscaling is enabled.
type Cluster struct {
pulumi.CustomResourceState
// Configuration for this cluster.
ClusterConfig ClusterConfigResponseOutput `pulumi:"clusterConfig"`
// Immutable. The type of storage used by this cluster to serve its parent instance's tables, unless explicitly overridden.
DefaultStorageType pulumi.StringOutput `pulumi:"defaultStorageType"`
// Immutable. The encryption configuration for CMEK-protected clusters.
EncryptionConfig EncryptionConfigResponseOutput `pulumi:"encryptionConfig"`
// Immutable. The location where this cluster's nodes and storage reside. For best performance, clients should be located as close as possible to this cluster. Currently only zones are supported, so values should be of the form `projects/{project}/locations/{zone}`.
Location pulumi.StringOutput `pulumi:"location"`
// The unique name of the cluster. Values are of the form `projects/{project}/instances/{instance}/clusters/a-z*`.
Name pulumi.StringOutput `pulumi:"name"`
// The number of nodes allocated to this cluster. More nodes enable higher throughput and more consistent performance.
ServeNodes pulumi.IntOutput `pulumi:"serveNodes"`
// The current state of the cluster.
State pulumi.StringOutput `pulumi:"state"`
}
// NewCluster registers a new resource with the given unique name, arguments, and options.
func NewCluster(ctx *pulumi.Context,
name string, args *ClusterArgs, opts ...pulumi.ResourceOption) (*Cluster, error) |
// GetCluster gets an existing Cluster resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetCluster(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *ClusterState, opts ...pulumi.ResourceOption) (*Cluster, error) {
var resource Cluster
err := ctx.ReadResource("google-native:bigtableadmin/v2:Cluster", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering Cluster resources.
type clusterState struct {
}
type ClusterState struct {
}
func (ClusterState) ElementType() reflect.Type {
return reflect.TypeOf((*clusterState)(nil)).Elem()
}
type clusterArgs struct {
// Configuration for this cluster.
ClusterConfig *ClusterConfig `pulumi:"clusterConfig"`
ClusterId string `pulumi:"clusterId"`
// Immutable. The type of storage used by this cluster to serve its parent instance's tables, unless explicitly overridden.
DefaultStorageType *ClusterDefaultStorageType `pulumi:"defaultStorageType"`
// Immutable. The encryption configuration for CMEK-protected clusters.
EncryptionConfig *EncryptionConfig `pulumi:"encryptionConfig"`
InstanceId string `pulumi:"instanceId"`
// Immutable. The location where this cluster's nodes and storage reside. For best performance, clients should be located as close as possible to this cluster. Currently only zones are supported, so values should be of the form `projects/{project}/locations/{zone}`.
Location *string `pulumi:"location"`
// The unique name of the cluster. Values are of the form `projects/{project}/instances/{instance}/clusters/a-z*`.
Name *string `pulumi:"name"`
Project *string `pulumi:"project"`
// The number of nodes allocated to this cluster. More nodes enable higher throughput and more consistent performance.
ServeNodes *int `pulumi:"serveNodes"`
}
// The set of arguments for constructing a Cluster resource.
type ClusterArgs struct {
// Configuration for this cluster.
ClusterConfig ClusterConfigPtrInput
ClusterId pulumi.StringInput
// Immutable. The type of storage used by this cluster to serve its parent instance's tables, unless explicitly overridden.
DefaultStorageType ClusterDefaultStorageTypePtrInput
// Immutable. The encryption configuration for CMEK-protected clusters.
EncryptionConfig EncryptionConfigPtrInput
InstanceId pulumi.StringInput
// Immutable. The location where this cluster's nodes and storage reside. For best performance, clients should be located as close as possible to this cluster. Currently only zones are supported, so values should be of the form `projects/{project}/locations/{zone}`.
Location pulumi.StringPtrInput
// The unique name of the cluster. Values are of the form `projects/{project}/instances/{instance}/clusters/a-z*`.
Name pulumi.StringPtrInput
Project pulumi.StringPtrInput
// The number of nodes allocated to this cluster. More nodes enable higher throughput and more consistent performance.
ServeNodes pulumi.IntPtrInput
}
func (ClusterArgs) ElementType() reflect.Type {
return reflect.TypeOf((*clusterArgs)(nil)).Elem()
}
type ClusterInput interface {
pulumi.Input
ToClusterOutput() ClusterOutput
ToClusterOutputWithContext(ctx context.Context) ClusterOutput
}
func (*Cluster) ElementType() reflect.Type {
return reflect.TypeOf((**Cluster)(nil)).Elem()
}
func (i *Cluster) ToClusterOutput() ClusterOutput {
return i.ToClusterOutputWithContext(context.Background())
}
func (i *Cluster) ToClusterOutputWithContext(ctx context.Context) ClusterOutput {
return pulumi.ToOutputWithContext(ctx, i).(ClusterOutput)
}
type ClusterOutput struct{ *pulumi.OutputState }
func (ClusterOutput) ElementType() reflect.Type {
return reflect.TypeOf((**Cluster)(nil)).Elem()
}
func (o ClusterOutput) ToClusterOutput() ClusterOutput {
return o
}
func (o ClusterOutput) ToClusterOutputWithContext(ctx context.Context) ClusterOutput {
return o
}
func init() {
pulumi.RegisterInputType(reflect.TypeOf((*ClusterInput)(nil)).Elem(), &Cluster{})
pulumi.RegisterOutputType(ClusterOutput{})
}
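// A minimal usage sketch (resource and project names below are placeholders,
// not values from this package):
//
//	cluster, err := NewCluster(ctx, "example-cluster", &ClusterArgs{
//		ClusterId:  pulumi.String("example-cluster"),
//		InstanceId: pulumi.String("example-instance"),
//		Location:   pulumi.String("projects/example-project/locations/us-central1-a"),
//	})
//	if err != nil {
//		return err
//	}
//	_ = cluster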
| {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.ClusterId == nil {
return nil, errors.New("invalid value for required argument 'ClusterId'")
}
if args.InstanceId == nil {
return nil, errors.New("invalid value for required argument 'InstanceId'")
}
var resource Cluster
err := ctx.RegisterResource("google-native:bigtableadmin/v2:Cluster", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
} |
offset_date_time.rs | use std::cmp::Ordering;
#[cfg(feature = "std")]
use std::time::SystemTime;
#[cfg(feature = "std")]
use time::UtcOffset;
use time::{error, prelude::*, Format, OffsetDateTime, Weekday};
#[test]
#[cfg(feature = "std")]
fn now_utc() |
#[test]
#[cfg(feature = "std")]
fn now_local() {
assert!(OffsetDateTime::now_local().year() >= 2019);
assert_eq!(
OffsetDateTime::now_local().offset(),
UtcOffset::current_local_offset()
);
}
#[test]
#[cfg(feature = "std")]
fn try_now_local() {
assert!(OffsetDateTime::try_now_local().is_ok());
}
#[test]
fn to_offset() {
assert_eq!(
date!(2000 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(-1))
.year(),
1999,
);
let sydney = date!(2000 - 01 - 01).midnight().assume_offset(offset!(+11));
let new_york = sydney.to_offset(offset!(-5));
let los_angeles = sydney.to_offset(offset!(-8));
assert_eq!(sydney.hour(), 0);
assert_eq!(sydney.day(), 1);
assert_eq!(new_york.hour(), 8);
assert_eq!(new_york.day(), 31);
assert_eq!(los_angeles.hour(), 5);
assert_eq!(los_angeles.day(), 31);
}
#[test]
fn unix_epoch() {
assert_eq!(
OffsetDateTime::unix_epoch(),
date!(1970 - 01 - 01).midnight().assume_utc(),
);
}
#[test]
fn from_unix_timestamp() {
assert_eq!(
OffsetDateTime::from_unix_timestamp(0),
OffsetDateTime::unix_epoch(),
);
assert_eq!(
OffsetDateTime::from_unix_timestamp(1_546_300_800),
date!(2019 - 01 - 01).midnight().assume_utc(),
);
}
#[test]
fn from_unix_timestamp_nanos() {
assert_eq!(
OffsetDateTime::from_unix_timestamp_nanos(0),
OffsetDateTime::unix_epoch(),
);
assert_eq!(
OffsetDateTime::from_unix_timestamp_nanos(1_546_300_800_000_000_000),
date!(2019 - 01 - 01).midnight().assume_utc(),
);
}
#[test]
fn offset() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().offset(),
offset!(UTC),
);
assert_eq!(
date!(2019 - 01 - 01)
.midnight()
.assume_offset(offset!(+1))
.offset(),
offset!(+1),
);
assert_eq!(
date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(+1))
.offset(),
offset!(+1),
);
}
#[test]
fn timestamp() {
assert_eq!(OffsetDateTime::unix_epoch().timestamp(), 0);
assert_eq!(
OffsetDateTime::unix_epoch()
.to_offset(offset!(+1))
.timestamp(),
0,
);
assert_eq!(
date!(1970 - 01 - 01)
.midnight()
.assume_offset(offset!(-1))
.timestamp(),
3_600,
);
}
#[test]
fn timestamp_nanos() {
assert_eq!(
date!(1970 - 01 - 01)
.midnight()
.assume_utc()
.timestamp_nanos(),
0,
);
assert_eq!(
date!(1970 - 01 - 01)
.with_time(time!(1:00))
.assume_utc()
.to_offset(offset!(-1))
.timestamp_nanos(),
3_600_000_000_000,
);
}
#[test]
fn date() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().date(),
date!(2019 - 01 - 01),
);
assert_eq!(
date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(-1))
.date(),
date!(2018 - 12 - 31),
);
}
#[test]
fn time() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().time(),
time!(0:00),
);
assert_eq!(
date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(-1))
.time(),
time!(23:00),
);
}
#[test]
fn year() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().year(), 2019);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:00))
.assume_utc()
.to_offset(offset!(+1))
.year(),
2020,
);
assert_eq!(date!(2020 - 01 - 01).midnight().assume_utc().year(), 2020);
}
#[test]
fn month() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().month(), 1);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:00))
.assume_utc()
.to_offset(offset!(+1))
.month(),
1,
);
}
#[test]
fn day() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().day(), 1);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:00))
.assume_utc()
.to_offset(offset!(+1))
.day(),
1,
);
}
#[test]
fn month_day() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().month_day(),
(1, 1),
);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:00))
.assume_utc()
.to_offset(offset!(+1))
.month_day(),
(1, 1),
);
}
#[test]
fn ordinal() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().ordinal(), 1);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:00))
.assume_utc()
.to_offset(offset!(+1))
.ordinal(),
1,
);
}
#[test]
fn iso_year_week() {
assert_eq!(
date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.iso_year_week(),
(2019, 1)
);
assert_eq!(
date!(2019 - 10 - 04)
.midnight()
.assume_utc()
.iso_year_week(),
(2019, 40)
);
assert_eq!(
date!(2020 - 01 - 01)
.midnight()
.assume_utc()
.iso_year_week(),
(2020, 1)
);
assert_eq!(
date!(2020 - 12 - 31)
.midnight()
.assume_utc()
.iso_year_week(),
(2020, 53)
);
assert_eq!(
date!(2021 - 01 - 01)
.midnight()
.assume_utc()
.iso_year_week(),
(2020, 53)
);
}
#[test]
fn week() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().week(), 1);
assert_eq!(date!(2020 - 01 - 01).midnight().assume_utc().week(), 1);
assert_eq!(date!(2020 - 12 - 31).midnight().assume_utc().week(), 53);
assert_eq!(date!(2021 - 01 - 01).midnight().assume_utc().week(), 53);
}
#[test]
fn weekday() {
use Weekday::*;
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().weekday(),
Tuesday
);
assert_eq!(
date!(2019 - 02 - 01).midnight().assume_utc().weekday(),
Friday
);
assert_eq!(
date!(2019 - 03 - 01).midnight().assume_utc().weekday(),
Friday
);
}
#[test]
fn hour() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().hour(), 0);
assert_eq!(
date!(2019 - 01 - 01)
.with_time(time!(23:59:59))
.assume_utc()
.to_offset(offset!(-2))
.hour(),
21,
);
}
#[test]
fn minute() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().minute(), 0);
assert_eq!(
date!(2019 - 01 - 01)
.with_time(time!(23:59:59))
.assume_utc()
.to_offset(offset!(+0:30))
.minute(),
29,
);
}
#[test]
fn second() {
assert_eq!(date!(2019 - 01 - 01).midnight().assume_utc().second(), 0);
assert_eq!(
date!(2019 - 01 - 01)
.with_time(time!(23:59:59))
.assume_utc()
.to_offset(offset!(+0:00:30))
.second(),
29,
);
}
#[test]
fn millisecond() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().millisecond(),
0
);
assert_eq!(
date!(2019 - 01 - 01)
.with_time(time!(23:59:59.999))
.assume_utc()
.millisecond(),
999,
);
}
#[test]
fn microsecond() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().microsecond(),
0
);
assert_eq!(
date!(2019 - 01 - 01)
.with_time(time!(23:59:59.999_999))
.assume_utc()
.microsecond(),
999_999,
);
}
#[test]
fn nanosecond() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc().nanosecond(),
0
);
assert_eq!(
date!(2019 - 01 - 01)
.with_time(time!(23:59:59.999_999_999))
.assume_utc()
.nanosecond(),
999_999_999,
);
}
#[test]
fn format() {
assert_eq!(
date!(2019 - 01 - 02)
.midnight()
.assume_utc()
.format("%F %r %z"),
"2019-01-02 12:00:00 am +0000",
);
assert_eq!(
date!(2019 - 01 - 02)
.with_time(time!(3:04:05.678_901_234))
.assume_offset(offset!(+6:07))
.format(Format::Rfc3339),
"2019-01-02T03:04:05+06:07"
);
}
#[test]
fn parse() {
assert_eq!(
OffsetDateTime::parse("2019-01-02 00:00:00 +0000", "%F %T %z"),
Ok(date!(2019 - 01 - 02).midnight().assume_utc()),
);
assert_eq!(
OffsetDateTime::parse("2019-002 23:59:59 +0000", "%Y-%j %T %z"),
Ok(date!(2019 - 002).with_time(time!(23:59:59)).assume_utc())
);
assert_eq!(
OffsetDateTime::parse("2019-W01-3 12:00:00 pm +0000", "%G-W%V-%u %r %z"),
Ok(date!(2019 - 002).with_time(time!(12:00)).assume_utc())
);
assert_eq!(
OffsetDateTime::parse("2019-01-02 03:04:05 +0600", "%F %T %z"),
Ok(date!(2019 - 01 - 02)
.with_time(time!(3:04:05))
.assume_offset(offset!(+6)))
);
assert_eq!(
OffsetDateTime::parse("2020-09-08T08:44:31+02:30", Format::Rfc3339),
Ok(date!(2020 - 09 - 08)
.with_time(time!(08:44:31))
.assume_offset(offset!(+02:30)))
);
assert_eq!(
OffsetDateTime::parse("2019-01-02T03:04:05.678901234+05:06", Format::Rfc3339),
Ok(date!(2019 - 01 - 02)
.with_time(time!(3:04:05.678_901_234))
.assume_offset(offset!(+5:06)))
);
assert_eq!(
OffsetDateTime::parse("2019-01-02T03:04:05.678901234Z", Format::Rfc3339),
Ok(date!(2019 - 01 - 02)
.with_time(time!(3:04:05.678_901_234))
.assume_utc())
);
assert_eq!(
OffsetDateTime::parse("2019-01-02T03:04:05/", Format::Rfc3339),
Err(error::Parse::UnexpectedCharacter {
actual: '/',
expected: '+'
})
);
assert_eq!(
OffsetDateTime::parse("2019-01-02T03:04:05", Format::Rfc3339),
Err(error::Parse::UnexpectedEndOfString)
);
assert_eq!(
OffsetDateTime::parse("2019-01-02T03:04:05.", Format::Rfc3339),
Err(error::Parse::InvalidNanosecond)
);
}
#[test]
fn partial_eq() {
assert_eq!(
date!(2000 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(-1)),
date!(2000 - 01 - 01).midnight().assume_utc(),
);
}
#[test]
fn partial_ord() {
let t1 = date!(2019 - 01 - 01).midnight().assume_utc();
let t2 = date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(-1));
assert_eq!(t1.partial_cmp(&t2), Some(Ordering::Equal));
}
#[test]
fn ord() {
let t1 = date!(2019 - 01 - 01).midnight().assume_utc();
let t2 = date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(-1));
assert_eq!(t1, t2);
let t1 = date!(2019 - 01 - 01).midnight().assume_utc();
let t2 = date!(2019 - 01 - 01)
.with_time(time!(0:00:00.000_000_001))
.assume_utc();
assert!(t2 > t1);
}
#[test]
#[cfg(feature = "std")]
fn hash() {
use std::{
collections::hash_map::DefaultHasher,
hash::{Hash, Hasher},
};
assert_eq!(
{
let mut hasher = DefaultHasher::new();
date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.hash(&mut hasher);
hasher.finish()
},
{
let mut hasher = DefaultHasher::new();
date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.to_offset(offset!(-1))
.hash(&mut hasher);
hasher.finish()
}
);
// Ensure that a `PrimitiveDateTime` and `OffsetDateTime` don't collide,
// even if the UTC time is the same.
assert_ne!(
{
let mut hasher = DefaultHasher::new();
date!(2019 - 01 - 01).midnight().hash(&mut hasher);
hasher.finish()
},
{
let mut hasher = DefaultHasher::new();
date!(2019 - 01 - 01)
.midnight()
.assume_utc()
.hash(&mut hasher);
hasher.finish()
}
);
}
#[test]
fn add_duration() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc() + 5.days(),
date!(2019 - 01 - 06).midnight().assume_utc(),
);
assert_eq!(
date!(2019 - 12 - 31).midnight().assume_utc() + 1.days(),
date!(2020 - 01 - 01).midnight().assume_utc(),
);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
+ 2.seconds(),
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc(),
);
assert_eq!(
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc() + (-2).seconds(),
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc(),
);
assert_eq!(
date!(1999 - 12 - 31).with_time(time!(23:00)).assume_utc() + 1.hours(),
date!(2000 - 01 - 01).midnight().assume_utc(),
);
}
#[test]
fn add_std_duration() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc() + 5.std_days(),
date!(2019 - 01 - 06).midnight().assume_utc(),
);
assert_eq!(
date!(2019 - 12 - 31).midnight().assume_utc() + 1.std_days(),
date!(2020 - 01 - 01).midnight().assume_utc(),
);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
+ 2.std_seconds(),
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc(),
);
}
#[test]
fn add_assign_duration() {
let mut ny19 = date!(2019 - 01 - 01).midnight().assume_utc();
ny19 += 5.days();
assert_eq!(ny19, date!(2019 - 01 - 06).midnight().assume_utc());
let mut nye20 = date!(2019 - 12 - 31).midnight().assume_utc();
nye20 += 1.days();
assert_eq!(nye20, date!(2020 - 01 - 01).midnight().assume_utc());
let mut nye20t = date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc();
nye20t += 2.seconds();
assert_eq!(
nye20t,
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc()
);
let mut ny20t = date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc();
ny20t += (-2).seconds();
assert_eq!(
ny20t,
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
);
}
#[test]
fn add_assign_std_duration() {
let mut ny19 = date!(2019 - 01 - 01).midnight().assume_utc();
ny19 += 5.std_days();
assert_eq!(ny19, date!(2019 - 01 - 06).midnight().assume_utc());
let mut nye20 = date!(2019 - 12 - 31).midnight().assume_utc();
nye20 += 1.std_days();
assert_eq!(nye20, date!(2020 - 01 - 01).midnight().assume_utc());
let mut nye20t = date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc();
nye20t += 2.std_seconds();
assert_eq!(
nye20t,
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc()
);
}
#[test]
fn sub_duration() {
assert_eq!(
date!(2019 - 01 - 06).midnight().assume_utc() - 5.days(),
date!(2019 - 01 - 01).midnight().assume_utc(),
);
assert_eq!(
date!(2020 - 01 - 01).midnight().assume_utc() - 1.days(),
date!(2019 - 12 - 31).midnight().assume_utc(),
);
assert_eq!(
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc() - 2.seconds(),
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc(),
);
assert_eq!(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
- (-2).seconds(),
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc(),
);
assert_eq!(
date!(1999 - 12 - 31).with_time(time!(23:00)).assume_utc() - (-1).hours(),
date!(2000 - 01 - 01).midnight().assume_utc(),
);
}
#[test]
fn sub_std_duration() {
assert_eq!(
date!(2019 - 01 - 06).midnight().assume_utc() - 5.std_days(),
date!(2019 - 01 - 01).midnight().assume_utc(),
);
assert_eq!(
date!(2020 - 01 - 01).midnight().assume_utc() - 1.std_days(),
date!(2019 - 12 - 31).midnight().assume_utc(),
);
assert_eq!(
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc() - 2.std_seconds(),
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc(),
);
}
#[test]
fn sub_assign_duration() {
let mut ny19 = date!(2019 - 01 - 06).midnight().assume_utc();
ny19 -= 5.days();
assert_eq!(ny19, date!(2019 - 01 - 01).midnight().assume_utc());
let mut ny20 = date!(2020 - 01 - 01).midnight().assume_utc();
ny20 -= 1.days();
assert_eq!(ny20, date!(2019 - 12 - 31).midnight().assume_utc());
let mut ny20t = date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc();
ny20t -= 2.seconds();
assert_eq!(
ny20t,
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
);
let mut nye20t = date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc();
nye20t -= (-2).seconds();
assert_eq!(
nye20t,
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc()
);
}
#[test]
fn sub_assign_std_duration() {
let mut ny19 = date!(2019 - 01 - 06).midnight().assume_utc();
ny19 -= 5.std_days();
assert_eq!(ny19, date!(2019 - 01 - 01).midnight().assume_utc());
let mut ny20 = date!(2020 - 01 - 01).midnight().assume_utc();
ny20 -= 1.std_days();
assert_eq!(ny20, date!(2019 - 12 - 31).midnight().assume_utc());
let mut ny20t = date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc();
ny20t -= 2.std_seconds();
assert_eq!(
ny20t,
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
);
}
#[test]
#[cfg(feature = "std")]
fn std_add_duration() {
assert_eq!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc()) + 0.seconds(),
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc()),
);
assert_eq!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc()) + 5.days(),
SystemTime::from(date!(2019 - 01 - 06).midnight().assume_utc()),
);
assert_eq!(
SystemTime::from(date!(2019 - 12 - 31).midnight().assume_utc()) + 1.days(),
SystemTime::from(date!(2020 - 01 - 01).midnight().assume_utc()),
);
assert_eq!(
SystemTime::from(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
) + 2.seconds(),
SystemTime::from(date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc()),
);
assert_eq!(
SystemTime::from(date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc())
+ (-2).seconds(),
SystemTime::from(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
),
);
}
#[test]
#[cfg(feature = "std")]
fn std_add_assign_duration() {
let mut ny19 = SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc());
ny19 += 5.days();
assert_eq!(ny19, date!(2019 - 01 - 06).midnight().assume_utc());
let mut nye20 = SystemTime::from(date!(2019 - 12 - 31).midnight().assume_utc());
nye20 += 1.days();
assert_eq!(nye20, date!(2020 - 01 - 01).midnight().assume_utc());
let mut nye20t = SystemTime::from(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc(),
);
nye20t += 2.seconds();
assert_eq!(
nye20t,
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc()
);
let mut ny20t = SystemTime::from(date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc());
ny20t += (-2).seconds();
assert_eq!(
ny20t,
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
);
}
#[test]
#[cfg(feature = "std")]
fn std_sub_duration() {
assert_eq!(
SystemTime::from(date!(2019 - 01 - 06).midnight().assume_utc()) - 5.days(),
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc()),
);
assert_eq!(
SystemTime::from(date!(2020 - 01 - 01).midnight().assume_utc()) - 1.days(),
SystemTime::from(date!(2019 - 12 - 31).midnight().assume_utc()),
);
assert_eq!(
SystemTime::from(date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc())
- 2.seconds(),
SystemTime::from(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
),
);
assert_eq!(
SystemTime::from(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
) - (-2).seconds(),
SystemTime::from(date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc()),
);
}
#[test]
#[cfg(feature = "std")]
fn std_sub_assign_duration() {
let mut ny19 = SystemTime::from(date!(2019 - 01 - 06).midnight().assume_utc());
ny19 -= 5.days();
assert_eq!(ny19, date!(2019 - 01 - 01).midnight().assume_utc());
let mut ny20 = SystemTime::from(date!(2020 - 01 - 01).midnight().assume_utc());
ny20 -= 1.days();
assert_eq!(ny20, date!(2019 - 12 - 31).midnight().assume_utc());
let mut ny20t = SystemTime::from(date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc());
ny20t -= 2.seconds();
assert_eq!(
ny20t,
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc()
);
let mut nye20t = SystemTime::from(
date!(2019 - 12 - 31)
.with_time(time!(23:59:59))
.assume_utc(),
);
nye20t -= (-2).seconds();
assert_eq!(
nye20t,
date!(2020 - 01 - 01).with_time(time!(0:00:01)).assume_utc()
);
}
#[test]
fn sub_self() {
assert_eq!(
date!(2019 - 01 - 02).midnight().assume_utc()
- date!(2019 - 01 - 01).midnight().assume_utc(),
1.days(),
);
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc()
- date!(2019 - 01 - 02).midnight().assume_utc(),
(-1).days(),
);
assert_eq!(
date!(2020 - 01 - 01).midnight().assume_utc()
- date!(2019 - 12 - 31).midnight().assume_utc(),
1.days(),
);
assert_eq!(
date!(2019 - 12 - 31).midnight().assume_utc()
- date!(2020 - 01 - 01).midnight().assume_utc(),
(-1).days(),
);
}
#[test]
#[cfg(feature = "std")]
fn std_sub() {
assert_eq!(
SystemTime::from(date!(2019 - 01 - 02).midnight().assume_utc())
- date!(2019 - 01 - 01).midnight().assume_utc(),
1.days()
);
assert_eq!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
- date!(2019 - 01 - 02).midnight().assume_utc(),
(-1).days()
);
assert_eq!(
SystemTime::from(date!(2020 - 01 - 01).midnight().assume_utc())
- date!(2019 - 12 - 31).midnight().assume_utc(),
1.days()
);
assert_eq!(
SystemTime::from(date!(2019 - 12 - 31).midnight().assume_utc())
- date!(2020 - 01 - 01).midnight().assume_utc(),
(-1).days()
);
}
#[test]
#[cfg(feature = "std")]
fn sub_std() {
assert_eq!(
date!(2019 - 01 - 02).midnight().assume_utc()
- SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc()),
1.days()
);
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc()
- SystemTime::from(date!(2019 - 01 - 02).midnight().assume_utc()),
(-1).days()
);
assert_eq!(
date!(2020 - 01 - 01).midnight().assume_utc()
- SystemTime::from(date!(2019 - 12 - 31).midnight().assume_utc()),
1.days()
);
assert_eq!(
date!(2019 - 12 - 31).midnight().assume_utc()
- SystemTime::from(date!(2020 - 01 - 01).midnight().assume_utc()),
(-1).days()
);
}
#[test]
#[cfg(feature = "std")]
#[allow(deprecated)]
fn eq_std() {
let now_datetime = OffsetDateTime::now();
let now_systemtime = SystemTime::from(now_datetime);
assert_eq!(now_datetime, now_systemtime);
}
#[test]
#[cfg(feature = "std")]
#[allow(deprecated)]
fn std_eq() {
let now_datetime = OffsetDateTime::now();
let now_systemtime = SystemTime::from(now_datetime);
assert_eq!(now_datetime, now_systemtime);
}
#[test]
#[cfg(feature = "std")]
fn ord_std() {
assert_eq!(
date!(2019 - 01 - 01).midnight().assume_utc(),
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01).midnight().assume_utc()
< SystemTime::from(date!(2020 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01).midnight().assume_utc()
< SystemTime::from(date!(2019 - 02 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01).midnight().assume_utc()
< SystemTime::from(date!(2019 - 01 - 02).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01).midnight().assume_utc()
< SystemTime::from(date!(2019 - 01 - 01).with_time(time!(1:00:00)).assume_utc())
);
assert!(
date!(2019 - 01 - 01).midnight().assume_utc()
< SystemTime::from(date!(2019 - 01 - 01).with_time(time!(0:01:00)).assume_utc())
);
assert!(
date!(2019 - 01 - 01).midnight().assume_utc()
< SystemTime::from(date!(2019 - 01 - 01).with_time(time!(0:00:01)).assume_utc())
);
assert!(
date!(2019 - 01 - 01).midnight().assume_utc()
< SystemTime::from(
date!(2019 - 01 - 01)
.with_time(time!(0:00:00.001))
.assume_utc()
)
);
assert!(
date!(2020 - 01 - 01).midnight().assume_utc()
> SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 02 - 01).midnight().assume_utc()
> SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 02).midnight().assume_utc()
> SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01).with_time(time!(1:00:00)).assume_utc()
> SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01).with_time(time!(0:01:00)).assume_utc()
> SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01).with_time(time!(0:00:01)).assume_utc()
> SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
assert!(
date!(2019 - 01 - 01)
.with_time(time!(0:00:00.000_000_001))
.assume_utc()
> SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
);
}
#[test]
#[cfg(feature = "std")]
fn std_ord() {
assert_eq!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc()),
date!(2019 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
< date!(2020 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
< date!(2019 - 02 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
< date!(2019 - 01 - 02).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
< date!(2019 - 01 - 01).with_time(time!(1:00:00)).assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
< date!(2019 - 01 - 01).with_time(time!(0:01:00)).assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
< date!(2019 - 01 - 01).with_time(time!(0:00:01)).assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).midnight().assume_utc())
< date!(2019 - 01 - 01)
.with_time(time!(0:00:00.000_000_001))
.assume_utc()
);
assert!(
SystemTime::from(date!(2020 - 01 - 01).midnight().assume_utc())
> date!(2019 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 02 - 01).midnight().assume_utc())
> date!(2019 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 02).midnight().assume_utc())
> date!(2019 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).with_time(time!(1:00:00)).assume_utc())
> date!(2019 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).with_time(time!(0:01:00)).assume_utc())
> date!(2019 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(date!(2019 - 01 - 01).with_time(time!(0:00:01)).assume_utc())
> date!(2019 - 01 - 01).midnight().assume_utc()
);
assert!(
SystemTime::from(
date!(2019 - 01 - 01)
.with_time(time!(0:00:00.001))
.assume_utc()
) > date!(2019 - 01 - 01).midnight().assume_utc()
);
}
#[test]
#[cfg(feature = "std")]
fn from_std() {
assert_eq!(
OffsetDateTime::from(SystemTime::UNIX_EPOCH),
OffsetDateTime::unix_epoch()
);
assert_eq!(
OffsetDateTime::from(SystemTime::UNIX_EPOCH - 1.std_days()),
OffsetDateTime::unix_epoch() - 1.days()
);
assert_eq!(
OffsetDateTime::from(SystemTime::UNIX_EPOCH + 1.std_days()),
OffsetDateTime::unix_epoch() + 1.days()
);
}
#[test]
#[cfg(feature = "std")]
fn to_std() {
assert_eq!(
SystemTime::from(OffsetDateTime::unix_epoch()),
SystemTime::UNIX_EPOCH
);
assert_eq!(
SystemTime::from(OffsetDateTime::unix_epoch() + 1.days()),
SystemTime::UNIX_EPOCH + 1.std_days()
);
assert_eq!(
SystemTime::from(OffsetDateTime::unix_epoch() - 1.days()),
SystemTime::UNIX_EPOCH - 1.std_days()
);
}
#[test]
#[cfg(feature = "std")]
fn display() {
assert_eq!(
date!(1970 - 01 - 01).midnight().assume_utc().to_string(),
String::from("1970-01-01 0:00 +0")
);
}
| {
assert!(OffsetDateTime::now_utc().year() >= 2019);
assert_eq!(OffsetDateTime::now_utc().offset(), offset!(UTC));
} |
q08.rs | //-----------------------------------------------------
// Setup.
static INPUT: &str = include_str!("data/q08.data");
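// Part A (AoC 2015 day 8): for each quoted line, count how many characters
// the code representation uses beyond the in-memory string: 2 for the outer
// quotes, plus 1 per `\\` or `\"` escape and 3 per `\xNN` escape.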
fn process_data_a(data: &str) -> i32 {
let mut rv = 0;
for line in data.lines() {
rv += 2;
let mut rest = line[1..line.len() - 1].chars();
while let Some(curr) = rest.next() {
if curr != '\\' {
continue;
}
let next = rest.next();
match next {
Some('\\') => rv += 1,
Some('\"') => rv += 1,
Some('x') => {
rv += 3;
rest.next();
rest.next();
}
_ => {
println!("Got \\{:?}", next);
panic!("Unknown escape sequence!!!")
}
}
}
}
rv
}
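// Part B: count the extra characters needed to re-encode each line as a
// string literal: 2 for a new pair of outer quotes, plus 1 for every `\` or
// `"` that would have to be escaped.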
fn process_data_b(data: &str) -> i32 {
let mut rv = 0;
for line in data.lines() {
rv += 2;
let rest = line.chars();
for curr in rest {
match curr {
'\\' => rv += 1,
'"' => rv += 1,
_ => {}
}
}
} |
//-----------------------------------------------------
// Questions.
q_impl!("8");
#[test]
fn a() {
assert_eq!(process_data_a("\"\""), 2);
assert_eq!(process_data_a("\"abc\""), 2);
assert_eq!(process_data_a("\"aaa\\\"aaa\""), 3);
assert_eq!(process_data_a("\"\\x27\""), 5);
assert_eq!(
process_data_a(
"\"\"
\"abc\"
\"aaa\\\"aaa\"
\"\\x27\"",
),
12
);
}
#[test]
fn b() {
assert_eq!(process_data_b("\"\""), 4);
assert_eq!(process_data_b("\"abc\""), 4);
assert_eq!(process_data_b("\"aaa\\\"aaa\""), 6);
assert_eq!(process_data_b("\"\\x27\""), 5);
assert_eq!(
process_data_b(
"\"\"
\"abc\"
\"aaa\\\"aaa\"
\"\\x27\"",
),
19
);
} | rv
} |
auth.js | const jwt = require("jsonwebtoken");
const secret = "mysecretsshhhhh";
const expiration = "2h";
module.exports = {
authMiddleware: function ({ req }) {
// allows token to be sent via req.body, req.query, or headers
let token = req.body.token || req.query.token || req.headers.authorization;
// ["Bearer", "<tokenvalue>"]
if (req.headers.authorization) {
token = token.split(" ").pop().trim();
}
if (!token) {
return req;
}
try {
const { data } = jwt.verify(token, secret, { maxAge: expiration });
req.user = data;
} catch {
console.log("Invalid token");
}
return req;
},
signToken: function ({ username, email, _id }) {
const payload = { username, email, _id };
return jwt.sign({ data: payload }, secret, { expiresIn: expiration });
}, | }; |
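
// A minimal usage sketch for the helpers above, assuming an Apollo Server
// setup: the "apollo-server" package and the "./utils/auth" require path are
// assumptions for illustration, not part of the original file.
const { ApolloServer, gql } = require("apollo-server");
const { authMiddleware } = require("./utils/auth");

const typeDefs = gql`
  type Query {
    me: String
  }
`;

const resolvers = {
  Query: {
    // authMiddleware returns the request with `user` attached whenever the
    // Bearer token verifies, so resolvers can simply read context.user.
    me: (parent, args, context) => (context.user ? context.user.username : "not logged in"),
  },
};

const server = new ApolloServer({ typeDefs, resolvers, context: authMiddleware });
server.listen().then(({ url }) => console.log(`GraphQL ready at ${url}`));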
|
diff.py | #!/usr/bin/python
#
# Logic for computing missing and duplicate files using Postgresql DB.
#
import os
import fnmatch
# from shutil import copy2
# from shutil import copyfile
from dbutils import required as db
MISSING_FROM_COMP_FOLDER = "missing_from_comp"
MISSING_FROM_SRC_FOLDER = "missing_from_source"
FILE_NAME_COLUMN = "file"
FILE_EXTENSIONS_TO_SKIP = ['.ini', '.db', '.info', '.pdf']
def handleDuplicates(conn):
# conn = db.getConnection(dbName)
|
#######################
# Logic for identifying missing files in the source and destination/comparison locations.
#######################
def handleMissing(dbName, sourceSchema, compSchema):
conn = db.getConnection(dbName)
cur = conn.cursor()
try:
# identify missing files from the comparison location
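        # (hashes in source EXCEPT hashes in comparison) yields content that
        # exists only in the source schema, i.e. files with no copy at the
        # comparison location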
command = "SELECT %s from %s.filehashes where hash in (select hash from %s.filehashes except select hash from %s.filehashes);" %(FILE_NAME_COLUMN, sourceSchema, sourceSchema, compSchema)
print("Identifying all missing files in comparison location. Executing: " + command)
cur.execute( command )
missingInComp = cur.fetchall()
# print("Missing files in comparison location: %s" % missingInComp)
# __collectMissingFiles__(list(missingInComp), MISSING_FROM_COMP_FOLDER)
try:
for missingFile in missingInComp:
__collectMissingFiles__(missingFile[0], MISSING_FROM_COMP_FOLDER)
except Exception as ce:
print("There was a problem locating files on comparison location. The comparison location files may have changed/moved since the scan. ")
print(ce)
return
# identify missing files from the source location
command = "SELECT %s from %s.filehashes where hash in (select hash from %s.filehashes except select hash from %s.filehashes);" % (FILE_NAME_COLUMN, compSchema, compSchema, sourceSchema)
print("Identifying all missing files in source location. Executing: " + command)
cur.execute(command)
missingInSource = cur.fetchall()
# print("Missing files in source location: %s" % missingInSource)
# __collectMissingFiles__(list(missingInSource), MISSING_FROM_SRC_FOLDER)
try:
for missingFile in missingInSource:
__collectMissingFiles__(missingFile[0], MISSING_FROM_SRC_FOLDER)
except Exception as se:
print("There was a problem locating files on source location. The source location files may have changed/moved since the scan. ")
print(se)
except Exception as e:
print("Unable to identify missing files! Cause: " + e)
finally:
cur.close()
# conn.close
db.returnConnection(conn)
print("Done processing missing files. Please look at %s and %s folders for missing files." % (MISSING_FROM_SRC_FOLDER, MISSING_FROM_COMP_FOLDER))
#######################
# Utility for copying all of the missing files from the specified result-set into the specified folder.
#######################
def __collectMissingFiles__(missingFile, folderName):
# for missingFile in missingFiles:
# missingFile.endswith(tuple(set(['.ini', '.db'])))
if not missingFile.endswith(tuple(FILE_EXTENSIONS_TO_SKIP)):
dst = "./" + folderName + missingFile
print("Attempting to copy missing file: " + missingFile + " to destination: " + dst)
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
# TODO implement file type filtering to only get files we want and skip ones we don't care about like *.txt, *.ini, etc.
# if not fnmatch.fnmatch(missingFile, '.*.ini'):
# copyfile( missingFile, dst)
# copy2(missingFile, dst)
        os.system('cp -v -p "' + missingFile + '" "' + dst + '"') | cur = conn.cursor()
try:
        # duplicates on the source location
        # TODO: both queries below were left unfinished; duplicate detection
        # needs a predicate such as GROUP BY hash HAVING count(*) > 1
cur.execute("SELECT * from filehashes WHERE ")
# duplicates on the comparison location
cur.execute("SELECT * from filehashes ")
finally:
        cur.close()
# conn.close
db.returnConnection(conn) |
v1.d.ts | /// <reference types="node" />
import { OAuth2Client, JWT, Compute, UserRefreshClient, GaxiosPromise, GoogleConfigurable, MethodOptions, StreamMethodOptions, GlobalOptions, GoogleAuth, BodyResponseCallback, APIRequestContext } from 'googleapis-common';
import { Readable } from 'stream';
export declare namespace managedidentities_v1 {
export interface Options extends GlobalOptions {
version: 'v1';
}
interface StandardParameters {
/**
* Auth client or API Key for the request
*/
auth?: string | OAuth2Client | JWT | Compute | UserRefreshClient | GoogleAuth;
/**
* V1 error format.
*/
'$.xgafv'?: string;
/**
* OAuth access token.
*/
access_token?: string;
/**
* Data format for response.
*/
alt?: string;
/**
* JSONP
*/
callback?: string;
/**
* Selector specifying which fields to include in a partial response.
*/
fields?: string;
/**
* API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
*/
key?: string;
/**
* OAuth 2.0 token for the current user.
*/
oauth_token?: string;
/**
* Returns response with indentations and line breaks.
*/
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
*/
quotaUser?: string;
/**
* Legacy upload protocol for media (e.g. "media", "multipart").
*/
uploadType?: string;
/**
* Upload protocol for media (e.g. "raw", "multipart").
*/
upload_protocol?: string;
}
/**
* Managed Service for Microsoft Active Directory API
*
* The Managed Service for Microsoft Active Directory API is used for managing a highly available, hardened service running Microsoft Active Directory (AD).
*
* @example
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* @namespace managedidentities
* @type {Function}
* @version v1
* @variation v1
* @param {object=} options Options for Managedidentities
*/
export class Managedidentities {
context: APIRequestContext;
projects: Resource$Projects;
constructor(options: GlobalOptions, google?: GoogleConfigurable);
}
/**
* Request message for AttachTrust
*/
export interface Schema$AttachTrustRequest {
/**
* Required. The domain trust resource.
*/
trust?: Schema$Trust;
}
/**
* Associates `members` with a `role`.
*/
export interface Schema$Binding {
/**
* The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
*/
condition?: Schema$Expr;
/**
* Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `[email protected]` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `[email protected]`. * `group:{emailid}`: An email address that represents a Google group. For example, `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.
*/
members?: string[] | null;
/**
* Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
*/
role?: string | null;
}
/**
* The request message for Operations.CancelOperation.
*/
export interface Schema$CancelOperationRequest {
}
/**
* Request message for DetachTrust
*/
export interface Schema$DetachTrustRequest {
/**
* Required. The domain trust resource to be removed.
*/
trust?: Schema$Trust;
}
/**
* Represents a managed Microsoft Active Directory domain.
*/
export interface Schema$Domain {
/**
* Optional. The name of the delegated administrator account used to perform Active Directory operations. If not specified, `setupadmin` will be used.
*/
admin?: string | null;
/**
* Optional. The full names of the Google Compute Engine [networks](/compute/docs/networks-and-firewalls#networks) the domain instance is connected to. Networks can be added using UpdateDomain. The domain is only available on networks listed in `authorized_networks`. If CIDR subnets overlap between networks, domain creation will fail.
*/
authorizedNetworks?: string[] | null;
/**
* Output only. The time the instance was created.
*/
createTime?: string | null;
/**
* Output only. The fully-qualified domain name of the exposed domain used by clients to connect to the service. Similar to what would be chosen for an Active Directory set up on an internal network.
*/
fqdn?: string | null;
/**
* Optional. Resource labels that can contain user-provided metadata.
*/
labels?: {
[key: string]: string;
} | null;
/**
* Required. Locations where the domain needs to be provisioned: regions, e.g. us-west1 or us-east4. The service supports up to 4 locations at once. Each location will use a /26 block.
*/
locations?: string[] | null;
/**
* Required. The unique name of the domain using the form: `projects/{project_id}/locations/global/domains/{domain_name}`.
*/
name?: string | null;
/**
* Required. The CIDR range of internal addresses that are reserved for this domain. Reserved networks must be /24 or larger. Ranges must be unique and non-overlapping with existing subnets in [Domain].[authorized_networks].
*/
reservedIpRange?: string | null;
/**
* Output only. The current state of this domain.
*/
state?: string | null;
/**
* Output only. Additional information about the current status of this domain, if available.
*/
statusMessage?: string | null;
/**
* Output only. The current trusts associated with the domain.
*/
trusts?: Schema$Trust[];
/**
* Output only. The last update time.
*/
updateTime?: string | null;
}
/**
* A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.
*/
export interface Schema$Empty {
}
/**
* Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.
*/
export interface Schema$Expr {
/**
* Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
*/
description?: string | null;
/**
* Textual representation of an expression in Common Expression Language syntax.
*/
expression?: string | null;
/**
* Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
*/
location?: string | null;
/**
* Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
*/
title?: string | null;
}
/**
* Represents the metadata of the long-running operation.
*/
export interface Schema$GoogleCloudManagedidentitiesV1alpha1OpMetadata {
/**
* Output only. API version used to start the operation.
*/
apiVersion?: string | null;
/**
* Output only. The time the operation was created.
*/
createTime?: string | null;
/**
* Output only. The time the operation finished running.
*/
endTime?: string | null;
/**
* Output only. Identifies whether the user has requested cancellation of the operation. Operations that have successfully been cancelled have Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
*/
requestedCancellation?: boolean | null;
/**
* Output only. Server-defined resource path for the target of the operation.
*/
target?: string | null;
/**
* Output only. Name of the verb executed by the operation.
*/
verb?: string | null;
}
/**
* Represents the metadata of the long-running operation.
*/
export interface Schema$GoogleCloudManagedidentitiesV1beta1OpMetadata {
/**
* Output only. API version used to start the operation.
*/
apiVersion?: string | null;
/**
* Output only. The time the operation was created.
*/
createTime?: string | null;
/**
* Output only. The time the operation finished running.
*/
endTime?: string | null;
/**
* Output only. Identifies whether the user has requested cancellation of the operation. Operations that have successfully been cancelled have Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
*/
requestedCancellation?: boolean | null;
/**
* Output only. Server-defined resource path for the target of the operation.
*/
target?: string | null;
/**
* Output only. Name of the verb executed by the operation.
*/
verb?: string | null;
}
/**
* Represents the metadata of the long-running operation.
*/
export interface Schema$GoogleCloudManagedidentitiesV1OpMetadata {
/**
* Output only. API version used to start the operation.
*/
apiVersion?: string | null;
/**
* Output only. The time the operation was created.
*/
createTime?: string | null;
/**
* Output only. The time the operation finished running.
*/
endTime?: string | null;
/**
* Output only. Identifies whether the user has requested cancellation of the operation. Operations that have successfully been cancelled have Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
*/
requestedCancellation?: boolean | null;
/**
* Output only. Server-defined resource path for the target of the operation.
*/
target?: string | null;
/**
* Output only. Name of the verb executed by the operation.
*/
verb?: string | null;
}
export interface Schema$GoogleCloudSaasacceleratorManagementProvidersV1Instance {
/**
* consumer_defined_name is the name that is set by the consumer. The `name` field, on the other hand, is the system-assigned id of an instance, so consumers are not necessarily aware of it. consumer_defined_name is used for notification/UI purposes so the consumer can recognize their instances.
*/
consumerDefinedName?: string | null;
/**
* Output only. Timestamp when the resource was created.
*/
createTime?: string | null;
/**
* Optional. Resource labels to represent user provided metadata. Each label is a key-value pair, where both the key and the value are arbitrary strings provided by the user.
*/
labels?: {
[key: string]: string;
} | null;
/**
* The MaintenancePolicies that have been attached to the instance. The key must be of the type name of the oneof policy name defined in MaintenancePolicy, and the referenced policy must define the same policy type. For complete details of MaintenancePolicy, please refer to go/cloud-saas-mw-ug.
*/
maintenancePolicyNames?: {
[key: string]: string;
} | null;
/**
* The MaintenanceSchedule contains the scheduling information of published maintenance schedule.
*/
maintenanceSchedules?: {
[key: string]: Schema$GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSchedule;
} | null;
/**
* Optional. The MaintenanceSettings associated with instance.
*/
maintenanceSettings?: Schema$GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSettings;
/**
* Unique name of the resource. It uses the form: `projects/{project_id}/locations/{location_id}/instances/{instance_id}`
*/
name?: string | null;
/**
* Output only. Custom string attributes used primarily to expose producer-specific information in monitoring dashboards. See go/get-instance-metadata.
*/
producerMetadata?: {
[key: string]: string;
} | null;
/**
* Output only. The list of data plane resources provisioned for this instance, e.g. compute VMs. See go/get-instance-metadata.
*/
provisionedResources?: Schema$GoogleCloudSaasacceleratorManagementProvidersV1ProvisionedResource[];
/**
* Link to the SLM instance template. Only populated when updating SLM instances via SSA's Actuation service adaptor. Service producers with a custom control plane (e.g. Cloud SQL) don't need to populate this field. Instead they should use software_versions.
*/
slmInstanceTemplate?: string | null;
/**
* Output only. SLO metadata for instance classification in the Standardized dataplane SLO platform. See go/cloud-ssa-standard-slo for feature description.
*/
sloMetadata?: Schema$GoogleCloudSaasacceleratorManagementProvidersV1SloMetadata;
/**
* Software versions that are used to deploy this instance. This can be mutated by rollout services.
*/
softwareVersions?: {
[key: string]: string;
} | null;
/**
* Output only. Current lifecycle state of the resource (e.g. if it's being created or ready to use).
*/
state?: string | null;
/**
* Output only. ID of the associated GCP tenant project. See go/get-instance-metadata.
*/
tenantProjectId?: string | null;
/**
* Output only. Timestamp when the resource was last modified.
*/
updateTime?: string | null;
}
/**
* Maintenance schedule which is exposed to customer and potentially end user, indicating published upcoming future maintenance schedule
*/
export interface Schema$GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSchedule {
/**
* Can this scheduled update be rescheduled? By default this is true; the API needs to explicitly check whether the field is set, and the schedule is non-reschedulable only when it is explicitly set to false.
*/
canReschedule?: boolean | null;
/**
* The scheduled end time for the maintenance.
*/
endTime?: string | null;
/**
* The rollout management policy this maintenance schedule is associated with. When doing reschedule update request, the reschedule should be against this given policy.
*/
rolloutManagementPolicy?: string | null;
/**
* The scheduled start time for the maintenance.
*/
startTime?: string | null;
}
/**
* Maintenance settings associated with instance. Allows service producers and end users to assign settings that control maintenance on this instance.
*/ | export interface Schema$GoogleCloudSaasacceleratorManagementProvidersV1MaintenanceSettings {
/**
* Optional. Exclude instance from maintenance. When true, rollout service will not attempt maintenance on the instance. Rollout service will include the instance in reported rollout progress as not attempted.
*/
exclude?: boolean | null;
}
/**
* Node information for custom per-node SLO implementations. SSA does not support per-node SLO, but producers can populate per-node information in SloMetadata for custom precomputations. SSA Eligibility Exporter will emit per-node metric based on this information.
*/
export interface Schema$GoogleCloudSaasacceleratorManagementProvidersV1NodeSloMetadata {
/**
* By default node is eligible if instance is eligible. But individual node might be excluded from SLO by adding entry here. For semantic see SloMetadata.exclusions. If both instance and node level exclusions are present for time period, the node level's reason will be reported by Eligibility Exporter.
*/
exclusions?: Schema$GoogleCloudSaasacceleratorManagementProvidersV1SloExclusion[];
/**
* The location of the node, if different from instance location.
*/
location?: string | null;
/**
* The id of the node. This should be equal to SaasInstanceNode.node_id.
*/
nodeId?: string | null;
}
/**
* Describes provisioned dataplane resources.
*/
export interface Schema$GoogleCloudSaasacceleratorManagementProvidersV1ProvisionedResource {
/**
* Type of the resource. This can be either a GCP resource or a custom one (e.g. another cloud provider's VM). For GCP compute resources use singular form of the names listed in GCP compute API documentation (https://cloud.google.com/compute/docs/reference/rest/v1/), prefixed with 'compute-', for example: 'compute-instance', 'compute-disk', 'compute-autoscaler'.
*/
resourceType?: string | null;
/**
* URL identifying the resource, e.g. "https://www.googleapis.com/compute/v1/projects/...)".
*/
resourceUrl?: string | null;
}
/**
* SloEligibility is a tuple containing eligibility value: true if an instance is eligible for SLO calculation or false if it should be excluded from all SLO-related calculations along with a user-defined reason.
*/
export interface Schema$GoogleCloudSaasacceleratorManagementProvidersV1SloEligibility {
/**
* Whether an instance is eligible or ineligible.
*/
eligible?: boolean | null;
/**
* User-defined reason for the current value of instance eligibility. Usually, this can be directly mapped to the internal state. An empty reason is allowed.
*/
reason?: string | null;
}
/**
* SloExclusion represents an exclusion in SLI calculation that applies to all SLOs.
*/
export interface Schema$GoogleCloudSaasacceleratorManagementProvidersV1SloExclusion {
/**
* Exclusion duration. No restrictions on the possible values. When an ongoing operation is taking longer than initially expected, an existing entry in the exclusion list can be updated by extending the duration. This is supported by the subsystem exporting eligibility data as long as such extension is committed at least 10 minutes before the original exclusion expiration - otherwise it is possible that there will be "gaps" in the exclusion application in the exported timeseries.
*/
duration?: string | null;
/**
* Human-readable reason for the exclusion. This should be a static string (e.g. "Disruptive update in progress") and should not contain dynamically generated data (e.g. instance name). Can be left empty.
*/
reason?: string | null;
/**
* Name of an SLI that this exclusion applies to. Can be left empty, signaling that the instance should be excluded from all SLIs defined in the service SLO configuration.
*/
sliName?: string | null;
/**
* Start time of the exclusion. No alignment (e.g. to a full minute) needed.
*/
startTime?: string | null;
}
/**
* SloMetadata contains resources required for proper SLO classification of the instance.
*/
export interface Schema$GoogleCloudSaasacceleratorManagementProvidersV1SloMetadata {
/**
* Optional. User-defined instance eligibility.
*/
eligibility?: Schema$GoogleCloudSaasacceleratorManagementProvidersV1SloEligibility;
/**
* List of SLO exclusion windows. When multiple entries in the list match (matching the exclusion time-window against the current time point) the exclusion reason used in the first matching entry will be published. There is no need to include expired exclusions in this list, as only the currently applicable exclusions are taken into account by the eligibility exporting subsystem (the historical state of exclusions will be reflected in the historically produced timeseries regardless of the current state). This field can be used to mark the instance as temporarily ineligible for the purpose of SLO calculation. For permanent instance SLO exclusion, use of custom instance eligibility is recommended. See 'eligibility' field below.
*/
exclusions?: Schema$GoogleCloudSaasacceleratorManagementProvidersV1SloExclusion[];
/**
* Optional. List of nodes. Some producers need to use per-node metadata to calculate SLO. This field allows such producers to publish per-node SLO metadata, which will be consumed by SSA Eligibility Exporter and published in the form of per-node metrics to Monarch.
*/
nodes?: Schema$GoogleCloudSaasacceleratorManagementProvidersV1NodeSloMetadata[];
/**
* Name of the SLO tier the Instance belongs to. This name will be expected to match the tiers specified in the service SLO configuration. Field is mandatory and must not be empty.
*/
tier?: string | null;
}
/**
* Response message for ListDomains
*/
export interface Schema$ListDomainsResponse {
/**
* A list of Managed Identities Service domains in the project.
*/
domains?: Schema$Domain[];
/**
* A token to retrieve the next page of results, or empty if there are no more results in the list.
*/
nextPageToken?: string | null;
/**
* A list of locations that could not be reached.
*/
unreachable?: string[] | null;
}
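/**
 * A hedged pagination sketch (illustrative, not part of the generated API
 * surface): feed `nextPageToken` back as `pageToken` until it comes back
 * empty. The `domains.list` call below is an assumption based on this
 * response type; check the generated client for the exact method.
 *
 *   // inside an async function, with `managedidentities` built as in the
 *   // @example blocks further down
 *   let pageToken;
 *   do {
 *     const res = await managedidentities.projects.locations.global.domains.list({
 *       parent: 'projects/my-project/locations/global',
 *       pageToken,
 *     });
 *     (res.data.domains || []).forEach(d => console.log(d.name));
 *     pageToken = res.data.nextPageToken || undefined;
 *   } while (pageToken);
 */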
/**
* The response message for Locations.ListLocations.
*/
export interface Schema$ListLocationsResponse {
/**
* A list of locations that matches the specified filter in the request.
*/
locations?: Schema$Location[];
/**
* The standard List next-page token.
*/
nextPageToken?: string | null;
}
/**
* The response message for Operations.ListOperations.
*/
export interface Schema$ListOperationsResponse {
/**
* The standard List next-page token.
*/
nextPageToken?: string | null;
/**
* A list of operations that matches the specified filter in the request.
*/
operations?: Schema$Operation[];
}
/**
* A resource that represents Google Cloud Platform location.
*/
export interface Schema$Location {
/**
* The friendly name for this location, typically a nearby city name. For example, "Tokyo".
*/
displayName?: string | null;
/**
* Cross-service attributes for the location. For example {"cloud.googleapis.com/region": "us-east1"}
*/
labels?: {
[key: string]: string;
} | null;
/**
* The canonical id for this location. For example: `"us-east1"`.
*/
locationId?: string | null;
/**
* Service-specific metadata. For example the available capacity at the given location.
*/
metadata?: {
[key: string]: any;
} | null;
/**
* Resource name for the location, which may vary between implementations. For example: `"projects/example-project/locations/us-east1"`
*/
name?: string | null;
}
/**
* This resource represents a long-running operation that is the result of a network API call.
*/
export interface Schema$Operation {
/**
* If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
*/
done?: boolean | null;
/**
* The error result of the operation in case of failure or cancellation.
*/
error?: Schema$Status;
/**
* Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
*/
metadata?: {
[key: string]: any;
} | null;
/**
* The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
*/
name?: string | null;
/**
* The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
*/
response?: {
[key: string]: any;
} | null;
}
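/**
 * A minimal polling sketch for these long-running operations, assuming the
 * `operations.get` method exposed under projects.locations.global.operations
 * (the resource class is declared further down in this file):
 *
 *   async function waitForOperation(name) {
 *     for (;;) {
 *       const res = await managedidentities.projects.locations.global.operations.get({ name });
 *       if (res.data.done) {
 *         if (res.data.error) throw new Error(res.data.error.message || 'operation failed');
 *         return res.data.response;
 *       }
 *       await new Promise(resolve => setTimeout(resolve, 5000)); // back off between polls
 *     }
 *   }
 */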
/**
* Represents the metadata of the long-running operation.
*/
export interface Schema$OperationMetadata {
/**
* [Output only] API version used to start the operation.
*/
apiVersion?: string | null;
/**
* [Output only] Identifies whether the user has requested cancellation of the operation. Operations that have successfully been cancelled have Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
*/
cancelRequested?: boolean | null;
/**
* [Output only] The time the operation was created.
*/
createTime?: string | null;
/**
* [Output only] The time the operation finished running.
*/
endTime?: string | null;
/**
* [Output only] Human-readable status of the operation, if any.
*/
statusDetail?: string | null;
/**
* [Output only] Server-defined resource path for the target of the operation.
*/
target?: string | null;
/**
* [Output only] Name of the verb executed by the operation.
*/
verb?: string | null;
}
/**
* An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:[email protected]", "group:[email protected]", "domain:google.com", "serviceAccount:[email protected]" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:[email protected]" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - user:[email protected] - group:[email protected] - domain:google.com - serviceAccount:[email protected] role: roles/resourcemanager.organizationAdmin - members: - user:[email protected] role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).
*/
export interface Schema$Policy {
/**
* Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.
*/
bindings?: Schema$Binding[];
/**
* `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
*/
etag?: string | null;
/**
* Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
*/
version?: number | null;
}
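/**
 * The `etag` contract above implies a read-modify-write cycle. A hedged
 * sketch, assuming getIamPolicy/setIamPolicy methods on the domains resource
 * (implied by SetIamPolicyRequest below; verify against the generated client):
 *
 *   const resource = 'projects/my-project/locations/global/domains/my-domain';
 *   const { data: policy } = await managedidentities.projects.locations.global.domains.getIamPolicy({ resource });
 *   (policy.bindings = policy.bindings || []).push({
 *     role: 'roles/viewer',
 *     members: ['user:[email protected]'],
 *   });
 *   // sending the etag back lets the server reject concurrent edits
 *   await managedidentities.projects.locations.global.domains.setIamPolicy({
 *     resource,
 *     requestBody: { policy },
 *   });
 */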
/**
* Request message for ReconfigureTrust
*/
export interface Schema$ReconfigureTrustRequest {
/**
* Required. The target DNS server IP addresses to resolve the remote domain involved in the trust.
*/
targetDnsIpAddresses?: string[] | null;
/**
* Required. The fully-qualified target domain name which will be in trust with current domain.
*/
targetDomainName?: string | null;
}
/**
* Request message for ResetAdminPassword
*/
export interface Schema$ResetAdminPasswordRequest {
}
/**
* Response message for ResetAdminPassword
*/
export interface Schema$ResetAdminPasswordResponse {
/**
* A random password. See admin for more information.
*/
password?: string | null;
}
/**
* Request message for `SetIamPolicy` method.
*/
export interface Schema$SetIamPolicyRequest {
/**
* REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them.
*/
policy?: Schema$Policy;
}
/**
* The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
*/
export interface Schema$Status {
/**
* The status code, which should be an enum value of google.rpc.Code.
*/
code?: number | null;
/**
* A list of messages that carry the error details. There is a common set of message types for APIs to use.
*/
details?: Array<{
[key: string]: any;
}> | null;
/**
* A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
*/
message?: string | null;
}
/**
* Request message for `TestIamPermissions` method.
*/
export interface Schema$TestIamPermissionsRequest {
/**
* The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
*/
permissions?: string[] | null;
}
/**
* Response message for `TestIamPermissions` method.
*/
export interface Schema$TestIamPermissionsResponse {
/**
* A subset of `TestPermissionsRequest.permissions` that the caller is allowed.
*/
permissions?: string[] | null;
}
/**
* Represents a relationship between two domains. This allows a controller in one domain to authenticate a user in another domain.
*/
export interface Schema$Trust {
/**
* Output only. The time the instance was created.
*/
createTime?: string | null;
/**
* Output only. The last heartbeat time when the trust was known to be connected.
*/
lastTrustHeartbeatTime?: string | null;
/**
* Optional. The trust authentication type, which decides whether the trusted side has forest/domain wide access or selective access to an approved set of resources.
*/
selectiveAuthentication?: boolean | null;
/**
* Output only. The current state of the trust.
*/
state?: string | null;
/**
* Output only. Additional information about the current state of the trust, if available.
*/
stateDescription?: string | null;
/**
* Required. The target DNS server IP addresses which can resolve the remote domain involved in the trust.
*/
targetDnsIpAddresses?: string[] | null;
/**
* Required. The fully qualified target domain name which will be in trust with the current domain.
*/
targetDomainName?: string | null;
/**
* Required. The trust direction, which decides if the current domain is trusted, trusting, or both.
*/
trustDirection?: string | null;
/**
* Required. The trust secret used for the handshake with the target domain. This will not be stored.
*/
trustHandshakeSecret?: string | null;
/**
* Required. The type of trust represented by the trust resource.
*/
trustType?: string | null;
/**
* Output only. The last update time.
*/
updateTime?: string | null;
}
/**
* Request message for ValidateTrust
*/
export interface Schema$ValidateTrustRequest {
/**
* Required. The domain trust to validate trust state for.
*/
trust?: Schema$Trust;
}
export class Resource$Projects {
context: APIRequestContext;
locations: Resource$Projects$Locations;
constructor(context: APIRequestContext);
}
export class Resource$Projects$Locations {
context: APIRequestContext;
global: Resource$Projects$Locations$Global;
constructor(context: APIRequestContext);
/**
* managedidentities.projects.locations.get
* @desc Gets information about a location.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
* // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.get({
* // Resource name for the location.
* name: 'projects/my-project/locations/my-location',
* });
* console.log(res.data);
*
* // Example response
* // {
* // "displayName": "my_displayName",
* // "labels": {},
* // "locationId": "my_locationId",
* // "metadata": {},
* // "name": "my_name"
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.get
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string} params.name Resource name for the location.
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
get(params: Params$Resource$Projects$Locations$Get, options: StreamMethodOptions): GaxiosPromise<Readable>;
get(params?: Params$Resource$Projects$Locations$Get, options?: MethodOptions): GaxiosPromise<Schema$Location>;
get(params: Params$Resource$Projects$Locations$Get, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
get(params: Params$Resource$Projects$Locations$Get, options: MethodOptions | BodyResponseCallback<Schema$Location>, callback: BodyResponseCallback<Schema$Location>): void;
get(params: Params$Resource$Projects$Locations$Get, callback: BodyResponseCallback<Schema$Location>): void;
get(callback: BodyResponseCallback<Schema$Location>): void;
/**
* managedidentities.projects.locations.list
* @desc Lists information about the supported locations for this service.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
* // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.list({
* // The standard list filter.
* filter: 'placeholder-value',
* // If true, the returned list will include locations which are not yet revealed.
* includeUnrevealedLocations: 'placeholder-value',
* // The resource that owns the locations collection, if applicable.
* name: 'projects/my-project',
* // The standard list page size.
* pageSize: 'placeholder-value',
* // The standard list page token.
* pageToken: 'placeholder-value',
* });
* console.log(res.data);
*
* // Example response
* // {
* // "locations": [],
* // "nextPageToken": "my_nextPageToken"
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.list
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string=} params.filter The standard list filter.
* @param {boolean=} params.includeUnrevealedLocations If true, the returned list will include locations which are not yet revealed.
* @param {string} params.name The resource that owns the locations collection, if applicable.
* @param {integer=} params.pageSize The standard list page size.
* @param {string=} params.pageToken The standard list page token.
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
list(params: Params$Resource$Projects$Locations$List, options: StreamMethodOptions): GaxiosPromise<Readable>;
list(params?: Params$Resource$Projects$Locations$List, options?: MethodOptions): GaxiosPromise<Schema$ListLocationsResponse>;
list(params: Params$Resource$Projects$Locations$List, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
list(params: Params$Resource$Projects$Locations$List, options: MethodOptions | BodyResponseCallback<Schema$ListLocationsResponse>, callback: BodyResponseCallback<Schema$ListLocationsResponse>): void;
list(params: Params$Resource$Projects$Locations$List, callback: BodyResponseCallback<Schema$ListLocationsResponse>): void;
list(callback: BodyResponseCallback<Schema$ListLocationsResponse>): void;
}
export interface Params$Resource$Projects$Locations$Get extends StandardParameters {
/**
* Resource name for the location.
*/
name?: string;
}
export interface Params$Resource$Projects$Locations$List extends StandardParameters {
/**
* The standard list filter.
*/
filter?: string;
/**
* If true, the returned list will include locations which are not yet revealed.
*/
includeUnrevealedLocations?: boolean;
/**
* The resource that owns the locations collection, if applicable.
*/
name?: string;
/**
* The standard list page size.
*/
pageSize?: number;
/**
* The standard list page token.
*/
pageToken?: string;
}
export class Resource$Projects$Locations$Global {
context: APIRequestContext;
domains: Resource$Projects$Locations$Global$Domains;
operations: Resource$Projects$Locations$Global$Operations;
constructor(context: APIRequestContext);
}
export class Resource$Projects$Locations$Global$Domains {
context: APIRequestContext;
constructor(context: APIRequestContext);
/**
* managedidentities.projects.locations.global.domains.attachTrust
* @desc Adds an AD trust to a domain.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
* // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.domains.attachTrust(
* {
* // Required. The resource domain name, project name and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
* name: 'projects/my-project/locations/global/domains/my-domain',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "trust": {}
* // }
* },
* }
* );
* console.log(res.data);
*
* // Example response
* // {
* // "done": false,
* // "error": {},
* // "metadata": {},
* // "name": "my_name",
* // "response": {}
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.domains.attachTrust
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string} params.name Required. The resource domain name, project name and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
* @param {().AttachTrustRequest} params.requestBody Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
attachTrust(params: Params$Resource$Projects$Locations$Global$Domains$Attachtrust, options: StreamMethodOptions): GaxiosPromise<Readable>;
attachTrust(params?: Params$Resource$Projects$Locations$Global$Domains$Attachtrust, options?: MethodOptions): GaxiosPromise<Schema$Operation>;
attachTrust(params: Params$Resource$Projects$Locations$Global$Domains$Attachtrust, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
attachTrust(params: Params$Resource$Projects$Locations$Global$Domains$Attachtrust, options: MethodOptions | BodyResponseCallback<Schema$Operation>, callback: BodyResponseCallback<Schema$Operation>): void;
attachTrust(params: Params$Resource$Projects$Locations$Global$Domains$Attachtrust, callback: BodyResponseCallback<Schema$Operation>): void;
attachTrust(callback: BodyResponseCallback<Schema$Operation>): void;
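    /*
     * Note: attachTrust (like most mutating methods on this resource) resolves to a
     * long-running Operation, not the final result. A minimal polling sketch, not part
     * of the generated API surface; it assumes the same client setup as the samples
     * above, and the helper name and 5s delay are illustrative choices:
     *
     *   async function waitForOperation(opName) {
     *     const operations = managedidentities.projects.locations.global.operations;
     *     for (;;) {
     *       // operations.get returns the latest Operation state (see the Operations resource below).
     *       const {data} = await operations.get({name: opName});
     *       if (data.done) {
     *         if (data.error) throw new Error(data.error.message);
     *         return data.response;
     *       }
     *       // Wait between polls; production code may prefer exponential backoff.
     *       await new Promise(resolve => setTimeout(resolve, 5000));
     *     }
     *   }
     *
     *   const op = await managedidentities.projects.locations.global.domains.attachTrust({
     *     name: 'projects/my-project/locations/global/domains/my-domain',
     *     requestBody: {trust: {}},
     *   });
     *   const result = await waitForOperation(op.data.name);
     */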
/**
* managedidentities.projects.locations.global.domains.create
* @desc Creates a Microsoft AD domain.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
 * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.domains.create({
 * // Required. The fully qualified domain name, e.g. mydomain.myorganization.com, with the following restrictions: * Must contain only lowercase letters, numbers, periods, and hyphens. * Must start with a letter. * Must contain between 2-64 characters. * Must end with a number or a letter. * Must not start with a period. * The first segment (mydomain in the example above) must not exceed 15 characters. * The last segment cannot be fully numeric. * Must be unique within the customer project.
* domainName: 'placeholder-value',
* // Required. The resource project name and location using the form: `projects/{project_id}/locations/global`
* parent: 'projects/my-project/locations/global',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "admin": "my_admin",
* // "authorizedNetworks": [],
* // "createTime": "my_createTime",
* // "fqdn": "my_fqdn",
* // "labels": {},
* // "locations": [],
* // "name": "my_name",
* // "reservedIpRange": "my_reservedIpRange",
* // "state": "my_state",
* // "statusMessage": "my_statusMessage",
* // "trusts": [],
* // "updateTime": "my_updateTime"
* // }
* },
* });
* console.log(res.data);
*
* // Example response
* // {
* // "done": false,
* // "error": {},
* // "metadata": {},
* // "name": "my_name",
* // "response": {}
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.domains.create
* @memberOf! ()
*
* @param {object} params Parameters for request
 * @param {string=} params.domainName Required. The fully qualified domain name, e.g. mydomain.myorganization.com, with the following restrictions: * Must contain only lowercase letters, numbers, periods, and hyphens. * Must start with a letter. * Must contain between 2-64 characters. * Must end with a number or a letter. * Must not start with a period. * The first segment (mydomain in the example above) must not exceed 15 characters. * The last segment cannot be fully numeric. * Must be unique within the customer project.
* @param {string} params.parent Required. The resource project name and location using the form: `projects/{project_id}/locations/global`
* @param {().Domain} params.requestBody Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
create(params: Params$Resource$Projects$Locations$Global$Domains$Create, options: StreamMethodOptions): GaxiosPromise<Readable>;
create(params?: Params$Resource$Projects$Locations$Global$Domains$Create, options?: MethodOptions): GaxiosPromise<Schema$Operation>;
create(params: Params$Resource$Projects$Locations$Global$Domains$Create, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
create(params: Params$Resource$Projects$Locations$Global$Domains$Create, options: MethodOptions | BodyResponseCallback<Schema$Operation>, callback: BodyResponseCallback<Schema$Operation>): void;
create(params: Params$Resource$Projects$Locations$Global$Domains$Create, callback: BodyResponseCallback<Schema$Operation>): void;
create(callback: BodyResponseCallback<Schema$Operation>): void;
/**
* managedidentities.projects.locations.global.domains.delete
* @desc Deletes a domain.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
 * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.domains.delete({
* // Required. The domain resource name using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
* name: 'projects/my-project/locations/global/domains/my-domain',
* });
* console.log(res.data);
*
* // Example response
* // {
* // "done": false,
* // "error": {},
* // "metadata": {},
* // "name": "my_name",
* // "response": {}
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.domains.delete
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string} params.name Required. The domain resource name using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
delete(params: Params$Resource$Projects$Locations$Global$Domains$Delete, options: StreamMethodOptions): GaxiosPromise<Readable>;
delete(params?: Params$Resource$Projects$Locations$Global$Domains$Delete, options?: MethodOptions): GaxiosPromise<Schema$Operation>;
delete(params: Params$Resource$Projects$Locations$Global$Domains$Delete, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
delete(params: Params$Resource$Projects$Locations$Global$Domains$Delete, options: MethodOptions | BodyResponseCallback<Schema$Operation>, callback: BodyResponseCallback<Schema$Operation>): void;
delete(params: Params$Resource$Projects$Locations$Global$Domains$Delete, callback: BodyResponseCallback<Schema$Operation>): void;
delete(callback: BodyResponseCallback<Schema$Operation>): void;
/**
* managedidentities.projects.locations.global.domains.detachTrust
* @desc Removes an AD trust.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
 * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.domains.detachTrust(
* {
* // Required. The resource domain name, project name, and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
* name: 'projects/my-project/locations/global/domains/my-domain',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "trust": {}
* // }
* },
* }
* );
* console.log(res.data);
*
* // Example response
* // {
* // "done": false,
* // "error": {},
* // "metadata": {},
* // "name": "my_name",
* // "response": {}
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.domains.detachTrust
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string} params.name Required. The resource domain name, project name, and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
* @param {().DetachTrustRequest} params.requestBody Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
detachTrust(params: Params$Resource$Projects$Locations$Global$Domains$Detachtrust, options: StreamMethodOptions): GaxiosPromise<Readable>;
detachTrust(params?: Params$Resource$Projects$Locations$Global$Domains$Detachtrust, options?: MethodOptions): GaxiosPromise<Schema$Operation>;
detachTrust(params: Params$Resource$Projects$Locations$Global$Domains$Detachtrust, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
detachTrust(params: Params$Resource$Projects$Locations$Global$Domains$Detachtrust, options: MethodOptions | BodyResponseCallback<Schema$Operation>, callback: BodyResponseCallback<Schema$Operation>): void;
detachTrust(params: Params$Resource$Projects$Locations$Global$Domains$Detachtrust, callback: BodyResponseCallback<Schema$Operation>): void;
detachTrust(callback: BodyResponseCallback<Schema$Operation>): void;
/**
* managedidentities.projects.locations.global.domains.get
* @desc Gets information about a domain.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
 * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.domains.get({
* // Required. The domain resource name using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
* name: 'projects/my-project/locations/global/domains/my-domain',
* });
* console.log(res.data);
*
* // Example response
* // {
* // "admin": "my_admin",
* // "authorizedNetworks": [],
* // "createTime": "my_createTime",
* // "fqdn": "my_fqdn",
* // "labels": {},
* // "locations": [],
* // "name": "my_name",
* // "reservedIpRange": "my_reservedIpRange",
* // "state": "my_state",
* // "statusMessage": "my_statusMessage",
* // "trusts": [],
* // "updateTime": "my_updateTime"
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.domains.get
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string} params.name Required. The domain resource name using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
get(params: Params$Resource$Projects$Locations$Global$Domains$Get, options: StreamMethodOptions): GaxiosPromise<Readable>;
get(params?: Params$Resource$Projects$Locations$Global$Domains$Get, options?: MethodOptions): GaxiosPromise<Schema$Domain>;
get(params: Params$Resource$Projects$Locations$Global$Domains$Get, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
get(params: Params$Resource$Projects$Locations$Global$Domains$Get, options: MethodOptions | BodyResponseCallback<Schema$Domain>, callback: BodyResponseCallback<Schema$Domain>): void;
get(params: Params$Resource$Projects$Locations$Global$Domains$Get, callback: BodyResponseCallback<Schema$Domain>): void;
get(callback: BodyResponseCallback<Schema$Domain>): void;
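    /*
     * The StreamMethodOptions overloads above resolve to the raw HTTP body as a
     * Node.js Readable instead of a parsed object. A minimal sketch, assuming
     * StreamMethodOptions is MethodOptions with `responseType: 'stream'` (as in
     * googleapis-common) and the same client setup as the samples above:
     *
     *   const res = await managedidentities.projects.locations.global.domains.get(
     *     {name: 'projects/my-project/locations/global/domains/my-domain'},
     *     {responseType: 'stream'}
     *   );
     *   // res.data is a stream.Readable here, not a Schema$Domain.
     *   res.data.pipe(process.stdout);
     */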
/**
* managedidentities.projects.locations.global.domains.getIamPolicy
* @desc Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
 * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.domains.getIamPolicy(
* {
* // Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
* 'options.requestedPolicyVersion': 'placeholder-value',
* // REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.
* resource: 'projects/my-project/locations/global/domains/my-domain',
* }
* );
* console.log(res.data);
*
* // Example response
* // {
* // "bindings": [],
* // "etag": "my_etag",
* // "version": 0
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.domains.getIamPolicy
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {integer=} params.options.requestedPolicyVersion Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
* @param {string} params.resource_ REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
getIamPolicy(params: Params$Resource$Projects$Locations$Global$Domains$Getiampolicy, options: StreamMethodOptions): GaxiosPromise<Readable>;
getIamPolicy(params?: Params$Resource$Projects$Locations$Global$Domains$Getiampolicy, options?: MethodOptions): GaxiosPromise<Schema$Policy>;
getIamPolicy(params: Params$Resource$Projects$Locations$Global$Domains$Getiampolicy, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
getIamPolicy(params: Params$Resource$Projects$Locations$Global$Domains$Getiampolicy, options: MethodOptions | BodyResponseCallback<Schema$Policy>, callback: BodyResponseCallback<Schema$Policy>): void;
getIamPolicy(params: Params$Resource$Projects$Locations$Global$Domains$Getiampolicy, callback: BodyResponseCallback<Schema$Policy>): void;
getIamPolicy(callback: BodyResponseCallback<Schema$Policy>): void;
/**
* managedidentities.projects.locations.global.domains.list
* @desc Lists domains in a project.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
 * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.domains.list({
 * // Optional. A filter specifying constraints of a list operation. For example, `Domain.fqdn="mydomain.myorganization"`.
* filter: 'placeholder-value',
* // Optional. Specifies the ordering of results. See [Sorting order](https://cloud.google.com/apis/design/design_patterns#sorting_order) for more information.
* orderBy: 'placeholder-value',
* // Optional. The maximum number of items to return. If not specified, a default value of 1000 will be used. Regardless of the page_size value, the response may include a partial list. Callers should rely on a response's next_page_token to determine if there are additional results to list.
* pageSize: 'placeholder-value',
* // Optional. The `next_page_token` value returned from a previous ListDomainsRequest request, if any.
* pageToken: 'placeholder-value',
* // Required. The resource name of the domain location using the form: `projects/{project_id}/locations/global`
* parent: 'projects/my-project/locations/global',
* });
* console.log(res.data);
*
* // Example response
* // {
* // "domains": [],
* // "nextPageToken": "my_nextPageToken",
* // "unreachable": []
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.domains.list
* @memberOf! ()
*
* @param {object} params Parameters for request
 * @param {string=} params.filter Optional. A filter specifying constraints of a list operation. For example, `Domain.fqdn="mydomain.myorganization"`.
* @param {string=} params.orderBy Optional. Specifies the ordering of results. See [Sorting order](https://cloud.google.com/apis/design/design_patterns#sorting_order) for more information.
* @param {integer=} params.pageSize Optional. The maximum number of items to return. If not specified, a default value of 1000 will be used. Regardless of the page_size value, the response may include a partial list. Callers should rely on a response's next_page_token to determine if there are additional results to list.
* @param {string=} params.pageToken Optional. The `next_page_token` value returned from a previous ListDomainsRequest request, if any.
* @param {string} params.parent Required. The resource name of the domain location using the form: `projects/{project_id}/locations/global`
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
list(params: Params$Resource$Projects$Locations$Global$Domains$List, options: StreamMethodOptions): GaxiosPromise<Readable>;
list(params?: Params$Resource$Projects$Locations$Global$Domains$List, options?: MethodOptions): GaxiosPromise<Schema$ListDomainsResponse>;
list(params: Params$Resource$Projects$Locations$Global$Domains$List, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
list(params: Params$Resource$Projects$Locations$Global$Domains$List, options: MethodOptions | BodyResponseCallback<Schema$ListDomainsResponse>, callback: BodyResponseCallback<Schema$ListDomainsResponse>): void;
list(params: Params$Resource$Projects$Locations$Global$Domains$List, callback: BodyResponseCallback<Schema$ListDomainsResponse>): void;
list(callback: BodyResponseCallback<Schema$ListDomainsResponse>): void;
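    /*
     * Listing is paginated: pass each response's nextPageToken back as pageToken
     * until it is absent. A minimal sketch of draining all pages, assuming the same
     * client setup as the samples above (pageSize of 100 is an arbitrary choice):
     *
     *   let pageToken;
     *   do {
     *     const {data} = await managedidentities.projects.locations.global.domains.list({
     *       parent: 'projects/my-project/locations/global',
     *       pageSize: 100,
     *       pageToken,
     *     });
     *     for (const domain of data.domains || []) console.log(domain.fqdn);
     *     pageToken = data.nextPageToken;
     *   } while (pageToken);
     */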
/**
* managedidentities.projects.locations.global.domains.patch
* @desc Updates the metadata and configuration of a domain.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
 * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.domains.patch({
* // Required. The unique name of the domain using the form: `projects/{project_id}/locations/global/domains/{domain_name}`.
* name: 'projects/my-project/locations/global/domains/my-domain',
* // Required. Mask of fields to update. At least one path must be supplied in this field. The elements of the repeated paths field may only include fields from Domain: * `labels` * `locations` * `authorized_networks`
* updateMask: 'placeholder-value',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "admin": "my_admin",
* // "authorizedNetworks": [],
* // "createTime": "my_createTime",
* // "fqdn": "my_fqdn",
* // "labels": {},
* // "locations": [],
* // "name": "my_name",
* // "reservedIpRange": "my_reservedIpRange",
* // "state": "my_state",
* // "statusMessage": "my_statusMessage",
* // "trusts": [],
* // "updateTime": "my_updateTime"
* // }
* },
* });
* console.log(res.data);
*
* // Example response
* // {
* // "done": false,
* // "error": {},
* // "metadata": {},
* // "name": "my_name",
* // "response": {}
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.domains.patch
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string} params.name Required. The unique name of the domain using the form: `projects/{project_id}/locations/global/domains/{domain_name}`.
* @param {string=} params.updateMask Required. Mask of fields to update. At least one path must be supplied in this field. The elements of the repeated paths field may only include fields from Domain: * `labels` * `locations` * `authorized_networks`
* @param {().Domain} params.requestBody Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
patch(params: Params$Resource$Projects$Locations$Global$Domains$Patch, options: StreamMethodOptions): GaxiosPromise<Readable>;
patch(params?: Params$Resource$Projects$Locations$Global$Domains$Patch, options?: MethodOptions): GaxiosPromise<Schema$Operation>;
patch(params: Params$Resource$Projects$Locations$Global$Domains$Patch, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
patch(params: Params$Resource$Projects$Locations$Global$Domains$Patch, options: MethodOptions | BodyResponseCallback<Schema$Operation>, callback: BodyResponseCallback<Schema$Operation>): void;
patch(params: Params$Resource$Projects$Locations$Global$Domains$Patch, callback: BodyResponseCallback<Schema$Operation>): void;
patch(callback: BodyResponseCallback<Schema$Operation>): void;
/**
* managedidentities.projects.locations.global.domains.reconfigureTrust
* @desc Updates the DNS conditional forwarder.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
 * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.domains.reconfigureTrust(
* {
* // Required. The resource domain name, project name and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
* name: 'projects/my-project/locations/global/domains/my-domain',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "targetDnsIpAddresses": [],
* // "targetDomainName": "my_targetDomainName"
* // }
* },
* }
* );
* console.log(res.data);
*
* // Example response
* // {
* // "done": false,
* // "error": {},
* // "metadata": {},
* // "name": "my_name",
* // "response": {}
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.domains.reconfigureTrust
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string} params.name Required. The resource domain name, project name and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
* @param {().ReconfigureTrustRequest} params.requestBody Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
reconfigureTrust(params: Params$Resource$Projects$Locations$Global$Domains$Reconfiguretrust, options: StreamMethodOptions): GaxiosPromise<Readable>;
reconfigureTrust(params?: Params$Resource$Projects$Locations$Global$Domains$Reconfiguretrust, options?: MethodOptions): GaxiosPromise<Schema$Operation>;
reconfigureTrust(params: Params$Resource$Projects$Locations$Global$Domains$Reconfiguretrust, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
reconfigureTrust(params: Params$Resource$Projects$Locations$Global$Domains$Reconfiguretrust, options: MethodOptions | BodyResponseCallback<Schema$Operation>, callback: BodyResponseCallback<Schema$Operation>): void;
reconfigureTrust(params: Params$Resource$Projects$Locations$Global$Domains$Reconfiguretrust, callback: BodyResponseCallback<Schema$Operation>): void;
reconfigureTrust(callback: BodyResponseCallback<Schema$Operation>): void;
/**
* managedidentities.projects.locations.global.domains.resetAdminPassword
* @desc Resets a domain's administrator password.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
 * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.domains.resetAdminPassword(
* {
* // Required. The domain resource name using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
* name: 'projects/my-project/locations/global/domains/my-domain',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {}
* },
* }
* );
* console.log(res.data);
*
* // Example response
* // {
* // "password": "my_password"
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.domains.resetAdminPassword
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string} params.name Required. The domain resource name using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
* @param {().ResetAdminPasswordRequest} params.requestBody Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
resetAdminPassword(params: Params$Resource$Projects$Locations$Global$Domains$Resetadminpassword, options: StreamMethodOptions): GaxiosPromise<Readable>;
resetAdminPassword(params?: Params$Resource$Projects$Locations$Global$Domains$Resetadminpassword, options?: MethodOptions): GaxiosPromise<Schema$ResetAdminPasswordResponse>;
resetAdminPassword(params: Params$Resource$Projects$Locations$Global$Domains$Resetadminpassword, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
resetAdminPassword(params: Params$Resource$Projects$Locations$Global$Domains$Resetadminpassword, options: MethodOptions | BodyResponseCallback<Schema$ResetAdminPasswordResponse>, callback: BodyResponseCallback<Schema$ResetAdminPasswordResponse>): void;
resetAdminPassword(params: Params$Resource$Projects$Locations$Global$Domains$Resetadminpassword, callback: BodyResponseCallback<Schema$ResetAdminPasswordResponse>): void;
resetAdminPassword(callback: BodyResponseCallback<Schema$ResetAdminPasswordResponse>): void;
/**
* managedidentities.projects.locations.global.domains.setIamPolicy
* @desc Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
 * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.domains.setIamPolicy(
* {
* // REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.
* resource: 'projects/my-project/locations/global/domains/my-domain',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "policy": {}
* // }
* },
* }
* );
* console.log(res.data);
*
* // Example response
* // {
* // "bindings": [],
* // "etag": "my_etag",
* // "version": 0
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.domains.setIamPolicy
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string} params.resource_ REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.
* @param {().SetIamPolicyRequest} params.requestBody Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
setIamPolicy(params: Params$Resource$Projects$Locations$Global$Domains$Setiampolicy, options: StreamMethodOptions): GaxiosPromise<Readable>;
setIamPolicy(params?: Params$Resource$Projects$Locations$Global$Domains$Setiampolicy, options?: MethodOptions): GaxiosPromise<Schema$Policy>;
setIamPolicy(params: Params$Resource$Projects$Locations$Global$Domains$Setiampolicy, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
setIamPolicy(params: Params$Resource$Projects$Locations$Global$Domains$Setiampolicy, options: MethodOptions | BodyResponseCallback<Schema$Policy>, callback: BodyResponseCallback<Schema$Policy>): void;
setIamPolicy(params: Params$Resource$Projects$Locations$Global$Domains$Setiampolicy, callback: BodyResponseCallback<Schema$Policy>): void;
setIamPolicy(callback: BodyResponseCallback<Schema$Policy>): void;
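    /*
     * setIamPolicy replaces the entire policy, so the usual pattern is
     * read-modify-write: fetch the current policy (its etag guards against
     * concurrent edits), mutate it, and write it back. A minimal sketch assuming
     * the same client setup as the samples above; the role and member are
     * hypothetical values:
     *
     *   const resource = 'projects/my-project/locations/global/domains/my-domain';
     *   const domains = managedidentities.projects.locations.global.domains;
     *   const {data: policy} = await domains.getIamPolicy({resource});
     *   policy.bindings = policy.bindings || [];
     *   policy.bindings.push({role: 'roles/viewer', members: ['user:alice@example.com']});
     *   // policy still carries the etag from getIamPolicy, so a concurrent
     *   // modification will cause this write to fail rather than clobber it.
     *   await domains.setIamPolicy({resource, requestBody: {policy}});
     */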
/**
* managedidentities.projects.locations.global.domains.testIamPermissions
* @desc Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
 * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.domains.testIamPermissions(
* {
* // REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.
* resource: 'projects/my-project/locations/global/domains/my-domain',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "permissions": []
* // }
* },
* }
* );
* console.log(res.data);
*
* // Example response
* // {
* // "permissions": []
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.domains.testIamPermissions
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string} params.resource_ REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.
* @param {().TestIamPermissionsRequest} params.requestBody Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
testIamPermissions(params: Params$Resource$Projects$Locations$Global$Domains$Testiampermissions, options: StreamMethodOptions): GaxiosPromise<Readable>;
testIamPermissions(params?: Params$Resource$Projects$Locations$Global$Domains$Testiampermissions, options?: MethodOptions): GaxiosPromise<Schema$TestIamPermissionsResponse>;
testIamPermissions(params: Params$Resource$Projects$Locations$Global$Domains$Testiampermissions, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
testIamPermissions(params: Params$Resource$Projects$Locations$Global$Domains$Testiampermissions, options: MethodOptions | BodyResponseCallback<Schema$TestIamPermissionsResponse>, callback: BodyResponseCallback<Schema$TestIamPermissionsResponse>): void;
testIamPermissions(params: Params$Resource$Projects$Locations$Global$Domains$Testiampermissions, callback: BodyResponseCallback<Schema$TestIamPermissionsResponse>): void;
testIamPermissions(callback: BodyResponseCallback<Schema$TestIamPermissionsResponse>): void;
/**
* managedidentities.projects.locations.global.domains.validateTrust
* @desc Validates a trust state, that the target domain is reachable, and that the target domain is able to accept incoming trust requests.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
 * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.domains.validateTrust(
* {
* // Required. The resource domain name, project name, and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
* name: 'projects/my-project/locations/global/domains/my-domain',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "trust": {}
* // }
* },
* }
* );
* console.log(res.data);
*
* // Example response
* // {
* // "done": false,
* // "error": {},
* // "metadata": {},
* // "name": "my_name",
* // "response": {}
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.domains.validateTrust
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string} params.name Required. The resource domain name, project name, and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
* @param {().ValidateTrustRequest} params.requestBody Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
validateTrust(params: Params$Resource$Projects$Locations$Global$Domains$Validatetrust, options: StreamMethodOptions): GaxiosPromise<Readable>;
validateTrust(params?: Params$Resource$Projects$Locations$Global$Domains$Validatetrust, options?: MethodOptions): GaxiosPromise<Schema$Operation>;
validateTrust(params: Params$Resource$Projects$Locations$Global$Domains$Validatetrust, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
validateTrust(params: Params$Resource$Projects$Locations$Global$Domains$Validatetrust, options: MethodOptions | BodyResponseCallback<Schema$Operation>, callback: BodyResponseCallback<Schema$Operation>): void;
validateTrust(params: Params$Resource$Projects$Locations$Global$Domains$Validatetrust, callback: BodyResponseCallback<Schema$Operation>): void;
validateTrust(callback: BodyResponseCallback<Schema$Operation>): void;
}
export interface Params$Resource$Projects$Locations$Global$Domains$Attachtrust extends StandardParameters {
/**
* Required. The resource domain name, project name and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
*/
name?: string;
/**
* Request body metadata
*/
requestBody?: Schema$AttachTrustRequest;
}
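/*
 * These Params$... interfaces can also be used to type request objects built
 * ahead of the call. A minimal sketch, assuming this module's enclosing
 * namespace is managedidentities_v1 as exported by the googleapis package:
 *
 *   import {google, managedidentities_v1} from 'googleapis';
 *
 *   const params: managedidentities_v1.Params$Resource$Projects$Locations$Global$Domains$Attachtrust = {
 *     name: 'projects/my-project/locations/global/domains/my-domain',
 *     requestBody: {trust: {}},
 *   };
 *   await google.managedidentities('v1').projects.locations.global.domains.attachTrust(params);
 */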
export interface Params$Resource$Projects$Locations$Global$Domains$Create extends StandardParameters {
/**
 * Required. The fully qualified domain name, e.g. mydomain.myorganization.com, with the following restrictions: * Must contain only lowercase letters, numbers, periods, and hyphens. * Must start with a letter. * Must contain between 2-64 characters. * Must end with a number or a letter. * Must not start with a period. * The first segment (mydomain in the example above) must not exceed 15 characters. * The last segment cannot be fully numeric. * Must be unique within the customer project.
*/
domainName?: string;
/**
* Required. The resource project name and location using the form: `projects/{project_id}/locations/global`
*/
parent?: string;
/**
* Request body metadata
*/
requestBody?: Schema$Domain;
}
export interface Params$Resource$Projects$Locations$Global$Domains$Delete extends StandardParameters {
/**
* Required. The domain resource name using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
*/
name?: string;
}
export interface Params$Resource$Projects$Locations$Global$Domains$Detachtrust extends StandardParameters {
/**
* Required. The resource domain name, project name, and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
*/
name?: string;
/**
* Request body metadata
*/
requestBody?: Schema$DetachTrustRequest;
}
export interface Params$Resource$Projects$Locations$Global$Domains$Get extends StandardParameters {
/**
* Required. The domain resource name using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
*/
name?: string;
}
export interface Params$Resource$Projects$Locations$Global$Domains$Getiampolicy extends StandardParameters {
/**
* Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
*/
'options.requestedPolicyVersion'?: number;
/**
* REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.
*/
resource?: string;
}
export interface Params$Resource$Projects$Locations$Global$Domains$List extends StandardParameters {
/**
 * Optional. A filter specifying constraints of a list operation. For example, `Domain.fqdn="mydomain.myorganization"`.
*/
filter?: string;
/**
* Optional. Specifies the ordering of results. See [Sorting order](https://cloud.google.com/apis/design/design_patterns#sorting_order) for more information.
*/
orderBy?: string;
/**
* Optional. The maximum number of items to return. If not specified, a default value of 1000 will be used. Regardless of the page_size value, the response may include a partial list. Callers should rely on a response's next_page_token to determine if there are additional results to list.
*/
pageSize?: number;
/**
* Optional. The `next_page_token` value returned from a previous ListDomainsRequest request, if any.
*/
pageToken?: string;
/**
* Required. The resource name of the domain location using the form: `projects/{project_id}/locations/global`
*/
parent?: string;
}
export interface Params$Resource$Projects$Locations$Global$Domains$Patch extends StandardParameters {
/**
* Required. The unique name of the domain using the form: `projects/{project_id}/locations/global/domains/{domain_name}`.
*/
name?: string;
/**
* Required. Mask of fields to update. At least one path must be supplied in this field. The elements of the repeated paths field may only include fields from Domain: * `labels` * `locations` * `authorized_networks`
*/
updateMask?: string;
/**
* Request body metadata
*/
requestBody?: Schema$Domain;
}
export interface Params$Resource$Projects$Locations$Global$Domains$Reconfiguretrust extends StandardParameters {
/**
* Required. The resource domain name, project name and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
*/
name?: string;
/**
* Request body metadata
*/
requestBody?: Schema$ReconfigureTrustRequest;
}
export interface Params$Resource$Projects$Locations$Global$Domains$Resetadminpassword extends StandardParameters {
/**
* Required. The domain resource name using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
*/
name?: string;
/**
* Request body metadata
*/
requestBody?: Schema$ResetAdminPasswordRequest;
}
export interface Params$Resource$Projects$Locations$Global$Domains$Setiampolicy extends StandardParameters {
/**
* REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.
*/
resource?: string;
/**
* Request body metadata
*/
requestBody?: Schema$SetIamPolicyRequest;
}
export interface Params$Resource$Projects$Locations$Global$Domains$Testiampermissions extends StandardParameters {
/**
* REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.
*/
resource?: string;
/**
* Request body metadata
*/
requestBody?: Schema$TestIamPermissionsRequest;
}
export interface Params$Resource$Projects$Locations$Global$Domains$Validatetrust extends StandardParameters {
/**
* Required. The resource domain name, project name, and location using the form: `projects/{project_id}/locations/global/domains/{domain_name}`
*/
name?: string;
/**
* Request body metadata
*/
requestBody?: Schema$ValidateTrustRequest;
}
export class Resource$Projects$Locations$Global$Operations {
context: APIRequestContext;
constructor(context: APIRequestContext);
/**
* managedidentities.projects.locations.global.operations.cancel
* @desc Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
 * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.operations.cancel(
* {
* // The name of the operation resource to be cancelled.
* name: 'projects/my-project/locations/global/operations/my-operation',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {}
* },
* }
* );
* console.log(res.data);
*
* // Example response
* // {}
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.operations.cancel
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string} params.name The name of the operation resource to be cancelled.
* @param {().CancelOperationRequest} params.requestBody Request body data
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
cancel(params: Params$Resource$Projects$Locations$Global$Operations$Cancel, options: StreamMethodOptions): GaxiosPromise<Readable>;
cancel(params?: Params$Resource$Projects$Locations$Global$Operations$Cancel, options?: MethodOptions): GaxiosPromise<Schema$Empty>;
cancel(params: Params$Resource$Projects$Locations$Global$Operations$Cancel, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
cancel(params: Params$Resource$Projects$Locations$Global$Operations$Cancel, options: MethodOptions | BodyResponseCallback<Schema$Empty>, callback: BodyResponseCallback<Schema$Empty>): void;
cancel(params: Params$Resource$Projects$Locations$Global$Operations$Cancel, callback: BodyResponseCallback<Schema$Empty>): void;
cancel(callback: BodyResponseCallback<Schema$Empty>): void;
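    /*
     * Cancellation is best-effort, so the description above recommends
     * re-checking the operation afterwards. A minimal sketch, assuming the same
     * client setup as the samples above (opName is a placeholder):
     *
     *   const operations = managedidentities.projects.locations.global.operations;
     *   await operations.cancel({name: opName, requestBody: {}});
     *   const {data} = await operations.get({name: opName});
     *   // On successful cancellation, data.done is true and data.error.code is 1 (Code.CANCELLED).
     */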
/**
* managedidentities.projects.locations.global.operations.delete
* @desc Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
 * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.operations.delete(
* {
* // The name of the operation resource to be deleted.
* name: 'projects/my-project/locations/global/operations/my-operation',
* }
* );
* console.log(res.data);
*
* // Example response
* // {}
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.operations.delete
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string} params.name The name of the operation resource to be deleted.
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
delete(params: Params$Resource$Projects$Locations$Global$Operations$Delete, options: StreamMethodOptions): GaxiosPromise<Readable>;
delete(params?: Params$Resource$Projects$Locations$Global$Operations$Delete, options?: MethodOptions): GaxiosPromise<Schema$Empty>;
delete(params: Params$Resource$Projects$Locations$Global$Operations$Delete, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
delete(params: Params$Resource$Projects$Locations$Global$Operations$Delete, options: MethodOptions | BodyResponseCallback<Schema$Empty>, callback: BodyResponseCallback<Schema$Empty>): void;
delete(params: Params$Resource$Projects$Locations$Global$Operations$Delete, callback: BodyResponseCallback<Schema$Empty>): void;
delete(callback: BodyResponseCallback<Schema$Empty>): void;
/**
* managedidentities.projects.locations.global.operations.get
* @desc Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
 * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await managedidentities.projects.locations.global.operations.get({
* // The name of the operation resource.
* name: 'projects/my-project/locations/global/operations/my-operation',
* });
* console.log(res.data);
*
* // Example response
* // {
* // "done": false,
* // "error": {},
* // "metadata": {},
* // "name": "my_name",
* // "response": {}
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.operations.get
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string} params.name The name of the operation resource.
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
get(params: Params$Resource$Projects$Locations$Global$Operations$Get, options: StreamMethodOptions): GaxiosPromise<Readable>;
get(params?: Params$Resource$Projects$Locations$Global$Operations$Get, options?: MethodOptions): GaxiosPromise<Schema$Operation>;
get(params: Params$Resource$Projects$Locations$Global$Operations$Get, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
get(params: Params$Resource$Projects$Locations$Global$Operations$Get, options: MethodOptions | BodyResponseCallback<Schema$Operation>, callback: BodyResponseCallback<Schema$Operation>): void;
get(params: Params$Resource$Projects$Locations$Global$Operations$Get, callback: BodyResponseCallback<Schema$Operation>): void;
get(callback: BodyResponseCallback<Schema$Operation>): void;
/**
* managedidentities.projects.locations.global.operations.list
* @desc Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/x/operations`. To override the binding, API services can add a binding such as `"/v1/{name=users/x}/operations"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
* @example
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/managedidentities.googleapis.com
* // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const managedidentities = google.managedidentities('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Call the API
* const res = await managedidentities.projects.locations.global.operations.list(
* {
* // The standard list filter.
* filter: 'placeholder-value',
* // The name of the operation's parent resource.
* name: 'projects/my-project/locations/global/operations',
* // The standard list page size.
* pageSize: 'placeholder-value',
* // The standard list page token.
* pageToken: 'placeholder-value',
* }
* );
* console.log(res.data);
*
* // Example response
* // {
* // "nextPageToken": "my_nextPageToken",
* // "operations": []
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* @alias managedidentities.projects.locations.global.operations.list
* @memberOf! ()
*
* @param {object} params Parameters for request
* @param {string=} params.filter The standard list filter.
* @param {string} params.name The name of the operation's parent resource.
* @param {integer=} params.pageSize The standard list page size.
* @param {string=} params.pageToken The standard list page token.
* @param {object} [options] Optionally override request options, such as `url`, `method`, and `encoding`.
* @param {callback} callback The callback that handles the response.
* @return {object} Request object
*/
list(params: Params$Resource$Projects$Locations$Global$Operations$List, options: StreamMethodOptions): GaxiosPromise<Readable>;
list(params?: Params$Resource$Projects$Locations$Global$Operations$List, options?: MethodOptions): GaxiosPromise<Schema$ListOperationsResponse>;
list(params: Params$Resource$Projects$Locations$Global$Operations$List, options: StreamMethodOptions | BodyResponseCallback<Readable>, callback: BodyResponseCallback<Readable>): void;
list(params: Params$Resource$Projects$Locations$Global$Operations$List, options: MethodOptions | BodyResponseCallback<Schema$ListOperationsResponse>, callback: BodyResponseCallback<Schema$ListOperationsResponse>): void;
list(params: Params$Resource$Projects$Locations$Global$Operations$List, callback: BodyResponseCallback<Schema$ListOperationsResponse>): void;
list(callback: BodyResponseCallback<Schema$ListOperationsResponse>): void;
}
export interface Params$Resource$Projects$Locations$Global$Operations$Cancel extends StandardParameters {
/**
* The name of the operation resource to be cancelled.
*/
name?: string;
/**
* Request body metadata
*/
requestBody?: Schema$CancelOperationRequest;
}
export interface Params$Resource$Projects$Locations$Global$Operations$Delete extends StandardParameters {
/**
* The name of the operation resource to be deleted.
*/
name?: string;
}
export interface Params$Resource$Projects$Locations$Global$Operations$Get extends StandardParameters {
/**
* The name of the operation resource.
*/
name?: string;
}
export interface Params$Resource$Projects$Locations$Global$Operations$List extends StandardParameters {
/**
* The standard list filter.
*/
filter?: string;
/**
* The name of the operation's parent resource.
*/
name?: string;
/**
* The standard list page size.
*/
pageSize?: number;
/**
* The standard list page token.
*/
pageToken?: string;
}
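/*
* A hedged pagination sketch built from the fields of
* Params$Resource$Projects$Locations$Global$Operations$List; the loop shape is
* an editorial assumption, not generated sample code.
*
*   let pageToken: string | undefined;
*   do {
*     const res = await managedidentities.projects.locations.global.operations.list({
*       name: 'projects/my-project/locations/global/operations',
*       pageSize: 100,
*       pageToken,
*     });
*     for (const op of res.data.operations ?? []) console.log(op.name);
*     pageToken = res.data.nextPageToken ?? undefined;
*   } while (pageToken);
*/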
export {};
} | */ |
invalidate_all_refresh_tokens_response.go | package invalidateallrefreshtokens
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// InvalidateAllRefreshTokensResponse provides operations to call the invalidateAllRefreshTokens method.
type InvalidateAllRefreshTokensResponse struct {
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// The value property
value *bool
}
// NewInvalidateAllRefreshTokensResponse instantiates a new invalidateAllRefreshTokensResponse and sets the default values.
func NewInvalidateAllRefreshTokensResponse()(*InvalidateAllRefreshTokensResponse) {
m := &InvalidateAllRefreshTokensResponse{
}
m.SetAdditionalData(make(map[string]interface{}));
return m
}
// CreateInvalidateAllRefreshTokensResponseFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateInvalidateAllRefreshTokensResponseFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) |
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *InvalidateAllRefreshTokensResponse) GetAdditionalData()(map[string]interface{}) {
if m == nil {
return nil
} else {
return m.additionalData
}
}
// GetFieldDeserializers returns the deserialization information for the current model
func (m *InvalidateAllRefreshTokensResponse) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
res["value"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetBoolValue()
if err != nil {
return err
}
if val != nil {
m.SetValue(val)
}
return nil
}
return res
}
// GetValue gets the value property.
func (m *InvalidateAllRefreshTokensResponse) GetValue()(*bool) {
if m == nil {
return nil
} else {
return m.value
}
}
// Serialize serializes information about the current object
func (m *InvalidateAllRefreshTokensResponse) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
{
err := writer.WriteBoolValue("value", m.GetValue())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *InvalidateAllRefreshTokensResponse) SetAdditionalData(value map[string]interface{})() {
if m != nil {
m.additionalData = value
}
}
// SetValue sets the value property.
func (m *InvalidateAllRefreshTokensResponse) SetValue(value *bool)() {
if m != nil {
m.value = value
}
}
| {
return NewInvalidateAllRefreshTokensResponse(), nil
} |
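// A hedged round-trip sketch for the factory and Serialize above. The JSON
// parse-node constructor comes from github.com/microsoft/kiota-serialization-json-go;
// exact names and signatures vary between kiota releases, so treat this as
// illustrative rather than the package's documented usage:
//
//	node, _ := jsonserialization.NewJsonParseNode([]byte(`{"value": true}`))
//	parsable, _ := node.GetObjectValue(CreateInvalidateAllRefreshTokensResponseFromDiscriminatorValue)
//	resp := parsable.(*InvalidateAllRefreshTokensResponse)
//	if v := resp.GetValue(); v != nil && *v {
//		// every refresh token for the user has been invalidated
//	}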
about.component.spec.ts | import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { AboutComponent } from './about.component';
describe('AboutComponent', () => {
let component: AboutComponent;
let fixture: ComponentFixture<AboutComponent>;
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [AboutComponent]
}).compileComponents();
}));
beforeEach(() => {
// Create component and test fixture
fixture = TestBed.createComponent(AboutComponent);
// Instantiate the test component from the fixture
component = fixture.componentInstance;
// Detect the changes in the component
fixture.detectChanges();
});
| it('the page title should be "About - Essence Global"', () => {
const title = component.pageTitle.getTitle();
expect(title).toEqual('About - Essence Global');
});
}); | it('should create', () => {
expect(component).toBeTruthy();
});
|
mod.rs | use crate::{
database::entity::settings,
models::{AppInfo, Response},
state::State,
};
use actix_web::{get, web, HttpResponse, Responder, Scope};
use sea_orm::EntityTrait;
pub mod admin;
pub mod application;
pub mod auth;
pub mod file;
pub mod user;
pub fn | () -> Scope {
web::scope("/").service(info)
}
#[get("info")]
async fn info(state: web::Data<State>) -> Response<impl Responder> {
let settings = settings::Entity::find_by_id(true)
.one(&state.database)
.await?
.unwrap();
Ok(HttpResponse::Ok().json(AppInfo::new(
settings::Model {
one_row_enforce: true,
app_name: settings.app_name,
app_description: settings.app_description,
color: settings.color,
},
state.invite_only,
state.smtp_client.is_some(),
)))
}
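// A hedged wiring sketch for the scope above; building `state` is
// application-specific and the value shown here is hypothetical:
//
//     App::new()
//         .app_data(web::Data::new(state))
//         .service(get_routes());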
| get_routes |
mergemessages.py | import os
import sys
import polib
import django
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import translation
# This is almost a management command, but we do not want it to be added to the django-admin namespace for the simple
# reason that it is not expected to be executed by package users, only by the package maintainers.
# We use a thin __main__ wrapper to make it work (ish) like a management command.
MODULE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'internationalflavor')
LOCALE_PATH = os.path.join(MODULE_PATH, 'locale')
def mark_entry(entry):
if 'fuzzy' in entry.flags:
entry.flags.remove('fuzzy')
entry.comment = "auto-generated from CLDR -- see docs before updating"
class Command(BaseCommand):
help = 'Updates messages in the PO file with messages from the CLDR'
def handle(self, *args, **options): | if options['l']:
languages = [(options['l'], dict(settings.LANGUAGES)[options['l']])]
else:
languages = settings.LANGUAGES
for lc, language in languages:
try:
self.stdout.write("Parsing language %s [%s]" % (language, lc))
# Get some files ready
# The pofile is our combined file
pofile = polib.pofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django.po'))
# The cldrfile contain only messages from CLDR
cldrfile = polib.pofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'cldr.po'))
# The djangofile will only contain messages not from CLDR
try:
djangofile = polib.pofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django_only.po'))
except IOError:
djangofile = polib.POFile()
djangofile.metadata = pofile.metadata
djangofile.header = pofile.header
# Merge all non-django messages to the djangofile
django_only_messages = polib.POFile()
for entry in pofile:
if cldrfile.find(entry.msgid) is None and not entry.obsolete and 'fuzzy' not in entry.flags:
django_only_messages.append(entry)
djangofile.merge(django_only_messages)
djangofile.save(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django_only.po'))
# Add all entries from the CLDR file to the combined file
for entry in cldrfile:
e = pofile.find(entry.msgid)
if e is None:
e = polib.POEntry()
e.msgid = entry.msgid
pofile.append(e)
elif 'manual' in e.tcomment.lower():
self.stdout.write("-- Skipping %s of %s" % (e.msgid, language))
continue
e.obsolete = False
e.msgstr = entry.msgstr
e.comment = entry.comment
if 'fuzzy' in e.flags:
e.flags.remove('fuzzy')
# Add entries from the Django file to the combined file
for entry in djangofile:
e = pofile.find(entry.msgid)
# If not in main file, then skip
if e is None:
continue
e.obsolete = entry.obsolete
e.msgstr = entry.msgstr
e.comment = entry.comment
e.flags = entry.flags
# We copy over the header and metadata from the djangofile.
pofile.metadata = djangofile.metadata
pofile.header = djangofile.header
pofile.save()
pofile.save_as_mofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django.mo'))
except IOError as e:
self.stderr.write("Error while handling %s: %s (possibly no valid .po file)" % (language, e))
except Exception as e:
self.stderr.write("Error while handling %s: %s" % (language, e))
def add_arguments(self, parser):
parser.add_argument('-l')
if __name__ == '__main__':
settings.configure()
django.setup()
Command().run_from_argv(["django-admin.py", "mergemessages"] + sys.argv[1:]) | translation.deactivate_all()
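# A hedged invocation sketch: the __main__ wrapper above lets maintainers run
# this module directly instead of through django-admin. The path below is an
# assumption about where the script sits in the repository; `-l nl` restricts
# the merge to a single language code.
#
#   python mergemessages.py -l nl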
|
0003_set_site_domain_and_name.py | """
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
|
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID, defaults={"domain": "example.com", "name": "example.com"}
)
class Migration(migrations.Migration):
dependencies = [("sites", "0002_alter_domain_unique")]
operations = [migrations.RunPython(update_site_forward, update_site_backward)]
| """Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "127.0.0.1",
"name": "plaid_project",
},
) |
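# A hedged note, not part of the original migration: because RunPython is given
# both a forward and a backward function, the data change is reversible; under
# standard Django semantics, `python manage.py migrate sites 0002_alter_domain_unique`
# unapplies this migration via update_site_backward.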
api.py | #!/usr/bin/env python3 | import pyeapi
from netests import log
from nornir.core.task import Result
from netests.constants import PING_DATA_HOST_KEY
from netests.converters.ping.ping_validator import _raise_exception_on_ping_cmd
def _arista_ping_api_exec(task):
c = pyeapi.connect(
transport=task.host.get('secure_api', 'https'),
host=task.host.hostname,
username=task.host.username,
password=task.host.password,
port=task.host.port
)
result = True
for p in task.host[PING_DATA_HOST_KEY].ping_lst:
try:
output = c.execute(
[
"enable",
f"ping vrf {p.vrf} {p.ip_address} repeat 1 timeout 2"
]
)
except Exception:
# Skip this entry when the API call itself fails: `output` would
# otherwise be unbound (or stale from a previous iteration) below.
continue
log.debug(
"\n"
"Execute the following ping command on Arista API\n"
"[ enable, "
f"ping vrf {p.vrf} {p.ip_address} repeat 1 timeout 2 ]\n"
"From the following PING object:\n"
f"{p.to_json()}\n"
"Result is:\n"
f"{output}"
)
r = arista_api_validate_output(
output=output,
hostname=task.host.name,
platform=task.host.platform,
connexion=task.host.get('connexion', 'ssh'),
ping_print=p.to_json(),
ping_works=p.works,
)
if r is False:
result = False
return Result(host=task.host, result=result)
def arista_api_validate_output(
output: dict,
hostname: str,
platform: str,
connexion: str,
ping_print: str,
ping_works: bool
) -> bool:
if isinstance(output, dict) and 'result' in output:
for d in output.get('result'):
if 'messages' in d:
return _raise_exception_on_ping_cmd(
output=d.get('messages')[0],
hostname=hostname,
platform=platform,
connexion=connexion,
ping_line=ping_print,
must_work=ping_works
) | # -*- coding: utf-8 -*-
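# A hedged sketch of the eAPI payload shape that arista_api_validate_output
# expects; the field layout is inferred from the checks above and the message
# text is invented for illustration, not taken from Arista documentation:
#
#   output = {
#       "result": [
#           {},  # response to the "enable" command
#           {"messages": ["PING 10.0.0.1 (10.0.0.1): 1 packets transmitted..."]},
#       ]
#   }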
|
task1.rs | use std::fs;
fn main() | {
let contents = fs::read_to_string("input.txt").expect("Something went wrong reading the file");
let lines = contents.trim().lines();
let mut depth = 0;
let mut position = 0;
for line in lines {
let elements: Vec<&str> = line.split(" ").collect();
let magnitude = elements[1].parse::<i32>().unwrap();
match elements[0] {
"forward" => position += magnitude,
"down" => depth += magnitude,
"up" => depth -= magnitude,
_ => panic!("Unknown action: {}", elements[0]),
}
}
println!("Position: {}, Depth: {}", position, depth);
println!("Score: {}", position * depth);
} |