file_name | prefix | suffix | middle
---|---|---|---
local_wallet.rs | use crate::{Infallible, Signer, SigningKey, VerifyingKey};
use async_trait::async_trait;
use starknet_core::{
crypto::{EcdsaSignError, Signature},
types::FieldElement,
};
pub struct LocalWallet {
private_key: SigningKey,
}
#[derive(Debug, thiserror::Error)]
pub enum SignError {
#[error(transparent)]
EcdsaSignError(EcdsaSignError),
}
impl LocalWallet {
pub fn from_signing_key(key: SigningKey) -> Self {
key.into()
}
}
#[async_trait]
impl Signer for LocalWallet {
type GetPublicKeyError = Infallible;
type SignError = SignError;
async fn get_public_key(&self) -> Result<VerifyingKey, Self::GetPublicKeyError> {
Ok(self.private_key.verifying_key())
}
async fn sign_hash(&self, hash: &FieldElement) -> Result<Signature, Self::SignError> {
Ok(self.private_key.sign(hash)?)
}
}
impl From<SigningKey> for LocalWallet { | fn from(value: SigningKey) -> Self {
Self { private_key: value }
}
}
impl From<EcdsaSignError> for SignError {
fn from(value: EcdsaSignError) -> Self {
Self::EcdsaSignError(value)
}
} | |
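A minimal usage sketch for the assembled LocalWallet. `SigningKey::from_random` and `FieldElement::from_hex_be` are assumed constructors from typical starknet-rs builds, not part of the snippet above, and the hash value is illustrative:

// Sketch only; the constructor names below are assumptions.
async fn sign_demo() -> Result<(), SignError> {
    let wallet = LocalWallet::from_signing_key(SigningKey::from_random());
    let hash = FieldElement::from_hex_be("0x1234").unwrap();
    let signature = wallet.sign_hash(&hash).await?;
    println!("r = {}, s = {}", signature.r, signature.s);
    Ok(())
}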
model_capability_endpoint_descriptor_all_of.go | /*
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document.
API version: 1.0.9-5517
Contact: [email protected]
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package intersight
import (
"encoding/json"
)
// CapabilityEndpointDescriptorAllOf Definition of the list of properties defined in 'capability.EndpointDescriptor', excluding properties defined in parent classes.
type CapabilityEndpointDescriptorAllOf struct {
// The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data. The enum values provide the list of concrete types that can be instantiated from this abstract type.
ClassId string `json:"ClassId"`
// The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property. The enum values provide the list of concrete types that can be instantiated from this abstract type.
ObjectType string `json:"ObjectType"`
// Detailed information about the endpoint.
Description *string `json:"Description,omitempty"`
// The model of the endpoint, for which this capability information is applicable.
Model *string `json:"Model,omitempty"`
// The vendor of the endpoint, for which this capability information is applicable.
Vendor *string `json:"Vendor,omitempty"`
// The firmware or software version of the endpoint, for which this capability information is applicable.
Version *string `json:"Version,omitempty"`
// An array of relationships to capabilityCapability resources.
Capabilities []CapabilityCapabilityRelationship `json:"Capabilities,omitempty"`
AdditionalProperties map[string]interface{}
}
type _CapabilityEndpointDescriptorAllOf CapabilityEndpointDescriptorAllOf
// NewCapabilityEndpointDescriptorAllOf instantiates a new CapabilityEndpointDescriptorAllOf object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewCapabilityEndpointDescriptorAllOf(classId string, objectType string) *CapabilityEndpointDescriptorAllOf {
this := CapabilityEndpointDescriptorAllOf{}
this.ClassId = classId
this.ObjectType = objectType
return &this
}
// NewCapabilityEndpointDescriptorAllOfWithDefaults instantiates a new CapabilityEndpointDescriptorAllOf object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewCapabilityEndpointDescriptorAllOfWithDefaults() *CapabilityEndpointDescriptorAllOf {
this := CapabilityEndpointDescriptorAllOf{}
return &this
}
// GetClassId returns the ClassId field value
func (o *CapabilityEndpointDescriptorAllOf) GetClassId() string {
if o == nil {
var ret string
return ret
}
return o.ClassId
}
// GetClassIdOk returns a tuple with the ClassId field value
// and a boolean to check if the value has been set.
func (o *CapabilityEndpointDescriptorAllOf) GetClassIdOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.ClassId, true
}
// SetClassId sets field value
func (o *CapabilityEndpointDescriptorAllOf) SetClassId(v string) {
o.ClassId = v
}
// GetObjectType returns the ObjectType field value
func (o *CapabilityEndpointDescriptorAllOf) GetObjectType() string {
if o == nil {
var ret string
return ret
}
return o.ObjectType
}
// GetObjectTypeOk returns a tuple with the ObjectType field value
// and a boolean to check if the value has been set.
func (o *CapabilityEndpointDescriptorAllOf) GetObjectTypeOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.ObjectType, true
}
// SetObjectType sets field value
func (o *CapabilityEndpointDescriptorAllOf) SetObjectType(v string) {
o.ObjectType = v
}
// GetDescription returns the Description field value if set, zero value otherwise.
func (o *CapabilityEndpointDescriptorAllOf) GetDescription() string {
if o == nil || o.Description == nil {
var ret string
return ret
}
return *o.Description
}
// GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *CapabilityEndpointDescriptorAllOf) GetDescriptionOk() (*string, bool) {
if o == nil || o.Description == nil {
return nil, false
}
return o.Description, true
}
// HasDescription returns a boolean if a field has been set.
func (o *CapabilityEndpointDescriptorAllOf) HasDescription() bool {
if o != nil && o.Description != nil |
return false
}
// SetDescription gets a reference to the given string and assigns it to the Description field.
func (o *CapabilityEndpointDescriptorAllOf) SetDescription(v string) {
o.Description = &v
}
// GetModel returns the Model field value if set, zero value otherwise.
func (o *CapabilityEndpointDescriptorAllOf) GetModel() string {
if o == nil || o.Model == nil {
var ret string
return ret
}
return *o.Model
}
// GetModelOk returns a tuple with the Model field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *CapabilityEndpointDescriptorAllOf) GetModelOk() (*string, bool) {
if o == nil || o.Model == nil {
return nil, false
}
return o.Model, true
}
// HasModel returns a boolean if a field has been set.
func (o *CapabilityEndpointDescriptorAllOf) HasModel() bool {
if o != nil && o.Model != nil {
return true
}
return false
}
// SetModel gets a reference to the given string and assigns it to the Model field.
func (o *CapabilityEndpointDescriptorAllOf) SetModel(v string) {
o.Model = &v
}
// GetVendor returns the Vendor field value if set, zero value otherwise.
func (o *CapabilityEndpointDescriptorAllOf) GetVendor() string {
if o == nil || o.Vendor == nil {
var ret string
return ret
}
return *o.Vendor
}
// GetVendorOk returns a tuple with the Vendor field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *CapabilityEndpointDescriptorAllOf) GetVendorOk() (*string, bool) {
if o == nil || o.Vendor == nil {
return nil, false
}
return o.Vendor, true
}
// HasVendor returns a boolean if a field has been set.
func (o *CapabilityEndpointDescriptorAllOf) HasVendor() bool {
if o != nil && o.Vendor != nil {
return true
}
return false
}
// SetVendor gets a reference to the given string and assigns it to the Vendor field.
func (o *CapabilityEndpointDescriptorAllOf) SetVendor(v string) {
o.Vendor = &v
}
// GetVersion returns the Version field value if set, zero value otherwise.
func (o *CapabilityEndpointDescriptorAllOf) GetVersion() string {
if o == nil || o.Version == nil {
var ret string
return ret
}
return *o.Version
}
// GetVersionOk returns a tuple with the Version field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *CapabilityEndpointDescriptorAllOf) GetVersionOk() (*string, bool) {
if o == nil || o.Version == nil {
return nil, false
}
return o.Version, true
}
// HasVersion returns a boolean if a field has been set.
func (o *CapabilityEndpointDescriptorAllOf) HasVersion() bool {
if o != nil && o.Version != nil {
return true
}
return false
}
// SetVersion gets a reference to the given string and assigns it to the Version field.
func (o *CapabilityEndpointDescriptorAllOf) SetVersion(v string) {
o.Version = &v
}
// GetCapabilities returns the Capabilities field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *CapabilityEndpointDescriptorAllOf) GetCapabilities() []CapabilityCapabilityRelationship {
if o == nil {
var ret []CapabilityCapabilityRelationship
return ret
}
return o.Capabilities
}
// GetCapabilitiesOk returns a tuple with the Capabilities field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *CapabilityEndpointDescriptorAllOf) GetCapabilitiesOk() (*[]CapabilityCapabilityRelationship, bool) {
if o == nil || o.Capabilities == nil {
return nil, false
}
return &o.Capabilities, true
}
// HasCapabilities returns a boolean if a field has been set.
func (o *CapabilityEndpointDescriptorAllOf) HasCapabilities() bool {
if o != nil && o.Capabilities != nil {
return true
}
return false
}
// SetCapabilities gets a reference to the given []CapabilityCapabilityRelationship and assigns it to the Capabilities field.
func (o *CapabilityEndpointDescriptorAllOf) SetCapabilities(v []CapabilityCapabilityRelationship) {
o.Capabilities = v
}
func (o CapabilityEndpointDescriptorAllOf) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if true {
toSerialize["ClassId"] = o.ClassId
}
if true {
toSerialize["ObjectType"] = o.ObjectType
}
if o.Description != nil {
toSerialize["Description"] = o.Description
}
if o.Model != nil {
toSerialize["Model"] = o.Model
}
if o.Vendor != nil {
toSerialize["Vendor"] = o.Vendor
}
if o.Version != nil {
toSerialize["Version"] = o.Version
}
if o.Capabilities != nil {
toSerialize["Capabilities"] = o.Capabilities
}
for key, value := range o.AdditionalProperties {
toSerialize[key] = value
}
return json.Marshal(toSerialize)
}
func (o *CapabilityEndpointDescriptorAllOf) UnmarshalJSON(bytes []byte) (err error) {
varCapabilityEndpointDescriptorAllOf := _CapabilityEndpointDescriptorAllOf{}
if err = json.Unmarshal(bytes, &varCapabilityEndpointDescriptorAllOf); err == nil {
*o = CapabilityEndpointDescriptorAllOf(varCapabilityEndpointDescriptorAllOf)
}
additionalProperties := make(map[string]interface{})
if err = json.Unmarshal(bytes, &additionalProperties); err == nil {
delete(additionalProperties, "ClassId")
delete(additionalProperties, "ObjectType")
delete(additionalProperties, "Description")
delete(additionalProperties, "Model")
delete(additionalProperties, "Vendor")
delete(additionalProperties, "Version")
delete(additionalProperties, "Capabilities")
o.AdditionalProperties = additionalProperties
}
return err
}
type NullableCapabilityEndpointDescriptorAllOf struct {
value *CapabilityEndpointDescriptorAllOf
isSet bool
}
func (v NullableCapabilityEndpointDescriptorAllOf) Get() *CapabilityEndpointDescriptorAllOf {
return v.value
}
func (v *NullableCapabilityEndpointDescriptorAllOf) Set(val *CapabilityEndpointDescriptorAllOf) {
v.value = val
v.isSet = true
}
func (v NullableCapabilityEndpointDescriptorAllOf) IsSet() bool {
return v.isSet
}
func (v *NullableCapabilityEndpointDescriptorAllOf) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableCapabilityEndpointDescriptorAllOf(val *CapabilityEndpointDescriptorAllOf) *NullableCapabilityEndpointDescriptorAllOf {
return &NullableCapabilityEndpointDescriptorAllOf{value: val, isSet: true}
}
func (v NullableCapabilityEndpointDescriptorAllOf) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableCapabilityEndpointDescriptorAllOf) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
}
| {
return true
} |
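The UnmarshalJSON above routes unknown fields into AdditionalProperties, which is the forward-compatibility behavior described in the file header: a newer server may send properties an older client does not know about. A sketch of that round trip, assuming `fmt` is imported alongside `encoding/json`; the field values are illustrative:

// Unknown fields survive decoding and are re-emitted by MarshalJSON.
func exampleAdditionalProperties() {
	raw := []byte(`{"ClassId":"capability.EndpointDescriptor",` +
		`"ObjectType":"capability.EndpointDescriptor",` +
		`"Model":"UCSX-210C","NewServerField":42}`)
	var d CapabilityEndpointDescriptorAllOf
	if err := d.UnmarshalJSON(raw); err != nil {
		panic(err)
	}
	fmt.Println(d.GetModel())           // UCSX-210C
	fmt.Println(d.AdditionalProperties) // map[NewServerField:42]
	out, _ := d.MarshalJSON()
	fmt.Println(string(out))            // NewServerField is re-emitted
}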
temporal_evaluation.py | """
Evaluates systems that extract temporal information from text
This code is licensed under the Apache License, Version 2.0. You may
obtain a copy of this license in the LICENSE file in the root
directory of this source tree or at
http://www.apache.org/licenses/LICENSE-2.0.
Any modifications or derivative works of this code must retain this
copyright notice, and modified files need to carry a notice
indicating that they have been altered from the originals.
If you use this code, please cite our paper:
@article{Kerr2020,
author = {Catherine Kerr and Terri Hoare and Paula Carroll and Jakub Marecek},
title = {Integer-Programming Ensemble of Temporal-Relations Classifiers},
journal = {Data Mining and Knowledge Discovery},
volume = {to appear},
year = {2020},
url = {http://arxiv.org/abs/1412.1866},
archivePrefix = {arXiv},
eprint = {1412.1866},
}
"""
import time
import sys
import re
import os
def get_arg (index):
#for arg in sys.argv:
return sys.argv[index]
global_prec_matched = 0
global_rec_matched = 0
global_system_total = 0
global_gold_total = 0
tlinksInBoth = 0 # add to find number of common TLINKs
basedir = re.sub('relation_to_timegraph.py', '', get_arg(0))
debug = float(get_arg(3))
if len(sys.argv) > 4:
evaluation_method = get_arg(4).strip()
else:
evaluation_method = ''
cmd_folder = os.path.dirname(basedir)
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import relation_to_timegraph
consider_DURING_as_SIMULTANEOUS = relation_to_timegraph.consider_DURING_as_SIMULTANEOUS
def extract_name(filename):
parts = re.split('/', filename)
length = len(parts)
return parts[length-1]
def get_directory_path(path):
name = extract_name(path)
dir = re.sub(name, '', path)
if dir == '':
dir = './'
return dir
def get_entity_val(word, line):
if re.search(word+'="[^"]*"', line):
entity = re.findall(word+'="[^"]*"', line)[0]
entity = re.sub(word+'=', '', entity)
entity = re.sub('"', '', entity)
return entity
return word
def change_DURING_relation(filetext):
newtext = ''
for line in filetext.split('\n'):
foo = ''
words = line.split('\t')
for i in range(0, len(words)):
if i == 3 and (words[i] == 'DURING' or words[i] == 'DURING_INV'):
foo += re.sub('DURING', 'SIMULTANEOUS', re.sub('DURING_INV', 'SIMULTANEOUS', words[i])) + '\t'
else:
foo += words[i] + '\t'
newtext += foo.strip() + '\n'
return newtext
def get_relations(file):
text = open(file).read()
newtext = ''
name = extract_name(file)
relations = re.findall('<TLINK[^>]*>', text)
for each in relations:
core = ''
ref = ''
relType = ''
if re.search('eventInstanceID', each):
core = get_entity_val('eventInstanceID', each)
if re.search('timeID', each):
core = get_entity_val('timeID', each)
if re.search('relatedToEventInstance', each):
ref = get_entity_val('relatedToEventInstance', each)
if re.search('relatedToTime', each):
ref = get_entity_val('relatedToTime', each)
if re.search('relType', each):
relType = get_entity_val('relType', each)
if core == '' or ref == '' or relType == '':
print 'MISSING core, ref or relation', each
else:
foo = name+'\t'+core+'\t'+ref+'\t'+relType+'\n'
if debug >= 3:
print each
print foo
newtext += foo + '\n'
if consider_DURING_as_SIMULTANEOUS == True:
newtext = change_DURING_relation(newtext)
#print '$$', newtext
return newtext
def reverse_relation(rel):
rel = re.sub('"', '', rel)
if rel.upper() == 'BEFORE':
return 'AFTER'
if rel.upper() == 'AFTER':
return 'BEFORE'
if rel.upper() == 'IBEFORE':
return 'IAFTER'
if rel.upper() == 'IAFTER':
return 'IBEFORE'
if rel.upper() == 'DURING':
return 'DURING_INV'
if rel.upper() == 'BEGINS':
return 'BEGUN_BY'
if rel.upper() == 'BEGUN_BY':
return 'BEGINS'
if rel.upper() == 'ENDS':
return 'ENDED_BY'
if rel.upper() == 'ENDED_BY':
return 'ENDS'
if rel.upper() == 'INCLUDES':
return 'IS_INCLUDED'
if rel.upper() == 'IS_INCLUDED':
return 'INCLUDES'
return rel.upper()
def get_triples(tlink_file):
tlinks = tlink_file # open(tlink_file).read() # tlink_file #
relations = ''
for line in tlinks.split('\n'):
if line.strip() == '':
continue
if debug >= 4:
print 'sending_triples', line
words = line.split('\t')
relations += words[0]+'\t'+words[1]+'\t'+words[2]+'\t'+words[3]+'\n'
if debug >= 4:
print 'received_triples', words[0]+'\t'+words[1]+'\t'+words[2]+'\t'+words[3]+'\n'
if words[1] != words[2]:
relations += words[0]+'\t'+words[2]+'\t'+words[1]+'\t'+reverse_relation(words[3]) +'\n'
if debug >= 4:
print 'received_triples', words[0]+'\t'+words[2]+'\t'+words[1]+'\t'+reverse_relation(words[3]) +'\n'
return relations
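# A worked example of the symmetric expansion performed above; the TLINK
# line is illustrative, not from a real corpus.
def _triples_example():
    line = 'doc1\te1\tt0\tBEFORE'
    return get_triples(line)
    # -> 'doc1\te1\tt0\tBEFORE\ndoc1\tt0\te1\tAFTER\n'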
def get_timegraphs(gold, system):
gold_text = gold # open(gold).read() # gold #
system_text = system # open(system).read() # system #
tg_gold = relation_to_timegraph.Timegraph()
tg_gold = relation_to_timegraph.create_timegraph_from_weight_sorted_relations(gold_text, tg_gold)
tg_gold.final_relations = tg_gold.final_relations + tg_gold.violated_relations
tg_system = relation_to_timegraph.Timegraph()
tg_system = relation_to_timegraph.create_timegraph_from_weight_sorted_relations(system_text, tg_system)
tg_system.final_relations = tg_system.final_relations + tg_system.violated_relations
return tg_gold, tg_system
# extract entities and relation from tlink line
def get_x_y_rel(tlinks):
|
def get_entity_rel(tlink):
words = tlink.split('\t')
if len(words) == 3:
return words[0]+'\t'+words[1]+'\t'+words[2]
return words[1]+'\t'+words[2]+'\t'+words[3]
def total_relation_matched(A_tlinks, B_tlinks, B_relations, B_tg):
count = 0
global tlinksInBoth
tlinksInBoth = 0
for tlink in A_tlinks.split('\n'):
if tlink.strip() == '':
continue
if debug >= 2:
print tlink
x, y, rel = get_x_y_rel(tlink)
foo = relation_to_timegraph.interval_rel_X_Y(x, y, B_tg, rel, 'evaluation')
# print x, y, rel, foo[1] # Ckerr
if re.search(get_entity_rel(tlink.strip()), B_relations):
count += 1
tlinksInBoth += 1
if debug >= 2: #change from >=2
print 'True'
continue
if debug >= 2:
print x, y, rel, foo[1]
if re.search('true', foo[1]):
count += 1
tlinksInBoth += 1
if foo[1].strip() == 'false':
# print x, y, rel, foo[1]
tlinksInBoth += 1
# print "TLINKs in both: ", tlinksInBoth, " matching: ", count
return count
def total_implicit_matched(system_reduced, gold_reduced, gold_tg):
count = 0
# global tlinksInBoth
# tlinksInBoth = 0
for tlink in system_reduced.split('\n'):
if tlink.strip() == '':
continue
if debug >= 2:
print tlink
if re.search(tlink, gold_reduced):
continue
x, y, rel = get_x_y_rel(tlink)
foo = relation_to_timegraph.interval_rel_X_Y(x, y, gold_tg, rel, 'evaluation')
if debug >= 2:
print x, y, rel, foo[1]
if re.search('true', foo[1]):
count += 1
return count
def get_entities(relations):
included = ''
for each in relations.split('\n'):
if each.strip() == '':
continue
words = each.split('\t')
if not re.search('#'+words[1]+'#', included):
included += '#'+words[1]+'#\n'
if not re.search('#'+words[2]+'#', included):
included += '#'+words[2]+'#\n'
return included
def get_n(relations):
included = get_entities(relations)
return (len(included.split('\n'))-1)
def get_common_n(gold_relations, system_relations):
gold_entities = get_entities(gold_relations)
system_entities = get_entities(system_relations)
common = ''
for each in gold_entities.split('\n'):
if each.strip() == '':
continue
if re.search(each, system_entities):
common += each + '\n'
if debug >= 3:
print len(gold_entities.split('\n')), len(system_entities.split('\n')), len(common.split('\n'))
print common.split('\n')
print gold_entities.split('\n')
return (len(common.split('\n'))-1)
def get_ref_minus(gold_relation, system_relations):
system_entities = get_entities(system_relations)
count = 0
for each in gold_relation.split('\n'):
if each.strip() == '':
continue
words = each.split('\t')
if re.search('#'+words[1]+'#', system_entities) and re.search('#'+words[2]+'#', system_entities):
count += 1
return count
def evaluate_two_files_implicit_in_recall(arg1, arg2):
global global_prec_matched
global global_rec_matched
global global_system_total
global global_gold_total
if debug >= 1:
print '\n\n Evaluate', arg1, arg2
gold_annotation = get_relations(arg1)
system_annotation = get_relations(arg2)
tg_gold, tg_system = get_timegraphs(gold_annotation, system_annotation)
gold_relations = get_triples(gold_annotation)
system_relations = get_triples(system_annotation)
#for precision
if debug >= 2:
print '\nchecking precision'
prec_matched = total_relation_matched(tg_system.final_relations, tg_gold.final_relations, gold_relations, tg_gold)
# for recall
if debug >= 2:
print '\nchecking recall'
rec_matched = total_relation_matched(tg_gold.final_relations, tg_system.final_relations, system_relations, tg_system)
rec_implicit_matched = total_implicit_matched(tg_system.final_relations, tg_gold.final_relations, tg_gold)
n = get_common_n(tg_gold.final_relations, tg_system.final_relations)
## n = get_n(tg_gold.final_relations)
ref_plus = 0.5*n*(n-1)
## ref_minus = len(tg_gold.final_relations.split('\n'))-1
ref_minus = rec_matched ## get_ref_minus(tg_gold.final_relations, tg_system.final_relations)
w = 0.99/(1+ref_plus-ref_minus) # ref_minus #
if debug >= 2:
print 'n =', n
print 'rec_implicit_matched', rec_implicit_matched
print 'n, ref_plus, ref_minus', n , ref_plus , ref_minus
print 'w', w
print 'rec_matched', rec_matched
print 'total', (len(tg_gold.final_relations.split('\n'))-1)
print 'w*rec_implicit_matched', w*rec_implicit_matched
if debug >= 2:
print 'precision', prec_matched, len(tg_system.final_relations.split('\n'))-1
if len(tg_system.final_relations.split('\n')) <= 1:
precision = 0
else:
precision = prec_matched*1.0/(len(tg_system.final_relations.split('\n'))-1)
if debug >= 2:
print 'recall', rec_matched, len(tg_gold.final_relations.split('\n'))-1
if len(tg_gold.final_relations.split('\n')) <= 1:
recall = 0
else:
recall2 = (rec_matched)*1.0/(len(tg_gold.final_relations.split('\n'))-1)
recall = (rec_matched+w*rec_implicit_matched)*1.0/(len(tg_gold.final_relations.split('\n'))-1)
if debug >= 2:
print 'recall2', recall2
print 'recall', recall
if debug >= 1:
print precision, recall, get_fscore(precision, recall)
global_prec_matched += prec_matched
global_rec_matched += rec_matched+w*rec_implicit_matched
global_system_total += len(tg_system.final_relations.split('\n'))-1
global_gold_total += len(tg_gold.final_relations.split('\n'))-1
return tg_system
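# A worked sketch of the implicit-recall weight used above; the numbers are
# illustrative, not from a real run. With n entities common to gold and
# system, ref_plus = 0.5*n*(n-1) bounds the possible relations, and w
# discounts implicit matches so they can never dominate explicit ones.
def _weight_example():
    n = 4                                # common entities
    ref_plus = 0.5 * n * (n - 1)         # = 6.0
    ref_minus = 3                        # explicitly matched relations
    return 0.99 / (1 + ref_plus - ref_minus)   # = 0.2475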
def evaluate_two_files(arg1, arg2):
global global_prec_matched
global global_rec_matched
global global_system_total
global global_gold_total
if debug >= 1:
print '\n\nEvaluate', arg1, arg2
gold_annotation = get_relations(arg1)
system_annotation = get_relations(arg2)
tg_gold, tg_system = get_timegraphs(gold_annotation, system_annotation)
gold_relations = get_triples(gold_annotation)
system_relations = get_triples(system_annotation)
#for precision
if debug >= 2:
print '\nchecking precision'
prec_matched = total_relation_matched(tg_system.final_relations, tg_gold.final_relations, gold_relations, tg_gold)
# for recall
if debug >= 2:
print '\nchecking recall'
rec_matched = total_relation_matched(tg_gold.final_relations, tg_system.final_relations, system_relations, tg_system)
if debug >= 2:
print 'precision', prec_matched, len(tg_system.final_relations.split('\n'))-1
if len(tg_system.final_relations.split('\n')) <= 1:
precision = 0
else:
precision = prec_matched*1.0/(len(tg_system.final_relations.split('\n'))-1)
if debug >= 2:
print 'recall', rec_matched, len(tg_gold.final_relations.split('\n'))-1
if len(tg_gold.final_relations.split('\n')) <= 1:
recall = 0
else:
recall = rec_matched*1.0/(len(tg_gold.final_relations.split('\n'))-1)
if debug >= 1:
print precision, recall
global_prec_matched += prec_matched
global_rec_matched += rec_matched
global_system_total += len(tg_system.final_relations.split('\n'))-1
global_gold_total += len(tg_gold.final_relations.split('\n'))-1
return tg_system
def evaluate_two_files_acl11(arg1, arg2):
global global_prec_matched
global global_rec_matched
global global_system_total
global global_gold_total
if debug >= 1:
print '\n\n Evaluate', arg1, arg2
gold_annotation = get_relations(arg1)
system_annotation = get_relations(arg2)
tg_gold, tg_system = get_timegraphs(gold_annotation, system_annotation)
if debug >= 2:
print '\nnonredundant'
print tg_gold.nonredundant
print '\nremove from reduce'
print tg_gold.remove_from_reduce
print '\nviolated relations'
print tg_gold.violated_relations
print '\nfinal relations'
print tg_gold.final_relations
gold_relations = get_triples(gold_annotation)
system_relations = get_triples(system_annotation)
#for precision
if debug >= 2:
print '\nchecking precision'
##prec_matched = total_relation_matched(tg_system.final_relations, tg_gold.final_relations, gold_relations, tg_gold)
prec_matched = total_relation_matched(system_relations, tg_gold.final_relations, gold_relations, tg_gold)
# for recall
if debug >= 2:
print '\nchecking recall'
##rec_matched = total_relation_matched(tg_gold.final_relations, tg_system.final_relations, system_relations, tg_system)
rec_matched = total_relation_matched(gold_relations, tg_system.final_relations, system_relations, tg_system)
if debug >= 2:
#print 'precision', prec_matched, len(tg_system.final_relations.split('\n'))-1
print 'precision', prec_matched, len(system_relations.split('\n'))-1
if len(system_relations.split('\n')) <= 1:
##if len(tg_system.final_relations.split('\n')) <= 1:
precision = 0
else:
##precision = prec_matched*1.0/(len(tg_system.final_relations.split('\n'))-1)
precision = prec_matched*1.0/(len(system_relations.split('\n'))-1)
if debug >= 2:
##print 'recall', rec_matched, len(tg_gold.final_relations.split('\n'))-1
print 'recall', rec_matched, len(gold_relations.split('\n'))-1
##if len(tg_gold.final_relations.split('\n')) <= 1:
if len(gold_relations.split('\n')) <= 1:
recall = 0
else:
#recall = rec_matched*1.0/(len(tg_gold.final_relations.split('\n'))-1)
recall = rec_matched*1.0/(len(gold_relations.split('\n'))-1)
if debug >= 1:
print precision, recall
global_prec_matched += prec_matched
global_rec_matched += rec_matched
##global_system_total += len(tg_system.final_relations.split('\n'))-1
global_system_total += len(system_relations.split('\n'))-1
##global_gold_total += len(tg_gold.final_relations.split('\n'))-1
global_gold_total += len(gold_relations.split('\n'))-1
return tg_system
count_relation = 0
count_node = 0
count_chains = 0
count_time = 0
def evaluate_two_folders(gold, system):
global count_relation
global count_node
global count_chains
global count_time
count = 0
if gold[-1] != '/':
gold += '/'
if system[-1] != '/':
system += '/'
for file in os.listdir(gold):
if os.path.isdir(gold+file):
subdir = file+'/'
if debug >= 1:
print 'Traverse files in Directory', gold+subdir
evaluate_two_folders(gold+subdir, system+subdir)
else:
goldfile = gold + file
systemfile = system + file
if not re.search('DS_Store', file):
if debug >= 2:
print goldfile, systemfile
start_time = time.time()
if evaluation_method == 'acl11':
tg = evaluate_two_files_acl11(goldfile, systemfile)
elif evaluation_method == 'implicit_in_recall':
tg = evaluate_two_files_implicit_in_recall(goldfile, systemfile)
else:
tg = evaluate_two_files(goldfile, systemfile)
end_time = time.time()
if debug >= 1:
print end_time-start_time, ',', tg.count_relation, ',', tg.count_node, ',', tg.next_chain+tg.count_cross_chain
count_time += end_time-start_time
count_relation += tg.count_relation
count_node += tg.count_node
count_chains += tg.next_chain+tg.count_cross_chain
# print count_time, count_relation, count_node, count_chains
# if count > 5:
# break
# count += 1
def get_fscore(p, r):
if p+r == 0:
return 0
return 2.0*p*r/(p+r)
def final_score():
global global_prec_matched
global global_rec_matched
global global_system_total
global global_gold_total
# print "System: ", global_system_total, " Gold: ", global_gold_total
if global_system_total == 0:
precision = 0
else:
precision = global_prec_matched*1.0/global_system_total
if global_gold_total == 0:
recall = 0
else:
recall = global_rec_matched*1.0/global_gold_total
if precision == 0 and recall == 0:
fscore = 0
else:
fscore = get_fscore(precision, recall)
print '=== Temporal Awareness Score ==='
if evaluation_method == 'acl11':
print 'Evaluated with ACL\'11 score, not taking the reduced graph for relations.'
elif evaluation_method == 'implicit_in_recall':
print 'Evaluated considering implicit relations in recall as well'
else:
print evaluation_method
print 'Temporal Score\tF1\tP\tR'
print '\t\t'+str(100*round(fscore, 6))+'\t'+str(100*round(precision, 6))+'\t'+str(100*round(recall, 6))+'\t'
print 'Overall Temporal Awareness Score (F1 score):', str(100*round(fscore, 6))
print ''
# take input from command line and give error messages
# call appropriate functions to evaluate
def input_and_evaluate():
invalid = 'false'
if len(sys.argv) < 3:
invalid = 'true'
else:
arg1 = get_arg(1)
arg2 = get_arg(2)
global directory_path
directory_path = get_directory_path(sys.argv[0])
# both arguments are directories
if invalid == 'false' and os.path.isdir(arg1) and os.path.isdir(arg2):
# for each files in gold folder, check the performance of that file in system folder
if debug >= 2:
print 'compare files in two folders'
evaluate_two_folders(arg1, arg2)
elif invalid == 'false' and os.path.isfile(arg1) and os.path.isfile(arg2):
# compare the performance between two files
if debug >= 2:
print 'compare two files'
goldfile = arg1 # CK added this
systemfile = arg2 # CK added this
evaluate_two_files(arg1, arg2)
if evaluation_method == 'acl11':
tg = evaluate_two_files_acl11(goldfile, systemfile)
elif evaluation_method == 'implicit_in_recall':
tg = evaluate_two_files_implicit_in_recall(goldfile, systemfile)
else:
tg = evaluate_two_files(goldfile, systemfile)
else:
invalid = 'true'
print 'INVALID INPUT FORMAT'
print '\nto check the performance of a single file:\n\t python evaluate_entities.py gold_file_path system_file_path\n'
print 'to check the performance of all files in a gold folder:\n\t python evaluate_entities.py gold_folder_path system_folder_path '
if invalid == 'false':
performance = 'get'
#get_performance()
final_score()
input_and_evaluate()
#print count_time, count_relation, count_node, count_chains
| words = tlinks.split('\t')
x = words[1]
y = words[2]
rel = words[3]
return x, y, rel |
routes.js | /*
* Copyright 2020 The Magma Authors.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @flow
* @format
*/
import type {ExpressResponse} from 'express';
import type {FBCNMSRequest} from '@fbcnms/auth/access';
const express = require('express');
const proxy = require('express-http-proxy');
const HttpsProxyAgent = require('https-proxy-agent');
const url = require('url');
const {apiCredentials, API_HOST} = require('@fbcnms/platform-server/config');
import auditLoggingDecorator from './auditLoggingDecorator';
import {intersection} from 'lodash';
const router: express.Router<FBCNMSRequest, ExpressResponse> = express.Router();
const PROXY_TIMEOUT_MS = 30000;
let agent = null;
if (process.env.HTTPS_PROXY) {
const options = url.parse(process.env.HTTPS_PROXY);
agent = new HttpsProxyAgent(options);
}
const PROXY_OPTIONS = {
https: true,
memoizeHost: false,
timeout: PROXY_TIMEOUT_MS,
proxyReqOptDecorator: (proxyReqOpts, _originalReq) => {
return {
...proxyReqOpts,
agent: agent,
cert: apiCredentials().cert,
key: apiCredentials().key,
rejectUnauthorized: false,
};
},
proxyReqPathResolver: req =>
req.originalUrl.replace(/^\/nms\/apicontroller/, ''),
};
export async function networkIdFilter(req: FBCNMSRequest): Promise<boolean> {
if (req.organization) {
const organization = await req.organization();
// If the request isn't an organization network, block
// the request
const isOrganizationAllowed = containsNetworkID(
organization.networkIDs,
req.params.networkID,
);
if (!isOrganizationAllowed) {
return false;
}
}
// super users on standalone deployments
// have access to all proxied API requests
// for the organization
if (req.user.isSuperUser) {
return true;
}
return containsNetworkID(req.user.networkIDs, req.params.networkID);
}
export async function networksResponseDecorator(
_proxyRes: ExpressResponse,
proxyResData: Buffer,
userReq: FBCNMSRequest,
_userRes: ExpressResponse,
) {
let result = JSON.parse(proxyResData.toString('utf8'));
if (userReq.organization) {
const organization = await userReq.organization();
result = intersection(result, organization.networkIDs);
}
if (!userReq.user.isSuperUser) {
// the list of networks is further restricted to what the user
// is allowed to see
result = intersection(result, userReq.user.networkIDs);
}
return JSON.stringify(result);
}
const containsNetworkID = function (
allowedNetworkIDs: string[],
networkID: string,
): boolean {
return (
allowedNetworkIDs.indexOf(networkID) !== -1 ||
// Remove secondary condition after T34404422 is addressed. Reason:
// Request needs to be lower cased otherwise calling
// MagmaAPIUrls.gateways() potentially returns missing devices.
allowedNetworkIDs
.map(id => id.toString().toLowerCase())
.indexOf(networkID.toString().toLowerCase()) !== -1
);
};
const proxyErrorHandler = (err, res, next) => {
if (err.code === 'ENOTFOUND') {
res.status(503).send('Cannot reach Orchestrator server');
} else {
next();
}
};
router.use(
/^\/magma\/v1\/networks$/,
proxy(API_HOST, {
...PROXY_OPTIONS,
userResDecorator: networksResponseDecorator,
proxyErrorHandler,
}),
);
router.use(
'/magma/v1/networks/:networkID',
proxy(API_HOST, {
...PROXY_OPTIONS,
filter: networkIdFilter,
userResDecorator: auditLoggingDecorator,
proxyErrorHandler,
}),
);
const networkTypeRegex = '(cwf|feg|lte|feg_lte|symphony|wifi)'; | filter: networkIdFilter,
userResDecorator: auditLoggingDecorator,
proxyErrorHandler,
}),
);
router.use(
'/magma/channels/:channel',
proxy(API_HOST, {
...PROXY_OPTIONS,
filter: (req, _res) => req.method === 'GET',
}),
);
router.use(
'/magma/v1/channels/:channel',
proxy(API_HOST, {
...PROXY_OPTIONS,
filter: (req, _res) => req.method === 'GET',
}),
);
router.use(
'/magma/v1/events/:networkID',
proxy(API_HOST, {
...PROXY_OPTIONS,
filter: networkIdFilter,
proxyErrorHandler,
}),
);
router.use(
'/magma/v1/events/:networkID/:streamName',
proxy(API_HOST, {
...PROXY_OPTIONS,
filter: networkIdFilter,
proxyErrorHandler,
}),
);
router.use('', (req: FBCNMSRequest, res: ExpressResponse) => {
res.status(404).send('Not Found');
});
export default router; | router.use(
`/magma/v1/:networkType(${networkTypeRegex})/:networkID`,
proxy(API_HOST, {
...PROXY_OPTIONS, |
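A sketch of how this router would typically be mounted; the host app and port are assumptions, and only the '/nms/apicontroller' prefix matters because proxyReqPathResolver strips it before forwarding to API_HOST:

// Hypothetical host application for the proxy router above.
const app = express();
app.use('/nms/apicontroller', router);
app.listen(8080);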
resize_instance.py | # Copyright 2013 CentRin Data, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
from openstack_dashboard.dashboards.project.instances.workflows \
import create_instance
class SetFlavorChoiceAction(workflows.Action):
old_flavor_id = forms.CharField(required=False, widget=forms.HiddenInput())
old_flavor_name = forms.CharField(
label=_("Old Flavor"),
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
required=False,
)
flavor = forms.ThemableChoiceField(
label=_("New Flavor"),
help_text=_("Choose the flavor to launch."))
class Meta(object):
name = _("Flavor Choice")
slug = 'flavor_choice'
help_text_template = ("project/instances/"
"_flavors_and_quotas.html")
def populate_flavor_choices(self, request, context):
|
def get_help_text(self, extra_context=None):
extra = {} if extra_context is None else dict(extra_context)
try:
extra['usages'] = api.nova.tenant_absolute_limits(self.request,
reserved=True)
extra['usages_json'] = json.dumps(extra['usages'])
flavors = json.dumps([f._info for f in
instance_utils.flavor_list(self.request)])
extra['flavors'] = flavors
extra['resize_instance'] = True
except Exception:
exceptions.handle(self.request,
_("Unable to retrieve quota information."))
return super(SetFlavorChoiceAction, self).get_help_text(extra)
class SetFlavorChoice(workflows.Step):
action_class = SetFlavorChoiceAction
depends_on = ("instance_id", "name")
contributes = ("old_flavor_id", "old_flavor_name", "flavors", "flavor")
class ResizeInstance(workflows.Workflow):
slug = "resize_instance"
name = _("Resize Instance")
finalize_button_name = _("Resize")
success_message = _('Request for resizing of instance "%s" '
'has been submitted.')
failure_message = _('Unable to resize instance "%s".')
success_url = "horizon:project:instances:index"
default_steps = (SetFlavorChoice, create_instance.SetAdvanced)
def format_status_message(self, message):
return message % self.context.get('name', 'unknown instance')
@sensitive_variables('context')
def handle(self, request, context):
instance_id = context.get('instance_id', None)
flavor = context.get('flavor', None)
disk_config = context.get('disk_config', None)
try:
api.nova.server_resize(request, instance_id, flavor, disk_config)
return True
except Exception:
exceptions.handle(request)
return False
| old_flavor_id = context.get('old_flavor_id')
flavors = context.get('flavors').values()
# Remove current flavor from the list of flavor choices
flavors = [flavor for flavor in flavors if flavor.id != old_flavor_id]
if flavors:
if len(flavors) > 1:
flavors = instance_utils.sort_flavor_list(request, flavors)
else:
flavor = flavors[0]
flavors = [(flavor.id, flavor.name)]
flavors.insert(0, ("", _("Select a New Flavor")))
else:
flavors.insert(0, ("", _("No flavors available")))
return flavors |
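A small stand-in illustration of the choice list built by populate_flavor_choices above; the Flavor tuple and the ids are assumptions, not real Nova objects:

from collections import namedtuple

Flavor = namedtuple('Flavor', ['id', 'name'])  # stand-in for a Nova flavor

def choices_example():
    old_flavor_id = 'id-small'
    flavors = [Flavor('id-small', 'm1.small'), Flavor('id-medium', 'm1.medium')]
    flavors = [f for f in flavors if f.id != old_flavor_id]
    choices = [(f.id, f.name) for f in flavors]
    choices.insert(0, ("", "Select a New Flavor"))
    return choices  # [('', 'Select a New Flavor'), ('id-medium', 'm1.medium')]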
envelope.go | package enmime
import (
"fmt"
"io"
"mime"
"net/mail"
"net/textproto"
"strings"
"github.com/jaytaylor/html2text"
"github.com/jhillyerd/enmime/internal/coding"
"github.com/pkg/errors"
)
// Envelope is a simplified wrapper for MIME email messages.
type Envelope struct {
Text string // The plain text portion of the message
HTML string // The HTML portion of the message
Root *Part // The top-level Part
Attachments []*Part // All parts having a Content-Disposition of attachment
Inlines []*Part // All parts having a Content-Disposition of inline
// All non-text parts that were not placed in Attachments or Inlines, such as multipart/related
// content.
OtherParts []*Part
Errors []*Error // Errors encountered while parsing
header *textproto.MIMEHeader // Header from original message
}
// GetHeaderKeys returns a list of header keys seen in this message. Get
// individual headers with `GetHeader(name)`
func (e *Envelope) GetHeaderKeys() (headers []string) {
if e.header == nil {
return
}
for key := range *e.header {
headers = append(headers, key)
}
return headers
}
// GetHeader processes the specified header for RFC 2047 encoded words and returns the result as a
// UTF-8 string
func (e *Envelope) GetHeader(name string) string {
if e.header == nil {
return ""
}
return decodeHeader(e.header.Get(name))
}
// GetHeaderValues processes the specified header for RFC 2047 encoded words and returns all existing
// values as a list of UTF-8 strings
func (e *Envelope) GetHeaderValues(name string) []string {
if e.header == nil {
return []string{}
}
rawValues := (*e.header)[textproto.CanonicalMIMEHeaderKey(name)]
var values []string
for _, v := range rawValues {
values = append(values, decodeHeader(v))
}
return values
}
// SetHeader sets given header name to the given value.
// If the header exists already, all existing values are replaced.
func (e *Envelope) SetHeader(name string, value []string) error {
if name == "" {
return fmt.Errorf("Provide non-empty header name")
}
for i, v := range value {
if i == 0 {
e.header.Set(name, mime.BEncoding.Encode("utf-8", v))
continue
}
e.header.Add(name, mime.BEncoding.Encode("utf-8", v))
}
return nil
}
// AddHeader appends given header value to header name without changing existing values.
// If the header does not exist already, it will be created.
func (e *Envelope) AddHeader(name string, value string) error {
if name == "" {
return fmt.Errorf("Provide non-empty header name")
}
e.header.Add(name, mime.BEncoding.Encode("utf-8", value))
return nil
}
// DeleteHeader deletes given header.
func (e *Envelope) DeleteHeader(name string) error {
if name == "" {
return fmt.Errorf("Provide non-empty header name")
}
e.header.Del(name)
return nil
}
// AddressList returns a mail.Address slice with RFC 2047 encoded names converted to UTF-8
func (e *Envelope) AddressList(key string) ([]*mail.Address, error) {
if e.header == nil {
return nil, fmt.Errorf("No headers available")
}
if !AddressHeaders[strings.ToLower(key)] {
return nil, fmt.Errorf("%s is not an address header", key)
}
str := decodeToUTF8Base64Header(e.header.Get(key))
if str == "" {
return nil, mail.ErrHeaderNotPresent
}
// These statements are handy for debugging ParseAddressList errors
// fmt.Println("in: ", m.header.Get(key))
// fmt.Println("out: ", str)
ret, err := mail.ParseAddressList(str)
switch {
case err == nil:
// carry on
case err.Error() == "mail: expected comma":
ret, err = mail.ParseAddressList(ensureCommaDelimitedAddresses(str))
if err != nil {
return nil, err
}
default:
return nil, err
}
return ret, nil
}
// Clone returns a clone of the current Envelope
func (e *Envelope) Clone() *Envelope {
if e == nil {
return nil
}
newEnvelope := &Envelope{
e.Text,
e.HTML,
e.Root.Clone(nil),
e.Attachments,
e.Inlines,
e.OtherParts,
e.Errors,
e.header,
}
return newEnvelope
}
// ReadEnvelope is a wrapper around ReadParts and EnvelopeFromPart. It parses the content of the
// provided reader into an Envelope, downconverting HTML to plain text if needed, and sorting the
// attachments, inlines and other parts into their respective slices. Errors are collected from all
// Parts and placed into the Envelope.Errors slice.
func ReadEnvelope(r io.Reader) (*Envelope, error) {
// Read MIME parts from reader
root, err := ReadParts(r)
if err != nil {
return nil, errors.WithMessage(err, "Failed to ReadParts")
}
return EnvelopeFromPart(root)
}
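// Usage sketch for ReadEnvelope; the message text is illustrative. Both
// fmt and strings are already imported by this file.
func exampleReadEnvelope() {
	msg := "From: a@example.com\r\nSubject: Hi\r\nContent-Type: text/plain\r\n\r\nHello"
	env, err := ReadEnvelope(strings.NewReader(msg))
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Println(env.GetHeader("Subject"), env.Text) // Hi Hello
}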
// EnvelopeFromPart uses the provided Part tree to build an Envelope, downconverting HTML to plain
// text if needed, and sorting the attachments, inlines and other parts into their respective
// slices. Errors are collected from all Parts and placed into the Envelopes Errors slice.
func EnvelopeFromPart(root *Part) (*Envelope, error) {
e := &Envelope{
Root: root,
header: &root.Header,
}
if detectMultipartMessage(root) {
// Multi-part message (message with attachments, etc)
if err := parseMultiPartBody(root, e); err != nil {
return nil, err
}
} else {
if detectBinaryBody(root) {
// Attachment only, no text
if root.Disposition == cdInline {
e.Inlines = append(e.Inlines, root)
} else {
e.Attachments = append(e.Attachments, root)
}
} else {
// Only text, no attachments
if err := parseTextOnlyBody(root, e); err != nil {
return nil, err
}
}
}
// Down-convert HTML to text if necessary
if e.Text == "" && e.HTML != "" {
// We always warn when this happens
e.Root.addWarning(
ErrorPlainTextFromHTML,
"Message did not contain a text/plain part")
var err error
if e.Text, err = html2text.FromString(e.HTML); err != nil {
// Downconversion shouldn't fail
e.Text = ""
p := e.Root.BreadthMatchFirst(matchHTMLBodyPart)
p.addError(
ErrorPlainTextFromHTML,
"Failed to downconvert HTML: %v",
err)
}
}
// Copy part errors into Envelope.
if e.Root != nil {
_ = e.Root.DepthMatchAll(func(part *Part) bool {
// Using DepthMatchAll to traverse all parts, don't care about result.
for i := range part.Errors {
// Range index is needed to get the correct address, because range value points to
// a locally scoped variable.
e.Errors = append(e.Errors, part.Errors[i])
}
return false
})
}
return e, nil
}
// parseTextOnlyBody parses a plain text message in root that has MIME-like headers, but
// only contains a single part - no boundaries, etc. The result is placed in e.
func parseTextOnlyBody(root *Part, e *Envelope) error {
// Determine character set
var charset string
var isHTML bool
if ctype := root.Header.Get(hnContentType); ctype != "" {
if mediatype, mparams, _, err := ParseMediaType(ctype); err == nil {
isHTML = (mediatype == ctTextHTML)
if mparams[hpCharset] != "" {
charset = mparams[hpCharset]
}
}
}
// Read transcoded text
if isHTML {
rawHTML := string(root.Content)
// Note: Empty e.Text will trigger html2text conversion
e.HTML = rawHTML
if charset == "" {
// Search for charset in HTML metadata
if charset = coding.FindCharsetInHTML(rawHTML); charset != "" {
// Found charset in HTML
if convHTML, err := coding.ConvertToUTF8String(charset, root.Content); err == nil {
// Successful conversion
e.HTML = convHTML
} else {
// Conversion failed
root.addWarning(ErrorCharsetConversion, err.Error())
}
}
// Converted from charset in HTML
return nil
}
} else {
e.Text = string(root.Content)
}
return nil
}
// parseMultiPartBody parses a multipart message in root. The result is placed in e.
func parseMultiPartBody(root *Part, e *Envelope) error {
// Parse top-level multipart
ctype := root.Header.Get(hnContentType)
mediatype, params, _, err := ParseMediaType(ctype)
if err != nil {
return fmt.Errorf("Unable to parse media type: %v", err)
}
if !strings.HasPrefix(mediatype, ctMultipartPrefix) {
return fmt.Errorf("Unknown mediatype: %v", mediatype)
}
boundary := params[hpBoundary]
if boundary == "" {
return fmt.Errorf("Unable to locate boundary param in Content-Type header")
}
// Locate text body
if mediatype == ctMultipartAltern {
p := root.BreadthMatchFirst(func(p *Part) bool {
return p.ContentType == ctTextPlain && p.Disposition != cdAttachment
})
if p != nil {
e.Text = string(p.Content)
}
} else {
// multipart is of a mixed type
parts := root.DepthMatchAll(func(p *Part) bool {
return p.ContentType == ctTextPlain && p.Disposition != cdAttachment
})
for i, p := range parts {
if i > 0 {
e.Text += "\n--\n"
}
e.Text += string(p.Content)
}
}
// Locate HTML body
p := root.BreadthMatchFirst(matchHTMLBodyPart)
if p != nil {
e.HTML += string(p.Content)
}
// Locate attachments
e.Attachments = root.BreadthMatchAll(func(p *Part) bool {
return p.Disposition == cdAttachment || p.ContentType == ctAppOctetStream
})
// Locate inlines
e.Inlines = root.BreadthMatchAll(func(p *Part) bool {
return p.Disposition == cdInline && !strings.HasPrefix(p.ContentType, ctMultipartPrefix)
})
// Locate others parts not considered in attachments or inlines
e.OtherParts = root.BreadthMatchAll(func(p *Part) bool {
if strings.HasPrefix(p.ContentType, ctMultipartPrefix) {
return false
}
if p.Disposition != "" {
return false
}
if p.ContentType == ctAppOctetStream {
return false
}
return p.ContentType != ctTextPlain && p.ContentType != ctTextHTML
})
return nil
}
// Used by Part matchers to locate the HTML body. Not inlined because it's used in multiple places.
func matchHTMLBodyPart(p *Part) bool {
return p.ContentType == ctTextHTML && p.Disposition != cdAttachment
}
// Used by AddressList to ensure that address lists are properly delimited
func ensureCommaDelimitedAddresses(s string) string | {
// This normalizes the whitespace, but may interfere with CFWS (comments with folding whitespace)
// RFC-5322 3.4.0:
// because some legacy implementations interpret the comment,
// comments generally SHOULD NOT be used in address fields
// to avoid confusing such implementations.
s = strings.Join(strings.Fields(s), " ")
inQuotes := false
inDomain := false
escapeSequence := false
sb := strings.Builder{}
for _, r := range s {
if escapeSequence {
escapeSequence = false
sb.WriteRune(r)
continue
}
if r == '"' {
inQuotes = !inQuotes
sb.WriteRune(r)
continue
}
if inQuotes {
if r == '\\' {
escapeSequence = true
sb.WriteRune(r)
continue
}
} else {
if r == '@' {
inDomain = true
sb.WriteRune(r)
continue
}
if inDomain {
if r == ';' {
sb.WriteRune(r)
break
}
if r == ',' {
inDomain = false
sb.WriteRune(r)
continue
}
if r == ' ' {
inDomain = false
sb.WriteRune(',')
sb.WriteRune(r)
continue
}
}
}
sb.WriteRune(r)
}
return sb.String()
} |
|
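A quick illustration of ensureCommaDelimitedAddresses: once a domain has been seen, a bare space is treated as an address boundary and a comma is inserted, while commas inside quoted display names are untouched (the addresses are illustrative):

func exampleEnsureCommas() {
	in := `"Doe, John" <john@example.com> "Roe, Jane" <jane@example.com>`
	fmt.Println(ensureCommaDelimitedAddresses(in))
	// Output: "Doe, John" <john@example.com>, "Roe, Jane" <jane@example.com>
}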
at-most.rs | use rule::Rule;
#[test]
fn at_most() {
let code = "yyy";
let y = Rule::new(|_, _| Ok(14));
y.literal("y");
let test1: Rule<i32> = Rule::default();
test1.at_most(2, &y);
if let Ok(_) = test1.scan(&code) {
assert!(false);
} | let test2: Rule<i32> = Rule::default();
test2.at_most(3, &y);
if let Ok(branches) = test2.scan(&code) {
assert_eq!(branches[0], 14);
assert_eq!(branches[1], 14);
assert_eq!(branches[2], 14);
}
else {
assert!(false);
}
let test3: Rule<i32> = Rule::default();
test3.at_most(4, &y);
if let Ok(branches) = test3.scan(&code) {
assert_eq!(branches[0], 14);
assert_eq!(branches[1], 14);
assert_eq!(branches[2], 14);
}
else {
assert!(false);
}
} | else {
assert!(true);
}
|
try_from_into.rs | // try_from_into.rs
// TryFrom is a simple and safe type conversion that may fail in a controlled way under some circumstances.
// Basically, this is the same as From. The main difference is that this should return a Result type
// instead of the target type itself.
// You can read more about it at https://doc.rust-lang.org/std/convert/trait.TryFrom.html
use std::convert::{TryFrom, TryInto};
#[derive(Debug, PartialEq)]
struct Color {
red: u8,
green: u8,
blue: u8,
}
// We will use this error type for these `TryFrom` conversions.
#[derive(Debug, PartialEq)]
enum IntoColorError {
// Incorrect length of slice
BadLen,
// Integer conversion error
IntConversion,
}
// Your task is to complete this implementation
// and return an Ok result of inner type Color.
// You need to create an implementation for a tuple of three integers,
// an array of three integers, and a slice of integers.
//
// Note that the implementation for tuple and array will be checked at compile time,
// but the slice implementation needs to check the slice length!
// Also note that correct RGB color values must be integers in the 0..=255 range.
// Tuple implementation
impl TryFrom<(i16, i16, i16)> for Color {
type Error = IntoColorError;
fn try_from(tuple: (i16, i16, i16)) -> Result<Self, Self::Error> {
let (r, g, b) = (tuple.0.try_into(), tuple.1.try_into(), tuple.2.try_into());
match (r, g, b) {
(Err(_), _, _) | (_, Err(_), _) | (_, _, Err(_)) => Err(IntoColorError::IntConversion),
(Ok(r), Ok(g), Ok(b)) => Ok(Color {
red: r,
green: g,
blue: b,
}),
}
}
}
// Array implementation
impl TryFrom<[i16; 3]> for Color {
type Error = IntoColorError;
fn try_from(arr: [i16; 3]) -> Result<Self, Self::Error> {
let (r, g, b) = (arr[0].try_into(), arr[1].try_into(), arr[2].try_into());
match (r, g, b) {
(Err(_), _, _) | (_, Err(_), _) | (_, _, Err(_)) => Err(IntoColorError::IntConversion),
(Ok(r), Ok(g), Ok(b)) => Ok(Color {
red: r,
green: g,
blue: b,
}),
}
}
}
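// Possible refactor (sketch, not part of the exercise): all three impls use
// the same per-channel conversion, which could be shared like this.
fn checked_channel(v: i16) -> Result<u8, IntoColorError> {
    u8::try_from(v).map_err(|_| IntoColorError::IntConversion)
}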
// Slice implementation
impl TryFrom<&[i16]> for Color {
type Error = IntoColorError;
fn try_from(slice: &[i16]) -> Result<Self, Self::Error> {
if slice.len() != 3 {
return Err(IntoColorError::BadLen);
}
let (r, g, b) = (slice[0].try_into(), slice[1].try_into(), slice[2].try_into()); | match (r, g, b) {
(Err(_), _, _) | (_, Err(_), _) | (_, _, Err(_)) => Err(IntoColorError::IntConversion),
(Ok(r), Ok(g), Ok(b)) => Ok(Color {
red: r,
green: g,
blue: b,
}),
}
}
}
fn main() {
// Use the `from` function
let c1 = Color::try_from((183, 65, 14));
println!("{:?}", c1);
// Since TryFrom is implemented for Color, we should be able to use TryInto
let c2: Result<Color, _> = [183, 65, 14].try_into();
println!("{:?}", c2);
let v = vec![183, 65, 14];
// With slice we should use `try_from` function
let c3 = Color::try_from(&v[..]);
println!("{:?}", c3);
// or take slice within round brackets and use TryInto
let c4: Result<Color, _> = (&v[..]).try_into();
println!("{:?}", c4);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_tuple_out_of_range_positive() {
assert_eq!(
Color::try_from((256, 1000, 10000)),
Err(IntoColorError::IntConversion)
);
}
#[test]
fn test_tuple_out_of_range_negative() {
assert_eq!(
Color::try_from((-1, -10, -256)),
Err(IntoColorError::IntConversion)
);
}
#[test]
fn test_tuple_sum() {
assert_eq!(
Color::try_from((-1, 255, 255)),
Err(IntoColorError::IntConversion)
);
}
#[test]
fn test_tuple_correct() {
let c: Result<Color, _> = (183, 65, 14).try_into();
assert!(c.is_ok());
assert_eq!(
c.unwrap(),
Color {
red: 183,
green: 65,
blue: 14
}
);
}
#[test]
fn test_array_out_of_range_positive() {
let c: Result<Color, _> = [1000, 10000, 256].try_into();
assert_eq!(c, Err(IntoColorError::IntConversion));
}
#[test]
fn test_array_out_of_range_negative() {
let c: Result<Color, _> = [-10, -256, -1].try_into();
assert_eq!(c, Err(IntoColorError::IntConversion));
}
#[test]
fn test_array_sum() {
let c: Result<Color, _> = [-1, 255, 255].try_into();
assert_eq!(c, Err(IntoColorError::IntConversion));
}
#[test]
fn test_array_correct() {
let c: Result<Color, _> = [183, 65, 14].try_into();
assert!(c.is_ok());
assert_eq!(
c.unwrap(),
Color {
red: 183,
green: 65,
blue: 14
}
);
}
#[test]
fn test_slice_out_of_range_positive() {
let arr = [10000, 256, 1000];
assert_eq!(
Color::try_from(&arr[..]),
Err(IntoColorError::IntConversion)
);
}
#[test]
fn test_slice_out_of_range_negative() {
let arr = [-256, -1, -10];
assert_eq!(
Color::try_from(&arr[..]),
Err(IntoColorError::IntConversion)
);
}
#[test]
fn test_slice_sum() {
let arr = [-1, 255, 255];
assert_eq!(
Color::try_from(&arr[..]),
Err(IntoColorError::IntConversion)
);
}
#[test]
fn test_slice_correct() {
let v = vec![183, 65, 14];
let c: Result<Color, _> = Color::try_from(&v[..]);
assert!(c.is_ok());
assert_eq!(
c.unwrap(),
Color {
red: 183,
green: 65,
blue: 14
}
);
}
#[test]
fn test_slice_excess_length() {
let v = vec![0, 0, 0, 0];
assert_eq!(Color::try_from(&v[..]), Err(IntoColorError::BadLen));
}
#[test]
fn test_slice_insufficient_length() {
let v = vec![0, 0];
assert_eq!(Color::try_from(&v[..]), Err(IntoColorError::BadLen));
}
} | |
gremlin_test.go | // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gremlin
import (
"io"
"os"
"reflect"
"sort"
"testing"
"github.com/google/cayley/graph"
"github.com/google/cayley/quad"
"github.com/google/cayley/quad/cquads"
_ "github.com/google/cayley/graph/memstore"
_ "github.com/google/cayley/writer"
)
// This is a simple test graph used for testing the gremlin queries.
//
//   "follows" edges: alice -> bob, charlie -> bob, charlie -> dani,
//                    dani -> bob, dani -> greg, bob -> fred,
//                    emily -> fred, fred -> greg
//   "status" edges:  bob, dani and greg each have status "cool_person"
//
func makeTestSession(data []quad.Quad) *Session {
qs, _ := graph.NewQuadStore("memstore", "", nil)
w, _ := graph.NewQuadWriter("single", qs, nil)
for _, t := range data {
w.AddQuad(t)
}
return NewSession(qs, -1, false)
}
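// Sketch (not part of the original tests): the quads behind the graph above
// use the predicates "follows" and "status"; the real data is loaded from
// ../../data/testdata.nq. Two representative quads would look like:
//
//     quad.Quad{"alice", "follows", "bob", ""}
//     quad.Quad{"bob", "status", "cool_person", ""}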
var testQueries = []struct {
message string
query string
tag string
expect []string
}{
// Simple query tests.
{
message: "get a single vertex",
query: `
g.V("alice").All()
`,
expect: []string{"alice"},
},
{
message: "use .Out()",
query: `
g.V("alice").Out("follows").All()
`,
expect: []string{"bob"},
},
{
message: "use .In()",
query: `
g.V("bob").In("follows").All()
`,
expect: []string{"alice", "charlie", "dani"},
},
{
message: "use .Both()",
query: `
g.V("fred").Both("follows").All()
`,
expect: []string{"bob", "greg", "emily"},
},
{
message: "use .Tag()-.Is()-.Back()",
query: `
g.V("bob").In("follows").Tag("foo").Out("status").Is("cool_person").Back("foo").All()
`,
expect: []string{"dani"},
},
{
message: "separate .Tag()-.Is()-.Back()",
query: `
x = g.V("charlie").Out("follows").Tag("foo").Out("status").Is("cool_person").Back("foo")
x.In("follows").Is("dani").Back("foo").All()
`,
expect: []string{"bob"},
},
{
message: "do multiple .Back()s",
query: `
g.V("emily").Out("follows").As("f").Out("follows").Out("status").Is("cool_person").Back("f").In("follows").In("follows").As("acd").Out("status").Is("cool_person").Back("f").All()
`,
tag: "acd",
expect: []string{"dani"},
},
{
message: "use Except to filter out a single vertex",
query: `
g.V("alice", "bob").Except(g.V("alice")).All()
`,
expect: []string{"bob"},
},
{
message: "use chained Except",
query: `
g.V("alice", "bob", "charlie").Except(g.V("bob")).Except(g.V("charlie")).All()
`,
expect: []string{"alice"},
},
// Morphism tests.
{
message: "show simple morphism",
query: `
grandfollows = g.M().Out("follows").Out("follows")
g.V("charlie").Follow(grandfollows).All()
`,
expect: []string{"greg", "fred", "bob"},
},
{
message: "show reverse morphism",
query: `
grandfollows = g.M().Out("follows").Out("follows")
g.V("fred").FollowR(grandfollows).All()
`,
expect: []string{"alice", "charlie", "dani"},
},
// Intersection tests.
{
message: "show simple intersection",
query: `
function follows(x) { return g.V(x).Out("follows") }
follows("dani").And(follows("charlie")).All()
`,
expect: []string{"bob"},
},
{
message: "show simple morphism intersection",
query: `
grandfollows = g.M().Out("follows").Out("follows")
function gfollows(x) { return g.V(x).Follow(grandfollows) }
gfollows("alice").And(gfollows("charlie")).All()
`,
expect: []string{"fred"},
},
{
message: "show double morphism intersection",
query: `
grandfollows = g.M().Out("follows").Out("follows")
function gfollows(x) { return g.V(x).Follow(grandfollows) }
gfollows("emily").And(gfollows("charlie")).And(gfollows("bob")).All()
`,
expect: []string{"greg"},
},
{
message: "show reverse intersection",
query: `
grandfollows = g.M().Out("follows").Out("follows")
g.V("greg").FollowR(grandfollows).Intersect(g.V("fred").FollowR(grandfollows)).All()
`,
expect: []string{"charlie"},
},
{
message: "show standard sort of morphism intersection, continue follow",
query: `gfollowers = g.M().In("follows").In("follows")
function cool(x) { return g.V(x).As("a").Out("status").Is("cool_person").Back("a") }
cool("greg").Follow(gfollowers).Intersect(cool("bob").Follow(gfollowers)).All()
`,
expect: []string{"charlie"},
},
{
message: "test Or()",
query: `
g.V("bob").Out("follows").Or(g.V().Has("status", "cool_person")).All()
`,
expect: []string{"fred", "bob", "greg", "dani"},
},
// Gremlin Has tests.
{
message: "show a simple Has",
query: `
g.V().Has("status", "cool_person").All()
`,
expect: []string{"greg", "dani", "bob"},
},
{
message: "show a double Has",
query: `
g.V().Has("status", "cool_person").Has("follows", "fred").All()
`,
expect: []string{"bob"},
},
// Tag tests.
{
message: "show a simple save",
query: `
g.V().Save("status", "somecool").All()
`,
tag: "somecool",
expect: []string{"cool_person", "cool_person", "cool_person"},
},
{
message: "show a simple saveR",
query: `
g.V("cool_person").SaveR("status", "who").All()
`,
tag: "who",
expect: []string{"greg", "dani", "bob"},
},
{
message: "show an out save",
query: `
g.V("dani").Out(null, "pred").All()
`,
tag: "pred",
expect: []string{"follows", "follows", "status"},
},
{
message: "show a tag list",
query: `
g.V("dani").Out(null, ["pred", "foo", "bar"]).All()
`,
tag: "foo",
expect: []string{"follows", "follows", "status"},
},
{
message: "show a pred list",
query: `
g.V("dani").Out(["follows", "status"]).All()
`,
expect: []string{"bob", "greg", "cool_person"},
},
{
message: "show a predicate path",
query: `
g.V("dani").Out(g.V("follows"), "pred").All()
`,
expect: []string{"bob", "greg"},
},
{
message: "list all bob's incoming predicates",
query: `
g.V("bob").InPredicates().All()
`,
expect: []string{"follows"},
},
{
message: "list all in predicates",
query: `
g.V().InPredicates().All()
`,
expect: []string{"are", "follows", "status"},
},
{
message: "list all out predicates",
query: `
g.V().OutPredicates().All()
`,
expect: []string{"are", "follows", "status"},
},
}
func runQueryGetTag(g []quad.Quad, query string, tag string) []string {
js := makeTestSession(g)
c := make(chan interface{}, 5)
js.Execute(query, c, -1)
var results []string
for res := range c {
data := res.(*Result)
if data.val == nil {
val := data.actualResults[tag]
if val != nil {
results = append(results, js.qs.NameOf(val))
}
}
}
return results
}
func loadGraph(path string, t testing.TB) []quad.Quad {
var r io.Reader
var simpleGraph []quad.Quad
f, err := os.Open(path)
if err != nil {
t.Fatalf("Failed to open %q: %v", path, err)
}
defer f.Close()
r = f
dec := cquads.NewDecoder(r)
q1, err := dec.Unmarshal()
if err != nil {
t.Fatalf("Failed to Unmarshal: %v", err)
}
for ; err == nil; q1, err = dec.Unmarshal() {
simpleGraph = append(simpleGraph, q1)
}
return simpleGraph
}
func TestGremlin(t *testing.T) {
simpleGraph := loadGraph("../../data/testdata.nq", t)
for _, test := range testQueries {
if test.tag == "" {
test.tag = TopResultTag
}
got := runQueryGetTag(simpleGraph, test.query, test.tag)
sort.Strings(got)
sort.Strings(test.expect)
t.Log("testing", test.message)
if !reflect.DeepEqual(got, test.expect) {
t.Errorf("Failed to %s, got: %v expected: %v", test.message, got, test.expect)
}
}
}
var issue160TestGraph = []quad.Quad{
{"alice", "follows", "bob", ""},
{"bob", "follows", "alice", ""},
{"charlie", "follows", "bob", ""},
{"dani", "follows", "charlie", ""},
{"dani", "follows", "alice", ""},
{"alice", "is", "cool", ""},
{"bob", "is", "not cool", ""},
{"charlie", "is", "cool", ""},
{"danie", "is", "not cool", ""},
}
func TestIssue160(t *testing.T) {
query := `g.V().Tag('query').Out('follows').Out('follows').ForEach(function (item) { if (item.id !== item.query) g.Emit({ id: item.id }); })`
expect := []string{
"****\nid : alice\n",
"****\nid : bob\n",
"****\nid : bob\n",
"=> <nil>\n",
}
ses := makeTestSession(issue160TestGraph)
c := make(chan interface{}, 5)
go ses.Execute(query, c, 100)
var got []string
for res := range c {
func() {
defer func() {
if r := recover(); r != nil {
t.Errorf("Unexpected panic: %v", r)
}
}()
got = append(got, ses.Format(res))
}()
}
sort.Strings(got)
if !reflect.DeepEqual(got, expect) {
t.Errorf("Unexpected result, got: %q expected: %q", got, expect)
}
} | // Copyright 2014 The Cayley Authors. All rights reserved.
// |
|
log.go | package xact
import (
"mynewt.apache.org/newtmgr/nmxact/nmp"
"mynewt.apache.org/newtmgr/nmxact/sesn"
)
//////////////////////////////////////////////////////////////////////////////
// $read //
//////////////////////////////////////////////////////////////////////////////
type LogShowCmd struct {
CmdBase
Name string
Timestamp int64
Index uint32
}
func NewLogShowCmd() *LogShowCmd {
return &LogShowCmd{
CmdBase: NewCmdBase(),
}
}
type LogShowResult struct {
Rsp *nmp.LogShowRsp
}
func newLogShowResult() *LogShowResult {
return &LogShowResult{}
}
func (r *LogShowResult) Status() int {
return r.Rsp.Rc
}
func (c *LogShowCmd) Run(s sesn.Sesn) (Result, error) {
r := nmp.NewLogShowReq()
r.Name = c.Name
r.Timestamp = c.Timestamp
r.Index = c.Index
rsp, err := txReq(s, r.Msg(), &c.CmdBase)
if err != nil {
return nil, err
}
srsp := rsp.(*nmp.LogShowRsp)
res := newLogShowResult()
res.Rsp = srsp
return res, nil
}
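// Usage sketch (illustrative; `s` is assumed to be an already-open
// sesn.Sesn, and "reboot_log" is a placeholder log name):
//
//     cmd := NewLogShowCmd()
//     cmd.Name = "reboot_log"
//     res, err := cmd.Run(s)
//     if err == nil {
//         rc := res.(*LogShowResult).Status()
//         _ = rc
//     }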
//////////////////////////////////////////////////////////////////////////////
// $list //
//////////////////////////////////////////////////////////////////////////////
type LogListCmd struct {
CmdBase
}
func NewLogListCmd() *LogListCmd {
return &LogListCmd{
CmdBase: NewCmdBase(),
}
}
type LogListResult struct {
Rsp *nmp.LogListRsp
}
func newLogListResult() *LogListResult {
return &LogListResult{}
}
func (r *LogListResult) Status() int {
return r.Rsp.Rc
}
func (c *LogListCmd) Run(s sesn.Sesn) (Result, error) {
r := nmp.NewLogListReq()
rsp, err := txReq(s, r.Msg(), &c.CmdBase)
if err != nil {
return nil, err
}
srsp := rsp.(*nmp.LogListRsp)
res := newLogListResult()
res.Rsp = srsp
return res, nil
}
//////////////////////////////////////////////////////////////////////////////
// $module list //
//////////////////////////////////////////////////////////////////////////////
type LogModuleListCmd struct {
CmdBase
}
func NewLogModuleListCmd() *LogModuleListCmd {
return &LogModuleListCmd{
CmdBase: NewCmdBase(),
}
}
type LogModuleListResult struct {
Rsp *nmp.LogModuleListRsp
}
func newLogModuleListResult() *LogModuleListResult {
return &LogModuleListResult{}
}
func (r *LogModuleListResult) Status() int {
return r.Rsp.Rc
}
func (c *LogModuleListCmd) Run(s sesn.Sesn) (Result, error) {
r := nmp.NewLogModuleListReq()
rsp, err := txReq(s, r.Msg(), &c.CmdBase)
if err != nil {
return nil, err
}
srsp := rsp.(*nmp.LogModuleListRsp)
res := newLogModuleListResult()
res.Rsp = srsp
return res, nil
}
//////////////////////////////////////////////////////////////////////////////
// $level list //
//////////////////////////////////////////////////////////////////////////////
type LogLevelListCmd struct {
CmdBase
}
func NewLogLevelListCmd() *LogLevelListCmd {
return &LogLevelListCmd{
CmdBase: NewCmdBase(),
}
}
type LogLevelListResult struct {
Rsp *nmp.LogLevelListRsp
}
func newLogLevelListResult() *LogLevelListResult {
return &LogLevelListResult{}
}
func (r *LogLevelListResult) Status() int { | }
func (c *LogLevelListCmd) Run(s sesn.Sesn) (Result, error) {
r := nmp.NewLogLevelListReq()
rsp, err := txReq(s, r.Msg(), &c.CmdBase)
if err != nil {
return nil, err
}
srsp := rsp.(*nmp.LogLevelListRsp)
res := newLogLevelListResult()
res.Rsp = srsp
return res, nil
}
//////////////////////////////////////////////////////////////////////////////
// $clear //
//////////////////////////////////////////////////////////////////////////////
type LogClearCmd struct {
CmdBase
}
func NewLogClearCmd() *LogClearCmd {
return &LogClearCmd{
CmdBase: NewCmdBase(),
}
}
type LogClearResult struct {
Rsp *nmp.LogClearRsp
}
func newLogClearResult() *LogClearResult {
return &LogClearResult{}
}
func (r *LogClearResult) Status() int {
return r.Rsp.Rc
}
func (c *LogClearCmd) Run(s sesn.Sesn) (Result, error) {
r := nmp.NewLogClearReq()
rsp, err := txReq(s, r.Msg(), &c.CmdBase)
if err != nil {
return nil, err
}
srsp := rsp.(*nmp.LogClearRsp)
res := newLogClearResult()
res.Rsp = srsp
return res, nil
} | return r.Rsp.Rc |
AddMaster.spec.js | import { expect } from 'chai';
import { mount } from 'enzyme';
import { push } from 'react-router-redux';
import sinon from 'sinon';
import { AddMaster } from '~/domains/components';
import {
changeInput, expectDispatchOrStoreErrors, expectObjectDeepEquals, expectRequest,
} from '@/common';
| afterEach(() => {
sandbox.restore();
});
const dispatch = sandbox.stub();
it('submits form and redirects to domain', async () => {
AddMaster.trigger(dispatch);
const component = mount(dispatch.firstCall.args[0].body, '');
changeInput(component, 'email', '[email protected]');
changeInput(component, 'domain', 'test.com');
dispatch.reset();
await component.find('Form').props().onSubmit();
expect(dispatch.callCount).to.equal(1);
await expectDispatchOrStoreErrors(dispatch.firstCall.args[0], [
([fn]) => expectRequest(fn, '/domains/', {
method: 'POST',
body: {
domain: 'test.com',
soa_email: '[email protected]',
type: 'master',
},
}),
([pushResult]) => expectObjectDeepEquals(pushResult, push('/domains/test.com')),
]);
});
}); |
describe('domains/components/AddMaster', () => {
const sandbox = sinon.sandbox.create();
|
setup.py | from setuptools import setup
setup(name='orinoco',
version='0.1',
description='Sweet data integration',
author='Quartic Technologies', | packages=['orinoco'],
install_requires=[
'aiohttp',
'pyformance'
],
zip_safe=False) | author_email='[email protected]',
license='MIT', |
vpcgw_sdk.go | // This file was automatically generated. DO NOT EDIT.
// If you have any remark or suggestion do not hesitate to open an issue.
// Package vpcgw provides methods and message types of the vpcgw v1 API.
package vpcgw
import (
"bytes"
"encoding/json"
"fmt"
"net"
"net/http"
"net/url"
"strings"
"time"
"github.com/scaleway/scaleway-sdk-go/internal/errors"
"github.com/scaleway/scaleway-sdk-go/internal/marshaler"
"github.com/scaleway/scaleway-sdk-go/internal/parameter"
"github.com/scaleway/scaleway-sdk-go/namegenerator"
"github.com/scaleway/scaleway-sdk-go/scw"
)
// always import dependencies
var (
_ fmt.Stringer
_ json.Unmarshaler
_ url.URL
_ net.IP
_ http.Header
_ bytes.Reader
_ time.Time
_ = strings.Join
_ scw.ScalewayRequest
_ marshaler.Duration
_ scw.File
_ = parameter.AddToQuery
_ = namegenerator.GetRandomName
)
// API: VPC Public Gateway API
type API struct {
client *scw.Client
}
// NewAPI returns a API object from a Scaleway client.
func NewAPI(client *scw.Client) *API {
return &API{
client: client,
}
}
type DHCPEntryType string
const (
// DHCPEntryTypeUnknown is [insert doc].
DHCPEntryTypeUnknown = DHCPEntryType("unknown")
// DHCPEntryTypeReservation is [insert doc].
DHCPEntryTypeReservation = DHCPEntryType("reservation")
// DHCPEntryTypeLease is [insert doc].
DHCPEntryTypeLease = DHCPEntryType("lease")
)
func (enum DHCPEntryType) String() string {
if enum == "" {
// return default value if empty
return "unknown"
}
return string(enum)
}
func (enum DHCPEntryType) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, enum)), nil
}
func (enum *DHCPEntryType) UnmarshalJSON(data []byte) error {
tmp := ""
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
*enum = DHCPEntryType(DHCPEntryType(tmp).String())
return nil
}
type GatewayNetworkStatus string
const (
// GatewayNetworkStatusUnknown is [insert doc].
GatewayNetworkStatusUnknown = GatewayNetworkStatus("unknown")
// GatewayNetworkStatusCreated is [insert doc].
GatewayNetworkStatusCreated = GatewayNetworkStatus("created")
// GatewayNetworkStatusAttaching is [insert doc].
GatewayNetworkStatusAttaching = GatewayNetworkStatus("attaching")
// GatewayNetworkStatusConfiguring is [insert doc].
GatewayNetworkStatusConfiguring = GatewayNetworkStatus("configuring")
// GatewayNetworkStatusReady is [insert doc].
GatewayNetworkStatusReady = GatewayNetworkStatus("ready")
// GatewayNetworkStatusDetaching is [insert doc].
GatewayNetworkStatusDetaching = GatewayNetworkStatus("detaching")
// GatewayNetworkStatusDeleted is [insert doc].
GatewayNetworkStatusDeleted = GatewayNetworkStatus("deleted")
)
func (enum GatewayNetworkStatus) String() string {
if enum == "" {
// return default value if empty
return "unknown"
}
return string(enum)
}
func (enum GatewayNetworkStatus) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, enum)), nil
}
func (enum *GatewayNetworkStatus) UnmarshalJSON(data []byte) error {
tmp := ""
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
*enum = GatewayNetworkStatus(GatewayNetworkStatus(tmp).String())
return nil
}
type GatewayStatus string
const (
// GatewayStatusUnknown is [insert doc].
GatewayStatusUnknown = GatewayStatus("unknown")
// GatewayStatusStopped is [insert doc].
GatewayStatusStopped = GatewayStatus("stopped")
// GatewayStatusAllocating is [insert doc].
GatewayStatusAllocating = GatewayStatus("allocating")
// GatewayStatusConfiguring is [insert doc].
GatewayStatusConfiguring = GatewayStatus("configuring")
// GatewayStatusRunning is [insert doc].
GatewayStatusRunning = GatewayStatus("running")
// GatewayStatusStopping is [insert doc].
GatewayStatusStopping = GatewayStatus("stopping")
// GatewayStatusFailed is [insert doc].
GatewayStatusFailed = GatewayStatus("failed")
// GatewayStatusDeleting is [insert doc].
GatewayStatusDeleting = GatewayStatus("deleting")
// GatewayStatusDeleted is [insert doc].
GatewayStatusDeleted = GatewayStatus("deleted")
// GatewayStatusLocked is [insert doc].
GatewayStatusLocked = GatewayStatus("locked")
)
func (enum GatewayStatus) String() string {
if enum == "" {
// return default value if empty
return "unknown"
}
return string(enum)
}
func (enum GatewayStatus) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, enum)), nil
}
func (enum *GatewayStatus) UnmarshalJSON(data []byte) error {
tmp := ""
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
*enum = GatewayStatus(GatewayStatus(tmp).String())
return nil
}
type ListDHCPEntriesRequestOrderBy string
const (
// ListDHCPEntriesRequestOrderByCreatedAtAsc is [insert doc].
ListDHCPEntriesRequestOrderByCreatedAtAsc = ListDHCPEntriesRequestOrderBy("created_at_asc")
// ListDHCPEntriesRequestOrderByCreatedAtDesc is [insert doc].
ListDHCPEntriesRequestOrderByCreatedAtDesc = ListDHCPEntriesRequestOrderBy("created_at_desc")
// ListDHCPEntriesRequestOrderByIPAddressAsc is [insert doc].
ListDHCPEntriesRequestOrderByIPAddressAsc = ListDHCPEntriesRequestOrderBy("ip_address_asc")
// ListDHCPEntriesRequestOrderByIPAddressDesc is [insert doc].
ListDHCPEntriesRequestOrderByIPAddressDesc = ListDHCPEntriesRequestOrderBy("ip_address_desc")
// ListDHCPEntriesRequestOrderByHostnameAsc is [insert doc].
ListDHCPEntriesRequestOrderByHostnameAsc = ListDHCPEntriesRequestOrderBy("hostname_asc")
// ListDHCPEntriesRequestOrderByHostnameDesc is [insert doc].
ListDHCPEntriesRequestOrderByHostnameDesc = ListDHCPEntriesRequestOrderBy("hostname_desc")
)
func (enum ListDHCPEntriesRequestOrderBy) String() string {
if enum == "" {
// return default value if empty
return "created_at_asc"
}
return string(enum)
}
func (enum ListDHCPEntriesRequestOrderBy) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, enum)), nil
}
func (enum *ListDHCPEntriesRequestOrderBy) UnmarshalJSON(data []byte) error {
tmp := ""
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
*enum = ListDHCPEntriesRequestOrderBy(ListDHCPEntriesRequestOrderBy(tmp).String())
return nil
}
type ListDHCPsRequestOrderBy string
const (
// ListDHCPsRequestOrderByCreatedAtAsc is [insert doc].
ListDHCPsRequestOrderByCreatedAtAsc = ListDHCPsRequestOrderBy("created_at_asc")
// ListDHCPsRequestOrderByCreatedAtDesc is [insert doc].
ListDHCPsRequestOrderByCreatedAtDesc = ListDHCPsRequestOrderBy("created_at_desc")
// ListDHCPsRequestOrderBySubnetAsc is [insert doc].
ListDHCPsRequestOrderBySubnetAsc = ListDHCPsRequestOrderBy("subnet_asc")
// ListDHCPsRequestOrderBySubnetDesc is [insert doc].
ListDHCPsRequestOrderBySubnetDesc = ListDHCPsRequestOrderBy("subnet_desc")
)
func (enum ListDHCPsRequestOrderBy) String() string {
if enum == "" {
// return default value if empty
return "created_at_asc"
}
return string(enum)
}
func (enum ListDHCPsRequestOrderBy) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, enum)), nil
}
func (enum *ListDHCPsRequestOrderBy) UnmarshalJSON(data []byte) error {
tmp := ""
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
*enum = ListDHCPsRequestOrderBy(ListDHCPsRequestOrderBy(tmp).String())
return nil
}
type ListGatewayNetworksRequestOrderBy string
const (
// ListGatewayNetworksRequestOrderByCreatedAtAsc is [insert doc].
ListGatewayNetworksRequestOrderByCreatedAtAsc = ListGatewayNetworksRequestOrderBy("created_at_asc")
// ListGatewayNetworksRequestOrderByCreatedAtDesc is [insert doc].
ListGatewayNetworksRequestOrderByCreatedAtDesc = ListGatewayNetworksRequestOrderBy("created_at_desc")
// ListGatewayNetworksRequestOrderByStatusAsc is [insert doc].
ListGatewayNetworksRequestOrderByStatusAsc = ListGatewayNetworksRequestOrderBy("status_asc")
// ListGatewayNetworksRequestOrderByStatusDesc is [insert doc].
ListGatewayNetworksRequestOrderByStatusDesc = ListGatewayNetworksRequestOrderBy("status_desc")
)
func (enum ListGatewayNetworksRequestOrderBy) String() string {
if enum == "" {
// return default value if empty
return "created_at_asc"
}
return string(enum)
}
func (enum ListGatewayNetworksRequestOrderBy) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, enum)), nil
}
func (enum *ListGatewayNetworksRequestOrderBy) UnmarshalJSON(data []byte) error {
tmp := ""
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
*enum = ListGatewayNetworksRequestOrderBy(ListGatewayNetworksRequestOrderBy(tmp).String())
return nil
}
type ListGatewaysRequestOrderBy string
const (
// ListGatewaysRequestOrderByCreatedAtAsc is [insert doc].
ListGatewaysRequestOrderByCreatedAtAsc = ListGatewaysRequestOrderBy("created_at_asc")
// ListGatewaysRequestOrderByCreatedAtDesc is [insert doc].
ListGatewaysRequestOrderByCreatedAtDesc = ListGatewaysRequestOrderBy("created_at_desc")
// ListGatewaysRequestOrderByNameAsc is [insert doc].
ListGatewaysRequestOrderByNameAsc = ListGatewaysRequestOrderBy("name_asc")
// ListGatewaysRequestOrderByNameDesc is [insert doc].
ListGatewaysRequestOrderByNameDesc = ListGatewaysRequestOrderBy("name_desc")
// ListGatewaysRequestOrderByTypeAsc is [insert doc].
ListGatewaysRequestOrderByTypeAsc = ListGatewaysRequestOrderBy("type_asc")
// ListGatewaysRequestOrderByTypeDesc is [insert doc].
ListGatewaysRequestOrderByTypeDesc = ListGatewaysRequestOrderBy("type_desc")
// ListGatewaysRequestOrderByStatusAsc is [insert doc].
ListGatewaysRequestOrderByStatusAsc = ListGatewaysRequestOrderBy("status_asc")
// ListGatewaysRequestOrderByStatusDesc is [insert doc].
ListGatewaysRequestOrderByStatusDesc = ListGatewaysRequestOrderBy("status_desc")
)
func (enum ListGatewaysRequestOrderBy) String() string {
if enum == "" {
// return default value if empty
return "created_at_asc"
}
return string(enum)
}
func (enum ListGatewaysRequestOrderBy) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, enum)), nil
}
func (enum *ListGatewaysRequestOrderBy) UnmarshalJSON(data []byte) error {
tmp := ""
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
*enum = ListGatewaysRequestOrderBy(ListGatewaysRequestOrderBy(tmp).String())
return nil
}
type ListIPsRequestOrderBy string
const (
// ListIPsRequestOrderByCreatedAtAsc is [insert doc].
ListIPsRequestOrderByCreatedAtAsc = ListIPsRequestOrderBy("created_at_asc")
// ListIPsRequestOrderByCreatedAtDesc is [insert doc].
ListIPsRequestOrderByCreatedAtDesc = ListIPsRequestOrderBy("created_at_desc")
// ListIPsRequestOrderByIPAsc is [insert doc].
ListIPsRequestOrderByIPAsc = ListIPsRequestOrderBy("ip_asc")
// ListIPsRequestOrderByIPDesc is [insert doc].
ListIPsRequestOrderByIPDesc = ListIPsRequestOrderBy("ip_desc")
// ListIPsRequestOrderByReverseAsc is [insert doc].
ListIPsRequestOrderByReverseAsc = ListIPsRequestOrderBy("reverse_asc")
// ListIPsRequestOrderByReverseDesc is [insert doc].
ListIPsRequestOrderByReverseDesc = ListIPsRequestOrderBy("reverse_desc")
)
func (enum ListIPsRequestOrderBy) String() string {
if enum == "" {
// return default value if empty
return "created_at_asc"
}
return string(enum)
}
func (enum ListIPsRequestOrderBy) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, enum)), nil
}
func (enum *ListIPsRequestOrderBy) UnmarshalJSON(data []byte) error {
tmp := ""
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
*enum = ListIPsRequestOrderBy(ListIPsRequestOrderBy(tmp).String())
return nil
}
type ListPATRulesRequestOrderBy string
const (
// ListPATRulesRequestOrderByCreatedAtAsc is [insert doc].
ListPATRulesRequestOrderByCreatedAtAsc = ListPATRulesRequestOrderBy("created_at_asc")
// ListPATRulesRequestOrderByCreatedAtDesc is [insert doc].
ListPATRulesRequestOrderByCreatedAtDesc = ListPATRulesRequestOrderBy("created_at_desc")
// ListPATRulesRequestOrderByPublicPortAsc is [insert doc].
ListPATRulesRequestOrderByPublicPortAsc = ListPATRulesRequestOrderBy("public_port_asc")
// ListPATRulesRequestOrderByPublicPortDesc is [insert doc].
ListPATRulesRequestOrderByPublicPortDesc = ListPATRulesRequestOrderBy("public_port_desc")
)
func (enum ListPATRulesRequestOrderBy) String() string {
if enum == "" {
// return default value if empty
return "created_at_asc"
}
return string(enum)
}
func (enum ListPATRulesRequestOrderBy) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, enum)), nil
}
func (enum *ListPATRulesRequestOrderBy) UnmarshalJSON(data []byte) error {
tmp := ""
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
*enum = ListPATRulesRequestOrderBy(ListPATRulesRequestOrderBy(tmp).String())
return nil
}
type PATRuleProtocol string
const (
// PATRuleProtocolUnknown is [insert doc].
PATRuleProtocolUnknown = PATRuleProtocol("unknown")
// PATRuleProtocolBoth is [insert doc].
PATRuleProtocolBoth = PATRuleProtocol("both")
// PATRuleProtocolTCP is [insert doc].
PATRuleProtocolTCP = PATRuleProtocol("tcp")
// PATRuleProtocolUDP is [insert doc].
PATRuleProtocolUDP = PATRuleProtocol("udp")
)
func (enum PATRuleProtocol) String() string {
if enum == "" {
// return default value if empty
return "unknown"
}
return string(enum)
}
func (enum PATRuleProtocol) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, enum)), nil
}
func (enum *PATRuleProtocol) UnmarshalJSON(data []byte) error {
tmp := ""
if err := json.Unmarshal(data, &tmp); err != nil {
return err
}
*enum = PATRuleProtocol(PATRuleProtocol(tmp).String())
return nil
}
// DHCP: dhcp
type DHCP struct {
// ID: ID of the DHCP config
ID string `json:"id"`
// OrganizationID: owning organization
OrganizationID string `json:"organization_id"`
// ProjectID: owning project
ProjectID string `json:"project_id"`
// CreatedAt: configuration creation date
CreatedAt *time.Time `json:"created_at"`
// UpdatedAt: configuration last modification date
UpdatedAt *time.Time `json:"updated_at"`
// Subnet: subnet for the DHCP server
Subnet scw.IPNet `json:"subnet"`
// Address: address of the DHCP server
//
// Address of the DHCP server. This will be the gateway's address in the private network. It must be part of the config's subnet.
//
Address net.IP `json:"address"`
// PoolLow: low IP (included) of the dynamic address pool. Must be in the config's subnet
PoolLow net.IP `json:"pool_low"`
// PoolHigh: high IP (included) of the dynamic address pool. Must be in the config's subnet
PoolHigh net.IP `json:"pool_high"`
// EnableDynamic: whether to enable dynamic pooling of IPs
//
// Whether to enable dynamic pooling of IPs. By turning the dynamic pool off, only pre-existing DHCP reservations will be handed out.
//
EnableDynamic bool `json:"enable_dynamic"`
// ValidLifetime: how long, in seconds, DHCP entries will be valid for
ValidLifetime *scw.Duration `json:"valid_lifetime"`
// RenewTimer: after how long a renew will be attempted
//
// After how long, in seconds, a renew will be attempted. Must be 30s lower than `rebind_timer`.
//
RenewTimer *scw.Duration `json:"renew_timer"`
// RebindTimer: after how long a DHCP client will query for a new lease if previous renews fail
//
// After how long, in seconds, a DHCP client will query for a new lease if previous renews fail. Must be 30s lower than `valid_lifetime`.
//
RebindTimer *scw.Duration `json:"rebind_timer"`
// PushDefaultRoute: whether the gateway should push a default route to DHCP clients or only hand out IPs
PushDefaultRoute bool `json:"push_default_route"`
// PushDNSServer: whether the gateway should push custom DNS servers to clients
//
// Whether the gateway should push custom DNS servers to clients. This allows for instance hostname -> IP resolution.
//
PushDNSServer bool `json:"push_dns_server"`
// DNSServersOverride: override the DNS server list pushed to DHCP clients, instead of the gateway itself
DNSServersOverride []string `json:"dns_servers_override"`
// DNSSearch: add search paths to the pushed DNS configuration
DNSSearch []string `json:"dns_search"`
// DNSLocalName: TLD given to hostnames in the Private Network
//
// TLD given to hostnames in the Private Network. If an instance with hostname `foo` gets a lease, and this is set to `bar`, `foo.bar` will resolve.
//
DNSLocalName string `json:"dns_local_name"`
// Zone: zone this configuration is available in
Zone scw.Zone `json:"zone"`
}
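// Illustrative note (not generated code): the timer fields above must be
// mutually consistent. Per the field docs, renew_timer must be 30s lower
// than rebind_timer, and rebind_timer 30s lower than valid_lifetime. The
// documented defaults satisfy this:
//
//     valid_lifetime = 3600s (1h)
//     rebind_timer   = 3060s (51m)
//     renew_timer    = 3000s (50m)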
// DHCPEntry: dhcp entry
type DHCPEntry struct {
// ID: entry ID
ID string `json:"id"`
// CreatedAt: configuration creation date
CreatedAt *time.Time `json:"created_at"`
// UpdatedAt: configuration last modification date
UpdatedAt *time.Time `json:"updated_at"`
// GatewayNetworkID: owning GatewayNetwork
GatewayNetworkID string `json:"gateway_network_id"`
// MacAddress: MAC address of the client machine
MacAddress string `json:"mac_address"`
// IPAddress: assigned IP address
IPAddress net.IP `json:"ip_address"`
// Hostname: hostname of the client machine
Hostname string `json:"hostname"`
// Type: entry type, either static (DHCP reservation) or dynamic (DHCP lease)
//
// Default value: unknown
Type DHCPEntryType `json:"type"`
// Zone: zone this entry is available in
Zone scw.Zone `json:"zone"`
}
// Gateway: gateway
type Gateway struct {
// ID: ID of the gateway
ID string `json:"id"`
// OrganizationID: owning organization
OrganizationID string `json:"organization_id"`
// ProjectID: owning project
ProjectID string `json:"project_id"`
// CreatedAt: gateway creation date
CreatedAt *time.Time `json:"created_at"`
// UpdatedAt: gateway last modification date
UpdatedAt *time.Time `json:"updated_at"`
// Type: gateway type
Type *GatewayType `json:"type"`
// Status: gateway's current status
//
// Default value: unknown
Status GatewayStatus `json:"status"`
// Name: name of the gateway
Name string `json:"name"`
// Tags: tags of the gateway
Tags []string `json:"tags"`
// IP: public IP of the gateway
IP *IP `json:"ip"`
// GatewayNetworks: gatewayNetworks attached to the gateway
GatewayNetworks []*GatewayNetwork `json:"gateway_networks"`
// UpstreamDNSServers: override the gateway's default recursive DNS servers
UpstreamDNSServers []string `json:"upstream_dns_servers"`
// Version: version of the running gateway software
Version *string `json:"version"`
// CanUpgradeTo: newly available gateway software version that can be updated to
CanUpgradeTo *string `json:"can_upgrade_to"`
// BastionEnabled: whether SSH bastion is enabled on the gateway
BastionEnabled bool `json:"bastion_enabled"`
// BastionPort: port of the SSH bastion
BastionPort uint32 `json:"bastion_port"`
// Zone: zone the gateway is available in
Zone scw.Zone `json:"zone"`
}
// GatewayNetwork: gateway network
type GatewayNetwork struct {
// ID: ID of the connection
ID string `json:"id"` | UpdatedAt *time.Time `json:"updated_at"`
// GatewayID: ID of the connected gateway
GatewayID string `json:"gateway_id"`
// PrivateNetworkID: ID of the connected private network
PrivateNetworkID string `json:"private_network_id"`
// MacAddress: MAC address of the gateway in the network (if the gateway is up and running)
MacAddress *string `json:"mac_address"`
// EnableMasquerade: whether the gateway masquerades traffic for this network
EnableMasquerade bool `json:"enable_masquerade"`
// Status: current status of the gateway network connection
//
// Default value: unknown
Status GatewayNetworkStatus `json:"status"`
// DHCP: DHCP configuration for the connected private network
DHCP *DHCP `json:"dhcp"`
// EnableDHCP: whether DHCP is enabled on the connected Private Network
EnableDHCP bool `json:"enable_dhcp"`
// Address: address of the Gateway in CIDR form to use when DHCP is not used
Address *scw.IPNet `json:"address"`
// Zone: zone the connection lives in
Zone scw.Zone `json:"zone"`
}
// GatewayType: gateway type
type GatewayType struct {
// Name: type name
Name string `json:"name"`
// Bandwidth: bandwidth, in bps, the gateway has
//
// Bandwidth, in bps, the gateway has. This is the public bandwidth to the outer internet, and the internal bandwidth to each connected Private Networks.
//
Bandwidth uint64 `json:"bandwidth"`
// Zone: zone the type is available in
Zone scw.Zone `json:"zone"`
}
// IP: ip
type IP struct {
// ID: IP ID
ID string `json:"id"`
// OrganizationID: owning organization
OrganizationID string `json:"organization_id"`
// ProjectID: owning project
ProjectID string `json:"project_id"`
// CreatedAt: configuration creation date
CreatedAt *time.Time `json:"created_at"`
// UpdatedAt: configuration last modification date
UpdatedAt *time.Time `json:"updated_at"`
// Tags: tags associated with the IP
Tags []string `json:"tags"`
// Address: the IP itself
Address net.IP `json:"address"`
// Reverse: reverse domain name for the IP address
Reverse *string `json:"reverse"`
// GatewayID: gateway associated to the IP
GatewayID *string `json:"gateway_id"`
// Zone: zone this IP is available in
Zone scw.Zone `json:"zone"`
}
// ListDHCPEntriesResponse: list dhcp entries response
type ListDHCPEntriesResponse struct {
// DHCPEntries: DHCP entries in this page
DHCPEntries []*DHCPEntry `json:"dhcp_entries"`
// TotalCount: total DHCP entries matching the filter
TotalCount uint32 `json:"total_count"`
}
// ListDHCPsResponse: list DHCPs response
type ListDHCPsResponse struct {
// Dhcps: first page of DHCP configs
Dhcps []*DHCP `json:"dhcps"`
// TotalCount: total DHCP configs matching the filter
TotalCount uint32 `json:"total_count"`
}
// ListGatewayNetworksResponse: list gateway networks response
type ListGatewayNetworksResponse struct {
// GatewayNetworks: gatewayNetworks in this page
GatewayNetworks []*GatewayNetwork `json:"gateway_networks"`
// TotalCount: total GatewayNetworks count matching the filter
TotalCount uint32 `json:"total_count"`
}
// ListGatewayTypesResponse: list gateway types response
type ListGatewayTypesResponse struct {
// Types: available types of gateway
Types []*GatewayType `json:"types"`
}
// ListGatewaysResponse: list gateways response
type ListGatewaysResponse struct {
// Gateways: gateways in this page
Gateways []*Gateway `json:"gateways"`
// TotalCount: total count of gateways matching the filter
TotalCount uint32 `json:"total_count"`
}
// ListIPsResponse: list IPs response
type ListIPsResponse struct {
// IPs: IPs in this page
IPs []*IP `json:"ips"`
// TotalCount: total IP count matching the filter
TotalCount uint32 `json:"total_count"`
}
// ListPATRulesResponse: list pat rules response
type ListPATRulesResponse struct {
// PatRules: this page of PAT rules matching the filter
PatRules []*PATRule `json:"pat_rules"`
// TotalCount: total PAT rules matching the filter
TotalCount uint32 `json:"total_count"`
}
// PATRule: pat rule
type PATRule struct {
// ID: rule ID
ID string `json:"id"`
// GatewayID: gateway the PAT rule applies to
GatewayID string `json:"gateway_id"`
// CreatedAt: rule creation date
CreatedAt *time.Time `json:"created_at"`
// UpdatedAt: rule last modification date
UpdatedAt *time.Time `json:"updated_at"`
// PublicPort: public port to listen on
PublicPort uint32 `json:"public_port"`
// PrivateIP: private IP to forward data to
PrivateIP net.IP `json:"private_ip"`
// PrivatePort: private port to translate to
PrivatePort uint32 `json:"private_port"`
// Protocol: protocol the rule applies to
//
// Default value: unknown
Protocol PATRuleProtocol `json:"protocol"`
// Zone: zone this rule is available in
Zone scw.Zone `json:"zone"`
}
// SetDHCPEntriesRequestEntry: set dhcp entries request. entry
type SetDHCPEntriesRequestEntry struct {
// MacAddress: MAC address to give a static entry to
//
// MAC address to give a static entry to. A matching entry will be upgraded to a reservation, and a matching reservation will be updated.
//
MacAddress string `json:"mac_address"`
// IPAddress: IP address to give to the machine
IPAddress net.IP `json:"ip_address"`
}
// SetDHCPEntriesResponse: set dhcp entries response
type SetDHCPEntriesResponse struct {
// DHCPEntries: list of DHCP entries
DHCPEntries []*DHCPEntry `json:"dhcp_entries"`
}
// SetPATRulesRequestRule: set pat rules request. rule
type SetPATRulesRequestRule struct {
// PublicPort: public port to listen on
//
// Public port to listen on. Uniquely identifies the rule, and a matching rule will be updated with the new parameters.
//
PublicPort uint32 `json:"public_port"`
// PrivateIP: private IP to forward data to
PrivateIP net.IP `json:"private_ip"`
// PrivatePort: private port to translate to
PrivatePort uint32 `json:"private_port"`
// Protocol: protocol the rule should apply to
//
// Default value: unknown
Protocol PATRuleProtocol `json:"protocol"`
}
// SetPATRulesResponse: set pat rules response
type SetPATRulesResponse struct {
// PatRules: list of PAT rules
PatRules []*PATRule `json:"pat_rules"`
}
// Service API
type ListGatewaysRequest struct {
Zone scw.Zone `json:"-"`
// OrderBy: order in which to return results
//
// Default value: created_at_asc
OrderBy ListGatewaysRequestOrderBy `json:"-"`
// Page: page number
Page *int32 `json:"-"`
// PageSize: gateways per page
PageSize *uint32 `json:"-"`
// OrganizationID: include only gateways in this organization
OrganizationID *string `json:"-"`
// ProjectID: include only gateways in this project
ProjectID *string `json:"-"`
// Name: filter gateways including this name
Name *string `json:"-"`
// Tags: filter gateways with these tags
Tags []string `json:"-"`
// Type: filter gateways of this type
Type *string `json:"-"`
// Status: filter gateways in this status (unknown for any)
//
// Default value: unknown
Status GatewayStatus `json:"-"`
// PrivateNetworkID: filter gateways attached to this private network
PrivateNetworkID *string `json:"-"`
}
// ListGateways: list VPC Public Gateways
func (s *API) ListGateways(req *ListGatewaysRequest, opts ...scw.RequestOption) (*ListGatewaysResponse, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
defaultPageSize, exist := s.client.GetDefaultPageSize()
if (req.PageSize == nil || *req.PageSize == 0) && exist {
req.PageSize = &defaultPageSize
}
query := url.Values{}
parameter.AddToQuery(query, "order_by", req.OrderBy)
parameter.AddToQuery(query, "page", req.Page)
parameter.AddToQuery(query, "page_size", req.PageSize)
parameter.AddToQuery(query, "organization_id", req.OrganizationID)
parameter.AddToQuery(query, "project_id", req.ProjectID)
parameter.AddToQuery(query, "name", req.Name)
parameter.AddToQuery(query, "tags", req.Tags)
parameter.AddToQuery(query, "type", req.Type)
parameter.AddToQuery(query, "status", req.Status)
parameter.AddToQuery(query, "private_network_id", req.PrivateNetworkID)
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "GET",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/gateways",
Query: query,
Headers: http.Header{},
}
var resp ListGatewaysResponse
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type GetGatewayRequest struct {
Zone scw.Zone `json:"-"`
// GatewayID: ID of the gateway to fetch
GatewayID string `json:"-"`
}
// GetGateway: get a VPC Public Gateway
func (s *API) GetGateway(req *GetGatewayRequest, opts ...scw.RequestOption) (*Gateway, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.GatewayID) == "" {
return nil, errors.New("field GatewayID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "GET",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/gateways/" + fmt.Sprint(req.GatewayID) + "",
Headers: http.Header{},
}
var resp Gateway
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type CreateGatewayRequest struct {
Zone scw.Zone `json:"-"`
// ProjectID: project to create the gateway into
ProjectID string `json:"project_id"`
// Name: name of the gateway
Name string `json:"name"`
// Tags: tags for the gateway
Tags []string `json:"tags"`
// Type: gateway type
Type string `json:"type"`
// UpstreamDNSServers: override the gateway's default recursive DNS servers, if DNS features are enabled
UpstreamDNSServers []string `json:"upstream_dns_servers"`
// IPID: attach an existing IP to the gateway
IPID *string `json:"ip_id"`
}
// CreateGateway: create a VPC Public Gateway
func (s *API) CreateGateway(req *CreateGatewayRequest, opts ...scw.RequestOption) (*Gateway, error) {
var err error
if req.ProjectID == "" {
defaultProjectID, _ := s.client.GetDefaultProjectID()
req.ProjectID = defaultProjectID
}
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if req.Name == "" {
req.Name = namegenerator.GetRandomName("gw")
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "POST",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/gateways",
Headers: http.Header{},
}
err = scwReq.SetBody(req)
if err != nil {
return nil, err
}
var resp Gateway
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
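// Usage sketch (illustrative): creating a gateway from a configured
// scw.Client. The zone and the commercial type "VPC-GW-S" are example
// values; available types come from the gateway types listing.
//
//     api := NewAPI(client)
//     gw, err := api.CreateGateway(&CreateGatewayRequest{
//         Zone: scw.ZoneFrPar1,
//         Name: "my-gateway",
//         Type: "VPC-GW-S",
//     })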
type UpdateGatewayRequest struct {
Zone scw.Zone `json:"-"`
// GatewayID: ID of the gateway to update
GatewayID string `json:"-"`
// Name: name of the gateway
Name *string `json:"name"`
// Tags: tags for the gateway
Tags *[]string `json:"tags"`
// UpstreamDNSServers: override the gateway's default recursive DNS servers, if DNS features are enabled
UpstreamDNSServers *[]string `json:"upstream_dns_servers"`
// EnableBastion: enable SSH bastion on the gateway
EnableBastion *bool `json:"enable_bastion"`
// BastionPort: port of the SSH bastion
BastionPort *uint32 `json:"bastion_port"`
}
// UpdateGateway: update a VPC Public Gateway
func (s *API) UpdateGateway(req *UpdateGatewayRequest, opts ...scw.RequestOption) (*Gateway, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.GatewayID) == "" {
return nil, errors.New("field GatewayID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "PATCH",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/gateways/" + fmt.Sprint(req.GatewayID) + "",
Headers: http.Header{},
}
err = scwReq.SetBody(req)
if err != nil {
return nil, err
}
var resp Gateway
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type DeleteGatewayRequest struct {
Zone scw.Zone `json:"-"`
// GatewayID: ID of the gateway to delete
GatewayID string `json:"-"`
// CleanupDHCP: whether to cleanup attached DHCP configurations
//
// Whether to cleanup attached DHCP configurations (if any, and if not attached to another Gateway Network).
//
CleanupDHCP bool `json:"-"`
}
// DeleteGateway: delete a VPC Public Gateway
func (s *API) DeleteGateway(req *DeleteGatewayRequest, opts ...scw.RequestOption) error {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
query := url.Values{}
parameter.AddToQuery(query, "cleanup_dhcp", req.CleanupDHCP)
if fmt.Sprint(req.Zone) == "" {
return errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.GatewayID) == "" {
return errors.New("field GatewayID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "DELETE",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/gateways/" + fmt.Sprint(req.GatewayID) + "",
Query: query,
Headers: http.Header{},
}
err = s.client.Do(scwReq, nil, opts...)
if err != nil {
return err
}
return nil
}
type UpgradeGatewayRequest struct {
Zone scw.Zone `json:"-"`
// GatewayID: ID of the gateway to upgrade
GatewayID string `json:"-"`
}
// UpgradeGateway: upgrade a VPC Public Gateway to the latest version
func (s *API) UpgradeGateway(req *UpgradeGatewayRequest, opts ...scw.RequestOption) (*Gateway, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.GatewayID) == "" {
return nil, errors.New("field GatewayID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "POST",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/gateways/" + fmt.Sprint(req.GatewayID) + "/upgrade",
Headers: http.Header{},
}
err = scwReq.SetBody(req)
if err != nil {
return nil, err
}
var resp Gateway
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type ListGatewayNetworksRequest struct {
Zone scw.Zone `json:"-"`
// OrderBy: order in which to return results
//
// Default value: created_at_asc
OrderBy ListGatewayNetworksRequestOrderBy `json:"-"`
// Page: page number
Page *int32 `json:"-"`
// PageSize: gatewayNetworks per page
PageSize *uint32 `json:"-"`
// GatewayID: filter by gateway
GatewayID *string `json:"-"`
// PrivateNetworkID: filter by private network
PrivateNetworkID *string `json:"-"`
// EnableMasquerade: filter by masquerade enablement
EnableMasquerade *bool `json:"-"`
// DHCPID: filter by DHCP configuration
DHCPID *string `json:"-"`
// Status: filter GatewayNetworks by this status (unknown for any)
//
// Default value: unknown
Status GatewayNetworkStatus `json:"-"`
}
// ListGatewayNetworks: list gateway connections to Private Networks
func (s *API) ListGatewayNetworks(req *ListGatewayNetworksRequest, opts ...scw.RequestOption) (*ListGatewayNetworksResponse, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
defaultPageSize, exist := s.client.GetDefaultPageSize()
if (req.PageSize == nil || *req.PageSize == 0) && exist {
req.PageSize = &defaultPageSize
}
query := url.Values{}
parameter.AddToQuery(query, "order_by", req.OrderBy)
parameter.AddToQuery(query, "page", req.Page)
parameter.AddToQuery(query, "page_size", req.PageSize)
parameter.AddToQuery(query, "gateway_id", req.GatewayID)
parameter.AddToQuery(query, "private_network_id", req.PrivateNetworkID)
parameter.AddToQuery(query, "enable_masquerade", req.EnableMasquerade)
parameter.AddToQuery(query, "dhcp_id", req.DHCPID)
parameter.AddToQuery(query, "status", req.Status)
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "GET",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/gateway-networks",
Query: query,
Headers: http.Header{},
}
var resp ListGatewayNetworksResponse
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type GetGatewayNetworkRequest struct {
Zone scw.Zone `json:"-"`
// GatewayNetworkID: ID of the GatewayNetwork to fetch
GatewayNetworkID string `json:"-"`
}
// GetGatewayNetwork: get a gateway connection to a Private Network
func (s *API) GetGatewayNetwork(req *GetGatewayNetworkRequest, opts ...scw.RequestOption) (*GatewayNetwork, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.GatewayNetworkID) == "" {
return nil, errors.New("field GatewayNetworkID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "GET",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/gateway-networks/" + fmt.Sprint(req.GatewayNetworkID) + "",
Headers: http.Header{},
}
var resp GatewayNetwork
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type CreateGatewayNetworkRequest struct {
Zone scw.Zone `json:"-"`
// GatewayID: gateway to connect
GatewayID string `json:"gateway_id"`
// PrivateNetworkID: private Network to connect
PrivateNetworkID string `json:"private_network_id"`
// EnableMasquerade: whether to enable masquerade on this network
EnableMasquerade bool `json:"enable_masquerade"`
// DHCPID: existing configuration
// Precisely one of Address, DHCPID must be set.
DHCPID *string `json:"dhcp_id,omitempty"`
// Address: static IP address in CIDR format to use without DHCP
// Precisely one of Address, DHCPID must be set.
Address *scw.IPNet `json:"address,omitempty"`
// EnableDHCP: whether to enable DHCP on this Private Network
//
// Whether to enable DHCP on this Private Network. Defaults to `true` if either `dhcp_id` or `dhcp` is present. If set to `true`, requires that either `dhcp_id` or `dhcp` be present.
//
EnableDHCP *bool `json:"enable_dhcp"`
}
// CreateGatewayNetwork: attach a gateway to a Private Network
func (s *API) CreateGatewayNetwork(req *CreateGatewayNetworkRequest, opts ...scw.RequestOption) (*GatewayNetwork, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "POST",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/gateway-networks",
Headers: http.Header{},
}
err = scwReq.SetBody(req)
if err != nil {
return nil, err
}
var resp GatewayNetwork
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
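// Usage sketch (illustrative): attaching a gateway to a Private Network,
// reusing an existing DHCP configuration. Exactly one of DHCPID or Address
// may be set; gw, pnID and dhcpID below are placeholders.
//
//     gn, err := api.CreateGatewayNetwork(&CreateGatewayNetworkRequest{
//         Zone:             scw.ZoneFrPar1,
//         GatewayID:        gw.ID,
//         PrivateNetworkID: pnID,
//         DHCPID:           scw.StringPtr(dhcpID),
//         EnableMasquerade: true,
//     })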
type UpdateGatewayNetworkRequest struct {
Zone scw.Zone `json:"-"`
// GatewayNetworkID: ID of the GatewayNetwork to update
GatewayNetworkID string `json:"-"`
// EnableMasquerade: new masquerade enablement
EnableMasquerade *bool `json:"enable_masquerade"`
// DHCPID: new DHCP configuration
// Precisely one of Address, DHCPID must be set.
DHCPID *string `json:"dhcp_id,omitempty"`
// EnableDHCP: whether to enable DHCP on the connected Private Network
EnableDHCP *bool `json:"enable_dhcp"`
// Address: new static IP address
// Precisely one of Address, DHCPID must be set.
Address *scw.IPNet `json:"address,omitempty"`
}
// UpdateGatewayNetwork: update a gateway connection to a Private Network
func (s *API) UpdateGatewayNetwork(req *UpdateGatewayNetworkRequest, opts ...scw.RequestOption) (*GatewayNetwork, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.GatewayNetworkID) == "" {
return nil, errors.New("field GatewayNetworkID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "PATCH",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/gateway-networks/" + fmt.Sprint(req.GatewayNetworkID) + "",
Headers: http.Header{},
}
err = scwReq.SetBody(req)
if err != nil {
return nil, err
}
var resp GatewayNetwork
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type DeleteGatewayNetworkRequest struct {
Zone scw.Zone `json:"-"`
// GatewayNetworkID: gatewayNetwork to delete
GatewayNetworkID string `json:"-"`
// CleanupDHCP: whether to cleanup the attached DHCP configuration
//
// Whether to cleanup the attached DHCP configuration (if any, and if not attached to another gateway_network).
//
CleanupDHCP bool `json:"-"`
}
// DeleteGatewayNetwork: detach a gateway from a Private Network
func (s *API) DeleteGatewayNetwork(req *DeleteGatewayNetworkRequest, opts ...scw.RequestOption) error {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
query := url.Values{}
parameter.AddToQuery(query, "cleanup_dhcp", req.CleanupDHCP)
if fmt.Sprint(req.Zone) == "" {
return errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.GatewayNetworkID) == "" {
return errors.New("field GatewayNetworkID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "DELETE",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/gateway-networks/" + fmt.Sprint(req.GatewayNetworkID) + "",
Query: query,
Headers: http.Header{},
}
err = s.client.Do(scwReq, nil, opts...)
if err != nil {
return err
}
return nil
}
type ListDHCPsRequest struct {
Zone scw.Zone `json:"-"`
// OrderBy: order in which to return results
//
// Default value: created_at_asc
OrderBy ListDHCPsRequestOrderBy `json:"-"`
// Page: page number
Page *int32 `json:"-"`
// PageSize: DHCP configurations per page
PageSize *uint32 `json:"-"`
// OrganizationID: include only DHCPs in this organization
OrganizationID *string `json:"-"`
// ProjectID: include only DHCPs in this project
ProjectID *string `json:"-"`
// Address: filter on gateway address
Address *net.IP `json:"-"`
// HasAddress: filter on subnets containing address
HasAddress *net.IP `json:"-"`
}
// ListDHCPs: list DHCP configurations
func (s *API) ListDHCPs(req *ListDHCPsRequest, opts ...scw.RequestOption) (*ListDHCPsResponse, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
defaultPageSize, exist := s.client.GetDefaultPageSize()
if (req.PageSize == nil || *req.PageSize == 0) && exist {
req.PageSize = &defaultPageSize
}
query := url.Values{}
parameter.AddToQuery(query, "order_by", req.OrderBy)
parameter.AddToQuery(query, "page", req.Page)
parameter.AddToQuery(query, "page_size", req.PageSize)
parameter.AddToQuery(query, "organization_id", req.OrganizationID)
parameter.AddToQuery(query, "project_id", req.ProjectID)
parameter.AddToQuery(query, "address", req.Address)
parameter.AddToQuery(query, "has_address", req.HasAddress)
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "GET",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/dhcps",
Query: query,
Headers: http.Header{},
}
var resp ListDHCPsResponse
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type GetDHCPRequest struct {
Zone scw.Zone `json:"-"`
// DHCPID: ID of the DHCP config to fetch
DHCPID string `json:"-"`
}
// GetDHCP: get a DHCP configuration
func (s *API) GetDHCP(req *GetDHCPRequest, opts ...scw.RequestOption) (*DHCP, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.DHCPID) == "" {
return nil, errors.New("field DHCPID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "GET",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/dhcps/" + fmt.Sprint(req.DHCPID) + "",
Headers: http.Header{},
}
var resp DHCP
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type CreateDHCPRequest struct {
Zone scw.Zone `json:"-"`
// ProjectID: project to create the DHCP configuration in
ProjectID string `json:"project_id"`
// Subnet: subnet for the DHCP server
Subnet scw.IPNet `json:"subnet"`
// Address: address of the DHCP server. This will be the gateway's address in the private network. Defaults to the first address of the subnet
Address *net.IP `json:"address"`
// PoolLow: low IP (included) of the dynamic address pool
//
// Low IP (included) of the dynamic address pool. Defaults to the second address of the subnet.
PoolLow *net.IP `json:"pool_low"`
// PoolHigh: high IP (included) of the dynamic address pool
//
// High IP (included) of the dynamic address pool. Defaults to the last address of the subnet.
PoolHigh *net.IP `json:"pool_high"`
// EnableDynamic: whether to enable dynamic pooling of IPs
//
// Whether to enable dynamic pooling of IPs. By turning the dynamic pool off, only pre-existing DHCP reservations will be handed out. Defaults to true.
//
EnableDynamic *bool `json:"enable_dynamic"`
// ValidLifetime: how long DHCP entries will be valid for
//
// How long, in seconds, DHCP entries will be valid for. Defaults to 1h (3600s).
ValidLifetime *scw.Duration `json:"valid_lifetime"`
// RenewTimer: after how long a renew will be attempted
//
// After how long, in seconds, a renew will be attempted. Must be 30s lower than `rebind_timer`. Defaults to 50m (3000s).
//
RenewTimer *scw.Duration `json:"renew_timer"`
// RebindTimer: after how long a DHCP client will query for a new lease if previous renews fail
//
// After how long, in seconds, a DHCP client will query for a new lease if previous renews fail. Must be 30s lower than `valid_lifetime`. Defaults to 51m (3060s).
//
RebindTimer *scw.Duration `json:"rebind_timer"`
// PushDefaultRoute: whether the gateway should push a default route to DHCP clients or only hand out IPs. Defaults to true
PushDefaultRoute *bool `json:"push_default_route"`
// PushDNSServer: whether the gateway should push custom DNS servers to clients
//
// Whether the gateway should push custom DNS servers to clients. This allows for instance hostname -> IP resolution. Defaults to true.
//
PushDNSServer *bool `json:"push_dns_server"`
// DNSServersOverride: override the DNS server list pushed to DHCP clients, instead of the gateway itself
DNSServersOverride *[]string `json:"dns_servers_override"`
// DNSSearch: additional DNS search paths
DNSSearch *[]string `json:"dns_search"`
	// DNSLocalName: TLD given to hosts in the Private Network
	//
	// TLD given to hostnames in the Private Network. Allowed characters are `a-z0-9-.`. Defaults to the slugified Private Network name if created along with a GatewayNetwork, or else to `priv`.
//
DNSLocalName *string `json:"dns_local_name"`
}
// CreateDHCP: create a DHCP configuration
func (s *API) CreateDHCP(req *CreateDHCPRequest, opts ...scw.RequestOption) (*DHCP, error) {
var err error
if req.ProjectID == "" {
defaultProjectID, _ := s.client.GetDefaultProjectID()
req.ProjectID = defaultProjectID
}
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "POST",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/dhcps",
Headers: http.Header{},
}
err = scwReq.SetBody(req)
if err != nil {
return nil, err
}
var resp DHCP
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type UpdateDHCPRequest struct {
Zone scw.Zone `json:"-"`
// DHCPID: DHCP config to update
DHCPID string `json:"-"`
// Subnet: subnet for the DHCP server
Subnet *scw.IPNet `json:"subnet"`
// Address: address of the DHCP server. This will be the gateway's address in the private network
Address *net.IP `json:"address"`
// PoolLow: low IP (included) of the dynamic address pool
PoolLow *net.IP `json:"pool_low"`
// PoolHigh: high IP (included) of the dynamic address pool
PoolHigh *net.IP `json:"pool_high"`
// EnableDynamic: whether to enable dynamic pooling of IPs
//
// Whether to enable dynamic pooling of IPs. By turning the dynamic pool off, only pre-existing DHCP reservations will be handed out. Defaults to true.
//
EnableDynamic *bool `json:"enable_dynamic"`
// ValidLifetime: how long, in seconds, DHCP entries will be valid for
ValidLifetime *scw.Duration `json:"valid_lifetime"`
// RenewTimer: after how long a renew will be attempted
//
// After how long, in seconds, a renew will be attempted. Must be 30s lower than `rebind_timer`.
RenewTimer *scw.Duration `json:"renew_timer"`
// RebindTimer: after how long a DHCP client will query for a new lease if previous renews fail
//
// After how long, in seconds, a DHCP client will query for a new lease if previous renews fail. Must be 30s lower than `valid_lifetime`.
//
RebindTimer *scw.Duration `json:"rebind_timer"`
// PushDefaultRoute: whether the gateway should push a default route to DHCP clients or only hand out IPs
PushDefaultRoute *bool `json:"push_default_route"`
// PushDNSServer: whether the gateway should push custom DNS servers to clients
//
// Whether the gateway should push custom DNS servers to clients. This allows for instance hostname -> IP resolution.
//
PushDNSServer *bool `json:"push_dns_server"`
// DNSServersOverride: override the DNS server list pushed to DHCP clients, instead of the gateway itself
DNSServersOverride *[]string `json:"dns_servers_override"`
// DNSSearch: additional DNS search paths
DNSSearch *[]string `json:"dns_search"`
	// DNSLocalName: TLD given to hosts in the Private Network
//
// TLD given to hostnames in the Private Network. Allowed characters are `a-z0-9-.`.
DNSLocalName *string `json:"dns_local_name"`
}
// UpdateDHCP: update a DHCP configuration
func (s *API) UpdateDHCP(req *UpdateDHCPRequest, opts ...scw.RequestOption) (*DHCP, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.DHCPID) == "" {
return nil, errors.New("field DHCPID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "PATCH",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/dhcps/" + fmt.Sprint(req.DHCPID) + "",
Headers: http.Header{},
}
err = scwReq.SetBody(req)
if err != nil {
return nil, err
}
var resp DHCP
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type DeleteDHCPRequest struct {
Zone scw.Zone `json:"-"`
// DHCPID: DHCP config id to delete
DHCPID string `json:"-"`
}
// DeleteDHCP: delete a DHCP configuration
func (s *API) DeleteDHCP(req *DeleteDHCPRequest, opts ...scw.RequestOption) error {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.DHCPID) == "" {
return errors.New("field DHCPID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "DELETE",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/dhcps/" + fmt.Sprint(req.DHCPID) + "",
Headers: http.Header{},
}
err = s.client.Do(scwReq, nil, opts...)
if err != nil {
return err
}
return nil
}
type ListDHCPEntriesRequest struct {
Zone scw.Zone `json:"-"`
// OrderBy: order in which to return results
//
// Default value: created_at_asc
OrderBy ListDHCPEntriesRequestOrderBy `json:"-"`
// Page: page number
Page *int32 `json:"-"`
// PageSize: DHCP entries per page
PageSize *uint32 `json:"-"`
// GatewayNetworkID: filter entries based on the gateway network they are on
GatewayNetworkID *string `json:"-"`
// MacAddress: filter entries on their MAC address
MacAddress *string `json:"-"`
// IPAddress: filter entries on their IP address
IPAddress *net.IP `json:"-"`
// Hostname: filter entries on their hostname substring
Hostname *string `json:"-"`
// Type: filter entries on their type
//
// Default value: unknown
Type DHCPEntryType `json:"-"`
}
// ListDHCPEntries: list DHCP entries
func (s *API) ListDHCPEntries(req *ListDHCPEntriesRequest, opts ...scw.RequestOption) (*ListDHCPEntriesResponse, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
defaultPageSize, exist := s.client.GetDefaultPageSize()
if (req.PageSize == nil || *req.PageSize == 0) && exist {
req.PageSize = &defaultPageSize
}
query := url.Values{}
parameter.AddToQuery(query, "order_by", req.OrderBy)
parameter.AddToQuery(query, "page", req.Page)
parameter.AddToQuery(query, "page_size", req.PageSize)
parameter.AddToQuery(query, "gateway_network_id", req.GatewayNetworkID)
parameter.AddToQuery(query, "mac_address", req.MacAddress)
parameter.AddToQuery(query, "ip_address", req.IPAddress)
parameter.AddToQuery(query, "hostname", req.Hostname)
parameter.AddToQuery(query, "type", req.Type)
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "GET",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/dhcp-entries",
Query: query,
Headers: http.Header{},
}
var resp ListDHCPEntriesResponse
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type GetDHCPEntryRequest struct {
Zone scw.Zone `json:"-"`
// DHCPEntryID: ID of the DHCP entry to fetch
DHCPEntryID string `json:"-"`
}
// GetDHCPEntry: get DHCP entries
func (s *API) GetDHCPEntry(req *GetDHCPEntryRequest, opts ...scw.RequestOption) (*DHCPEntry, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.DHCPEntryID) == "" {
return nil, errors.New("field DHCPEntryID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "GET",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/dhcp-entries/" + fmt.Sprint(req.DHCPEntryID) + "",
Headers: http.Header{},
}
var resp DHCPEntry
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type CreateDHCPEntryRequest struct {
Zone scw.Zone `json:"-"`
	// GatewayNetworkID: GatewayNetwork on which to create a DHCP reservation
GatewayNetworkID string `json:"gateway_network_id"`
	// MacAddress: MAC address to give a static entry to
MacAddress string `json:"mac_address"`
// IPAddress: IP address to give to the machine
IPAddress net.IP `json:"ip_address"`
}
// CreateDHCPEntry: create a static DHCP reservation
func (s *API) CreateDHCPEntry(req *CreateDHCPEntryRequest, opts ...scw.RequestOption) (*DHCPEntry, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "POST",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/dhcp-entries",
Headers: http.Header{},
}
err = scwReq.SetBody(req)
if err != nil {
return nil, err
}
var resp DHCPEntry
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type UpdateDHCPEntryRequest struct {
Zone scw.Zone `json:"-"`
// DHCPEntryID: DHCP entry ID to update
DHCPEntryID string `json:"-"`
// IPAddress: new IP address to give to the machine
IPAddress *net.IP `json:"ip_address"`
}
// UpdateDHCPEntry: update a DHCP entry
func (s *API) UpdateDHCPEntry(req *UpdateDHCPEntryRequest, opts ...scw.RequestOption) (*DHCPEntry, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.DHCPEntryID) == "" {
return nil, errors.New("field DHCPEntryID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "PATCH",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/dhcp-entries/" + fmt.Sprint(req.DHCPEntryID) + "",
Headers: http.Header{},
}
err = scwReq.SetBody(req)
if err != nil {
return nil, err
}
var resp DHCPEntry
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type SetDHCPEntriesRequest struct {
Zone scw.Zone `json:"-"`
// GatewayNetworkID: gateway Network on which to set DHCP reservation list
GatewayNetworkID string `json:"gateway_network_id"`
// DHCPEntries: new list of DHCP reservations
DHCPEntries []*SetDHCPEntriesRequestEntry `json:"dhcp_entries"`
}
// SetDHCPEntries: set all DHCP reservations on a Gateway Network
//
// Set the list of DHCP reservations attached to a Gateway Network. Reservations are identified by their MAC address. This will sync the current DHCP entry list with the given list, creating, updating or deleting DHCP entries.
//
func (s *API) SetDHCPEntries(req *SetDHCPEntriesRequest, opts ...scw.RequestOption) (*SetDHCPEntriesResponse, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "PUT",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/dhcp-entries",
Headers: http.Header{},
}
err = scwReq.SetBody(req)
if err != nil {
return nil, err
}
var resp SetDHCPEntriesResponse
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type DeleteDHCPEntryRequest struct {
Zone scw.Zone `json:"-"`
// DHCPEntryID: DHCP entry ID to delete
DHCPEntryID string `json:"-"`
}
// DeleteDHCPEntry: delete a DHCP reservation
func (s *API) DeleteDHCPEntry(req *DeleteDHCPEntryRequest, opts ...scw.RequestOption) error {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.DHCPEntryID) == "" {
return errors.New("field DHCPEntryID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "DELETE",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/dhcp-entries/" + fmt.Sprint(req.DHCPEntryID) + "",
Headers: http.Header{},
}
err = s.client.Do(scwReq, nil, opts...)
if err != nil {
return err
}
return nil
}
type ListPATRulesRequest struct {
Zone scw.Zone `json:"-"`
// OrderBy: order in which to return results
//
// Default value: created_at_asc
OrderBy ListPATRulesRequestOrderBy `json:"-"`
// Page: page number
Page *int32 `json:"-"`
	// PageSize: PAT rules per page
PageSize *uint32 `json:"-"`
// GatewayID: fetch rules for this gateway
GatewayID *string `json:"-"`
	// PrivateIP: fetch rules targeting this private IP
PrivateIP *net.IP `json:"-"`
// Protocol: fetch rules for this protocol
//
// Default value: unknown
Protocol PATRuleProtocol `json:"-"`
}
// ListPATRules: list PAT rules
func (s *API) ListPATRules(req *ListPATRulesRequest, opts ...scw.RequestOption) (*ListPATRulesResponse, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
defaultPageSize, exist := s.client.GetDefaultPageSize()
if (req.PageSize == nil || *req.PageSize == 0) && exist {
req.PageSize = &defaultPageSize
}
query := url.Values{}
parameter.AddToQuery(query, "order_by", req.OrderBy)
parameter.AddToQuery(query, "page", req.Page)
parameter.AddToQuery(query, "page_size", req.PageSize)
parameter.AddToQuery(query, "gateway_id", req.GatewayID)
parameter.AddToQuery(query, "private_ip", req.PrivateIP)
parameter.AddToQuery(query, "protocol", req.Protocol)
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "GET",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/pat-rules",
Query: query,
Headers: http.Header{},
}
var resp ListPATRulesResponse
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type GetPATRuleRequest struct {
Zone scw.Zone `json:"-"`
	// PatRuleID: PAT rule to get
PatRuleID string `json:"-"`
}
// GetPATRule: get a PAT rule
func (s *API) GetPATRule(req *GetPATRuleRequest, opts ...scw.RequestOption) (*PATRule, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.PatRuleID) == "" {
return nil, errors.New("field PatRuleID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "GET",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/pat-rules/" + fmt.Sprint(req.PatRuleID) + "",
Headers: http.Header{},
}
var resp PATRule
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type CreatePATRuleRequest struct {
Zone scw.Zone `json:"-"`
	// GatewayID: gateway to attach the rule to
GatewayID string `json:"gateway_id"`
// PublicPort: public port to listen on
PublicPort uint32 `json:"public_port"`
// PrivateIP: private IP to forward data to
PrivateIP net.IP `json:"private_ip"`
// PrivatePort: private port to translate to
PrivatePort uint32 `json:"private_port"`
// Protocol: protocol the rule should apply to
//
// Default value: unknown
Protocol PATRuleProtocol `json:"protocol"`
}
// CreatePATRule: create a PAT rule
func (s *API) CreatePATRule(req *CreatePATRuleRequest, opts ...scw.RequestOption) (*PATRule, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "POST",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/pat-rules",
Headers: http.Header{},
}
err = scwReq.SetBody(req)
if err != nil {
return nil, err
}
var resp PATRule
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type UpdatePATRuleRequest struct {
Zone scw.Zone `json:"-"`
	// PatRuleID: PAT rule to update
PatRuleID string `json:"-"`
// PublicPort: public port to listen on
PublicPort *uint32 `json:"public_port"`
// PrivateIP: private IP to forward data to
PrivateIP *net.IP `json:"private_ip"`
// PrivatePort: private port to translate to
PrivatePort *uint32 `json:"private_port"`
// Protocol: protocol the rule should apply to
//
// Default value: unknown
Protocol PATRuleProtocol `json:"protocol"`
}
// UpdatePATRule: update a PAT rule
func (s *API) UpdatePATRule(req *UpdatePATRuleRequest, opts ...scw.RequestOption) (*PATRule, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.PatRuleID) == "" {
return nil, errors.New("field PatRuleID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "PATCH",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/pat-rules/" + fmt.Sprint(req.PatRuleID) + "",
Headers: http.Header{},
}
err = scwReq.SetBody(req)
if err != nil {
return nil, err
}
var resp PATRule
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type SetPATRulesRequest struct {
Zone scw.Zone `json:"-"`
// GatewayID: gateway on which to set the PAT rules
GatewayID string `json:"gateway_id"`
// PatRules: new list of PAT rules
PatRules []*SetPATRulesRequestRule `json:"pat_rules"`
}
// SetPATRules: set all PAT rules on a Gateway
//
// Set the list of PAT rules attached to a Gateway. Rules are identified by their public port and protocol. This will sync the current PAT rule list with the given list, creating, updating or deleting PAT rules.
//
func (s *API) SetPATRules(req *SetPATRulesRequest, opts ...scw.RequestOption) (*SetPATRulesResponse, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "PUT",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/pat-rules",
Headers: http.Header{},
}
err = scwReq.SetBody(req)
if err != nil {
return nil, err
}
var resp SetPATRulesResponse
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type DeletePATRuleRequest struct {
Zone scw.Zone `json:"-"`
	// PatRuleID: PAT rule to delete
PatRuleID string `json:"-"`
}
// DeletePATRule: delete a PAT rule
func (s *API) DeletePATRule(req *DeletePATRuleRequest, opts ...scw.RequestOption) error {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.PatRuleID) == "" {
return errors.New("field PatRuleID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "DELETE",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/pat-rules/" + fmt.Sprint(req.PatRuleID) + "",
Headers: http.Header{},
}
err = s.client.Do(scwReq, nil, opts...)
if err != nil {
return err
}
return nil
}
type ListGatewayTypesRequest struct {
Zone scw.Zone `json:"-"`
}
// ListGatewayTypes: list VPC Public Gateway types
func (s *API) ListGatewayTypes(req *ListGatewayTypesRequest, opts ...scw.RequestOption) (*ListGatewayTypesResponse, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "GET",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/gateway-types",
Headers: http.Header{},
}
var resp ListGatewayTypesResponse
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type ListIPsRequest struct {
Zone scw.Zone `json:"-"`
// OrderBy: order in which to return results
//
// Default value: created_at_asc
OrderBy ListIPsRequestOrderBy `json:"-"`
// Page: page number
Page *int32 `json:"-"`
	// PageSize: IPs per page
PageSize *uint32 `json:"-"`
// OrganizationID: include only IPs in this organization
OrganizationID *string `json:"-"`
// ProjectID: include only IPs in this project
ProjectID *string `json:"-"`
// Tags: filter IPs with these tags
Tags []string `json:"-"`
// Reverse: filter by reverse containing this string
Reverse *string `json:"-"`
// IsFree: filter whether the IP is attached to a gateway or not
IsFree *bool `json:"-"`
}
// ListIPs: list IPs
func (s *API) ListIPs(req *ListIPsRequest, opts ...scw.RequestOption) (*ListIPsResponse, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
defaultPageSize, exist := s.client.GetDefaultPageSize()
if (req.PageSize == nil || *req.PageSize == 0) && exist {
req.PageSize = &defaultPageSize
}
query := url.Values{}
parameter.AddToQuery(query, "order_by", req.OrderBy)
parameter.AddToQuery(query, "page", req.Page)
parameter.AddToQuery(query, "page_size", req.PageSize)
parameter.AddToQuery(query, "organization_id", req.OrganizationID)
parameter.AddToQuery(query, "project_id", req.ProjectID)
parameter.AddToQuery(query, "tags", req.Tags)
parameter.AddToQuery(query, "reverse", req.Reverse)
parameter.AddToQuery(query, "is_free", req.IsFree)
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "GET",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/ips",
Query: query,
Headers: http.Header{},
}
var resp ListIPsResponse
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type GetIPRequest struct {
Zone scw.Zone `json:"-"`
// IPID: ID of the IP to get
IPID string `json:"-"`
}
// GetIP: get an IP
func (s *API) GetIP(req *GetIPRequest, opts ...scw.RequestOption) (*IP, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.IPID) == "" {
return nil, errors.New("field IPID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "GET",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/ips/" + fmt.Sprint(req.IPID) + "",
Headers: http.Header{},
}
var resp IP
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type CreateIPRequest struct {
Zone scw.Zone `json:"-"`
// ProjectID: project to create the IP into
ProjectID string `json:"project_id"`
// Tags: tags to give to the IP
Tags []string `json:"tags"`
}
// CreateIP: reserve an IP
func (s *API) CreateIP(req *CreateIPRequest, opts ...scw.RequestOption) (*IP, error) {
var err error
if req.ProjectID == "" {
defaultProjectID, _ := s.client.GetDefaultProjectID()
req.ProjectID = defaultProjectID
}
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "POST",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/ips",
Headers: http.Header{},
}
err = scwReq.SetBody(req)
if err != nil {
return nil, err
}
var resp IP
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type UpdateIPRequest struct {
Zone scw.Zone `json:"-"`
// IPID: ID of the IP to update
IPID string `json:"-"`
// Tags: tags to give to the IP
Tags *[]string `json:"tags"`
// Reverse: reverse to set on the IP. Empty string to unset
Reverse *string `json:"reverse"`
// GatewayID: gateway to attach the IP to. Empty string to detach
GatewayID *string `json:"gateway_id"`
}
// UpdateIP: update an IP
func (s *API) UpdateIP(req *UpdateIPRequest, opts ...scw.RequestOption) (*IP, error) {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return nil, errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.IPID) == "" {
return nil, errors.New("field IPID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "PATCH",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/ips/" + fmt.Sprint(req.IPID) + "",
Headers: http.Header{},
}
err = scwReq.SetBody(req)
if err != nil {
return nil, err
}
var resp IP
err = s.client.Do(scwReq, &resp, opts...)
if err != nil {
return nil, err
}
return &resp, nil
}
type DeleteIPRequest struct {
Zone scw.Zone `json:"-"`
// IPID: ID of the IP to delete
IPID string `json:"-"`
}
// DeleteIP: delete an IP
func (s *API) DeleteIP(req *DeleteIPRequest, opts ...scw.RequestOption) error {
var err error
if req.Zone == "" {
defaultZone, _ := s.client.GetDefaultZone()
req.Zone = defaultZone
}
if fmt.Sprint(req.Zone) == "" {
return errors.New("field Zone cannot be empty in request")
}
if fmt.Sprint(req.IPID) == "" {
return errors.New("field IPID cannot be empty in request")
}
scwReq := &scw.ScalewayRequest{
Method: "DELETE",
Path: "/vpc-gw/v1/zones/" + fmt.Sprint(req.Zone) + "/ips/" + fmt.Sprint(req.IPID) + "",
Headers: http.Header{},
}
err = s.client.Do(scwReq, nil, opts...)
if err != nil {
return err
}
return nil
}
// UnsafeGetTotalCount should not be used
// Internal usage only
func (r *ListGatewaysResponse) UnsafeGetTotalCount() uint32 {
return r.TotalCount
}
// UnsafeAppend should not be used
// Internal usage only
func (r *ListGatewaysResponse) UnsafeAppend(res interface{}) (uint32, error) {
results, ok := res.(*ListGatewaysResponse)
if !ok {
return 0, errors.New("%T type cannot be appended to type %T", res, r)
}
r.Gateways = append(r.Gateways, results.Gateways...)
r.TotalCount += uint32(len(results.Gateways))
return uint32(len(results.Gateways)), nil
}
// UnsafeGetTotalCount should not be used
// Internal usage only
func (r *ListGatewayNetworksResponse) UnsafeGetTotalCount() uint32 {
return r.TotalCount
}
// UnsafeAppend should not be used
// Internal usage only
func (r *ListGatewayNetworksResponse) UnsafeAppend(res interface{}) (uint32, error) {
results, ok := res.(*ListGatewayNetworksResponse)
if !ok {
return 0, errors.New("%T type cannot be appended to type %T", res, r)
}
r.GatewayNetworks = append(r.GatewayNetworks, results.GatewayNetworks...)
r.TotalCount += uint32(len(results.GatewayNetworks))
return uint32(len(results.GatewayNetworks)), nil
}
// UnsafeGetTotalCount should not be used
// Internal usage only
func (r *ListDHCPsResponse) UnsafeGetTotalCount() uint32 {
return r.TotalCount
}
// UnsafeAppend should not be used
// Internal usage only
func (r *ListDHCPsResponse) UnsafeAppend(res interface{}) (uint32, error) {
results, ok := res.(*ListDHCPsResponse)
if !ok {
return 0, errors.New("%T type cannot be appended to type %T", res, r)
}
r.Dhcps = append(r.Dhcps, results.Dhcps...)
r.TotalCount += uint32(len(results.Dhcps))
return uint32(len(results.Dhcps)), nil
}
// UnsafeGetTotalCount should not be used
// Internal usage only
func (r *ListDHCPEntriesResponse) UnsafeGetTotalCount() uint32 {
return r.TotalCount
}
// UnsafeAppend should not be used
// Internal usage only
func (r *ListDHCPEntriesResponse) UnsafeAppend(res interface{}) (uint32, error) {
results, ok := res.(*ListDHCPEntriesResponse)
if !ok {
return 0, errors.New("%T type cannot be appended to type %T", res, r)
}
r.DHCPEntries = append(r.DHCPEntries, results.DHCPEntries...)
r.TotalCount += uint32(len(results.DHCPEntries))
return uint32(len(results.DHCPEntries)), nil
}
// UnsafeGetTotalCount should not be used
// Internal usage only
func (r *ListPATRulesResponse) UnsafeGetTotalCount() uint32 {
return r.TotalCount
}
// UnsafeAppend should not be used
// Internal usage only
func (r *ListPATRulesResponse) UnsafeAppend(res interface{}) (uint32, error) {
results, ok := res.(*ListPATRulesResponse)
if !ok {
return 0, errors.New("%T type cannot be appended to type %T", res, r)
}
r.PatRules = append(r.PatRules, results.PatRules...)
r.TotalCount += uint32(len(results.PatRules))
return uint32(len(results.PatRules)), nil
}
// UnsafeGetTotalCount should not be used
// Internal usage only
func (r *ListIPsResponse) UnsafeGetTotalCount() uint32 {
return r.TotalCount
}
// UnsafeAppend should not be used
// Internal usage only
func (r *ListIPsResponse) UnsafeAppend(res interface{}) (uint32, error) {
results, ok := res.(*ListIPsResponse)
if !ok {
return 0, errors.New("%T type cannot be appended to type %T", res, r)
}
r.IPs = append(r.IPs, results.IPs...)
r.TotalCount += uint32(len(results.IPs))
return uint32(len(results.IPs)), nil
} | // CreatedAt: connection creation date
CreatedAt *time.Time `json:"created_at"`
// UpdatedAt: connection last modification date |
SAC-continuous.py | import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import copy
from torch.utils.tensorboard import SummaryWriter
from torch.distributions import Normal
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, hidden_width, max_action):
super(Actor, self).__init__()
self.max_action = max_action
self.l1 = nn.Linear(state_dim, hidden_width)
self.l2 = nn.Linear(hidden_width, hidden_width)
self.mean_layer = nn.Linear(hidden_width, action_dim)
self.log_std_layer = nn.Linear(hidden_width, action_dim)
def forward(self, x, deterministic=False, with_logprob=True):
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
mean = self.mean_layer(x)
log_std = self.log_std_layer(x) # We output the log_std to ensure that std=exp(log_std)>0
log_std = torch.clamp(log_std, -20, 2)
std = torch.exp(log_std)
dist = Normal(mean, std) # Generate a Gaussian distribution
        if deterministic:  # When evaluating, we use the deterministic policy
a = mean
else:
a = dist.rsample() # reparameterization trick: mean+std*N(0,1)
        if with_logprob:  # The method follows OpenAI Spinning Up, which is more numerically stable.
log_pi = dist.log_prob(a).sum(dim=1, keepdim=True)
log_pi -= (2 * (np.log(2) - a - F.softplus(-2 * a))).sum(dim=1, keepdim=True)
else:
log_pi = None
a = self.max_action * torch.tanh(a) # Use tanh to compress the unbounded Gaussian distribution into a bounded action interval.
return a, log_pi
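# A quick sanity check of the squashing correction above (a sketch added for
# illustration, not part of the training loop): the log-prob line relies on the
# numerically stable identity log(1 - tanh(a)^2) = 2*(log 2 - a - softplus(-2a))
# from OpenAI Spinning Up.
def _check_tanh_logprob_identity():
    a = torch.linspace(-5.0, 5.0, steps=11, dtype=torch.float64)
    lhs = torch.log(1.0 - torch.tanh(a) ** 2)  # direct, unstable form
    rhs = 2.0 * (np.log(2.0) - a - F.softplus(-2.0 * a))  # stable form used in Actor.forward
    assert torch.allclose(lhs, rhs, atol=1e-8)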
class Critic(nn.Module): # According to (s,a), directly calculate Q(s,a)
def __init__(self, state_dim, action_dim, hidden_width):
super(Critic, self).__init__()
# Q1
self.l1 = nn.Linear(state_dim + action_dim, hidden_width)
self.l2 = nn.Linear(hidden_width, hidden_width)
self.l3 = nn.Linear(hidden_width, 1)
# Q2
self.l4 = nn.Linear(state_dim + action_dim, hidden_width)
self.l5 = nn.Linear(hidden_width, hidden_width)
self.l6 = nn.Linear(hidden_width, 1)
def forward(self, s, a):
s_a = torch.cat([s, a], 1)
q1 = F.relu(self.l1(s_a))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
q2 = F.relu(self.l4(s_a))
q2 = F.relu(self.l5(q2))
q2 = self.l6(q2)
return q1, q2
class ReplayBuffer(object):
def __init__(self, state_dim, action_dim):
self.max_size = int(1e6)
self.count = 0
self.size = 0
self.s = np.zeros((self.max_size, state_dim))
self.a = np.zeros((self.max_size, action_dim))
self.r = np.zeros((self.max_size, 1))
self.s_ = np.zeros((self.max_size, state_dim))
self.dw = np.zeros((self.max_size, 1))
def store(self, s, a, r, s_, dw):
self.s[self.count] = s
self.a[self.count] = a
self.r[self.count] = r
self.s_[self.count] = s_
self.dw[self.count] = dw
self.count = (self.count + 1) % self.max_size # When the 'count' reaches max_size, it will be reset to 0.
self.size = min(self.size + 1, self.max_size) # Record the number of transitions
def sa | elf, batch_size):
index = np.random.choice(self.size, size=batch_size) # Randomly sampling
batch_s = torch.tensor(self.s[index], dtype=torch.float)
batch_a = torch.tensor(self.a[index], dtype=torch.float)
batch_r = torch.tensor(self.r[index], dtype=torch.float)
batch_s_ = torch.tensor(self.s_[index], dtype=torch.float)
batch_dw = torch.tensor(self.dw[index], dtype=torch.float)
return batch_s, batch_a, batch_r, batch_s_, batch_dw
class SAC(object):
def __init__(self, state_dim, action_dim, max_action):
self.max_action = max_action
self.hidden_width = 256 # The number of neurons in hidden layers of the neural network
self.batch_size = 256 # batch size
self.GAMMA = 0.99 # discount factor
self.TAU = 0.005 # Softly update the target network
self.lr = 3e-4 # learning rate
self.adaptive_alpha = True # Whether to automatically learn the temperature alpha
if self.adaptive_alpha:
            # Target Entropy = -dim(A) (e.g., -6 for HalfCheetah-v2) as given in the paper
self.target_entropy = -action_dim
# We learn log_alpha instead of alpha to ensure that alpha=exp(log_alpha)>0
self.log_alpha = torch.zeros(1, requires_grad=True)
self.alpha = self.log_alpha.exp()
self.alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=self.lr)
else:
self.alpha = 0.2
self.actor = Actor(state_dim, action_dim, self.hidden_width, max_action)
self.critic = Critic(state_dim, action_dim, self.hidden_width)
self.critic_target = copy.deepcopy(self.critic)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.lr)
def choose_action(self, s, deterministic=False):
s = torch.unsqueeze(torch.tensor(s, dtype=torch.float), 0)
a, _ = self.actor(s, deterministic, False) # When choosing actions, we do not need to compute log_pi
return a.data.numpy().flatten()
    def learn(self, replay_buffer):
        batch_s, batch_a, batch_r, batch_s_, batch_dw = replay_buffer.sample(self.batch_size)  # Sample a batch
with torch.no_grad():
batch_a_, log_pi_ = self.actor(batch_s_) # a' from the current policy
# Compute target Q
target_Q1, target_Q2 = self.critic_target(batch_s_, batch_a_)
target_Q = batch_r + self.GAMMA * (1 - batch_dw) * (torch.min(target_Q1, target_Q2) - self.alpha * log_pi_)
# Compute current Q
current_Q1, current_Q2 = self.critic(batch_s, batch_a)
# Compute critic loss
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Freeze critic networks so you don't waste computational effort
for params in self.critic.parameters():
params.requires_grad = False
# Compute actor loss
a, log_pi = self.actor(batch_s)
Q1, Q2 = self.critic(batch_s, a)
Q = torch.min(Q1, Q2)
actor_loss = (self.alpha * log_pi - Q).mean()
# Optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Unfreeze critic networks
for params in self.critic.parameters():
params.requires_grad = True
# Update alpha
if self.adaptive_alpha:
# We learn log_alpha instead of alpha to ensure that alpha=exp(log_alpha)>0
alpha_loss = -(self.log_alpha.exp() * (log_pi + self.target_entropy).detach()).mean()
self.alpha_optimizer.zero_grad()
alpha_loss.backward()
self.alpha_optimizer.step()
self.alpha = self.log_alpha.exp()
# Softly update target networks
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(self.TAU * param.data + (1 - self.TAU) * target_param.data)
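# Note on the alpha update in SAC.learn: alpha_loss is gradient descent on
# J(alpha) = E[-alpha * (log_pi + target_entropy)], so alpha grows when the
# policy entropy (on average, -log_pi) falls below the target and shrinks
# otherwise; detaching log_pi keeps the actor out of this step.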
def evaluate_policy(env, agent):
times = 3 # Perform three evaluations and calculate the average
evaluate_reward = 0
for _ in range(times):
s = env.reset()
done = False
episode_reward = 0
while not done:
            a = agent.choose_action(s, deterministic=True)  # We use the deterministic policy during evaluation
s_, r, done, _ = env.step(a)
episode_reward += r
s = s_
evaluate_reward += episode_reward
return int(evaluate_reward / times)
def reward_adapter(r, env_index):
if env_index == 0: # Pendulum-v1
r = (r + 8) / 8
elif env_index == 1: # BipedalWalker-v3
if r <= -100:
r = -1
return r
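# Why these adapters help: Pendulum-v1's per-step reward is
# -(theta^2 + 0.1*theta_dot^2 + 0.001*action^2), roughly in [-16.3, 0], so
# (r + 8) / 8 recenters it to approximately [-1, 1]; for BipedalWalker-v3,
# clipping the -100 fall penalty to -1 keeps a single failure from dominating
# the Q-targets.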
if __name__ == '__main__':
env_name = ['Pendulum-v1', 'BipedalWalker-v3', 'HalfCheetah-v2', 'Hopper-v2', 'Walker2d-v2']
env_index = 0
env = gym.make(env_name[env_index])
env_evaluate = gym.make(env_name[env_index]) # When evaluating the policy, we need to rebuild an environment
number = 1
seed = 0
# Set random seed
env.seed(seed)
env.action_space.seed(seed)
env_evaluate.seed(seed)
env_evaluate.action_space.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
max_episode_steps = env._max_episode_steps # Maximum number of steps per episode
print("env={}".format(env_name[env_index]))
print("state_dim={}".format(state_dim))
print("action_dim={}".format(action_dim))
print("max_action={}".format(max_action))
print("max_episode_steps={}".format(max_episode_steps))
agent = SAC(state_dim, action_dim, max_action)
replay_buffer = ReplayBuffer(state_dim, action_dim)
# Build a tensorboard
writer = SummaryWriter(log_dir='runs/SAC/SAC_env_{}_number_{}_seed_{}'.format(env_name[env_index], number, seed))
max_train_steps = 3e6 # Maximum number of training steps
    random_steps = 25e3  # Take random actions at the beginning for better exploration
evaluate_freq = 5e3 # Evaluate the policy every 'evaluate_freq' steps
evaluate_num = 0 # Record the number of evaluations
evaluate_rewards = [] # Record the rewards during the evaluating
total_steps = 0 # Record the total steps during the training
while total_steps < max_train_steps:
s = env.reset()
episode_steps = 0
done = False
while not done:
episode_steps += 1
            if total_steps < random_steps:  # Take random actions at the beginning for better exploration
a = env.action_space.sample()
else:
a = agent.choose_action(s)
s_, r, done, _ = env.step(a)
r = reward_adapter(r, env_index) # Adjust rewards for better performance
            # When dead or win or reaching max_episode_steps, done will be True; we need to distinguish these cases:
            # dw means dead or win, so there is no next state s';
            # but when reaching max_episode_steps, there actually is a next state s'.
if done and episode_steps != max_episode_steps:
dw = True
else:
dw = False
replay_buffer.store(s, a, r, s_, dw) # Store the transition
s = s_
if total_steps >= random_steps:
agent.learn(replay_buffer)
# Evaluate the policy every 'evaluate_freq' steps
if (total_steps + 1) % evaluate_freq == 0:
evaluate_num += 1
evaluate_reward = evaluate_policy(env_evaluate, agent)
evaluate_rewards.append(evaluate_reward)
print("evaluate_num:{} \t evaluate_reward:{}".format(evaluate_num, evaluate_reward))
writer.add_scalar('step_rewards_{}'.format(env_name[env_index]), evaluate_reward, global_step=total_steps)
# Save the rewards
if evaluate_num % 10 == 0:
np.save('./data_train/SAC_env_{}_number_{}_seed_{}.npy'.format(env_name[env_index], number, seed), np.array(evaluate_rewards))
total_steps += 1
| mple(s |
br_codegen.py | import os
import numpy as np
import qutip.settings as qset
from qutip.interpolate import Cubic_Spline
_cython_path = os.path.dirname(os.path.abspath(__file__)).replace("\\", "/")
_include_string = "'"+_cython_path+"/complex_math.pxi'"
__all__ = ['BR_Codegen']
class BR_Codegen(object):
"""
Class for generating Bloch-Redfield time-dependent code
at runtime.
"""
def __init__(self, h_terms=None, h_td_terms=None, h_obj=None,
c_terms=None, c_td_terms=None, c_obj=None,
a_terms=None, a_td_terms=None,
spline_count=[0,0],
coupled_ops=[],
coupled_lengths=[],
coupled_spectra=[],
config=None, sparse=False,
use_secular=None,
sec_cutoff=0.1,
args=None,
use_openmp=False,
omp_thresh=None,
omp_threads=None,
atol=None):
try:
import cython
except (ImportError, ModuleNotFoundError):
            raise ModuleNotFoundError("Cython is needed for "
                                      "time-dependent brmesolve")
import sys
import os
sys.path.append(os.getcwd())
        # Hamiltonian time-dependent pieces
self.h_terms = h_terms # number of H pieces
self.h_td_terms = h_td_terms
self.h_obj = h_obj
        # Collapse operator time-dependent pieces
self.c_terms = c_terms # number of C pieces
self.c_td_terms = c_td_terms
self.c_obj = c_obj
        # BR operator time-dependent pieces
self.a_terms = a_terms # number of A pieces
self.a_td_terms = a_td_terms
self.spline_count = spline_count
self.use_secular = int(use_secular)
self.sec_cutoff = sec_cutoff
self.args = args
self.sparse = sparse
self.spline = 0
# Code generator properties
self.code = [] # strings to be written to file
self.level = 0 # indent level
self.config = config
if atol is None:
self.atol = qset.atol
else:
self.atol = atol
self.use_openmp = use_openmp
self.omp_thresh = omp_thresh
self.omp_threads = omp_threads
self.coupled_ops = coupled_ops
self.coupled_lengths = coupled_lengths
self.coupled_spectra = coupled_spectra
def write(self, string):
"""write lines of code to self.code"""
self.code.append(" " * self.level + string + "\n")
def file(self, filename):
"""open file called filename for writing"""
self.file = open(filename, "w")
def generate(self, filename="rhs.pyx"):
"""generate the file"""
for line in cython_preamble(self.use_openmp)+self.aop_td_funcs():
self.write(line)
        # write function for Hamiltonian terms (there will always
        # be at least one term)
for line in cython_checks() + self.ODE_func_header():
self.write(line)
self.indent()
#Reset spline count
self.spline = 0
for line in self.func_vars()+self.ham_add_and_eigsolve()+ \
self.br_matvec_terms()+["\n"]:
self.write(line)
for line in self.func_end():
self.write(line)
self.dedent()
self.file(filename)
self.file.writelines(self.code)
self.file.close()
self.config.cgen_num += 1
def | (self):
"""increase indention level by one"""
self.level += 1
def dedent(self):
"""decrease indention level by one"""
if self.level == 0:
raise SyntaxError("Error in code generator")
self.level -= 1
def _get_arg_str(self, args):
if len(args) == 0:
return ''
ret = ''
for name, value in self.args.items():
if isinstance(value, np.ndarray):
ret += ",\n np.ndarray[np.%s_t, ndim=1] %s" % \
(value.dtype.name, name)
else:
if isinstance(value, (int, np.int32, np.int64)):
kind = 'int'
elif isinstance(value, (float, np.float32, np.float64)):
kind = 'float'
elif isinstance(value, (complex, np.complex128)):
kind = 'complex'
#kind = type(value).__name__
ret += ",\n " + kind + " " + name
return ret
def ODE_func_header(self):
"""Creates function header for time-dependent ODE RHS."""
func_name = "def cy_td_ode_rhs("
# strings for time and vector variables
input_vars = ("\n double t" +
",\n complex[::1] vec")
for k in range(self.h_terms):
input_vars += (",\n " +
"complex[::1,:] H%d" % k)
#Add array for each Cubic_Spline H term
for htd in self.h_td_terms:
if isinstance(htd, Cubic_Spline):
if not htd.is_complex:
input_vars += (",\n " +
"double[::1] spline%d" % self.spline)
else:
input_vars += (",\n " +
"complex[::1] spline%d" % self.spline)
self.spline += 1
for k in range(self.c_terms):
input_vars += (",\n " +
"complex[::1,:] C%d" % k)
#Add array for each Cubic_Spline c_op term
for ctd in self.c_td_terms:
if isinstance(ctd, Cubic_Spline):
if not ctd.is_complex:
input_vars += (",\n " +
"double[::1] spline%d" % self.spline)
else:
input_vars += (",\n " +
"complex[::1] spline%d" % self.spline)
self.spline += 1
#Add coupled a_op terms
for _a in self.a_td_terms:
if isinstance(_a, Cubic_Spline):
if not _a.is_complex:
input_vars += (",\n " +
"double[::1] spline%d" % self.spline)
else:
input_vars += (",\n " +
"complex[::1] spline%d" % self.spline)
self.spline += 1
#Add a_op terms
for k in range(self.a_terms):
input_vars += (",\n " +
"complex[::1,:] A%d" % k)
input_vars += (",\n unsigned int nrows")
input_vars += self._get_arg_str(self.args)
func_end = "):"
return [func_name + input_vars + func_end]
def func_vars(self):
"""Writes the variables and their types & spmv parts"""
func_vars = ["", "cdef double complex * " +
'out = <complex *>PyDataMem_NEW_ZEROED(nrows**2,sizeof(complex))']
func_vars.append(" ")
return func_vars
def aop_td_funcs(self):
aop_func_str=[]
spline_val = self.spline_count[0]
coupled_val = 0
kk = 0
while kk < self.a_terms:
if kk not in self.coupled_ops:
aa = self.a_td_terms[kk]
if isinstance(aa, str):
aop_func_str += ["cdef complex spectral{0}(double w, double t): return {1}".format(kk, aa)]
elif isinstance(aa, tuple):
if isinstance(aa[0],str):
str0 = aa[0]
elif isinstance(aa[0],Cubic_Spline):
if not aa[0].is_complex:
aop_func_str += ["cdef double[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[0].coeffs,separator=',',precision=16)+",dtype=float)"]
str0 = "interp(w, %s, %s, spline%s)" % (aa[0].a, aa[0].b, spline_val)
else:
aop_func_str += ["cdef complex[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[0].coeffs,separator=',',precision=16)+",dtype=complex)"]
str0 = "zinterp(w, %s, %s, spline%s)" % (aa[0].a, aa[0].b, spline_val)
spline_val += 1
else:
raise Exception('Error parsing tuple.')
if isinstance(aa[1],str):
str1 = aa[1]
elif isinstance(aa[1],Cubic_Spline):
if not aa[1].is_complex:
aop_func_str += ["cdef double[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+",dtype=float)"]
str1 = "interp(t, %s, %s, spline%s)" % (aa[1].a, aa[1].b, spline_val)
else:
aop_func_str += ["cdef complex[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa[1].coeffs,separator=',',precision=16)+",dtype=complex)"]
str1 = "zinterp(t, %s, %s, spline%s)" % (aa[1].a, aa[1].b, spline_val)
spline_val += 1
else:
raise Exception('Error parsing tuple.')
aop_func_str += ["cdef complex spectral{0}(double w, double t): return ({1})*({2})".format(kk, str0, str1)]
else:
raise Exception('Invalid a_td_term.')
kk += 1
else:
aa = self.coupled_spectra[coupled_val]
if isinstance(aa, str):
aop_func_str += ["cdef complex spectral{0}(double w, double t): return {1}".format(kk, aa)]
elif isinstance(aa, Cubic_Spline):
                    if not aa.is_complex:
                        aop_func_str += ["cdef double[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa.coeffs,separator=',',precision=16)+",dtype=float)"]
                        str1 = "interp(t, %s, %s, spline%s)" % (aa.a, aa.b, spline_val)
                    else:
                        aop_func_str += ["cdef complex[::1] spline{0} = np.array(".format(spline_val)+np.array2string(aa.coeffs,separator=',',precision=16)+",dtype=complex)"]
                        str1 = "zinterp(t, %s, %s, spline%s)" % (aa.a, aa.b, spline_val)
spline_val += 1
aop_func_str += ["cdef complex spectral{0}(double w, double t): return {1}".format(kk, str1)]
kk += self.coupled_lengths[coupled_val]
coupled_val += 1
return aop_func_str
def ham_add_and_eigsolve(self):
ham_str = []
#allocate initial zero-Hamiltonian and eigenvector array in Fortran-order
ham_str += ['cdef complex[::1, :] H = farray_alloc(nrows)']
ham_str += ['cdef complex[::1, :] evecs = farray_alloc(nrows)']
#allocate double array for eigenvalues
ham_str += ['cdef double * eigvals = <double *>PyDataMem_NEW_ZEROED(nrows,sizeof(double))']
for kk in range(self.h_terms):
if isinstance(self.h_td_terms[kk], Cubic_Spline):
S = self.h_td_terms[kk]
if not S.is_complex:
td_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
else:
td_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
ham_str += ["dense_add_mult(H, H{0}, {1})".format(kk,td_str)]
self.spline += 1
else:
ham_str += ["dense_add_mult(H, H{0}, {1})".format(kk,self.h_td_terms[kk])]
#Do the eigensolving
ham_str += ["ZHEEVR(H, eigvals, evecs, nrows)"]
#Free H as it is no longer needed
ham_str += ["PyDataMem_FREE(&H[0,0])"]
return ham_str
def br_matvec_terms(self):
br_str = []
# Transform vector eigenbasis
br_str += ["cdef double complex * eig_vec = vec_to_eigbasis(vec, evecs, nrows)"]
# Do the diagonal liouvillian matvec
br_str += ["diag_liou_mult(eigvals, eig_vec, out, nrows)"]
# Do the cop_term matvec for each c_term
for kk in range(self.c_terms):
if isinstance(self.c_td_terms[kk], Cubic_Spline):
S = self.c_td_terms[kk]
if not S.is_complex:
td_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
else:
td_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
if self.use_openmp:
br_str += ["cop_super_mult_openmp(C{0}, evecs, eig_vec, {1}, out, nrows, {2}, {3}, {4})".format(kk,
td_str, self.omp_thresh, self.omp_threads, self.atol)]
else:
br_str += ["cop_super_mult(C{0}, evecs, eig_vec, {1}, out, nrows, {2})".format(kk, td_str, self.atol)]
self.spline += 1
else:
if self.use_openmp:
br_str += ["cop_super_mult_openmp(C{0}, evecs, eig_vec, {1}, out, nrows, {2}, {3}, {4})".format(kk,
self.c_td_terms[kk], self.omp_thresh, self.omp_threads, self.atol)]
else:
br_str += ["cop_super_mult(C{0}, evecs, eig_vec, {1}, out, nrows, {2})".format(kk, self.c_td_terms[kk], self.atol)]
if self.a_terms != 0:
#Calculate skew and dw_min terms
br_str += ["cdef double[:,::1] skew = <double[:nrows,:nrows]><double *>PyDataMem_NEW_ZEROED(nrows**2,sizeof(double))"]
br_str += ["cdef double dw_min = skew_and_dwmin(eigvals, skew, nrows)"]
#Compute BR term matvec
kk = 0
coupled_val = 0
while kk < self.a_terms:
if kk not in self.coupled_ops:
if self.use_openmp:
br_str += ["br_term_mult_openmp(t, A{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3}, {4}, {5})".format(kk,
self.use_secular, self.sec_cutoff, self.omp_thresh, self.omp_threads, self.atol)]
else:
br_str += ["br_term_mult(t, A{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3})".format(kk, self.use_secular, self.sec_cutoff, self.atol)]
kk += 1
else:
br_str += ['cdef complex[::1, :] Ac{0} = farray_alloc(nrows)'.format(kk)]
for nn in range(self.coupled_lengths[coupled_val]):
if isinstance(self.a_td_terms[kk+nn], str):
br_str += ["dense_add_mult(Ac{0}, A{1}, {2})".format(kk,kk+nn,self.a_td_terms[kk+nn])]
elif isinstance(self.a_td_terms[kk+nn], Cubic_Spline):
S = self.a_td_terms[kk+nn]
if not S.is_complex:
td_str = "interp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
else:
td_str = "zinterp(t, %s, %s, spline%s)" % (S.a, S.b, self.spline)
br_str += ["dense_add_mult(Ac{0}, A{1}, {2})".format(kk,kk+nn,td_str)]
else:
                        raise Exception('Invalid time-dependence for a_op.')
if self.use_openmp:
br_str += ["br_term_mult_openmp(t, Ac{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3}, {4}, {5})".format(kk,
self.use_secular, self.sec_cutoff, self.omp_thresh, self.omp_threads, self.atol)]
else:
br_str += ["br_term_mult(t, Ac{0}, evecs, skew, dw_min, spectral{0}, eig_vec, out, nrows, {1}, {2}, {3})".format(kk, self.use_secular, self.sec_cutoff, self.atol)]
br_str += ["PyDataMem_FREE(&Ac{0}[0,0])".format(kk)]
kk += self.coupled_lengths[coupled_val]
coupled_val += 1
return br_str
def func_end(self):
end_str = []
#Transform out vector back to fock basis
end_str += ["cdef np.ndarray[complex, ndim=1, mode='c'] arr_out = vec_to_fockbasis(out, evecs, nrows)"]
#Free everything at end
if self.a_terms != 0:
end_str += ["PyDataMem_FREE(&skew[0,0])"]
end_str += ["PyDataMem_FREE(&evecs[0,0])"]
end_str += ["PyDataMem_FREE(eigvals)"]
end_str += ["PyDataMem_FREE(eig_vec)"]
end_str += ["PyDataMem_FREE(out)"]
end_str += ["return arr_out"]
return end_str
def cython_preamble(use_omp=False):
    """
    Returns list of code segments for Cython preamble.
    """
    if use_omp:
        call_str = "from qutip.cy.openmp.br_omp cimport (cop_super_mult_openmp, br_term_mult_openmp)"
    else:
        call_str = "from qutip.cy.brtools cimport (cop_super_mult, br_term_mult)"
return ["""#!python
#cython: language_level=3
# This file is generated automatically by QuTiP.
# (C) 2011 and later, QuSTaR
import numpy as np
cimport numpy as np
cimport cython
np.import_array()
cdef extern from "numpy/arrayobject.h" nogil:
    void *PyDataMem_NEW_ZEROED(size_t size, size_t elsize)
void PyArray_ENABLEFLAGS(np.ndarray arr, int flags)
void PyDataMem_FREE(void * ptr)
from qutip.cy.interpolate cimport interp, zinterp
from qutip.cy.math cimport erf, zerf
cdef double pi = 3.14159265358979323
from qutip.cy.brtools cimport (dense_add_mult, ZHEEVR, dense_to_eigbasis,
vec_to_eigbasis, vec_to_fockbasis, skew_and_dwmin,
diag_liou_mult, spec_func, farray_alloc)
"""
+call_str+
"""
include """+_include_string+"""
"""]
def cython_checks():
"""
List of strings that turn off Cython checks.
"""
return ["""
@cython.cdivision(True)
@cython.boundscheck(False)
@cython.wraparound(False)"""]
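# A minimal usage sketch (illustrative; the argument values are assumptions,
# not taken verbatim from qutip's brmesolve internals): the generator is driven
# by instantiating BR_Codegen and calling generate(), which writes a Cython RHS
# file that the solver then compiles and imports.
#
#   cgen = BR_Codegen(h_terms=1, h_td_terms=['1'], h_obj=H_list,
#                     c_terms=0, c_td_terms=[], a_terms=0, a_td_terms=[],
#                     use_secular=True, sec_cutoff=0.1, config=config)
#   cgen.generate('rhs.pyx')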
| indent |
test.py | # -*- coding: utf-8 -*-
import argparse
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.autograd import Variable
from tqdm import tqdm
from models.protonet_embedding import ProtoNetEmbedding
from models.R2D2_embedding import R2D2Embedding
from models.ResNet12_embedding import resnet12
from models.classification_heads import ClassificationHead, R2D2Head
from utils import pprint, set_gpu, Timer, count_accuracy, log
import random
import numpy as np
import os
import pdb
def get_model(options):
# Choose the embedding network
if options.network == 'ProtoNet':
network = ProtoNetEmbedding().cuda()
elif options.network == 'R2D2':
network = R2D2Embedding().cuda()
elif options.network == 'ResNet':
if options.dataset == 'miniImageNet' or options.dataset == 'tieredImageNet':
network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=5).cuda()
network = torch.nn.DataParallel(network)
else:
network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=2).cuda()
network = torch.nn.DataParallel(network)
else:
print ("Cannot recognize the network type")
assert(False)
# Choose the classification head
    if options.head == 'ProtoNet':
        cls_head = ClassificationHead(base_learner='ProtoNet').cuda()
    elif options.head == 'Ridge':
        cls_head = ClassificationHead(base_learner='Ridge').cuda()
    elif options.head == 'R2D2':
        cls_head = R2D2Head().cuda()
    elif options.head == 'SVM':
        cls_head = ClassificationHead(base_learner='SVM-CS').cuda()
else:
print ("Cannot recognize the classification head type")
assert(False)
return (network, cls_head)
def get_dataset(options):
    # Choose the dataset
if options.dataset == 'miniImageNet':
from data.mini_imagenet import MiniImageNet, FewShotDataloader
dataset_test = MiniImageNet(phase='test')
data_loader = FewShotDataloader
elif options.dataset == 'tieredImageNet':
from data.tiered_imagenet import tieredImageNet, FewShotDataloader
dataset_test = tieredImageNet(phase='test')
data_loader = FewShotDataloader
elif options.dataset == 'CIFAR_FS':
from data.CIFAR_FS import CIFAR_FS, FewShotDataloader
dataset_test = CIFAR_FS(phase='test')
data_loader = FewShotDataloader
elif options.dataset == 'FC100':
from data.FC100 import FC100, FewShotDataloader
dataset_test = FC100(phase='test')
data_loader = FewShotDataloader
else:
print ("Cannot recognize the dataset type")
assert(False)
return (dataset_test, data_loader)
def self_mix(data):
size = data.size()
W = size[-1]
H = size[-2]
# uniform
cx = np.random.randint(W)
cy = np.random.randint(H)
cut_w = W//2
cut_h = H//2
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
while True:
bbxn = np.random.randint(0, W-(bbx2-bbx1))
bbyn = np.random.randint(0, H-(bby2-bby1))
if bbxn != bbx1 or bbyn != bby1:
break
if (bbx2 - bbx1) == (bby2 - bby1):
k = random.sample([0, 1, 2, 3], 1)[0]
else:
k = 0
data[:, :, bbx1:bbx2, bby1:bby2] = torch.rot90(data[:, :, bbxn:bbxn + (bbx2-bbx1), bbyn:bbyn + (bby2-bby1)], k, [2,3])
#data[:, :, bbx1:bbx2, bby1:bby2] = data[:, :, bbxn:bbxn + (bbx2-bbx1), bbyn:bbyn + (bby2-bby1)]
return data
def flip(x, dim):
indices = [slice(None)] * x.dim()
indices[dim] = torch.arange(x.size(dim) - 1, -1, -1,
dtype=torch.long, device=x.device)
return x[tuple(indices)]
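# flip() reverses a tensor along `dim` via advanced indexing; on current
# PyTorch the built-in torch.flip is equivalent, e.g. (illustration only):
#
#   x = torch.arange(6).reshape(2, 3)
#   assert torch.equal(flip(x, 1), torch.flip(x, dims=[1]))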
def build_grid(source_size,target_size):
k = float(target_size)/float(source_size)
direct = torch.linspace(-k,k,target_size).unsqueeze(0).repeat(target_size,1).unsqueeze(-1)
full = torch.cat([direct,direct.transpose(1,0)],dim=2).unsqueeze(0)
return full.cuda()
def random_crop_grid(x,grid):
delta = x.size(2)-grid.size(1)
grid = grid.repeat(x.size(0),1,1,1).cuda()
#Add random shifts by x
grid[:,:,:,0] = grid[:,:,:,0]+ torch.FloatTensor(x.size(0)).cuda().random_(0, delta).unsqueeze(-1).unsqueeze(-1).expand(-1, grid.size(1), grid.size(2)) /x.size(2)
#Add random shifts by y
grid[:,:,:,1] = grid[:,:,:,1]+ torch.FloatTensor(x.size(0)).cuda().random_(0, delta).unsqueeze(-1).unsqueeze(-1).expand(-1, grid.size(1), grid.size(2)) /x.size(2)
return grid
def random_cropping(batch, t):
#Building central crop of t pixel size
|
def shot_aug(data_support, labels_support, n_support, method, opt):
size = data_support.shape
if method == "fliplr":
n_support = opt.s_du * n_support
data_shot = flip(data_support, -1)
data_support = torch.cat((data_support, data_shot), dim = 1)
labels_support = torch.cat((labels_support, labels_support), dim = 1)
elif method == "random_crop":
n_support = opt.s_du * n_support
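# Pad 4 px on each side, then sample a random 32x32 window; assuming 32x32
# inputs (e.g. CIFAR), this is the usual pad-then-random-crop augmentation.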
data_shot = F.pad(data_support.view([-1] + list(data_support.shape[-3:])), (4,4,4,4))
data_shot = random_cropping(data_shot, 32)
data_support = torch.cat((data_support, data_shot.view([size[0], -1] + list(data_support.shape[-3:]))), dim = 1)
labels_support = torch.cat((labels_support, labels_support), dim = 1)
return data_support, labels_support, n_support
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default='0')
parser.add_argument('--load', default='./experiments/exp_1/best_model.pth',
help='path of the checkpoint file')
parser.add_argument('--episode', type=int, default=1000,
help='number of episodes to test')
parser.add_argument('--way', type=int, default=5,
help='number of classes in one test episode')
parser.add_argument('--shot', type=int, default=1,
help='number of support examples per training class')
parser.add_argument('--shot_aug', '-shotaug', default=[], nargs='+', type=str,
help='whether to use shot-level data augmentation.')
parser.add_argument('--s_du', type=int, default=1,
help='number of support examples augmented by shot')
parser.add_argument('--query', type=int, default=15,
help='number of query examples per training class')
parser.add_argument('--network', type=str, default='ProtoNet',
help='choose which embedding network to use. ProtoNet, R2D2, ResNet')
parser.add_argument('--head', type=str, default='ProtoNet',
help='choose which classification head to use. ProtoNet, Ridge, R2D2, SVM')
parser.add_argument('--dataset', type=str, default='miniImageNet',
help='choose which dataset to use. miniImageNet, tieredImageNet, CIFAR_FS, FC100')
opt = parser.parse_args()
(dataset_test, data_loader) = get_dataset(opt)
dloader_test = data_loader(
dataset=dataset_test,
nKnovel=opt.way,
nKbase=0,
nExemplars=opt.shot, # num training examples per novel category
nTestNovel=opt.query * opt.way, # num test examples for all the novel categories
nTestBase=0, # num test examples for all the base categories
batch_size=1,
num_workers=1,
epoch_size=opt.episode, # num of batches per epoch
)
set_gpu(opt.gpu)
# Define the models
(embedding_net, cls_head) = get_model(opt)
# Load saved model checkpoints
saved_models = torch.load(opt.load)
embedding_net.load_state_dict(saved_models['embedding'])
embedding_net.eval()
cls_head.load_state_dict(saved_models['head'])
cls_head.eval()
# Evaluate on test set
test_accuracies = []
for i, batch in enumerate(tqdm(dloader_test()), 1):
data_support, labels_support, data_query, labels_query, _, _ = [x.cuda() for x in batch]
n_support = opt.way * opt.shot
n_query = opt.way * opt.query
for method in opt.shot_aug:
data_support, labels_support, n_support = shot_aug(data_support, labels_support, n_support, method, opt)
with torch.no_grad():
emb_support = embedding_net(data_support.reshape([-1] + list(data_support.shape[-3:])))
emb_support = emb_support.reshape(1, n_support, -1)
emb_query = embedding_net(data_query.reshape([-1] + list(data_query.shape[-3:])))
emb_query = emb_query.reshape(1, n_query, -1)
if opt.head == 'SVM':
logits = cls_head(emb_query, emb_support, labels_support, opt.way, opt.shot, maxIter=3)
else:
logits = cls_head(emb_query, emb_support, labels_support, opt.way, opt.shot)
acc = count_accuracy(logits.reshape(-1, opt.way), labels_query.reshape(-1))
test_accuracies.append(acc.item())
avg = np.mean(np.array(test_accuracies))
std = np.std(np.array(test_accuracies))
ci = std / np.sqrt(i)  # standard error over the i episodes seen so far
if i % 50 == 0:
print('Episode [{}/{}]:\t\t\tAccuracy: {:.2f} ± {:.2f} % ({:.2f} %)'\
.format(i, opt.episode, avg, ci, acc))
| grid_source = build_grid(batch.size(-1),t)
#Make radom shift for each batch
grid_shifted = random_crop_grid(batch,grid_source)
#Sample using grid sample
sampled_batch = F.grid_sample(batch, grid_shifted, mode='nearest')
return sampled_batch |
effect.py | from brave.overlays.overlay import Overlay
from gi.repository import Gst
class | (Overlay):
'''
For applying a video effect.
'''
def permitted_props(self):
return {
**super().permitted_props(),
'effect_name': {
'type': 'str',
'default': 'edgetv',
'permitted_values': {
'agingtv': 'AgingTV effect',
'burn': 'Burn',
'chromium': 'Chromium',
'dicetv': 'DiceTV effect',
'dilate': 'Dilate',
'dodge': 'Dodge',
'edgetv': 'EdgeTV effect',
'exclusion': 'Exclusion',
'optv': 'OpTV effect',
'radioactv': 'RadioacTV effect',
'revtv': 'RevTV effect',
'rippletv': 'RippleTV effect',
'solarize': 'Solarize',
'streaktv': 'StreakTV effect',
'vertigotv': 'VertigoTV effect',
'warptv': 'WarpTV effect'
# Note: quarktv and shagadelictv are removed as they were unreliable in testing
}
},
'visible': {
'type': 'bool',
'default': False
}
}
def create_elements(self):
# The effects filters can mess with the alpha channel.
# The best solution I've found is to allow it to move into RGBx, then force a detour via RGB
# to remove the alpha channel, before moving back to our default RGBA.
# This is done in a 'bin' so that the overlay can be manipulated as one thing.
desc = ('videoconvert ! %s ! videoconvert ! capsfilter caps="video/x-raw,format=RGB" ! '
'videoconvert ! capsfilter caps="video/x-raw,format=RGBA"') % self.effect_name
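# With the default effect this builds, for example:
#   videoconvert ! edgetv ! videoconvert ! capsfilter caps="video/x-raw,format=RGB" !
#   videoconvert ! capsfilter caps="video/x-raw,format=RGBA"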
self.element = Gst.parse_bin_from_description(desc, True)
self.element.set_name('%s_bin' % self.uid)
place_to_add_elements = getattr(self.source, 'final_video_tee').parent
if not place_to_add_elements.add(self.element):
self.logger.warning('Unable to add effect overlay bin to the source pipeline')
| EffectOverlay |
test_client.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
from paddle_serving_client import Client
from paddle_serving_app.reader import *
import cv2
preprocess = DetectionSequential([
DetectionFile2Image(),
DetectionResize( | DetectionNormalize([123.675, 116.28, 103.53], [58.395, 57.12, 57.375], False),
DetectionTranspose((2,0,1))
])
postprocess = RCNNPostprocess("label_list.txt", "output")
client = Client()
client.load_client_config("serving_client/serving_client_conf.prototxt")
client.connect(['127.0.0.1:9494'])
im, im_info = preprocess(sys.argv[1])
fetch_map = client.predict(
feed={
"image": im,
"im_shape": np.array(list(im.shape[1:])).reshape(-1),
"scale_factor": im_info['scale_factor'],
},
fetch=["save_infer_model/scale_0.tmp_1"],
batch=False)
print(fetch_map) | (512, 512), False, interpolation=cv2.INTER_LINEAR), |
formatDmMessage.ts | import {Client, TextChannel} from "discord.js";
import wrap from 'word-wrap';
const block = '=============================================================';
/**
* Formats a Discord message with id [message_id] found at channel [channel_id].
* If the channel or the message cannot be found, this returns null.
* Otherwise, it returns the string-formatted message.
* @param client
* @param message_id
* @param channel_id
*/
export const formatDmMessage = async (
client: Client,
message_id: string,
channel_id: string
): Promise<string | null> => {
const channel = await client.channels.fetch(channel_id);
if (channel.type === "text") {
const message = await (channel as TextChannel).messages.fetch(message_id);
const author = message.author;
const guild = message.guild;
const content = message.content;
const reactions = message.reactions.cache.array(); | Message: ${message.url}
By: ${author.username}, ${author.toString()}
${guild ? `From Server: ${guild.name}, ${guild.id}` : 'Server unknown'}
Content:
${block}
${content}
${block}
${reactions.length > 0 ?
`${reactions.map(r => r.emoji.toString() + " " + r.count).join(' ')}`
: ''
}`;
return wrap(response, {width: block.length});
}
return null;
}; |
const response = ` |
check_callback.py | # Disactivate safety reflexes
# First, go to http://pepper.local/advanced/#/settings to enable the disactivation
import qi
import sys
# Connect to Naoqi session | try:
session.connect("tcp://127.0.0.1:9559")
except RuntimeError:
print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) +".\n"
"Please check your script arguments. Run with -h option for help.")
sys.exit(1)
# Access the module
mcnaoqidcm_service = session.service("MCNAOqiDCM")
# Check if the callback is connected to DCM loop
print "Is callback connected to DCM: " + str(mcnaoqidcm_service.isPreProccessConnected()) | session = qi.Session() |
idle-output.rs | #![no_main]
#[mock::app]
const APP: () = {
#[idle]
fn idle(_: idle::Context) -> u32 |
};
| {
0
} |
main.go | package main
import (
"bufio"
"errors"
"fmt"
"io"
"math"
"os"
"strconv"
)
/*********** I/O ***********/
var (
// ReadString returns a WORD string.
ReadString func() string
stdout *bufio.Writer
)
func init() {
ReadString = newReadString(os.Stdin)
stdout = bufio.NewWriter(os.Stdout)
}
func newReadString(ior io.Reader) func() string {
r := bufio.NewScanner(ior)
// r.Buffer(make([]byte, 1024), int(1e+11)) // for AtCoder
r.Buffer(make([]byte, 1024), int(1e+9)) // for Codeforces
// Split sets the split function for the Scanner. The default split function is ScanLines.
// Split panics if it is called after scanning has started.
r.Split(bufio.ScanWords)
return func() string {
if !r.Scan() {
panic("Scan failed")
}
return r.Text()
}
}
// ReadInt returns an integer.
func ReadInt() int {
return int(readInt64())
}
func ReadInt2() (int, int) {
return int(readInt64()), int(readInt64())
}
func ReadInt3() (int, int, int) {
return int(readInt64()), int(readInt64()), int(readInt64())
}
func | () (int, int, int, int) {
return int(readInt64()), int(readInt64()), int(readInt64()), int(readInt64())
}
// ReadInt64 returns an integer as int64.
func ReadInt64() int64 {
return readInt64()
}
func ReadInt64_2() (int64, int64) {
return readInt64(), readInt64()
}
func ReadInt64_3() (int64, int64, int64) {
return readInt64(), readInt64(), readInt64()
}
func ReadInt64_4() (int64, int64, int64, int64) {
return readInt64(), readInt64(), readInt64(), readInt64()
}
func readInt64() int64 {
i, err := strconv.ParseInt(ReadString(), 0, 64)
if err != nil {
panic(err.Error())
}
return i
}
// ReadIntSlice returns an integer slice that has n integers.
func ReadIntSlice(n int) []int {
b := make([]int, n)
for i := 0; i < n; i++ {
b[i] = ReadInt()
}
return b
}
// ReadInt64Slice returns an int64 slice that has n integers.
func ReadInt64Slice(n int) []int64 {
b := make([]int64, n)
for i := 0; i < n; i++ {
b[i] = ReadInt64()
}
return b
}
// ReadFloat64 returns a float64.
func ReadFloat64() float64 {
return float64(readFloat64())
}
func readFloat64() float64 {
f, err := strconv.ParseFloat(ReadString(), 64)
if err != nil {
panic(err.Error())
}
return f
}
// ReadFloat64Slice returns a float64 slice that has n float64s.
func ReadFloat64Slice(n int) []float64 {
b := make([]float64, n)
for i := 0; i < n; i++ {
b[i] = ReadFloat64()
}
return b
}
// ReadRuneSlice returns a rune slice.
func ReadRuneSlice() []rune {
return []rune(ReadString())
}
/*********** Debugging ***********/
// ZeroPaddingRuneSlice returns binary expressions of integer n with zero padding.
// For debugging use.
func ZeroPaddingRuneSlice(n, digitsNum int) []rune {
sn := fmt.Sprintf("%b", n)
residualLength := digitsNum - len(sn)
if residualLength <= 0 {
return []rune(sn)
}
zeros := make([]rune, residualLength)
for i := 0; i < len(zeros); i++ {
zeros[i] = '0'
}
res := []rune{}
res = append(res, zeros...)
res = append(res, []rune(sn)...)
return res
}
// Strtoi is a wrapper of strconv.Atoi().
// If strconv.Atoi() returns an error, Strtoi calls panic.
func Strtoi(s string) int {
if i, err := strconv.Atoi(s); err != nil {
panic(errors.New("[argument error]: Strtoi only accepts integer string"))
} else {
return i
}
}
// PrintIntsLine returns the integers as a space-delimited string.
func PrintIntsLine(A ...int) string {
res := []rune{}
for i := 0; i < len(A); i++ {
str := strconv.Itoa(A[i])
res = append(res, []rune(str)...)
if i != len(A)-1 {
res = append(res, ' ')
}
}
return string(res)
}
// PrintInts64Line returns the int64 integers as a space-delimited string.
func PrintInts64Line(A ...int64) string {
res := []rune{}
for i := 0; i < len(A); i++ {
str := strconv.FormatInt(A[i], 10) // 64bit int version
res = append(res, []rune(str)...)
if i != len(A)-1 {
res = append(res, ' ')
}
}
return string(res)
}
// PrintDebug is wrapper of fmt.Fprintf(os.Stderr, format, a...)
func PrintDebug(format string, a ...interface{}) {
fmt.Fprintf(os.Stderr, format, a...)
}
/********** FAU standard libraries **********/
//fmt.Sprintf("%b\n", 255) // binary expression
/********** I/O usage **********/
//str := ReadString()
//i := ReadInt()
//X := ReadIntSlice(n)
//S := ReadRuneSlice()
//a := ReadFloat64()
//A := ReadFloat64Slice(n)
//str := ZeroPaddingRuneSlice(num, 32)
//str := PrintIntsLine(X...)
/*
ASCII code
ASCII decimal ASCII decimal ASCII decimal
! 33 " 34 # 35
$ 36 % 37 & 38
' 39 ( 40 ) 41
* 42 + 43 , 44
- 45 . 46 / 47
0 48 1 49 2 50
3 51 4 52 5 53
6 54 7 55 8 56
9 57 : 58 ; 59
< 60 = 61 > 62
? 63 @ 64 A 65
B 66 C 67 D 68
E 69 F 70 G 71
H 72 I 73 J 74
K 75 L 76 M 77
N 78 O 79 P 80
Q 81 R 82 S 83
T 84 U 85 V 86
W 87 X 88 Y 89
Z 90 [ 91 \ 92
] 93 ^ 94 _ 95
` 96 a 97 b 98
c 99 d 100 e 101
f 102 g 103 h 104
i 105 j 106 k 107
l 108 m 109 n 110
o 111 p 112 q 113
r 114 s 115 t 116
u 117 v 118 w 119
x 120 y 121 z 122
{ 123 | 124 } 125
~ 126 127
*/
/*******************************************************************/
const (
// General purpose
MOD = 1000000000 + 7
ALPHABET_NUM = 26
INF_INT64 = math.MaxInt64
INF_BIT60 = 1 << 60
INF_INT32 = math.MaxInt32
INF_BIT30 = 1 << 30
NIL = -1
// for dijkstra, prim, and so on
WHITE = 0
GRAY = 1
BLACK = 2
)
var (
n int
S []rune
dp [3000 + 5][3000 + 5]int
)
func main() {
n = ReadInt()
S = ReadRuneSlice()
dp[1][1] = 1
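// dp[i][j]: number of valid sequences of length i whose last element is the
// j-th smallest among the i placed so far. Using prefix sums over dp[i-1],
// '<' accumulates dp[i-1][1..j-1] and '>' accumulates dp[i-1][j..i-1],
// so each row costs O(n) and the whole DP runs in O(n^2).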
for i := 2; i <= n; i++ {
sums := make([]int, 3000+5)
for j := 0; j <= n; j++ {
sums[j+1] = sums[j] + dp[i-1][j]
}
for j := 1; j <= i; j++ {
if S[i-2] == '<' {
dp[i][j] += sums[j]
} else {
dp[i][j] += (sums[i+1] - sums[j])
}
dp[i][j] %= MOD
}
}
ans := 0
for i := 1; i <= n; i++ {
ans += dp[n][i]
ans %= MOD
}
fmt.Println(ans)
}
/*
- First, consider brute force.
- Did you take MOD at the end?
- Is more processing needed after exiting the loop?
- If you compute sums, products, or remainders, do you need int64?
- Are you overflowing right off the bat?
- Problems that take MOD require int64!
- Did you think from the back, in reverse, or from the goal?
- Of the three, did you focus on the middle one?
/*******************************************************************/
| ReadInt4 |
input.rs | pub fn | () -> &'static str {
"90: 86 86
122: 86 1 | 99 20
116: 86 58 | 99 75
20: 86 123
62: 99 95 | 86 113
81: 76 99 | 90 86
106: 120 86 | 93 99
73: 99 72 | 86 45
117: 131 99 | 72 86
92: 86 96 | 99 98
13: 3 99 | 118 86
56: 90 86 | 58 99
85: 72 99 | 51 86
51: 99 99 | 86 86
59: 99 25 | 86 62
65: 99 15 | 86 97
112: 86 13 | 99 38
46: 33 86 | 2 99
10: 67 86 | 68 99
33: 120 99 | 76 86
38: 35 86 | 125 99
26: 86 10 | 99 55
1: 33 99 | 60 86
8: 42 | 42 8
16: 51 86 | 93 99
107: 40 99 | 2 86
40: 17 120
34: 86 82 | 99 127
88: 93 17
2: 99 51 | 86 120
32: 100 99 | 7 86
113: 86 127 | 99 82
14: 73 86 | 44 99
25: 86 101 | 99 56
130: 110 86 | 109 99
19: 86 4 | 99 49
30: 86 92 | 99 70
27: 17 86 | 86 99
94: 47 86 | 53 99
115: 86 107 | 99 84
15: 76 99 | 58 86
58: 86 99
105: 130 86 | 32 99
71: 120 99 | 131 86
12: 99 131 | 86 82
60: 72 86 | 93 99
84: 86 102 | 99 80
44: 99 76 | 86 72
125: 76 99 | 131 86
18: 99 71 | 86 52
129: 37 86 | 111 99
102: 99 131 | 86 76
66: 86 105 | 99 41
99: \"a\"
9: 99 18 | 86 65
131: 17 99 | 99 86
39: 76 99 | 93 86
64: 115 99 | 114 86
57: 86 48 | 99 94
35: 72 86 | 51 99
0: 8 11
77: 86 83 | 99 106
118: 72 86 | 75 99
47: 99 103 | 86 85
23: 99 27 | 86 76
48: 119 99 | 78 86
49: 86 51 | 99 45
67: 86 120
61: 86 72 | 99 127
108: 72 99 | 72 86
95: 86 58 | 99 90
83: 86 27 | 99 131
75: 86 99 | 99 99
101: 51 99 | 27 86
103: 90 99 | 90 86
128: 86 69 | 99 33
70: 99 14 | 86 19
52: 127 86 | 90 99
21: 86 24 | 99 59
22: 86 63 | 99 12
42: 79 86 | 66 99
97: 51 17
104: 86 33 | 99 28
100: 99 16 | 86 39
72: 99 86
78: 86 43 | 99 50
55: 86 6 | 99 34
45: 99 99
5: 86 46 | 99 77
93: 99 99 | 99 86
6: 131 99 | 51 86
110: 71 86 | 28 99
68: 90 86 | 27 99
29: 87 86 | 122 99
80: 86 93 | 99 131
54: 120 86 | 75 99
43: 93 86
98: 99 103 | 86 117
7: 101 86 | 88 99
127: 17 86 | 99 99
96: 86 12 | 99 61
41: 99 5 | 86 112
79: 86 57 | 99 21
11: 42 31 | 42 11 31
86: \"b\"
111: 45 99 | 72 86
63: 76 99 | 127 86
124: 86 81 | 99 116
28: 86 75 | 99 58
82: 99 86 | 86 99
121: 64 86 | 74 99
87: 86 124 | 99 104
74: 99 26 | 86 9
31: 126 99 | 121 86
50: 99 72
119: 34 86 | 36 99
36: 86 82 | 99 75
91: 86 131
3: 58 99 | 27 86
114: 99 129 | 86 22
24: 86 89 | 99 128
53: 91 99 | 95 86
126: 29 86 | 30 99
109: 99 108 | 86 23
17: 86 | 99
76: 86 86 | 99 86
120: 17 17
89: 54 86 | 37 99
4: 99 131 | 86 58
69: 27 17
37: 99 58
123: 86 76 | 99 82
bbababbaabbaaabaaaabbabbbbbababbbababaaaabbaabaaaaaabaaaabbaabba
aaabbbabbabbbbbbaabbabababaaaaabaaabaaaaabaaaabbbbabbabb
babaabaabbabaaaaabbababb
babbabbaababbaaaababbaabbbbbaabaabbbababaabbbabbbabababaabbabbabaabbbaababbbbbbb
bababbbabaaabbaabbababab
aaaaabaaaabbaabaaaaabbaa
aabbbaaaaababbaaabaabbbbabbbaaaaabbaaaab
baaaabaaaabbababbaaabbab
ababababaaaaabaaaaabbaaa
baabaababaaabbaabbbababb
abbbabbbbbabaaabaabbabbb
aaababbbabababbbaabbaaaabababbaa
babbaaabbaaaabaababbbabb
bbbabbbabbabaaaabaabaaab
bbbbaaabaaaabbbaabbbbabb
aaaaaaaaaaaaabaabaaaabbb
babaabbababbabaababbaaba
bbabaaaabaaabbaaaabbbbbbbabbabbbbaaaabab
babaabbbbbbbbbaabbaaabaa
baabbbabababbababaabbbaa
baaaabaaaabaaaaaaaaaaaab
aaabbbbbbbabbbabbbbbabbb
bbabaaabaaababaabbbbbbba
baaabaaabbabbaaaaaaababb
abbabaabbaaaaaaaaabaabba
aaaabababbbbbbaabababbab
bbbbbbabababbaabaabbbabaaaabbabbaabbabbbbaababbbabaabbbbabbabaabababaabaaaaaabab
bbbabbaaaababbabaabaabbbabbbabab
bbbabbbababbbbaabbbbbbab
baabaaaaabaabbbbaaabaaaabbbbbbab
bbbbaaaaaabaaaaaabbbabaababbaabb
bbbbbabaabaaaaababbaaabbbaababbbbbaaabaa
aaabaabaaaabaaaaababbaaaaaababbbaaababab
bbaaaababbbaaabbabbabbabbabbbbabbababaabbbabbabbabaaabbabaaaabba
aababbbaaaabbbaabbbbababbabbaaaa
abbbaabbabaaaaababaaaaaa
babbabbabbaabababbabaaaaaabbbabbbabaabbbbabbbaabbaaaaaba
aabbbaabaaabbabbbbaabaaabbbabbababbaababababbbba
baabbabbbaabbbbbbbaaaaaababababaaabbbbabaabaabbaaaaabbab
bbabaaabbbbbaaaababbabbb
baabaabaaababbbabaaababa
aaaabbbbabbbaabbababaaaa
babbababaaaababababaabab
bbbaabbbabbbabaabbbabbbaaabbaabbabbbbbbbaaaabaaabaaabbaabbbbabbb
aabbaabbabbabbbabaaaabba
abaabbaaaaabbabbaaabbbabbabbaaaa
bababaabbaaabaaababbababbbbbbabbbaaabaab
aaababbbaabbaabbbbaaabbaaababaaabaabbbaa
abbaaababbaaababbaaaabba
bbaababaaabbabaaababbaabbaabbbbaaabbaabbabaaabbabbabbbbbabbbaaabbbababbb
bbaaabbbbbaaabbabaaaabba
abbbabaabbaabaaaaaabbaaa
bbbabaabbbbbbbaababbbbaaabbbabba
abbabbaaaaaabbbbbbabaaabbaaaaaab
bbaababaaaababaabaabbababbbbbaab
aabbababaaabbbaaaaabbabbaabbbbbbbabbbabb
aaabbbaaaababbabbababbbb
aababbababaaabaabababbbb
bbabbbabbbabbbabababbbba
aaabaaaaabbbbbbbbaaaaaab
babaabbbaaabbbabbaaaaaab
bbaabbbaaabbababbbaaabaa
aabbbaaaaaaaaaaaaaaaabaaaabbbbbabbabaaaaabbbbbab
abaaaaabaabbbaaaabbbaaab
abaabbabbabbbbbabbababaa
baaabbaaaabbbabbaaaabaaa
abbbbbaabbaaababbbbabaab
aababbaaaabbbbbabaababaabababaabbabbbaba
abaabaaabbabababababababbaabaaabbbaabaabaabbbababbbabbbabbabbbbbabababaabbabbaba
aaaaaabaaaaabababbbabbabbbaabbabaababbbb
babaaaababaaababaabbabba
ababbaaaabaaabbbbababaaa
baababbbabbbbaaabaababbbabbbbaababaaabaaaabbaabababababaaaaababa
bbaabbababbabaabbbabbabb
bbabbbbaababbaabbbbabbbbbabbabaaaaabbbababbbbaabaaaaaabbbababbbbbabbbabb
baaaaaaababaaaabbbbbaaaaabaabbbbbaababbb
aabbabaaabbbbbaabbbaabab
ababbabbbbaabaaabababaabaaaabbab
aaabbabaaaaabbbbabbbbaaa
bbaaaaabbaaaabaaabbbaaaa
abbbaaaaabbbabbbabbabaaa
bbaaaabbaaaababababaababaaaabbbaaabaabaaaababbabbbaaaabaabaaabbaababbbaaabaabbaababbbaabaabaabbb
bbaabbaaaababbababbaaabbbabbbababbbbbbaabbaabababbababaababbaababbbaaabbaabbbbab
aababbabaaabaaaabbaabbaa
babaaabbaaabaababbabbbbabbbaabaabbaabbbababbabaabababaabbbababbabbbbaababbaabaaa
baabbabbbbaabbbaaabbaaab
abaaaaabbbaababaabbaabab
aaabbbbbaaababaaabababaabbaababb
bbaaababbaabbbbababbbbbabbaabbabbbbbaaabbbabbabb
bbabbbbaabbbbbbbbaaaabba
aababbabaaabaababaabbabbabaaaabaaabbbabaaabbbabbbabaaaaa
babbbbbbaabbbaabbabbbabb
baabbbbaababbaabaaabbabb
aaabaabaabbbaabababbbbaaaaaabaabbabaabbbabaaabbbaaaabababaababbbababbabaaaaaabab
aabbabaabaaaaaaababbababaabaabba
ababbababbaabbbaabaabaababbaabbbaaaabbaa
aabbababaabbbbbbbaababaababbabba
baabbabbbaaaaaaaaaaabaaa
baabbbabaaabbabbbbbbbabb
babaabaaaabaaaaabbbaaaab
baaaaaaababbbbbaabbbbbab
bbabbbabababbaabbaaababa
bbbbabababaaaaababbabbbababbabaaabbaabababbababaaaaabbab
aabbbbbababbabaaabababaaaaabaababbbabbab
babbbbaabaabbbbabbbbbabb
aaabbbabbabaabbbabaaababbaaabaab
abbaaabbbbaaabbbbabbbbbbbbbbabaa
bbabbabaaaabbabbbbbababababbaaabbabaaaaababaaaba
bbaabaaaaabaaababaababbb
baabbaaaaaababbababbaabaabababbbabaaabbbbbbaaabbbbababaababaabbbbbabaaababbaaaabbbaaaaaa
aaabaaabbbbbaaaabbbbabba
abbbaabbbabbbaaaaabaabab
aaabbbaabbaabbabbaabbbbbbbabbaaaaaaabbbaaaaaabab
aaaaaaaaaabbaabaaabbbbaabbaaaaabaaabbaababbbbbab
bbbbababbabababbbbababba
abaaaabaabbbbaaaabbbbbbbbaababbabbbbaabbaabbbbbabaaabbbaaaaaabab
bbbbbbaabaabaaaaaaaaabbaaaaaaababbabbabaabbabbab
abbbbbbbbaaaabaaaababbabbbbbaabb
bababaabaababbbaabbaabab
abaaaabaabaabbabaaabbbbbbbaaaaaa
aaaaaabaababababbbaabbbabbaaaaba
abaabaabbbababbababbaabb
baaabbaabbbaabbbbabbabbb
baababaabaaabaaaabbabbbabababbaabbbbbbba
bbbaabbabbbabaabbaaabbaaaabababaaabbbbaaababbaab
abaabbbbabbabbbbaaaaabab
bbbabaababaabbbbbbbaaaab
abbaaabaaaaaaabaabaaaabaabbaaabbabbabbbbaabaabab
aabaaaaababbbaaaaaaababb
aaabbbaaabaabbbabbbbabaa
aaaaabaababaabbabaaaabaaabbaaaaa
aaaabaabababbbbbaaaaaaaaaabbbbbbbbbbbbababbbbabbbaaabaab
bbabbbabbaabbbbaaaabaabaabaaabbbaaaaaaababbbaaab
aabaaabaaabbababbabababa
babbbaaaaabbbaabbbabbababbbbbbbaabbababa
abbaabaabaabbaaaabaaaaaaababbaab
abaaaababbaabababaabbbababbabaabaaababbbbbbbabba
bbaaaabababbabababbabaabbbaaabbbbbbabbaa
aabbaabbaaabbbaabaaaaaab
bbaabbabaaaabbbabbabbabb
abbaaabbabbbaaaabbbbbaaabbabbabababbbbbbabaabbbbbabbaaaaabaababb
ababbaabbabbbbbbbbabaaaabbbbababbaaabbbabaabaabb
aaabbabaaabbbabaaaaabaabaaababbaabbbabbababbbbabaabbabbb
abbabbaaabababababbbabaaaaaabaabbabaabab
babaabaaababbaaaaabbaaab
aaaaaababababaababbaaaab
ababbabaaababbaaabbbaababaaaaabb
bbaaabbabaaaabaabbabaaaaaaaababbbbbabaaa
bababbbabbaabbabbbbbabba
bbaaabbaaabbbbaabababaabbabbaaaa
aaaabaabaaaaabbaababbaabababbaaa
ababbabaabbabbbbbbaababb
aabbbbbbbaaabbbababababa
bababbbaabbabbaabaaaabab
bbaaabbbaabbbbbaaabaabba
aabbbbaaababbbabbbaabbbb
baaaabaaabaaabbbababbbabaabbbabbaabbbaabbbbbbbabaaaabbaababbaabaaababbbb
babbbbabbbbababbabaaabba
babbbbaaababbbbbbabbbaaabaabbaab
bbbbbbaaaabbbabbaaabaaaabbbabaaaaabaabab
babbbbbabbabaaaaaaaaabbb
ababbbabbabaabbaaaabbabbaaaabbaa
bbbabbbabababbbabbaaabbbbbbbaaba
abbabbaaabaaababbaaaaabb
bbababbabababaaaaababbbbbabbbaabababbaabaabbbaababbaaaba
babbababaabbbaaaabbaaaab
abbaaabbbaaaaaaabbababab
bbbabbbabbabbabababbbbab
babbbaaabababaabaababbbababbbbbaabbbbaba
bbbababaabaabaabababbbbbabaaaababbabbbabbbaaabaababbaaaaabaaaabbaaaabbab
babababbabbabbbaaaaabbab
baabbabbabaabbaaababbbbb
aabbabaaaaaaabbaabbabbbaababbaabababbbbbbabbbababbbaaaba
aababababbbabbbaabbaaaaa
aabababaaabaaabaaabbaababbbabbaabbaaababbbbbbbababababbaabbbbbab
abbbaabbbbabbbbaabbabbab
aaaabbbbaabbaabbbabaaaaa
aababbababaabbbaaabaabba
baaaaabaaaaaabbababaabbbabaaaabbabaabbbaabaaabbaabaababaaaabbaaa
ababbaaabaababaabbbbbababaaabbbb
babbabababbbaaaabbbaabaa
baabbabbbaabbabaaabbbbaabbbaabbaabbabaababaaabaabbaababbbaaaabbb
abaabbbbaaaabaabbbbbbbba
baabbabaaaababaaabbaabbbbbbbbbbb
abbaaaabababbbbabababaabbbaaabbaababaabbbaaaaaaaaabaaaab
bbbabbabaaaabbababbaabababaaaabaaabbaaaabbbababa
bbbababababbbbbbababbbba
bbbbbbaaaaabbbaabaababab
aaaabbbaabbaaababaaababa
bbaaabbabbabaabaaaabaaaaabaaabbabaaababb
ababbabaabaabbaababaaaaa
bbbaabbaaaababbaabbbbabaaabaaaab
abbbabbbbbbabbaaabbaabbbababbbbaaabababb
bbbbbaaabaaabaabaabbaaab
bbaabbababaaababaaabbbabaaaababaabbaaababaabaaab
abbbabbbaabbbbbabbababbb
bbbaabbabbabbbabbaaaabba
bbabaabaababbbaababababaaabaabababaaaaaabaaababb
babaabaaaaaaabaaaabaabba
babbbbbaaabbbaaaaabbababbaababba
aababbaaababbbbbbabbabba
bbabbbabaabbaabbbbbbabaa
ababbababbbababaababbbaa
aaabbbaabbabaabaababbbba
bbaabbabbbabbaaabaabbbababaaabaabbbbbabbbbaaabaaababaaba
baababaabbaaabbabaaababb
abbaabbbababababaaababbbbaabaabb
baabbbbbaaabaababaababba
babaabaababaabbbbaabaabaaaababaababbabba
bbaaaaabaababbaaaabbaabaaabbabbbbababbaa
abbaaabbaaabbbbaaabbbbaaabbaaaabaabbaaabbaababbbabbabbabaaaababb
aaabbaabababaaaabaabaaab
bbaabbaabbbabaaabbabbbbbbbbabaabbabaaabababbbbabbaabbabbbbbabbaa
bbbabbbbbbabbbaabaababab
bbabbbbabbaabbbababbabba
aaabbbaabaabaababaabbabababbaaaabbbbbbba
babbaabababbabababbbbbbaababaabbbaabbbbababaaaabbbaaaaababbabaaabbbbaaaaabaaababbbbaabba
babbbabbbaaaaaaabbaabbbbabbbbabbaaabababaabbbbab
bbaaaaababaaabbaabbbaaaaaababbabbbaabaabaaabaaabbabaabab
baabbbbbabbaaababbbbaaabbbbbbaabbbbbbaabbaaaaababbaaaaaaaabbabba
bbabbbaaabaaabbbabbbabaaaababbaaaabbbaabababbbbabbababaaaaaababb
baaabaaaaabbababbbabbbbaabaabbbaaabbbaabbaaabbabbaaabaab
aaaaaabaaaaabbbabbbaabaa
bbaaabbbbabaaaabbabaaaabaabaaaaababaababaaaabaaaaababbbb
babbaaababbbaabaabbbbaab
aaabbaababbbbbabaaabbaaaaabbbbbaabaabaababababababbabbbb
babababbbaabaaaabaaabbbb
babbabaaaaabbbabbbbbbbaabbbaaaaa
aabbabaabaaabbbaaaaaabab
bbbabbaaabbbbabbbababbabaaabababaababaab
bbabaaaabaabbabbbbbabaabaaababbb
baababbbbaaaabbabbaabbbbbbbbbaaabbbbaaba
aabbaababaaaabaabaabbaba
bbbabbaababaabaaabbbaaaaabaaaabaaabaaababababaaabbababbbbbbbabba
bbabaabaabaaabaabbabbaab
aabaaaaaaaababbbaaababbbbbbaabab
baaaabaaabbbabbbabaaababbaabbaaa
baabbbbabababbbaaababbbabbabbbbabbaabbbaabbabababaaabababaabaabbbabbabba
baabaabaabababaabaaaabbb
abbaabbbbabaabaaababbababaabbabbbabbbbabbbbbaabababbaabb
abbbabbbbbbababaabbbabbabbbbbaab
abaaabbbbbbaabbbaabbaaab
ababbbabbbabbbaaabaaabaababababa
bbbbbbaaabbbaaaaaaabbabaabbbbaabbbbaaaab
bababbbbbabbbbbbbaabbbbbbbababbbbabbabbbabbbbaabbaaaabba
baaabbaaaaaaaababaaabbbaabaabaaabaabababaaabbaaaabbaabaa
aabbaabbbbbabaababaabaaaababbaaabbabbababbbbaabb
abbbbbaabbbabbbbbbbbaaba
aaabbabbbbbabbbababaaabb
abaabaaaaabbaaaababbbaab
aaaabaabbbbbaabababaaaaaaaaaaabb
bbbababaabbbabaabaababba
abbbaabbabaaababbbbbbabb
babbabbbaaaaabbbaababbabbaabbbbbbaaabbaaaabbbbbaabaababbbbaaaaba
aaaaaababbbababababaabab
bbaabaabbbaaaabbabbababaaaaaababaabaaabb
aabbabaababaaaababbbabab
ababbbababaabbbbbaaaabbbbbbbbaababbbbaaabbabbbaaabbbabbbbaaaabbbaaaaaaaaababaaab
bbbabaabaaaaabbaaaabaabb
babbaaabbaabaababbbbaaba
bbabaabababbbbbaabaabaaabbaaabbabbbabaaa
abbabaabbaaaabaabbbaaaba
aabbabaabaaabbbaaaabbbabbabababbbbbaabbb
aabbbbbbabababababbbbbab
abbabbbabbabaababaaabbbb
baaabbaaababbaaabaabaabb
bbbabbbaaaabaaaaabaaababaaaabbbbaabbaaaaabbbabab
aabbbabaaaababaabaaaaaba
abbbabbbababbabbbbaabbaa
abbbbbabaabababbbabababa
bbaabaabaababbaabababaaa
bbabaaabaababbbaaababbbaaaababba
abbaabaababbababaaaabbbaabbabaababbbaabaaaabaaabaabbbbab
babbbaaabbabbbaababbbbaaaabaababaaaabaaa
babababbabaaaabaaabbbabaaabbbabababaaaabbbabbaab
aabbbaababbaaabaababbaababaaabbbaaaaaabaababbaaaaabaaabbbbababaa
abbbabbbaaabbbbaaabaabab
abaabbaaaabbaabbbbbaaaaa
bbabaaabaaaabbbbaabbbaaaaabbbbbbabbbbbbabbbabaaa
bbbabbaababbbbbabbaaaaabbabaabaaababaababaaaaabb
abbbabbbababbaaabbabaaaaabbabbbabbabaabbbaaaaabbabbabaaabbbbbbbb
bababbbaaaabaabaabbababb
abbbbbbbabababaaabbaababbabaaababbaaaaaa
abbaabbbabaaaaabaaaaabbaabaaaaabababbbababbbabab
abbabbbbbbabaaaabbbabbbabbbbaaba
abbaabbbbaabaababaababba
bbabaaababaabbbaaaaaabbb
baabbabaaaaabbabaabbbbabbabbabbabbaaaaaa
babbbbbbababbababbabbbabaabbaaab
aaabaaabbbbabbbbaabbabbbbaaaaaabaabbbaabbaababaa
baabbbbbaaabaababababbab
babbaaabaaaabaababbabbaaaabaabaa
abaabbbbbbabaaaaaabaabab
aaaaabbaabababaabbbbbababbbababbbbaababb
abababababaabaaabbaaababbabbabbb
abaaaaabbbbbbabaaaaabbaa
aaaabbbbbbbbababbbabaabb
bbabbabaabaaabaaaaababab
aaabbbbbabbabbbbbabaababbbaabaababbababb
bbabaabaabbabbaaaabaabba
abbabaaababbaabaaaaabbbabbaaaaabbbaabbabbababaaabaabbaabbbbaaabaaaabbbaabbbbabba
ababbbabaaabbabbaaabbabaaabaabba
bbbabbbbbabbbbbbabaaabaaaaababbbbababaaa
babbabaaabbaaabbabbabaaa
babbbbaaabbbbbaaabaaabaabbbbababbaaababb
abbabbbbabababbbbbabbaaabaaababb
babbababbbaababaabbabbab
bbaaababbbbbaaababbbbabb
baaaabaababaabbbabaabbbaaabbbaabaaaabbab
abbaabbbbbaaaaabbbaabbabbaabbaab
aaaaaababbaababaaabaaabababbbabb
baaabbaabbbababaaababbbaaaaaabba
bbbbabaaaaabaaaaaaababbaaaababbbbbaabaaaaabaaaabaaaabbaabbbbbbbb
abaaabbbbaaabaaababbabba
aaaaabaaaaabbbaabbabaaaaabababaaabbbbbaabbaaaaba
ababbabababbbaaaaaaaabbb
aaababaaabaabbaababbabbb
ababababaabbbabbbaaabaab
baabbbabbaaabaaabaaabbbabbabbabb
bbbabbaabbabbababaabbbbaabaabbabaabaaabbbaaabbbb
aaabbbabaaabbbbbabbabbbbbbbaaaaa
aaabbababbaaababaaaabbaa
bbbabbaaababbbabaaabaabb
baababbabbbabaaababaaababababbaaaababaabbbbaaaabbabaaababbbabaababbabbbbbaaabbab
bbbabbaababaabbabbbbbaaa
abababaabaaaabaaabababbbbaababbbbbabbaaaaaabbaabbbbabababbabaabb
babbbababaaabbbabaababbbaaabababaaaababbbabbaabbabaabbba
bbbabbaabaabbbbaaaaaabaabaaabbab
bbbabbbbaabbaabaaaaabbbbbaabbabbabbaabaababbaabbaaabbaaa
aaababbbbaaaabaaabbabbbbabbbbaba
aaaaabbababaabbabaaaaaaaaaaaaababbbbbbaaababbaaaaabaabab
babbababbaaabbabbabbaaaabbaabaabbaaabbab"
}
| puzzle_input |
userService.ts | import User from 'models/user';
import api from 'utils/api'
import Cookie from 'js-cookie'
const UserService = {
async save(user: User) {
if(user.id !== '') {
return await api.put(`/api/User/${user.id}`, user)
} else { | const user: User = JSON.parse(Cookie.get('userData') as string)
return await api.get(`/api/User/${user.id}`)
}
}
export default UserService | return await api.post('/api/User', user)
}
},
async getUser() { |
keypairs.py | # Copyright 2014: Rackspace UK
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import novaclient.exceptions
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils
from rally import osclients
from rally.plugins.openstack.context.cleanup import manager as resource_manager
from rally.task import context
LOG = logging.getLogger(__name__)
@context.configure(name="keypair", order=310)
class Keypair(context.Context):
KEYPAIR_NAME = "rally_ssh_key"
def _generate_keypair(self, endpoint):
keypair_name = "%s_%s" % (
self.KEYPAIR_NAME, self.context["task"]["uuid"])
nova_client = osclients.Clients(endpoint).nova()
# NOTE(hughsaunders): If keypair exists, it must be deleted as we can't
# retrieve the private key
try:
nova_client.keypairs.delete(keypair_name)
except novaclient.exceptions.NotFound:
pass
keypair = nova_client.keypairs.create(keypair_name)
return {"private": keypair.private_key,
"public": keypair.public_key,
"name": keypair_name,
"id": keypair.id}
@utils.log_task_wrapper(LOG.info, _("Enter context: `keypair`"))
def setup(self):
for user in self.context["users"]:
user["keypair"] = self._generate_keypair(user["endpoint"])
@utils.log_task_wrapper(LOG.info, _("Exit context: `keypair`"))
def | (self):
# TODO(boris-42): Delete only resources created by this context
resource_manager.cleanup(names=["nova.keypairs"],
users=self.context.get("users", []))
| cleanup |
statement.rs | // Copyright 2018 sqlparser-rs contributors. All rights reserved.
// Copyright Materialize, Inc. and contributors. All rights reserved.
//
// This file is derived from the sqlparser-rs project, available at
// https://github.com/andygrove/sqlparser-rs. It was incorporated
// directly into Materialize on December 21, 2019.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License in the LICENSE file at the
// root of this repository, or online at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt;
use crate::ast::display::{self, AstDisplay, AstFormatter};
use crate::ast::{
AstInfo, ColumnDef, CreateSinkConnector, CreateSourceConnector, CreateSourceFormat,
CreateSourceKeyEnvelope, DataType, Envelope, Expr, Format, Ident, KeyConstraint, Query,
TableConstraint, UnresolvedObjectName, Value,
};
/// A top-level statement (SELECT, INSERT, CREATE, etc.)
#[allow(clippy::large_enum_variant)]
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Statement<T: AstInfo> {
Select(SelectStatement<T>),
Insert(InsertStatement<T>),
Copy(CopyStatement<T>),
Update(UpdateStatement<T>),
Delete(DeleteStatement<T>),
CreateDatabase(CreateDatabaseStatement),
CreateSchema(CreateSchemaStatement),
CreateSource(CreateSourceStatement<T>),
CreateSink(CreateSinkStatement<T>),
CreateView(CreateViewStatement<T>),
CreateViews(CreateViewsStatement<T>),
CreateTable(CreateTableStatement<T>),
CreateIndex(CreateIndexStatement<T>),
CreateType(CreateTypeStatement<T>),
CreateRole(CreateRoleStatement),
AlterObjectRename(AlterObjectRenameStatement),
AlterIndex(AlterIndexStatement),
Discard(DiscardStatement),
DropDatabase(DropDatabaseStatement),
DropObjects(DropObjectsStatement),
SetVariable(SetVariableStatement),
ShowDatabases(ShowDatabasesStatement<T>),
ShowObjects(ShowObjectsStatement<T>),
ShowIndexes(ShowIndexesStatement<T>),
ShowColumns(ShowColumnsStatement<T>),
ShowCreateView(ShowCreateViewStatement),
ShowCreateSource(ShowCreateSourceStatement),
ShowCreateTable(ShowCreateTableStatement),
ShowCreateSink(ShowCreateSinkStatement),
ShowCreateIndex(ShowCreateIndexStatement),
ShowVariable(ShowVariableStatement),
StartTransaction(StartTransactionStatement),
SetTransaction(SetTransactionStatement),
Commit(CommitStatement),
Rollback(RollbackStatement),
Tail(TailStatement<T>),
Explain(ExplainStatement<T>),
Declare(DeclareStatement<T>),
Fetch(FetchStatement),
Close(CloseStatement),
Prepare(PrepareStatement<T>),
Execute(ExecuteStatement<T>),
Deallocate(DeallocateStatement),
}
impl<T: AstInfo> Statement<T> {
/// Reports whether the statement is cursor-related.
pub fn is_cursor(&self) -> bool {
matches!(
self,
Statement::Declare(_) | Statement::Fetch(_) | Statement::Close(_)
)
}
}
impl<T: AstInfo> AstDisplay for Statement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
match self {
Statement::Select(stmt) => f.write_node(stmt),
Statement::Insert(stmt) => f.write_node(stmt),
Statement::Copy(stmt) => f.write_node(stmt),
Statement::Update(stmt) => f.write_node(stmt),
Statement::Delete(stmt) => f.write_node(stmt),
Statement::CreateDatabase(stmt) => f.write_node(stmt),
Statement::CreateSchema(stmt) => f.write_node(stmt),
Statement::CreateSource(stmt) => f.write_node(stmt),
Statement::CreateSink(stmt) => f.write_node(stmt),
Statement::CreateView(stmt) => f.write_node(stmt),
Statement::CreateViews(stmt) => f.write_node(stmt),
Statement::CreateTable(stmt) => f.write_node(stmt),
Statement::CreateIndex(stmt) => f.write_node(stmt),
Statement::CreateRole(stmt) => f.write_node(stmt),
Statement::CreateType(stmt) => f.write_node(stmt),
Statement::AlterObjectRename(stmt) => f.write_node(stmt),
Statement::AlterIndex(stmt) => f.write_node(stmt),
Statement::Discard(stmt) => f.write_node(stmt),
Statement::DropDatabase(stmt) => f.write_node(stmt),
Statement::DropObjects(stmt) => f.write_node(stmt),
Statement::SetVariable(stmt) => f.write_node(stmt),
Statement::ShowDatabases(stmt) => f.write_node(stmt),
Statement::ShowObjects(stmt) => f.write_node(stmt),
Statement::ShowIndexes(stmt) => f.write_node(stmt),
Statement::ShowColumns(stmt) => f.write_node(stmt),
Statement::ShowCreateView(stmt) => f.write_node(stmt),
Statement::ShowCreateSource(stmt) => f.write_node(stmt),
Statement::ShowCreateTable(stmt) => f.write_node(stmt),
Statement::ShowCreateSink(stmt) => f.write_node(stmt),
Statement::ShowCreateIndex(stmt) => f.write_node(stmt),
Statement::ShowVariable(stmt) => f.write_node(stmt),
Statement::StartTransaction(stmt) => f.write_node(stmt),
Statement::SetTransaction(stmt) => f.write_node(stmt),
Statement::Commit(stmt) => f.write_node(stmt),
Statement::Rollback(stmt) => f.write_node(stmt),
Statement::Tail(stmt) => f.write_node(stmt),
Statement::Explain(stmt) => f.write_node(stmt),
Statement::Declare(stmt) => f.write_node(stmt),
Statement::Close(stmt) => f.write_node(stmt),
Statement::Fetch(stmt) => f.write_node(stmt),
Statement::Prepare(stmt) => f.write_node(stmt),
Statement::Execute(stmt) => f.write_node(stmt),
Statement::Deallocate(stmt) => f.write_node(stmt),
}
}
}
impl_display_t!(Statement);
/// `SELECT`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct SelectStatement<T: AstInfo> {
pub query: Query<T>,
pub as_of: Option<Expr<T>>,
}
impl<T: AstInfo> AstDisplay for SelectStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_node(&self.query);
if let Some(as_of) = &self.as_of {
f.write_str(" AS OF ");
f.write_node(as_of);
}
}
}
impl_display_t!(SelectStatement);
/// `INSERT`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct InsertStatement<T: AstInfo> {
/// TABLE
pub table_name: UnresolvedObjectName,
/// COLUMNS
pub columns: Vec<Ident>,
/// A SQL query that specifies what to insert.
pub source: InsertSource<T>,
}
impl<T: AstInfo> AstDisplay for InsertStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("INSERT INTO ");
f.write_node(&self.table_name);
if !self.columns.is_empty() {
f.write_str(" (");
f.write_node(&display::comma_separated(&self.columns));
f.write_str(")");
}
f.write_str(" ");
f.write_node(&self.source);
}
}
impl_display_t!(InsertStatement);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum CopyRelation<T: AstInfo> {
Table {
name: UnresolvedObjectName,
columns: Vec<Ident>,
},
Select(SelectStatement<T>),
Tail(TailStatement<T>),
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum CopyDirection {
To,
From,
}
impl AstDisplay for CopyDirection {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str(match self {
CopyDirection::To => "TO",
CopyDirection::From => "FROM",
})
}
}
impl_display!(CopyDirection);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum CopyTarget {
Stdin,
Stdout,
}
impl AstDisplay for CopyTarget {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str(match self {
CopyTarget::Stdin => "STDIN",
CopyTarget::Stdout => "STDOUT",
})
}
}
impl_display!(CopyTarget);
/// `COPY`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CopyStatement<T: AstInfo> {
/// RELATION
pub relation: CopyRelation<T>,
/// DIRECTION
pub direction: CopyDirection,
// TARGET
pub target: CopyTarget,
// OPTIONS
pub options: Vec<WithOption>,
}
impl<T: AstInfo> AstDisplay for CopyStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("COPY ");
match &self.relation {
CopyRelation::Table { name, columns } => {
f.write_node(&name);
if !columns.is_empty() {
f.write_str("(");
f.write_node(&display::comma_separated(&columns));
f.write_str(")");
}
}
CopyRelation::Select(query) => {
f.write_str("(");
f.write_node(query);
f.write_str(")");
}
CopyRelation::Tail(query) => {
f.write_str("(");
f.write_node(query);
f.write_str(")");
}
};
f.write_str(" ");
f.write_node(&self.direction);
f.write_str(" ");
f.write_node(&self.target);
if !self.options.is_empty() {
f.write_str(" WITH (");
f.write_node(&display::comma_separated(&self.options));
f.write_str(")");
}
}
}
impl_display_t!(CopyStatement);
/// `UPDATE`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct UpdateStatement<T: AstInfo> {
/// TABLE
pub table_name: UnresolvedObjectName,
/// Column assignments
pub assignments: Vec<Assignment<T>>,
/// WHERE
pub selection: Option<Expr<T>>,
}
impl<T: AstInfo> AstDisplay for UpdateStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("UPDATE ");
f.write_node(&self.table_name);
if !self.assignments.is_empty() {
f.write_str(" SET ");
f.write_node(&display::comma_separated(&self.assignments));
}
if let Some(selection) = &self.selection {
f.write_str(" WHERE ");
f.write_node(selection);
}
}
}
impl_display_t!(UpdateStatement);
/// `DELETE`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct DeleteStatement<T: AstInfo> {
/// `FROM`
pub table_name: UnresolvedObjectName,
/// `WHERE`
pub selection: Option<Expr<T>>,
}
impl<T: AstInfo> AstDisplay for DeleteStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("DELETE FROM ");
f.write_node(&self.table_name);
if let Some(selection) = &self.selection {
f.write_str(" WHERE ");
f.write_node(selection);
}
}
}
impl_display_t!(DeleteStatement);
/// `CREATE DATABASE`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CreateDatabaseStatement {
pub name: Ident,
pub if_not_exists: bool,
}
impl AstDisplay for CreateDatabaseStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("CREATE DATABASE ");
if self.if_not_exists {
f.write_str("IF NOT EXISTS ");
}
f.write_node(&self.name);
}
}
impl_display!(CreateDatabaseStatement);
/// `CREATE SCHEMA`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CreateSchemaStatement {
pub name: UnresolvedObjectName,
pub if_not_exists: bool,
}
impl AstDisplay for CreateSchemaStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("CREATE SCHEMA ");
if self.if_not_exists {
f.write_str("IF NOT EXISTS ");
}
f.write_node(&self.name);
}
}
impl_display!(CreateSchemaStatement);
/// `CREATE SOURCE`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CreateSourceStatement<T: AstInfo> {
pub name: UnresolvedObjectName,
pub col_names: Vec<Ident>,
pub connector: CreateSourceConnector,
pub with_options: Vec<SqlOption<T>>,
pub format: CreateSourceFormat<T>,
pub key_envelope: CreateSourceKeyEnvelope,
pub envelope: Envelope,
pub if_not_exists: bool,
pub materialized: bool,
pub key_constraint: Option<KeyConstraint>,
}
impl<T: AstInfo> AstDisplay for CreateSourceStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("CREATE ");
if self.materialized {
f.write_str("MATERIALIZED ");
}
f.write_str("SOURCE ");
if self.if_not_exists {
f.write_str("IF NOT EXISTS ");
}
f.write_node(&self.name);
f.write_str(" ");
if !self.col_names.is_empty() {
f.write_str("(");
f.write_node(&display::comma_separated(&self.col_names));
if self.key_constraint.is_some() {
f.write_str(", ");
f.write_node(self.key_constraint.as_ref().unwrap());
}
f.write_str(") ");
} else if self.key_constraint.is_some() {
f.write_str("(");
f.write_node(self.key_constraint.as_ref().unwrap());
f.write_str(") ")
}
f.write_str("FROM ");
f.write_node(&self.connector);
if !self.with_options.is_empty() {
f.write_str(" WITH (");
f.write_node(&display::comma_separated(&self.with_options));
f.write_str(")");
}
f.write_node(&self.format);
f.write_node(&self.key_envelope);
match self.envelope {
Envelope::None => (),
_ => {
f.write_str(" ENVELOPE ");
f.write_node(&self.envelope);
}
}
}
}
impl_display_t!(CreateSourceStatement);
/// `CREATE SINK`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CreateSinkStatement<T: AstInfo> {
pub name: UnresolvedObjectName,
pub from: UnresolvedObjectName,
pub connector: CreateSinkConnector<T>,
pub with_options: Vec<SqlOption<T>>,
pub format: Option<Format<T>>,
pub envelope: Option<Envelope>,
pub with_snapshot: bool,
pub as_of: Option<Expr<T>>,
pub if_not_exists: bool,
}
impl<T: AstInfo> AstDisplay for CreateSinkStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("CREATE SINK ");
if self.if_not_exists {
f.write_str("IF NOT EXISTS ");
}
f.write_node(&self.name);
f.write_str(" FROM ");
f.write_node(&self.from);
f.write_str(" INTO ");
f.write_node(&self.connector);
if !self.with_options.is_empty() {
f.write_str(" WITH (");
f.write_node(&display::comma_separated(&self.with_options));
f.write_str(")");
}
if let Some(format) = &self.format {
f.write_str(" FORMAT ");
f.write_node(format);
}
if let Some(envelope) = &self.envelope {
f.write_str(" ENVELOPE ");
f.write_node(envelope);
}
if self.with_snapshot {
f.write_str(" WITH SNAPSHOT");
} else {
f.write_str(" WITHOUT SNAPSHOT");
}
if let Some(as_of) = &self.as_of {
f.write_str(" AS OF ");
f.write_node(as_of);
}
}
}
impl_display_t!(CreateSinkStatement);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ViewDefinition<T: AstInfo> {
/// View name
pub name: UnresolvedObjectName,
pub columns: Vec<Ident>,
pub with_options: Vec<SqlOption<T>>,
pub query: Query<T>,
}
impl<T: AstInfo> AstDisplay for ViewDefinition<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_node(&self.name);
if !self.with_options.is_empty() {
f.write_str(" WITH (");
f.write_node(&display::comma_separated(&self.with_options));
f.write_str(")");
}
if !self.columns.is_empty() {
f.write_str(" (");
f.write_node(&display::comma_separated(&self.columns));
f.write_str(")");
}
f.write_str(" AS ");
f.write_node(&self.query);
}
}
impl_display_t!(ViewDefinition);
/// `CREATE VIEW`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CreateViewStatement<T: AstInfo> {
pub if_exists: IfExistsBehavior,
pub temporary: bool,
pub materialized: bool,
pub definition: ViewDefinition<T>,
}
impl<T: AstInfo> AstDisplay for CreateViewStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("CREATE");
if self.if_exists == IfExistsBehavior::Replace {
f.write_str(" OR REPLACE");
}
if self.temporary {
f.write_str(" TEMPORARY");
}
if self.materialized {
f.write_str(" MATERIALIZED");
}
f.write_str(" VIEW");
if self.if_exists == IfExistsBehavior::Skip {
f.write_str(" IF NOT EXISTS");
}
f.write_str(" ");
f.write_node(&self.definition);
}
}
impl_display_t!(CreateViewStatement);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CreateViewsSourceTarget {
pub name: UnresolvedObjectName,
pub alias: Option<UnresolvedObjectName>,
}
impl AstDisplay for CreateViewsSourceTarget {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_node(&self.name);
if let Some(alias) = &self.alias {
f.write_str(" AS ");
f.write_node(alias);
}
}
}
impl_display!(CreateViewsSourceTarget);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum CreateViewsDefinitions<T: AstInfo> {
Source {
name: UnresolvedObjectName,
targets: Option<Vec<CreateViewsSourceTarget>>,
},
Literal(Vec<ViewDefinition<T>>),
}
impl<T: AstInfo> AstDisplay for CreateViewsDefinitions<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
match self {
Self::Source { name, targets } => {
f.write_str(" FROM SOURCE ");
f.write_node(name);
if let Some(targets) = targets {
f.write_str(" (");
f.write_node(&display::comma_separated(&targets));
f.write_str(")");
}
}
Self::Literal(defs) => {
let mut delim = " ";
for def in defs {
f.write_str(delim);
delim = ", ";
f.write_str('(');
f.write_node(def);
f.write_str(')');
}
}
}
}
}
impl_display_t!(CreateViewsDefinitions);
/// `CREATE VIEWS`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CreateViewsStatement<T: AstInfo> {
pub if_exists: IfExistsBehavior,
pub temporary: bool,
pub materialized: bool,
pub definitions: CreateViewsDefinitions<T>,
}
impl<T: AstInfo> AstDisplay for CreateViewsStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("CREATE");
if self.if_exists == IfExistsBehavior::Replace {
f.write_str(" OR REPLACE");
}
if self.temporary {
f.write_str(" TEMPORARY");
}
if self.materialized {
f.write_str(" MATERIALIZED");
}
f.write_str(" VIEWS");
if self.if_exists == IfExistsBehavior::Skip {
f.write_str(" IF NOT EXISTS");
}
f.write_node(&self.definitions);
}
}
impl_display_t!(CreateViewsStatement);
/// `CREATE TABLE`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CreateTableStatement<T: AstInfo> {
/// Table name
pub name: UnresolvedObjectName,
/// Optional schema
pub columns: Vec<ColumnDef<T>>,
pub constraints: Vec<TableConstraint<T>>,
pub with_options: Vec<SqlOption<T>>,
pub if_not_exists: bool,
pub temporary: bool,
}
impl<T: AstInfo> AstDisplay for CreateTableStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("CREATE ");
if self.temporary {
f.write_str("TEMPORARY ");
}
f.write_str("TABLE ");
if self.if_not_exists {
f.write_str("IF NOT EXISTS ");
}
f.write_node(&self.name);
f.write_str(" (");
f.write_node(&display::comma_separated(&self.columns));
if !self.constraints.is_empty() {
f.write_str(", ");
f.write_node(&display::comma_separated(&self.constraints));
}
f.write_str(")");
if !self.with_options.is_empty() {
f.write_str(" WITH (");
f.write_node(&display::comma_separated(&self.with_options));
f.write_str(")");
}
}
}
impl_display_t!(CreateTableStatement);
/// `CREATE INDEX`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CreateIndexStatement<T: AstInfo> {
/// Optional index name.
pub name: Option<Ident>,
/// `ON` table or view name
pub on_name: UnresolvedObjectName,
/// Expressions that form part of the index key. If not included, the
/// key_parts will be inferred from the named object.
pub key_parts: Option<Vec<Expr<T>>>,
pub with_options: Vec<WithOption>,
pub if_not_exists: bool,
}
impl<T: AstInfo> AstDisplay for CreateIndexStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("CREATE ");
if self.key_parts.is_none() {
f.write_str("DEFAULT ");
}
f.write_str("INDEX ");
if self.if_not_exists {
f.write_str("IF NOT EXISTS ");
}
if let Some(name) = &self.name {
f.write_node(name);
f.write_str(" ");
}
f.write_str("ON ");
f.write_node(&self.on_name);
if let Some(key_parts) = &self.key_parts {
f.write_str(" (");
f.write_node(&display::comma_separated(key_parts));
f.write_str(")");
}
if !self.with_options.is_empty() {
f.write_str(" WITH (");
f.write_node(&display::comma_separated(&self.with_options));
f.write_str(")");
}
}
}
impl_display_t!(CreateIndexStatement);
/// A `CREATE ROLE` statement.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CreateRoleStatement {
/// Whether this was actually a `CREATE USER` statement.
pub is_user: bool,
/// The specified role.
pub name: Ident,
/// Any options that were attached, in the order they were presented.
pub options: Vec<CreateRoleOption>,
}
impl AstDisplay for CreateRoleStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("CREATE ");
if self.is_user {
f.write_str("USER ");
} else {
f.write_str("ROLE ");
}
f.write_node(&self.name);
for option in &self.options {
f.write_str(" ");
option.fmt(f)
}
}
}
impl_display!(CreateRoleStatement);
/// Options that can be attached to [`CreateRoleStatement`].
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum CreateRoleOption {
/// The `SUPERUSER` option.
SuperUser,
/// The `NOSUPERUSER` option.
NoSuperUser,
/// The `LOGIN` option.
Login,
/// The `NOLOGIN` option.
NoLogin,
}
impl AstDisplay for CreateRoleOption {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
match self {
CreateRoleOption::SuperUser => f.write_str("SUPERUSER"),
CreateRoleOption::NoSuperUser => f.write_str("NOSUPERUSER"),
CreateRoleOption::Login => f.write_str("LOGIN"),
CreateRoleOption::NoLogin => f.write_str("NOLOGIN"),
}
}
}
impl_display!(CreateRoleOption);
/// `CREATE TYPE ..`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CreateTypeStatement<T: AstInfo> {
/// Name of the created type.
pub name: UnresolvedObjectName,
/// The new type's "base type".
pub as_type: CreateTypeAs,
/// Provides the name and type for the key
/// and value.
pub with_options: Vec<SqlOption<T>>,
}
impl<T: AstInfo> AstDisplay for CreateTypeStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("CREATE TYPE ");
f.write_node(&self.name);
f.write_str(" AS ");
f.write_str(&self.as_type);
f.write_str("( ");
if !self.with_options.is_empty() {
f.write_node(&display::comma_separated(&self.with_options));
}
f.write_str(" )");
}
}
impl_display_t!(CreateTypeStatement);
/// `CREATE TYPE .. AS <TYPE>`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum CreateTypeAs {
List,
Map,
}
impl AstDisplay for CreateTypeAs {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
match self {
CreateTypeAs::List => f.write_str("LIST "),
CreateTypeAs::Map => f.write_str("MAP "),
}
}
}
impl_display!(CreateTypeAs);
/// `ALTER <OBJECT> ... RENAME TO`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct AlterObjectRenameStatement {
pub object_type: ObjectType,
pub if_exists: bool,
pub name: UnresolvedObjectName,
pub to_item_name: Ident,
}
impl AstDisplay for AlterObjectRenameStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) |
}
impl_display!(AlterObjectRenameStatement);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum AlterIndexAction {
SetOptions(Vec<WithOption>),
ResetOptions(Vec<Ident>),
Enable,
}
/// `ALTER INDEX ... {RESET, SET}`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct AlterIndexStatement {
pub index_name: UnresolvedObjectName,
pub if_exists: bool,
pub action: AlterIndexAction,
}
impl AstDisplay for AlterIndexStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("ALTER INDEX ");
if self.if_exists {
f.write_str("IF EXISTS ");
}
f.write_node(&self.index_name);
f.write_str(" ");
match &self.action {
AlterIndexAction::SetOptions(options) => {
f.write_str("SET (");
f.write_node(&display::comma_separated(&options));
f.write_str(")");
}
AlterIndexAction::ResetOptions(options) => {
f.write_str("RESET (");
f.write_node(&display::comma_separated(&options));
f.write_str(")");
}
AlterIndexAction::Enable => f.write_str("SET ENABLED"),
}
}
}
impl_display!(AlterIndexStatement);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct DiscardStatement {
pub target: DiscardTarget,
}
impl AstDisplay for DiscardStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("DISCARD ");
f.write_node(&self.target);
}
}
impl_display!(DiscardStatement);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum DiscardTarget {
Plans,
Sequences,
Temp,
All,
}
impl AstDisplay for DiscardTarget {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
match self {
DiscardTarget::Plans => f.write_str("PLANS"),
DiscardTarget::Sequences => f.write_str("SEQUENCES"),
DiscardTarget::Temp => f.write_str("TEMP"),
DiscardTarget::All => f.write_str("ALL"),
}
}
}
impl_display!(DiscardTarget);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct DropDatabaseStatement {
pub name: Ident,
pub if_exists: bool,
pub restrict: bool,
}
impl AstDisplay for DropDatabaseStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("DROP DATABASE ");
if self.if_exists {
f.write_str("IF EXISTS ");
}
f.write_node(&self.name);
if self.restrict {
f.write_str(" RESTRICT");
}
}
}
impl_display!(DropDatabaseStatement);
/// `DROP`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct DropObjectsStatement {
/// If this was constructed as `DROP MATERIALIZED <type>`
pub materialized: bool,
/// The type of the object to drop: TABLE, VIEW, etc.
pub object_type: ObjectType,
/// An optional `IF EXISTS` clause. (Non-standard.)
pub if_exists: bool,
/// One or more objects to drop. (ANSI SQL requires exactly one.)
pub names: Vec<UnresolvedObjectName>,
/// Whether `CASCADE` was specified. This will be `false` when
/// `RESTRICT` or no drop behavior at all was specified.
pub cascade: bool,
}
impl AstDisplay for DropObjectsStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("DROP ");
f.write_node(&self.object_type);
f.write_str(" ");
if self.if_exists {
f.write_str("IF EXISTS ");
}
f.write_node(&display::comma_separated(&self.names));
if self.cascade {
f.write_str(" CASCADE");
}
}
}
impl_display!(DropObjectsStatement);
/// `SET <variable>`
///
/// Note: this is not a standard SQL statement, but it is supported by at
/// least MySQL and PostgreSQL. Not all MySQL-specific syntactic forms are
/// supported yet.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct SetVariableStatement {
pub local: bool,
pub variable: Ident,
pub value: SetVariableValue,
}
impl AstDisplay for SetVariableStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("SET ");
if self.local {
f.write_str("LOCAL ");
}
f.write_node(&self.variable);
f.write_str(" = ");
f.write_node(&self.value);
}
}
impl_display!(SetVariableStatement);
/// `SHOW <variable>`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ShowVariableStatement {
pub variable: Ident,
}
impl AstDisplay for ShowVariableStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("SHOW ");
f.write_node(&self.variable);
}
}
impl_display!(ShowVariableStatement);
/// `SHOW DATABASES`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ShowDatabasesStatement<T: AstInfo> {
pub filter: Option<ShowStatementFilter<T>>,
}
impl<T: AstInfo> AstDisplay for ShowDatabasesStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("SHOW DATABASES");
if let Some(filter) = &self.filter {
f.write_str(" ");
f.write_node(filter);
}
}
}
impl_display_t!(ShowDatabasesStatement);
/// `SHOW <object>S`
///
/// ```sql
/// SHOW TABLES;
/// SHOW SOURCES;
/// SHOW VIEWS;
/// SHOW SINKS;
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ShowObjectsStatement<T: AstInfo> {
pub object_type: ObjectType,
pub from: Option<UnresolvedObjectName>,
pub extended: bool,
pub full: bool,
pub materialized: bool,
pub filter: Option<ShowStatementFilter<T>>,
}
impl<T: AstInfo> AstDisplay for ShowObjectsStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("SHOW");
if self.extended {
f.write_str(" EXTENDED");
}
if self.full {
f.write_str(" FULL");
}
if self.materialized {
f.write_str(" MATERIALIZED");
}
f.write_str(" ");
f.write_str(match &self.object_type {
ObjectType::Schema => "SCHEMAS",
ObjectType::Table => "TABLES",
ObjectType::View => "VIEWS",
ObjectType::Source => "SOURCES",
ObjectType::Sink => "SINKS",
ObjectType::Type => "TYPES",
ObjectType::Role => "ROLES",
ObjectType::Object => "OBJECTS",
ObjectType::Index => unreachable!(),
});
if let Some(from) = &self.from {
f.write_str(" FROM ");
f.write_node(&from);
}
if let Some(filter) = &self.filter {
f.write_str(" ");
f.write_node(filter);
}
}
}
impl_display_t!(ShowObjectsStatement);
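// Illustrative example (not part of the original source): extended = true,
// materialized = true, object_type = Source formats as
// `SHOW EXTENDED MATERIALIZED SOURCES`. ObjectType::Index is unreachable here
// because index listings are printed by the dedicated ShowIndexesStatement
// below.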
/// `SHOW INDEX|INDEXES|KEYS`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ShowIndexesStatement<T: AstInfo> {
pub table_name: UnresolvedObjectName,
pub extended: bool,
pub filter: Option<ShowStatementFilter<T>>,
}
impl<T: AstInfo> AstDisplay for ShowIndexesStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("SHOW ");
if self.extended {
f.write_str("EXTENDED ");
}
f.write_str("INDEXES FROM ");
f.write_node(&self.table_name);
if let Some(filter) = &self.filter {
f.write_str(" ");
f.write_node(filter);
}
}
}
impl_display_t!(ShowIndexesStatement);
/// `SHOW COLUMNS`
///
/// Note: this is a MySQL-specific statement.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ShowColumnsStatement<T: AstInfo> {
pub extended: bool,
pub full: bool,
pub table_name: UnresolvedObjectName,
pub filter: Option<ShowStatementFilter<T>>,
}
impl<T: AstInfo> AstDisplay for ShowColumnsStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("SHOW ");
if self.extended {
f.write_str("EXTENDED ");
}
if self.full {
f.write_str("FULL ");
}
f.write_str("COLUMNS FROM ");
f.write_node(&self.table_name);
if let Some(filter) = &self.filter {
f.write_str(" ");
f.write_node(filter);
}
}
}
impl_display_t!(ShowColumnsStatement);
/// `SHOW CREATE VIEW <view>`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ShowCreateViewStatement {
pub view_name: UnresolvedObjectName,
}
impl AstDisplay for ShowCreateViewStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("SHOW CREATE VIEW ");
f.write_node(&self.view_name);
}
}
impl_display!(ShowCreateViewStatement);
/// `SHOW CREATE SOURCE <source>`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ShowCreateSourceStatement {
pub source_name: UnresolvedObjectName,
}
impl AstDisplay for ShowCreateSourceStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("SHOW CREATE SOURCE ");
f.write_node(&self.source_name);
}
}
impl_display!(ShowCreateSourceStatement);
/// `SHOW CREATE TABLE <table>`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ShowCreateTableStatement {
pub table_name: UnresolvedObjectName,
}
impl AstDisplay for ShowCreateTableStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("SHOW CREATE TABLE ");
f.write_node(&self.table_name);
}
}
impl_display!(ShowCreateTableStatement);
/// `SHOW CREATE SINK <sink>`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ShowCreateSinkStatement {
pub sink_name: UnresolvedObjectName,
}
impl AstDisplay for ShowCreateSinkStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("SHOW CREATE SINK ");
f.write_node(&self.sink_name);
}
}
impl_display!(ShowCreateSinkStatement);
/// `SHOW CREATE INDEX <index>`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ShowCreateIndexStatement {
pub index_name: UnresolvedObjectName,
}
impl AstDisplay for ShowCreateIndexStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("SHOW CREATE INDEX ");
f.write_node(&self.index_name);
}
}
impl_display!(ShowCreateIndexStatement);
/// `{ BEGIN [ TRANSACTION | WORK ] | START TRANSACTION } ...`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct StartTransactionStatement {
pub modes: Vec<TransactionMode>,
}
impl AstDisplay for StartTransactionStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("START TRANSACTION");
if !self.modes.is_empty() {
f.write_str(" ");
f.write_node(&display::comma_separated(&self.modes));
}
}
}
impl_display!(StartTransactionStatement);
/// `SET TRANSACTION ...`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct SetTransactionStatement {
pub modes: Vec<TransactionMode>,
}
impl AstDisplay for SetTransactionStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("SET TRANSACTION");
if !self.modes.is_empty() {
f.write_str(" ");
f.write_node(&display::comma_separated(&self.modes));
}
}
}
impl_display!(SetTransactionStatement);
/// `COMMIT [ TRANSACTION | WORK ] [ AND [ NO ] CHAIN ]`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CommitStatement {
pub chain: bool,
}
impl AstDisplay for CommitStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("COMMIT");
if self.chain {
f.write_str(" AND CHAIN");
}
}
}
impl_display!(CommitStatement);
/// `ROLLBACK [ TRANSACTION | WORK ] [ AND [ NO ] CHAIN ]`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct RollbackStatement {
pub chain: bool,
}
impl AstDisplay for RollbackStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("ROLLBACK");
if self.chain {
f.write_str(" AND CHAIN");
}
}
}
impl_display!(RollbackStatement);
/// `TAIL`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct TailStatement<T: AstInfo> {
pub name: UnresolvedObjectName,
pub options: Vec<WithOption>,
pub as_of: Option<Expr<T>>,
}
impl<T: AstInfo> AstDisplay for TailStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("TAIL ");
f.write_node(&self.name);
if !self.options.is_empty() {
f.write_str(" WITH (");
f.write_node(&display::comma_separated(&self.options));
f.write_str(")");
}
if let Some(as_of) = &self.as_of {
f.write_str(" AS OF ");
f.write_node(as_of);
}
}
}
impl_display_t!(TailStatement);
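// Illustrative example (not part of the original source; the option shown is
// hypothetical): a TailStatement with one WITH option and an AS OF expression
// formats as `TAIL my_source WITH (snapshot = false) AS OF 42`.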
/// `EXPLAIN ...`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ExplainStatement<T: AstInfo> {
pub stage: ExplainStage,
pub explainee: Explainee<T>,
pub options: ExplainOptions,
}
impl<T: AstInfo> AstDisplay for ExplainStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("EXPLAIN ");
if self.options.typed {
f.write_str("TYPED ");
}
f.write_node(&self.stage);
f.write_str(" FOR ");
f.write_node(&self.explainee);
}
}
impl_display_t!(ExplainStatement);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum InsertSource<T: AstInfo> {
Query(Query<T>),
DefaultValues,
}
impl<T: AstInfo> AstDisplay for InsertSource<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
match self {
InsertSource::Query(query) => f.write_node(query),
InsertSource::DefaultValues => f.write_str("DEFAULT VALUES"),
}
}
}
impl_display_t!(InsertSource);
#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy)]
pub enum ObjectType {
Schema,
Table,
View,
Source,
Sink,
Index,
Type,
Role,
Object,
}
impl AstDisplay for ObjectType {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str(match self {
ObjectType::Schema => "SCHEMA",
ObjectType::Table => "TABLE",
ObjectType::View => "VIEW",
ObjectType::Source => "SOURCE",
ObjectType::Sink => "SINK",
ObjectType::Index => "INDEX",
ObjectType::Type => "TYPE",
ObjectType::Role => "ROLE",
ObjectType::Object => "OBJECT",
})
}
}
impl_display!(ObjectType);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum ShowStatementFilter<T: AstInfo> {
Like(String),
Where(Expr<T>),
}
impl<T: AstInfo> AstDisplay for ShowStatementFilter<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
use ShowStatementFilter::*;
match self {
Like(pattern) => {
f.write_str("LIKE '");
f.write_node(&display::escape_single_quote_string(pattern));
f.write_str("'");
}
Where(expr) => {
f.write_str("WHERE ");
f.write_node(expr);
}
}
}
}
impl_display_t!(ShowStatementFilter);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum SqlOption<T: AstInfo> {
Value {
name: Ident,
value: Value,
},
ObjectName {
name: Ident,
object_name: UnresolvedObjectName,
},
DataType {
name: Ident,
data_type: DataType<T>,
},
}
impl<T: AstInfo> SqlOption<T> {
pub fn name(&self) -> &Ident {
match self {
SqlOption::Value { name, .. } => name,
SqlOption::ObjectName { name, .. } => name,
SqlOption::DataType { name, .. } => name,
}
}
}
impl<T: AstInfo> AstDisplay for SqlOption<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
match self {
SqlOption::Value { name, value } => {
f.write_node(name);
f.write_str(" = ");
f.write_node(value);
}
SqlOption::ObjectName { name, object_name } => {
f.write_node(name);
f.write_str(" = ");
f.write_node(object_name);
}
SqlOption::DataType { name, data_type } => {
f.write_node(name);
f.write_str(" = ");
f.write_node(data_type);
}
}
}
}
impl_display_t!(SqlOption);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct WithOption {
pub key: Ident,
pub value: Option<WithOptionValue>,
}
impl AstDisplay for WithOption {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_node(&self.key);
if let Some(opt) = &self.value {
f.write_str(" = ");
f.write_node(opt);
}
}
}
impl_display!(WithOption);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum WithOptionValue {
Value(Value),
ObjectName(UnresolvedObjectName),
}
impl AstDisplay for WithOptionValue {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
match self {
WithOptionValue::Value(value) => f.write_node(value),
WithOptionValue::ObjectName(name) => f.write_node(name),
}
}
}
impl_display!(WithOptionValue);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum TransactionMode {
AccessMode(TransactionAccessMode),
IsolationLevel(TransactionIsolationLevel),
}
impl AstDisplay for TransactionMode {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
use TransactionMode::*;
match self {
AccessMode(access_mode) => f.write_node(access_mode),
IsolationLevel(iso_level) => {
f.write_str("ISOLATION LEVEL ");
f.write_node(iso_level);
}
}
}
}
impl_display!(TransactionMode);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum TransactionAccessMode {
ReadOnly,
ReadWrite,
}
impl AstDisplay for TransactionAccessMode {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
use TransactionAccessMode::*;
f.write_str(match self {
ReadOnly => "READ ONLY",
ReadWrite => "READ WRITE",
})
}
}
impl_display!(TransactionAccessMode);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum TransactionIsolationLevel {
ReadUncommitted,
ReadCommitted,
RepeatableRead,
Serializable,
}
impl AstDisplay for TransactionIsolationLevel {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
use TransactionIsolationLevel::*;
f.write_str(match self {
ReadUncommitted => "READ UNCOMMITTED",
ReadCommitted => "READ COMMITTED",
RepeatableRead => "REPEATABLE READ",
Serializable => "SERIALIZABLE",
})
}
}
impl_display!(TransactionIsolationLevel);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum SetVariableValue {
Ident(Ident),
Literal(Value),
}
impl AstDisplay for SetVariableValue {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
use SetVariableValue::*;
match self {
Ident(ident) => f.write_node(ident),
Literal(literal) => f.write_node(literal),
}
}
}
impl_display!(SetVariableValue);
/// SQL assignment `foo = expr` as used in SQLUpdate
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Assignment<T: AstInfo> {
pub id: Ident,
pub value: Expr<T>,
}
impl<T: AstInfo> AstDisplay for Assignment<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_node(&self.id);
f.write_str(" = ");
f.write_node(&self.value);
}
}
impl_display_t!(Assignment);
/// Specifies what [Statement::Explain] is actually explaining
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ExplainStage {
/// The sql::HirRelationExpr after parsing
RawPlan,
/// The expr::MirRelationExpr after decorrelation
DecorrelatedPlan,
/// The expr::MirRelationExpr after optimization
OptimizedPlan,
}
impl AstDisplay for ExplainStage {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
match self {
ExplainStage::RawPlan => f.write_str("RAW PLAN"),
ExplainStage::DecorrelatedPlan => f.write_str("DECORRELATED PLAN"),
ExplainStage::OptimizedPlan => f.write_str("OPTIMIZED PLAN"),
}
}
}
impl_display!(ExplainStage);
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Explainee<T: AstInfo> {
View(UnresolvedObjectName),
Query(Query<T>),
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ExplainOptions {
pub typed: bool,
}
impl<T: AstInfo> AstDisplay for Explainee<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
match self {
Explainee::View(name) => {
f.write_str("VIEW ");
f.write_node(&name);
}
Explainee::Query(query) => f.write_node(query),
}
}
}
impl_display_t!(Explainee);
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum IfExistsBehavior {
Error,
Skip,
Replace,
}
/// `DECLARE ...`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct DeclareStatement<T: AstInfo> {
pub name: Ident,
pub stmt: Box<Statement<T>>,
}
impl<T: AstInfo> AstDisplay for DeclareStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("DECLARE ");
f.write_node(&self.name);
f.write_str(" CURSOR FOR ");
f.write_node(&self.stmt);
}
}
impl_display_t!(DeclareStatement);
/// `CLOSE ...`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct CloseStatement {
pub name: Ident,
}
impl AstDisplay for CloseStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("CLOSE ");
f.write_node(&self.name);
}
}
impl_display!(CloseStatement);
/// `FETCH ...`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct FetchStatement {
pub name: Ident,
pub count: Option<FetchDirection>,
pub options: Vec<WithOption>,
}
impl AstDisplay for FetchStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("FETCH ");
if let Some(ref count) = self.count {
f.write_str(format!("{} ", count));
}
f.write_node(&self.name);
if !self.options.is_empty() {
f.write_str(" WITH (");
f.write_node(&display::comma_separated(&self.options));
f.write_str(")");
}
}
}
impl_display!(FetchStatement);
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum FetchDirection {
ForwardAll,
ForwardCount(u64),
}
impl AstDisplay for FetchDirection {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
match self {
FetchDirection::ForwardAll => f.write_str("ALL"),
FetchDirection::ForwardCount(count) => f.write_str(format!("{}", count)),
}
}
}
impl_display!(FetchDirection);
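// Illustrative example (not part of the original source): combined with the
// FetchStatement formatter above, count = Some(ForwardCount(10)) on cursor `c`
// renders as `FETCH 10 c`, and ForwardAll renders as `FETCH ALL c`.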
/// `PREPARE ...`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct PrepareStatement<T: AstInfo> {
pub name: Ident,
pub stmt: Box<Statement<T>>,
}
impl<T: AstInfo> AstDisplay for PrepareStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("PREPARE ");
f.write_node(&self.name);
f.write_str(" AS ");
f.write_node(&self.stmt);
}
}
impl_display_t!(PrepareStatement);
/// `EXECUTE ...`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ExecuteStatement<T: AstInfo> {
pub name: Ident,
pub params: Vec<Expr<T>>,
}
impl<T: AstInfo> AstDisplay for ExecuteStatement<T> {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("EXECUTE ");
f.write_node(&self.name);
if !self.params.is_empty() {
f.write_str(" (");
f.write_node(&display::comma_separated(&self.params));
f.write_str(")");
}
}
}
impl_display_t!(ExecuteStatement);
/// `DEALLOCATE ...`
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct DeallocateStatement {
pub name: Option<Ident>,
}
impl AstDisplay for DeallocateStatement {
fn fmt<W: fmt::Write>(&self, f: &mut AstFormatter<W>) {
f.write_str("DEALLOCATE ");
match &self.name {
Some(name) => f.write_node(name),
None => f.write_str("ALL"),
};
}
}
impl_display!(DeallocateStatement);
| {
f.write_str("ALTER ");
f.write_node(&self.object_type);
f.write_str(" ");
if self.if_exists {
f.write_str("IF EXISTS ");
}
f.write_node(&self.name);
f.write_str(" RENAME TO ");
f.write_node(&self.to_item_name);
} |
func.py | import email.mime.text
import urllib.request
import sqlite3
import hashlib
import smtplib
import bcrypt
import flask
import json
import html
import sys
import re
import os
import datetime  # used by ban_insert below (may also arrive via the star imports)
try:
    import css_html_js_minify
except ImportError:
    pass
if sys.version_info < (3, 6):
import sha3
from set_mark.tool import *
from mark import *
def load_conn(data):
global conn
global curs
conn = data
curs = conn.cursor()
load_conn2(data)
def send_email(who, title, data):
smtp = smtplib.SMTP_SSL('smtp.gmail.com', 465)
try:
curs.execute('select name, data from other where name = "g_email" or name = "g_pass"')
rep_data = curs.fetchall()
if rep_data:
g_email = ''
g_pass = ''
for i in rep_data:
if i[0] == 'g_email':
g_email = i[1]
else:
g_pass = i[1]
smtp.login(g_email, g_pass)
msg = email.mime.text.MIMEText(data)
msg['Subject'] = title
smtp.sendmail(g_email, who, msg.as_string())
smtp.quit()
except:
print('error : email login error')
def easy_minify(data, tool = None):
try:
if not tool:
data = css_html_js_minify.html_minify(data)
else:
if tool == 'css':
data = css_html_js_minify.css_minify(data)
elif tool == 'js':
data = css_html_js_minify.js_minify(data)
except:
data = re.sub('\n +<', '\n<', data)
data = re.sub('>(\n| )+<', '> <', data)
return data
def render_set(title = '', data = '', num = 0):
if acl_check(title, 'render') == 1:
return 'http request 401.3'
else:
return namumark(title, data, num)
def captcha_get():
data = ''
if custom()[2] == 0:
curs.execute('select data from other where name = "recaptcha"')
recaptcha = curs.fetchall()
if recaptcha and recaptcha[0][0] != '':
curs.execute('select data from other where name = "sec_re"')
sec_re = curs.fetchall()
if sec_re and sec_re[0][0] != '':
data += recaptcha[0][0] + '<hr class=\"main_hr\">'
return data
def update():
    # v3.0.5 anglicize the user, file, and category namespaces (사용자 → user, 파일 → file, 분류 → category)
try:
all_rep = [['사용자:', 'user:'], ['파일:', 'file:'], ['분류:', 'category:']]
all_rep2 = ['data', 'history', 'acl', 'topic', 'back']
test = 0
for i in range(3):
for j in range(6):
if not j == 5:
curs.execute('select title from ' + all_rep2[j] + ' where title like ?', [all_rep[i][0] + '%'])
else:
curs.execute('select link from back where link like ?', [all_rep[i][0] + '%'])
user_rep = curs.fetchall()
if user_rep:
for user_rep2 in user_rep:
test = 1
first = re.sub('^' + all_rep[i][0], all_rep[i][1], user_rep2[0])
if j == 0:
curs.execute("update data set title = ? where title = ?", [first, user_rep2[0]])
elif j == 1:
curs.execute("update history set title = ? where title = ?", [first, user_rep2[0]])
elif j == 2:
curs.execute("update acl set title = ? where title = ?", [first, user_rep2[0]])
elif j == 3:
curs.execute("update topic set title = ? where title = ?", [first, user_rep2[0]])
elif j == 4:
curs.execute("update back set title = ? where title = ?", [first, user_rep2[0]])
elif j == 5:
curs.execute("update back set link = ? where link = ?", [first, user_rep2[0]])
if test == 1:
print('사용자 to user, 파일 to file, 분류 to category')
except:
pass
    # v3.0.8 merge the rd, agreedis, and stop tables into rd
try:
curs.execute("select title, sub, close from stop")
for i in curs.fetchall():
if i[2] == '':
curs.execute("update rd set stop = 'S' where title = ? and sub = ?", [i[0], i[1]])
else:
curs.execute("update rd set stop = 'O' where title = ? and sub = ?", [i[0], i[1]])
except:
pass
try:
curs.execute("select title, sub from agreedis")
for i in curs.fetchall():
curs.execute("update rd set agree = 'O' where title = ? and sub = ?", [i[0], i[1]])
except:
pass
try:
curs.execute("drop table if exists stop")
curs.execute("drop table if exists agreedis")
except:
pass
def pw_encode(data, data2 = '', type_d = ''):
if type_d == '':
curs.execute('select data from other where name = "encode"')
set_data = curs.fetchall()
type_d = set_data[0][0]
if type_d == 'sha256':
return hashlib.sha256(bytes(data, 'utf-8')).hexdigest()
elif type_d == 'sha3':
if sys.version_info < (3, 6):
return sha3.sha3_256(bytes(data, 'utf-8')).hexdigest()
else:
return hashlib.sha3_256(bytes(data, 'utf-8')).hexdigest()
else:
if data2 != '':
salt_data = bytes(data2, 'utf-8')
else:
salt_data = bcrypt.gensalt(11)
return bcrypt.hashpw(bytes(data, 'utf-8'), salt_data).decode()
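# Illustrative usage (not part of the original source; values are made up):
#   pw_encode('hunter2', type_d = 'sha256')  # -> hex digest string
#   pw_encode('hunter2', type_d = 'bcrypt')  # -> salted bcrypt hash
# Passing data2 reuses an existing bcrypt hash as the salt, which is how
# pw_check below verifies a stored password.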
def pw_check(data, data2, type_d = 'no', id_d = ''):
curs.execute('select data from other where name = "encode"')
db_data = curs.fetchall()
if type_d != 'no':
if type_d == '':
set_data = 'bcrypt'
else:
set_data = type_d
else:
set_data = db_data[0][0]
while 1:
if set_data in ['sha256', 'sha3']:
data3 = pw_encode(data = data, type_d = set_data)
if data3 == data2:
re_data = 1
else:
re_data = 0
break
else:
            try:
                if pw_encode(data, data2, 'bcrypt') == data2:
                    re_data = 1
                else:
                    re_data = 0
                # break after either outcome of the bcrypt comparison
                break
            except:
                set_data = db_data[0][0]
if db_data[0][0] != set_data and re_data == 1 and id_d != '':
curs.execute("update user set pw = ?, encode = ? where id = ?", [pw_encode(data), db_data[0][0], id_d])
return re_data
def captcha_post(re_data, num = 1):
if num == 1:
if custom()[2] == 0 and captcha_get() != '':
curs.execute('select data from other where name = "sec_re"')
sec_re = curs.fetchall()
if sec_re and sec_re[0][0] != '':
data = urllib.request.urlopen('https://www.google.com/recaptcha/api/siteverify?secret=' + sec_re[0][0] + '&response=' + re_data)
if not data:
return 0
else:
json_data = data.read().decode(data.headers.get_content_charset())
json_data = json.loads(json_data)
if data.getcode() == 200 and json_data['success'] == True:
return 0
else:
return 1
else:
return 0
else:
return 0
else:
pass
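# Illustrative note (not part of the original source): captcha_post returns 0
# (pass) when the user is logged in, when no reCAPTCHA secret is configured,
# or when Google's siteverify endpoint accepts the token, and 1 only when
# verification explicitly fails.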
def load_lang(data, num = 2):
if num == 1:
curs.execute("select data from other where name = 'language'")
rep_data = curs.fetchall()
json_data = open(os.path.join('language', rep_data[0][0] + '.json'), 'rt', encoding='utf-8').read()
lang = json.loads(json_data)
if data in lang:
return lang[data]
else:
return data + ' (missing)'
else:
curs.execute('select data from user_set where name = "lang" and id = ?', [ip_check()])
rep_data = curs.fetchall()
if rep_data:
try:
json_data = open(os.path.join('language', rep_data[0][0] + '.json'), 'rt', encoding='utf-8').read()
lang = json.loads(json_data)
except:
return load_lang(data, 1)
if data in lang:
return lang[data]
else:
return load_lang(data, 1)
else:
return load_lang(data, 1)
def load_oauth(provider):
oauth = json.loads(open('oauthsettings.json', encoding='utf-8').read())
return oauth[provider]
def update_oauth(provider, target, content):
oauth = json.loads(open('oauthsettings.json', encoding='utf-8').read())
oauth[provider][target] = content
with open('oauthsettings.json', 'w', encoding='utf-8') as f:
json.dump(oauth, f)
return 'Done'
def ip_or_user(data):
if re.search('(\.|:)', data):
return 1
else:
return 0
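# Illustrative usage (not part of the original source): any '.' or ':' marks
# the value as an IP rather than a username, e.g. ip_or_user('203.0.113.7')
# and ip_or_user('::1') return 1, while ip_or_user('alice') returns 0.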
def edit_help_button():
# https://stackoverflow.com/questions/11076975/insert-text-into-textarea-at-cursor-position-javascript
js_data = '''
<script>
function insert_data(name, data) {
if(document.selection) {
document.getElementById(name).focus();
sel = document.selection.createRange();
sel.text = data;
} else if(document.getElementById(name).selectionStart || document.getElementById(name).selectionStart == '0') {
var startPos = document.getElementById(name).selectionStart;
var endPos = document.getElementById(name).selectionEnd;
document.getElementById(name).value = document.getElementById(name).value.substring(0, startPos) + data + document.getElementById(name).value.substring(endPos, document.getElementById(name).value.length);
} else {
document.getElementById(name).value += data;
}
}
</script>
'''
insert_list = [['[[|]]', '[[|]]'], ['[*()]', '[*()]'], ['{{{#!}}}', '{{{#!}}}'], ['||<>||', '||<>||'], ["\\'\\'\\'", "\'\'\'"]]
data = ''
for insert_data in insert_list:
data += '<a href="javascript:void(0);" onclick="insert_data(\'content\', \'' + insert_data[0] + '\');">(' + insert_data[1] + ')</a> '
return [js_data, data + '<hr class=\"main_hr\">']
def ip_warring():
if custom()[2] == 0:
curs.execute('select data from other where name = "no_login_warring"')
data = curs.fetchall()
if data and data[0][0] != '':
text_data = '<span>' + data[0][0] + '</span><hr class=\"main_hr\">'
else:
text_data = '<span>' + load_lang('no_login_warring') + '</span><hr class=\"main_hr\">'
else:
text_data = ''
return text_data
def skin_check():
skin = './views/neo_yousoro/'
curs.execute('select data from other where name = "skin"')
skin_exist = curs.fetchall()
if skin_exist and skin_exist[0][0] != '':
if os.path.exists(os.path.abspath('./views/' + skin_exist[0][0] + '/index.html')) == 1:
skin = './views/' + skin_exist[0][0] + '/'
curs.execute('select data from user_set where name = "skin" and id = ?', [ip_check()])
skin_exist = curs.fetchall()
if skin_exist and skin_exist[0][0] != '':
if os.path.exists(os.path.abspath('./views/' + skin_exist[0][0] + '/index.html')) == 1:
skin = './views/' + skin_exist[0][0] + '/'
return skin + 'index.html'
def next_fix(link, num, page, end = 50):
list_data = ''
if num == 1:
if len(page) == end:
list_data += '<hr class=\"main_hr\"><a href="' + link + str(num + 1) + '">(' + load_lang('next') + ')</a>'
elif len(page) != end:
list_data += '<hr class=\"main_hr\"><a href="' + link + str(num - 1) + '">(' + load_lang('previous') + ')</a>'
else:
list_data += '<hr class=\"main_hr\"><a href="' + link + str(num - 1) + '">(' + load_lang('previous') + ')</a> <a href="' + link + str(num + 1) + '">(' + load_lang('next') + ')</a>'
return list_data
def other2(data):
return data + ['']
def wiki_set(num = 1):
if num == 1:
data_list = []
curs.execute('select data from other where name = ?', ['name'])
db_data = curs.fetchall()
if db_data and db_data[0][0] != '':
data_list += [db_data[0][0]]
else:
data_list += ['wiki']
curs.execute('select data from other where name = "license"')
db_data = curs.fetchall()
if db_data and db_data[0][0] != '':
data_list += [db_data[0][0]]
else:
data_list += ['CC 0']
data_list += ['', '']
curs.execute('select data from other where name = "logo"')
db_data = curs.fetchall()
if db_data and db_data[0][0] != '': | curs.execute("select data from other where name = 'head'")
db_data = curs.fetchall()
if db_data and db_data[0][0] != '':
data_list += [db_data[0][0]]
else:
data_list += ['']
return data_list
if num == 2:
var_data = 'FrontPage'
curs.execute('select data from other where name = "frontpage"')
elif num == 3:
var_data = '2'
curs.execute('select data from other where name = "upload"')
db_data = curs.fetchall()
if db_data and db_data[0][0] != '':
return db_data[0][0]
else:
return var_data
def diff(seqm):
output = []
for opcode, a0, a1, b0, b1 in seqm.get_opcodes():
if opcode == 'equal':
output += [seqm.a[a0:a1]]
elif opcode == 'insert':
output += ["<span style='background:#CFC;'>" + seqm.b[b0:b1] + "</span>"]
elif opcode == 'delete':
output += ["<span style='background:#FDD;'>" + seqm.a[a0:a1] + "</span>"]
elif opcode == 'replace':
output += ["<span style='background:#FDD;'>" + seqm.a[a0:a1] + "</span>"]
output += ["<span style='background:#CFC;'>" + seqm.b[b0:b1] + "</span>"]
end = ''.join(output)
end = end.replace('\r\n', '\n')
sub = ''
if not re.search('\n', end):
end += '\n'
num = 0
left = 1
while 1:
data = re.search('((?:(?!\n).)*)\n', end)
if data:
data = data.groups()[0]
left += 1
if re.search('<span style=\'(?:(?:(?!\').)+)\'>', data):
num += 1
if re.search('<\/span>', data):
num -= 1
sub += str(left) + ' : ' + re.sub('(?P<in>(?:(?!\n).)*)\n', '\g<in>', data, 1) + '<br>'
else:
if re.search('<\/span>', data):
num -= 1
sub += str(left) + ' : ' + re.sub('(?P<in>(?:(?!\n).)*)\n', '\g<in>', data, 1) + '<br>'
else:
if num > 0:
sub += str(left) + ' : ' + re.sub('(?P<in>.*)\n', '\g<in>', data, 1) + '<br>'
end = re.sub('((?:(?!\n).)*)\n', '', end, 1)
else:
break
return sub
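# Illustrative usage (not part of the original source), assuming the standard
# library's difflib:
#   import difflib
#   html_diff = diff(difflib.SequenceMatcher(None, old_text, new_text))
# Inserted text comes back in green (#CFC) spans and deleted text in red
# (#FDD) spans, one numbered line per changed row.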
def admin_check(num = None, what = None):
ip = ip_check()
curs.execute("select acl from user where id = ?", [ip])
user = curs.fetchall()
if user:
reset = 0
while 1:
if num == 1 and reset == 0:
check = 'ban'
elif num == 3 and reset == 0:
check = 'toron'
elif num == 4 and reset == 0:
check = 'check'
elif num == 5 and reset == 0:
check = 'acl'
elif num == 6 and reset == 0:
check = 'hidel'
elif num == 7 and reset == 0:
check = 'give'
else:
check = 'owner'
curs.execute('select name from alist where name = ? and acl = ?', [user[0][0], check])
if curs.fetchall():
if what:
curs.execute("insert into re_admin (who, what, time) values (?, ?, ?)", [ip, what, get_time()])
conn.commit()
return 1
else:
if reset == 0:
reset = 1
else:
break
return 0
def ip_pas(raw_ip):
hide = 0
if re.search("(\.|:)", raw_ip):
if not re.search("^" + load_lang('tool', 1) + ":", raw_ip):
curs.execute("select data from other where name = 'ip_view'")
data = curs.fetchall()
if data and data[0][0] != '':
ip = '<span style="font-size: 75%;">' + hashlib.md5(bytes(raw_ip, 'utf-8')).hexdigest() + '</span>'
                if not admin_check(1, None):  # 1 = ban authority in admin_check's numbering
hide = 1
else:
ip = raw_ip
else:
ip = raw_ip
hide = 1
else:
curs.execute("select title from data where title = ?", ['user:' + raw_ip])
if curs.fetchall():
ip = '<a href="/w/' + url_pas('user:' + raw_ip) + '">' + raw_ip + '</a>'
else:
ip = '<a id="not_thing" href="/w/' + url_pas('user:' + raw_ip) + '">' + raw_ip + '</a>'
if hide == 0:
ip += ' <a href="/tool/' + url_pas(raw_ip) + '">(' + load_lang('tool') + ')</a>'
return ip
def custom():
if 'head' in flask.session:
user_head = flask.session['head']
else:
user_head = ''
if 'state' in flask.session and flask.session['state'] == 1:
curs.execute('select name from alarm where name = ? limit 1', [ip_check()])
if curs.fetchall():
user_icon = 2
else:
user_icon = 1
else:
user_icon = 0
if user_icon != 0:
curs.execute('select data from user_set where name = "email" and id = ?', [ip_check()])
data = curs.fetchall()
if data:
email = data[0][0]
else:
email = ''
else:
email = ''
if user_icon != 0:
user_name = ip_check()
else:
user_name = load_lang('user')
return ['', '', user_icon, user_head, email, user_name, load_lang(data = '', num = 2)]
def load_skin(data = ''):
div2 = ''
system_file = ['main_css', 'easter_egg.html']
if data == '':
ip = ip_check()
curs.execute('select data from user_set where name = "skin" and id = ?', [ip])
data = curs.fetchall()
for skin_data in os.listdir(os.path.abspath('views')):
if not skin_data in system_file:
if not data:
curs.execute('select data from other where name = "skin"')
sql_data = curs.fetchall()
if sql_data and sql_data[0][0] == skin_data:
div2 = '<option value="' + skin_data + '">' + skin_data + '</option>' + div2
else:
div2 += '<option value="' + skin_data + '">' + skin_data + '</option>'
elif data[0][0] == skin_data:
div2 = '<option value="' + skin_data + '">' + skin_data + '</option>' + div2
else:
div2 += '<option value="' + skin_data + '">' + skin_data + '</option>'
else:
for skin_data in os.listdir(os.path.abspath('views')):
if not skin_data in system_file:
if data == skin_data:
div2 = '<option value="' + skin_data + '">' + skin_data + '</option>' + div2
else:
div2 += '<option value="' + skin_data + '">' + skin_data + '</option>'
return div2
def acl_check(name, tool = ''):
ip = ip_check()
if tool == 'render':
curs.execute("select view from acl where title = ?", [name])
acl_data = curs.fetchall()
if acl_data:
if acl_data[0][0] == 'user':
if not user_data:
return 1
if acl_data[0][0] == 'admin':
if not user_data:
return 1
if not admin_check(5, 'view (' + name + ')') == 1:
return 1
return 0
else:
if ban_check() == 1:
return 1
acl_c = re.search("^user:([^/]*)", name)
if acl_c:
acl_n = acl_c.groups()
if admin_check(5, None) == 1:
return 0
curs.execute("select dec from acl where title = ?", ['user:' + acl_n[0]])
acl_data = curs.fetchall()
if acl_data:
if acl_data[0][0] == 'all':
return 0
if acl_data[0][0] == 'user' and not re.search("(\.|:)", ip):
return 0
if ip != acl_n[0] or re.search("(\.|:)", ip):
return 1
if ip == acl_n[0] and not re.search("(\.|:)", ip) and not re.search("(\.|:)", acl_n[0]):
return 0
else:
return 1
file_c = re.search("^file:(.*)", name)
if file_c and admin_check(5, 'edit (' + name + ')') != 1:
return 1
curs.execute("select acl from user where id = ?", [ip])
user_data = curs.fetchall()
curs.execute("select dec from acl where title = ?", [name])
acl_data = curs.fetchall()
if acl_data:
if acl_data[0][0] == 'user':
if not user_data:
return 1
if acl_data[0][0] == 'admin':
if not user_data:
return 1
if not admin_check(5, 'edit (' + name + ')') == 1:
return 1
curs.execute('select data from other where name = "edit"')
set_data = curs.fetchall()
if set_data:
if set_data[0][0] == 'login':
if not user_data:
return 1
if set_data[0][0] == 'admin':
if not user_data:
return 1
if not admin_check(5, None) == 1:
return 1
return 0
def ban_check(ip = None, tool = None):
if not ip:
ip = ip_check()
band = re.search("^([0-9]{1,3}\.[0-9]{1,3})", ip)
if band:
band_it = band.groups()[0]
else:
band_it = '-'
curs.execute("select end, login from ban where block = ?", [band_it])
band_d = curs.fetchall()
curs.execute("select end, login from ban where block = ?", [ip])
ban_d = curs.fetchall()
data = band_d or ban_d
if data and (data[0][0] == '' or data[0][0] > get_time()):
if tool and tool == 'login':
if data[0][1] == 'O':
return 0
return 1
return 0
def topic_check(name, sub):
ip = ip_check()
if ban_check() == 1:
return 1
curs.execute("select acl from user where id = ?", [ip])
user_data = curs.fetchall()
curs.execute('select data from other where name = "discussion"')
acl_data = curs.fetchall()
if acl_data:
if acl_data[0][0] == 'login':
if not user_data:
return 1
if acl_data[0][0] == 'admin':
if not user_data:
return 1
if not admin_check(3, 'topic (' + name + ')') == 1:
return 1
curs.execute("select dis from acl where title = ?", [name])
acl_data = curs.fetchall()
if acl_data:
if acl_data[0][0] == 'user':
if not user_data:
return 1
if acl_data[0][0] == 'admin':
if not user_data:
return 1
if not admin_check(3, 'topic (' + name + ')') == 1:
return 1
curs.execute("select title from rd where title = ? and sub = ? and not stop = ''", [name, sub])
if curs.fetchall():
if not admin_check(3, 'topic (' + name + ')') == 1:
return 1
return 0
def ban_insert(name, end, why, login, blocker):
now_time = get_time()
if re.search("^([0-9]{1,3}\.[0-9]{1,3})$", name):
band = 'O'
else:
band = ''
curs.execute("select block from ban where block = ?", [name])
if curs.fetchall():
curs.execute("insert into rb (block, end, today, blocker, why, band) values (?, ?, ?, ?, ?, ?)", [name, load_lang('release', 1), now_time, blocker, '', band])
curs.execute("delete from ban where block = ?", [name])
else:
if login != '':
login = 'O'
else:
login = ''
if end != '0':
time = datetime.datetime.now()
plus = datetime.timedelta(seconds = int(end))
r_time = (time + plus).strftime("%Y-%m-%d %H:%M:%S")
else:
r_time = ''
curs.execute("insert into rb (block, end, today, blocker, why, band) values (?, ?, ?, ?, ?, ?)", [name, r_time, now_time, blocker, why, band])
curs.execute("insert into ban (block, end, why, band, login) values (?, ?, ?, ?, ?)", [name, r_time, why, band, login])
conn.commit()
def rd_plus(title, sub, date):
curs.execute("select title from rd where title = ? and sub = ?", [title, sub])
if curs.fetchall():
curs.execute("update rd set date = ? where title = ? and sub = ?", [date, title, sub])
else:
curs.execute("insert into rd (title, sub, date) values (?, ?, ?)", [title, sub, date])
def history_plus(title, data, date, ip, send, leng):
curs.execute("select id from history where title = ? order by id + 0 desc limit 1", [title])
id_data = curs.fetchall()
curs.execute("insert into history (id, title, data, date, ip, send, leng, hide) values (?, ?, ?, ?, ?, ?, ?, '')", [str(int(id_data[0][0]) + 1) if id_data else '1', title, data, date, ip, send, leng])
def leng_check(first, second):
if first < second:
all_plus = '+' + str(second - first)
elif second < first:
all_plus = '-' + str(first - second)
else:
all_plus = '0'
return all_plus
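# Illustrative usage (not part of the original source):
#   leng_check(100, 142)  # -> '+42'
#   leng_check(100, 93)   # -> '-7'
#   leng_check(100, 100)  # -> '0'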
def edit_filter_do(data):
if admin_check(1, 'edit_filter pass') != 1:
curs.execute("select regex, sub from filter")
for data_list in curs.fetchall():
match = re.compile(data_list[0], re.I)
if match.search(data):
ban_insert(
ip_check(),
'0' if data_list[1] == 'X' else data_list[1],
load_lang('edit', 1) + ' ' + load_lang('filter', 1),
None,
load_lang('tool', 1) + ':' + load_lang('edit', 1) + ' ' + load_lang('filter', 1)
)
return 1
return 0
def redirect(data):
return flask.redirect(data)
def re_error(data):
conn.commit()
if data == '/ban':
ip = ip_check()
end = '<li>' + load_lang('why') + ' : ' + load_lang('authority_error') + '</li>'
if ban_check() == 1:
curs.execute("select end, why from ban where block = ?", [ip])
end_data = curs.fetchall()
if not end_data:
match = re.search("^([0-9]{1,3}\.[0-9]{1,3})", ip)
if match:
curs.execute("select end, why from ban where block = ?", [match.groups()[0]])
end_data = curs.fetchall()
if end_data:
end = '<li>' + load_lang('state') + ' : ' + load_lang('ban') + '</li><li>'
if end_data[0][0]:
now = int(re.sub('(\-| |:)', '', get_time()))
day = int(re.sub('(\-| |:)', '', end_data[0][0]))
if now >= day:
curs.execute("delete from ban where block = ?", [ip])
conn.commit()
end += '<script>location.reload();</script>'
else:
end += 'end : ' + end_data[0][0]
else:
end += load_lang('limitless')
end += '</li>'
if end_data[0][1] != '':
end += '<li>' + load_lang('why') + ' : ' + end_data[0][1] + '</li>'
return easy_minify(flask.render_template(skin_check(),
imp = ['error', wiki_set(1), custom(), other2([0, 0])],
data = '<h2>error</h2><ul>' + end + '</ul>',
menu = 0
))
else:
error_data = re.search('\/error\/([0-9]+)', data)
if error_data:
num = int(error_data.groups()[0])
if num == 1:
data = load_lang('no_login_error')
elif num == 2:
data = load_lang('no_exist_user_error')
elif num == 3:
data = load_lang('authority_error')
elif num == 4:
data = load_lang('no_admin_block_error')
elif num == 5:
data = load_lang('skin_error')
elif num == 6:
data = load_lang('same_id_exist_error')
elif num == 7:
data = load_lang('long_id_error')
elif num == 8:
data = load_lang('id_char_error') + ' <a href="/name_filter">(' + load_lang('id') + ' ' + load_lang('filter') + ')</a>'
elif num == 9:
data = load_lang('file_exist_error')
elif num == 10:
data = load_lang('password_error')
elif num == 13:
data = load_lang('recaptcha_error')
elif num == 14:
data = load_lang('file_extension_error')
elif num == 15:
data = load_lang('edit_record_error')
elif num == 16:
data = load_lang('same_file_error')
elif num == 17:
data = load_lang('file_capacity_error') + ' ' + wiki_set(3)
elif num == 19:
data = load_lang('decument_exist_error')
elif num == 20:
data = load_lang('password_diffrent_error')
elif num == 21:
data = load_lang('edit_filter_error')
elif num == 22:
data = load_lang('file_name_error')
else:
data = '???'
return easy_minify(flask.render_template(skin_check(),
imp = ['error', wiki_set(1), custom(), other2([0, 0])],
data = '<h2>error</h2><ul><li>' + data + '</li></ul>',
menu = 0
))
else:
return redirect('/') | data_list += [db_data[0][0]]
else:
data_list += [data_list[0]]
|
run.go | // Copyright (C) 2021 Toitware ApS. All rights reserved.
// Use of this source code is governed by an MIT-style license that can be
// found in the LICENSE file.
package commands
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/google/uuid"
"github.com/setanta314/ar"
"github.com/spf13/cobra"
"github.com/toitlang/jaguar/cmd/jag/directory"
)
// Checks whether a file is a snapshot file. Starts by checking for an ar
// file, since snapshot files are ar files.
func IsSnapshot(filename string) bool {
file, err := os.Open(filename)
if err != nil {
return false
}
defer file.Close()
magic_sequence := make([]byte, 8)
_, err = io.ReadAtLeast(file, magic_sequence, 8)
if err != nil {
return false
}
	if !bytes.Equal(magic_sequence, []byte("!<arch>\n")) {
return false
}
file.Seek(0, io.SeekStart)
reader := ar.NewReader(file)
header, err := reader.Next()
if err != nil {
return false
}
if header.Name != "toit" {
return false
}
return true
}
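// Illustrative usage (not from the original source): callers can branch on the
// file type before deciding whether to compile, e.g.
//
//	if !IsSnapshot(path) {
//		// compile the .toit source to a snapshot first
//	}
//
// The check is cheap: it reads only the 8-byte "!<arch>\n" magic and the
// first ar header.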
// Get the UUID out of a snapshot file, which is an ar archive.
func GetUuid(filename string) (uuid.UUID, error) {
source, err := os.Open(filename)
if err != nil {
fmt.Printf("Failed to open '%s'n", filename)
return uuid.Nil, err
}
reader := ar.NewReader(source)
readAtLeastOneEntry := false
for {
header, err := reader.Next()
if err != nil {
if readAtLeastOneEntry {
fmt.Printf("Did not include UUID: '%s'n", filename)
} else {
fmt.Printf("Not a snapshot file: '%s'n", filename)
}
return uuid.Nil, err
		}
		readAtLeastOneEntry = true
		if header.Name == "uuid" {
raw_uuid := make([]byte, 16)
_, err = io.ReadAtLeast(reader, raw_uuid, 16)
if err != nil {
fmt.Printf("UUID in snapshot too short: '%s'n", filename)
return uuid.Nil, err
}
return uuid.FromBytes(raw_uuid)
}
}
}
func RunCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "run <file>",
Short: "Run Toit code on a Jaguar device",
Long: "Run the specified .toit file on a Jaguar device as a new program. If the\n" +
"device is already executing another program, that program is stopped before\n" +
"the new program is started.",
Args: cobra.ExactArgs(1),
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
cfg, err := directory.GetWorkspaceConfig()
if err != nil {
return err
}
entrypoint := args[0]
if stat, err := os.Stat(entrypoint); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("no such file or directory: '%s'", entrypoint)
}
return fmt.Errorf("can't stat file '%s', reason: %w", entrypoint, err)
} else if stat.IsDir() {
return fmt.Errorf("can't run directory: '%s'", entrypoint)
}
ctx := cmd.Context()
deviceSelect, err := parseDeviceFlag(cmd)
if err != nil {
return err
}
sdk, err := GetSDK(ctx)
if err != nil {
return err
}
device, err := GetDevice(ctx, cfg, sdk, true, deviceSelect)
if err != nil {
return err
}
return RunFile(cmd, device, sdk, entrypoint)
},
}
cmd.Flags().StringP("device", "d", "", "use device with a given name or id")
return cmd
}
func RunFile(cmd *cobra.Command, device *Device, sdk *SDK, path string) error {
fmt.Printf("Running '%s' on '%s' ...\n", path, device.Name)
ctx := cmd.Context()
snapshotsCache, err := directory.GetSnapshotsCachePath()
if err != nil {
return err
}
var snapshot string = ""
if IsSnapshot(path) {
snapshot = path
} else {
// We are running a toit file, so we need to compile it to a
// snapshot first.
tempdir, err := ioutil.TempDir("", "jag_run")
if err != nil {
return err
}
defer os.RemoveAll(tempdir)
snapshotFile, err := ioutil.TempFile(tempdir, "jag_run_*.snapshot")
if err != nil |
snapshot = snapshotFile.Name()
err = sdk.Compile(ctx, snapshot, path)
if err != nil {
// We assume the error has been printed.
// Mark the command as silent to avoid printing the error twice.
cmd.SilenceErrors = true
return err
}
}
programId, err := GetUuid(snapshot)
if err != nil {
return err
}
cacheDestination := filepath.Join(snapshotsCache, programId.String()+".snapshot")
// Copy the snapshot into the cache dir so it is available for
// decoding stack traces etc. We want to add it to the cache in
// an atomic rename, but atomic renames only work within a single
// filesystem/mount point. So we have to do this in two steps,
// first copying to a temp file in the cache dir, then renaming
// in that directory.
if cacheDestination != snapshot {
tempFileInCacheDirectory, err := ioutil.TempFile(snapshotsCache, "jag_run_*.snapshot")
if err != nil {
fmt.Printf("Failed to write temporary file in '%s'\n", snapshotsCache)
return err
}
defer tempFileInCacheDirectory.Close()
defer os.Remove(tempFileInCacheDirectory.Name())
source, err := os.Open(snapshot)
if err != nil {
fmt.Printf("Failed to read '%s'n", snapshot)
return err
}
defer source.Close()
defer tempFileInCacheDirectory.Close()
_, err = io.Copy(tempFileInCacheDirectory, source)
if err != nil {
fmt.Printf("Failed to write '%s'n", tempFileInCacheDirectory.Name())
return err
}
tempFileInCacheDirectory.Close()
// Atomic move so no other process can see a half-written snapshot file.
err = os.Rename(tempFileInCacheDirectory.Name(), cacheDestination)
if err != nil {
return err
}
}
b, err := sdk.Build(ctx, device, cacheDestination)
if err != nil {
// We assume the error has been printed.
// Mark the command as silent to avoid printing the error twice.
cmd.SilenceErrors = true
return err
}
if err := device.Run(ctx, sdk, b); err != nil {
fmt.Println("Error:", err)
// We just printed the error.
// Mark the command as silent to avoid printing the error twice.
cmd.SilenceErrors = true
return err
}
fmt.Printf("Success: Sent %dKB code to '%s'\n", len(b)/1024, device.Name)
return nil
}
| {
return err
} |
solution_01_tests.py | import unittest
from src.google_foobar.P008_carrotland.solution_01 import answer
class TestSolution(unittest.TestCase):
def testcase_001(self):
vertices = [[2, 3], [6, 9], [10, 160]]
expected = 289
self.assertEqual(answer(vertices), expected)
def testcase_002(self):
vertices = [[91207, 89566], [-88690, -83026], [67100, 47194]]
expected = 1730960165
self.assertEqual(answer(vertices), expected)
def testcase_003(self):
vertices = [[0, 0], [0, 1], [1, 0]]
expected = 0
self.assertEqual(answer(vertices), expected)
# Illustrated as problem_analysis_triangle_01.png
def | (self):
vertices = [[-1, -1], [1, 0], [0, 1]]
expected = 1
self.assertEqual(answer(vertices), expected)
# Illustrated as problem_analysis_triangle_02.png
def testcase_005(self):
vertices = [[0, 0], [0, 10], [10, 0]]
expected = 36
self.assertEqual(answer(vertices), expected)
# Illustrated as problem_analysis_triangle_03.png
def testcase_006(self):
vertices = [[1, 1], [4, 10], [10, 6]]
expected = 31
self.assertEqual(answer(vertices), expected)
# Illustrated as problem_analysis_triangle_04.png
def testcase_007(self):
vertices = [[-5, 4], [4, 6], [3, -3]]
expected = 39
self.assertEqual(answer(vertices), expected)
# Illustrated as problem_analysis_triangle_05.png
def testcase_008(self):
vertices = [[-5, -3], [5, -3], [0, 6]]
expected = 40
self.assertEqual(answer(vertices), expected)
if __name__ == '__main__':
unittest.main()
| testcase_004 |
zstdchunked_test.go | /*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package zstdchunked
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"testing"
"github.com/containerd/stargz-snapshotter/estargz"
"github.com/klauspost/compress/zstd"
)
// TestZstdChunked tests zstd:chunked
func TestZstdChunked(t *testing.T) {
estargz.CompressionTestSuite(t,
zstdControllerWithLevel(zstd.SpeedFastest),
zstdControllerWithLevel(zstd.SpeedDefault),
zstdControllerWithLevel(zstd.SpeedBetterCompression),
// zstdControllerWithLevel(zstd.SpeedBestCompression), // consumes too much memory to pass on CI
)
}
func zstdControllerWithLevel(compressionLevel zstd.EncoderLevel) estargz.TestingController {
return &zstdController{&Compressor{CompressionLevel: compressionLevel}, &Decompressor{}}
}
type zstdController struct {
*Compressor
*Decompressor
}
func (zc *zstdController) String() string {
return fmt.Sprintf("zstd_compression_level=%v", zc.Compressor.CompressionLevel)
}
func (zc *zstdController) CountStreams(t *testing.T, b []byte) (numStreams int) {
t.Logf("got zstd streams (compressed size: %d):", len(b))
zh := new(zstd.Header)
magicLen := 4 // length of magic bytes and skippable frame magic bytes
zoff := 0
for {
if len(b) <= zoff {
break
} else if len(b)-zoff <= magicLen {
t.Fatalf("invalid frame size %d is too small", len(b)-zoff)
}
remainingFrames := b[zoff:]
// Check if zoff points to the beginning of a frame
if !bytes.Equal(remainingFrames[:magicLen], zstdFrameMagic) {
if !bytes.Equal(remainingFrames[:magicLen], skippableFrameMagic) {
t.Fatalf("frame must start from magic bytes; but %x",
remainingFrames[:magicLen])
}
// This is a skippable frame
size := binary.LittleEndian.Uint32(remainingFrames[magicLen : magicLen+4])
t.Logf(" [%d] at %d in stargz, SKIPPABLE FRAME (nextFrame: %d/%d)",
numStreams, zoff, zoff+(magicLen+4+int(size)), len(b))
zoff += (magicLen + 4 + int(size))
numStreams++
continue
}
// Parse header and get uncompressed size of this frame
if err := zh.Decode(remainingFrames); err != nil {
t.Fatalf("countStreams(zstd), *Header.Decode: %v", err)
}
uncompressedFrameSize := zh.FrameContentSize
if uncompressedFrameSize == 0 {
// FrameContentSize is optional so it's possible we cannot get size info from
// this field. If this frame contains only one block, we can get the decompressed
// size from that block header.
if zh.FirstBlock.OK && zh.FirstBlock.Last && !zh.FirstBlock.Compressed {
uncompressedFrameSize = uint64(zh.FirstBlock.DecompressedSize)
} else {
t.Fatalf("countStreams(zstd), failed to get uncompressed frame size")
}
}
// Identify the offset of the next frame
nextFrame := magicLen // ignore the magic bytes of this frame
for {
// search for the beginning magic bytes of the next frame
searchBase := nextFrame
nextMagicIdx := nextIndex(remainingFrames[searchBase:], zstdFrameMagic)
nextSkippableIdx := nextIndex(remainingFrames[searchBase:], skippableFrameMagic)
nextFrame = len(remainingFrames)
for _, i := range []int{nextMagicIdx, nextSkippableIdx} {
if 0 < i && searchBase+i < nextFrame {
nextFrame = searchBase + i
}
}
// "nextFrame" seems the offset of the next frame. Verify it by checking if
// the decompressed size of this frame is the same value as set in the header.
zr, err := zstd.NewReader(bytes.NewReader(remainingFrames[:nextFrame]))
if err != nil |
defer zr.Close()
res, err := ioutil.ReadAll(zr)
if err != nil && err != io.ErrUnexpectedEOF {
t.Fatalf("countStreams(zstd), ReadAll: %v", err)
}
if uint64(len(res)) == uncompressedFrameSize {
break
}
// Try the next magic byte candidate until end
if uint64(len(res)) > uncompressedFrameSize || nextFrame > len(remainingFrames) {
t.Fatalf("countStreams(zstd), cannot identify frame (off:%d)", zoff)
}
}
t.Logf(" [%d] at %d in stargz, uncompressed length %d (nextFrame: %d/%d)",
numStreams, zoff, uncompressedFrameSize, zoff+nextFrame, len(b))
zoff += nextFrame
numStreams++
}
return numStreams
}
func nextIndex(s1, sub []byte) int {
for i := 0; i < len(s1); i++ {
if len(s1)-i < len(sub) {
return -1
} else if bytes.Equal(s1[i:i+len(sub)], sub) {
return i
}
}
return -1
}
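// Illustrative usage (not from the original source): nextIndex is a plain
// byte-slice scan, so nextIndex([]byte("abZZcd"), []byte("ZZ")) == 2 and
// nextIndex([]byte("abc"), []byte("ZZ")) == -1; CountStreams uses it to locate
// candidate frame boundaries.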
func (zc *zstdController) DiffIDOf(t *testing.T, b []byte) string {
h := sha256.New()
zr, err := zstd.NewReader(bytes.NewReader(b))
if err != nil {
t.Fatalf("diffIDOf(zstd): %v", err)
}
defer zr.Close()
if _, err := io.Copy(h, zr); err != nil {
t.Fatalf("diffIDOf(zstd).Copy: %v", err)
}
return fmt.Sprintf("sha256:%x", h.Sum(nil))
}
// Tests footer encoding, size, and parsing of zstd:chunked.
func TestZstdChunkedFooter(t *testing.T) {
max := int64(200000)
for off := int64(0); off <= max; off += 1023 {
size := max - off
checkZstdChunkedFooter(t, off, size, size/2)
}
}
func checkZstdChunkedFooter(t *testing.T, off, size, cSize int64) {
footer := zstdFooterBytes(uint64(off), uint64(size), uint64(cSize))
if len(footer) != FooterSize {
t.Fatalf("for offset %v, footer length was %d, not expected %d. got bytes: %q", off, len(footer), FooterSize, footer)
}
gotBlobPayloadSize, gotOff, gotSize, err := (&Decompressor{}).ParseFooter(footer)
if err != nil {
t.Fatalf("failed to parse footer for offset %d, footer: %x: err: %v",
off, footer, err)
}
if gotBlobPayloadSize != off-8 {
// 8 is the size of the zstd skippable frame header + the frame size (see WriteTOCAndFooter)
t.Fatalf("ParseFooter(footerBytes(offset %d)) = blobPayloadSize %d; want %d", off, gotBlobPayloadSize, off-8)
}
if gotOff != off {
t.Fatalf("ParseFooter(footerBytes(offset %d)) = off %d; want %d", off, gotOff, off)
}
if gotSize != cSize {
t.Fatalf("ParseFooter(footerBytes(offset %d)) = size %d; want %d", off, gotSize, cSize)
}
}
| {
t.Logf(" [%d] invalid frame candidate: %v", numStreams, err)
continue
} |
output.rs | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
pub struct GetMediaOutput {
/// <p>The content type of the requested media.</p>
pub content_type: std::option::Option<std::string::String>,
/// <p> The payload Kinesis Video Streams returns is a sequence of chunks from the specified
/// stream. For information about the chunks, see . The
/// chunks that Kinesis Video Streams returns in the <code>GetMedia</code> call also include the
/// following additional Matroska (MKV) tags: </p>
/// <ul>
/// <li>
/// <p>AWS_KINESISVIDEO_CONTINUATION_TOKEN (UTF-8 string) - In the event your
/// <code>GetMedia</code> call terminates, you can use this continuation token in your next
/// request to get the next chunk where the last request terminated.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_MILLIS_BEHIND_NOW (UTF-8 string) - Client applications can use
/// this tag value to determine how far behind the chunk returned in the response is from the
/// latest chunk on the stream. </p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_FRAGMENT_NUMBER - Fragment number returned in the chunk.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_SERVER_TIMESTAMP - Server timestamp of the fragment.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_PRODUCER_TIMESTAMP - Producer timestamp of the fragment.</p>
/// </li>
/// </ul>
/// <p>The following tags will be present if an error occurs:</p>
/// <ul>
/// <li>
/// <p>AWS_KINESISVIDEO_ERROR_CODE - String description of an error that caused GetMedia
/// to stop.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_ERROR_ID: Integer code of the error.</p>
/// </li>
/// </ul>
/// <p>The error codes are as follows:</p>
/// <ul>
/// <li>
/// <p>3002 - Error writing to the stream</p>
/// </li>
/// <li>
/// <p>4000 - Requested fragment is not found</p>
/// </li>
/// <li>
/// <p>4500 - Access denied for the stream's KMS key</p>
/// </li>
/// <li>
/// <p>4501 - Stream's KMS key is disabled</p>
/// </li>
/// <li>
/// <p>4502 - Validation error on the stream's KMS key</p>
/// </li>
/// <li>
/// <p>4503 - KMS key specified in the stream is unavailable</p>
/// </li>
/// <li>
/// <p>4504 - Invalid usage of the KMS key specified in the stream</p>
/// </li>
/// <li>
/// <p>4505 - Invalid state of the KMS key specified in the stream</p>
/// </li>
/// <li>
/// <p>4506 - Unable to find the KMS key specified in the stream</p>
/// </li>
/// <li>
/// <p>5000 - Internal error</p>
/// </li>
/// </ul>
pub payload: aws_smithy_http::byte_stream::ByteStream,
}
impl GetMediaOutput {
/// <p>The content type of the requested media.</p>
pub fn content_type(&self) -> std::option::Option<&str> {
self.content_type.as_deref()
}
/// <p> The payload Kinesis Video Streams returns is a sequence of chunks from the specified
/// stream. For information about the chunks, see . The
/// chunks that Kinesis Video Streams returns in the <code>GetMedia</code> call also include the
/// following additional Matroska (MKV) tags: </p>
/// <ul>
/// <li>
/// <p>AWS_KINESISVIDEO_CONTINUATION_TOKEN (UTF-8 string) - In the event your
/// <code>GetMedia</code> call terminates, you can use this continuation token in your next
/// request to get the next chunk where the last request terminated.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_MILLIS_BEHIND_NOW (UTF-8 string) - Client applications can use
/// this tag value to determine how far behind the chunk returned in the response is from the
/// latest chunk on the stream. </p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_FRAGMENT_NUMBER - Fragment number returned in the chunk.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_SERVER_TIMESTAMP - Server timestamp of the fragment.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_PRODUCER_TIMESTAMP - Producer timestamp of the fragment.</p>
/// </li>
/// </ul>
/// <p>The following tags will be present if an error occurs:</p>
/// <ul>
/// <li>
/// <p>AWS_KINESISVIDEO_ERROR_CODE - String description of an error that caused GetMedia
/// to stop.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_ERROR_ID: Integer code of the error.</p>
/// </li>
/// </ul>
/// <p>The error codes are as follows:</p>
/// <ul>
/// <li>
/// <p>3002 - Error writing to the stream</p>
/// </li>
/// <li>
/// <p>4000 - Requested fragment is not found</p>
/// </li>
/// <li>
/// <p>4500 - Access denied for the stream's KMS key</p>
/// </li>
/// <li>
/// <p>4501 - Stream's KMS key is disabled</p> | /// <li>
/// <p>4503 - KMS key specified in the stream is unavailable</p>
/// </li>
/// <li>
/// <p>4504 - Invalid usage of the KMS key specified in the stream</p>
/// </li>
/// <li>
/// <p>4505 - Invalid state of the KMS key specified in the stream</p>
/// </li>
/// <li>
/// <p>4506 - Unable to find the KMS key specified in the stream</p>
/// </li>
/// <li>
/// <p>5000 - Internal error</p>
/// </li>
/// </ul>
pub fn payload(&self) -> &aws_smithy_http::byte_stream::ByteStream {
&self.payload
}
}
impl std::fmt::Debug for GetMediaOutput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetMediaOutput");
formatter.field("content_type", &self.content_type);
formatter.field("payload", &self.payload);
formatter.finish()
}
}
/// See [`GetMediaOutput`](crate::output::GetMediaOutput)
pub mod get_media_output {
/// A builder for [`GetMediaOutput`](crate::output::GetMediaOutput)
#[non_exhaustive]
#[derive(std::default::Default, std::fmt::Debug)]
pub struct Builder {
pub(crate) content_type: std::option::Option<std::string::String>,
pub(crate) payload: std::option::Option<aws_smithy_http::byte_stream::ByteStream>,
}
impl Builder {
/// <p>The content type of the requested media.</p>
pub fn content_type(mut self, input: impl Into<std::string::String>) -> Self {
self.content_type = Some(input.into());
self
}
/// <p>The content type of the requested media.</p>
pub fn set_content_type(mut self, input: std::option::Option<std::string::String>) -> Self {
self.content_type = input;
self
}
/// <p> The payload Kinesis Video Streams returns is a sequence of chunks from the specified
/// stream. For information about the chunks, see the Kinesis Video Streams documentation. The
/// chunks that Kinesis Video Streams returns in the <code>GetMedia</code> call also include the
/// following additional Matroska (MKV) tags: </p>
/// <ul>
/// <li>
/// <p>AWS_KINESISVIDEO_CONTINUATION_TOKEN (UTF-8 string) - In the event your
/// <code>GetMedia</code> call terminates, you can use this continuation token in your next
/// request to get the next chunk where the last request terminated.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_MILLIS_BEHIND_NOW (UTF-8 string) - Client applications can use
/// this tag value to determine how far behind the chunk returned in the response is from the
/// latest chunk on the stream. </p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_FRAGMENT_NUMBER - Fragment number returned in the chunk.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_SERVER_TIMESTAMP - Server timestamp of the fragment.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_PRODUCER_TIMESTAMP - Producer timestamp of the fragment.</p>
/// </li>
/// </ul>
/// <p>The following tags will be present if an error occurs:</p>
/// <ul>
/// <li>
/// <p>AWS_KINESISVIDEO_ERROR_CODE - String description of an error that caused GetMedia
/// to stop.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_ERROR_ID: Integer code of the error.</p>
/// </li>
/// </ul>
/// <p>The error codes are as follows:</p>
/// <ul>
/// <li>
/// <p>3002 - Error writing to the stream</p>
/// </li>
/// <li>
/// <p>4000 - Requested fragment is not found</p>
/// </li>
/// <li>
/// <p>4500 - Access denied for the stream's KMS key</p>
/// </li>
/// <li>
/// <p>4501 - Stream's KMS key is disabled</p>
/// </li>
/// <li>
/// <p>4502 - Validation error on the stream's KMS key</p>
/// </li>
/// <li>
/// <p>4503 - KMS key specified in the stream is unavailable</p>
/// </li>
/// <li>
/// <p>4504 - Invalid usage of the KMS key specified in the stream</p>
/// </li>
/// <li>
/// <p>4505 - Invalid state of the KMS key specified in the stream</p>
/// </li>
/// <li>
/// <p>4506 - Unable to find the KMS key specified in the stream</p>
/// </li>
/// <li>
/// <p>5000 - Internal error</p>
/// </li>
/// </ul>
pub fn payload(mut self, input: aws_smithy_http::byte_stream::ByteStream) -> Self {
self.payload = Some(input);
self
}
/// <p> The payload Kinesis Video Streams returns is a sequence of chunks from the specified
/// stream. For information about the chunks, see the Kinesis Video Streams documentation. The
/// chunks that Kinesis Video Streams returns in the <code>GetMedia</code> call also include the
/// following additional Matroska (MKV) tags: </p>
/// <ul>
/// <li>
/// <p>AWS_KINESISVIDEO_CONTINUATION_TOKEN (UTF-8 string) - In the event your
/// <code>GetMedia</code> call terminates, you can use this continuation token in your next
/// request to get the next chunk where the last request terminated.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_MILLIS_BEHIND_NOW (UTF-8 string) - Client applications can use
/// this tag value to determine how far behind the chunk returned in the response is from the
/// latest chunk on the stream. </p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_FRAGMENT_NUMBER - Fragment number returned in the chunk.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_SERVER_TIMESTAMP - Server timestamp of the fragment.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_PRODUCER_TIMESTAMP - Producer timestamp of the fragment.</p>
/// </li>
/// </ul>
/// <p>The following tags will be present if an error occurs:</p>
/// <ul>
/// <li>
/// <p>AWS_KINESISVIDEO_ERROR_CODE - String description of an error that caused GetMedia
/// to stop.</p>
/// </li>
/// <li>
/// <p>AWS_KINESISVIDEO_ERROR_ID: Integer code of the error.</p>
/// </li>
/// </ul>
/// <p>The error codes are as follows:</p>
/// <ul>
/// <li>
/// <p>3002 - Error writing to the stream</p>
/// </li>
/// <li>
/// <p>4000 - Requested fragment is not found</p>
/// </li>
/// <li>
/// <p>4500 - Access denied for the stream's KMS key</p>
/// </li>
/// <li>
/// <p>4501 - Stream's KMS key is disabled</p>
/// </li>
/// <li>
/// <p>4502 - Validation error on the stream's KMS key</p>
/// </li>
/// <li>
/// <p>4503 - KMS key specified in the stream is unavailable</p>
/// </li>
/// <li>
/// <p>4504 - Invalid usage of the KMS key specified in the stream</p>
/// </li>
/// <li>
/// <p>4505 - Invalid state of the KMS key specified in the stream</p>
/// </li>
/// <li>
/// <p>4506 - Unable to find the KMS key specified in the stream</p>
/// </li>
/// <li>
/// <p>5000 - Internal error</p>
/// </li>
/// </ul>
pub fn set_payload(
mut self,
input: std::option::Option<aws_smithy_http::byte_stream::ByteStream>,
) -> Self {
self.payload = input;
self
}
/// Consumes the builder and constructs a [`GetMediaOutput`](crate::output::GetMediaOutput)
pub fn build(self) -> crate::output::GetMediaOutput {
crate::output::GetMediaOutput {
content_type: self.content_type,
payload: self.payload.unwrap_or_default(),
}
}
}
}
impl GetMediaOutput {
/// Creates a new builder-style object to manufacture [`GetMediaOutput`](crate::output::GetMediaOutput)
pub fn builder() -> crate::output::get_media_output::Builder {
crate::output::get_media_output::Builder::default()
}
} | /// </li>
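// Illustrative builder usage (a sketch: the content type and payload bytes
// below are made-up values, and `ByteStream::from_static` is assumed from
// aws-smithy-http rather than shown in this file):
//
//     let output = crate::output::GetMediaOutput::builder()
//         .content_type("video/webm")
//         .payload(aws_smithy_http::byte_stream::ByteStream::from_static(b"<mkv chunks>"))
//         .build();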
/// <li>
/// <p>4502 - Validation error on the stream's KMS key</p>
/// </li> |
connection.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package connection
import (
"context"
"fmt"
"io"
"github.com/golang/protobuf/proto"
cb "github.com/hyperledger/fabric-protos-go/common"
ab "github.com/hyperledger/fabric-protos-go/orderer"
pb "github.com/hyperledger/fabric-protos-go/peer"
"github.com/littlegirlpppp/fabric-sdk-go-gm/internal/github.com/hyperledger/fabric/common/crypto"
"github.com/littlegirlpppp/fabric-sdk-go-gm/internal/github.com/hyperledger/fabric/protoutil"
"github.com/littlegirlpppp/fabric-sdk-go-gm/pkg/common/logging"
"github.com/littlegirlpppp/fabric-sdk-go-gm/pkg/common/options"
fabcontext "github.com/littlegirlpppp/fabric-sdk-go-gm/pkg/common/providers/context"
"github.com/littlegirlpppp/fabric-sdk-go-gm/pkg/common/providers/fab"
"github.com/littlegirlpppp/fabric-sdk-go-gm/pkg/fab/comm"
clientdisp "github.com/littlegirlpppp/fabric-sdk-go-gm/pkg/fab/events/client/dispatcher"
"github.com/pkg/errors"
"google.golang.org/grpc"
)
var logger = logging.NewLogger("fabsdk/fab")
type deliverStream interface {
grpc.ClientStream
Send(*cb.Envelope) error
Recv() (*pb.DeliverResponse, error)
}
// DeliverConnection manages the connection to the deliver server
type DeliverConnection struct {
*comm.StreamConnection
url string
}
// StreamProvider creates a deliver stream
type StreamProvider func(pb.DeliverClient) (stream deliverStream, cancel func(), err error)
var (
// Deliver creates a Deliver stream
Deliver = func(client pb.DeliverClient) (deliverStream, func(), error) {
ctx, cancel := context.WithCancel(context.Background())
stream, err := client.Deliver(ctx)
return stream, cancel, err
}
// DeliverFiltered creates a DeliverFiltered stream
DeliverFiltered = func(client pb.DeliverClient) (deliverStream, func(), error) {
ctx, cancel := context.WithCancel(context.Background())
stream, err := client.DeliverFiltered(ctx)
return stream, cancel, err
}
)
// New returns a new Deliver Server connection
func | (ctx fabcontext.Client, chConfig fab.ChannelCfg, streamProvider StreamProvider, url string, opts ...options.Opt) (*DeliverConnection, error) {
logger.Debugf("Connecting to %s...", url)
connect, err := comm.NewStreamConnection(
ctx, chConfig,
func(grpcconn *grpc.ClientConn) (grpc.ClientStream, func(), error) {
return streamProvider(pb.NewDeliverClient(grpcconn))
},
url, opts...,
)
if err != nil {
return nil, err
}
return &DeliverConnection{
StreamConnection: connect,
url: url,
}, nil
}
func (c *DeliverConnection) deliverStream() deliverStream {
if c.Stream() == nil {
return nil
}
stream, ok := c.Stream().(deliverStream)
if !ok {
panic(fmt.Sprintf("invalid DeliverStream type %T", c.Stream()))
}
return stream
}
// Send sends a seek request to the deliver server
func (c *DeliverConnection) Send(seekInfo *ab.SeekInfo) error {
if c.Closed() {
return errors.New("connection is closed")
}
logger.Debugf("Sending %#v", seekInfo)
env, err := c.createSignedEnvelope(seekInfo)
if err != nil {
return err
}
return c.deliverStream().Send(env)
}
// Receive receives events from the deliver server
func (c *DeliverConnection) Receive(eventch chan<- interface{}) {
for {
stream := c.deliverStream()
if stream == nil {
logger.Warn("The stream has closed. Terminating loop.")
break
}
in, err := stream.Recv()
logger.Debugf("Got deliver response: %#v", in)
if c.Closed() {
logger.Debugf("The connection has closed with error [%s]. Terminating loop.", err)
break
}
if err == io.EOF {
// This signifies that the stream has been terminated at the client-side. No need to send an event.
logger.Debug("Received EOF from stream.")
break
}
if err != nil {
logger.Warnf("Received error from stream: [%s]. Sending disconnected event.", err)
eventch <- clientdisp.NewDisconnectedEvent(err)
break
}
eventch <- NewEvent(in, c.url)
}
logger.Debug("Exiting stream listener")
}
func (c *DeliverConnection) createSignedEnvelope(msg proto.Message) (*cb.Envelope, error) {
// TODO: Do we need to make these configurable?
var msgVersion int32
var epoch uint64
payloadChannelHeader := protoutil.MakeChannelHeader(cb.HeaderType_DELIVER_SEEK_INFO, msgVersion, c.ChannelConfig().ID(), epoch)
payloadChannelHeader.TlsCertHash = c.TLSCertHash()
data, err := proto.Marshal(msg)
if err != nil {
return nil, err
}
identity, err := c.Context().Serialize()
if err != nil {
return nil, err
}
nonce, err := crypto.GetRandomNonce()
if err != nil {
return nil, err
}
payloadSignatureHeader := &cb.SignatureHeader{
Creator: identity,
Nonce: nonce,
}
paylBytes := protoutil.MarshalOrPanic(&cb.Payload{
Header: protoutil.MakePayloadHeader(payloadChannelHeader, payloadSignatureHeader),
Data: data,
})
signature, err := c.Context().SigningManager().Sign(paylBytes, c.Context().PrivateKey())
if err != nil {
return nil, err
}
return &cb.Envelope{Payload: paylBytes, Signature: signature}, nil
}
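// Usage sketch (illustrative; the field values are assumptions, not taken
// from this package): requesting delivery of every block from the oldest
// available onward over an established connection `conn`.
//
//	seekInfo := &ab.SeekInfo{
//		Start: &ab.SeekPosition{Type: &ab.SeekPosition_Oldest{Oldest: &ab.SeekOldest{}}},
//		Stop: &ab.SeekPosition{Type: &ab.SeekPosition_Specified{
//			Specified: &ab.SeekSpecified{Number: ^uint64(0)}}},
//		Behavior: ab.SeekInfo_BLOCK_UNTIL_READY,
//	}
//	err := conn.Send(seekInfo)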
// Event contains the deliver event as well as the event source
type Event struct {
SourceURL string
Event interface{}
}
// NewEvent returns a deliver event
func NewEvent(event interface{}, sourceURL string) *Event {
return &Event{
SourceURL: sourceURL,
Event: event,
}
}
| New |
Suggestion.js | import React from "react";
import styled from "styled-components";
import theme from "../../styles/theme";
const { fontSizes } = theme;
const SuggestionWrapper = styled.form`
display: grid;
align-items: center;
justify-items: center;
grid-template-rows: 1fr;
grid-row-gap: 10px;
margin: 10px 0;
@media screen and (min-width: 768px) {
.submit {
font-size: ${fontSizes.bodyMedium};
}
}
@media screen and (min-width: 1024px) {
grid-template-columns: 2fr 1fr;
grid-column-gap: 10px;
}
`;
const Suggestion = () => {
return (
<SuggestionWrapper | >
<input type="hidden" name="bot-field" />
<input type="hidden" name="form-name" value="book-suggestion" />
<input type="text" name="book" placeholder="Suggest a book..."></input>
<button type="submit" className="submit">
Submit
</button>
</SuggestionWrapper>
);
};
export default Suggestion; | name="book-suggestion"
method="post"
data-netlify="true"
data-netlify-honeypot="bot-field" |
main.rs | extern crate sdl2;
extern crate rustc_serialize;
use sdl2::keycode::KeyCode;
use sdl2::event::Event;
use sdl2::timer::get_ticks;
mod sprite;
mod assets;
mod draw; | mod tile;
mod map;
mod physics;
use sprite::Sprite;
use player::Player;
use player::PlayerStatus;
use draw::Draw;
fn main() {
//initialize sdl
let sdl_context = sdl2::init().video().events().build()
.ok().expect("Failed to initialize SDL.");
//create a window
let window = sdl_context.window("Rust-Man", 640, 480)
.position_centered()
.build()
.ok().expect("Failed to create window.");
//create a renderer
let mut renderer = window.renderer().accelerated().build()
.ok().expect("Failed to create accelerated renderer.");
//create a new player
let mut player = Player::new(Sprite::new_from_file("sonic.bmp", &renderer),
PlayerStatus::Stationary);
//start drawing
let mut drawer = renderer.drawer();
drawer.clear();
drawer.present();
//event loop stuff
let mut running = true;
let mut event_pump = sdl_context.event_pump();
let mut prev_time = get_ticks();
let mut delta_t = get_ticks() - prev_time;
while running {
//timer stuff
delta_t = get_ticks() - prev_time;
//limit to 60fps
if delta_t > 16 {
//handle event queue
for event in event_pump.poll_iter() {
match event {
Event::Quit {..} | Event::KeyDown {keycode: KeyCode::Escape, .. } => {
running = false
},
Event::KeyDown {keycode: KeyCode::Left, .. } => {
player.status = PlayerStatus::MovingLeft
},
Event::KeyDown {keycode: KeyCode::Right, .. } => {
player.status = PlayerStatus::MovingRight
},
Event::KeyUp {keycode: KeyCode::Left, .. } => {
player.status = PlayerStatus::Decelerating
},
Event::KeyUp {keycode: KeyCode::Right, .. } => {
player.status = PlayerStatus::Decelerating
},
_ => {}
}
}
//move player
player.update();
//draw
drawer.clear();
player.draw(&mut drawer, &None);
drawer.present();
//more timer stuff
prev_time = get_ticks();
}
}
println!("Goodbye, world!");
} | mod player; |
__init__.py | from .nondet import Nondet |
||
0026_course_last_taught.py | # Generated by Django 2.1.5 on 2019-08-29 21:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0025_courseuser_section'),
] | migrations.AddField(
model_name='course',
name='last_taught',
field=models.CharField(default='', max_length=100),
),
] |
operations = [ |
IVoteAction.ts | import { VoteActionType } from 'constants/voteActionType'
/**
* Vote action interface
*
* @export
* @interface IVoteAction
*/
export interface IVoteAction {
| } | payload: any,
type: VoteActionType
|
rental.routes.ts | import { CreateRentalController } from "@modules/rentals/useCases/createRental/CreateRentalController";
import { DevolutionRentalController } from "@modules/rentals/useCases/devolutionRental/DevolutionRentalController";
import { ListRentalsByUserController } from "@modules/rentals/useCases/listRentalsByUser/ListRentalsByUserController";
import { Router } from "express";
import { ensureAuthenticated } from "../middlewares/ensureAuthenticated";
const rentalRoutes = Router();
const createRentalController = new CreateRentalController();
const devolutionRentalController = new DevolutionRentalController();
const listRentalsByUserController = new ListRentalsByUserController();
rentalRoutes.post("/", ensureAuthenticated, createRentalController.handle);
rentalRoutes.post(
"/devolution/:id",
ensureAuthenticated,
devolutionRentalController.handle
);
rentalRoutes.get(
"/user",
ensureAuthenticated,
listRentalsByUserController.handle | );
export { rentalRoutes }; | |
gradient.rs | #[inline(always)]
#[rustfmt::skip]
pub(crate) fn get2(index: usize) -> [f64; 2] {
// Vectors are combinations of -1, 0, and 1
// Precompute the normalized element
const DIAG : f64 = std::f64::consts::FRAC_1_SQRT_2;
match index % 8 {
0 => [ 1.0, 0.0],
1 => [ -1.0, 0.0],
2 => [ 0.0, 1.0],
3 => [ 0.0, -1.0],
4 => [ DIAG, DIAG],
5 => [-DIAG, DIAG],
6 => [ DIAG, -DIAG],
7 => [-DIAG, -DIAG],
_ => panic!("Attempt to access gradient {} of 8", index % 8),
}
}
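// A minimal sanity check (a sketch, not part of the original test suite):
// every 2-D gradient above should have unit length, since its non-zero
// components are either ±1 or ±FRAC_1_SQRT_2.
#[cfg(test)]
mod get2_tests {
    use super::get2;

    #[test]
    fn gradients_are_unit_length() {
        for i in 0..8 {
            let [x, y] = get2(i);
            let len = (x * x + y * y).sqrt();
            assert!((len - 1.0).abs() < 1e-12, "gradient {} has length {}", i, len);
        }
    }
}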
#[inline(always)]
#[rustfmt::skip]
pub(crate) fn get3(index: usize) -> [f64; 3] {
// Vectors are combinations of -1, 0, and 1
// Precompute the normalized elements
const DIAG : f64 = std::f64::consts::FRAC_1_SQRT_2;
const DIAG2 : f64 = 0.577_350_269_189_625_8;
match index % 32 {
// 12 edges repeated twice then 8 corners
0 | 12 => [ DIAG, DIAG, 0.0],
1 | 13 => [ -DIAG, DIAG, 0.0],
2 | 14 => [ DIAG, -DIAG, 0.0],
3 | 15 => [ -DIAG, -DIAG, 0.0],
4 | 16 => [ DIAG, 0.0, DIAG],
5 | 17 => [ -DIAG, 0.0, DIAG],
6 | 18 => [ DIAG, 0.0, -DIAG],
7 | 19 => [ -DIAG, 0.0, -DIAG],
8 | 20 => [ 0.0, DIAG, DIAG],
9 | 21 => [ 0.0, -DIAG, DIAG],
10 | 22 => [ 0.0, DIAG, -DIAG],
11 | 23 => [ 0.0, -DIAG, -DIAG],
24 => [ DIAG2, DIAG2, DIAG2],
25 => [-DIAG2, DIAG2, DIAG2],
26 => [ DIAG2, -DIAG2, DIAG2],
27 => [-DIAG2, -DIAG2, DIAG2],
28 => [ DIAG2, DIAG2, -DIAG2],
29 => [-DIAG2, DIAG2, -DIAG2],
30 => [ DIAG2, -DIAG2, -DIAG2],
31 => [-DIAG2, -DIAG2, -DIAG2],
_ => panic!("Attempt to access gradient {} of 32", index % 32),
}
}
#[inline(always)]
#[rustfmt::skip]
pub(crate) fn get4(index: usize) -> [f64; 4] {
// Vectors are combinations of -1, 0, and 1
// Precompute the normalized elements
const DIAG : f64 = 0.577_350_269_189_625_8;
const DIAG2 : f64 = 0.5;
match index % 64 {
// 32 edges then 16 corners repeated twice
0 => [ 0.0, DIAG, DIAG, DIAG],
1 => [ 0.0, DIAG, DIAG, -DIAG],
2 => [ 0.0, DIAG, -DIAG, DIAG],
3 => [ 0.0, DIAG, -DIAG, -DIAG],
4 => [ 0.0, -DIAG, DIAG, DIAG], | 9 => [ DIAG, 0.0, DIAG, -DIAG],
10 => [ DIAG, 0.0, -DIAG, DIAG],
11 => [ DIAG, 0.0, -DIAG, -DIAG],
12 => [ -DIAG, 0.0, DIAG, DIAG],
13 => [ -DIAG, 0.0, DIAG, -DIAG],
14 => [ -DIAG, 0.0, -DIAG, DIAG],
15 => [ -DIAG, 0.0, -DIAG, -DIAG],
16 => [ DIAG, DIAG, 0.0, DIAG],
17 => [ DIAG, DIAG, 0.0, -DIAG],
18 => [ DIAG, -DIAG, 0.0, DIAG],
19 => [ DIAG, -DIAG, 0.0, -DIAG],
20 => [ -DIAG, DIAG, 0.0, DIAG],
21 => [ -DIAG, DIAG, 0.0, -DIAG],
22 => [ -DIAG, -DIAG, 0.0, DIAG],
23 => [ -DIAG, -DIAG, 0.0, -DIAG],
24 => [ DIAG, DIAG, DIAG, 0.0],
25 => [ DIAG, DIAG, -DIAG, 0.0],
26 => [ DIAG, -DIAG, DIAG, 0.0],
27 => [ DIAG, -DIAG, -DIAG, 0.0],
28 => [ -DIAG, DIAG, DIAG, 0.0],
29 => [ -DIAG, DIAG, -DIAG, 0.0],
30 => [ -DIAG, -DIAG, DIAG, 0.0],
31 => [ -DIAG, -DIAG, -DIAG, 0.0],
32 | 48 => [ DIAG2, DIAG2, DIAG2, DIAG2],
33 | 49 => [-DIAG2, DIAG2, DIAG2, DIAG2],
34 | 50 => [ DIAG2, -DIAG2, DIAG2, DIAG2],
35 | 51 => [-DIAG2, -DIAG2, DIAG2, DIAG2],
36 | 52 => [ DIAG2, DIAG2, -DIAG2, DIAG2],
37 | 53 => [-DIAG2, DIAG2, -DIAG2, DIAG2],
38 | 54 => [ DIAG2, DIAG2, DIAG2, -DIAG2],
39 | 55 => [-DIAG2, DIAG2, DIAG2, -DIAG2],
40 | 56 => [ DIAG2, -DIAG2, -DIAG2, DIAG2],
41 | 57 => [-DIAG2, -DIAG2, -DIAG2, DIAG2],
42 | 58 => [ DIAG2, -DIAG2, DIAG2, -DIAG2],
43 | 59 => [-DIAG2, -DIAG2, DIAG2, -DIAG2],
44 | 60 => [ DIAG2, DIAG2, -DIAG2, -DIAG2],
45 | 61 => [-DIAG2, DIAG2, -DIAG2, -DIAG2],
46 | 62 => [ DIAG2, -DIAG2, -DIAG2, -DIAG2],
47 | 63 => [-DIAG2, -DIAG2, -DIAG2, -DIAG2],
_ => panic!("Attempt to access gradient {} of 64", index % 64),
}
} | 5 => [ 0.0, -DIAG, DIAG, -DIAG],
6 => [ 0.0, -DIAG, -DIAG, DIAG],
7 => [ 0.0, -DIAG, -DIAG, -DIAG],
8 => [ DIAG, 0.0, DIAG, DIAG], |
apiversion.go | package lib
import (
"fmt"
"os"
)
var (
DefaultApiVersionNumber = "45.0"
apiVersionNumber = DefaultApiVersionNumber
apiVersion = fmt.Sprintf("v%s", apiVersionNumber)
) |
func ApiVersion() string {
return apiVersion
}
func ApiVersionNumber() string {
return apiVersionNumber
}
func (f *Force) UpdateApiVersion(version string) (err error) {
SetApiVersion(version)
f.Credentials.SessionOptions.ApiVersion = version
_, err = ForceSaveLogin(*f.Credentials, os.Stdout)
return
}
func SetApiVersion(version string) {
apiVersion = "v" + version
apiVersionNumber = version
} | |
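// Usage sketch (illustrative; this would normally live in a _test.go file):
//
//	SetApiVersion("46.0")
//	ApiVersion()       // "v46.0"
//	ApiVersionNumber() // "46.0"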
loss.py | import torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
LAMBDA_COORD = 5
LAMBDA_NOOBJ = 0.5
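# These weights follow the YOLOv1 paper: coordinate errors are up-weighted
# (lambda_coord = 5) and confidence loss for object-free cells is
# down-weighted (lambda_noobj = 0.5) so the many empty cells don't dominate.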
def calc_loss(inp, target, opt):
    if inp.size(0) != target.size(0):
        raise Exception("Batch size does not match")
    total_loss = torch.tensor(0.0)
    for i in range(inp.size(0)):
        # Use per-sample views; rebinding `inp`/`target` inside the loop
        # would clobber the batch tensors and break every later iteration.
        inp_i = inp[i]
        target_i = target[i]
        Q = predict_one_bbox(inp_i, target_i, opt)
        total_loss = total_loss + calc_loss_single(Q, target_i, opt)
    return total_loss
def predict_one_bbox(inp, target, opt):
|
def calc_loss_single(inp, target, opt):
loss = torch.zeros(1)
for i in range(opt.S):
for j in range(opt.S):
# case 1: grid cell HAS object
if len(target[i, j, :].nonzero()) > 1:
# localization
loss = loss + LAMBDA_COORD * (torch.pow(inp[i, j, 0] - target[i, j, 0], 2) + torch.pow(inp[i, j, 1] - target[i, j, 1], 2))
loss = loss + LAMBDA_COORD * (torch.pow(torch.sqrt(torch.abs(inp[i, j, 2])) - torch.sqrt(torch.abs(target[i, j,2])), 2) \
+ torch.pow(torch.sqrt(torch.abs(inp[i, j, 3])) - torch.sqrt(torch.abs(target[i, j, 3])), 2)) # org
# loss = loss + LAMBDA_COORD * (torch.sqrt(torch.abs(P[i, j, 2] - G[i, j, 2])) +
# torch.sqrt(torch.abs(P[i, j, 3] - G[i, j, 3]))) # ZZ
loss = loss + torch.pow(inp[i, j, 4]-1, 2) # Ground truth confidence is constant 1
# classification
true_cls = target[i, j, -1].type(torch.int64)
true_cls_vec = torch.zeros(opt.C)
true_cls_vec[true_cls] = torch.tensor(1)
pred_cls_vec = inp[i, j, -opt.C:]
loss = loss + torch.sum(torch.pow(pred_cls_vec - true_cls_vec, 2))
# case 2: grid cell NO object
# classification
else:
loss = loss + LAMBDA_NOOBJ * torch.pow(inp[i, j, 4] - 0, 2) # Ground truth confidence is constant 0
return loss
def calc_IOU(box_1, box_2, device=torch.device('cpu'), use_float64=False):
"""
    Tensor version of calc_IOU(): compute the IOU between two bounding boxes.
    :param box_1: Detection x, y, w, h image coordinates in [0, 1]
    :param box_2: GroundTruth x, y, w, h image coordinates in [0, 1]
    :return: intersection-over-union as a scalar tensor
"""
'''
x_min_1 = torch.clamp((box_1[0] - box_1[2] / 2), 0, 1).to(device)
x_max_1 = torch.clamp((box_1[0] + box_1[2] / 2), 0, 1).to(device)
y_min_1 = torch.clamp((box_1[1] - box_1[3] / 2), 0, 1).to(device)
y_max_1 = torch.clamp((box_1[1] + box_1[3] / 2), 0, 1).to(device)
'''
x_min_1 = torch.clamp((abs(box_1[0]) - abs(box_1[2]) / 2), 0, 1).to(device)
x_max_1 = torch.clamp((abs(box_1[0]) + abs(box_1[2]) / 2), 0, 1).to(device)
y_min_1 = torch.clamp((abs(box_1[1]) - abs(box_1[3]) / 2), 0, 1).to(device)
y_max_1 = torch.clamp((abs(box_1[1]) + abs(box_1[3]) / 2), 0, 1).to(device)
x_min_2 = torch.clamp((box_2[0] - box_2[2] / 2), 0, 1).to(device)
x_max_2 = torch.clamp((box_2[0] + box_2[2] / 2), 0, 1).to(device)
y_min_2 = torch.clamp((box_2[1] - box_2[3] / 2), 0, 1).to(device)
y_max_2 = torch.clamp((box_2[1] + box_2[3] / 2), 0, 1).to(device)
# z = torch.tensor(0, dtype=torch.float).to(device)
z = torch.tensor(0.).to(device)
a = torch.min(x_max_1, x_max_2)
b = torch.max(x_min_1, x_min_2)
c = torch.min(y_max_1, y_max_2)
d = torch.max(y_min_1, y_min_2)
overlap_width = torch.max(a-b, z)
overlap_height = torch.max(c-d, z)
overlap_area = overlap_width * overlap_height
union_area = (x_max_1 - x_min_1) * (y_max_1 - y_min_1) \
+ (x_max_2 - x_min_2) * (y_max_2 - y_min_2) \
- overlap_area
intersection_over_union = overlap_area / union_area
return intersection_over_union
| Q = torch.zeros(opt.S, opt.S, 5 + opt.C)
select = torch.tensor(0).to(device)
for i in range(opt.S):
for j in range(opt.S):
for b in range(opt.B):
if b==0:
boxes = inp[i, j, b*5 : b*5+5].to(device)
else:
boxes = torch.stack((boxes, inp[i, j, b*5 : b*5+5])).to(device)
if len(target[i, j, :].nonzero()) > 1:
max_iou = torch.tensor([0.]).to(device)
groundtruth_box = target[i, j, :4].clone()
for b in range(opt.B):
iou = calc_IOU(groundtruth_box, boxes[b][:-1], device)
if iou > max_iou:
max_iou = iou
select = torch.tensor(b).to(device)
else:
max_confidence = torch.tensor(0.).to(device)
for b in range(opt.B):
confidence = boxes[b][-1]
if confidence > max_confidence:
max_confidence = confidence
select = torch.tensor(b).to(device)
Q[i, j, :5] = boxes[select]
Q[i, j, 5:] = inp[i, j, -opt.C:]
return Q |
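# Minimal usage sketch for calc_IOU (boxes invented for illustration, not
# taken from any dataset); each box is (x_center, y_center, w, h) in [0, 1].
if __name__ == "__main__":
    box_a = torch.tensor([0.5, 0.5, 0.4, 0.4])
    box_b = torch.tensor([0.6, 0.6, 0.4, 0.4])
    # overlap = 0.3 * 0.3 = 0.09, union = 0.16 + 0.16 - 0.09 = 0.23
    print(calc_IOU(box_a, box_b))  # tensor(0.3913)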
Command.spec.ts | import * as sinon from 'sinon';
import * as assert from 'assert';
import auth from './Auth';
import Command, {
CommandValidate,
CommandCancel,
CommandOption,
CommandTypes,
CommandError
} from './Command';
import Utils from './Utils';
import appInsights from './appInsights';
import { CommandInstance } from './cli/CommandInstance';
import * as chalk from 'chalk';
class MockCommand1 extends Command {
public get name(): string {
return 'mock-command';
}
public get description(): string {
return 'Mock command description';
}
public alias(): string[] | undefined {
return ['mc1'];
}
public autocomplete(): string[] | undefined {
const autocomplete = ['param1', 'param2'];
const parentAutocomplete: string[] | undefined = super.autocomplete();
if (parentAutocomplete) {
return autocomplete.concat(parentAutocomplete);
}
else {
return autocomplete;
}
}
public allowUnknownOptions(): boolean {
return true;
}
public commandAction(cmd: CommandInstance, args: any, cb: (err?: any) => void): void {
this.showDeprecationWarning(cmd, 'mc1', this.name);
cb();
}
public commandHelp(args: any, log: (message: string) => void): void {
}
public validate(): CommandValidate | undefined {
return () => {
return true;
};
}
public cancel(): CommandCancel | undefined {
return () => { };
}
public types(): CommandTypes | undefined {
return {
string: ['option2']
};
}
public options(): CommandOption[] {
return [
{
option: '--debug',
description: 'Runs command with debug logging'
},
{
option: '--option1 [option1]',
description: 'Some option'
},
{
option: '--option2 [option2]',
description: 'Some other option'
}
];
}
public trackUnknownOptionsPublic(telemetryProps: any, options: any) {
return this.trackUnknownOptions(telemetryProps, options);
}
public addUnknownOptionsToPayloadPublic(payload: any, options: any) {
return this.addUnknownOptionsToPayload(payload, options);
}
}
class MockCommand2 extends Command {
public get name(): string {
return 'Mock command 2 [opt]';
}
public get description(): string {
return 'Mock command 2 description';
}
public commandAction(): void {
}
public commandHelp(args: any, log: (message: string) => void): void {
log('MockCommand2 help');
}
public handlePromiseError(response: any, cmd: CommandInstance, callback: (err?: any) => void): void {
this.handleRejectedODataJsonPromise(response, cmd, callback);
}
}
class MockCommand3 extends Command {
public get name(): string {
return 'mock-command';
}
public get description(): string {
return 'Mock command description';
}
public commandAction(): void {
}
public commandHelp(args: any, log: (message: string) => void): void {
}
public options(): CommandOption[] {
return [
{
option: '--debug',
description: 'Runs command with debug logging'
},
{
option: '--option1 [option1]',
description: 'Some option'
}
];
}
}
class | extends Command {
public get name(): string {
return 'mock-command';
}
public get description(): string {
return 'Mock command description';
}
public allowUnknownOptions(): boolean {
return true;
}
public commandAction(cmd: CommandInstance, args: any, cb: (err?: any) => void): void {
cb();
}
public commandHelp(args: any, log: (message: string) => void): void {
}
public options(): CommandOption[] {
return [
{
option: '--debug',
description: 'Runs command with debug logging'
}
];
}
}
describe('Command', () => {
const vcmd = {
action: () => vcmd,
alias: () => vcmd,
option: () => vcmd,
validate: () => vcmd,
cancel: () => vcmd,
help: () => vcmd,
types: () => vcmd,
allowUnknownOptions: () => vcmd
};
let actionSpy: sinon.SinonSpy;
let aliasSpy: sinon.SinonSpy;
let optionSpy: sinon.SinonSpy;
let validateSpy: sinon.SinonSpy;
let cancelSpy: sinon.SinonSpy;
let helpSpy: sinon.SinonSpy;
let typesSpy: sinon.SinonSpy;
let telemetry: any;
before(() => {
sinon.stub(auth, 'restoreAuth').callsFake(() => Promise.resolve());
sinon.stub(appInsights, 'trackEvent').callsFake((t) => {
telemetry = t;
});
});
beforeEach(() => {
actionSpy = sinon.spy(vcmd, 'action');
aliasSpy = sinon.spy(vcmd, 'alias');
optionSpy = sinon.spy(vcmd, 'option');
validateSpy = sinon.spy(vcmd, 'validate');
cancelSpy = sinon.spy(vcmd, 'cancel');
helpSpy = sinon.spy(vcmd, 'help');
typesSpy = sinon.spy(vcmd, 'types');
telemetry = null;
auth.service.connected = true;
});
afterEach(() => {
Utils.restore([
vcmd.action,
vcmd.alias,
vcmd.option,
vcmd.validate,
vcmd.cancel,
vcmd.help,
vcmd.types,
vcmd.allowUnknownOptions,
process.exit
]);
auth.service.connected = false;
});
after(() => {
Utils.restore([
appInsights.trackEvent,
auth.restoreAuth
]);
});
it('has no autocomplete by default', () => {
const cmd = new MockCommand2();
assert.equal(typeof cmd.autocomplete(), 'undefined');
});
it('has no validation logic by default', () => {
const cmd = new MockCommand2();
assert.equal(typeof cmd.validate(), 'undefined');
});
it('does not define option types by default', () => {
const cmd = new MockCommand2();
assert.equal(typeof cmd.types(), 'undefined');
});
it('removes optional arguments from command name', () => {
const cmd = new MockCommand2();
assert.equal(cmd.getCommandName(), 'Mock command 2');
});
// it('initiates command 1 with vorpal', () => {
// const cmd = new MockCommand1();
// const vorpalCommandStub = sinon.stub(vorpal, 'command').callsFake(() => vcmd as any);
// cmd.init(vorpal);
// Utils.restore(vorpal.command);
// assert(vorpalCommandStub.calledOnce);
// });
// it('initiates command 2 with vorpal', () => {
// const cmd = new MockCommand2();
// const vorpalCommandStub = sinon.stub(vorpal, 'command').callsFake(() => vcmd as any);
// cmd.init(vorpal);
// Utils.restore(vorpal.command);
// assert(vorpalCommandStub.calledOnce);
// });
// it('initiates command with command name', () => {
// const cmd = new MockCommand1();
// let name;
// sinon.stub(vorpal, 'command').callsFake((_name) => {
// name = _name;
// return vcmd as any;
// });
// cmd.init(vorpal);
// Utils.restore(vorpal.command);
// assert.equal(name, cmd.name);
// });
// it('initiates command with command description', () => {
// const cmd = new MockCommand1();
// let description;
// sinon.stub(vorpal, 'command').callsFake((_name, _description) => {
// description = _description;
// return vcmd as any;
// });
// cmd.init(vorpal);
// Utils.restore(vorpal.command);
// assert.equal(description, cmd.description);
// });
// it('initiates command with command autocomplete', () => {
// const cmd = new MockCommand1();
// let autocomplete;
// sinon.stub(vorpal, 'command').callsFake((_name, _description, _autocomplete) => {
// autocomplete = _autocomplete;
// return vcmd as any;
// });
// cmd.init(vorpal);
// Utils.restore(vorpal.command);
// assert.deepEqual(autocomplete, cmd.autocomplete());
// });
// it('configures command action', () => {
// const cmd = new MockCommand1();
// sinon.stub(vorpal, 'command').callsFake(() => vcmd as any);
// cmd.init(vorpal);
// Utils.restore(vorpal.command);
// assert(actionSpy.calledOnce);
// });
// it('configures options when available', () => {
// const cmd = new MockCommand1();
// sinon.stub(vorpal, 'command').callsFake(() => vcmd as any);
// cmd.init(vorpal);
// Utils.restore(vorpal.command);
// assert(optionSpy.calledThrice); // there are three options
// });
// it('configures alias when available', () => {
// const cmd = new MockCommand1();
// sinon.stub(vorpal, 'command').callsFake(() => vcmd as any);
// cmd.init(vorpal);
// Utils.restore(vorpal.command);
// assert(aliasSpy.calledOnce);
// });
// it('configures validation when available', () => {
// const cmd = new MockCommand1();
// sinon.stub(vorpal, 'command').callsFake(() => vcmd as any);
// cmd.init(vorpal);
// Utils.restore(vorpal.command);
// assert(validateSpy.calledOnce);
// });
// it('doesn\'t configure validation when unavailable', () => {
// const cmd = new MockCommand2();
// sinon.stub(vorpal, 'command').callsFake(() => vcmd as any);
// cmd.init(vorpal);
// Utils.restore(vorpal.command);
// assert(validateSpy.notCalled);
// });
// it('configures cancellation when available', () => {
// const cmd = new MockCommand1();
// sinon.stub(vorpal, 'command').callsFake(() => vcmd as any);
// cmd.init(vorpal);
// Utils.restore(vorpal.command);
// assert(cancelSpy.calledOnce);
// });
// it('doesn\'t configure cancellation when unavailable', () => {
// const cmd = new MockCommand2();
// sinon.stub(vorpal, 'command').callsFake(() => vcmd as any);
// cmd.init(vorpal);
// Utils.restore(vorpal.command);
// assert(cancelSpy.notCalled);
// });
// it('configures help when available', () => {
// const cmd = new MockCommand1();
// sinon.stub(vorpal, 'command').callsFake(() => vcmd as any);
// cmd.init(vorpal);
// Utils.restore(vorpal.command);
// assert(helpSpy.calledOnce);
// });
// it('configures types when available', () => {
// const cmd = new MockCommand1();
// sinon.stub(vorpal, 'command').callsFake(() => vcmd as any);
// cmd.init(vorpal);
// Utils.restore(vorpal.command);
// assert(typesSpy.calledOnce);
// });
// it('doesn\'t configure type when unavailable', () => {
// const cmd = new MockCommand2();
// sinon.stub(vorpal, 'command').callsFake(() => vcmd as any);
// cmd.init(vorpal);
// Utils.restore(vorpal.command);
// assert(typesSpy.notCalled);
// });
// it('prints help using the log argument when called from the help command', () => {
// const sandbox = sinon.createSandbox();
// sandbox.stub(vorpal, '_command').value({
// command: 'help mock2'
// });
// const log = (msg?: string) => { };
// const logSpy = sinon.spy(log);
// const mock = new MockCommand2();
// const cmd = {
// help: mock.help()
// };
// cmd.help({}, logSpy);
// sandbox.restore();
// assert(logSpy.called);
// });
it('displays error message when it\'s serialized in the error property', () => {
const cmd = {
commandWrapper: {
command: 'command'
},
log: (msg?: string) => { },
prompt: () => { }
};
const mock = new MockCommand2();
mock.handlePromiseError({
error: JSON.stringify({
error: {
message: 'An error has occurred'
}
})
}, cmd, (err?: any) => {
assert.equal(JSON.stringify(err), JSON.stringify(new CommandError('An error has occurred')));
});
});
it('displays the raw error message when the serialized value from the error property is not an error object', () => {
const cmd = {
commandWrapper: {
command: 'command'
},
log: (msg?: string) => { },
prompt: () => { }
};
const mock = new MockCommand2();
mock.handlePromiseError({
error: JSON.stringify({
error: {
id: '123'
}
})
}, cmd, (err?: any) => {
assert.equal(JSON.stringify(err), JSON.stringify(new CommandError(JSON.stringify({
error: {
id: '123'
}
}))));
});
});
it('displays the raw error message when the serialized value from the error property is not a JSON object', () => {
const cmd = {
commandWrapper: {
command: 'command'
},
log: (msg?: string) => { },
prompt: () => { }
};
const mock = new MockCommand2();
mock.handlePromiseError({
error: 'abc'
}, cmd, (err?: any) => {
assert.equal(JSON.stringify(err), JSON.stringify(new CommandError('abc')));
});
});
it('displays error message coming from ADALJS', () => {
const cmd = {
commandWrapper: {
command: 'command'
},
log: (msg?: string) => { },
prompt: () => { }
};
const mock = new MockCommand2();
mock.handlePromiseError({
error: { error_description: 'abc' }
}, cmd, (err?: any) => {
assert.equal(JSON.stringify(err), JSON.stringify(new CommandError('abc')));
});
});
it('shows deprecation warning when command executed using the deprecated name', () => {
const cmd = {
commandWrapper: {
command: 'mc1'
},
log: (msg?: string) => { },
prompt: () => { }
};
const cmdLogSpy: sinon.SinonSpy = sinon.spy(cmd, 'log');
const mock = new MockCommand1();
mock.commandAction(cmd, {}, (err?: any): void => {
assert(cmdLogSpy.calledWith(chalk.yellow(`Command 'mc1' is deprecated. Please use 'mock-command' instead`)))
});
});
it('logs command name in the telemetry when command name used', (done) => {
const mock = new MockCommand1();
const cmd = {
action: mock.action(),
commandWrapper: {
command: 'mock-command'
},
log: (msg?: string) => { },
prompt: () => { }
};
cmd.action({ options: {} }, () => {
try {
assert.equal(telemetry.name, 'mock-command');
done();
}
catch (e) {
done(e);
}
});
});
it('logs command alias in the telemetry when command alias used', (done) => {
const mock = new MockCommand1();
const cmd = {
action: mock.action(),
commandWrapper: {
command: 'mc1'
},
log: (msg?: string) => { },
prompt: () => { }
};
cmd.action({ options: {} }, () => {
try {
assert.equal(telemetry.name, 'mc1');
done();
}
catch (e) {
done(e);
}
});
});
it('logs empty command name in telemetry when command called using something else than name or alias', (done) => {
const mock = new MockCommand1();
const cmd = {
action: mock.action(),
commandWrapper: {
command: 'foo'
},
log: (msg?: string) => { },
prompt: () => { }
};
cmd.action({ options: {} }, () => {
try {
assert.equal(telemetry.name, '');
done();
}
catch (e) {
done(e);
}
});
});
// it('doesn\'t remove leading zeroes from unknown options', (done) => {
// const cmd = new MockCommand1();
// const delimiter = (vorpal as any)._delimiter;
// const argv = process.argv;
// vorpal.delimiter('');
// sinon.stub(cmd as any, 'initAction').callsFake((args) => {
// try {
// assert.strictEqual(args.options.option3, '00123');
// done();
// }
// catch (e) {
// done(e);
// }
// finally {
// vorpal.delimiter(delimiter);
// process.argv = argv;
// }
// });
// sinon.stub(process, 'exit');
// cmd.init(vorpal);
// process.argv = ['node', 'm365', 'mock-command', '--option3', '00123'];
// vorpal.parse(['node', 'm365', 'mock-command', '--option3', '00123']);
// });
// it('removes leading zeroes from known options that aren\'t a string', (done) => {
// const cmd = new MockCommand1();
// const delimiter = (vorpal as any)._delimiter;
// const argv = process.argv;
// vorpal.delimiter('');
// sinon.stub(cmd as any, 'initAction').callsFake((args) => {
// try {
// assert.strictEqual(args.options.option1, 123);
// done();
// }
// catch (e) {
// done(e);
// }
// finally {
// vorpal.delimiter(delimiter);
// process.argv = argv;
// }
// });
// sinon.stub(process, 'exit');
// cmd.init(vorpal);
// process.argv = ['node', 'm365', 'mock-command', '--option1', '00123'];
// vorpal.parse(['node', 'm365', 'mock-command', '--option1', '00123']);
// });
// it('doesn\'t remove leading zeroes from known options that are a string', (done) => {
// const cmd = new MockCommand1();
// const delimiter = (vorpal as any)._delimiter;
// const argv = process.argv;
// vorpal.delimiter('');
// sinon.stub(cmd as any, 'initAction').callsFake((args) => {
// try {
// assert.strictEqual(args.options.option2, '00123');
// done();
// }
// catch (e) {
// done(e);
// }
// finally {
// vorpal.delimiter(delimiter);
// process.argv = argv;
// }
// });
// sinon.stub(process, 'exit');
// cmd.init(vorpal);
// process.argv = ['node', 'm365', 'mock-command', '--option2', '00123'];
// vorpal.parse(['node', 'm365', 'mock-command', '--option2', '00123']);
// });
// it('doesn\'t remove leading zeroes from unknown options where no types specified', (done) => {
// const cmd = new MockCommand4();
// const delimiter = (vorpal as any)._delimiter;
// const argv = process.argv;
// vorpal.delimiter('');
// sinon.stub(cmd as any, 'initAction').callsFake((args) => {
// try {
// assert.strictEqual(args.options.option1, '00123');
// done();
// }
// catch (e) {
// done(e);
// }
// finally {
// vorpal.delimiter(delimiter);
// process.argv = argv;
// }
// });
// sinon.stub(process, 'exit');
// cmd.init(vorpal);
// process.argv = ['node', 'm365', 'mock-command', '--option1', '00123'];
// vorpal.parse(['node', 'm365', 'mock-command', '--option1', '00123']);
// });
// it('removes leading zeroes from known options when the command doesn\'t support unknown options', (done) => {
// const cmd = new MockCommand3();
// const delimiter = (vorpal as any)._delimiter;
// const argv = process.argv;
// vorpal.delimiter('');
// sinon.stub(cmd as any, 'initAction').callsFake((args) => {
// try {
// assert.strictEqual(args.options.option1, 123);
// done();
// }
// catch (e) {
// done(e);
// }
// finally {
// vorpal.delimiter(delimiter);
// process.argv = argv;
// }
// });
// sinon.stub(process, 'exit');
// cmd.init(vorpal);
// process.argv = ['node', 'm365', 'mock-command', '--option1', '00123'];
// vorpal.parse(['node', 'm365', 'mock-command', '--option1', '00123']);
// });
it('correctly handles error when instance of error returned from the promise', (done) => {
const cmd = new MockCommand3();
(cmd as any).handleRejectedODataPromise(new Error('An error has occurred'), undefined, (msg: any): void => {
try {
assert.equal(JSON.stringify(msg), JSON.stringify(new CommandError('An error has occurred')));
done();
}
catch (e) {
done(e);
}
});
});
it('correctly handles graph response (code) from the promise', (done) => {
const errorMessage = "forbidden-message";
const errorCode = "Access Denied";
const cmd = new MockCommand3();
(cmd as any).handleRejectedODataPromise({ error: { error: { message: errorMessage, code: errorCode } } }, undefined, (msg: any): void => {
try {
assert.equal(JSON.stringify(msg), JSON.stringify(new CommandError(errorCode + " - " + errorMessage)));
done();
}
catch (e) {
done(e);
}
});
});
it('correctly handles graph response error (without code) from the promise', (done) => {
const errorMessage = "forbidden-message";
const cmd = new MockCommand3();
(cmd as any).handleRejectedODataPromise({ error: { error: { message: errorMessage } } }, undefined, (msg: any): void => {
try {
assert.equal(JSON.stringify(msg), JSON.stringify(new CommandError(errorMessage)));
done();
}
catch (e) {
done(e);
}
});
});
it('tracks the use of unknown options in telemetry', () => {
const command = new MockCommand1();
const actual = {
prop1: true
};
const expected = JSON.stringify({
prop1: true,
// this is expected, because we're not tracking the actual value but rather
// whether the property is used or not, so the tracked value for an unknown
// property will be always true
Prop2: true
});
command.trackUnknownOptionsPublic(actual, { Prop2: false });
assert.equal(JSON.stringify(actual), expected);
});
it('adds unknown options to payload', () => {
const command = new MockCommand1();
const actual = {
prop1: true
};
const expected = JSON.stringify({
prop1: true,
Prop2: false
});
command.addUnknownOptionsToPayloadPublic(actual, { Prop2: false })
assert.equal(JSON.stringify(actual), expected);
});
}); | MockCommand4 |
oct.rs | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp::Ordering;
use std::fmt;
use common_datavalues::prelude::*;
use common_exception::ErrorCode;
use common_exception::Result;
use crate::scalars::cast_column_field;
use crate::scalars::Function;
use crate::scalars::FunctionDescription;
use crate::scalars::FunctionFeatures;
trait OctString {
fn oct_string(self) -> String;
}
impl OctString for i64 {
fn oct_string(self) -> String {
match self.cmp(&0) {
Ordering::Less => {
format!("-0{:o}", self.unsigned_abs())
}
Ordering::Equal => "0".to_string(),
Ordering::Greater => format!("0{:o}", self),
}
}
}
impl OctString for u64 {
fn oct_string(self) -> String {
if self == 0 {
"0".to_string()
} else {
format!("0{:o}", self)
}
}
}
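// Sanity sketch for the formatting rules above (values picked here, not from
// the original tests): zero prints bare, non-zero values gain a leading 0,
// and negative values carry the sign in front of it.
#[cfg(test)]
mod oct_string_tests {
    use super::OctString;

    #[test]
    fn formats_signed_and_unsigned() {
        assert_eq!(0i64.oct_string(), "0");
        assert_eq!(8i64.oct_string(), "010");
        assert_eq!((-8i64).oct_string(), "-010");
        assert_eq!(8u64.oct_string(), "010");
    }
}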
#[derive(Clone)]
pub struct OctFunction {
_display_name: String,
}
impl OctFunction {
pub fn try_create(display_name: &str) -> Result<Box<dyn Function>> {
Ok(Box::new(OctFunction {
_display_name: display_name.to_string(),
}))
}
pub fn desc() -> FunctionDescription {
FunctionDescription::creator(Box::new(Self::try_create))
.features(FunctionFeatures::default().deterministic().num_arguments(1))
}
}
impl Function for OctFunction {
fn name(&self) -> &str {
"oct"
}
fn return_type(&self, args: &[&DataTypePtr]) -> Result<DataTypePtr> {
if !args[0].data_type_id().is_numeric() {
return Err(ErrorCode::IllegalDataType(format!(
"Expected integer but got {}",
args[0].data_type_id()
)));
}
Ok(StringType::arc())
}
fn eval(&self, columns: &ColumnsWithField, input_rows: usize) -> Result<ColumnRef> {
let mut builder: ColumnBuilder<Vu8> = ColumnBuilder::with_capacity(input_rows);
match columns[0].data_type().data_type_id() {
TypeID::UInt8 | TypeID::UInt16 | TypeID::UInt32 | TypeID::UInt64 => {
let col = cast_column_field(&columns[0], &UInt64Type::arc())?;
let col = col.as_any().downcast_ref::<UInt64Column>().unwrap();
for val in col.iter() {
builder.append(val.oct_string().as_bytes());
}
}
_ => {
let col = cast_column_field(&columns[0], &Int64Type::arc())?;
let col = col.as_any().downcast_ref::<Int64Column>().unwrap();
for val in col.iter() {
builder.append(val.oct_string().as_bytes());
}
}
}
Ok(builder.build(input_rows))
}
}
| fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "OCT")
}
} | impl fmt::Display for OctFunction { |
create-thread.component.ts | import { Component, OnInit, Inject } from '@angular/core';
import {FourmServiceService} from '../service/fourm-service.service';
//import { EventEmitter } from 'protractor';
import {Forum} from '../models/forum-thread';
import { MatDialogRef,MAT_DIALOG_DATA} from '@angular/material/dialog';
import { AngularEditorConfig } from '@kolkov/angular-editor';
import { ActivatedRoute,Router } from '@angular/router';
import { Types } from '../models/forumType';
@Component({
selector: 'app-create-thread',
templateUrl: './create-thread.component.html',
styleUrls: ['./create-thread.component.css']
})
export class CreateThreadComponent implements OnInit {
attempted = true;
user = JSON.parse(localStorage.getItem('user'));
forumTypes:string[];
type : string;
formControls = this.forumService.form.controls;
formControlsT = this.forumService.formType.controls;
formControlsU = this.forumService.updateForm.controls;
public threadList: any;
flag = true;
image: string = '';
newForum = false;
newThread = false;
updateForum = false;
toppingList: string[] = ['Rajitha Gayashan', 'Nipuna Sarachchandra', 'Pasindu Bhashitha', 'Sasika Nawarathna', 'Vihaga Shamal', 'Tharindu Madhusanka'];
tag = ''
updateThread : any;
constructor( public forumService : FourmServiceService,
private matdialogRef:MatDialogRef<CreateThreadComponent>,
@Inject(MAT_DIALOG_DATA) public data: any,
public route : ActivatedRoute, private router : Router) {} |
try{
console.log(this.data)
this.type = this.data.type;
      this.forumTypes = this.data.types;
      this.image = this.data.threadImage;
      this.newForum = this.data.newForum;
      this.newThread = this.data.newThread;
      this.updateForum = this.data.updateForum;
      console.log(this.newForum, this.newThread, this.updateForum);
}catch{
console.log("this is not error");
}
if(this.type){
this.flag = !this.flag;
}
this.forumService.getForumtypes();
}
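  // Reads the chosen file with a FileReader and keeps it as a base64 data
  // URL in `this.image`, so it can be embedded directly in the thread payload.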
uplodeImage(event){
const img = (event.target as HTMLInputElement).files[0];
const reader = new FileReader();
reader.onload = () => {
this.image = reader.result as string;
// console.log(this.image)
};
reader.readAsDataURL(img);
// console.log(img)
// console.log(event)
}
onSubmit(){
if(!this.formControls._id.value){
const emp : Forum = {
_id : null,
title : this.formControls.title.value,
body : this.formControls.body.value,
type : this.formControls.type.value,
image : this.image,
timestamps: new Date(),
views: 0,
owner:this.user.id,
timeAgo:'',
replies:0,
votes:0,
status : false
}
this.forumService.regForum(emp).subscribe(()=>{
this.forumService.form.reset();
this.getThreds();
this.forumService.success("Submited Successfully")
});
this.onNoClick();
}else{
console.log(this.formControls._id.value)
const updateThread = {
title : this.formControls.title.value,
body : this.formControls.body.value,
image : this.image
}
this.forumService.updateThread(this.formControls._id.value , updateThread).subscribe(()=>{
this.forumService.form.reset();
this.getThreds();
this.forumService.success("Updated Successfully")
})
}
}
newType(){
if(!this.formControlsT._id.value){
const type : Types = {
_id : null,
forumOwner : this.formControlsT.forumOwner.value,
description : this.formControlsT.description.value,
type : this.formControlsT.type.value,
teachers : this.formControlsT.teachers.value,
}
// console.log(type)
if(confirm('Are you sure to add this Forum type?')){
this.forumService.setType(type).subscribe(res=>{
// console.log(res)
this.forumService.formType.reset();
this.forumService.success("New Forum Type Successfully created!");
window.location.reload();
})
this.matdialogRef.afterClosed().subscribe(result => {
this.forumService.formType.reset()
})
this.matdialogRef.close();
}
}
}
updateF(){
if(this.formControlsU._id.value){
const updatedForum = {
description : this.formControlsU.description.value,
teachers : this.formControlsU.teachers.value
}
if(confirm('Are you sure to Update this Forum?')){
this.forumService.updateForum(this.formControlsU._id.value, updatedForum).subscribe(res=>{
// console.log(res)
this.forumService.updateForm.reset();
this.forumService.success("Forum is Successfully updated!");
window.location.reload();
})
this.matdialogRef.afterClosed().subscribe(result => {
this.forumService.updateForm.reset()
})
this.matdialogRef.close();
}
}
}
onNoClick(): void {
this.forumService.form.reset();
this.matdialogRef.close();
}
getThreds(){
this.forumService.getAll().subscribe((res)=>{
this.threadList = res;
// this.childEvent.emit(this.threadList);
//console.log(this.threadList)
});
}
onCheck(type : string){
if(!type){
return;
}
let count = 0 ;
for(let i in this.forumTypes){
// console.log(i)
if(this.forumTypes[i] != type){
// console.log(this.forumTypes[i])
continue
}
else{
count = count + 1;
// console.log(count)
}
}
// console.log(count)
if(count){
this.flag = false;
// console.log(this.flag);
}
}
editorConfig: AngularEditorConfig = {
editable: true,
spellcheck: true,
height: '150px',
minHeight: '0',
maxHeight: 'auto',
width: 'auto',
minWidth: '0',
translate: 'yes',
enableToolbar: true,
showToolbar: true,
placeholder: 'providing suppoting details or context...',
defaultParagraphSeparator: '',
defaultFontName: '',
defaultFontSize: '',
uploadUrl: '',
fonts: [
{class: 'arial', name: 'Arial'},
{class: 'times-new-roman', name: 'Times New Roman'},
{class: 'calibri', name: 'Calibri'},
{class: 'comic-sans-ms', name: 'Comic Sans MS'}
],
customClasses: [
{
name: 'quote',
class: 'quote',
},
{
name: 'redText',
class: 'redText'
},
{
name: 'titleText',
class: 'titleText',
tag: 'h1',
},
],
sanitize: true,
toolbarPosition: 'bottom',
toolbarHiddenButtons: [
[
'redo',
'strikeThrough',
'subscript',
'superscript',
'justifyLeft',
'justifyCenter',
'justifyRight',
'justifyFull',
'indent',
'outdent',
'heading',
'fontName'
],
[
'textColor',
'backgroundColor',
'customClasses',
'unlink',
'insertImage',
'insertVideo',
'insertHorizontalRule',
'removeFormat'
]
]
};
} |
ngOnInit(): void { |
rocksdb.go | // Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Spencer Kimball ([email protected])
// Author: Andrew Bonventre ([email protected])
// Author: Tobias Schottdorf ([email protected])
// Author: Jiang-Ming Yang ([email protected])
package engine
import (
"bytes"
"fmt"
"io/ioutil"
"math"
"os"
"path/filepath"
"runtime"
"runtime/debug"
"sort"
"sync"
"time"
"unsafe"
"golang.org/x/net/context"
"github.com/dustin/go-humanize"
"github.com/elastic/gosigar"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage/engine/enginepb"
"github.com/cockroachdb/cockroach/pkg/storage/engine/rocksdb"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)
// #cgo darwin LDFLAGS: -Wl,-undefined -Wl,dynamic_lookup
// #cgo !darwin LDFLAGS: -Wl,-unresolved-symbols=ignore-all
// #cgo linux LDFLAGS: -lrt
//
// #include <stdlib.h>
// #include "rocksdb/db.h"
import "C"
const (
defaultBlockSize = 32 << 10 // 32KB (rocksdb default is 4KB)
// DefaultMaxOpenFiles is the default value for rocksDB's max_open_files
// option.
DefaultMaxOpenFiles = -1
// RecommendedMaxOpenFiles is the recommended value for rocksDB's
	// max_open_files option. If more file descriptors are available than the
	// recommended number, then the default value is used.
RecommendedMaxOpenFiles = 10000
	// MinimumMaxOpenFiles is the minimum value that rocksDB's max_open_files
// option can be set to.
MinimumMaxOpenFiles = 2000
)
func init() {
rocksdb.Logger = func(format string, args ...interface{}) { log.Infof(context.TODO(), format, args...) }
}
// SSTableInfo contains metadata about a single RocksDB sstable. This mirrors
// the C.DBSSTable struct contents.
type SSTableInfo struct {
Level int
Size int64
Start MVCCKey
End MVCCKey
}
// SSTableInfos is a slice of SSTableInfo structures.
type SSTableInfos []SSTableInfo
func (s SSTableInfos) Len() int {
return len(s)
}
func (s SSTableInfos) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
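// Less orders sstables by ascending level, then by descending size within a
// level, and finally by start key, so the largest tables sort first in each level.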
func (s SSTableInfos) Less(i, j int) bool {
switch {
case s[i].Level < s[j].Level:
return true
case s[i].Level > s[j].Level:
return false
case s[i].Size > s[j].Size:
return true
case s[i].Size < s[j].Size:
return false
default:
return s[i].Start.Less(s[j].Start)
}
}
func (s SSTableInfos) String() string {
const (
KB = 1 << 10
MB = 1 << 20
GB = 1 << 30
TB = 1 << 40
)
roundTo := func(val, to int64) int64 {
return (val + to/2) / to
}
// We're intentionally not using humanizeutil here as we want a slightly more
// compact representation.
humanize := func(size int64) string {
switch {
case size < MB:
return fmt.Sprintf("%dK", roundTo(size, KB))
case size < GB:
return fmt.Sprintf("%dM", roundTo(size, MB))
case size < TB:
return fmt.Sprintf("%dG", roundTo(size, GB))
default:
return fmt.Sprintf("%dT", roundTo(size, TB))
}
}
type levelInfo struct {
size int64
count int
}
var levels []*levelInfo
for _, t := range s {
for i := len(levels); i <= t.Level; i++ {
levels = append(levels, &levelInfo{})
}
info := levels[t.Level]
info.size += t.Size
info.count++
}
var maxSize int
var maxLevelCount int
for _, info := range levels {
size := len(humanize(info.size))
if maxSize < size {
maxSize = size
}
count := 1 + int(math.Log10(float64(info.count)))
if maxLevelCount < count {
maxLevelCount = count
}
}
levelFormat := fmt.Sprintf("%%d [ %%%ds %%%dd ]:", maxSize, maxLevelCount)
level := -1
var buf bytes.Buffer
var lastSize string
var lastSizeCount int
flushLastSize := func() {
if lastSizeCount > 0 {
fmt.Fprintf(&buf, " %s", lastSize)
if lastSizeCount > 1 {
fmt.Fprintf(&buf, "[%d]", lastSizeCount)
}
lastSizeCount = 0
}
}
maybeFlush := func(newLevel, i int) {
if level == newLevel {
return
}
flushLastSize()
if buf.Len() > 0 {
buf.WriteString("\n")
}
level = newLevel
if level >= 0 {
info := levels[level]
fmt.Fprintf(&buf, levelFormat, level, humanize(info.size), info.count)
}
}
for i, t := range s {
maybeFlush(t.Level, i)
size := humanize(t.Size)
if size == lastSize {
lastSizeCount++
} else {
flushLastSize()
lastSize = size
lastSizeCount = 1
}
}
maybeFlush(-1, 0)
return buf.String()
}
// ReadAmplification returns RocksDB's read amplification, which is the number
// of level-0 sstables plus the number of levels, other than level 0, with at
// least one sstable.
//
// This definition comes from here:
// https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide#level-style-compaction
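//
// For example (illustrative): three level-0 sstables plus populated L2 and
// L4 levels yield a read amplification of 3 + 1 + 1 = 5.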
func (s SSTableInfos) ReadAmplification() int {
var readAmp int
seenLevel := make(map[int]bool)
for _, t := range s {
if t.Level == 0 {
readAmp++
} else if !seenLevel[t.Level] {
readAmp++
seenLevel[t.Level] = true
}
}
return readAmp
}
// RocksDBCache is a wrapper around C.DBCache
type RocksDBCache struct {
cache *C.DBCache
}
// NewRocksDBCache creates a new cache of the specified size. Note that the
// cache is refcounted internally and starts out with a refcount of one (i.e.
// Release() should be called after having used the cache).
func NewRocksDBCache(cacheSize int64) RocksDBCache {
return RocksDBCache{cache: C.DBNewCache(C.uint64_t(cacheSize))}
}
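// Illustrative usage sketch (sizes and variable names are arbitrary
// examples): the creator holds the initial reference and releases it after
// handing the cache to the engines that share it.
//
//	cache := NewRocksDBCache(1 << 30) // 1 GiB shared block cache
//	defer cache.Release()
//	db, err := NewRocksDB(attrs, dir, cache, maxSize, DefaultMaxOpenFiles)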
func (c RocksDBCache) ref() RocksDBCache {
if c.cache != nil {
c.cache = C.DBRefCache(c.cache)
}
return c
}
// Release releases the cache. Note that the cache will continue to be used
// until all of the RocksDB engines it was attached to have been closed, and
// that RocksDB engines which use it auto-release when they close.
func (c RocksDBCache) Release() {
if c.cache != nil {
C.DBReleaseCache(c.cache)
}
}
// RocksDB is a wrapper around a RocksDB database instance.
type RocksDB struct {
rdb *C.DBEngine
attrs roachpb.Attributes // Attributes for this engine
dir string // The data directory
cache RocksDBCache // Shared cache.
maxSize int64 // Used for calculating rebalancing and free space.
maxOpenFiles int // The maximum number of open files this instance will use.
deallocated chan struct{} // Closed when the underlying handle is deallocated.
}
var _ Engine = &RocksDB{}
// NewRocksDB allocates and returns a new RocksDB object.
// This creates options and opens the database. If the database
// doesn't yet exist at the specified directory, one is initialized
// from scratch.
// The caller must call the engine's Close method when the engine is no longer
// needed.
func NewRocksDB(
attrs roachpb.Attributes, dir string, cache RocksDBCache, maxSize int64, maxOpenFiles int,
) (*RocksDB, error) {
if dir == "" {
panic("dir must be non-empty")
}
r := &RocksDB{
attrs: attrs,
dir: dir,
cache: cache.ref(),
maxSize: maxSize,
maxOpenFiles: maxOpenFiles,
deallocated: make(chan struct{}),
}
if err := r.open(); err != nil {
return nil, err
}
return r, nil
}
func newMemRocksDB(attrs roachpb.Attributes, cache RocksDBCache, maxSize int64) (*RocksDB, error) {
r := &RocksDB{
attrs: attrs,
// dir: empty dir == "mem" RocksDB instance.
cache: cache.ref(),
maxSize: maxSize,
deallocated: make(chan struct{}),
}
if err := r.open(); err != nil {
return nil, err
}
return r, nil
}
// String formatter.
func (r *RocksDB) String() string {
return fmt.Sprintf("%s=%s", r.attrs.Attrs, r.dir)
}
func (r *RocksDB) open() error {
var ver storageVersion
if len(r.dir) != 0 {
log.Infof(context.TODO(), "opening rocksdb instance at %q", r.dir)
// Check the version number.
var err error
if ver, err = getVersion(r.dir); err != nil {
return err
}
if ver < versionMinimum || ver > versionCurrent {
// Instead of an error, we should call a migration if possible when
// one is needed immediately following the DBOpen call.
return fmt.Errorf("incompatible rocksdb data version, current:%d, on disk:%d, minimum:%d",
versionCurrent, ver, versionMinimum)
}
} else {
log.Infof(context.TODO(), "opening in memory rocksdb instance")
// In memory dbs are always current.
ver = versionCurrent
}
blockSize := envutil.EnvOrDefaultBytes("COCKROACH_ROCKSDB_BLOCK_SIZE", defaultBlockSize)
walTTL := envutil.EnvOrDefaultDuration("COCKROACH_ROCKSDB_WAL_TTL", 0).Seconds()
status := C.DBOpen(&r.rdb, goToCSlice([]byte(r.dir)),
C.DBOptions{
cache: r.cache.cache,
block_size: C.uint64_t(blockSize),
wal_ttl_seconds: C.uint64_t(walTTL),
allow_os_buffer: C.bool(true),
logging_enabled: C.bool(log.V(3)),
num_cpu: C.int(runtime.NumCPU()),
max_open_files: C.int(r.maxOpenFiles),
})
if err := statusToError(status); err != nil {
return errors.Errorf("could not open rocksdb instance: %s", err)
}
// Update or add the version file if needed.
if ver < versionCurrent {
if err := writeVersionFile(r.dir); err != nil {
return err
}
}
// Start a goroutine that will finish when the underlying handle
// is deallocated. This is used to check for leaks in tests.
go func() {
<-r.deallocated
}()
return nil
}
// Close closes the database by deallocating the underlying handle.
func (r *RocksDB) Close() {
if r.rdb == nil {
log.Errorf(context.TODO(), "closing unopened rocksdb instance")
return
}
if len(r.dir) == 0 {
if log.V(1) {
log.Infof(context.TODO(), "closing in-memory rocksdb instance")
}
} else {
log.Infof(context.TODO(), "closing rocksdb instance at %q", r.dir)
}
if r.rdb != nil {
C.DBClose(r.rdb)
r.rdb = nil
}
r.cache.Release()
close(r.deallocated)
}
// closed returns true if the engine is closed.
func (r *RocksDB) closed() bool {
return r.rdb == nil
}
// Attrs returns the list of attributes describing this engine. This
// may include a specification of disk type (e.g. hdd, ssd, fio, etc.)
// and potentially other labels to identify important attributes of
// the engine.
func (r *RocksDB) Attrs() roachpb.Attributes {
return r.attrs
}
// Put sets the given key to the value provided.
//
// The key and value byte slices may be reused safely. put takes a copy of
// them before returning.
func (r *RocksDB) Put(key MVCCKey, value []byte) error {
return dbPut(r.rdb, key, value)
}
// Merge implements the RocksDB merge operator using the function goMergeInit
// to initialize missing values and goMerge to merge the old and the given
// value into a new value, which is then stored under key.
// Currently 64-bit counter logic is implemented. See the documentation of
// goMerge and goMergeInit for details.
//
// The key and value byte slices may be reused safely. merge takes a copy
// of them before returning.
func (r *RocksDB) Merge(key MVCCKey, value []byte) error {
return dbMerge(r.rdb, key, value)
}
// ApplyBatchRepr atomically applies a set of batched updates. Created by
// calling Repr() on a batch. Using this method is equivalent to constructing
// and committing a batch whose Repr() equals repr.
func (r *RocksDB) ApplyBatchRepr(repr []byte) error {
return dbApplyBatchRepr(r.rdb, repr)
}
// Get returns the value for the given key.
func (r *RocksDB) Get(key MVCCKey) ([]byte, error) {
return dbGet(r.rdb, key)
}
// GetProto fetches the value at the specified key and unmarshals it.
func (r *RocksDB) GetProto(
key MVCCKey, msg proto.Message,
) (ok bool, keyBytes, valBytes int64, err error) {
return dbGetProto(r.rdb, key, msg)
}
// Clear removes the item from the db with the given key.
func (r *RocksDB) Clear(key MVCCKey) error {
return dbClear(r.rdb, key)
}
// Iterate iterates from start to end keys, invoking f on each
// key/value pair. See engine.Iterate for details.
func (r *RocksDB) Iterate(start, end MVCCKey, f func(MVCCKeyValue) (bool, error)) error {
return dbIterate(r.rdb, r, start, end, f)
}
// Capacity queries the underlying file system for disk capacity information.
func (r *RocksDB) Capacity() (roachpb.StoreCapacity, error) {
fileSystemUsage := gosigar.FileSystemUsage{}
dir := r.dir
if dir == "" {
// This is an in-memory instance. Pretend we're empty since we
// don't know better and only use this for testing. Using any
// part of the actual file system here can throw off allocator
// rebalancing in a hard-to-trace manner. See #7050.
return roachpb.StoreCapacity{
Capacity: r.maxSize,
Available: r.maxSize,
}, nil
}
if err := fileSystemUsage.Get(dir); err != nil {
return roachpb.StoreCapacity{}, err
}
if fileSystemUsage.Total > math.MaxInt64 {
return roachpb.StoreCapacity{}, fmt.Errorf("unsupported disk size %s, max supported size is %s",
humanize.IBytes(fileSystemUsage.Total), humanizeutil.IBytes(math.MaxInt64))
}
if fileSystemUsage.Avail > math.MaxInt64 {
return roachpb.StoreCapacity{}, fmt.Errorf("unsupported disk size %s, max supported size is %s",
humanize.IBytes(fileSystemUsage.Avail), humanizeutil.IBytes(math.MaxInt64))
}
fsuTotal := int64(fileSystemUsage.Total)
fsuAvail := int64(fileSystemUsage.Avail)
// If no size limitation has been placed on the store size or if the
// limitation is greater than what's available, just return the actual
// totals.
if r.maxSize == 0 || r.maxSize >= fsuTotal || r.dir == "" {
return roachpb.StoreCapacity{
Capacity: fsuTotal,
Available: fsuAvail,
}, nil
}
// Find the total size of all the files in the r.dir and all its
// subdirectories.
var totalUsedBytes int64
if errOuter := filepath.Walk(r.dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return nil
}
if info.Mode().IsRegular() {
totalUsedBytes += info.Size()
}
return nil
}); errOuter != nil {
return roachpb.StoreCapacity{}, errOuter
}
available := r.maxSize - totalUsedBytes
if available > fsuAvail {
available = fsuAvail
}
if available < 0 {
available = 0
}
return roachpb.StoreCapacity{
Capacity: r.maxSize,
Available: available,
}, nil
}
// Compact forces compaction on the database.
func (r *RocksDB) Compact() error {
return statusToError(C.DBCompact(r.rdb))
}
// Destroy destroys the underlying filesystem data associated with the database.
func (r *RocksDB) Destroy() error {
return statusToError(C.DBDestroy(goToCSlice([]byte(r.dir))))
}
// Flush causes RocksDB to write all in-memory data to disk immediately.
func (r *RocksDB) Flush() error {
return statusToError(C.DBFlush(r.rdb))
}
// NewIterator returns an iterator over this rocksdb engine.
func (r *RocksDB) NewIterator(prefix bool) Iterator {
return newRocksDBIterator(r.rdb, prefix, r)
}
// NewSnapshot creates a snapshot handle from engine and returns a
// read-only rocksDBSnapshot engine.
func (r *RocksDB) NewSnapshot() Reader {
if r.rdb == nil {
panic("RocksDB is not initialized yet")
}
return &rocksDBSnapshot{
parent: r,
handle: C.DBNewSnapshot(r.rdb),
}
}
// NewBatch returns a new batch wrapping this rocksdb engine.
func (r *RocksDB) NewBatch() Batch {
return newRocksDBBatch(r, false /* writeOnly */)
}
// NewWriteOnlyBatch returns a new write-only batch wrapping this rocksdb
// engine.
func (r *RocksDB) NewWriteOnlyBatch() Batch {
return newRocksDBBatch(r, true /* writeOnly */)
}
// GetSSTables retrieves metadata about this engine's live sstables.
func (r *RocksDB) GetSSTables() SSTableInfos {
var n C.int
tables := C.DBGetSSTables(r.rdb, &n)
// We can't index into tables because it is a pointer, not a slice. The
// hackery below treats the pointer as an array and then constructs a slice
// from it.
tablesPtr := uintptr(unsafe.Pointer(tables))
tableSize := unsafe.Sizeof(C.DBSSTable{})
tableVal := func(i int) C.DBSSTable {
return *(*C.DBSSTable)(unsafe.Pointer(tablesPtr + uintptr(i)*tableSize))
}
res := make(SSTableInfos, n)
for i := range res {
r := &res[i]
tv := tableVal(i)
r.Level = int(tv.level)
r.Size = int64(tv.size)
r.Start = cToGoKey(tv.start_key)
r.End = cToGoKey(tv.end_key)
if ptr := tv.start_key.key.data; ptr != nil {
C.free(unsafe.Pointer(ptr))
}
if ptr := tv.end_key.key.data; ptr != nil {
C.free(unsafe.Pointer(ptr))
}
}
C.free(unsafe.Pointer(tables))
sort.Sort(res)
return res
}
// getUserProperties fetches the user properties stored in each sstable's
// metadata.
func (r *RocksDB) getUserProperties() (enginepb.SSTUserPropertiesCollection, error) {
buf := cStringToGoBytes(C.DBGetUserProperties(r.rdb))
var ssts enginepb.SSTUserPropertiesCollection
if err := ssts.Unmarshal(buf); err != nil {
return enginepb.SSTUserPropertiesCollection{}, err
}
if ssts.Error != "" {
return enginepb.SSTUserPropertiesCollection{}, errors.New(ssts.Error)
}
return ssts, nil
}
// GetStats retrieves stats from this engine's RocksDB instance and
// returns them in a new instance of Stats.
func (r *RocksDB) GetStats() (*Stats, error) {
var s C.DBStatsResult
if err := statusToError(C.DBGetStats(r.rdb, &s)); err != nil {
return nil, err
}
return &Stats{
BlockCacheHits: int64(s.block_cache_hits),
BlockCacheMisses: int64(s.block_cache_misses),
BlockCacheUsage: int64(s.block_cache_usage),
BlockCachePinnedUsage: int64(s.block_cache_pinned_usage),
BloomFilterPrefixChecked: int64(s.bloom_filter_prefix_checked),
BloomFilterPrefixUseful: int64(s.bloom_filter_prefix_useful),
MemtableHits: int64(s.memtable_hits),
MemtableMisses: int64(s.memtable_misses),
MemtableTotalSize: int64(s.memtable_total_size),
Flushes: int64(s.flushes),
Compactions: int64(s.compactions),
TableReadersMemEstimate: int64(s.table_readers_mem_estimate),
}, nil
}
type rocksDBSnapshot struct {
parent *RocksDB
handle *C.DBEngine
}
// Close releases the snapshot handle.
func (r *rocksDBSnapshot) Close() {
C.DBClose(r.handle)
r.handle = nil
}
// closed returns true if the engine is closed.
func (r *rocksDBSnapshot) closed() bool {
return r.handle == nil
}
// Get returns the value for the given key using the snapshot handle,
// or nil if the key is not present.
func (r *rocksDBSnapshot) Get(key MVCCKey) ([]byte, error) {
return dbGet(r.handle, key)
}
func (r *rocksDBSnapshot) GetProto(
key MVCCKey, msg proto.Message,
) (ok bool, keyBytes, valBytes int64, err error) {
return dbGetProto(r.handle, key, msg)
}
// Iterate iterates over the keys between start inclusive and end
// exclusive, invoking f() on each key/value pair using the snapshot
// handle.
func (r *rocksDBSnapshot) Iterate(start, end MVCCKey, f func(MVCCKeyValue) (bool, error)) error {
return dbIterate(r.handle, r, start, end, f)
}
// NewIterator returns a new instance of an Iterator over the
// engine using the snapshot handle.
func (r *rocksDBSnapshot) NewIterator(prefix bool) Iterator {
return newRocksDBIterator(r.handle, prefix, r)
}
// reusableIterator wraps rocksDBIterator and allows reuse of an iterator
// for the lifetime of a batch.
type reusableIterator struct {
rocksDBIterator
inuse bool
}
func (r *reusableIterator) Close() {
// reusableIterator.Close() leaves the underlying rocksdb iterator open until
// the associated batch is closed.
if !r.inuse {
panic("closing idle iterator")
}
r.inuse = false
}
type distinctBatch struct {
*rocksDBBatch
prefixIter reusableIterator
normalIter reusableIterator
}
func (r *distinctBatch) Close() {
if !r.distinctOpen {
panic("distinct batch not open")
}
r.distinctOpen = false
}
// NewIterator returns an iterator over the batch and underlying engine. Note
// that the returned iterator is cached and re-used for the lifetime of the
// batch. A panic will be thrown if multiple prefix or normal (non-prefix)
// iterators are used simultaneously on the same batch.
func (r *distinctBatch) NewIterator(prefix bool) Iterator {
// Use the cached iterator, creating it on first access.
iter := &r.normalIter
if prefix {
iter = &r.prefixIter
}
if iter.rocksDBIterator.iter == nil {
iter.rocksDBIterator.init(r.batch, prefix, r)
}
if iter.inuse {
panic("iterator already in use")
}
iter.inuse = true
return iter
}
func (r *distinctBatch) Get(key MVCCKey) ([]byte, error) {
return dbGet(r.batch, key)
}
func (r *distinctBatch) GetProto(
key MVCCKey, msg proto.Message,
) (ok bool, keyBytes, valBytes int64, err error) {
return dbGetProto(r.batch, key, msg)
}
func (r *distinctBatch) Iterate(start, end MVCCKey, f func(MVCCKeyValue) (bool, error)) error {
return dbIterate(r.batch, r, start, end, f)
}
func (r *distinctBatch) Put(key MVCCKey, value []byte) error {
r.builder.Put(key, value)
return nil
}
func (r *distinctBatch) Merge(key MVCCKey, value []byte) error {
r.builder.Merge(key, value)
return nil
}
func (r *distinctBatch) Clear(key MVCCKey) error {
r.builder.Clear(key)
return nil
}
func (r *distinctBatch) close() {
if i := &r.prefixIter.rocksDBIterator; i.iter != nil {
i.destroy()
}
if i := &r.normalIter.rocksDBIterator; i.iter != nil {
i.destroy()
}
}
// rocksDBBatchIterator wraps rocksDBIterator and allows reuse of an iterator
// for the lifetime of a batch.
type rocksDBBatchIterator struct {
iter rocksDBIterator
batch *rocksDBBatch
}
func (r *rocksDBBatchIterator) Close() {
// rocksDBBatchIterator.Close() leaves the underlying rocksdb iterator open
// until the associated batch is closed.
if r.batch == nil {
panic("closing idle iterator")
}
r.batch = nil
}
func (r *rocksDBBatchIterator) Seek(key MVCCKey) {
r.batch.flushMutations()
r.iter.Seek(key)
}
func (r *rocksDBBatchIterator) SeekReverse(key MVCCKey) {
r.batch.flushMutations()
r.iter.SeekReverse(key)
}
func (r *rocksDBBatchIterator) Valid() bool {
return r.iter.Valid()
}
func (r *rocksDBBatchIterator) Next() {
r.batch.flushMutations()
r.iter.Next()
}
func (r *rocksDBBatchIterator) Prev() {
r.batch.flushMutations()
r.iter.Prev()
}
func (r *rocksDBBatchIterator) NextKey() {
r.batch.flushMutations()
r.iter.NextKey()
}
func (r *rocksDBBatchIterator) PrevKey() {
r.batch.flushMutations()
r.iter.PrevKey()
}
func (r *rocksDBBatchIterator) ComputeStats(
start, end MVCCKey, nowNanos int64,
) (enginepb.MVCCStats, error) {
r.batch.flushMutations()
return r.iter.ComputeStats(start, end, nowNanos)
}
func (r *rocksDBBatchIterator) Key() MVCCKey {
return r.iter.Key()
}
func (r *rocksDBBatchIterator) Value() []byte {
return r.iter.Value()
}
func (r *rocksDBBatchIterator) ValueProto(msg proto.Message) error {
return r.iter.ValueProto(msg)
}
func (r *rocksDBBatchIterator) unsafeKey() MVCCKey {
return r.iter.unsafeKey()
}
func (r *rocksDBBatchIterator) unsafeValue() []byte {
return r.iter.unsafeValue()
}
func (r *rocksDBBatchIterator) Error() error {
return r.iter.Error()
}
func (r *rocksDBBatchIterator) Less(key MVCCKey) bool {
return r.iter.Less(key)
}
type rocksDBBatch struct {
parent *RocksDB
batch *C.DBEngine
flushes int
flushedCount int
flushedSize int
prefixIter rocksDBBatchIterator
normalIter rocksDBBatchIterator
builder RocksDBBatchBuilder
distinct distinctBatch
distinctOpen bool
distinctNeedsFlush bool
writeOnly bool
}
func newRocksDBBatch(parent *RocksDB, writeOnly bool) *rocksDBBatch {
r := &rocksDBBatch{
parent: parent,
batch: C.DBNewBatch(parent.rdb, C.bool(writeOnly)),
writeOnly: writeOnly,
}
r.distinct.rocksDBBatch = r
return r
}
func (r *rocksDBBatch) Close() {
r.distinct.close()
if i := &r.prefixIter.iter; i.iter != nil {
i.destroy()
}
if i := &r.normalIter.iter; i.iter != nil {
i.destroy()
}
if r.batch != nil {
C.DBClose(r.batch)
r.batch = nil
}
}
// closed returns true if the engine is closed.
func (r *rocksDBBatch) closed() bool {
return r.batch == nil
}
func (r *rocksDBBatch) Put(key MVCCKey, value []byte) error {
if r.distinctOpen {
panic("distinct batch open")
}
r.distinctNeedsFlush = true
r.builder.Put(key, value)
return nil
}
func (r *rocksDBBatch) Merge(key MVCCKey, value []byte) error {
if r.distinctOpen {
panic("distinct batch open")
}
r.distinctNeedsFlush = true
r.builder.Merge(key, value)
return nil
}
// ApplyBatchRepr atomically applies a set of batched updates to the current
// batch (the receiver).
func (r *rocksDBBatch) ApplyBatchRepr(repr []byte) error {
if r.distinctOpen {
panic("distinct batch open")
}
r.flushMutations()
r.flushes++ // make sure that Repr() doesn't take a shortcut
return dbApplyBatchRepr(r.batch, repr)
}
func (r *rocksDBBatch) Get(key MVCCKey) ([]byte, error) {
if r.writeOnly {
panic("write-only batch")
}
if r.distinctOpen {
panic("distinct batch open")
}
r.flushMutations()
return dbGet(r.batch, key)
}
func (r *rocksDBBatch) GetProto(
key MVCCKey, msg proto.Message,
) (ok bool, keyBytes, valBytes int64, err error) {
if r.writeOnly {
panic("write-only batch")
}
if r.distinctOpen {
panic("distinct batch open")
}
r.flushMutations()
return dbGetProto(r.batch, key, msg)
}
func (r *rocksDBBatch) Iterate(start, end MVCCKey, f func(MVCCKeyValue) (bool, error)) error {
if r.writeOnly {
panic("write-only batch")
}
if r.distinctOpen {
panic("distinct batch open")
}
r.flushMutations()
return dbIterate(r.batch, r, start, end, f)
}
func (r *rocksDBBatch) Clear(key MVCCKey) error {
if r.distinctOpen {
panic("distinct batch open")
}
r.distinctNeedsFlush = true
r.builder.Clear(key)
return nil
}
// NewIterator returns an iterator over the batch and underlying engine. Note
// that the returned iterator is cached and re-used for the lifetime of the
// batch. A panic will be thrown if multiple prefix or normal (non-prefix)
// iterators are used simultaneously on the same batch.
func (r *rocksDBBatch) NewIterator(prefix bool) Iterator {
if r.writeOnly {
panic("write-only batch")
}
if r.distinctOpen {
panic("distinct batch open")
}
// Use the cached iterator, creating it on first access.
iter := &r.normalIter
if prefix {
iter = &r.prefixIter
}
if iter.iter.iter == nil {
iter.iter.init(r.batch, prefix, r)
}
if iter.batch != nil {
panic("iterator already in use")
}
iter.batch = r
return iter
}
func (r *rocksDBBatch) Commit() error {
if r.closed() {
panic("this batch was already committed")
}
start := timeutil.Now()
var count, size int
r.distinctOpen = false
if r.flushes > 0 {
// We've previously flushed mutations to the C++ batch, so we have to flush
// any remaining mutations as well and then commit the batch.
r.flushMutations()
if err := statusToError(C.DBCommitAndCloseBatch(r.batch)); err != nil {
return err
}
r.batch = nil
count, size = r.flushedCount, r.flushedSize
} else if r.builder.count > 0 {
count, size = r.builder.count, len(r.builder.repr)
// Fast-path which avoids flushing mutations to the C++ batch. Instead, we
// directly apply the mutations to the database.
if err := r.parent.ApplyBatchRepr(r.builder.Finish()); err != nil {
return err
}
C.DBClose(r.batch)
r.batch = nil
}
const batchCommitWarnThreshold = 500 * time.Millisecond
if elapsed := timeutil.Since(start); elapsed >= batchCommitWarnThreshold {
log.Warningf(context.TODO(), "batch [%d/%d/%d] commit took %s (>%s):\n%s",
count, size, r.flushes, elapsed, batchCommitWarnThreshold, debug.Stack())
}
return nil
}
func (r *rocksDBBatch) Repr() []byte {
if r.flushes == 0 {
// We've never flushed to C++. Return the mutations only.
return r.builder.getRepr()
}
r.flushMutations()
return cSliceToGoBytes(C.DBBatchRepr(r.batch))
}
func (r *rocksDBBatch) Distinct() ReadWriter {
if r.distinctNeedsFlush {
r.flushMutations()
}
if r.distinctOpen {
panic("distinct batch already open")
}
r.distinctOpen = true
return &r.distinct
}
func (r *rocksDBBatch) flushMutations() {
if r.builder.count == 0 {
return
}
r.distinctNeedsFlush = false
r.flushes++
r.flushedCount += r.builder.count
r.flushedSize += len(r.builder.repr)
if err := r.ApplyBatchRepr(r.builder.Finish()); err != nil {
panic(err)
}
// Force a seek of the underlying iterator on the next Seek/SeekReverse.
r.prefixIter.iter.reseek = true
r.normalIter.iter.reseek = true
}
type rocksDBIterator struct {
engine Reader
iter *C.DBIterator
valid bool
reseek bool
key C.DBKey
value C.DBSlice
}
// TODO(peter): Is this pool useful now that rocksDBBatch.NewIterator doesn't
// allocate by returning internal pointers?
var iterPool = sync.Pool{
New: func() interface{} {
return &rocksDBIterator{}
},
}
// newRocksDBIterator returns a new iterator over the supplied RocksDB
// engine or snapshot handle.
// The caller must call rocksDBIterator.Close() when finished with the
// iterator to free up resources.
func newRocksDBIterator(rdb *C.DBEngine, prefix bool, engine Reader) Iterator {
// In order to prevent content displacement, caching is disabled
// when performing scans. Any options set within the shared read
// options field that should be carried over need to be set here
// as well.
r := iterPool.Get().(*rocksDBIterator)
r.init(rdb, prefix, engine)
return r
}
func (r *rocksDBIterator) init(rdb *C.DBEngine, prefix bool, engine Reader) {
r.iter = C.DBNewIter(rdb, C.bool(prefix))
r.engine = engine
}
func (r *rocksDBIterator) checkEngineOpen() {
if r.engine.closed() {
panic("iterator used after backing engine closed")
}
}
func (r *rocksDBIterator) destroy() {
C.DBIterDestroy(r.iter)
*r = rocksDBIterator{}
}
// The following methods implement the Iterator interface.
func (r *rocksDBIterator) Close() {
r.destroy()
iterPool.Put(r)
}
func (r *rocksDBIterator) Seek(key MVCCKey) {
r.checkEngineOpen()
if len(key.Key) == 0 {
// start=Key("") needs special treatment since we need
// to access start[0] in an explicit seek.
r.setState(C.DBIterSeekToFirst(r.iter))
} else {
// We can avoid seeking if we're already at the key we seek.
if r.valid && !r.reseek && key.Equal(r.unsafeKey()) {
return
}
r.setState(C.DBIterSeek(r.iter, goToCKey(key)))
}
}
func (r *rocksDBIterator) SeekReverse(key MVCCKey) {
r.checkEngineOpen()
if len(key.Key) == 0 {
r.setState(C.DBIterSeekToLast(r.iter))
} else {
// We can avoid seeking if we're already at the key we seek.
if r.valid && !r.reseek && key.Equal(r.unsafeKey()) {
return
}
r.setState(C.DBIterSeek(r.iter, goToCKey(key)))
// Maybe the key sorts after the last key in RocksDB.
if !r.Valid() {
r.setState(C.DBIterSeekToLast(r.iter))
}
if !r.Valid() {
return
}
// Make sure the current key is <= the provided key.
if key.Less(r.unsafeKey()) {
r.Prev()
}
}
}
func (r *rocksDBIterator) Valid() bool {
return r.valid
}
func (r *rocksDBIterator) Next() {
r.checkEngineOpen()
r.setState(C.DBIterNext(r.iter, false /* !skip_current_key_versions */))
}
func (r *rocksDBIterator) Prev() {
r.checkEngineOpen()
r.setState(C.DBIterPrev(r.iter, false /* !skip_current_key_versions */))
}
func (r *rocksDBIterator) NextKey() {
r.checkEngineOpen()
r.setState(C.DBIterNext(r.iter, true /* skip_current_key_versions */))
}
func (r *rocksDBIterator) PrevKey() {
r.checkEngineOpen()
r.setState(C.DBIterPrev(r.iter, true /* skip_current_key_versions */))
}
func (r *rocksDBIterator) Key() MVCCKey {
// The data returned by rocksdb_iter_{key,value} is not meant to be
// freed by the client. It is a direct reference to the data managed
// by the iterator, so it is copied instead of freed.
return cToGoKey(r.key)
}
func (r *rocksDBIterator) Value() []byte {
return cSliceToGoBytes(r.value)
}
func (r *rocksDBIterator) ValueProto(msg proto.Message) error {
if r.value.len <= 0 {
return nil
}
return proto.Unmarshal(r.unsafeValue(), msg)
}
func (r *rocksDBIterator) unsafeKey() MVCCKey {
return cToUnsafeGoKey(r.key)
}
func (r *rocksDBIterator) unsafeValue() []byte {
return cSliceToUnsafeGoBytes(r.value)
}
func (r *rocksDBIterator) Error() error {
return statusToError(C.DBIterError(r.iter))
}
func (r *rocksDBIterator) Less(key MVCCKey) bool {
return r.unsafeKey().Less(key)
}
func (r *rocksDBIterator) setState(state C.DBIterState) {
r.valid = bool(state.valid)
r.reseek = false
r.key = state.key
r.value = state.value
}
func (r *rocksDBIterator) ComputeStats(
start, end MVCCKey, nowNanos int64,
) (enginepb.MVCCStats, error) {
result := C.MVCCComputeStats(r.iter, goToCKey(start), goToCKey(end), C.int64_t(nowNanos))
ms := enginepb.MVCCStats{}
if err := statusToError(result.status); err != nil {
return ms, err
}
ms.ContainsEstimates = false
ms.LiveBytes = int64(result.live_bytes)
ms.KeyBytes = int64(result.key_bytes)
ms.ValBytes = int64(result.val_bytes)
ms.IntentBytes = int64(result.intent_bytes)
ms.LiveCount = int64(result.live_count)
ms.KeyCount = int64(result.key_count)
ms.ValCount = int64(result.val_count)
ms.IntentCount = int64(result.intent_count)
ms.IntentAge = int64(result.intent_age)
ms.GCBytesAge = int64(result.gc_bytes_age)
ms.SysBytes = int64(result.sys_bytes)
ms.SysCount = int64(result.sys_count)
ms.LastUpdateNanos = nowNanos
return ms, nil
}
// goToCSlice converts a go byte slice to a DBSlice. Note that this is
// potentially dangerous as the DBSlice holds a reference to the go
// byte slice memory that the Go GC does not know about. This method
// is only intended for use in converting arguments to C
// functions. The C function must copy any data that it wishes to
// retain once the function returns.
func goToCSlice(b []byte) C.DBSlice {
if len(b) == 0 {
return C.DBSlice{data: nil, len: 0}
}
return C.DBSlice{
data: (*C.char)(unsafe.Pointer(&b[0])),
len: C.int(len(b)),
}
}
func goToCKey(key MVCCKey) C.DBKey {
return C.DBKey{
key: goToCSlice(key.Key),
wall_time: C.int64_t(key.Timestamp.WallTime),
logical: C.int32_t(key.Timestamp.Logical),
}
}
func cToGoKey(key C.DBKey) MVCCKey {
// When converting a C.DBKey to an MVCCKey, give the underlying slice an
// extra byte of capacity in anticipation of roachpb.Key.Next() being
// called. The extra byte is trivial extra space, but allows callers to avoid
// an allocation and copy when calling roachpb.Key.Next(). Note that it is
// important that the extra byte contain the value 0 in order for the
// roachpb.Key.Next() fast-path to be invoked. This is true for the code
// below because make() zero initializes all of the bytes.
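// Illustratively, this lets roachpb.Key.Next() take its fast path: the
// successor key can be formed by extending the slice into the spare zero
// byte (length+1 within capacity) instead of copying the key.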
unsafeKey := cSliceToUnsafeGoBytes(key.key)
safeKey := make([]byte, len(unsafeKey), len(unsafeKey)+1)
copy(safeKey, unsafeKey)
return MVCCKey{
Key: safeKey,
Timestamp: hlc.Timestamp{
WallTime: int64(key.wall_time),
Logical: int32(key.logical),
},
}
}
func cToUnsafeGoKey(key C.DBKey) MVCCKey {
return MVCCKey{
Key: cSliceToUnsafeGoBytes(key.key),
Timestamp: hlc.Timestamp{
WallTime: int64(key.wall_time),
Logical: int32(key.logical),
},
}
}
func cStringToGoString(s C.DBString) string {
if s.data == nil {
return ""
}
result := C.GoStringN(s.data, s.len)
C.free(unsafe.Pointer(s.data))
return result
}
func cStringToGoBytes(s C.DBString) []byte {
if s.data == nil {
return nil
}
result := C.GoBytes(unsafe.Pointer(s.data), s.len)
C.free(unsafe.Pointer(s.data))
return result
}
func cSliceToGoBytes(s C.DBSlice) []byte {
if s.data == nil {
return nil
}
return C.GoBytes(unsafe.Pointer(s.data), s.len)
}
func cSliceToUnsafeGoBytes(s C.DBSlice) []byte {
if s.data == nil {
return nil
}
// Interpret the C pointer as a pointer to a Go array, then slice.
return (*[maxArrayLen]byte)(unsafe.Pointer(s.data))[:s.len:s.len]
}
func statusToError(s C.DBStatus) error {
if s.data == nil {
return nil
}
return errors.New(cStringToGoString(s))
}
// goMerge takes existing and update byte slices that are expected to
// be marshalled roachpb.Values and merges the two values returning a
// marshalled roachpb.Value or an error.
func goMerge(existing, update []byte) ([]byte, error) {
var result C.DBString
status := C.DBMergeOne(goToCSlice(existing), goToCSlice(update), &result)
if status.data != nil {
return nil, errors.Errorf("%s: existing=%q, update=%q",
cStringToGoString(status), existing, update)
}
return cStringToGoBytes(result), nil
}
func emptyKeyError() error {
return errors.Errorf("attempted access to empty key")
}
func dbPut(rdb *C.DBEngine, key MVCCKey, value []byte) error {
if len(key.Key) == 0 {
return emptyKeyError()
}
// *Put, *Get, and *Delete call memcpy() (by way of MemTable::Add)
// when called, so we do not need to worry about these byte slices
// being reclaimed by the GC.
return statusToError(C.DBPut(rdb, goToCKey(key), goToCSlice(value)))
}
func dbMerge(rdb *C.DBEngine, key MVCCKey, value []byte) error {
if len(key.Key) == 0 {
return emptyKeyError()
}
// DBMerge calls memcpy() (by way of MemTable::Add)
// when called, so we do not need to worry about these byte slices being
// reclaimed by the GC.
return statusToError(C.DBMerge(rdb, goToCKey(key), goToCSlice(value)))
}
func dbApplyBatchRepr(rdb *C.DBEngine, repr []byte) error {
return statusToError(C.DBApplyBatchRepr(rdb, goToCSlice(repr)))
}
// dbGet returns the value for the given key.
func dbGet(rdb *C.DBEngine, key MVCCKey) ([]byte, error) {
if len(key.Key) == 0 {
return nil, emptyKeyError()
}
var result C.DBString
err := statusToError(C.DBGet(rdb, goToCKey(key), &result))
if err != nil {
return nil, err
}
return cStringToGoBytes(result), nil
}
func dbGetProto(
rdb *C.DBEngine, key MVCCKey, msg proto.Message,
) (ok bool, keyBytes, valBytes int64, err error) {
if len(key.Key) == 0 {
err = emptyKeyError()
return
}
var result C.DBString
if err = statusToError(C.DBGet(rdb, goToCKey(key), &result)); err != nil {
return
}
if result.len <= 0 {
msg.Reset()
return
}
ok = true
if msg != nil {
// Make a byte slice that is backed by result.data. This slice
// cannot live past the lifetime of this method, but we're only
// using it to unmarshal the proto message.
data := cSliceToUnsafeGoBytes(C.DBSlice(result))
err = proto.Unmarshal(data, msg)
}
C.free(unsafe.Pointer(result.data))
keyBytes = int64(key.EncodedSize())
valBytes = int64(result.len)
return
}
func dbClear(rdb *C.DBEngine, key MVCCKey) error {
if len(key.Key) == 0 {
return emptyKeyError()
}
return statusToError(C.DBDelete(rdb, goToCKey(key)))
}
func dbIterate(
rdb *C.DBEngine, engine Reader, start, end MVCCKey, f func(MVCCKeyValue) (bool, error),
) error {
if !start.Less(end) {
return nil
}
it := newRocksDBIterator(rdb, false, engine)
defer it.Close()
it.Seek(start)
for ; it.Valid(); it.Next() {
k := it.Key()
if !k.Less(end) {
break
}
if done, err := f(MVCCKeyValue{Key: k, Value: it.Value()}); done || err != nil {
return err
}
}
// Check for any errors during iteration.
return it.Error()
}
// RocksDBSstFileReader allows iteration over a number of non-overlapping
// sstables exported by `RocksDBSstFileWriter`.
type RocksDBSstFileReader struct {
// TODO(dan): This currently works by creating a RocksDB instance in a
// temporary directory that's cleaned up on `Close`. It doesn't appear that
// we can use an in-memory RocksDB with this, because AddFile doesn't then
// work with files on disk. This should also work with overlapping files.
dir string
rocksDB *RocksDB
}
// MakeRocksDBSstFileReader creates a RocksDBSstFileReader that uses a scratch
// directory which is cleaned up by `Close`.
func MakeRocksDBSstFileReader() (RocksDBSstFileReader, error) {
dir, err := ioutil.TempDir("", "RocksDBSstFileReader")
if err != nil {
return RocksDBSstFileReader{}, err
}
// TODO(dan): I pulled all these magic numbers out of nowhere. Make them
// less magic.
cache := NewRocksDBCache(1 << 20)
rocksDB, err := NewRocksDB(
roachpb.Attributes{}, dir, cache, 512<<20, DefaultMaxOpenFiles)
if err != nil {
return RocksDBSstFileReader{}, err
}
return RocksDBSstFileReader{dir, rocksDB}, nil
}
// AddFile links the file at the given path into a database. See the RocksDB
// documentation on `AddFile` for the various restrictions on what can be added.
func (fr *RocksDBSstFileReader) AddFile(path string) error {
if fr.rocksDB == nil {
return errors.New("cannot call AddFile on a closed reader")
}
return statusToError(C.DBEngineAddFile(fr.rocksDB.rdb, goToCSlice([]byte(path))))
}
// Iterate iterates over the keys between start inclusive and end
// exclusive, invoking f() on each key/value pair.
func (fr *RocksDBSstFileReader) Iterate(
start, end MVCCKey, f func(MVCCKeyValue) (bool, error),
) error {
if fr.rocksDB == nil {
return errors.New("cannot call Iterate on a closed reader")
}
return fr.rocksDB.Iterate(start, end, f)
}
// Close finishes the reader.
func (fr *RocksDBSstFileReader) Close() {
if fr.rocksDB == nil {
return
}
fr.rocksDB.Close()
fr.rocksDB = nil
if err := os.RemoveAll(fr.dir); err != nil {
log.Warningf(context.TODO(), "error removing temp rocksdb directory %q: %s", fr.dir, err)
}
}
// RocksDBSstFileWriter creates a file suitable for importing with
// RocksDBSstFileReader.
type RocksDBSstFileWriter struct {
fw *C.DBSstFileWriter
// DataSize tracks the total key and value bytes added so far.
DataSize int64
}
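// Illustrative round-trip sketch (error handling elided; path, key, val,
// start, end and f are placeholders):
//
//	fw := MakeRocksDBSstFileWriter()
//	_ = fw.Open(path)
//	_ = fw.Add(MVCCKeyValue{Key: key, Value: val})
//	_ = fw.Close()
//	fr, _ := MakeRocksDBSstFileReader()
//	_ = fr.AddFile(path)
//	_ = fr.Iterate(start, end, f)
//	fr.Close()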
// MakeRocksDBSstFileWriter creates a new RocksDBSstFileWriter with the default
// configuration.
func MakeRocksDBSstFileWriter() RocksDBSstFileWriter {
return RocksDBSstFileWriter{C.DBSstFileWriterNew(), 0}
}
// Open creates a file at the given path for output of an sstable.
func (fw *RocksDBSstFileWriter) Open(path string) error {
if fw.fw == nil {
return errors.New("cannot call Open on a closed writer")
}
return statusToError(C.DBSstFileWriterOpen(fw.fw, goToCSlice([]byte(path))))
}
// Add puts a kv entry into the sstable being built. An error is returned if it
// is not greater than any previously added entry (according to the comparator
// configured during writer creation). `Open` must have been called. `Close`
// cannot have been called.
func (fw *RocksDBSstFileWriter) Add(kv MVCCKeyValue) error {
if fw.fw == nil {
return errors.New("cannot call Add on a closed writer")
}
fw.DataSize += int64(len(kv.Key.Key)) + int64(len(kv.Value))
return statusToError(C.DBSstFileWriterAdd(fw.fw, goToCKey(kv.Key), goToCSlice(kv.Value)))
}
// Close finishes the writer, flushing any remaining writes to disk. At least
// one kv entry must have been added.
func (fw *RocksDBSstFileWriter) Close() error {
if fw.fw == nil {
return errors.New("writer is already closed")
}
err := statusToError(C.DBSstFileWriterClose(fw.fw))
fw.fw = nil
return err
}
pretty.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! Utilities for printing record batches. Note this module is not
//! available unless `feature = "prettyprint"` is enabled.
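//!
//! Typical usage (illustrative sketch): `print_batches(&[batch])?` writes an
//! ASCII table to stdout, while `pretty_format_batches(&[batch])?.to_string()`
//! captures the same table as a `String`.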
use crate::{array::ArrayRef, record_batch::RecordBatch};
use std::fmt::Display;
use comfy_table::{Cell, Table};
use crate::error::Result;
use super::display::array_value_to_string;
/// Create a visual representation of record batches
pub fn pretty_format_batches(results: &[RecordBatch]) -> Result<impl Display> {
create_table(results)
}
/// Create a visual representation of columns
pub fn pretty_format_columns(
col_name: &str,
results: &[ArrayRef],
) -> Result<impl Display> {
create_column(col_name, results)
}
/// Prints a visual representation of record batches to stdout
pub fn print_batches(results: &[RecordBatch]) -> Result<()> {
println!("{}", create_table(results)?);
Ok(())
}
/// Prints a visual representation of a list of columns to stdout
pub fn print_columns(col_name: &str, results: &[ArrayRef]) -> Result<()> {
println!("{}", create_column(col_name, results)?);
Ok(())
}
/// Convert a series of record batches into a table
fn create_table(results: &[RecordBatch]) -> Result<Table> {
let mut table = Table::new();
table.load_preset("||--+-++|    ++++++");
if results.is_empty() {
return Ok(table);
}
let schema = results[0].schema();
let mut header = Vec::new();
for field in schema.fields() {
header.push(Cell::new(&field.name()));
}
table.set_header(header);
for batch in results {
for row in 0..batch.num_rows() {
let mut cells = Vec::new();
for col in 0..batch.num_columns() {
let column = batch.column(col);
cells.push(Cell::new(&array_value_to_string(column, row)?));
}
table.add_row(cells);
}
}
Ok(table)
}
fn create_column(field: &str, columns: &[ArrayRef]) -> Result<Table> {
let mut table = Table::new();
table.load_preset("||--+-++|    ++++++");
if columns.is_empty() {
return Ok(table);
}
let header = vec![Cell::new(field)];
table.set_header(header);
for col in columns {
for row in 0..col.len() {
let cells = vec![Cell::new(&array_value_to_string(col, row)?)];
table.add_row(cells);
}
}
Ok(table)
}
#[cfg(test)]
mod tests {
use crate::{
array::{
self, new_null_array, Array, Date32Array, Date64Array,
FixedSizeBinaryBuilder, Float16Array, PrimitiveBuilder, StringArray,
StringBuilder, StringDictionaryBuilder, StructArray, Time32MillisecondArray,
Time32SecondArray, Time64MicrosecondArray, Time64NanosecondArray,
TimestampMicrosecondArray, TimestampMillisecondArray,
TimestampNanosecondArray, TimestampSecondArray,
},
datatypes::{DataType, Field, Int32Type, Schema},
};
use super::*;
use crate::array::{DecimalArray, FixedSizeListBuilder, Int32Array};
use std::fmt::Write;
use std::sync::Arc;
use half::f16;
#[test]
fn test_pretty_format_batches() -> Result<()> {
// define a schema.
let schema = Arc::new(Schema::new(vec![
Field::new("a", DataType::Utf8, true),
Field::new("b", DataType::Int32, true),
]));
// define data.
let batch = RecordBatch::try_new(
schema,
vec![
Arc::new(array::StringArray::from(vec![
Some("a"),
Some("b"),
None,
Some("d"),
])),
Arc::new(array::Int32Array::from(vec![
Some(1),
None,
Some(10),
Some(100),
])),
],
)?;
let table = pretty_format_batches(&[batch])?.to_string();
let expected = vec![
"+---+-----+",
"| a | b |",
"+---+-----+",
"| a | 1 |",
"| b | |",
"| | 10 |",
"| d | 100 |",
"+---+-----+",
];
let actual: Vec<&str> = table.lines().collect();
assert_eq!(expected, actual, "Actual result:\n{}", table);
Ok(())
}
#[test]
fn test_pretty_format_columns() -> Result<()> {
let columns = vec![
Arc::new(array::StringArray::from(vec![
Some("a"),
Some("b"),
None,
Some("d"),
])) as ArrayRef,
Arc::new(array::StringArray::from(vec![Some("e"), None, Some("g")])),
];
let table = pretty_format_columns("a", &columns)?.to_string();
let expected = vec![
"+---+", "| a |", "+---+", "| a |", "| b |", "| |", "| d |", "| e |",
"| |", "| g |", "+---+",
];
let actual: Vec<&str> = table.lines().collect();
assert_eq!(expected, actual, "Actual result:\n{}", table);
Ok(())
}
#[test]
fn test_pretty_format_null() {
let schema = Arc::new(Schema::new(vec![
Field::new("a", DataType::Utf8, true),
Field::new("b", DataType::Int32, true),
Field::new("c", DataType::Null, true),
]));
let num_rows = 4;
let arrays = schema
.fields()
.iter()
.map(|f| new_null_array(f.data_type(), num_rows))
.collect();
// define data (null)
let batch = RecordBatch::try_new(schema, arrays).unwrap();
let table = pretty_format_batches(&[batch]).unwrap().to_string();
let expected = vec![
"+---+---+---+",
"| a | b | c |",
"+---+---+---+",
"| | | |",
"| | | |",
"| | | |",
"| | | |",
"+---+---+---+",
];
let actual: Vec<&str> = table.lines().collect();
assert_eq!(expected, actual, "Actual result:\n{:#?}", table);
}
#[test]
fn test_pretty_format_dictionary() -> Result<()> {
// define a schema.
let field_type =
DataType::Dictionary(Box::new(DataType::Int32), Box::new(DataType::Utf8));
let schema = Arc::new(Schema::new(vec![Field::new("d1", field_type, true)]));
let keys_builder = PrimitiveBuilder::<Int32Type>::new(10);
let values_builder = StringBuilder::new(10);
let mut builder = StringDictionaryBuilder::new(keys_builder, values_builder);
builder.append("one")?;
builder.append_null()?;
builder.append("three")?;
let array = Arc::new(builder.finish());
let batch = RecordBatch::try_new(schema, vec![array])?;
let table = pretty_format_batches(&[batch])?.to_string();
let expected = vec![
"+-------+",
"| d1 |",
"+-------+",
"| one |",
"| |",
"| three |",
"+-------+",
];
let actual: Vec<&str> = table.lines().collect();
assert_eq!(expected, actual, "Actual result:\n{}", table);
Ok(())
}
#[test]
fn test_pretty_format_fixed_size_list() -> Result<()> {
// define a schema.
let field_type = DataType::FixedSizeList(
Box::new(Field::new("item", DataType::Int32, true)),
3,
);
let schema = Arc::new(Schema::new(vec![Field::new("d1", field_type, true)]));
let keys_builder = Int32Array::builder(3);
let mut builder = FixedSizeListBuilder::new(keys_builder, 3);
builder.values().append_slice(&[1, 2, 3]).unwrap();
builder.append(true).unwrap();
builder.values().append_slice(&[4, 5, 6]).unwrap();
builder.append(false).unwrap();
builder.values().append_slice(&[7, 8, 9]).unwrap();
builder.append(true).unwrap();
let array = Arc::new(builder.finish());
let batch = RecordBatch::try_new(schema, vec![array])?;
let table = pretty_format_batches(&[batch])?.to_string();
let expected = vec![
"+-----------+",
"| d1 |",
"+-----------+",
"| [1, 2, 3] |",
"| |",
"| [7, 8, 9] |",
"+-----------+",
];
let actual: Vec<&str> = table.lines().collect();
assert_eq!(expected, actual, "Actual result:\n{}", table);
Ok(())
}
#[test]
fn test_pretty_format_fixed_size_binary() -> Result<()> {
// define a schema.
let field_type = DataType::FixedSizeBinary(3);
let schema = Arc::new(Schema::new(vec![Field::new("d1", field_type, true)]));
let mut builder = FixedSizeBinaryBuilder::new(3, 3);
builder.append_value(&[1, 2, 3]).unwrap();
builder.append_null().unwrap();
builder.append_value(&[7, 8, 9]).unwrap();
let array = Arc::new(builder.finish());
let batch = RecordBatch::try_new(schema, vec![array])?;
let table = pretty_format_batches(&[batch])?.to_string();
let expected = vec![
"+--------+",
"| d1 |",
"+--------+",
"| 010203 |",
"| |",
"| 070809 |",
"+--------+",
];
let actual: Vec<&str> = table.lines().collect();
assert_eq!(expected, actual, "Actual result:\n{}", table);
Ok(())
}
/// Generate an array with type $ARRAYTYPE with a numeric value of
/// $VALUE, and compare $EXPECTED_RESULT to the output of
/// formatting that array with `pretty_format_batches`
macro_rules! check_datetime {
($ARRAYTYPE:ident, $VALUE:expr, $EXPECTED_RESULT:expr) => {
let mut builder = $ARRAYTYPE::builder(10);
builder.append_value($VALUE).unwrap();
builder.append_null().unwrap();
let array = builder.finish();
let schema = Arc::new(Schema::new(vec![Field::new(
"f",
array.data_type().clone(),
true,
)]));
let batch = RecordBatch::try_new(schema, vec![Arc::new(array)]).unwrap();
let table = pretty_format_batches(&[batch])
.expect("formatting batches")
.to_string();
let expected = $EXPECTED_RESULT;
let actual: Vec<&str> = table.lines().collect();
assert_eq!(expected, actual, "Actual result:\n\n{:#?}\n\n", actual);
};
}
#[test]
fn test_pretty_format_timestamp_second() {
let expected = vec![
"+---------------------+",
"| f |",
"+---------------------+",
"| 1970-05-09 14:25:11 |",
"| |",
"+---------------------+",
];
check_datetime!(TimestampSecondArray, 11111111, expected);
}
#[test]
fn test_pretty_format_timestamp_millisecond() {
let expected = vec![
"+-------------------------+",
"| f |",
"+-------------------------+",
"| 1970-01-01 03:05:11.111 |",
"| |",
"+-------------------------+",
];
check_datetime!(TimestampMillisecondArray, 11111111, expected);
}
#[test]
fn test_pretty_format_timestamp_microsecond() {
let expected = vec![
"+----------------------------+",
"| f |",
"+----------------------------+",
"| 1970-01-01 00:00:11.111111 |",
"| |",
"+----------------------------+",
];
check_datetime!(TimestampMicrosecondArray, 11111111, expected);
}
#[test]
fn test_pretty_format_timestamp_nanosecond() {
let expected = vec![
"+-------------------------------+",
"| f                             |",
"+-------------------------------+",
"| 1970-01-01 00:00:00.011111111 |",
"|                               |",
"+-------------------------------+",
];
check_datetime!(TimestampNanosecondArray, 11111111, expected);
}
#[test]
fn test_pretty_format_date_32() {
let expected = vec![
"+------------+",
"| f |",
"+------------+",
"| 1973-05-19 |",
"| |",
"+------------+",
];
check_datetime!(Date32Array, 1234, expected);
}
#[test]
fn test_pretty_format_date_64() {
let expected = vec![
"+------------+",
"| f |",
"+------------+",
"| 2005-03-18 |",
"| |",
"+------------+",
];
check_datetime!(Date64Array, 1111111100000, expected);
}
#[test]
fn test_pretty_format_time_32_second() {
let expected = vec![
"+----------+",
"| f |",
"+----------+",
"| 00:18:31 |",
"| |",
"+----------+",
];
check_datetime!(Time32SecondArray, 1111, expected);
}
#[test]
fn test_pretty_format_time_32_millisecond() {
let expected = vec![
"+--------------+",
"| f |",
"+--------------+",
"| 03:05:11.111 |",
"| |",
"+--------------+",
];
check_datetime!(Time32MillisecondArray, 11111111, expected);
}
#[test]
fn test_pretty_format_time_64_microsecond() {
let expected = vec![
"+-----------------+",
"| f |",
"+-----------------+",
"| 00:00:11.111111 |",
"| |",
"+-----------------+",
];
check_datetime!(Time64MicrosecondArray, 11111111, expected);
}
#[test]
fn test_pretty_format_time_64_nanosecond() {
let expected = vec![
"+--------------------+",
"| f |",
"+--------------------+",
"| 00:00:00.011111111 |",
"| |",
"+--------------------+",
];
check_datetime!(Time64NanosecondArray, 11111111, expected);
}
#[test]
fn test_int_display() -> Result<()> {
let array = Arc::new(Int32Array::from(vec![6, 3])) as ArrayRef;
let actual_one = array_value_to_string(&array, 0).unwrap();
let expected_one = "6";
let actual_two = array_value_to_string(&array, 1).unwrap();
let expected_two = "3";
assert_eq!(actual_one, expected_one);
assert_eq!(actual_two, expected_two);
Ok(())
}
#[test]
fn test_decimal_display() -> Result<()> {
let precision = 10;
let scale = 2;
let array = [Some(101), None, Some(200), Some(3040)]
.into_iter()
.collect::<DecimalArray>()
.with_precision_and_scale(precision, scale)
.unwrap();
let dm = Arc::new(array) as ArrayRef;
let schema = Arc::new(Schema::new(vec![Field::new(
"f",
dm.data_type().clone(),
true,
)]));
let batch = RecordBatch::try_new(schema, vec![dm])?;
let table = pretty_format_batches(&[batch])?.to_string();
let expected = vec![
"+-------+",
"| f |",
"+-------+",
"| 1.01 |",
"| |",
"| 2.00 |",
"| 30.40 |",
"+-------+",
];
let actual: Vec<&str> = table.lines().collect();
assert_eq!(expected, actual, "Actual result:\n{}", table);
Ok(())
}
#[test]
fn test_decimal_display_zero_scale() -> Result<()> {
let precision = 5;
let scale = 0;
let array = [Some(101), None, Some(200), Some(3040)]
.into_iter()
.collect::<DecimalArray>()
.with_precision_and_scale(precision, scale)
.unwrap();
let dm = Arc::new(array) as ArrayRef;
let schema = Arc::new(Schema::new(vec![Field::new(
"f",
dm.data_type().clone(),
true,
)]));
let batch = RecordBatch::try_new(schema, vec![dm])?;
let table = pretty_format_batches(&[batch])?.to_string();
let expected = vec![
"+------+", "| f |", "+------+", "| 101 |", "| |", "| 200 |",
"| 3040 |", "+------+",
];
let actual: Vec<&str> = table.lines().collect();
assert_eq!(expected, actual, "Actual result:\n{}", table);
Ok(())
}
#[test]
fn test_pretty_format_struct() -> Result<()> {
let schema = Schema::new(vec![
Field::new(
"c1",
DataType::Struct(vec![
Field::new("c11", DataType::Int32, false),
Field::new(
"c12",
DataType::Struct(vec![Field::new("c121", DataType::Utf8, false)]),
false,
),
]),
false,
),
Field::new("c2", DataType::Utf8, false),
]);
let c1 = StructArray::from(vec![
(
Field::new("c11", DataType::Int32, false),
Arc::new(Int32Array::from(vec![Some(1), None, Some(5)])) as ArrayRef,
),
(
Field::new(
"c12",
DataType::Struct(vec![Field::new("c121", DataType::Utf8, false)]),
false,
),
Arc::new(StructArray::from(vec![(
Field::new("c121", DataType::Utf8, false),
Arc::new(StringArray::from(vec![Some("e"), Some("f"), Some("g")]))
as ArrayRef,
)])) as ArrayRef,
),
]);
let c2 = StringArray::from(vec![Some("a"), Some("b"), Some("c")]);
let batch =
RecordBatch::try_new(Arc::new(schema), vec![Arc::new(c1), Arc::new(c2)])
.unwrap();
let table = pretty_format_batches(&[batch])?.to_string();
let expected = vec![
r#"+-------------------------------------+----+"#,
r#"| c1 | c2 |"#,
r#"+-------------------------------------+----+"#,
r#"| {"c11": 1, "c12": {"c121": "e"}} | a |"#,
r#"| {"c11": null, "c12": {"c121": "f"}} | b |"#,
r#"| {"c11": 5, "c12": {"c121": "g"}} | c |"#,
r#"+-------------------------------------+----+"#,
];
let actual: Vec<&str> = table.lines().collect();
assert_eq!(expected, actual, "Actual result:\n{}", table);
Ok(())
}
#[test]
fn test_writing_formatted_batches() -> Result<()> {
// define a schema.
let schema = Arc::new(Schema::new(vec![
Field::new("a", DataType::Utf8, true),
Field::new("b", DataType::Int32, true),
]));
// define data.
let batch = RecordBatch::try_new(
schema,
vec![
Arc::new(array::StringArray::from(vec![
Some("a"),
Some("b"),
None,
Some("d"),
])),
Arc::new(array::Int32Array::from(vec![
Some(1),
None,
Some(10),
Some(100),
])),
],
)?;
let mut buf = String::new();
write!(&mut buf, "{}", pretty_format_batches(&[batch])?).unwrap();
let s = vec![
"+---+-----+",
"| a | b |",
"+---+-----+",
"| a | 1 |",
"| b | |",
"| | 10 |",
"| d | 100 |",
"+---+-----+",
];
let expected = s.join("\n");
assert_eq!(expected, buf);
Ok(())
}
#[test]
fn test_float16_display() -> Result<()> {
let values = vec![
Some(f16::from_f32(f32::NAN)),
Some(f16::from_f32(4.0)),
Some(f16::from_f32(f32::NEG_INFINITY)),
];
let array = Arc::new(values.into_iter().collect::<Float16Array>()) as ArrayRef;
let schema = Arc::new(Schema::new(vec![Field::new(
"f16",
array.data_type().clone(),
true,
)]));
let batch = RecordBatch::try_new(schema, vec![array])?;
let table = pretty_format_batches(&[batch])?.to_string();
let expected = vec![
"+------+", "| f16 |", "+------+", "| NaN |", "| 4 |", "| -inf |",
"+------+",
];
let actual: Vec<&str> = table.lines().collect();
assert_eq!(expected, actual, "Actual result:\n{}", table);
Ok(())
}
}
block.ts | import * as crypto from "crypto";
export class Block {
readonly nonce: number;
readonly hash: string;
constructor(
readonly index: number,
readonly previousHash: string,
readonly timestamp: number,
readonly data: string
) {
const { nonce, hash } = this.mine();
this.nonce = nonce;
this.hash = hash;
}
private calculateHash(nonce: number): string {
const data =
this.index + this.previousHash + this.timestamp + this.data + nonce;
return crypto.createHash("sha256").update(data).digest("hex");
}
private mine(): {nonce: number, hash:string} {
let hash: string;
let nonce = 0;
do {
hash = this.calculateHash(++nonce);
} while (hash.startsWith('0000') === false);
return {nonce, hash};
}
}
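// Illustrative usage sketch (not part of the original file): the difficulty
// is fixed at four leading zero hex digits, so the proof-of-work runs in the
// constructor and may take a moment.
//
// const genesis = new Block(0, "0", Date.now(), "Genesis block");
// console.log(genesis.hash); // begins with "0000"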
tessellate.rs | use crate::InnerAtom;
use lyon::path::Path;
use lyon::tessellation::geometry_builder::simple_builder;
use lyon::tessellation::math::point;
use lyon::tessellation::{FillOptions, FillTessellator};
use std::collections::HashMap;
pub use lyon::tessellation::{geometry_builder::VertexBuffers, math::Point, TessellationError};
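/// Tessellates `poly` (its exterior ring plus any interior hole rings),
/// along with a circular cut-out for every `InnerAtom::Drill` in `interior`,
/// into a 2D triangle mesh.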
pub fn tessellate_2d(
poly: geo::Polygon<f64>,
interior: Vec<InnerAtom>,
) -> Result<VertexBuffers<Point, u16>, TessellationError> {
let mut buffers: VertexBuffers<Point, u16> = VertexBuffers::new();
let mut vertex_builder = simple_builder(&mut buffers);
let mut tessellator = FillTessellator::new();
let options = FillOptions::default();
let mut path_builder = Path::builder();
let mut last: Option<geo::Point<f64>> = None;
for p in poly.exterior().points_iter() {
let (x, y) = (p.x() as f32, p.y() as f32);
if last.is_none() {
path_builder.begin(point(x, y));
} else {
path_builder.line_to(point(x, y));
}
last = Some(p);
}
path_builder.end(true);
for hole in poly.interiors() {
let mut last: Option<geo::Point<f64>> = None;
for p in hole.points_iter() {
let (x, y) = (p.x() as f32, p.y() as f32);
if last.is_none() {
path_builder.begin(point(x, y));
} else {
path_builder.line_to(point(x, y));
}
last = Some(p);
}
path_builder.end(true);
}
for f in interior {
if let InnerAtom::Drill {
center,
radius,
plated: _,
} = f
{
use geo::{algorithm::rotate::RotatePoint, Point};
let right_edge: Point<_> = (center.x + radius, center.y).into();
let start = right_edge.rotate_around_point(0.0, center.into());
path_builder.begin(point(start.x() as f32, start.y() as f32));
for i in (0..=360).step_by(8) {
let p = right_edge.rotate_around_point(i as f64, center.into());
path_builder.line_to(point(p.x() as f32, p.y() as f32));
}
path_builder.end(true);
}
}
let path = path_builder.build();
tessellator.tessellate_path(&path, &options, &mut vertex_builder)?;
Ok(buffers)
}
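/// Extrudes the 2D tessellation into a closed 3D mesh: the triangulation is
/// emitted twice (bottom face at z = -0.8, top face at z = 0.8) and side
/// walls are stitched along the boundary edges, i.e. the edges used by
/// exactly one triangle.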
pub fn tessellate_3d(buffer: VertexBuffers<Point, u16>) -> (Vec<[f64; 3]>, Vec<u16>) {
// eprintln!("buffer: {:?} ({})", buffer, buffer.vertices.chunks_exact(3).count());
// Iterate through the edges represented by the indices, building a map
// from each edge to the index-buffer positions (and orientations) that use it.
let mut lines: HashMap<(u16, u16), Vec<(usize, bool)>> =
HashMap::with_capacity(buffer.indices.len());
// For the three corners of each triangle ...
for (i, in3) in buffer.indices.chunks_exact(3).enumerate() {
// Loop each edge (line) of the triangle ...
for (i, verts) in &[
(i * 3 + 0, &[in3[0], in3[1]]),
(i * 3 + 1, &[in3[1], in3[2]]),
(i * 3 + 2, &[in3[2], in3[0]]),
] {
// We make sure a forward or reverse edge
// maps to the same key (2->1 is the same as 1->2).
let key = (verts[0].min(verts[1]), verts[1].max(verts[0]));
// ... And track how many times we see an edge between those
// two points, by inserting it into the hash map.
match lines.get_mut(&key) {
Some(v) => v.push((*i, verts[0] < verts[1])),
None => {
lines.insert(key, vec![(*i, verts[0] < verts[1])]);
}
}
}
}
// Edges which are on the boundary of the polygon are those which are only
// part of a single triangle.
let mut boundary_lines: Vec<_> = lines
.into_iter()
.filter(|(_k, v)| v.len() == 1)
.map(|(k, v)| (k, v[0])) // (v1, v2), (idx, ordered)
.collect();
// Sort them into the order in which they appeared in the original index buffer.
boundary_lines.sort_by(|a, b| a.1 .0.cmp(&b.1 .0));
// First buffer.vertices.len() items are the vertices of the bottom surface.
// The last buffer.vertices.len() items are the vertices of the top surface.
let mut vertices: Vec<[f64; 3]> =
Vec::with_capacity(2 * buffer.vertices.len() + 6 * boundary_lines.len());
for v in &buffer.vertices {
vertices.push([v.x.into(), v.y.into(), -0.8]);
}
for v in &buffer.vertices {
vertices.push([v.x.into(), v.y.into(), 0.8]);
}
// Compute the vertices: the front and back faces are easy - we just duplicate
// the original tessellation, with the back face in reverse order for correct
// winding.
let c = buffer.vertices.len() as u16;
let mut indices: Vec<u16> =
Vec::with_capacity((buffer.indices.len() * 2) + (buffer.vertices.len() * 6));
// Front
for i in &buffer.indices {
indices.push(*i);
}
// Back
for i3 in buffer.indices.chunks_exact(3) {
        indices.push(i3[2] + c); // reverse order so the back face winds correctly (normals point outward)
indices.push(i3[1] + c);
indices.push(i3[0] + c);
}
// For the sides, we loop through the boundary edges to construct 2 triangles
// for each edge.
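    // Each boundary edge (v_low, v_high) is extruded into a side quad between
    // the bottom surface (index v) and the top surface (index v + c), emitted
    // as two triangles whose winding follows the edge's original direction.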
for ((v_low, v_high), (_, original_order)) in boundary_lines {
if !original_order {
indices.push(v_low);
indices.push(v_high);
indices.push(v_low + c);
indices.push(v_high);
indices.push(v_high + c);
indices.push(v_low + c);
} else {
indices.push(v_high);
indices.push(v_low);
indices.push(v_high + c);
indices.push(v_low);
indices.push(v_low + c);
indices.push(v_high + c);
}
}
(vertices, indices)
}
| tessellate_3d |
setup.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import sys
from setuptools import setup, find_packages | if sys.version_info < (3, 5):
error = """
uwsgi-sloth only supports Python 3.5 and above.
If you are using Python 2.7, please install "uwsgi-sloth<3.0.0" instead.
"""
print(error, file=sys.stderr)
sys.exit(1)
setup(
name='uwsgi-sloth',
version='3.0.2',
description='A simple uwsgi access log analyzer',
long_description=open('README.rst').read(),
author='piglei',
author_email='[email protected]',
url='https://github.com/piglei/uwsgi-sloth',
keywords='uwsgi log analyzer',
license='Apache License, Version 2.0',
packages=find_packages(),
package_data={"": ['templates/*.html', 'sample.conf']},
classifiers=[
"Programming Language :: Python :: 3",
],
install_requires=[
'jinja2',
'configobj'
],
scripts=['uwsgi_sloth/uwsgi-sloth']
) |
# **Python version check** |
tx_utils.go | package incclient
import (
"fmt"
"github.com/incognitochain/go-incognito-sdk-v2/transaction"
"github.com/incognitochain/go-incognito-sdk-v2/transaction/tx_generic"
"math/big"
"sort"
"time"
"github.com/incognitochain/go-incognito-sdk-v2/coin"
"github.com/incognitochain/go-incognito-sdk-v2/common"
"github.com/incognitochain/go-incognito-sdk-v2/common/base58"
"github.com/incognitochain/go-incognito-sdk-v2/crypto"
"github.com/incognitochain/go-incognito-sdk-v2/key"
"github.com/incognitochain/go-incognito-sdk-v2/metadata"
"github.com/incognitochain/go-incognito-sdk-v2/privacy"
"github.com/incognitochain/go-incognito-sdk-v2/rpchandler"
"github.com/incognitochain/go-incognito-sdk-v2/rpchandler/jsonresult"
"github.com/incognitochain/go-incognito-sdk-v2/rpchandler/rpc"
"github.com/incognitochain/go-incognito-sdk-v2/transaction/utils"
"github.com/incognitochain/go-incognito-sdk-v2/wallet"
)
const pageSize = 100
type coinParams struct {
coinList []coin.PlainCoin
idxList []uint64
}
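// Bytes serializes a coinParams with a simple length-prefixed layout:
// [numCoins] [len(coin_1)][coin_1] ... [numIndices] [len(idx_1)][idx_1] ...
// SetBytes is the inverse of this encoding.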
func (cp coinParams) Bytes() []byte {
	if len(cp.coinList) == 0 {
return nil
}
resBytes := make([]byte, 0)
// first byte is the number of coins
resBytes = append(resBytes, byte(len(cp.coinList)))
for _, c := range cp.coinList {
cBytes := c.Bytes()
resBytes = append(resBytes, byte(len(cBytes)))
resBytes = append(resBytes, cBytes...)
}
// next byte is the length of indices
resBytes = append(resBytes, byte(len(cp.idxList)))
for _, idx := range cp.idxList {
idxBytes := common.IntToBytes(int(idx))
resBytes = append(resBytes, byte(len(idxBytes)))
resBytes = append(resBytes, idxBytes...)
}
return resBytes
}
func (cp *coinParams) SetBytes(data []byte) error {
if len(data) == 0 {
return fmt.Errorf("length data is zero")
}
var err error
offSet := 0
// get num input coins
if offSet >= len(data) {
return fmt.Errorf("out of range numInCoins")
}
numInCoins := int(data[offSet])
offSet++
cp.coinList = make([]coin.PlainCoin, numInCoins)
for i := 0; i < numInCoins; i++ {
if offSet >= len(data) {
return fmt.Errorf("out of range lenCoin")
}
lenCoin := int(data[offSet])
offSet++
if offSet+lenCoin > len(data) {
return fmt.Errorf("out of range input coins")
}
coinBytes := data[offSet : offSet+lenCoin]
cp.coinList[i], err = coin.NewPlainCoinFromByte(coinBytes)
if err != nil {
return fmt.Errorf("set byte to inputCoin got error")
}
offSet += lenCoin
}
if offSet >= len(data) {
return fmt.Errorf("out of range numIndices")
}
numIndices := int(data[offSet])
offSet++
if numIndices == 0 {
return nil
}
cp.idxList = make([]uint64, numIndices)
for i := 0; i < numIndices; i++ {
if offSet >= len(data) {
return fmt.Errorf("out of range lenIdx")
}
lenIdx := int(data[offSet])
offSet++
if offSet+lenIdx > len(data) {
return fmt.Errorf("out of range index")
}
idxBytes := data[offSet : offSet+lenIdx]
cp.idxList[i] = uint64(common.BytesToInt(idxBytes))
offSet += lenIdx
}
if len(cp.idxList) > 0 && len(cp.idxList) != len(cp.coinList) {
return fmt.Errorf("lengths of idxList and coinList mismatch: %v != %v", len(cp.idxList), len(cp.coinList))
}
return nil
}
// createPaymentInfos creates a list of key.PaymentInfo based on the provided address list and corresponding amount list.
func createPaymentInfos(addrList []string, amountList []uint64) ([]*key.PaymentInfo, error) {
if len(addrList) != len(amountList) {
return nil, fmt.Errorf("length of payment address (%v) and length amount (%v) mismatch", len(addrList), len(amountList))
}
paymentInfos := make([]*key.PaymentInfo, 0)
for i, addr := range addrList {
receiverWallet, err := wallet.Base58CheckDeserialize(addr)
if err != nil {
return nil, fmt.Errorf("cannot deserialize key %v: %v", addr, err)
}
paymentInfo := key.PaymentInfo{PaymentAddress: receiverWallet.KeySet.PaymentAddress, Amount: amountList[i], Message: []byte{}}
paymentInfos = append(paymentInfos, &paymentInfo)
}
return paymentInfos, nil
}
// chooseBestCoinsByAmount chooses best UTXOs to spend depending on the provided amount.
//
// Assume that the input coins have been sorted in descending order.
func chooseBestCoinsByAmount(coinList []coin.PlainCoin, requiredAmount uint64) ([]coin.PlainCoin, []uint64, error) {
totalInputAmount := uint64(0)
for _, inputCoin := range coinList {
totalInputAmount += inputCoin.GetValue()
}
if totalInputAmount < requiredAmount {
return nil, nil, fmt.Errorf("total unspent amount (%v) is less than the required amount (%v)", totalInputAmount, requiredAmount)
}
if totalInputAmount == requiredAmount {
chosenIndexList := make([]uint64, 0)
for i := 0; i < len(coinList); i++ {
chosenIndexList = append(chosenIndexList, uint64(i))
}
return coinList, chosenIndexList, nil
}
coinsToSpend := make([]coin.PlainCoin, 0)
chosenIndexList := make([]uint64, 0)
remainAmount := requiredAmount
totalChosenAmount := uint64(0)
//TODO: find a better solution for this.
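	// Greedy pass over the descending coin list: skip a coin that alone covers
	// the remaining amount while the next, smaller coin also covers it (prefer
	// the smallest sufficient coin); otherwise take the coin and reduce the
	// remaining amount by its value.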
for i := 0; i < len(coinList)-1; i++ {
if coinList[i].GetValue() > remainAmount {
if coinList[i+1].GetValue() >= remainAmount {
continue
} else {
coinsToSpend = append(coinsToSpend, coinList[i])
chosenIndexList = append(chosenIndexList, uint64(i))
totalChosenAmount += coinList[i].GetValue()
break
}
} else {
coinsToSpend = append(coinsToSpend, coinList[i])
chosenIndexList = append(chosenIndexList, uint64(i))
remainAmount -= coinList[i].GetValue()
totalChosenAmount += coinList[i].GetValue()
}
}
if totalChosenAmount < requiredAmount {
totalChosenAmount += coinList[len(coinList)-1].GetValue()
coinsToSpend = append(coinsToSpend, coinList[len(coinList)-1])
chosenIndexList = append(chosenIndexList, uint64(len(coinList)-1))
if totalChosenAmount < requiredAmount |
}
return coinsToSpend, chosenIndexList, nil
}
// divideCoins divides the list of coins w.r.t. their version and sorts them by value if needed.
func divideCoins(coinList []coin.PlainCoin, idxList []*big.Int, needSorted bool) ([]coin.PlainCoin, []coin.PlainCoin, []uint64, error) {
if idxList != nil {
if len(coinList) != len(idxList) {
return nil, nil, nil, fmt.Errorf("cannot divide coins: length of coin (%v) != length of index (%v)", len(coinList), len(idxList))
}
}
coinV1List := make([]coin.PlainCoin, 0)
coinV2List := make([]coin.PlainCoin, 0)
idxV2List := make([]uint64, 0)
for i, inputCoin := range coinList {
if inputCoin.GetVersion() == 2 {
tmpCoin, ok := inputCoin.(*coin.CoinV2)
if !ok {
return nil, nil, nil, fmt.Errorf("cannot parse coinV2")
}
coinV2List = append(coinV2List, tmpCoin)
if idxList != nil {
if idxList[i] == nil {
return nil, nil, nil, fmt.Errorf("idx of coinV2 %v is nil: (idxList: %v)", i, idxList)
}
idxV2List = append(idxV2List, idxList[i].Uint64())
}
} else {
tmpCoin, ok := inputCoin.(*coin.PlainCoinV1)
if !ok {
return nil, nil, nil, fmt.Errorf("cannot parse coinV2")
}
coinV1List = append(coinV1List, tmpCoin)
}
}
if needSorted {
sort.Slice(coinV1List, func(i, j int) bool {
return coinV1List[i].GetValue() > coinV1List[j].GetValue()
})
sort.Slice(coinV2List, func(i, j int) bool {
return coinV2List[i].GetValue() > coinV2List[j].GetValue()
})
var err error
idxV2List, err = getListIdx(coinV2List, coinList, idxList)
if err != nil {
return nil, nil, nil, err
}
}
return coinV1List, coinV2List, idxV2List, nil
}
func getListIdx(inCoins []coin.PlainCoin, allCoins []coin.PlainCoin, allIdx []*big.Int) ([]uint64, error) {
if len(allIdx) == 0 {
return []uint64{}, nil
}
res := make([]uint64, 0)
for _, inCoin := range inCoins {
for i, c := range allCoins {
if c.GetVersion() != 2 {
continue
}
if c.GetPublicKey().String() == inCoin.GetPublicKey().String() {
res = append(res, allIdx[i].Uint64())
break
}
}
}
if len(res) != len(inCoins) {
return nil, fmt.Errorf("some coin cannot be retrieved")
}
return res, nil
}
// getVersionFromInputCoins checks that all of the given input coins have the same version, and returns that version.
func getVersionFromInputCoins(inputCoins []coin.PlainCoin) (uint8, error) {
if len(inputCoins) == 0 {
return 0, fmt.Errorf("no coin to check")
}
version := inputCoins[0].GetVersion()
for i := 1; i < len(inputCoins); i++ {
if inputCoins[i].GetVersion() != version {
return 0, fmt.Errorf("expect input coin %v to have version %v, but got %v", i, version, inputCoins[i].GetVersion())
}
}
return version, nil
}
func (client *IncClient) getRandomCommitmentV1(inputCoins []coin.PlainCoin, tokenID string) (map[string]interface{}, error) {
if len(inputCoins) == 0 {
return nil, fmt.Errorf("no input coin to retrieve random commitments, tokenID: %v", tokenID)
}
outCoinList := make([]jsonresult.OutCoin, 0)
for _, inputCoin := range inputCoins {
outCoin := jsonresult.NewOutCoin(inputCoin)
outCoin.Conceal()
outCoinList = append(outCoinList, outCoin)
}
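	// The shard of a key is derived from the last byte of its public key.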
lastByte := inputCoins[0].GetPublicKey().ToBytesS()[len(inputCoins[0].GetPublicKey().ToBytesS())-1]
shardID := common.GetShardIDFromLastByte(lastByte)
responseInBytes, err := client.rpcServer.RandomCommitments(shardID, outCoinList, tokenID)
if err != nil {
return nil, err
}
var randomCommitment jsonresult.RandomCommitmentResult
err = rpchandler.ParseResponse(responseInBytes, &randomCommitment)
if err != nil {
return nil, err
}
commitmentList := make([]*crypto.Point, 0)
for _, commitmentStr := range randomCommitment.Commitments {
cmtBytes, _, err := base58.Base58Check{}.Decode(commitmentStr)
if err != nil {
return nil, fmt.Errorf("cannot decode commitment %v: %v", commitmentStr, err)
}
commitment, err := new(crypto.Point).FromBytesS(cmtBytes)
if err != nil {
return nil, fmt.Errorf("cannot parse commitment %v: %v", cmtBytes, err)
}
commitmentList = append(commitmentList, commitment)
}
result := make(map[string]interface{})
result[utils.CommitmentIndices] = randomCommitment.CommitmentIndices
result[utils.MyIndices] = randomCommitment.MyCommitmentIndices
result[utils.Commitments] = commitmentList
return result, nil
}
func (client *IncClient) getRandomCommitmentV2(shardID byte, tokenID string, lenDecoy int) (map[string]interface{}, error) {
if lenDecoy == 0 {
return nil, fmt.Errorf("no input coin to retrieve random commitments")
}
responseInBytes, err := client.rpcServer.RandomCommitmentsAndPublicKeys(shardID, tokenID, lenDecoy)
if err != nil {
return nil, err
}
var randomCmtAndPk jsonresult.RandomCommitmentAndPublicKeyResult
err = rpchandler.ParseResponse(responseInBytes, &randomCmtAndPk)
if err != nil {
return nil, err
}
commitmentList := make([]*crypto.Point, 0)
for _, commitmentStr := range randomCmtAndPk.Commitments {
cmtBytes, _, err := base58.Base58Check{}.Decode(commitmentStr)
if err != nil {
return nil, fmt.Errorf("cannot decode commitment %v: %v", commitmentStr, err)
}
commitment, err := new(crypto.Point).FromBytesS(cmtBytes)
if err != nil {
return nil, fmt.Errorf("cannot parse commitment %v: %v", cmtBytes, err)
}
commitmentList = append(commitmentList, commitment)
}
pkList := make([]*crypto.Point, 0)
for _, pubKeyStr := range randomCmtAndPk.PublicKeys {
pkBytes, _, err := base58.Base58Check{}.Decode(pubKeyStr)
if err != nil {
return nil, fmt.Errorf("cannot decode public key %v: %v", pubKeyStr, err)
}
pk, err := new(crypto.Point).FromBytesS(pkBytes)
if err != nil {
return nil, fmt.Errorf("cannot parse public key %v: %v", pkBytes, err)
}
pkList = append(pkList, pk)
}
assetTagList := make([]*crypto.Point, 0)
for _, assetStr := range randomCmtAndPk.AssetTags {
assetBytes, _, err := base58.Base58Check{}.Decode(assetStr)
if err != nil {
return nil, fmt.Errorf("cannot decode assetTag %v: %v", assetStr, err)
}
assetTag, err := new(crypto.Point).FromBytesS(assetBytes)
if err != nil {
return nil, fmt.Errorf("cannot parse assetTag %v: %v", assetBytes, err)
}
assetTagList = append(assetTagList, assetTag)
}
result := make(map[string]interface{})
result[utils.CommitmentIndices] = randomCmtAndPk.CommitmentIndices
result[utils.Commitments] = commitmentList
result[utils.PublicKeys] = pkList
result[utils.AssetTags] = assetTagList
return result, nil
}
// initParams queries and chooses coins to spend + init random params.
func (client *IncClient) initParams(privateKey string, tokenIDStr string, totalAmount uint64, hasPrivacy bool, version int) ([]coin.PlainCoin, map[string]interface{}, error) {
_, err := new(common.Hash).NewHashFromStr(tokenIDStr)
if err != nil {
return nil, nil, err
}
//Create sender private key from string
senderWallet, err := wallet.Base58CheckDeserialize(privateKey)
if err != nil {
return nil, nil, fmt.Errorf("cannot init private key %v: %v", privateKey, err)
}
lastByteSender := senderWallet.KeySet.PaymentAddress.Pk[len(senderWallet.KeySet.PaymentAddress.Pk)-1]
shardID := common.GetShardIDFromLastByte(lastByteSender)
//fmt.Printf("Getting UTXOs for tokenID %v...\n", tokenIDStr)
//Get list of UTXOs
utxoList, idxList, err := client.GetUnspentOutputCoins(privateKey, tokenIDStr, 0)
if err != nil {
return nil, nil, err
}
//fmt.Printf("Finish getting UTXOs for %v of %v. Length of UTXOs: %v\n", totalAmount, tokenIDStr, len(utxoList))
coinV1List, coinV2List, idxV2List, err := divideCoins(utxoList, idxList, true)
if err != nil {
return nil, nil, fmt.Errorf("cannot divide coin: %v", err)
}
var coinsToSpend []coin.PlainCoin
var kvArgs = make(map[string]interface{})
if version == 1 {
//Choose best coins for creating transactions
coinsToSpend, _, err = chooseBestCoinsByAmount(coinV1List, totalAmount)
if err != nil {
return nil, nil, err
}
if hasPrivacy {
//fmt.Printf("Getting random commitments for %v.\n", tokenIDStr)
//Retrieve commitments and indices
kvArgs, err = client.getRandomCommitmentV1(coinsToSpend, tokenIDStr)
if err != nil {
return nil, nil, err
}
//fmt.Printf("Finish getting random commitments.\n")
}
return coinsToSpend, kvArgs, nil
} else {
var chosenIdxList []uint64
coinsToSpend, chosenIdxList, err = chooseBestCoinsByAmount(coinV2List, totalAmount)
if err != nil {
return nil, nil, err
}
//fmt.Printf("Getting random commitments for %v.\n", tokenIDStr)
//Retrieve commitments and indices
kvArgs, err = client.getRandomCommitmentV2(shardID, tokenIDStr, len(coinsToSpend)*(privacy.RingSize-1))
if err != nil {
return nil, nil, err
}
//fmt.Printf("Finish getting random commitments.\n")
idxToSpendPRV := make([]uint64, 0)
for _, idx := range chosenIdxList {
idxToSpendPRV = append(idxToSpendPRV, idxV2List[idx])
}
kvArgs[utils.MyIndices] = idxToSpendPRV
return coinsToSpend, kvArgs, nil
}
}
// initParamsV1 queries and chooses coins to spend + init random params v1.
func (client *IncClient) initParamsV1(txParam *TxParam, tokenIDStr string, totalAmount uint64, hasPrivacy bool) ([]coin.PlainCoin, map[string]interface{}, error) {
_, err := new(common.Hash).NewHashFromStr(tokenIDStr)
if err != nil {
return nil, nil, err
}
//Create sender private key from string
privateKey := txParam.senderPrivateKey
var coinsToSpend []coin.PlainCoin
if txParam.kArgs != nil { // in case we use provided input coins to init the transaction.
var ok bool
var cpInterface interface{}
if tokenIDStr == common.PRVIDStr {
cpInterface, ok = txParam.kArgs[prvInCoinKey]
} else {
cpInterface, ok = txParam.kArgs[tokenInCoinKey]
}
if ok {
cp, ok := cpInterface.(coinParams)
if ok {
v, _ := getVersionFromInputCoins(cp.coinList)
if v == 1 {
coinsToSpend = cp.coinList
}
}
}
}
if coinsToSpend == nil {
//Get list of UTXOs
utxoList, idxList, err := client.GetUnspentOutputCoins(privateKey, tokenIDStr, 0)
if err != nil {
return nil, nil, err
}
//fmt.Printf("Finish getting UTXOs for %v of %v. Length of UTXOs: %v\n", totalAmount, tokenIDStr, len(utxoList))
coinV1List, _, _, err := divideCoins(utxoList, idxList, true)
if err != nil {
return nil, nil, fmt.Errorf("cannot divide coin: %v", err)
}
//Choose best coins for creating transactions
coinsToSpend, _, err = chooseBestCoinsByAmount(coinV1List, totalAmount)
if err != nil {
return nil, nil, err
}
}
var kvArgs = make(map[string]interface{})
if hasPrivacy {
//Retrieve commitments and indices
kvArgs, err = client.getRandomCommitmentV1(coinsToSpend, tokenIDStr)
if err != nil {
return nil, nil, err
}
}
return coinsToSpend, kvArgs, nil
}
// initParamsV2 queries and chooses coins to spend + init random params v2.
func (client *IncClient) initParamsV2(txParam *TxParam, tokenIDStr string, totalAmount uint64) ([]coin.PlainCoin, map[string]interface{}, error) {
_, err := new(common.Hash).NewHashFromStr(tokenIDStr)
if err != nil {
return nil, nil, err
}
//Create sender private key from string
privateKey := txParam.senderPrivateKey
senderWallet, err := wallet.Base58CheckDeserialize(privateKey)
if err != nil {
return nil, nil, fmt.Errorf("cannot init private key %v: %v", privateKey, err)
}
lastByteSender := senderWallet.KeySet.PaymentAddress.Pk[len(senderWallet.KeySet.PaymentAddress.Pk)-1]
shardID := common.GetShardIDFromLastByte(lastByteSender)
var coinsToSpend []coin.PlainCoin
var myIndices []uint64
if txParam.kArgs != nil { // in case we use provided input coins to init the transaction.
var ok bool
var cpInterface interface{}
if tokenIDStr == common.PRVIDStr {
cpInterface, ok = txParam.kArgs[prvInCoinKey]
} else {
cpInterface, ok = txParam.kArgs[tokenInCoinKey]
}
if ok {
cp, ok := cpInterface.(coinParams)
if ok {
v, _ := getVersionFromInputCoins(cp.coinList)
if v == 2 {
coinsToSpend = cp.coinList
myIndices = cp.idxList
}
}
}
	}
if coinsToSpend == nil {
//Get list of UTXOs
utxoList, idxList, err := client.GetUnspentOutputCoins(privateKey, tokenIDStr, 0)
if err != nil {
return nil, nil, err
}
_, coinV2List, idxV2List, err := divideCoins(utxoList, idxList, true)
if err != nil {
return nil, nil, fmt.Errorf("cannot divide coin: %v", err)
}
var chosenIdxList []uint64
coinsToSpend, chosenIdxList, err = chooseBestCoinsByAmount(coinV2List, totalAmount)
if err != nil {
return nil, nil, err
}
myIndices = make([]uint64, 0)
for _, idx := range chosenIdxList {
myIndices = append(myIndices, idxV2List[idx])
}
}
//Retrieve commitments and indices
var kvArgs = make(map[string]interface{})
kvArgs, err = client.getRandomCommitmentV2(shardID, tokenIDStr, len(coinsToSpend)*(privacy.RingSize-1))
if err != nil {
return nil, nil, err
}
kvArgs[utils.MyIndices] = myIndices
return coinsToSpend, kvArgs, nil
}
// GetTokenFee returns the token fee per kb.
func (client *IncClient) GetTokenFee(shardID byte, tokenIDStr string) (uint64, error) {
if tokenIDStr == common.PRVIDStr {
return DefaultPRVFee, nil
}
responseInBytes, err := client.rpcServer.EstimateFeeWithEstimator(-1, shardID, 10, tokenIDStr)
if err != nil {
return 0, err
}
var feeEstimateResult rpc.EstimateFeeResult
err = rpchandler.ParseResponse(responseInBytes, &feeEstimateResult)
if err != nil {
return 0, err
}
return feeEstimateResult.EstimateFeeCoinPerKb, nil
}
// GetTxDetail retrieves the transaction detail from its hash.
func (client *IncClient) GetTxDetail(txHash string) (*jsonresult.TransactionDetail, error) {
responseInBytes, err := client.rpcServer.GetTransactionByHash(txHash)
if err != nil {
return nil, err
}
var txDetail jsonresult.TransactionDetail
err = rpchandler.ParseResponse(responseInBytes, &txDetail)
if err != nil {
return nil, err
}
	return &txDetail, nil
}
// GetTx retrieves the transaction detail and parses it to a transaction object.
func (client *IncClient) GetTx(txHash string) (metadata.Transaction, error) {
txDetail, err := client.GetTxDetail(txHash)
if err != nil {
return nil, err
}
return jsonresult.ParseTxDetail(*txDetail)
}
// GetTxs retrieves transactions and parses them to transaction objects given their hashes.
// By default, it will not re-calculate the hashes of the transactions. Set `hashReCheck` to true to re-check the hashes.
func (client *IncClient) GetTxs(txHashList []string, hashReCheck ...bool) (map[string]metadata.Transaction, error) {
responseInBytes, err := client.rpcServer.GetEncodedTransactionsByHashes(txHashList)
if err != nil {
return nil, err
}
mapRes := make(map[string]string)
err = rpchandler.ParseResponse(responseInBytes, &mapRes)
if err != nil {
		return nil, err
}
res := make(map[string]metadata.Transaction)
doubleCheck := false
if len(hashReCheck) > 0 {
doubleCheck = hashReCheck[0]
}
for txHash, encodedTx := range mapRes {
txBytes, _, err := base58.Base58Check{}.Decode(encodedTx)
if err != nil {
Logger.Printf("base58-decode failed: %v\n", string(txBytes))
return nil, err
}
txChoice, err := transaction.DeserializeTransactionJSON(txBytes)
if err != nil {
Logger.Printf("unMarshal failed: %v\n", string(txBytes))
return nil, err
}
tx := txChoice.ToTx()
if doubleCheck && tx.Hash().String() != txHash {
Logger.Printf("txParseFail: %v\n", string(txBytes))
return nil, fmt.Errorf("txHash changes after unmarshalling, expect %v, got %v", txHash, tx.Hash().String())
}
res[txHash] = tx
}
return res, nil
}
// GetTransactionHashesByReceiver retrieves the list of all transactions received by a payment address.
func (client *IncClient) GetTransactionHashesByReceiver(paymentAddress string) ([]string, error) {
responseInBytes, err := client.rpcServer.GetTxHashByReceiver(paymentAddress)
if err != nil {
return nil, err
}
var tmpRes map[string][]string
err = rpchandler.ParseResponse(responseInBytes, &tmpRes)
if err != nil {
return nil, err
}
res := make([]string, 0)
for _, txList := range tmpRes {
res = append(res, txList...)
}
return res, nil
}
// GetTransactionsByReceiver retrieves the list of all transactions (in object) received by a payment address.
//
// Notice that this function is time-consuming since it has to parse every single transaction into an object.
func (client *IncClient) GetTransactionsByReceiver(paymentAddress string) (map[string]metadata.Transaction, error) {
txList, err := client.GetTransactionHashesByReceiver(paymentAddress)
if err != nil {
return nil, err
}
fmt.Printf("#Txs: %v\n", len(txList))
count := 0
start := time.Now()
res := make(map[string]metadata.Transaction)
for _, txHash := range txList {
tx, err := client.GetTx(txHash)
if err != nil {
return nil, fmt.Errorf("cannot retrieve tx %v: %v", txHash, err)
}
res[txHash] = tx
count += 1
if count%5 == 0 {
Logger.Printf("count %v, timeElapsed: %v\n", count, time.Since(start).Seconds())
}
}
return res, nil
}
// GetTxHashByPublicKeys retrieves the hashes of all transactions sent to a list of public keys.
func (client *IncClient) GetTxHashByPublicKeys(publicKeys []string) (map[string][]string, error) {
responseInBytes, err := client.rpcServer.GetTxHashByPublicKey(publicKeys)
if err != nil {
return nil, err
}
tmpRes := make(map[string]map[byte][]string)
err = rpchandler.ParseResponse(responseInBytes, &tmpRes)
if err != nil {
return nil, err
}
res := make(map[string][]string)
for publicKeyStr, txMap := range tmpRes {
txList := make([]string, 0)
for _, tmpTxList := range txMap {
txList = append(txList, tmpTxList...)
}
res[publicKeyStr] = txList
}
return res, nil
}
// GetTransactionsByPublicKeys retrieves the list of all transactions (in object) sent to a list of base58-encoded public keys.
//
// Notice that this function is time-consuming since it has to parse every single transaction into an object.
func (client *IncClient) GetTransactionsByPublicKeys(publicKeys []string) (map[string]map[string]metadata.Transaction, error) {
txMap, err := client.GetTxHashByPublicKeys(publicKeys)
if err != nil {
return nil, err
}
res := make(map[string]map[string]metadata.Transaction)
for publicKeyStr, txList := range txMap {
tmpRes := make(map[string]metadata.Transaction)
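		// Fetch transactions in batches of pageSize (100), presumably to keep
		// each RPC request to a manageable size.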
for current := 0; current < len(txList); current += pageSize {
next := current + pageSize
if next > len(txList) {
next = len(txList)
}
mapRes, err := client.GetTxs(txList[current:next])
if err != nil {
return nil, err
}
for txHash, tx := range mapRes {
tmpRes[txHash] = tx
}
}
res[publicKeyStr] = tmpRes
}
return res, nil
}
// GetTxHashBySerialNumbers retrieves, for the given tokenIDStr, the hashes of the transactions in which the given serial numbers were spent.
//
// Set shardID = 255 to retrieve in all shards.
func (client *IncClient) GetTxHashBySerialNumbers(snList []string, tokenIDStr string, shardID byte) (map[string]string, error) {
responseInBytes, err := client.rpcServer.GetTxHashBySerialNumber(snList, tokenIDStr, shardID)
if err != nil {
return nil, err
}
res := make(map[string]string)
err = rpchandler.ParseResponse(responseInBytes, &res)
if err != nil {
return nil, err
}
return res, nil
}
// CheckTxInBlock checks if a transaction has been included in a block or not.
func (client *IncClient) CheckTxInBlock(txHash string) (bool, error) {
txDetail, err := client.GetTxDetail(txHash)
if err != nil {
return false, err
}
if txDetail.IsInMempool {
return false, nil
}
return txDetail.IsInBlock, nil
}
// GetReceivingInfo checks whether a transaction sends outputs to the given `otaKey` and returns the transacted tokenIds.
// Furthermore, in case a read-only key is given, it will
// decrypt the received output coins and return the total amounts. If there are multiple read-only keys,
// only the first one is used.
func (client *IncClient) GetReceivingInfo(
txHash string,
otaKey string,
readonlyKey ...string,
) (received bool, mapResult map[string]uint64, err error) {
mapResult = make(map[string]uint64)
// deserialize the ota key
w, err := wallet.Base58CheckDeserialize(otaKey)
if err != nil || w.KeySet.OTAKey.GetOTASecretKey() == nil || w.KeySet.OTAKey.GetPublicSpend() == nil {
err = fmt.Errorf("otaKey is invalid: %v", err)
return
}
keySet := w.KeySet
keySet.PaymentAddress = key.PaymentAddress{Pk: keySet.OTAKey.GetPublicSpend().ToBytesS()}
	// deserialize the read-only key (if provided)
if len(readonlyKey) > 0 {
tmpWallet, tmpErr := wallet.Base58CheckDeserialize(readonlyKey[0])
if tmpErr != nil ||
tmpWallet.KeySet.ReadonlyKey.GetPublicSpend() == nil ||
tmpWallet.KeySet.ReadonlyKey.GetPrivateView() == nil {
err = fmt.Errorf("readonlyKey is invalid: %v", tmpErr)
return
}
keySet.ReadonlyKey = tmpWallet.KeySet.ReadonlyKey
}
// get the transaction detail
tmpTxs, err := client.GetTxs([]string{txHash})
if err != nil {
return
}
tx := tmpTxs[txHash]
tokenIdStr := tx.GetTokenID().String()
// get the output coins
outCoins := make([]coin.Coin, 0)
switch tx.GetType() {
case common.TxCustomTokenPrivacyType, common.TxTokenConversionType:
txToken, ok := tx.(tx_generic.TransactionToken)
if !ok {
err = fmt.Errorf("cannot parse tx as a token transaction")
return
}
		// get the PRV amount (if any)
if txToken.GetTxBase() != nil {
prvAmount, err := getTxOutputAmountByKeySet(txToken, common.PRVIDStr, &keySet)
if err != nil {
Logger.Printf("get PRV amount error: %v\n", err)
}
if prvAmount > 0 {
received = true
}
mapResult[common.PRVIDStr] = prvAmount
}
txNormal := txToken.GetTxNormal()
if txNormal.GetProof() != nil && txNormal.GetProof().GetOutputCoins() != nil {
outCoins = append(outCoins, txNormal.GetProof().GetOutputCoins()...)
}
case common.TxNormalType, common.TxRewardType, common.TxReturnStakingType, common.TxConversionType:
prvAmount, err := getTxOutputAmountByKeySet(tx, common.PRVIDStr, &keySet)
if err != nil {
Logger.Printf("get PRV amount error: %v\n", err)
}
if prvAmount > 0 {
received = true
}
mapResult[common.PRVIDStr] = prvAmount
default:
err = fmt.Errorf("transaction type `%v` is invalid", tx.GetType())
}
if len(outCoins) == 0 {
err = fmt.Errorf("transaction does not have output coins")
}
// getAssetTags
assetTags, err := client.GetAllAssetTags()
if err != nil {
return
}
	// check whether any output coins belong to the `keySet`, and decrypt them if a read-only key is present.
var plainCoin coin.PlainCoin
var tmpTokenId *common.Hash
for _, outCoin := range outCoins {
belong, _ := outCoin.DoesCoinBelongToKeySet(&keySet)
if belong {
received = true
// try to decrypt first
amount := uint64(0)
plainCoin, _ = outCoin.Decrypt(&keySet)
if plainCoin != nil {
amount = plainCoin.GetValue()
}
switch tokenIdStr {
case common.ConfidentialAssetID.String():
if tmpTokenId == nil {
tmpTokenId, err = outCoin.(*coin.CoinV2).GetTokenId(&keySet, assetTags)
if err != nil {
return
}
}
mapResult[tmpTokenId.String()] += amount
default:
mapResult[tokenIdStr] += amount
}
}
}
return
}
| {
return nil, nil, fmt.Errorf("not enough coin to spend")
} |
index.js | import React, { useState } from "react";
import { useSelector} from "react-redux";
import StartPhotos from "../StartPhotos";
import LogIn from "../LogIn";
import SingIn from "../SingIn";
import "./style.scss";
function Auth() {
const [loginVisible, setLoginVisible] = useState(true);
  const error = useSelector((state) => state.auth.error) | const buttonName = loginVisible ? "Sign up" : "Log in";
return (
<>
<div style={{ paddingTop: '60px'}} className="row">
<div className="col-md-6">
<button
className="btn btn-link btn-sm"
onClick={() => setLoginVisible(!loginVisible)}
>
{buttonName}
</button>
{loginVisible ? (
<>
<LogIn />
</>
) : (
<>
<SingIn />
</>
)}
<div>
{error}
</div>
</div>
<div className="col-md-6">
<StartPhotos />
</div>
</div>
</>
);
}
export default Auth; | |
__init__.py | from .connection import Connection | ||
app.container.ts | import { container } from '../../../../src';
import { IService1, Service1, IService2, Service2, IService3, Service3, Service4, IService4 } from '~/shared';
export function | (): void {
container.addSingleton<IService1>(Service1);
container.addSingleton<IService2>(Service2);
container.addSingleton<IService3>(Service3);
container.addSingleton<IService4>(Service4);
}
| containerBuilder |
The Running Race.rs | use std::io;
fn main() {
let mut buf = String::new();
io::stdin().read_line(&mut buf).unwrap();
let mut buf = buf.split_whitespace();
let D: u8 = buf.next().unwrap().parse().unwrap();
let X: u8 = buf.next().unwrap().parse().unwrap();
let Y: u8 = buf.next().unwrap().parse().unwrap();
match X > Y {
true => print!("Alex"),
false => match X < Y {
true => print!("Ryan"), | false => print!("Draw"),
},
};
} |
|
azure_com.py | import requests
import stickerify
from credentials import CREDENTIALS
from errors import NoFace
AZURE_LINK = CREDENTIALS["AZURE_LINK"]
def | (url):
r = requests.post(AZURE_LINK + url)
return r.json()
def get_sticker_from_photo(url):
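    # The endpoint is assumed to return a Face-API-style JSON object that may
    # contain an 'emotion' score map and a 'faceRectangle' bounding box; both
    # keys are treated as optional below.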
image_info = get_image_info(url)
if ('emotion' in image_info):
max_val = 0
for key, value in image_info['emotion'].items():
if max_val < value:
(max_val, emotion) = (value, key)
else:
emotion = "not_detected"
if ('faceRectangle' in image_info):
coords = image_info['faceRectangle']
else:
coords = None
image_path = stickerify.sticker_from_rectangle(coords, url)
return {
"path": image_path,
"emotion": emotion,
}
| get_image_info |
index.ts | export { default as GridColumnProvider } from "./GridColumnProvider"; |
||
7.go | package main
import (
"fmt"
"math"
)
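// eratosthenesSieve returns the first n primes. For n > 6, the sieve size
// uses the Rosser-style bound p_n < n*(ln n + ln(ln n)), which guarantees the
// n-th prime fits inside the sieve; smaller n fall back to a bound of 100.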
func eratosthenesSieve(n int) []int {
upperBound := 100
if n > 6 {
t := float64(n)
upperBound = int(math.Ceil(t * (math.Log(t) + math.Log(math.Log(t)))))
}
sieve := make([]bool, upperBound)
for i := 0; i < upperBound; i++ {
sieve[i] = true
}
for i := 2; i < upperBound; i++ {
if sieve[i] {
for j := i * i; j < upperBound; j += i {
sieve[j] = false
}
}
}
var primes []int
for i := 0; i < upperBound; i++ {
if sieve[i] {
primes = append(primes, i)
}
}
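	// sieve[0] and sieve[1] are never cleared, so 0 and 1 end up at the front
	// of primes; slice them off and keep exactly n entries.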
return primes[2 : n+2]
}
func main() | {
fmt.Println(eratosthenesSieve(10001)[10000])
} |
|
app.js | const { archivoTabla } = require('./helpers/multiplicar');
const argv = require('./config/yargs');
require('colors')
| console.clear();
archivoTabla(argv.b, argv.l, argv.h)
    .then(nombreArchivoTabla => console.log(nombreArchivoTabla.rainbow, 'Created successfully.'.rainbow))
.catch(err => console.error(err)); | |
index.js | "use strict";
let router = require('express').Router(); | res.status(200).send('Inventory Microservice');
});
module.exports = router; |
router.get('/', (req, res, next)=>{ |
lib.rs | #![allow(clippy::module_inception)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::ptr_arg)]
#![allow(clippy::large_enum_variant)]
#![doc = "generated by AutoRust 0.1.0"]
#[cfg(feature = "package-2021-08")]
pub mod package_2021_08;
#[cfg(all(feature = "package-2021-08", not(feature = "no-default-version")))]
pub use package_2021_08::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2021-07")]
pub mod package_2021_07;
#[cfg(all(feature = "package-2021-07", not(feature = "no-default-version")))] | #[cfg(all(feature = "package-2021-06", not(feature = "no-default-version")))]
pub use package_2021_06::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2021-04")]
pub mod package_2021_04;
#[cfg(all(feature = "package-2021-04", not(feature = "no-default-version")))]
pub use package_2021_04::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2021-03")]
pub mod package_2021_03;
#[cfg(all(feature = "package-2021-03", not(feature = "no-default-version")))]
pub use package_2021_03::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2021-02-10")]
pub mod package_2021_02_10;
#[cfg(all(feature = "package-2021-02-10", not(feature = "no-default-version")))]
pub use package_2021_02_10::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2021-02-preview")]
pub mod package_2021_02_preview;
#[cfg(all(feature = "package-2021-02-preview", not(feature = "no-default-version")))]
pub use package_2021_02_preview::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2021-02")]
pub mod package_2021_02;
#[cfg(all(feature = "package-2021-02", not(feature = "no-default-version")))]
pub use package_2021_02::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2021-01")]
pub mod package_2021_01;
#[cfg(all(feature = "package-2021-01", not(feature = "no-default-version")))]
pub use package_2021_01::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2020-12")]
pub mod package_2020_12;
#[cfg(all(feature = "package-2020-12", not(feature = "no-default-version")))]
pub use package_2020_12::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2020-02")]
pub mod package_2020_02;
#[cfg(all(feature = "package-2020-02", not(feature = "no-default-version")))]
pub use package_2020_02::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2019-06")]
pub mod package_2019_06;
#[cfg(all(feature = "package-2019-06", not(feature = "no-default-version")))]
pub use package_2019_06::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2019-05")]
pub mod package_2019_05;
#[cfg(all(feature = "package-2019-05", not(feature = "no-default-version")))]
pub use package_2019_05::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2017-07")]
pub mod package_2017_07;
#[cfg(all(feature = "package-2017-07", not(feature = "no-default-version")))]
pub use package_2017_07::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2016-06")]
pub mod package_2016_06;
#[cfg(all(feature = "package-2016-06", not(feature = "no-default-version")))]
pub use package_2016_06::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2016-08")]
pub mod package_2016_08;
#[cfg(all(feature = "package-2016-08", not(feature = "no-default-version")))]
pub use package_2016_08::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2016-12")]
pub mod package_2016_12;
#[cfg(all(feature = "package-2016-12", not(feature = "no-default-version")))]
pub use package_2016_12::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2017-07-only")]
pub mod package_2017_07_only;
#[cfg(all(feature = "package-2017-07-only", not(feature = "no-default-version")))]
pub use package_2017_07_only::{models, operations, operations::Client, operations::ClientBuilder, operations::Error}; | pub use package_2021_07::{models, operations, operations::Client, operations::ClientBuilder, operations::Error};
#[cfg(feature = "package-2021-06")]
pub mod package_2021_06; |
CSVreader.py | import csv
def ClassFactory(class_name, dictionary):
return type(class_name, (object,), dictionary)
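# Note: type(name, bases, dict) builds a *class* whose attributes are the
# row's columns, so return_data_object yields one class per CSV row rather
# than instances of a shared class.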
class CsvReader:
def __init__(self, filepath):
self.data = []
with open(filepath) as csv_files:
csv_data = csv.DictReader(csv_files, delimiter=',')
for row in csv_data:
self.data.append(row)
def return_data_object(self, class_name):
objects = []
for row in self.data: | objects.append(ClassFactory(class_name, row))
return objects |
|
metadata.rs | #[macro_use]
extern crate collect_mac;
extern crate env_logger;
extern crate gluon_base as base;
extern crate gluon_check as check;
extern crate gluon_parser as parser;
use base::ast::{Argument, SpannedExpr};
use base::metadata::{Attribute, Comment, CommentType, Metadata, MetadataEnv};
use base::symbol::{Symbol, SymbolRef};
fn metadata(env: &MetadataEnv, expr: &mut SpannedExpr<Symbol>) -> Metadata {
check::metadata::metadata(env, expr).0
}
mod support;
use support::intern;
struct MockEnv;
impl MetadataEnv for MockEnv {
fn get_metadata(&self, _id: &SymbolRef) -> Option<&Metadata> {
None
}
}
fn line_comment(s: &str) -> Comment {
Comment {
typ: CommentType::Line,
content: s.into(),
}
}
#[test]
fn propagate_metadata_let_in() {
let _ = env_logger::try_init();
let text = r#"
/// The identity function
let id x = x
id
"#;
let (mut expr, result) = support::typecheck_expr(text);
assert!(result.is_ok(), "{}", result.unwrap_err());
let metadata = metadata(&MockEnv, &mut expr);
assert_eq!(
metadata,
Metadata {
comment: Some(line_comment("The identity function")),
args: vec![Argument::explicit(intern("x:35"))],
..Metadata::default()
}
);
}
#[test]
fn propagate_metadata_let_record() {
let _ = env_logger::try_init();
let text = r#"
/// The identity function
let id x = x
{ id }
"#;
let (mut expr, result) = support::typecheck_expr(text);
assert!(result.is_ok(), "{}", result.unwrap_err());
let metadata = metadata(&MockEnv, &mut expr);
assert_eq!(
metadata.module.get("id"),
Some(&Metadata {
comment: Some(line_comment("The identity function")),
args: vec![Argument::explicit(intern("x:35"))],
..Metadata::default()
})
);
}
#[test]
fn propagate_metadata_type_record() {
let _ = env_logger::try_init();
let text = r#"
/// A test type
type Test = Int
{ Test }
"#;
let (mut expr, result) = support::typecheck_expr(text);
assert!(result.is_ok(), "{}", result.unwrap_err());
let metadata = metadata(&MockEnv, &mut expr);
assert_eq!(
metadata.module.get("Test"),
Some(&Metadata {
comment: Some(line_comment("A test type")),
..Metadata::default()
})
);
}
#[test]
fn propagate_metadata_record_field_comment() {
let _ = env_logger::try_init();
let text = r#"
{
/// The identity function
id = \x -> x
}
"#;
let (mut expr, result) = support::typecheck_expr(text);
assert!(result.is_ok(), "{}", result.unwrap_err());
let metadata = metadata(&MockEnv, &mut expr);
assert_eq!(
metadata.module.get("id"),
Some(&Metadata {
comment: Some(line_comment("The identity function")),
..Metadata::default()
})
);
}
#[test]
fn projection_has_metadata() {
let _ = env_logger::try_init();
let text = r#"
let x = {
/// The identity function
id = \x -> x
}
x.id
"#;
let (mut expr, result) = support::typecheck_expr(text);
assert!(result.is_ok(), "{}", result.unwrap_err());
let metadata = metadata(&MockEnv, &mut expr);
assert_eq!(
metadata,
Metadata {
comment: Some(line_comment("The identity function")),
..Metadata::default()
}
);
}
#[test]
fn | () {
let _ = env_logger::try_init();
let text = r#"
type Test = {
/// A field
x : Int
}
{ Test }
"#;
let (mut expr, result) = support::typecheck_expr(text);
assert!(result.is_ok(), "{}", result.unwrap_err());
let metadata = metadata(&MockEnv, &mut expr);
assert_eq!(
metadata
.module
.get("Test")
.and_then(|metadata| metadata.module.get("x")),
Some(&Metadata {
comment: Some(line_comment("A field")),
..Metadata::default()
})
);
}
#[test]
fn propagate_metadata_from_types_to_values() {
let _ = env_logger::try_init();
let text = r#"
/// A type
type Test = {
/// A field
x : Int,
/// Another field
y : String,
}
/// Shadowing comment
let test: Test = {
x = 1,
/// Shadowing field comment
y = "",
}
{ test }
"#;
let (mut expr, result) = support::typecheck_expr(text);
assert!(result.is_ok(), "{}", result.unwrap_err());
let metadata = metadata(&MockEnv, &mut expr);
assert_eq!(
metadata
.module
.get("test")
.and_then(|metadata| metadata.module.get("x")),
Some(&Metadata {
comment: Some(line_comment("A field")),
..Metadata::default()
})
);
assert_eq!(
metadata
.module
.get("test")
.and_then(|metadata| metadata.module.get("y")),
Some(&Metadata {
comment: Some(line_comment("Shadowing field comment")),
..Metadata::default()
})
);
assert_eq!(
metadata
.module
.get("test")
.and_then(|metadata| metadata.comment.as_ref()),
Some(&line_comment("Shadowing comment"))
);
}
#[test]
fn propagate_metadata_from_types_through_arg() {
let _ = env_logger::try_init();
let text = r#"
type Test a = {
/// A field
x : a,
}
let x ?test : [Test a] -> a = test.x
{ x }
"#;
let (mut expr, result) = support::typecheck_expr(text);
assert!(result.is_ok(), "{}", result.unwrap_err());
let metadata = metadata(&MockEnv, &mut expr);
assert_eq!(
metadata.module.get("x"),
Some(&Metadata {
comment: Some(line_comment("A field")),
..Metadata::default()
})
);
}
#[test]
fn propagate_metadata_through_argument() {
let _ = env_logger::try_init();
let text = r#"
type Test a = {
/// A field
x : a,
}
let x ?test : [Test a] -> a = test.x
{ x }
"#;
let (mut expr, result) = support::typecheck_expr(text);
assert!(result.is_ok(), "{}", result.unwrap_err());
let metadata = metadata(&MockEnv, &mut expr);
assert_eq!(
metadata.module.get("x"),
Some(&Metadata {
comment: Some(line_comment("A field")),
..Metadata::default()
})
);
}
#[test]
fn propagate_metadata_through_implicits() {
let _ = env_logger::try_init();
let text = r#"
#[attribute]
type Test a = {
x : a,
}
type Wrap a = | Wrap a
let x ?test : [Test a] -> Test (Wrap a) = { x = Wrap test.x }
{ x }
"#;
let (mut expr, result) = support::typecheck_expr(text);
assert!(result.is_ok(), "{}", result.unwrap_err());
let metadata = metadata(&MockEnv, &mut expr);
assert_eq!(
metadata.module.get("x"),
Some(&Metadata {
attributes: vec![Attribute {
name: "attribute".into(),
arguments: None,
}],
args: vec![Argument::implicit(intern("test:76"))],
..Metadata::default()
})
);
}
| propagate_metadata_from_field_in_type |
web.rs | //! Test suite for the Web and headless browsers.
#![cfg(target_arch = "wasm32")]
extern crate wasm_bindgen_test;
use wasm_bindgen_test::*;
use helloworld::start;
wasm_bindgen_test_configure!(run_in_browser);
#[wasm_bindgen_test]
fn pass() {
assert_eq!(1 + 1, 2);
}
#[wasm_bindgen_test]
async fn | () {
start().unwrap();
}
| run |
main.rs | mod gallery;
pub mod ingest;
#[cfg(test)]
mod tests;
mod thumbs;
use std::fs;
use std::io;
use std::io::Read;
use std::io::Write;
use std::path;
use std::sync::Arc;
use std::sync::Mutex;
use failure::Error;
use lazy_static::lazy_static;
use rand::RngCore;
use rouille::input::json::JsonError;
use rouille::input::json_input;
use rouille::input::post;
use rouille::post_input;
use rouille::router;
use rouille::Request;
use rouille::Response;
use serde_json::json;
const BAD_REQUEST: u16 = 400;
type Conn = Arc<Mutex<rusqlite::Connection>>;
lazy_static! {
static ref IMAGE_ID: regex::Regex =
regex::Regex::new("^e/[a-zA-Z0-9]{10}\\.(?:png|jpg|gif)$").unwrap();
static ref GALLERY_SPEC: regex::Regex =
regex::Regex::new("^([a-zA-Z][a-zA-Z0-9]{3,9})!(.{4,99})$").unwrap();
}
fn upload(request: &Request) -> Response {
let params = match post_input!(request, {
image: Vec<post::BufferedFile>,
return_json: Option<String>,
}) {
Ok(params) => params,
Err(_) => return bad_request("invalid / missing parameters"),
};
let image = match params.image.len() {
1 => ¶ms.image[0],
_ => return bad_request("exactly one upload required"),
};
let return_json = match params.return_json {
Some(string) => match string.parse() {
Ok(val) => val,
Err(_) => return bad_request("invalid return_json value"),
},
None => false,
};
match ingest::store(&image.data) {
Ok(image_id) => {
let remote_addr = request.remote_addr();
let remote_forwarded = request.header("X-Forwarded-For");
println!("{:?} {:?}: {}", remote_addr, remote_forwarded, image_id);
if let Err(e) = thumbs::thumbnail(&image_id) {
return log_error("thumbnailing just written", request, &e);
}
if return_json {
data_response(resource_object(image_id, "image"))
} else {
// relative to api/upload
Response::redirect_303(format!("../{}", image_id))
}
}
Err(e) => log_error("storing image", request, &e),
}
}
/// http://jsonapi.org/format/#errors
fn error_object(message: &str) -> Response {
println!("error: {}", message);
Response::json(&json!({ "errors": [
{ "title": message }
] }))
}
fn json_api_validate_obj(obj: &serde_json::Map<String, serde_json::Value>) {
assert!(obj.contains_key("id"), "id is mandatory in {:?}", obj);
assert!(obj.contains_key("type"), "type is mandatory in {:?}", obj);
}
/// panic if something isn't valid json-api.
/// panic is fine because the structure should be static in code
/// could be only tested at debug time..
fn json_api_validate(obj: &serde_json::Value) {
if let Some(obj) = obj.as_object() {
json_api_validate_obj(obj)
} else if let Some(list) = obj.as_array() {
for obj in list {
if let Some(obj) = obj.as_object() {
json_api_validate_obj(obj)
} else {
panic!("array item must be obj, not {:?}", obj);
}
}
} else {
panic!("data response contents must be obj, not {:?}", obj);
}
}
/// http://jsonapi.org/format/#document-top-level
fn data_response(inner: serde_json::Value) -> Response {
json_api_validate(&inner);
Response::json(&json!({ "data": inner }))
}
/// http://jsonapi.org/format/#document-resource-objects
fn resource_object<I: AsRef<str>>(id: I, type_: &'static str) -> serde_json::Value {
json!({ "id": id.as_ref(), "type": type_ })
}
fn bad_request(message: &str) -> Response {
error_object(message).with_status_code(BAD_REQUEST)
}
fn log_error(location: &str, request: &Request, error: &Error) -> Response {
let remote_addr = request.remote_addr();
let remote_forwarded = request.header("X-Forwarded-For");
println!(
"{:?} {:?}: failed: {}: {:?}",
remote_addr, remote_forwarded, location, error
);
error_object(location).with_status_code(500)
}
fn gallery_put(conn: Conn, global_secret: &[u8], request: &Request) -> Response {
let body: serde_json::Value = match json_input(request) {
Ok(body) => body,
Err(JsonError::WrongContentType) => return bad_request("missing/invalid content type"),
Err(other) => {
println!("invalid request: {:?}", other);
return bad_request("missing/invalid content type");
}
};
let body = match body.as_object() {
Some(body) => body,
None => return bad_request("non-object body"),
};
if body.contains_key("errors") {
return bad_request("'errors' must be absent");
}
let body = match body.get("data").and_then(|data| data.as_object()) {
Some(body) => body,
None => return bad_request("missing/invalid data attribute"),
};
if !body
.get("type")
.and_then(|ty| ty.as_str())
.map(|ty| "gallery" == ty)
.unwrap_or(false)
{
return bad_request("missing/invalid type: gallery");
}
let body = match body.get("attributes").and_then(|body| body.as_object()) {
Some(body) => body,
None => return bad_request("missing/invalid type: attributes"),
};
let gallery_input = match body.get("gallery").and_then(|val| val.as_str()) {
Some(string) => string,
None => return bad_request("missing/invalid type: gallery attribute"),
}; | None => return bad_request("missing/invalid type: images"),
};
let mut images = Vec::with_capacity(raw_images.len());
for image in raw_images {
let image = match image.as_str() {
Some(image) => image,
None => return bad_request("non-string image in list"),
};
if !IMAGE_ID.is_match(image) {
return bad_request("invalid image id");
}
if !path::Path::new(image).exists() {
return bad_request("no such image");
}
images.push(image);
}
let (gallery, private) = match GALLERY_SPEC.captures(gallery_input) {
Some(captures) => (
captures.get(1).unwrap().as_str(),
captures.get(2).unwrap().as_str(),
),
None => {
return bad_request(concat!(
"gallery format: name!password, ",
"4-10 letters, pass: 4+ anything"
));
}
};
match gallery::gallery_store(conn, global_secret, gallery, private, &images) {
Ok(public) => data_response(resource_object(public, "gallery")),
Err(e) => log_error("saving gallery item", request, &e),
}
}
#[test]
fn validate_image_id() {
assert!(IMAGE_ID.is_match("e/abcdefghij.png"));
assert!(!IMAGE_ID.is_match(" e/abcdefghij.png"));
assert!(!IMAGE_ID.is_match("e/abcdefghi.png"));
}
fn gallery_get(request: &Request, conn: Conn, public: &str) -> Response {
if public.len() > 32 || public.find(|c: char| !c.is_ascii_graphic()).is_some() {
return bad_request("invalid gallery id");
}
let mut conn = match conn.lock() {
Ok(conn) => conn,
Err(_posion) => {
println!("poisoned! {:?}", _posion);
return error_object("internal error").with_status_code(500);
}
};
match gallery::gallery_list_all(&mut *conn, public) {
Ok(resp) => {
let values: Vec<_> = resp
.into_iter()
.map(|id| json!({"id": id, "type": "image"}))
.collect();
data_response(json!(values))
}
Err(e) => log_error("listing gallery", request, &e),
}
}
fn app_secret() -> Result<[u8; 32], Error> {
let mut buf = [0u8; 32];
let path = path::Path::new(".secret");
if path.exists() {
fs::File::open(path)?.read_exact(&mut buf)?;
} else {
rand::thread_rng().fill_bytes(&mut buf);
fs::File::create(path)?.write_all(&buf)?;
}
Ok(buf)
}
fn gallery_db() -> Result<rusqlite::Connection, Error> {
Ok(rusqlite::Connection::open("gallery.db")?)
}
fn main() -> Result<(), Error> {
let mut conn = gallery_db()?;
gallery::migrate_gallery(&mut conn)?;
thumbs::generate_all_thumbs()?;
let secret = app_secret()?;
let conn = Arc::new(Mutex::new(conn));
rouille::start_server("127.0.0.1:6699", move |request| {
rouille::log(&request, io::stdout(), || {
if let Some(e) = request.remove_prefix("/e") {
return rouille::match_assets(&e, "e");
}
router!(request,
(GET) ["/"] => { static_html("web/index.html") },
(GET) ["/dumb/"] => { static_html("web/dumb/index.html") },
(GET) ["/terms/"] => { static_html("web/terms/index.html") },
(GET) ["/gallery/"] => { static_html("web/gallery/index.html") },
(GET) ["/root.css"] => { static_css ("web/root.css") },
(GET) ["/user.svg"] => { static_svg ("web/user.svg") },
(GET) ["/bundle.js" ] => { static_js ("web/bundle.js") },
(GET) ["/gallery/gallery.css"] => { static_css ("web/gallery/gallery.css") },
(GET) ["/jquery-3.3.1.min.js"] => { static_js ("web/jquery-3.3.1.min.js") },
(POST) ["/api/upload"] => { upload(request) },
(PUT) ["/api/gallery"] => {
gallery_put(conn.clone(), &secret, request)
},
(GET) ["/api/gallery/{public}", public: String] => {
gallery_get(request, conn.clone(), &public)
},
_ => rouille::Response::empty_404()
)
})
});
}
fn static_html(path: &'static str) -> Response {
static_file("text/html", path)
}
fn static_css(path: &'static str) -> Response {
static_file("text/css", path)
}
fn static_js(path: &'static str) -> Response {
static_file("application/javascript", path)
}
fn static_svg(path: &'static str) -> Response {
static_file("image/svg+xml", path)
}
fn static_file(content_type: &'static str, path: &'static str) -> Response {
Response::from_file(content_type, fs::File::open(path).expect("static"))
} |
let raw_images = match body.get("images").and_then(|val| val.as_array()) {
Some(raw_images) => raw_images, |
main.rs | use clap::{App, Arg};
use image::imageops;
use std::path::{Path, PathBuf};
use std::str;
use inversion_list::InversionList;
use pixelsort::interval_func;
use pixelsort::sorting;
#[derive(Clone, Copy)]
pub enum SortingMode {
Lightness,
Intensity,
Minimum,
Maximum,
}
impl SortingMode {
pub fn function<P>(self) -> fn(&P) -> u32
where
P: image::Pixel<Subpixel = u8>,
{
match self {
SortingMode::Lightness => sorting::lightness,
SortingMode::Intensity => sorting::intensity,
SortingMode::Minimum => sorting::chan_max,
SortingMode::Maximum => sorting::chan_min,
}
}
}
impl str::FromStr for SortingMode {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"lightness" => Ok(SortingMode::Lightness),
"intensity" => Ok(SortingMode::Intensity),
"minimum" => Ok(SortingMode::Minimum),
"maximum" => Ok(SortingMode::Maximum),
_ => Err(String::from(s)),
}
}
}
#[derive(Clone, Copy)]
pub enum Rotation {
Zero,
Quarter,
Half,
NegQuarter,
}
impl str::FromStr for Rotation {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let num = s
.parse::<isize>()
.map_err(|e| format!("{:?}", e))?
.rem_euclid(360);
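        // rem_euclid keeps the remainder non-negative, so e.g. "-90"
        // normalizes to 270 and parses as a valid rotation.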
match num {
0 => Ok(Rotation::Zero),
90 => Ok(Rotation::Quarter),
180 => Ok(Rotation::Half),
270 => Ok(Rotation::NegQuarter),
_ => Err(String::from("rotation angle must be a multiple of 90")),
}
}
}
#[derive(Clone, Copy)]
pub enum IntervalFunction {
Full,
#[cfg(feature = "imageproc")]
Edges,
#[cfg(feature = "rand")]
Random,
Threshold,
SplitEqual,
}
impl str::FromStr for IntervalFunction {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"full" => Ok(IntervalFunction::Full),
#[cfg(feature = "imageproc")]
"edge" => Ok(IntervalFunction::Edges),
#[cfg(feature = "rand")]
"random" => Ok(IntervalFunction::Random),
"threshold" => Ok(IntervalFunction::Threshold),
"split" => Ok(IntervalFunction::SplitEqual),
_ => Err(String::from(s)),
}
}
}
// FIXME: clean this mess up
fn main() {
use std::str::FromStr;
let matches = App::new("pixelsort")
.version(clap::crate_version!())
.author(clap::crate_authors!())
.arg(
Arg::with_name("input")
.help("The input image to sort.") | arg_interval(),
arg_output(),
arg_mask(),
arg_upper(),
arg_lower(),
arg_rotation(),
arg_num(),
arg_sorting(),
])
.get_matches();
let input = Path::new(matches.value_of_os("input").unwrap());
let mut image = image::open(input)
.expect("failed to read input image")
.to_rgba();
let output = matches
.value_of_os("output")
.map(PathBuf::from)
.unwrap_or_else(|| {
let extension = input
.extension()
.and_then(std::ffi::OsStr::to_str)
.unwrap_or("png");
input.with_extension(["sorted", ".", extension].concat())
});
let rotate = Rotation::from_str(matches.value_of("rotation").unwrap()).unwrap();
//rotate
match rotate {
Rotation::Quarter => image = imageops::rotate90(&image),
Rotation::Half => image = imageops::rotate180(&image),
Rotation::NegQuarter => image = imageops::rotate270(&image),
Rotation::Zero => (),
}
let sorting_func = SortingMode::from_str(matches.value_of("sorting").unwrap())
.unwrap()
.function();
let interval_func =
IntervalFunction::from_str(matches.value_of("interval_func").unwrap()).unwrap();
let mut intervals = intervals_from_image(&image);
if let Some(mask_path) = matches.value_of_os("mask").map(Path::new) {
let mut mask = image::open(mask_path).unwrap().to_luma();
match rotate {
Rotation::Quarter => mask = imageops::rotate90(&mask),
Rotation::Half => mask = imageops::rotate180(&mask),
Rotation::NegQuarter => mask = imageops::rotate270(&mask),
Rotation::Zero => (),
}
interval_func::mask(&mut intervals, &mask);
}
let upper = matches.value_of("upper").unwrap_or_default();
let lower = matches.value_of("lower").unwrap_or_default();
match interval_func {
IntervalFunction::Full => (),
IntervalFunction::SplitEqual => interval_func::split_equal(
&mut intervals,
matches
.value_of("num")
.unwrap()
.parse()
.expect("num was not an integer"),
),
#[cfg(feature = "imageproc")]
IntervalFunction::Edges => interval_func::edges_canny(
&mut intervals,
&image,
            lower.parse().expect("lower was not a float"),
            upper.parse().expect("upper was not a float"),
),
#[cfg(feature = "rand")]
IntervalFunction::Random => interval_func::random(
&mut intervals,
lower.parse().expect("lower was not an integer"),
upper.parse().expect("upper was not an integer"),
),
IntervalFunction::Threshold => interval_func::threshold(
&mut intervals,
&image,
lower.parse().expect("lower was not a byte integer"),
upper.parse().expect("upper was not a byte integer"),
),
};
pixelsort::sort_image(&mut image, intervals, sorting_func);
// rotate back
match rotate {
Rotation::Quarter => image = imageops::rotate270(&image),
Rotation::Half => image = imageops::rotate180(&image),
Rotation::NegQuarter => image = imageops::rotate90(&image),
Rotation::Zero => (),
}
image.save(&output).unwrap();
}
fn arg_sorting() -> Arg<'static, 'static> {
Arg::with_name("sorting")
.short("s")
.long("sorting")
.help("The function to use for sorting pixels.")
.long_help(
"The function to use for sorting pixels.\n\
\n\
             This mode defines how pixels are sorted, be it by lightness, intensity or the minimum/maximum channel value of each pixel.",
)
.default_value("lightness")
.takes_value(true)
}
fn arg_num() -> Arg<'static, 'static> {
Arg::with_name("num")
.short("n")
.long("num")
.help("The number of parts to split the intervals into.")
.long_help(
"The number of parts to split the intervals into.\n\
\n\
             Required by interval function `split`; splits each interval into evenly sized parts.",
)
.required_if("interval_func", "split")
.takes_value(true)
}
fn arg_rotation() -> Arg<'static, 'static> {
Arg::with_name("rotation")
.short("r")
.long("rotation")
.help("The rotation to apply to the image prior sorting.")
.long_help(
"The rotation to apply to the image(and mask) prior sorting.\n\
\n\
This value defines the angle at which pixels will be sorted. This may be any multiple of 90 degrees.\n\
To sort vertically instead of horizontally for example one would specifiy a rotation of 90 or 270 degrees.",
)
.default_value("0")
.takes_value(true)
}
fn arg_upper() -> Arg<'static, 'static> {
Arg::with_name("upper")
.short("u")
.long("upper")
.help("The upper threshold used by some interval functions.")
.long_help(
"The upper threshold used by some interval functions.\n\
\n\
Required by `edge` in the range of [0.0;1140.39), accepts floating point numbers.\n\
Required by `random`, defines the maximum possible size of the random intervals in integers.\n\
             Required by `threshold`, defines the upper threshold a pixel's lightness has to fall below to be sorted.",
)
.required_ifs(&[("interval_func", "edges"), ("interval_func", "threshold"), ("interval_func", "random")])
.takes_value(true)
}
fn arg_lower() -> Arg<'static, 'static> {
Arg::with_name("lower")
.short("l")
.long("lower")
.help("The lower threshold used by some interval functions.")
.long_help(
"The lower threshold used by some interval functions.\n\
\n\
Required by `edge` in the range of [0.0;1140.39), accepts floating point numbers.\n\
Required by `random`, defines the minimum possible size of the random intervals in integers.\n\
             Required by `threshold`, defines the lower threshold a pixel's lightness has to surpass to be sorted.",
)
.required_ifs(&[("interval_func", "edges"), ("interval_func", "threshold"), ("interval_func", "random")])
.takes_value(true)
}
fn arg_mask() -> Arg<'static, 'static> {
Arg::with_name("mask")
.short("m")
.long("mask")
.help("A file path to a gray image to mask parts of the input image.")
.long_help(
"A file path to a gray image to mask parts of the input image.\n\
White pixels may be sorted, black pixels may not.",
)
.takes_value(true)
}
fn arg_output() -> Arg<'static, 'static> {
Arg::with_name("output")
.short("o")
.long("output")
.help("A file path to save the output image to.")
.takes_value(true)
}
fn arg_interval() -> Arg<'static, 'static> {
Arg::with_name("interval_func")
.short("i")
.long("interval")
.help("Interval function used to seperate the image into intervals.")
.possible_values(&["full", "edge", "random", "split", "threshold"])
.default_value("full")
.takes_value(true)
}
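/// Builds the initial intervals: one per image row, each spanning the full row
/// width. The interval functions above then split or mask these before sorting.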
pub fn intervals_from_image<I: image::GenericImageView>(image: &I) -> Vec<InversionList> {
(0..image.height())
.map(|_| InversionList::from(0..image.width() as usize))
.collect()
} | .required(true)
.takes_value(true),
)
.args(&[ |
topic_handlers.py | """
Copyright 2019 EUROCONTROL
==========================================
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==========================================
Editorial note: this license is an instance of the BSD license template as provided by the Open Source Initiative:
http://opensource.org/licenses/BSD-3-Clause
Details on EUROCONTROL: http://www.eurocontrol.int
"""
from subscription_manager.db import topics as db
from subscription_manager.events.subscription_handlers import delete_subscription_handler
__author__ = "EUROCONTROL (SWIM)"
def create_topic_handler(topic):
db.create_topic(topic)
# def update_topic_handler(current_topic, updated_topic):
# db.update_topic(updated_topic)
#
# for subscription in updated_topic.subscriptions:
# broker.delete_queue_binding(queue=subscription.queue, topic=current_topic.name)
# broker.bind_queue_to_topic(queue=subscription.queue, topic=updated_topic.name, durable=subscription.durable)
|
def delete_topic_subscriptions_handler(topic):
for subscription in topic.subscriptions:
delete_subscription_handler(subscription) |
def delete_topic_handler(topic):
db.delete_topic(topic) |
setup.py | # -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2012, 2013, 2014, 2015 OnlineGroups.net and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
import codecs
import os
import sys
from setuptools import setup, find_packages
from version import get_version
version = get_version()
# The argparse library was added to core in Python 2.7
core = ['setuptools',
'blessings',
'gs.config', # Note: without zope-support
'gs.form', ]
if sys.version_info >= (2, 7):
requires = core
else:
requires = core + ['argparse']
with codecs.open('README.rst', encoding='utf-8') as f:
long_description = f.read()
with codecs.open(os.path.join("docs", "HISTORY.rst"),
encoding='utf-8') as f:
long_description += '\n' + f.read()
setup(
name='gs.profile.status.send',
version=version,
description="Send the profile-status notifications out", | 'Environment :: Console',
"Intended Audience :: Developers",
'License :: OSI Approved :: Zope Public License',
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
'Topic :: Communications :: Email',
'Topic :: Communications :: Email :: Mailing List Servers',
'Topic :: Communications :: Email :: Mail Transport Agents',
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='groupserver, profile, notification',
author='Michael JasonSmith',
author_email='[email protected]',
url='https://github.com/groupserver/gs.profile.status.send/',
license='ZPL 2.1',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['gs', 'gs.profile', 'gs.profile.status', ],
include_package_data=True,
zip_safe=False,
install_requires=requires,
extras_require={'docs': ['Sphinx'], },
entry_points={
'console_scripts': [
'sendprofile = gs.profile.status.send.script:main',
],
# --=mpj17=-- Entry points are the work of the devil. Some time
# you, me and Mr Soldering Iron are going to have a little chat
# about how to do things better.
},
) | long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable', |
emoji-picker-api.directive.d.ts | import { ComponentFactoryResolver, ViewContainerRef, ElementRef, EventEmitter } from '@angular/core';
import 'rxjs/add/operator/takeUntil';
import 'rxjs/add/operator/distinctUntilChanged';
export declare class | {
private _cfr;
private _vcr;
private _el;
private _directionCode;
emojiPickerDirection: string;
emojiPickerIf: boolean;
emojiPickerIfEmitter: EventEmitter<boolean>;
selectEmitter: EventEmitter<{}>;
private _emojiPickerOpenState;
private _destroyed;
private _emojiPickerFactory;
private _emojiPickerRef;
private _emojiSubs;
constructor(_cfr: ComponentFactoryResolver, _vcr: ViewContainerRef, _el: ElementRef);
openPicker(): void;
closePicker(): void;
ngOnDestroy(): void;
}
| EmojiPickerApiDirective |
serializers.py | from rest_framework import serializers
from ..models import Tweet
from accounts.api.serializers import UserModelSerializer
from django.utils.timesince import timesince
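# Serializes the parent of a retweet; it mirrors TweetModelSerializer but omits
# the nested parent/is_retweet fields, which avoids recursing through retweet chains.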
class ParentTweetModelSerializer(serializers.ModelSerializer):
user = UserModelSerializer(read_only=True)
date_display = serializers.SerializerMethodField()
timesince = serializers.SerializerMethodField()
likes = serializers.SerializerMethodField()
didlike = serializers.SerializerMethodField()
class Meta:
model = Tweet
fields = [
'id',
'user',
'content',
'timestamp',
'date_display',
'timesince',
'likes',
'didlike',
]
    def get_likes(self, obj):
        return obj.liked.all().count()
    def get_didlike(self, obj):
        # Falls back to False when there is no request in context or the user
        # lookup fails (e.g. anonymous users).
        try:
            user = self.context.get('request').user
            if user in obj.liked.all():
                return True
        except:
            pass
        return False
    def get_date_display(self, obj):
|
    def get_timesince(self, obj):
return timesince(obj.timestamp) + " ago"
class TweetModelSerializer(serializers.ModelSerializer):
parent_id = serializers.CharField(required=False)
user = UserModelSerializer(read_only=True)
date_display = serializers.SerializerMethodField()
timesince = serializers.SerializerMethodField()
is_retweet = serializers.SerializerMethodField()
parent = ParentTweetModelSerializer(read_only=True)
likes = serializers.SerializerMethodField()
didlike = serializers.SerializerMethodField()
class Meta:
model = Tweet
fields = [
'parent_id',
'id',
'user',
'content',
'timestamp',
'date_display',
'timesince',
'is_retweet',
'parent',
'likes',
'didlike',
'reply',
]
#read_only_fields =['reply']
    def get_likes(self, obj):
        return obj.liked.all().count()
    def get_didlike(self, obj):
        try:
            user = self.context.get('request').user
            if user in obj.liked.all():
                return True
        except:
            pass
        return False
    def get_date_display(self, obj):
        return obj.timestamp.strftime("%b %d, %Y | at %I:%M %p")
    def get_timesince(self, obj):
        return timesince(obj.timestamp) + " ago"
    def get_is_retweet(self, obj):
        if obj.parent:
            return True
        return False
| return obj.timestamp.strftime("%b %d, %Y | at %I:%M %p") |
get_github_repo.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import requests
import sys
import traceback
from bs4 import BeautifulSoup as Soup
parser = argparse.ArgumentParser(
description='Get github repo from go import path.')
parser.add_argument(
'go_dependency_list_file',
nargs='?',
default='dep.txt',
help=
    'File path of a golang dependency list file, one dependency name per line. '
+'(default: %(default)s)',
)
parser.add_argument(
'-o',
'--output', | help=
'Output file with one line per resolved github repo. Format: org/repo. (default: %(default)s)',
)
parser.add_argument(
'--manual-dep-repo-mapping',
dest='manual_dep_repo_mapping_file',
nargs='?',
default='dep_repo.manual.csv',
help=
'Optional dependency to repo mapping maintained manually for dependencies we cannot '
+'automatically resolve. Format: each line has dependency import name and its github repo '
+'separated by comma. Like, "upper.io/db.v3,upper/db". Note: github/upper/db is the repo. '
+'(default: %(default)s)'
)
args = parser.parse_args()
protocol = 'https://'
godoc_base = 'godoc.org/'
github_base = 'github.com/'
gopkg_base = 'gopkg.in/'
def github_link_to_repo(repo):
'''
    Removes extra subfolders from a github url, keeping only the org/repo part.
'''
if len(repo.split('/')) > 2:
print('repo {} has subfolder'.format(repo), file=sys.stderr)
repo = '/'.join(repo.split('/')[:2])
assert len(repo.split(
'/')) == 2, 'repo name should be org/repo, but is {}'.format(repo)
return repo
def get_github_repo(url):
'''
Tries to resolve github repo from a github url.
Returns org/repo format github repo string.
'''
if url.startswith(protocol):
url = url[len(protocol):]
if not url.startswith(github_base):
raise Exception('Package url is not github: {}'.format(url))
github_repo = url[len(github_base):]
github_repo = github_link_to_repo(github_repo)
if github_repo[-1] == '/':
github_repo = github_repo[:-1]
return github_repo
def fetch_github_uri_from_godoc(url):
'''
    Tries to resolve a github repo from the godoc website.
    Implementation: Godoc is a standard way for many golang libraries to
    host their documentation. A godoc page usually has a link on the top left with
    the github repo url. This function crawls the godoc page for the library and finds
the github url there. If the link there isn't a github url, it throws an
exception.
'''
full_url = protocol + godoc_base + url
print('fetching godoc {}'.format(full_url), file=sys.stderr)
response = requests.get(full_url)
assert response.ok, 'it failed with {} {}'.format(response.status_code,
response.reason)
soup = Soup(response.text, features="html.parser")
navs = soup.select('#x-projnav')
if len(navs) != 1:
raise Exception(
'#x-projnav should occur exactly once, but {} found for {}'.format(len(navs), url))
nav = navs[0]
package_name = nav.select_one('span').contents[0]
assert package_name == url, 'fetched package name should be the same'
link = nav.select_one('a').attrs.get('href')
return get_github_repo(link)
def fetch_gopkg_uri(url):
'''
Tries to resolve github repo for gopkg libraries.
    Implementation: the gopkg library page has a button with the text 'Source Code'; its
url is usually the corresponding github repo. Throws an exception if the url
found is not github.
'''
response = requests.get(protocol + url)
assert response.ok, 'fetching {} failed with {} {}'.format(
url, response.status_code, response.reason)
soup = Soup(response.text, features="html.parser")
def is_source_code_link(link):
return link.getText().find('Source Code') >= 0
source_code_links = list(filter(is_source_code_link, soup.select('a')))
assert len(
        source_code_links) == 1, 'Expected exactly one source code link'
link = source_code_links[0].attrs.get('href')
return get_github_repo(link)
def get_github_repo_for_dep(dep):
'''
    Tries to resolve a github repo in three ways:
1. fetch gopkg website
2. parse from github url
3. fetch godoc website
'''
print('Fetching github uri for {}'.format(dep), file=sys.stderr)
repo = None
if dep.startswith(gopkg_base):
print('Try fetching {} from gopkg'.format(dep), file=sys.stderr)
repo = fetch_gopkg_uri(dep)
elif dep.startswith(github_base):
print('{} is already github'.format(dep), file=sys.stderr)
repo = get_github_repo(dep)
else:
print('Try fetching {} repo from godoc'.format(dep), file=sys.stderr)
repo = fetch_github_uri_from_godoc(dep)
return repo
def main():
with open(args.go_dependency_list_file,
'r') as dep_file, open(args.output_file, 'w') as output_file:
mappings = {}
try:
with open(args.manual_dep_repo_mapping_file, 'r') as dep_repo_mapping_file:
for line in dep_repo_mapping_file:
mapping = line.strip().split(',')
assert len(mapping) == 2
[dep, repo] = mapping
mappings[dep] = repo
except Exception: # pylint: disable=broad-except
print('ignore manual_dep_repo_mapping_file', file=sys.stderr)
deps = [line.strip() for line in dep_file]
repo_seen = set()
dep_succeeded = []
# Dependencies that we couldn't resolve their github repos.
dep_failed = []
for dep in deps:
try:
# Get dep's repo from manually maintained mapping first.
repo = mappings.get(dep)
if repo is not None:
print('repo of {} is already configured to {}'.format(dep, repo), file=sys.stderr)
else:
# Try to resolve if not found
repo = get_github_repo_for_dep(dep)
if repo in repo_seen:
print('repo {} is seen more than once'.format(repo), file=sys.stderr)
else:
repo_seen.add(repo)
print(repo, file=output_file)
dep_succeeded.append(dep)
except Exception as e: # pylint: disable=broad-except
print('[failed]', e, file=sys.stderr)
traceback.print_exc(file=sys.stderr)
dep_failed.append(dep)
print()
print((
'Successfully resolved github repo for {} dependencies and saved to {}. '
+'Failed to resolve {} dependencies.'
).format(len(dep_succeeded), args.output_file, len(dep_failed)),
file=sys.stderr)
if dep_failed:
print('We failed to resolve the following dependencies:', file=sys.stderr)
for dep in dep_failed:
print(dep, file=sys.stderr)
if __name__ == '__main__':
main() | dest='output_file',
nargs='?',
default='repo.txt', |
client_test.go | //go:build !js
// +build !js
package turn
import (
"net"
"testing"
"time"
"github.com/pion/logging"
"github.com/pion/transport/vnet"
"github.com/stretchr/testify/assert"
)
func createListeningTestClient(t *testing.T, loggerFactory logging.LoggerFactory) (*Client, net.PacketConn, bool) |
func createListeningTestClientWithSTUNServ(t *testing.T, loggerFactory logging.LoggerFactory) (*Client, net.PacketConn, bool) {
conn, err := net.ListenPacket("udp4", "0.0.0.0:0")
assert.NoError(t, err)
c, err := NewClient(&ClientConfig{
STUNServerAddr: "stun1.l.google.com:19302",
Conn: conn,
Net: vnet.NewNet(nil),
LoggerFactory: loggerFactory,
})
assert.NoError(t, err)
assert.NoError(t, c.Listen())
return c, conn, true
}
func TestClientWithSTUN(t *testing.T) {
loggerFactory := logging.NewDefaultLoggerFactory()
log := loggerFactory.NewLogger("test")
t.Run("SendBindingRequest", func(t *testing.T) {
c, pc, ok := createListeningTestClientWithSTUNServ(t, loggerFactory)
if !ok {
return
}
defer c.Close()
resp, err := c.SendBindingRequest()
assert.NoError(t, err, "should succeed")
log.Debugf("mapped-addr: %s", resp.String())
assert.Equal(t, 0, c.trMap.Size(), "should be no transaction left")
assert.NoError(t, pc.Close())
})
t.Run("SendBindingRequestTo Parallel", func(t *testing.T) {
c, pc, ok := createListeningTestClient(t, loggerFactory)
if !ok {
return
}
defer c.Close()
		// simple channels for goroutine start/finish signaling
started := make(chan struct{})
finished := make(chan struct{})
var err1 error
to, err := net.ResolveUDPAddr("udp4", "stun1.l.google.com:19302")
assert.NoError(t, err)
// stun1.l.google.com:19302, more at https://gist.github.com/zziuni/3741933#file-stuns-L5
go func() {
close(started)
_, err1 = c.SendBindingRequestTo(to)
close(finished)
}()
		// block until the goroutine has started, to make two almost-parallel requests
<-started
if _, err = c.SendBindingRequestTo(to); err != nil {
t.Fatal(err)
}
<-finished
if err1 != nil {
			t.Fatal(err1)
}
assert.NoError(t, pc.Close())
})
t.Run("NewClient should fail if Conn is nil", func(t *testing.T) {
_, err := NewClient(&ClientConfig{
LoggerFactory: loggerFactory,
})
assert.Error(t, err, "should fail")
})
t.Run("SendBindingRequestTo timeout", func(t *testing.T) {
c, pc, ok := createListeningTestClient(t, loggerFactory)
if !ok {
return
}
defer c.Close()
to, err := net.ResolveUDPAddr("udp4", "127.0.0.1:9")
assert.NoError(t, err)
c.rto = 10 * time.Millisecond // force short timeout
_, err = c.SendBindingRequestTo(to)
assert.NotNil(t, err)
assert.NoError(t, pc.Close())
})
}
// Create an allocation, and then delete all nonces
// The subsequent Write on the allocation will cause a CreatePermission
// which will be forced to handle a stale nonce response
func TestClientNonceExpiration(t *testing.T) {
// lim := test.TimeOut(time.Second * 30)
// defer lim.Stop()
// report := test.CheckRoutines(t)
// defer report()
udpListener, err := net.ListenPacket("udp4", "0.0.0.0:3478")
assert.NoError(t, err)
server, err := NewServer(ServerConfig{
AuthHandler: func(username, realm string, srcAddr net.Addr) (key []byte, ok bool) {
return GenerateAuthKey(username, realm, "pass"), true
},
PacketConnConfigs: []PacketConnConfig{
{
PacketConn: udpListener,
RelayAddressGenerator: &RelayAddressGeneratorStatic{
RelayAddress: net.ParseIP("127.0.0.1"),
Address: "0.0.0.0",
},
},
},
Realm: "pion.ly",
})
assert.NoError(t, err)
conn, err := net.ListenPacket("udp4", "0.0.0.0:0")
assert.NoError(t, err)
client, err := NewClient(&ClientConfig{
Conn: conn,
STUNServerAddr: "127.0.0.1:3478",
TURNServerAddr: "127.0.0.1:3478",
Username: "foo",
Password: "pass",
})
assert.NoError(t, err)
assert.NoError(t, client.Listen())
allocation, err := client.Allocate()
assert.NoError(t, err)
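	// Invalidate every server-side nonce so the write below is rejected as
	// stale (error 438) and has to recover via the retry path.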
server.nonces.Range(func(key, value interface{}) bool {
server.nonces.Delete(key)
return true
})
_, err = allocation.WriteTo([]byte{0x00}, &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8080})
assert.NoError(t, err)
// Shutdown
assert.NoError(t, allocation.Close())
assert.NoError(t, conn.Close())
assert.NoError(t, server.Close())
}
| {
conn, err := net.ListenPacket("udp4", "0.0.0.0:0")
assert.NoError(t, err)
c, err := NewClient(&ClientConfig{
Conn: conn,
Software: "TEST SOFTWARE",
LoggerFactory: loggerFactory,
})
assert.NoError(t, err)
assert.NoError(t, c.Listen())
return c, conn, true
} |
build.rs | use anyhow::{ensure, Error};
use std::{env, process::Command, str::from_utf8};
fn have_drat_trim() -> Result<(), Error> {
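    // Setting VARISAT_HAVE_DRAT_TRIM skips the probe below and assumes the
    // checker is available, e.g. when it is not on PATH at build time.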
println!("rerun-if-env-changed=VARISAT_HAVE_DRAT_TRIM");
if env::var("VARISAT_HAVE_DRAT_TRIM").is_ok() { | let output = Command::new("drat-trim").output()?;
let stdout = from_utf8(&output.stdout)?;
ensure!(
stdout.contains("force binary proof parse mode"),
"no force binary proof option found"
);
Ok(())
}
fn have_rate() -> Result<(), Error> {
println!("rerun-if-env-changed=VARISAT_HAVE_RATE");
if env::var("VARISAT_HAVE_RATE").is_ok() {
return Ok(());
}
let _ = Command::new("rate").arg("--version").output()?;
Ok(())
}
fn main() {
match have_drat_trim() {
Ok(_) => println!("cargo:rustc-cfg=test_drat_trim"),
Err(err) => println!(
"cargo:warning=drat-trim proof checker not found, some tests will be disabled: {}",
err
),
}
match have_rate() {
Ok(_) => println!("cargo:rustc-cfg=test_rate"),
Err(err) => println!(
"cargo:warning=rate proof checker not found, some tests will be disabled: {}",
err
),
}
} | return Ok(());
}
|
config-1.0.12.schema.test.js | import expect from 'expect';
import { CoordinationType } from '../app/constants';
import viewConfigSchema from './config-1.0.12.schema.json';
describe('view config schema', () => {
describe('coordination types', () => {
it('defines schema for all valid coordination types', () => {
const allCoordinationTypes = Object.values(CoordinationType);
const inCoordinationSpace = Object.keys(
viewConfigSchema.properties.coordinationSpace.properties,
);
const inCoordinationScopes = Object.keys(
viewConfigSchema.definitions.components.items.properties
.coordinationScopes.properties,
); | expect(inCoordinationSpace).toEqual(expect.arrayContaining(allCoordinationTypes));
expect(inCoordinationScopes).toEqual(expect.arrayContaining(allCoordinationTypes));
});
it('defines schema for only valid coordination types (does not have extra)', () => {
const allCoordinationTypes = Object.values(CoordinationType);
const inCoordinationSpace = Object.keys(
viewConfigSchema.properties.coordinationSpace.properties,
);
const inCoordinationScopes = Object.keys(
viewConfigSchema.definitions.components.items.properties
.coordinationScopes.properties,
);
expect(allCoordinationTypes).toEqual(expect.arrayContaining(inCoordinationSpace));
expect(allCoordinationTypes).toEqual(expect.arrayContaining(inCoordinationScopes));
});
});
}); | |
test_validators.py | #
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2021 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import pytest
from pymeasure.instruments.validators import (
strict_range, strict_discrete_range, strict_discrete_set,
truncated_range, truncated_discrete_set,
modular_range, modular_range_bidirectional,
joined_validators
)
def | ():
assert strict_range(5, range(10)) == 5
assert strict_range(5.1, range(10)) == 5.1
with pytest.raises(ValueError):
strict_range(20, range(10))
def test_strict_discrete_range():
assert strict_discrete_range(0.1, [0, 0.2], 0.001) == 0.1
assert strict_discrete_range(5, range(10), 0.1) == 5
assert strict_discrete_range(5.1, range(10), 0.1) == 5.1
assert strict_discrete_range(5.1, range(10), 0.001) == 5.1
assert strict_discrete_range(-5.1, [-20, 20], 0.001) == -5.1
with pytest.raises(ValueError):
strict_discrete_range(5.1, range(5), 0.001)
with pytest.raises(ValueError):
strict_discrete_range(5.01, range(5), 0.1)
with pytest.raises(ValueError):
strict_discrete_range(0.003, [0, 0.2], 0.002)
def test_strict_discrete_set():
assert strict_discrete_set(5, range(10)) == 5
with pytest.raises(ValueError):
strict_discrete_set(5.1, range(10))
with pytest.raises(ValueError):
strict_discrete_set(20, range(10))
def test_truncated_range():
assert truncated_range(5, range(10)) == 5
assert truncated_range(5.1, range(10)) == 5.1
assert truncated_range(-10, range(10)) == 0
assert truncated_range(20, range(10)) == 9
def test_truncated_discrete_set():
assert truncated_discrete_set(5, range(10)) == 5
assert truncated_discrete_set(5.1, range(10)) == 6
assert truncated_discrete_set(11, range(10)) == 9
assert truncated_discrete_set(-10, range(10)) == 0
def test_modular_range():
assert modular_range(5, range(10)) == 5
assert abs(modular_range(5.1, range(10)) - 5.1) < 1e-6
assert modular_range(11, range(10)) == 2
assert abs(modular_range(11.3, range(10)) - 2.3) < 1e-6
assert abs(modular_range(-7.1, range(10)) - 1.9) < 1e-6
assert abs(modular_range(-13.2, range(10)) - 4.8) < 1e-6
def test_modular_range_bidirectional():
assert modular_range_bidirectional(5, range(10)) == 5
assert abs(modular_range_bidirectional(5.1, range(10)) - 5.1) < 1e-6
assert modular_range_bidirectional(11, range(10)) == 2
assert abs(modular_range_bidirectional(11.3, range(10)) - 2.3) < 1e-6
assert modular_range_bidirectional(-7, range(10)) == -7
assert abs(modular_range_bidirectional(-7.1, range(10)) - (-7.1)) < 1e-6
assert abs(modular_range_bidirectional(-13.2, range(10)) - (-4.2)) < 1e-6
def test_joined_validators():
tst_validator = joined_validators(strict_discrete_set, strict_range)
values = [["ON", "OFF"], range(10)]
assert tst_validator(5, values) == 5
assert tst_validator(5.1, values) == 5.1
assert tst_validator("ON", values) == "ON"
with pytest.raises(ValueError):
tst_validator("OUT", values)
with pytest.raises(ValueError):
tst_validator(20, values)
| test_strict_range |
xcore.rs | //! Contains xcore-specific types
use core::convert::From;
use core::{cmp, fmt, slice};
// XXX todo(tmfink): create rusty versions
pub use capstone_sys::xcore_insn_group as XcoreInsnGroup;
pub use capstone_sys::xcore_insn as XcoreInsn;
pub use capstone_sys::xcore_reg as XcoreReg;
use capstone_sys::{cs_xcore, cs_xcore_op, xcore_op_mem, xcore_op_type};
pub use crate::arch::arch_builder::xcore::*;
use crate::arch::DetailsArchInsn;
use crate::instruction::{RegId, RegIdInt};
/// Contains XCORE-specific details for an instruction
pub struct XcoreInsnDetail<'a>(pub(crate) &'a cs_xcore);
impl_PartialEq_repr_fields!(XcoreInsnDetail<'a> [ 'a ];
operands
);
/// XCORE operand
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum XcoreOperand {
/// Register
Reg(RegId),
/// Immediate
Imm(i32),
/// Memory
Mem(XcoreOpMem),
/// Invalid
Invalid,
}
impl Default for XcoreOperand {
fn default() -> Self {
XcoreOperand::Invalid
}
}
/// XCORE memory operand | impl XcoreOpMem {
/// Base register
pub fn base(&self) -> RegId {
RegId(RegIdInt::from(self.0.base))
}
/// Index register
pub fn index(&self) -> RegId {
RegId(RegIdInt::from(self.0.index))
}
/// Disp value
pub fn disp(&self) -> i32 {
self.0.disp
}
/// Direct value
pub fn direct(&self) -> i32 {
self.0.direct
}
}
impl_PartialEq_repr_fields!(XcoreOpMem;
base, index, disp, direct
);
impl cmp::Eq for XcoreOpMem {}
impl<'a> From<&'a cs_xcore_op> for XcoreOperand {
fn from(insn: &cs_xcore_op) -> XcoreOperand {
match insn.type_ {
xcore_op_type::XCORE_OP_REG => {
XcoreOperand::Reg(RegId(unsafe { insn.__bindgen_anon_1.reg } as RegIdInt))
}
xcore_op_type::XCORE_OP_IMM => XcoreOperand::Imm(unsafe { insn.__bindgen_anon_1.imm }),
xcore_op_type::XCORE_OP_MEM => {
XcoreOperand::Mem(XcoreOpMem(unsafe { insn.__bindgen_anon_1.mem }))
}
xcore_op_type::XCORE_OP_INVALID => XcoreOperand::Invalid,
}
}
}
def_arch_details_struct!(
InsnDetail = XcoreInsnDetail;
Operand = XcoreOperand;
OperandIterator = XcoreOperandIterator;
OperandIteratorLife = XcoreOperandIterator<'a>;
[ pub struct XcoreOperandIterator<'a>(slice::Iter<'a, cs_xcore_op>); ]
cs_arch_op = cs_xcore_op;
cs_arch = cs_xcore;
); | #[derive(Debug, Copy, Clone)]
pub struct XcoreOpMem(pub(crate) xcore_op_mem);
|
main.plugin.py | from composo_py import ioc
def init(config):
|
def test():
app = ioc.Plugin.plugin()
app.run("test")
if __name__ == "__main__":
test()
| ioc.Config.config.from_dict(config)
app = ioc.Plugin.plugin()
return app |
configDefault.py | import os
DEBUG_MODE = True
SECRET_KEY = 'secret'
# Database config
DB_USER = 'postgres'
DB_NAME = 'postgres'
DB_PASSWORD = ''
DB_HOST = os.environ.get('POSTGRES_HOST', 'localhost')
DB_PORT = os.environ.get('POSTGRES_PORT', 5432)
# Slack config
SLACK_TOKEN = 'token'
SLACK_API_INVITE_URL = 'https://slack.com/api/users.admin.invite'
# Email config
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_PORT = 587
|
FACEBOOK_TOKEN = ''
GOOGLE_MAPS_API_KEY = '' | PROJECTOR_IP = ''
DOOR_ENDPOINT = '' |
vector-sets.js | "use strict";
// Exports a "vector set" (an array of arrays of holodeck vectors).
module.exports = [// Frame latch OPM is used to create a value mailbox that decouples
// the writer of the mailbox from the knowledge of how the value
// written to the mailbox is consumed and for what purposes.
//
// Other OPM's may observe a Frame Latch OPMI by its fully-qualified
// OCD path (or OPMI-relative path) using the transition operator
// @encapsule/holarchy-cm TransitionOperator-opm-at-step.js.
// to determine when it has reached its "updated" step and trigger
// whatever action(s) are required.
//
// Broadly, entire reactive systems can be viewed as chains
// of OPMs that use Frame Latches to define their input value(s)
// and/or output value(s) such that they can be observed | require("./vector-set-frame-latch"), require("./vector-set-cml")]; | // (and consequently processed) by other OPMI's. |
sqlfmt.go | package main
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os" |
"github.com/pkg/errors"
"github.com/otaviof/go-sqlfmt/pkg/sqlfmt"
)
var (
// main operation modes
	list    = flag.Bool("l", false, "list files whose formatting differs from sqlfmt's")
write = flag.Bool("w", false, "write result to (source) file instead of stdout")
doDiff = flag.Bool("d", false, "display diffs instead of rewriting files")
options = &sqlfmt.Options{}
)
func init() {
flag.IntVar(&options.Distance, "distance", 0, "write the distance from the edge to the begin of SQL statements")
}
func usage() {
fmt.Fprintf(os.Stderr, "usage: sqlfmt [flags] [path ...]\n")
flag.PrintDefaults()
}
func isGoFile(info os.FileInfo) bool {
name := info.Name()
return !info.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go")
}
func visitFile(path string, info os.FileInfo, err error) error {
if err == nil && isGoFile(info) {
err = processFile(path, nil, os.Stdout)
}
if err != nil {
processError(errors.Wrap(err, "visit file failed"))
}
return nil
}
func walkDir(path string) {
filepath.Walk(path, visitFile)
}
func processFile(filename string, in io.Reader, out io.Writer) error {
if in == nil {
f, err := os.Open(filename)
if err != nil {
return errors.Wrap(err, "os.Open failed")
}
in = f
}
src, err := ioutil.ReadAll(in)
if err != nil {
return errors.Wrap(err, "ioutil.ReadAll failed")
}
res, err := sqlfmt.Process(filename, src, options)
if err != nil {
return errors.Wrap(err, "sqlfmt.Process failed")
}
if !bytes.Equal(src, res) {
if *list {
fmt.Fprintln(out, filename)
}
if *write {
if err = ioutil.WriteFile(filename, res, 0); err != nil {
return errors.Wrap(err, "ioutil.WriteFile failed")
}
}
if *doDiff {
data, err := diff(src, res)
if err != nil {
return errors.Wrap(err, "diff failed")
}
fmt.Printf("diff %s gofmt/%s\n", filename, filename)
out.Write(data)
}
if !*list && !*write && !*doDiff {
_, err = out.Write(res)
if err != nil {
return errors.Wrap(err, "out.Write failed")
}
}
}
return nil
}
func sqlfmtMain() {
flag.Usage = usage
flag.Parse()
// the user is piping their source into go-sqlfmt
if flag.NArg() == 0 {
if *write {
log.Fatal("can not use -w while using pipeline")
}
if err := processFile("<standard input>", os.Stdin, os.Stdout); err != nil {
processError(errors.Wrap(err, "processFile failed"))
}
return
}
for i := 0; i < flag.NArg(); i++ {
path := flag.Arg(i)
switch dir, err := os.Stat(path); {
case err != nil:
processError(err)
case dir.IsDir():
walkDir(path)
default:
info, err := os.Stat(path)
if err != nil {
processError(err)
}
if isGoFile(info) {
err = processFile(path, nil, os.Stdout)
if err != nil {
processError(err)
}
}
}
}
}
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
sqlfmtMain()
}
func diff(b1, b2 []byte) (data []byte, err error) {
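	// Writes both buffers to temp files and shells out to the system `diff`
	// tool, so callers need `diff` available on PATH.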
f1, err := ioutil.TempFile("", "sqlfmt")
if err != nil {
return
}
defer os.Remove(f1.Name())
defer f1.Close()
f2, err := ioutil.TempFile("", "sqlfmt")
if err != nil {
return
}
defer os.Remove(f2.Name())
defer f2.Close()
f1.Write(b1)
f2.Write(b2)
data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
if len(data) > 0 {
// diff exits with a non-zero status when the files don't match.
// Ignore that failure as long as we get output.
err = nil
}
return
}
func processError(err error) {
switch err.(type) {
case *sqlfmt.FormatError:
log.Println(err)
default:
log.Fatal(err)
}
} | "os/exec"
"path/filepath"
"runtime"
"strings" |
session_data.rs | use std::fmt;
use std::str::FromStr;
use derive_builder::Builder;
use shorthand::ShortHand;
use crate::attribute::AttributePairs;
use crate::types::ProtocolVersion;
use crate::utils::{quote, tag, unquote};
use crate::{Error, RequiredVersion};
/// The data of [`ExtXSessionData`].
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum SessionData {
/// Contains the data identified by the [`ExtXSessionData::data_id`].
///
/// If a [`language`] is specified, this variant should contain a
/// human-readable string written in the specified language.
///
/// [`data_id`]: ExtXSessionData::data_id
/// [`language`]: ExtXSessionData::language
Value(String),
/// An [`URI`], which points to a [`json`] file.
///
/// [`json`]: https://tools.ietf.org/html/rfc8259
/// [`URI`]: https://tools.ietf.org/html/rfc3986
Uri(String),
}
/// Allows arbitrary session data to be carried in a [`MasterPlaylist`].
///
/// [`MasterPlaylist`]: crate::MasterPlaylist
#[derive(ShortHand, Builder, Hash, Eq, Ord, Debug, PartialEq, Clone, PartialOrd)]
#[builder(setter(into))]
#[shorthand(enable(must_use, into))]
pub struct ExtXSessionData {
/// This should conform to a [reverse DNS] naming convention, such as
/// `com.example.movie.title`.
///
/// # Note
///
/// There is no central registration authority, so a value
    /// should be chosen that is unlikely to collide with others.
///
/// This field is required.
///
/// [reverse DNS]: https://en.wikipedia.org/wiki/Reverse_domain_name_notation
data_id: String,
/// The [`SessionData`] associated with the
/// [`data_id`](ExtXSessionData::data_id).
///
/// # Note
///
/// This field is required.
#[shorthand(enable(skip))]
pub data: SessionData,
/// The `language` attribute identifies the language of the [`SessionData`].
///
/// # Note
///
/// This field is optional and the provided value should conform to
/// [RFC5646].
///
/// [RFC5646]: https://tools.ietf.org/html/rfc5646
#[builder(setter(into, strip_option), default)]
language: Option<String>,
}
impl ExtXSessionData {
pub(crate) const PREFIX: &'static str = "#EXT-X-SESSION-DATA:";
/// Makes a new [`ExtXSessionData`] tag.
///
/// # Example
///
/// ```
/// # use hls_m3u8::tags::ExtXSessionData;
/// use hls_m3u8::tags::SessionData;
///
/// let session_data = ExtXSessionData::new(
/// "com.example.movie.title",
/// SessionData::Uri("https://www.example.com/".into()),
/// );
/// ```
#[must_use]
pub fn new<T: Into<String>>(data_id: T, data: SessionData) -> Self {
Self {
data_id: data_id.into(),
data,
language: None,
}
}
/// Returns a builder for [`ExtXSessionData`].
///
/// # Example
///
/// ```
/// # use hls_m3u8::tags::ExtXSessionData;
/// use hls_m3u8::tags::SessionData;
///
/// let session_data = ExtXSessionData::builder()
/// .data_id("com.example.movie.title")
/// .data(SessionData::Value("some data".into()))
/// .language("en")
/// .build()?;
/// # Ok::<(), String>(())
/// ```
#[must_use]
pub fn | () -> ExtXSessionDataBuilder { ExtXSessionDataBuilder::default() }
/// Makes a new [`ExtXSessionData`] tag, with the given language.
///
/// # Example
///
/// ```
/// # use hls_m3u8::tags::ExtXSessionData;
/// use hls_m3u8::tags::SessionData;
///
/// let session_data = ExtXSessionData::with_language(
/// "com.example.movie.title",
/// SessionData::Value("some data".into()),
/// "en",
/// );
/// ```
#[must_use]
pub fn with_language<T, K>(data_id: T, data: SessionData, language: K) -> Self
where
T: Into<String>,
K: Into<String>,
{
Self {
data_id: data_id.into(),
data,
language: Some(language.into()),
}
}
}
/// This tag requires [`ProtocolVersion::V1`].
impl RequiredVersion for ExtXSessionData {
fn required_version(&self) -> ProtocolVersion { ProtocolVersion::V1 }
}
impl fmt::Display for ExtXSessionData {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", Self::PREFIX)?;
write!(f, "DATA-ID={}", quote(&self.data_id))?;
match &self.data {
SessionData::Value(value) => write!(f, ",VALUE={}", quote(value))?,
SessionData::Uri(value) => write!(f, ",URI={}", quote(value))?,
}
if let Some(value) = &self.language {
write!(f, ",LANGUAGE={}", quote(value))?;
}
Ok(())
}
}
impl FromStr for ExtXSessionData {
type Err = Error;
fn from_str(input: &str) -> Result<Self, Self::Err> {
let input = tag(input, Self::PREFIX)?;
let mut data_id = None;
let mut session_value = None;
let mut uri = None;
let mut language = None;
for (key, value) in AttributePairs::new(input) {
match key {
"DATA-ID" => data_id = Some(unquote(value)),
"VALUE" => session_value = Some(unquote(value)),
"URI" => uri = Some(unquote(value)),
"LANGUAGE" => language = Some(unquote(value)),
_ => {
// [6.3.1. General Client Responsibilities]
// > ignore any attribute/value pair with an unrecognized
// AttributeName.
}
}
}
let data_id = data_id.ok_or_else(|| Error::missing_value("EXT-X-DATA-ID"))?;
let data = {
if let Some(value) = session_value {
if uri.is_some() {
return Err(Error::custom("unexpected URI"));
} else {
SessionData::Value(value)
}
} else if let Some(uri) = uri {
SessionData::Uri(uri)
} else {
return Err(Error::custom(
"expected either `SessionData::Uri` or `SessionData::Value`",
));
}
};
Ok(Self {
data_id,
data,
language,
})
}
}
#[cfg(test)]
mod test {
use super::*;
use pretty_assertions::assert_eq;
macro_rules! generate_tests {
( $( { $struct:expr, $str:expr } ),+ $(,)* ) => {
#[test]
fn test_display() {
$(
assert_eq!($struct.to_string(), $str.to_string());
)+
}
#[test]
fn test_parser() {
$(
assert_eq!($struct, $str.parse().unwrap());
)+
assert!(
concat!(
"#EXT-X-SESSION-DATA:",
"DATA-ID=\"foo\",",
"LANGUAGE=\"baz\""
)
.parse::<ExtXSessionData>()
.is_err()
);
assert!(
concat!(
"#EXT-X-SESSION-DATA:",
"DATA-ID=\"foo\",",
"LANGUAGE=\"baz\",",
"VALUE=\"VALUE\",",
"URI=\"https://www.example.com/\""
)
.parse::<ExtXSessionData>()
.is_err()
);
}
}
}
generate_tests! {
{
ExtXSessionData::new(
"com.example.lyrics",
SessionData::Uri("lyrics.json".into())
),
concat!(
"#EXT-X-SESSION-DATA:",
"DATA-ID=\"com.example.lyrics\",",
"URI=\"lyrics.json\""
)
},
{
ExtXSessionData::with_language(
"com.example.title",
SessionData::Value("This is an example".into()),
"en"
),
concat!(
"#EXT-X-SESSION-DATA:",
"DATA-ID=\"com.example.title\",",
"VALUE=\"This is an example\",",
"LANGUAGE=\"en\""
)
},
{
ExtXSessionData::with_language(
"com.example.title",
SessionData::Value("Este es un ejemplo".into()),
"es"
),
concat!(
"#EXT-X-SESSION-DATA:",
"DATA-ID=\"com.example.title\",",
"VALUE=\"Este es un ejemplo\",",
"LANGUAGE=\"es\""
)
}
}
#[test]
fn test_required_version() {
assert_eq!(
ExtXSessionData::new(
"com.example.lyrics",
SessionData::Uri("lyrics.json".to_string())
)
.required_version(),
ProtocolVersion::V1
);
}
}
| builder |
service_endpoint_execution_owner.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ServiceEndpointExecutionOwner(Model):
| """ServiceEndpointExecutionOwner.
:param _links:
:type _links: :class:`ReferenceLinks <service-endpoint.v4_1.models.ReferenceLinks>`
:param id: Gets or sets the Id of service endpoint execution owner.
:type id: int
:param name: Gets or sets the name of service endpoint execution owner.
:type name: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, _links=None, id=None, name=None):
super(ServiceEndpointExecutionOwner, self).__init__()
self._links = _links
self.id = id
self.name = name |
|
types.py | from typing import Union, Tuple, Sized, Container, Any, TypeVar, Callable
from typing import Iterable, Iterator, Sequence, Dict, Generic, cast
from typing import Optional, List, overload
from dataclasses import dataclass
import numpy
import sys
try:
import cupy
get_array_module = cupy.get_array_module
except ImportError:
get_array_module = lambda obj: numpy
# Use typing_extensions for Python versions < 3.8
if sys.version_info < (3, 8):
from typing_extensions import Protocol, Literal
else:
from typing import Protocol, Literal # noqa: F401
# fmt: off
XY_YZ_OutT = TypeVar("XY_YZ_OutT")
XY_XY_OutT = TypeVar("XY_XY_OutT")
DeviceTypes = Literal["cpu", "gpu", "tpu"]
Batchable = Union["Pairs", "Ragged", "Padded", "ArrayXd", List, Tuple]
Xp = Union["numpy", "cupy"] # type: ignore
Shape = Tuple[int, ...]
DTypes = Literal["f", "i", "float16", "float32", "float64", "int32", "int64", "uint32", "uint64"]
DTypesFloat = Literal["f", "float32", "float16", "float64"]
DTypesInt = Literal["i", "int32", "int64", "uint32", "uint64"]
Array1d = Union["Floats1d", "Ints1d"]
Array2d = Union["Floats2d", "Ints2d"]
Array3d = Union["Floats3d", "Ints3d"]
Array4d = Union["Floats4d", "Ints4d"]
FloatsXd = Union["Floats1d", "Floats2d", "Floats3d", "Floats4d"]
IntsXd = Union["Ints1d", "Ints2d", "Ints3d", "Ints4d"]
ArrayXd = Union[FloatsXd, IntsXd]
List1d = Union[List["Floats1d"], List["Ints1d"]]
List2d = Union[List["Floats2d"], List["Ints2d"]]
List3d = Union[List["Floats3d"], List["Ints3d"]]
List4d = Union[List["Floats4d"], List["Ints4d"]]
ListXd = Union[List["FloatsXd"], List["IntsXd"]]
ArrayT = TypeVar("ArrayT")
SelfT = TypeVar("SelfT")
Array1dT = TypeVar("Array1dT", bound="Array1d")
# These all behave the same as far as indexing is concerned
Slicish = Union[slice, List[int], "ArrayXd"]
_1_KeyScalar = int
_1_Key1d = Slicish
_1_AllKeys = Union[_1_KeyScalar, _1_Key1d]
_F1_AllReturns = Union[float, "Floats1d"]
_I1_AllReturns = Union[int, "Ints1d"]
_2_KeyScalar = Tuple[int, int]
_2_Key1d = Union[int, Tuple[Slicish, int], Tuple[int, Slicish]]
_2_Key2d = Union[Tuple[Slicish, Slicish], Slicish]
_2_AllKeys = Union[_2_KeyScalar, _2_Key1d, _2_Key2d]
_F2_AllReturns = Union[float, "Floats1d", "Floats2d"]
_I2_AllReturns = Union[int, "Ints1d", "Ints2d"]
_3_KeyScalar = Tuple[int, int, int]
_3_Key1d = Union[Tuple[int, int], Tuple[int, int, Slicish], Tuple[int, Slicish, int], Tuple[Slicish, int, int]]
_3_Key2d = Union[int, Tuple[int, Slicish], Tuple[Slicish, int], Tuple[int, Slicish, Slicish], Tuple[Slicish, int, Slicish], Tuple[Slicish, Slicish, int]]
_3_Key3d = Union[Slicish, Tuple[Slicish, Slicish], Tuple[Slicish, Slicish, Slicish]]
_3_AllKeys = Union[_3_KeyScalar, _3_Key1d, _3_Key2d, _3_Key3d]
_F3_AllReturns = Union[float, "Floats1d", "Floats2d", "Floats3d"]
_I3_AllReturns = Union[int, "Ints1d", "Ints2d", "Ints3d"]
_4_KeyScalar = Tuple[int, int, int, int]
_4_Key1d = Union[Tuple[int, int, int], Tuple[int, int, int, Slicish], Tuple[int, int, Slicish, int], Tuple[int, Slicish, int, int], Tuple[Slicish, int, int, int]]
_4_Key2d = Union[Tuple[int, int], Tuple[int, int, Slicish], Tuple[int, Slicish, int], Tuple[Slicish, int, int], Tuple[int, int, Slicish, Slicish], Tuple[int, Slicish, int, Slicish], Tuple[int, Slicish, Slicish, int], Tuple[Slicish, int, int, Slicish], Tuple[Slicish, int, Slicish, int], Tuple[Slicish, Slicish, int, int]]
_4_Key3d = Union[int, Tuple[int, Slicish], Tuple[Slicish, int], Tuple[int, Slicish, Slicish], Tuple[Slicish, int, Slicish], Tuple[Slicish, Slicish, int], Tuple[int, Slicish, Slicish, Slicish], Tuple[Slicish, int, Slicish, Slicish], Tuple[Slicish, Slicish, int, Slicish], Tuple[Slicish, Slicish, Slicish, int]]
_4_Key4d = Union[Slicish, Tuple[Slicish, Slicish], Tuple[Slicish, Slicish, Slicish], Tuple[Slicish, Slicish, Slicish, Slicish]]
_4_AllKeys = Union[_4_KeyScalar, _4_Key1d, _4_Key2d, _4_Key3d, _4_Key4d]
_F4_AllReturns = Union[float, "Floats1d", "Floats2d", "Floats3d", "Floats4d"]
_I4_AllReturns = Union[int, "Ints1d", "Ints2d", "Ints3d", "Ints4d"]
# Typedefs for the reduction methods.
Tru = Literal[True]
Fal = Literal[False]
OneAx = Union[int, Tuple[int]]
TwoAx = Tuple[int, int]
ThreeAx = Tuple[int, int, int]
FourAx = Tuple[int, int, int, int]
_1_AllAx = Optional[OneAx]
_2_AllAx = Union[Optional[TwoAx], OneAx]
_3_AllAx = Union[Optional[ThreeAx], TwoAx, OneAx]
_4_AllAx = Union[Optional[FourAx], ThreeAx, TwoAx, OneAx]
_1F_ReduceResults = Union[float, "Floats1d"]
_2F_ReduceResults = Union[float, "Floats1d", "Floats2d"]
_3F_ReduceResults = Union[float, "Floats1d", "Floats2d", "Floats3d"]
_4F_ReduceResults = Union[float, "Floats1d", "Floats2d", "Floats3d", "Floats4d"]
_1I_ReduceResults = Union[int, "Ints1d"]
_2I_ReduceResults = Union[int, "Ints1d", "Ints2d"]
_3I_ReduceResults = Union[int, "Ints1d", "Ints2d", "Ints3d"]
_4I_ReduceResults = Union[int, "Ints1d", "Ints2d", "Ints3d", "Ints4d"]
# TODO:
# We need to get correct overloads in for the following reduction methods.
# The 'sum' reduction is correct --- the others need to be just the same,
# but with a different name.
# max, min, prod, round, var, mean, ptp, std
# There's also one *slightly* different function, cumsum. This doesn't
# have a scalar version -- it always makes an array.
class _Array(Sized, Container):
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v)
@property
def dtype(self) -> DTypes: ...
@property
def data(self) -> memoryview: ...
@property
def flags(self) -> Any: ...
@property
def size(self) -> int: ...
@property
def itemsize(self) -> int: ...
@property
def nbytes(self) -> int: ...
@property
def ndim(self) -> int: ...
@property
def shape(self) -> Shape: ...
@property
def strides(self) -> Tuple[int, ...]: ...
# TODO: Is ArrayT right?
def astype(self: ArrayT, dtype: DTypes, order: str = ..., casting: str = ..., subok: bool = ..., copy: bool = ...) -> ArrayT: ...
def copy(self: ArrayT, order: str = ...) -> ArrayT: ...
def fill(self, value: Any) -> None: ...
# Shape manipulation
def reshape(self: ArrayT, shape: Shape, *, order: str = ...) -> ArrayT: ...
def transpose(self: ArrayT, axes: Shape) -> ArrayT: ...
# TODO: is this right? It returns 1d
def flatten(self, order: str = ...): ...
# TODO: is this right? It returns 1d
def ravel(self, order: str = ...): ...
def squeeze(self, axis: Union[int, Shape] = ...): ...
def __len__(self) -> int: ...
def __setitem__(self, key, value): ...
def __iter__(self) -> Iterator[Any]: ...
def __contains__(self, key) -> bool: ...
def __index__(self) -> int: ...
def __int__(self) -> int: ...
def __float__(self) -> float: ...
def __complex__(self) -> complex: ...
def __bool__(self) -> bool: ...
def __bytes__(self) -> bytes: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def __copy__(self, order: str = ...): ...
def __deepcopy__(self, memo: dict) -> ArrayT: ...
def __lt__(self, other): ...
def __le__(self, other): ...
def __eq__(self, other): ...
def __ne__(self, other): ...
def __gt__(self, other): ...
def __ge__(self, other): ...
def __add__(self, other): ...
def __radd__(self, other): ...
def __iadd__(self, other): ...
def __sub__(self, other): ...
def __rsub__(self, other): ...
def __isub__(self, other): ...
def __mul__(self, other): ...
def __rmul__(self, other): ...
def __imul__(self, other): ...
def __truediv__(self, other): ...
def __rtruediv__(self, other): ...
def __itruediv__(self, other): ...
def __floordiv__(self, other): ...
def __rfloordiv__(self, other): ...
def __ifloordiv__(self, other): ...
def __mod__(self, other): ...
def __rmod__(self, other): ...
def __imod__(self, other): ...
def __divmod__(self, other): ...
def __rdivmod__(self, other): ...
# NumPy's __pow__ doesn't handle a third argument
def __pow__(self, other): ...
def __rpow__(self, other): ...
def __ipow__(self, other): ...
def __lshift__(self, other): ...
def __rlshift__(self, other): ...
def __ilshift__(self, other): ...
def __rshift__(self, other): ...
def __rrshift__(self, other): ...
def __irshift__(self, other): ...
def __and__(self, other): ...
def __rand__(self, other): ...
def __iand__(self, other): ...
def __xor__(self, other): ...
def __rxor__(self, other): ...
def __ixor__(self, other): ...
def __or__(self, other): ...
def __ror__(self, other): ...
def __ior__(self, other): ...
def __matmul__(self, other): ...
def __rmatmul__(self, other): ...
def __neg__(self: ArrayT) -> ArrayT: ...
def __pos__(self: ArrayT) -> ArrayT: ...
def __abs__(self: ArrayT) -> ArrayT: ...
def __invert__(self: ArrayT) -> ArrayT: ...
def get(self: ArrayT) -> ArrayT: ...
def all(self, axis: int = -1, out: Optional[ArrayT] = None, keepdims: bool = False) -> ArrayT: ...
def any(self, axis: int = -1, out: Optional[ArrayT] = None, keepdims: bool = False) -> ArrayT: ...
# def argmax(self, axis: int = -1, out: Optional["Array"] = None, keepdims: Union[Tru, Fal]=False) -> Union[int, "Ints1d"]: ...
def argmin(self, axis: int = -1, out: Optional[ArrayT] = None) -> ArrayT: ...
def clip(self, a_min: Any, a_max: Any, out: Optional[ArrayT]) -> ArrayT: ...
#def cumsum( self: ArrayT, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional[ArrayT] = None) -> ArrayT: ...
def max(self, axis: int = -1, out: Optional[ArrayT] = None) -> ArrayT: ...
# def mean(self, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional[SelfT] = None, keepdims: bool = False) -> "Array": ...
def min(self, axis: int = -1, out: Optional[ArrayT] = None) -> ArrayT: ...
def nonzero(self) -> ArrayT: ...
def prod(self, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional[ArrayT] = None, keepdims: bool = False) -> ArrayT: ...
def round(self, decimals: int = 0, out: Optional[ArrayT] = None) -> ArrayT: ...
# def sum(self, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional[ArrayT] = None, keepdims: bool = False) -> ArrayT: ...
def tobytes(self, order: str = "C") -> bytes: ...
def tolist(self) -> List[Any]: ...
def var(self: SelfT, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional[ArrayT] = None, ddof: int = 0, keepdims: bool = False) -> SelfT: ...
class _Floats(_Array):
@property
def dtype(self) -> DTypesFloat: ...
def fill(self, value: float) -> None: ...
def reshape(self, shape: Shape, *, order: str = ...) -> "_Floats": ...
class _Ints(_Array):
@property
def dtype(self) -> DTypesInt: ...
def fill(self, value: int) -> None: ...
def reshape(self, shape: Shape, *, order: str = ...) -> "_Ints": ...
"""
Extensive overloads to represent __getitem__ behaviour.
In an N-dimensional array, there will be N+1 possible return types. For instance,
if you have a 2d array, you could get back a float (array[i, j]), a floats1d
(array[i]) or a floats2d (array[:i, :j]). You'll get the scalar if you have N
ints in the index, a 1d array if you have N-1 ints, etc.
So the trick here is to make a union with the various combinations that produce
each result type, and then only have one overload per result. If we overloaded
on each *key* type, that would get crazy, because there are tonnes of combinations.
In each rank, we can use the same key-types for float and int, but we need a
different return-type union.
"""
class _Array1d(_Array):
"""1-dimensional array."""
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=1)
@property
def ndim(self) -> Literal[1]: ...
@property
def shape(self) -> Tuple[int]: ...
def __iter__(self) -> Iterator[Union[float, int]]: ...
def astype(self, dtype: DTypes, order: str = ..., casting: str = ..., subok: bool = ..., copy: bool = ...) -> "_Array1d": ...
def flatten(self: SelfT, order: str = ...) -> SelfT: ...
def ravel(self: SelfT, order: str = ...) -> SelfT: ...
# This is actually a bit too strict: It's legal to say 'array1d + array2d'
# That's kind of bad code though; it's better to write array2d + array1d.
# We could relax this, but let's try the strict version.
def __add__(self: SelfT, other: Union[float, int, "Array1d"]) -> SelfT: ...
def __sub__(self: SelfT, other: Union[float, int, "Array1d"]) -> SelfT: ...
def __mul__(self: SelfT, other: Union[float, int, "Array1d"]) -> SelfT: ...
def __pow__(self: SelfT, other: Union[float, int, "Array1d"]) -> SelfT: ...
def __matmul__(self: SelfT, other: Union[float, int, "Array1d"]) -> SelfT: ...
# These are not too strict though: you can't do += with higher dimensional.
def __iadd__(self, other: Union[float, int, "Array1d"]): ...
def __isub__(self, other: Union[float, int, "Array1d"]): ...
def __imul__(self, other: Union[float, int, "Array1d"]): ...
def __ipow__(self, other: Union[float, int, "Array1d"]): ...
@overload
def argmax(self, keepdims: Fal = False, axis: int = -1, out: Optional[_Array] = None) -> int: ...
@overload
def argmax(self, keepdims: Tru, axis: int = -1, out: Optional[_Array] = None) -> "Ints1d": ...
def argmax(self, keepdims: bool = False, axis: int = -1, out: Optional[_Array] = None) -> Union[int, "Ints1d"]: ...
@overload
def mean(self, keepdims: Tru, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional["Floats1d"] = None) -> "Floats1d": ...
@overload
def mean(self, keepdims: Fal = False, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional["Floats1d"] = None) -> float: ...
def mean(self, keepdims: bool = False, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional["Floats1d"] = None) -> Union["Floats1d", float]: ...
class Floats1d(_Array1d, _Floats):
"""1-dimensional array of floats."""
T: "Floats1d"
@classmethod
def __get_validators__(cls):
"""Runtine validation for pydantic."""
yield lambda v: validate_array(v, ndim=1, dtype="f")
def __iter__(self) -> Iterator[float]: ...
@overload
def __getitem__(self, key: _1_KeyScalar) -> float: ...
@overload
def __getitem__(self, key: _1_Key1d) -> "Floats1d": ...
def __getitem__(self, key: _1_AllKeys) -> _F1_AllReturns: ...
@overload
def __setitem__(self, key: _1_KeyScalar, value: float) -> None: ...
@overload
def __setitem__(self, key: _1_Key1d, value: "Floats1d") -> None: ...
def __setitem__(self, key: _1_AllKeys, value: _F1_AllReturns) -> None: ...
@overload
def cumsum(self, *, keepdims: Tru, axis: Optional[OneAx] = None, out: Optional["Floats1d"] = None) -> "Floats1d": ...
@overload  # Cumsum is unusual in this: it returns an array even when keepdims is False
def cumsum(self, *, keepdims: Fal, axis: Optional[OneAx] = None, out: Optional["Floats1d"] = None) -> "Floats1d": ...
def cumsum(self, *, keepdims: bool = False, axis: _1_AllAx = None, out: Optional["Floats1d"] = None) -> "Floats1d": ...
@overload
def sum(self, *, keepdims: Tru, axis: Optional[OneAx] = None, out: Optional["Floats1d"] = None) -> "Floats1d": ...
@overload
def sum(self, *, keepdims: Fal, axis: Optional[OneAx] = None, out = None) -> float: ...
def sum(self, *, keepdims: bool = False, axis: _1_AllAx = None, out: Optional["Floats1d"] = None) -> _1F_ReduceResults: ...
class Ints1d(_Array1d, _Ints):
"""1-dimensional array of ints."""
T: "Ints1d"
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=1, dtype="i")
def __iter__(self) -> Iterator[int]: ...
@overload
def __getitem__(self, key: _1_KeyScalar) -> int: ...
@overload
def __getitem__(self, key: _1_Key1d) -> "Ints1d": ...
def __getitem__(self, key: _1_AllKeys) -> _I1_AllReturns: ...
@overload
def __setitem__(self, key: _1_KeyScalar, value: int) -> None: ...
@overload
def __setitem__(self, key: _1_Key1d, value: Union[int, "Ints1d"]) -> None: ...
def __setitem__(self, key: _1_AllKeys, value: _I1_AllReturns) -> None: ...
@overload
def cumsum(self, *, keepdims: Tru, axis: Optional[OneAx] = None, out: Optional["Ints1d"] = None) -> "Ints1d": ...
@overload
def cumsum(self, *, keepdims: Fal = False, axis: Optional[OneAx] = None, out: Optional["Ints1d"] = None) -> "Ints1d": ...
def cumsum(self, *, keepdims: bool = False, axis: _1_AllAx = None, out: Optional["Ints1d"] = None) -> "Ints1d": ...
@overload
def sum(self, *, keepdims: Tru, axis: Optional[OneAx] = None, out: Optional["Ints1d"] = None) -> "Ints1d": ...
@overload
def sum(self, *, keepdims: Fal = False, axis: Optional[OneAx] = None, out = None) -> int: ...
def sum(self, *, keepdims: bool = False, axis: _1_AllAx = None, out: Optional["Ints1d"] = None) -> _1I_ReduceResults: ...
class _Array2d(_Array):
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=2)
@property
def ndim(self) -> Literal[2]: ...
@property
def shape(self) -> Tuple[int, int]: ...
def __iter__(self) -> Iterator[Array1d]: ...
def astype(self, dtype: DTypes, order: str = ..., casting: str = ..., subok: bool = ..., copy: bool = ...) -> "Array2d": ...
# This is actually a bit too strict: It's legal to say 'array2d + array3d'
# That's kind of bad code though; it's better to write array3d + array2d.
# We could relax this, but let's try the strict version.
def __add__(self: ArrayT, other: Union[float, int, Array1d, "Array2d"]) -> ArrayT: ...
def __sub__(self: ArrayT, other: Union[float, int, Array1d, "Array2d"]) -> ArrayT: ...
def __mul__(self: ArrayT, other: Union[float, int, Array1d, "Array2d"]) -> ArrayT: ...
def __pow__(self: ArrayT, other: Union[float, int, Array1d, "Array2d"]) -> ArrayT: ...
def __matmul__(self: ArrayT, other: Union[float, int, Array1d, "Array2d"]) -> ArrayT: ...
# These are not too strict though: you can't do += with higher dimensional.
def __iadd__(self, other: Union[float, int, Array1d, "Array2d"]): ...
def __isub__(self, other: Union[float, int, Array1d, "Array2d"]): ...
def __imul__(self, other: Union[float, int, Array1d, "Array2d"]): ...
def __ipow__(self, other: Union[float, int, Array1d, "Array2d"]): ...
@overload
def argmax(self, keepdims: Fal = False, axis: int = -1, out: Optional[_Array] = None) -> Ints1d: ...
@overload
def argmax(self, keepdims: Tru, axis: int = -1, out: Optional[_Array] = None) -> "Ints2d": ...
def argmax(self, keepdims: bool = False, axis: int = -1, out: Optional[_Array] = None) -> Union[Ints1d, "Ints2d"]: ...
@overload
def mean(self, keepdims: Fal = False, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional["Floats2d"] = None) -> Floats1d: ...
@overload
def mean(self, keepdims: Tru, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional["Floats2d"] = None) -> "Floats2d": ...
def mean(self, keepdims: bool = False, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional["Floats2d"] = None) -> Union["Floats2d", Floats1d]: ...
class Floats2d(_Array2d, _Floats):
"""2-dimensional array of floats"""
T: "Floats2d"
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=2, dtype="f")
def __iter__(self) -> Iterator[Floats1d]: ...
@overload
def __getitem__(self, key: _2_KeyScalar) -> float: ...
@overload
def __getitem__(self, key: _2_Key1d) -> Floats1d: ...
@overload
def __getitem__(self, key: _2_Key2d) -> "Floats2d": ...
def __getitem__(self, key: _2_AllKeys) -> _F2_AllReturns: ...
@overload
def __setitem__(self, key: _2_KeyScalar, value: float) -> None: ...
@overload
def __setitem__(self, key: _2_Key1d, value: Union[float, Floats1d]) -> None: ...
@overload
def __setitem__(self, key: _2_Key2d, value: _F2_AllReturns) -> None: ...
def __setitem__(self, key: _2_AllKeys, value: _F2_AllReturns) -> None: ...
@overload
def sum(self, *, keepdims: Tru, axis: _2_AllAx = None, out: Optional["Floats2d"] = None) -> "Floats2d": ...
@overload
def sum(self, *, keepdims: Fal = False, axis: OneAx, out: Optional[Floats1d] = None) -> Floats1d: ...
@overload
def sum(self, *, keepdims: Fal = False, axis: TwoAx, out = None) -> float: ...
def sum(self, *, keepdims: bool = False, axis: _2_AllAx = None, out: Union[None, "Floats1d", "Floats2d"] = None) -> _2F_ReduceResults: ...
class Ints2d(_Array2d, _Ints):
"""2-dimensional array of ints."""
T: "Ints2d"
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=2, dtype="i")
def __iter__(self) -> Iterator[Ints1d]: ...
@overload
def __getitem__(self, key: _2_KeyScalar) -> int: ...
@overload
def __getitem__(self, key: _2_Key1d) -> Ints1d: ...
@overload
def __getitem__(self, key: _2_Key2d) -> "Ints2d": ...
def __getitem__(self, key: _2_AllKeys) -> _I2_AllReturns: ...
@overload
def __setitem__(self, key: _2_KeyScalar, value: int) -> None: ...
@overload
def __setitem__(self, key: _2_Key1d, value: Ints1d) -> None: ...
@overload
def __setitem__(self, key: _2_Key2d, value: "Ints2d") -> None: ...
def __setitem__(self, key: _2_AllKeys, value: _I2_AllReturns) -> None: ...
@overload
def sum(self, keepdims: Fal = False, axis: int = -1, out: Optional["Ints1d"] = None) -> Ints1d: ...
@overload
def sum(self, keepdims: Tru, axis: int = -1, out: Optional["Ints2d"] = None) -> "Ints2d": ...
def sum(self, keepdims: bool = False, axis: int = -1, out: Optional[Union["Ints1d", "Ints2d"]] = None) -> Union["Ints2d", Ints1d]: ...
class _Array3d(_Array):
"""3-dimensional array of floats"""
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=3)
@property
def ndim(self) -> Literal[3]: ...
@property
def shape(self) -> Tuple[int, int, int]: ...
def __iter__(self) -> Iterator[Array2d]: ...
def astype(self, dtype: DTypes, order: str = ..., casting: str = ..., subok: bool = ..., copy: bool = ...) -> "Array3d": ...
# This is actually a bit too strict: It's legal to say 'array3d + array4d'
# That's kind of bad code though; it's better to write array4d + array3d.
# We could relax this, but let's try the strict version.
def __add__(self: SelfT, other: Union[float, int, Array1d, Array2d, "Array3d"]) -> SelfT: ...
def __sub__(self: SelfT, other: Union[float, int, Array1d, Array2d, "Array3d"]) -> SelfT: ...
def __mul__(self: SelfT, other: Union[float, int, Array1d, Array2d, "Array3d"]) -> SelfT: ...
def __pow__(self: SelfT, other: Union[float, int, Array1d, Array2d, "Array3d"]) -> SelfT: ...
def __matmul__(self: SelfT, other: Union[float, int, Array1d, Array2d, "Array3d"]) -> SelfT: ...
# These are not too strict though: you can't do += with higher dimensional.
def __iadd__(self, other: Union[float, int, Array1d, Array2d, "Array3d"]): ...
def __isub__(self, other: Union[float, int, Array1d, Array2d, "Array3d"]): ...
def __imul__(self, other: Union[float, int, Array1d, Array2d, "Array3d"]): ...
def __ipow__(self, other: Union[float, int, Array1d, Array2d, "Array3d"]): ...
@overload
def argmax(self, keepdims: Fal = False, axis: int = -1, out: Optional[_Array] = None) -> Ints2d: ...
@overload
def argmax(self, keepdims: Tru, axis: int = -1, out: Optional[_Array] = None) -> "Ints3d": ...
def argmax(self, keepdims: bool = False, axis: int = -1, out: Optional[_Array] = None) -> Union[Ints2d, "Ints3d"]: ...
class Floats3d(_Array3d, _Floats):
"""3-dimensional array of floats"""
T: "Floats3d"
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=3, dtype="f")
def __iter__(self) -> Iterator[Floats2d]: ...
@overload
def __getitem__(self, key: _3_KeyScalar) -> float: ...
@overload
def __getitem__(self, key: _3_Key1d) -> Floats1d: ...
@overload
def __getitem__(self, key: _3_Key2d) -> Floats2d: ...
@overload
def __getitem__(self, key: _3_Key3d) -> "Floats3d": ...
def __getitem__(self, key: _3_AllKeys) -> _F3_AllReturns: ...
@overload
def __setitem__(self, key: _3_KeyScalar, value: float) -> None: ...
@overload
def __setitem__(self, key: _3_Key1d, value: Floats1d) -> None: ...
@overload
def __setitem__(self, key: _3_Key2d, value: Floats2d) -> None: ...
@overload
def __setitem__(self, key: _3_Key3d, value: "Floats3d") -> None: ...
def __setitem__(self, key: _3_AllKeys, value: _F3_AllReturns) -> None: ...
@overload
def sum(self, *, keepdims: Tru, axis: _3_AllAx = None, out: Optional["Floats3d"] = None) -> "Floats3d": ...
@overload
def sum(self, *, keepdims: Fal, axis: OneAx, out: Optional[Floats2d] = None) -> Floats2d: ...
@overload
def sum(self, *, keepdims: Fal, axis: TwoAx, out: Optional[Floats1d] = None) -> Floats1d: ...
@overload
def sum(self, *, keepdims: Fal, axis: Optional[ThreeAx], out = None) -> float: ...
def sum(self, *, keepdims: bool = False, axis: _3_AllAx = None, out: Union[None, Floats1d, Floats2d, "Floats3d"] = None) -> _3F_ReduceResults: ...
class Ints3d(_Array3d, _Ints):
"""3-dimensional array of ints."""
T: "Ints3d"
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=3, dtype="i")
def __iter__(self) -> Iterator[Ints2d]: ...
@overload
def __getitem__(self, key: _3_KeyScalar) -> int: ...
@overload
def __getitem__(self, key: _3_Key1d) -> Ints1d: ...
@overload
def __getitem__(self, key: _3_Key2d) -> Ints2d: ...
@overload
def __getitem__(self, key: _3_Key3d) -> "Ints3d": ...
def __getitem__(self, key: _3_AllKeys) -> _I3_AllReturns: ...
@overload
def __setitem__(self, key: _3_KeyScalar, value: int) -> None: ...
@overload
def __setitem__(self, key: _3_Key1d, value: Ints1d) -> None: ...
@overload
def __setitem__(self, key: _3_Key2d, value: Ints2d) -> None: ...
@overload
def __setitem__(self, key: _3_Key3d, value: "Ints3d") -> None: ...
def __setitem__(self, key: _3_AllKeys, value: _I3_AllReturns) -> None: ...
@overload
def sum(self, *, keepdims: Tru, axis: _3_AllAx = None, out: Optional["Ints3d"] = None) -> "Ints3d": ...
@overload
def sum(self, *, keepdims: Fal, axis: OneAx, out: Optional[Ints2d] = None) -> Ints2d: ...
@overload
def sum(self, *, keepdims: Fal, axis: TwoAx, out: Optional[Ints1d] = None) -> Ints1d: ...
@overload
def sum(self, *, keepdims: Fal, axis: Optional[ThreeAx], out = None) -> int: ...
def sum(self, *, keepdims: bool = False, axis: _3_AllAx = None, out: Union[None, Ints1d, Ints2d, "Ints3d"] = None) -> _3I_ReduceResults: ...
class _Array4d(_Array):
"""4-dimensional array."""
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=4)
@property
def ndim(self) -> Literal[4]: ...
@property
def shape(self) -> Tuple[int, int, int, int]: ...
def __iter__(self) -> Iterator[Array3d]: ...
def astype(self, dtype: DTypes, order: str = ..., casting: str = ..., subok: bool = ..., copy: bool = ...) -> "_Array4d": ...
# This is actually a bit too strict: It's legal to say 'array4d + array5d'
# That's kind of bad code though; it's better to write array5d + array4d.
# We could relax this, but let's try the strict version.
def __add__(self: SelfT, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]) -> SelfT: ...
def __sub__(self: SelfT, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]) -> SelfT: ...
def __mul__(self: SelfT, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]) -> SelfT: ...
def __pow__(self: SelfT, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]) -> SelfT: ...
def __matmul__(self: SelfT, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]) -> SelfT: ...
# These are not too strict though: you can't do += with higher dimensional.
def __iadd__(self, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]): ...
def __isub__(self, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]): ...
def __imul__(self, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]): ...
def __ipow__(self, other: Union[float, int, Array1d, Array2d, Array3d, "Array4d"]): ...
class Floats4d(_Array4d, _Floats):
"""4-dimensional array of floats."""
T: "Floats4d"
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=4, dtype="f")
def __iter__(self) -> Iterator[Floats3d]: ...
@overload
def __getitem__(self, key: _4_KeyScalar) -> float: ...
@overload
def __getitem__(self, key: _4_Key1d) -> Floats1d: ...
@overload
def __getitem__(self, key: _4_Key2d) -> Floats2d: ...
@overload
def __getitem__(self, key: _4_Key3d) -> Floats3d: ...
@overload
def __getitem__(self, key: _4_Key4d) -> "Floats4d": ...
def __getitem__(self, key: _4_AllKeys) -> _F4_AllReturns: ...
@overload
def __setitem__(self, key: _4_KeyScalar, value: float) -> None: ...
@overload
def __setitem__(self, key: _4_Key1d, value: Floats1d) -> None: ...
@overload
def __setitem__(self, key: _4_Key2d, value: Floats2d) -> None: ...
@overload
def __setitem__(self, key: _4_Key3d, value: Floats3d) -> None: ...
@overload
def __setitem__(self, key: _4_Key4d, value: "Floats4d") -> None: ...
def __setitem__(self, key: _4_AllKeys, value: _F4_AllReturns) -> None: ...
@overload
def sum(self, *, keepdims: Tru, axis: _4_AllAx = None, out: Optional["Floats4d"] = None) -> "Floats4d": ...
@overload
def sum(self, *, keepdims: Fal = False, axis: OneAx, out: Optional[Floats3d] = None) -> Floats3d: ...
@overload
def sum(self, *, keepdims: Fal = False, axis: TwoAx, out: Optional[Floats2d] = None) -> Floats2d: ...
@overload
def sum(self, *, keepdims: Fal = False, axis: ThreeAx, out: Optional[Floats1d] = None) -> Floats1d: ...
@overload
def sum(self, *, keepdims: Fal = False, axis: Optional[FourAx], out = None) -> float: ...
def sum(self, *, keepdims: bool = False, axis: _4_AllAx = None, out: Union[None, Floats1d, Floats2d, Floats3d, "Floats4d"] = None) -> _4F_ReduceResults: ...
class Ints4d(_Array4d, _Ints):
"""4-dimensional array of ints."""
T: "Ints4d"
@classmethod
def __get_validators__(cls):
"""Runtime validation for pydantic."""
yield lambda v: validate_array(v, ndim=4, dtype="i")
def __iter__(self) -> Iterator[Ints3d]: ...
# def __getitem__(self, key: int) -> Ints3d: ...
@overload
def sum(self, *, keepdims: Tru, axis: _4_AllAx = None, out: Optional["Ints4d"] = None) -> "Ints4d": ...
@overload
def sum(self, *, keepdims: Fal = False, axis: OneAx, out: Optional[Ints3d] = None) -> Ints3d: ...
@overload
def sum(self, *, keepdims: Fal = False, axis: TwoAx, out: Optional[Ints2d] = None) -> Ints2d: ...
@overload
def sum(self, *, keepdims: Fal = False, axis: ThreeAx, out: Optional[Ints1d] = None) -> Ints1d: ...
@overload
def sum(self, *, keepdims: Fal = False, axis: Optional[FourAx] = None, out = None) -> int: ...
def sum(self, *, keepdims: bool = False, axis: _4_AllAx = None, out: Optional[Union[Ints1d, Ints2d, Ints3d, "Ints4d"]] = None) -> _4I_ReduceResults: ...
_DIn = TypeVar("_DIn")
class Decorator(Protocol):
"""Protocol to mark a function as returning its child with identical signature."""
def __call__(self, name: str) -> Callable[[_DIn], _DIn]: ...
# fmt: on
class Generator(Iterator):
"""Custom generator type. Used to annotate function arguments that accept
generators so they can be validated by pydantic (which doesn't support
iterators/iterables otherwise).
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
if not hasattr(v, "__iter__") and not hasattr(v, "__next__"):
raise TypeError("not a valid iterator")
return v
@dataclass
class SizedGenerator:
"""A generator that has a __len__ and can repeatedly call the generator
function.
"""
get_items: Callable[[], Generator]
length: int
def __len__(self):
return self.length
def __iter__(self):
yield from self.get_items()
@dataclass
class Padded:
"""A batch of padded sequences, sorted by decreasing length. The data array
is of shape (step, batch, ...). The auxiliary array size_at_t indicates the
length of the batch at each timestep, so you can do data[:, :size_at_t[t]] to
shrink the batch. The lengths array indicates the length of each sequence b,
and the indices array indicates the original ordering.
"""
data: Floats3d
size_at_t: Ints1d
lengths: Ints1d
indices: Ints1d
def copy(self):
return Padded(
self.data.copy(),
self.size_at_t.copy(),
self.lengths.copy(),
self.indices.copy()
)
def __len__(self) -> int:
return self.lengths.shape[0]
def __getitem__(self, index: Union[int, slice, Ints1d]) -> "Padded":
if isinstance(index, int):
# Slice to keep the dimensionality
return Padded(
self.data[:, index : index + 1],
self.lengths[index : index + 1],
self.lengths[index : index + 1],
self.indices[index : index + 1],
)
elif isinstance(index, slice):
return Padded(
self.data[:, index],
self.lengths[index],
self.lengths[index],
self.indices[index],
)
else:
# If we get a sequence of indices, we need to be careful that
# we maintain the length-sorting, while also keeping the mapping
# back to the original order correct.
sorted_index = list(sorted(index))
return Padded(
self.data[sorted_index],
self.size_at_t[sorted_index],
self.lengths[sorted_index],
self.indices[index], # Use original, to maintain order.
)
@dataclass
class Ragged:
"""A batch of concatenated sequences, that vary in the size of their
first dimension. Ragged allows variable-length sequence data to be contiguous
in memory, without padding.
Indexing into Ragged is just like indexing into the *lengths* array, except
it returns a Ragged object with the accompanying sequence data. For instance,
you can write ragged[1:4] to get a Ragged object with sequences 1, 2 and 3.
"""
data: Array2d
lengths: Ints1d
data_shape: Tuple[int, ...]
starts_ends: Optional[Ints1d] = None
def __init__(self, data: _Array, lengths: Ints1d):
self.lengths = lengths
# Frustratingly, the -1 dimension doesn't work with 0 size...
if data.size:
self.data = cast(Array2d, data.reshape((data.shape[0], -1)))
else:
self.data = cast(Array2d, data.reshape((0, 0)))
self.data_shape = (-1,) + data.shape[1:]
@property
def dataXd(self) -> ArrayXd:
if self.data.size:
reshaped = self.data.reshape(self.data_shape)
else:
reshaped = self.data.reshape((self.data.shape[0],) + self.data_shape[1:])
return cast(ArrayXd, reshaped)
def __len__(self) -> int:
return self.lengths.shape[0]
def __getitem__(self, index: Union[int, slice, Array1d]) -> "Ragged":
if isinstance(index, tuple):
raise IndexError("Ragged arrays do not support 2d indexing.")
starts = self._get_starts()
ends = self._get_ends()
if isinstance(index, int):
s = starts[index]
e = ends[index]
return Ragged(self.data[s:e], self.lengths[index : index + 1])
elif isinstance(index, slice):
lengths = self.lengths[index]
if len(lengths) == 0:
return Ragged(self.data[0:0].reshape(self.data_shape), lengths)
start = starts[index][0] if index.start >= 1 else 0
end = ends[index][-1]
return Ragged(self.data[start:end].reshape(self.data_shape), lengths)
else:
# There must be a way to do this "properly" :(. Sigh, hate numpy.
xp = get_array_module(self.data)
data = xp.vstack([self[int(i)].data for i in index])
return Ragged(data.reshape(self.data_shape), self.lengths[index])
def _get_starts_ends(self) -> Ints1d:
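# Lazily build a cumulative-lengths array: starts_ends[i] is the offset of
# sequence i's first row, and starts_ends[i + 1] is one past its last row.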
if self.starts_ends is None:
xp = get_array_module(self.lengths)
self.starts_ends = xp.empty(self.lengths.size + 1, dtype="i")
self.starts_ends[0] = 0
self.lengths.cumsum(out=self.starts_ends[1:])
return self.starts_ends
def _get_starts(self) -> Ints1d:
return self._get_starts_ends()[:-1]
def _get_ends(self) -> Ints1d:
return self._get_starts_ends()[1:]
_P = TypeVar("_P", bound=Sequence)
@dataclass
class Pairs(Generic[_P]):
"""Dataclass for pairs of sequences that allows indexing into the sequences
while keeping them aligned.
"""
one: _P
two: _P
def __getitem__(self, index) -> "Pairs[_P]":
return Pairs(self.one[index], self.two[index])
def __len__(self) -> int:
return len(self.one)
@dataclass
class ArgsKwargs:
"""A tuple of (args, kwargs) that can be spread into some function f:
f(*args, **kwargs)
"""
args: Tuple[Any, ...]
kwargs: Dict[str, Any]
@classmethod
def from_items(cls, items: Sequence[Tuple[Union[int, str], Any]]) -> "ArgsKwargs":
"""Create an ArgsKwargs object from a sequence of (key, value) tuples,
such as produced by argskwargs.items(). Each key should be either a string
or an integer. Items with int keys are added to the args list, and
items with string keys are added to the kwargs list. The args list is
determined by sequence order, not the value of the integer.
"""
args = []
kwargs = {}
for key, value in items:
if isinstance(key, int):
args.append(value)
else:
kwargs[key] = value
return cls(args=tuple(args), kwargs=kwargs)
def keys(self) -> Iterable[Union[int, str]]:
"""Yield indices from self.args, followed by keys from self.kwargs."""
yield from range(len(self.args))
yield from self.kwargs.keys()
def values(self) -> Iterable[Any]:
"""Yield elements of from self.args, followed by values from self.kwargs."""
yield from self.args
yield from self.kwargs.values()
def items(self) -> Iterable[Tuple[Union[int, str], Any]]:
"""Yield enumerate(self.args), followed by self.kwargs.items()"""
yield from enumerate(self.args)
yield from self.kwargs.items()
@dataclass
class Unserializable:
"""Wrap a value to prevent it from being serialized by msgpack."""
obj: Any
def validate_array(obj, ndim=None, dtype=None):
"""Runtime validator for pydantic to validate array types."""
xp = get_array_module(obj)
if not isinstance(obj, xp.ndarray):
raise TypeError("not a valid numpy or cupy array")
errors = []
if ndim is not None and obj.ndim != ndim:
errors.append(f"wrong array dimensions (expected {ndim}, got {obj.ndim})")
if dtype is not None:
dtype_mapping = {"f": ["float32"], "i": ["int32", "int64", "uint32", "uint64"]}
expected_types = dtype_mapping.get(dtype, [])
if obj.dtype not in expected_types:
expected = "/".join(expected_types)
err = f"wrong array data type (expected {expected}, got {obj.dtype})"
errors.append(err)
if errors:
|
return obj
| raise ValueError(", ".join(errors)) |
compile.rs |
pub(crate) type CompileResult<'i> = Result<Regex<'i>, CompileError>;
#[derive(Clone)]
pub(crate) struct CompileState<'c, 'i> {
pub(crate) next_idx: u32,
pub(crate) used_names: HashMap<String, u32>,
pub(crate) groups_count: u32,
pub(crate) default_quantifier: RegexQuantifier,
pub(crate) variables: Vec<(&'i str, &'c Rule<'i>)>,
pub(crate) current_vars: HashSet<usize>,
} | use std::collections::{HashMap, HashSet};
use crate::{error::CompileError, regex::Regex, repetition::RegexQuantifier, rule::Rule}; |
|
beads.go | package main
import (
"fmt"
"math"
"math/rand"
. "github.com/fogleman/pt/pt"
)
func frame(path string, t float64) {
materials := []Material{
GlossyMaterial(HexColor(0x167F39), 1.3, Radians(20)),
GlossyMaterial(HexColor(0x45BF55), 1.3, Radians(20)),
GlossyMaterial(HexColor(0x96ED89), 1.3, Radians(20)),
}
rand.Seed(1211)
eye := V(4, 2, 8)
center := V(0, 0, 0)
up := V(0, 0, 1)
scene := Scene{}
for a := 0; a < 80; a++ {
material := materials[rand.Intn(len(materials))]
n := 400
xs := LowPassNoise(n, 0.25, 4)
ys := LowPassNoise(n, 0.25, 4)
zs := LowPassNoise(n, 0.25, 4)
position := Vector{}
positions := make([]Vector, n)
for i := 0; i < n; i++ {
positions[i] = position
v := V(xs[i], ys[i], zs[i]).Normalize().MulScalar(0.1)
position = position.Add(v)
}
for i := 0; i < n-1; i++ {
a := positions[i]
b := positions[i+1]
p := a.Add(b.Sub(a).MulScalar(t))
sphere := NewSphere(p, 0.1, material)
scene.Add(sphere)
}
}
scene.Add(NewSphere(V(4, 4, 20), 2, LightMaterial(HexColor(0xFFFFFF), 30)))
fovy := 40.0
camera := LookAt(eye, center, up, fovy)
sampler := NewSampler(4, 4)
sampler.SpecularMode = SpecularModeFirst
renderer := NewRenderer(&scene, &camera, sampler, 960, 540)
renderer.IterativeRender("out%03d.png", 1000)
}
func main() {
for i := 0; i < 30; i++ {
t := float64(i) / 30
path := fmt.Sprintf("out%03d.png", i)
fmt.Println(path)
frame(path, t)
}
}
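// Normalize linearly rescales the input values so that the minimum maps to a
// and the maximum maps to b.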
func Normalize(values []float64, a, b float64) []float64 {
result := make([]float64, len(values))
lo := values[0]
hi := values[0]
for _, x := range values {
lo = math.Min(lo, x)
hi = math.Max(hi, x)
}
for i, x := range values {
p := (x - lo) / (hi - lo)
result[i] = a + p*(b-a)
}
return result
}
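// LowPass applies a single-pole IIR (exponential) smoothing filter: each
// output sample moves a fraction alpha of the way toward the current input.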
func LowPass(values []float64, alpha float64) []float64 {
result := make([]float64, len(values))
var y float64
for i, x := range values {
y -= alpha * (y - x)
result[i] = y
}
return result
}
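// LowPassNoise generates n samples of smooth random noise in [-1, 1] by
// repeatedly low-pass filtering white noise and renormalizing the result.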
func | (n int, alpha float64, iterations int) []float64 {
result := make([]float64, n)
for i := range result {
result[i] = rand.Float64()*2 - 1
}
for i := 0; i < iterations; i++ {
result = LowPass(result, alpha)
}
result = Normalize(result, -1, 1)
return result
}
| LowPassNoise |
test_ingredients_api.py | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
# Test the publicly available ingredients API
def setUp(self):
self.client = APIClient()
def test_login_required(self):
# Test that login is required to access the endpoint
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
# Test the private ingredients API
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredient_list(self):
# Test retrieving a list of ingredients
Ingredient.objects.create(user=self.user, name='Kale')
Ingredient.objects.create(user=self.user, name='Salt')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
# Test that ingredients for the authenticated user are returned
user2 = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
Ingredient.objects.create(user=user2, name='Vinegar')
ingredient = Ingredient.objects.create(user=self.user, name='Tumeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
# Test creating a new ingredient
payload = {'name': 'Cabbage'}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name'],
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
# Test creating invalid ingredient fails
payload = {'name': ''} | res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) | |
fib_test.go | package main
import "testing"
// Benchmark function names start with "Benchmark". We use the *testing.B value b;
// b.N is how many times the benchmark loop will run.
func BenchmarkFib10(b *testing.B) {
for i := 0; i < b.N; i++ {
Fib(10)
}
}
func BenchmarkFib20(b *testing.B) | {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
Fib(20)
}
})
} |
|
tag.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Tag(Model):
"""Represents a Tag.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Gets the Tag ID
:vartype id: str | :param description: Gets or sets the description of the tag
:type description: str
:ivar image_count: Gets the number of images with this tag
:vartype image_count: int
"""
_validation = {
'id': {'readonly': True},
'image_count': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'Id', 'type': 'str'},
'name': {'key': 'Name', 'type': 'str'},
'description': {'key': 'Description', 'type': 'str'},
'image_count': {'key': 'ImageCount', 'type': 'int'},
}
def __init__(self, **kwargs):
super(Tag, self).__init__(**kwargs)
self.id = None
self.name = kwargs.get('name', None)
self.description = kwargs.get('description', None)
self.image_count = None | :param name: Gets or sets the name of the tag
:type name: str |
cost_model.rs | //! 'cost_model` provides service to estimate a transaction's cost
//! following proposed fee schedule #16984; Relevant cluster cost
//! measuring is described by #19627
//!
//! The main function is `calculate_cost` which returns a TransactionCost.
//!
use {
crate::{block_cost_limits::*, execute_cost_table::ExecuteCostTable},
log::*,
solana_sdk::{
instruction::CompiledInstruction, program_utils::limited_deserialize, pubkey::Pubkey,
system_instruction::SystemInstruction, system_program, transaction::SanitizedTransaction,
},
};
const MAX_WRITABLE_ACCOUNTS: usize = 256;
// costs are stored in numbers of compute units
#[derive(Debug)]
pub struct TransactionCost {
pub writable_accounts: Vec<Pubkey>,
pub signature_cost: u64,
pub write_lock_cost: u64,
pub data_bytes_cost: u64,
pub builtins_execution_cost: u64,
pub bpf_execution_cost: u64,
pub account_data_size: u64,
pub is_simple_vote: bool,
}
impl Default for TransactionCost {
fn default() -> Self {
Self {
writable_accounts: Vec::with_capacity(MAX_WRITABLE_ACCOUNTS),
signature_cost: 0u64,
write_lock_cost: 0u64,
data_bytes_cost: 0u64,
builtins_execution_cost: 0u64,
bpf_execution_cost: 0u64,
account_data_size: 0u64,
is_simple_vote: false,
}
}
}
impl TransactionCost {
pub fn new_with_capacity(capacity: usize) -> Self {
Self {
writable_accounts: Vec::with_capacity(capacity),
..Self::default()
}
}
pub fn reset(&mut self) {
self.writable_accounts.clear();
self.signature_cost = 0;
self.write_lock_cost = 0;
self.data_bytes_cost = 0;
self.builtins_execution_cost = 0;
self.bpf_execution_cost = 0;
self.is_simple_vote = false;
}
pub fn sum(&self) -> u64 {
self.sum_without_bpf()
.saturating_add(self.bpf_execution_cost)
}
pub fn sum_without_bpf(&self) -> u64 {
self.signature_cost
.saturating_add(self.write_lock_cost)
.saturating_add(self.data_bytes_cost)
.saturating_add(self.builtins_execution_cost)
}
}
#[derive(Debug, Default)]
pub struct CostModel {
instruction_execution_cost_table: ExecuteCostTable,
}
impl CostModel {
pub fn new() -> Self {
Self {
instruction_execution_cost_table: ExecuteCostTable::default(),
}
}
pub fn initialize_cost_table(&mut self, cost_table: &[(Pubkey, u64)]) {
cost_table
.iter()
.map(|(key, cost)| (key, cost))
.for_each(|(program_id, cost)| {
self.upsert_instruction_cost(program_id, *cost);
});
}
pub fn calculate_cost(&self, transaction: &SanitizedTransaction) -> TransactionCost {
let mut tx_cost = TransactionCost::new_with_capacity(MAX_WRITABLE_ACCOUNTS);
tx_cost.signature_cost = self.get_signature_cost(transaction);
self.get_write_lock_cost(&mut tx_cost, transaction);
self.get_transaction_cost(&mut tx_cost, transaction);
tx_cost.account_data_size = self.calculate_account_data_size(transaction);
tx_cost.is_simple_vote = transaction.is_simple_vote_transaction();
debug!("transaction {:?} has cost {:?}", transaction, tx_cost);
tx_cost
}
pub fn upsert_instruction_cost(&mut self, program_key: &Pubkey, cost: u64) {
self.instruction_execution_cost_table
.upsert(program_key, cost);
}
pub fn find_instruction_cost(&self, program_key: &Pubkey) -> u64 {
match self.instruction_execution_cost_table.get_cost(program_key) {
Some(cost) => *cost,
None => {
let default_value = self.instruction_execution_cost_table.get_default_units();
debug!(
"Program {:?} does not have aggregated cost, using default value {}",
program_key, default_value
);
default_value
}
}
}
fn get_signature_cost(&self, transaction: &SanitizedTransaction) -> u64 {
transaction.signatures().len() as u64 * SIGNATURE_COST
}
fn get_write_lock_cost(
&self,
tx_cost: &mut TransactionCost,
transaction: &SanitizedTransaction,
) {
let message = transaction.message();
message
.account_keys()
.iter()
.enumerate()
.for_each(|(i, k)| {
let is_writable = message.is_writable(i);
if is_writable {
tx_cost.writable_accounts.push(*k);
tx_cost.write_lock_cost += WRITE_LOCK_UNITS;
}
});
}
fn get_transaction_cost(
&self,
tx_cost: &mut TransactionCost,
transaction: &SanitizedTransaction,
) {
let mut builtin_costs = 0u64;
let mut bpf_costs = 0u64;
let mut data_bytes_len_total = 0u64;
for (program_id, instruction) in transaction.message().program_instructions_iter() {
// to keep the same behavior, look for builtin first
if let Some(builtin_cost) = BUILT_IN_INSTRUCTION_COSTS.get(program_id) {
builtin_costs = builtin_costs.saturating_add(*builtin_cost);
} else {
let instruction_cost = self.find_instruction_cost(program_id);
trace!(
"instruction {:?} has cost of {}",
instruction,
instruction_cost
);
bpf_costs = bpf_costs.saturating_add(instruction_cost);
}
data_bytes_len_total =
data_bytes_len_total.saturating_add(instruction.data.len() as u64);
}
tx_cost.builtins_execution_cost = builtin_costs;
tx_cost.bpf_execution_cost = bpf_costs;
tx_cost.data_bytes_cost = data_bytes_len_total / DATA_BYTES_UNITS;
}
fn calculate_account_data_size_on_deserialized_system_instruction(
instruction: SystemInstruction,
) -> u64 {
match instruction {
SystemInstruction::CreateAccount {
lamports: _lamports,
space,
owner: _owner,
} => space,
SystemInstruction::CreateAccountWithSeed {
base: _base,
seed: _seed,
lamports: _lamports,
space,
owner: _owner,
} => space,
SystemInstruction::Allocate { space } => space,
SystemInstruction::AllocateWithSeed {
base: _base,
seed: _seed,
space,
owner: _owner,
} => space,
_ => 0,
}
}
fn calculate_account_data_size_on_instruction(
program_id: &Pubkey,
instruction: &CompiledInstruction,
) -> u64 {
if program_id == &system_program::id() {
if let Ok(instruction) = limited_deserialize(&instruction.data) {
return Self::calculate_account_data_size_on_deserialized_system_instruction(
instruction,
);
}
}
0
}
/// eventually, potentially determine account data size of all writable accounts
/// at the moment, calculate account data size of account creation
fn | (&self, transaction: &SanitizedTransaction) -> u64 {
transaction
.message()
.program_instructions_iter()
.map(|(program_id, instruction)| {
Self::calculate_account_data_size_on_instruction(program_id, instruction)
})
.sum()
}
}
#[cfg(test)]
mod tests {
use {
super::*,
crate::{
bank::Bank,
genesis_utils::{create_genesis_config, GenesisConfigInfo},
},
solana_sdk::{
bpf_loader,
hash::Hash,
instruction::CompiledInstruction,
message::Message,
signature::{Keypair, Signer},
system_instruction::{self},
system_program, system_transaction,
transaction::Transaction,
},
std::{
str::FromStr,
sync::{Arc, RwLock},
thread::{self, JoinHandle},
},
};
fn test_setup() -> (Keypair, Hash) {
solana_logger::setup();
let GenesisConfigInfo {
genesis_config,
mint_keypair,
..
} = create_genesis_config(10);
let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(&genesis_config));
let start_hash = bank.last_blockhash();
(mint_keypair, start_hash)
}
#[test]
fn test_cost_model_instruction_cost() {
let mut testee = CostModel::default();
let known_key = Pubkey::from_str("known11111111111111111111111111111111111111").unwrap();
testee.upsert_instruction_cost(&known_key, 100);
// find cost for known programs
assert_eq!(100, testee.find_instruction_cost(&known_key));
testee.upsert_instruction_cost(&bpf_loader::id(), 1999);
assert_eq!(1999, testee.find_instruction_cost(&bpf_loader::id()));
// unknown program is assigned with default cost
assert_eq!(
testee.instruction_execution_cost_table.get_default_units(),
testee.find_instruction_cost(
&Pubkey::from_str("unknown111111111111111111111111111111111111").unwrap()
)
);
}
#[test]
fn test_cost_model_data_len_cost() {
let lamports = 0;
let owner = Pubkey::default();
let seed = String::default();
let space = 100;
let base = Pubkey::default();
for instruction in [
SystemInstruction::CreateAccount {
lamports,
space,
owner,
},
SystemInstruction::CreateAccountWithSeed {
base,
seed: seed.clone(),
lamports,
space,
owner,
},
SystemInstruction::Allocate { space },
SystemInstruction::AllocateWithSeed {
base,
seed,
space,
owner,
},
] {
assert_eq!(
space,
CostModel::calculate_account_data_size_on_deserialized_system_instruction(
instruction
)
);
}
assert_eq!(
0,
CostModel::calculate_account_data_size_on_deserialized_system_instruction(
SystemInstruction::TransferWithSeed {
lamports,
from_seed: String::default(),
from_owner: Pubkey::default(),
}
)
);
}
#[test]
fn test_cost_model_simple_transaction() {
let (mint_keypair, start_hash) = test_setup();
let keypair = Keypair::new();
let simple_transaction = SanitizedTransaction::from_transaction_for_tests(
system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 2, start_hash),
);
debug!(
"system_transaction simple_transaction {:?}",
simple_transaction
);
// expected cost for one system transfer instructions
let expected_execution_cost = BUILT_IN_INSTRUCTION_COSTS
.get(&system_program::id())
.unwrap();
let testee = CostModel::default();
let mut tx_cost = TransactionCost::default();
testee.get_transaction_cost(&mut tx_cost, &simple_transaction);
assert_eq!(*expected_execution_cost, tx_cost.builtins_execution_cost);
assert_eq!(0, tx_cost.bpf_execution_cost);
assert_eq!(0, tx_cost.data_bytes_cost);
}
#[test]
fn test_cost_model_transaction_many_transfer_instructions() {
let (mint_keypair, start_hash) = test_setup();
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let instructions =
system_instruction::transfer_many(&mint_keypair.pubkey(), &[(key1, 1), (key2, 1)]);
let message = Message::new(&instructions, Some(&mint_keypair.pubkey()));
let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new(
&[&mint_keypair],
message,
start_hash,
));
debug!("many transfer transaction {:?}", tx);
// expected cost for two system transfer instructions
let program_cost = BUILT_IN_INSTRUCTION_COSTS
.get(&system_program::id())
.unwrap();
let expected_cost = program_cost * 2;
let testee = CostModel::default();
let mut tx_cost = TransactionCost::default();
testee.get_transaction_cost(&mut tx_cost, &tx);
assert_eq!(expected_cost, tx_cost.builtins_execution_cost);
assert_eq!(0, tx_cost.bpf_execution_cost);
assert_eq!(1, tx_cost.data_bytes_cost);
}
#[test]
fn test_cost_model_message_many_different_instructions() {
let (mint_keypair, start_hash) = test_setup();
// construct a transaction with multiple random instructions
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let prog1 = solana_sdk::pubkey::new_rand();
let prog2 = solana_sdk::pubkey::new_rand();
let instructions = vec![
CompiledInstruction::new(3, &(), vec![0, 1]),
CompiledInstruction::new(4, &(), vec![0, 2]),
];
let tx = SanitizedTransaction::from_transaction_for_tests(
Transaction::new_with_compiled_instructions(
&[&mint_keypair],
&[key1, key2],
start_hash,
vec![prog1, prog2],
instructions,
),
);
debug!("many random transaction {:?}", tx);
let testee = CostModel::default();
let expected_cost = testee.instruction_execution_cost_table.get_default_units() * 2;
let mut tx_cost = TransactionCost::default();
testee.get_transaction_cost(&mut tx_cost, &tx);
assert_eq!(0, tx_cost.builtins_execution_cost);
assert_eq!(expected_cost, tx_cost.bpf_execution_cost);
assert_eq!(0, tx_cost.data_bytes_cost);
}
#[test]
fn test_cost_model_sort_message_accounts_by_type() {
// construct a transaction with two random instructions with same signer
let signer1 = Keypair::new();
let signer2 = Keypair::new();
let key1 = Pubkey::new_unique();
let key2 = Pubkey::new_unique();
let prog1 = Pubkey::new_unique();
let prog2 = Pubkey::new_unique();
let instructions = vec![
CompiledInstruction::new(4, &(), vec![0, 2]),
CompiledInstruction::new(5, &(), vec![1, 3]),
];
let tx = SanitizedTransaction::from_transaction_for_tests(
Transaction::new_with_compiled_instructions(
&[&signer1, &signer2],
&[key1, key2],
Hash::new_unique(),
vec![prog1, prog2],
instructions,
),
);
let cost_model = CostModel::default();
let tx_cost = cost_model.calculate_cost(&tx);
assert_eq!(2 + 2, tx_cost.writable_accounts.len());
assert_eq!(signer1.pubkey(), tx_cost.writable_accounts[0]);
assert_eq!(signer2.pubkey(), tx_cost.writable_accounts[1]);
assert_eq!(key1, tx_cost.writable_accounts[2]);
assert_eq!(key2, tx_cost.writable_accounts[3]);
}
#[test]
fn test_cost_model_insert_instruction_cost() {
let key1 = Pubkey::new_unique();
let cost1 = 100;
let mut cost_model = CostModel::default();
// Using default cost for unknown instruction
assert_eq!(
cost_model
.instruction_execution_cost_table
.get_default_units(),
cost_model.find_instruction_cost(&key1)
);
// insert instruction cost to table
cost_model.upsert_instruction_cost(&key1, cost1);
// now it is known instruction with known cost
assert_eq!(cost1, cost_model.find_instruction_cost(&key1));
}
#[test]
fn test_cost_model_calculate_cost() {
let (mint_keypair, start_hash) = test_setup();
let tx = SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer(
&mint_keypair,
&Keypair::new().pubkey(),
2,
start_hash,
));
let expected_account_cost = WRITE_LOCK_UNITS * 2;
let expected_execution_cost = BUILT_IN_INSTRUCTION_COSTS
.get(&system_program::id())
.unwrap();
let cost_model = CostModel::default();
let tx_cost = cost_model.calculate_cost(&tx);
assert_eq!(expected_account_cost, tx_cost.write_lock_cost);
assert_eq!(*expected_execution_cost, tx_cost.builtins_execution_cost);
assert_eq!(2, tx_cost.writable_accounts.len());
}
#[test]
fn test_cost_model_update_instruction_cost() {
let key1 = Pubkey::new_unique();
let cost1 = 100;
let cost2 = 200;
let updated_cost = (cost1 + cost2) / 2;
let mut cost_model = CostModel::default();
// insert instruction cost to table
cost_model.upsert_instruction_cost(&key1, cost1);
assert_eq!(cost1, cost_model.find_instruction_cost(&key1));
// update instruction cost
cost_model.upsert_instruction_cost(&key1, cost2);
assert_eq!(updated_cost, cost_model.find_instruction_cost(&key1));
}
#[test]
fn test_cost_model_can_be_shared_concurrently_with_rwlock() {
let (mint_keypair, start_hash) = test_setup();
// construct a transaction with multiple random instructions
let key1 = solana_sdk::pubkey::new_rand();
let key2 = solana_sdk::pubkey::new_rand();
let prog1 = solana_sdk::pubkey::new_rand();
let prog2 = solana_sdk::pubkey::new_rand();
let instructions = vec![
CompiledInstruction::new(3, &(), vec![0, 1]),
CompiledInstruction::new(4, &(), vec![0, 2]),
];
let tx = Arc::new(SanitizedTransaction::from_transaction_for_tests(
Transaction::new_with_compiled_instructions(
&[&mint_keypair],
&[key1, key2],
start_hash,
vec![prog1, prog2],
instructions,
),
));
let number_threads = 10;
let expected_account_cost = WRITE_LOCK_UNITS * 3;
let cost1 = 100;
let cost2 = 200;
// execution cost can be either 2 * Default (before write) or cost1+cost2 (after write)
let cost_model: Arc<RwLock<CostModel>> = Arc::new(RwLock::new(CostModel::default()));
let thread_handlers: Vec<JoinHandle<()>> = (0..number_threads)
.map(|i| {
let cost_model = cost_model.clone();
let tx = tx.clone();
if i == 5 {
thread::spawn(move || {
let mut cost_model = cost_model.write().unwrap();
cost_model.upsert_instruction_cost(&prog1, cost1);
cost_model.upsert_instruction_cost(&prog2, cost2);
})
} else {
thread::spawn(move || {
let cost_model = cost_model.write().unwrap();
let tx_cost = cost_model.calculate_cost(&tx);
assert_eq!(3, tx_cost.writable_accounts.len());
assert_eq!(expected_account_cost, tx_cost.write_lock_cost);
})
}
})
.collect();
for th in thread_handlers {
th.join().unwrap();
}
}
#[test]
fn test_initialize_cost_table() {
// build cost table
let cost_table = vec![
(Pubkey::new_unique(), 10),
(Pubkey::new_unique(), 20),
(Pubkey::new_unique(), 30),
];
// init cost model
let mut cost_model = CostModel::default();
cost_model.initialize_cost_table(&cost_table);
// verify
for (id, cost) in cost_table.iter() {
assert_eq!(*cost, cost_model.find_instruction_cost(id));
}
// verify built-in programs are not in bpf_costs
assert!(cost_model
.instruction_execution_cost_table
.get_cost(&system_program::id())
.is_none());
assert!(cost_model
.instruction_execution_cost_table
.get_cost(&solana_vote_program::id())
.is_none());
}
}
| calculate_account_data_size |
main.py | """
@param: n -> int : Upper Limit of the range
"""
def multiples(n: int) -> int:
num: list = []
for i in range(1, n):
if (i % 3 == 0) or (i % 5 == 0):
num.append(i)
return sum(num)
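# e.g. multiples(10) == 23, since the multiples of 3 or 5 below 10 are 3, 5, 6 and 9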
if __name__ == '__main__':
t: int = int(input()) | print(multiples(n)) | for _x in range(t):
n: int = int(input()) |
batch_test.go | // Copyright 2019 Drone.IO Inc. All rights reserved.
// Use of this source code is governed by the Drone Non-Commercial License
// that can be found in the LICENSE file.
package batch
import (
"context"
"database/sql"
"testing"
"github.com/drone/drone/core"
"github.com/drone/drone/store/perm"
"github.com/drone/drone/store/repos"
"github.com/drone/drone/store/shared/db"
"github.com/drone/drone/store/shared/db/dbtest"
"github.com/drone/drone/store/user"
)
var noContext = context.TODO()
func TestBatch(t *testing.T) {
conn, err := dbtest.Connect()
if err != nil {
t.Error(err)
return
}
defer func() {
dbtest.Reset(conn)
dbtest.Disconnect(conn)
}()
batcher := New(conn).(*batchUpdater)
repos := repos.New(conn)
perms := perm.New(conn)
user, err := seedUser(batcher.db)
if err != nil {
t.Error(err)
}
t.Run("Insert", testBatchInsert(batcher, repos, perms, user))
t.Run("Update", testBatchUpdate(batcher, repos, perms, user))
t.Run("Delete", testBatchDelete(batcher, repos, perms, user))
}
func testBatchInsert(
batcher core.Batcher,
repos core.RepositoryStore,
perms core.PermStore,
user *core.User,
) func(t *testing.T) {
return func(t *testing.T) {
batch := &core.Batch{
Insert: []*core.Repository{
{
UserID: 1,
UID: "42",
Namespace: "octocat",
Name: "hello-world",
Slug: "octocat/hello-world",
Private: false,
Visibility: "public",
},
},
}
err := batcher.Batch(noContext, user, batch)
if err != nil {
t.Error(err)
}
repo, err := repos.FindName(noContext, "octocat", "hello-world")
if err != nil {
t.Errorf("Want repository, got error %q", err)
}
_, err = perms.Find(noContext, repo.UID, user.ID)
if err != nil {
t.Errorf("Want permissions, got error %q", err)
}
}
}
func testBatchUpdate(
batcher core.Batcher,
repos core.RepositoryStore,
perms core.PermStore,
user *core.User,
) func(t *testing.T) {
return func(t *testing.T) {
before, err := repos.FindName(noContext, "octocat", "hello-world")
if err != nil {
t.Errorf("Want repository, got error %q", err)
}
batch := &core.Batch{
Update: []*core.Repository{
{
ID: before.ID,
UserID: 1,
UID: "42",
Namespace: "octocat",
Name: "hello-world",
Slug: "octocat/hello-world",
Private: true,
},
},
}
err = batcher.Batch(noContext, user, batch)
if err != nil {
t.Error(err)
}
after, err := repos.FindName(noContext, "octocat", "hello-world")
if err != nil {
t.Errorf("Want repository, got error %q", err)
}
if got, want := after.Private, true; got != want {
t.Errorf("Want repository Private %v, got %v", want, got)
}
}
}
func testBatchDelete(
batcher core.Batcher,
repos core.RepositoryStore,
perms core.PermStore,
user *core.User,
) func(t *testing.T) {
return func(t *testing.T) {
repo, err := repos.FindName(noContext, "octocat", "hello-world")
if err != nil {
t.Errorf("Want repository, got error %q", err)
}
_, err = perms.Find(noContext, repo.UID, user.ID)
if err != nil {
t.Errorf("Want permissions, got error %q", err)
}
batch := &core.Batch{
Revoke: []*core.Repository{
{
ID: repo.ID,
UserID: 1,
UID: "42",
Namespace: "octocat",
Name: "hello-world",
Slug: "octocat/hello-world",
Private: true,
},
},
}
err = batcher.Batch(noContext, user, batch)
if err != nil {
t.Error(err)
}
_, err = perms.Find(noContext, repo.UID, user.ID)
if err != sql.ErrNoRows {
t.Errorf("Want sql.ErrNoRows got %v", err)
}
}
}
func seedUser(db *db.DB) (*core.User, error) | {
out := &core.User{Login: "octocat"}
err := user.New(db).Create(noContext, out)
return out, err
} |
|
fastapi.py | import os
import platform
import subprocess
import sys
from pathlib import Path
from core.management.commands.utils import Utils
from django.apps import apps
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = """Manager responsável por analisar as classes de modelos do projeto Django para gerar os arquivos
do projeto FastAPI correspondente às apps do Django"""
def __init__(self):
super().__init__()
self.path_root = os.getcwd()
self.path_core = os.path.join(self.BASE_DIR, "core")
self.operation_system = platform.system().lower()
self.project = 'fastapi'
self.fastapi_dir = os.path.join(self.BASE_DIR, '..', "fastapi")
self.fastapi_project = os.path.join(self.path_core, "management/commands/snippets/fastapi_project")
self.snippet_dir = "{}/{}".format(self.path_core, "management/commands/snippets/fastapi/")
self.current_app_model = None
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
_django_types = ["SmallAutoField", "AutoField", "BLANK_CHOICE_DASH", "BigAutoField", "BigIntegerField",
"BinaryField", "BooleanField", "CharField", "CommaSeparatedIntegerField", "DateField",
"DateTimeField", "DecimalField", "DurationField", "EmailField", "Empty", "FileField", "Field",
"FieldDoesNotExist", "FilePathField", "FloatField", "GenericIPAddressField", "IPAddressField",
"IntegerField", "FieldFile", "NOT_PROVIDED", "NullBooleanField", "ImageField",
"PositiveIntegerField", "PositiveSmallIntegerField", "SlugField", "SmallIntegerField", "TextField",
"TimeField", "URLField", "UUIDField", "ForeignKey", "OneToOneField"]
_schemas_types = ["int", "int", "BLANK_CHOICE_DASH", "int", "int", "str", "bool", "str", "str", "datetime.date",
"datetime.datetime", "float", "int", "EmailStr", "str", "str", "str", "str", "str", "float",
"str", "str", "int", "str", "str", "bool", "str", "int", "int", "str", "int",
"str", "DateTime", "str", "str", "int", "int",]
_models_types = ["Integer", "Integer", "BLANK_CHOICE_DASH", "Integer", "Integer", "String", "Boolean", "String", "String", "Date",
"Datetime", "Float", "Integer", "String", "String", "String", "String", "String", "String", "Float",
"String", "String", "Integer", "String", "String", "Boolean", "String", "Integer", "Integer", "String", "Integer",
"String", "DateTime", "String", "String", "Integer", "Integer", ]
def add_arguments(self, parser):
parser.add_argument("App", type=str, nargs="?")
parser.add_argument("Model", type=str, nargs="?")
parser.add_argument("--app", action="store_true", dest="app", help="Criar a App e seus models")
parser.add_argument("--app_model", action="store_true", dest="app_model",
help="Criar a App e o Model informado")
# Optional parameters
parser.add_argument(
'--schemas',
action='store_true',
dest='schemas',
help='Create only the schemas'
)
parser.add_argument(
'--api',
action='store_true',
dest='api',
help='Create only the API routes'
)
parser.add_argument(
'--cruds',
action='store_true',
dest='cruds',
help='Create only the CRUDs'
)
parser.add_argument(
'--models',
action='store_true',
dest='models',
help='Criar apenas os models'
)
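    # Usage sketch (hypothetical app/model names; the command name follows this
    # module's file name):
    #
    #   python manage.py fastapi blog --app           # files for every model in "blog"
    #   python manage.py fastapi blog Post --schemas  # only the schemas for "Post"
    #   python manage.py fastapi blog Post --api      # only the API routes for "Post"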
def _ch | lf, path) -> bool:
"""Método responsável por verificar se o diretório já existe."""
return Utils.check_dir(path)
def _check_file(self, path):
"""Método responsável por verificar se o arquivo já existe no caminho informado."""
return Utils.check_file(path)
def __check_content(self, path, text_check):
"""Método responsável por verificar se o texto passado com parâmetro existe no conteúdo do arquivo."""
return Utils.check_content(path, text_check)
def __ignore_base_fields(self, field):
"""Método responsável por remover da análise do models os atributos herdados da classe pai Base
Arguments:
field {String} -- Nome do atributo
Returns:
bool -- True se o atributo for um dos atributos da classe pai, caso contrário False.
"""
try:
__ignore_fields = ["id", "deleted", "created_on", "updated_on" ]
return field in __ignore_fields
except Exception as error:
Utils.show_message(f"Error in __ignore_base_fields: {error}", error=True)
def __get_snippet(self, path=None, file_name=None, state_manager=False):
"""Método para recuperar o valor do arquivo de snippet a ser convertido pela substituição com os valores
baseados em modelos do projeto Django
Arguments:
path {str} - Caminho do arquivo snippet a ser utilizado como padrão para gerar o arquivo resultante.
file_name {str} - Nome do arquivo snippet a ser lido
state_manager {bool} - Booleano para determinar se o snippet a ser lido é de algum dos pacotes
de gerência de estado do projeto Fastapi (deprecated)
Returns:
str -- Texto base a ser utilizado para geração dos arquivos resultantes da conversão
"""
try:
if os.path.isfile(path):
with open(path, encoding="utf-8") as arquivo:
return arquivo.read()
except Exception as e:
Utils.show_message(f"Error in get_snippet {e}", error=True)
sys.exit()
def __init_fastapi(self):
"""Método para iniciar o projeto Fastapi
"""
try:
if not Utils.check_dir(self.fastapi_dir):
Utils.show_message("Criando o projeto Fastapi.")
print(self.fastapi_project)
__cmd_fastapi_create = "cp -R {} {}".format(self.fastapi_project, self.fastapi_dir)
subprocess.call(__cmd_fastapi_create, shell=True)
Utils.show_message("Projeto criado com sucesso.")
except Exception as error:
Utils.show_message(f"Error in __init_Fastapi: {error}", error=True)
def __init_app(self, app_path):
"""Método para iniciar o projeto Fastapi
"""
try:
if not Utils.check_dir(app_path):
Utils.show_message("Criando diretório da app")
os.makedirs(app_path)
Utils.show_message("Diretório criado com sucesso")
except Exception as error:
Utils.show_message(f"Error in __init_Fastapi: {error}", error=True)
def __apply_pep(self, path):
try:
os.system('autopep8 --in-place --aggressive --aggressive {}'.format(path))
os.system('isort {}'.format(path))
except Exception as error:
Utils.show_message(f"Ocorreu o erro : {error}")
pass
def __manage_schema(self):
"""Método responsável por criar/configurar o arquivo de schema para a FastAPI """
try:
Utils.show_message("Trabalhando na configuração do Schema do model {}".format(self.model))
content = self.__get_snippet(Path(
f"{self.path_core}/management/commands/snippets/fastapi/schema.txt"))
            # Interpolating the data
content = content.replace("$ModelClass$", self.model)
model = self.app_instance.get_model(self.model)
fields = model._meta.fields
result = ''
for field in iter(fields):
item = {}
item["app"], item["model"], item["name"] = str(field).split('.')
item["type"] = (str(
str(type(field)).split('.')[-1:])
.replace("[\"", "").replace("\'>\"]", ""))
if item["type"] not in self._django_types:
                    print('Unknown field {}'.format(field))
continue
if not self.__ignore_base_fields(item['name']):
attribute = self._schemas_types[self._django_types.index(item['type'])]
field_name = item.get('name')
if (getattr(field, 'null', None)):
attribute = f"Optional[{attribute}]"
if (field.get_default() is not None and field.get_default() != ""):
attribute += f" = {field.get_default()}"
if (item.get("type") in ('ForeignKey', 'OneToOneField')):
field_name = field.get_attname_column()[1]
result += f"\t {field_name}: {attribute}\n"
content = content.replace("$fields$", result)
            # Checking whether the schemas file already exists
            if self._check_file(self.path_schema) is False:
                # Creating the file with the interpolated content
with open(self.path_schema, 'w') as arquivo:
arquivo.write(content)
self.__apply_pep(self.path_schema)
return
            # Checking whether the given model is already
            # configured in the schemas file
if self.__check_content(
self.path_schema, "class {}".format(self.model)):
Utils.show_message("O model informado já possui schema configurado.")
return
with open(self.path_schema, 'a') as schema:
schema.write("\n")
schema.write(content)
self.__apply_pep(self.path_schema)
except Exception as error:
Utils.show_message(f"Error in __manage_schema: {error}", error=True)
def __manage_model(self):
"""Método responsável por criar/configurar o arquivo de schema para a FastAPI """
try:
Utils.show_message("Trabalhando na configuração do Model do model {}".format(self.model))
content = self.__get_snippet(Path(
f"{self.path_core}/management/commands/snippets/fastapi/model.txt"))
            # Interpolating the data
content = content.replace("$ModelClass$", self.model)
model = self.app_instance.get_model(self.model)
content = content.replace("$table$", model._meta.db_table)
fields = model._meta.fields
related_fields = model._meta.many_to_many
result = ''
imports = ""
many_to_many = ""
for field in iter(fields):
item = {}
item["app"], item["model"], item["name"] = str(field).split('.')
item["type"] = (str(
str(type(field)).split('.')[-1:])
.replace("[\"", "").replace("\'>\"]", ""))
if item["type"] not in self._django_types:
                    print('Unknown field {}'.format(field))
continue
if not self.__ignore_base_fields(item['name']):
attribute = self._models_types[self._django_types.index(item['type'])]
field_name = item.get('name')
relationship = None
if (field.max_length):
attribute += f"({field.max_length})"
if (item.get("type") in ('ForeignKey', 'OneToOneField')):
field_name = field.get_attname_column()[1]
__model = field.related_model._meta
attribute = f"ForeignKey('{__model.db_table}.id')"
if __model.app_label != item.get('app'):
imports += f"from {__model.app_label}.models import {__model.object_name}\n"
relationship = f"\t {item.get('name')} = relationship('{__model.object_name}')\n"
attribute = f"{attribute}, nullable={(getattr(field, 'null', None))}"
if (field.has_default()):
attribute += f" ,default={field.get_default()}"
if (field.unique):
attribute += f" ,unique={field.unique}"
result += f"\t {field_name} = Column({attribute})\n"
if relationship is not None:
result += relationship
for field in iter(related_fields):
item = {}
item["app"], item["model"], item["name"] = str(field).split('.')
item["type"] = (str(
str(type(field)).split('.')[-1:])
.replace("[\"", "").replace("\'>\"]", ""))
if (item.get("type") == "ManyToManyField"):
_model_name = field.model._meta.model_name
_app_name = field.model._meta.app_label
_related_model_name = field.related_model._meta.model_name
_related_model_app = field.related_model._meta.app_label
__model = field.related_model._meta
table = f"{item.get('app')}_{_model_name}_{field.related_model._meta.model_name}"
many_to_many += f"{table} = Table('{table}', Base.metadata,"
many_to_many += f"Column('id', Integer, primary_key=True, index=True),"
many_to_many += f"Column('{_model_name}_id', ForeignKey('{_app_name}_{_model_name}.id')),"
many_to_many += f"Column('{_related_model_name}_id', ForeignKey('{_related_model_app}_{_related_model_name}.id')))\n"
result += f"\t {item.get('name')} = relationship('{__model.object_name}', secondary={table})\n"
content = content.replace("$columns$", result)
content = content.replace("$imports$", imports)
content = content.replace("$manyToMany$", many_to_many)
            # Checking whether the models file already exists
            if self._check_file(self.path_model_fastapi) is False:
                # Creating the file with the interpolated content
with open(self.path_model_fastapi, 'w') as arquivo:
arquivo.write(content)
self.__apply_pep(self.path_model_fastapi)
return
            # Checking whether the given model is already
            # configured in the models file
if self.__check_content(
self.path_model_fastapi, "class {}".format(self.model)):
Utils.show_message("O model informado já possui model configurado.")
return
with open(self.path_model_fastapi, 'a') as schema:
schema.write("\n")
schema.write(content)
self.__apply_pep(self.path_model_fastapi)
except Exception as error:
Utils.show_message(f"Error in __manage_model: {error}", error=True)
def __manage_cruds(self):
"""Método responsável por criar/configurar o arquivo de cruds para a FastAPI """
try:
Utils.show_message("Trabalhando na configuração do Crud do model {}".format(self.model))
content = self.__get_snippet(Path(
f"{self.path_core}/management/commands/snippets/fastapi/cruds.txt"))
            # Interpolating the data
content = content.replace("$ModelClass$", self.model)
content = content.replace("$app$", self.app)
content = content.replace("$model$", self.model_lower)
            # Checking whether the cruds file already exists
            if self._check_file(self.path_crud) is False:
                # Creating the file with the interpolated content
with open(self.path_crud, 'w') as arquivo:
arquivo.write(content)
self.__apply_pep(self.path_crud)
return
            # Checking whether the given model is already
            # configured in the cruds file
if self.__check_content(
self.path_crud, "class {}".format(self.model)):
Utils.show_message("O model informado já possui schema configurado.")
return
with open(self.path_crud, 'a') as crud:
crud.write("\n")
crud.write(content)
self.__apply_pep(self.path_crud)
except Exception as error:
Utils.show_message(f"Error in __manage_crud: {error}", error=True)
def __manage_api(self):
"""Método responsável por criar/configurar o arquivo de cruds para a FastAPI """
try:
Utils.show_message("Trabalhando na configuração das Rotas do model {}".format(self.model))
content = self.__get_snippet(Path(
f"{self.path_core}/management/commands/snippets/fastapi/api.txt"))
            # Interpolating the data
content = content.replace("$ModelClass$", self.model)
content = content.replace("$app$", self.app)
content = content.replace("$model$", self.model_lower)
            # Checking whether the api file already exists
            if self._check_file(self.path_api) is False:
                # Creating the file with the interpolated content
with open(self.path_api, 'w') as arquivo:
arquivo.write(content)
self.__apply_pep(self.path_api)
return
            # Checking whether the given model is already
            # configured in the api file
if self.__check_content(
self.path_api, "class {}".format(self.model)):
Utils.show_message("O model informado já possui schema configurado.")
return
if self.__check_content(self.path_api,
"router = APIRouter()"):
content = content.replace("router = APIRouter()", "")
with open(self.path_api, 'a') as crud:
crud.write("\n")
crud.write(content)
self.__apply_pep(self.path_api)
except Exception as error:
Utils.show_message(f"Error in __manage_crud: {error}", error=True)
def call_methods(self, options):
"""
Método que identifica qual comando foi solicitado pelo usuário para ser executado, antes de chamar o método,
as entradas informadas pelo usuário são validadas, evitando erros de execução do programa devido à ausência de
parâmetros obrigatórios.
Por uma questão de padrão de projeto as possibilidades de escolha do pacote de gerência
de estados para o projeto Fastapi foram alteradas, agora todo projeto gerado utiliza como pacote de gerência
de estado o pacote o Cubit/Bloc
"""
        # Checking whether optional parameters were passed
        if options['cruds']:
            Utils.show_message("Working only on the CRUDs.")
            self.__manage_cruds()
            return
        elif options['api']:
            Utils.show_message("Working only on the API.")
            self.__manage_api()
            return
        elif options['schemas']:
            Utils.show_message("Working only on the schemas.")
            self.__manage_schema()
            return
        elif options['models']:
            Utils.show_message("Working only on the models.")
            self.__manage_model()
            return
        else:
            # Calling the method that handles the API
            self.__manage_api()
            # Calling the method that handles the schemas
            self.__manage_schema()
            # Calling the method that handles the models
            self.__manage_model()
            # Calling the method that handles the CRUDs
self.__manage_cruds()
return
def handle(self, *args, **options):
app = options["App"] or None
model = options["Model"] or None
if app is None and model is None:
            Utils.show_message(
                "You did not provide an APP to be generated.",
                error=True)
return
if app and Utils.contain_number(app):
Utils.show_message(f"Nome da app contendo números")
return
        # Stripping whitespace
        self.app = app.strip()
        # Getting the project's current absolute directory.
        self.path_root = os.path.normpath(os.getcwd() + os.sep)
        # Building the path for the given APP.
        self.path_app = os.path.join(self.fastapi_dir, app)
        self.path_app_local = os.path.join(self.path_root, app)
        # Building the path for the Core APP.
        self.path_core = os.path.join(self.BASE_DIR, "core")
        # Building the models path based on the given App.
        self.path_model = os.path.join(self.path_app_local, "models.py")
        # Converting the names to lowercase, for use
        # wherever the lowercase names are required.
        self.app_lower = app.lower()
        # Building the schemas path based on the given App.
        self.path_schema = os.path.join(self.path_app, "schemas.py")
        self.path_model_fastapi = os.path.join(self.path_app, "models.py")
        self.path_crud = os.path.join(self.path_app, "cruds.py")
        self.path_api = os.path.join(self.path_app, "api.py")
        # Checking whether the given FastAPI directory exists
if self._check_dir(self.fastapi_dir) is False:
self.__init_fastapi()
        # Checking whether the app is installed, since it is needed
        # to retrieve the model instances
        if apps.is_installed(self.app_lower) is False:
            Utils.show_message(
                "You must add your app to INSTALLED_APPS in settings.")
return
if self._check_dir(self.path_app) is False:
self.__init_app(self.path_app)
        # Creating an instance of the app
        self.app_instance = apps.get_app_config(self.app_lower)
        # Checking whether the user passed the model name
if options['Model']:
model = options['Model'] or None
if Utils.contain_number(model) is False:
                # Stripping whitespace
                self.model = model.strip()
                # Checking whether the given Model exists in models.py
                if self.__check_content(
                        self.path_model,
                        'class {}'.format(self.model)) is False:
                    Utils.show_message("The given model was not found.")
return
try:
                    # Checking whether the model is in the given app.
                    # If the model is abstract, it will raise a
                    # LookupError exception
                    self.app_instance.get_model(self.model)
                    Utils.show_message(
                        "Generating files for model {}".format(self.model))
                    # Converting the names to lowercase, for use
                    # wherever the lowercase names are required.
self.model_lower = model.lower()
self.call_methods(options)
Utils.show_message("Processo concluído.")
except LookupError:
                    Utils.show_message(
                        "This model is abstract. "
                        "The files will not be generated.")
else:
            # Retrieving all models of the app
            # print(self.app_instance.get_models())
for model in self.app_instance.get_models():
model = model.__name__
                # Stripping whitespace
                self.model = model.strip()
                Utils.show_message(
                    "Generating files for model {}".format(self.model))
                # Converting the names to lowercase, for use
                # wherever the lowercase names are required.
self.model_lower = model.lower()
                # Calling the file-generation methods
self.call_methods(options)
                Utils.show_message(
                    "Process completed for model {}.".format(
                        self.model))
            Utils.show_message("Process completed.")
return
| eck_dir(se |
listener.js | document.onkeyup = function (event) {
if (event.keyCode == 113){ // listen for press of F2 button
console.log('Button is pressed. Gathering open windows...');
var compose_ids = Array.prototype.slice.call(document.getElementsByClassName("Am Al editable LW-avf va_ar"));//for compose windows
var reply_ids = Array.prototype.slice.call(document.getElementsByClassName("Am a09 Al editable LW-avf va_ar"));//for reply windows
var windows = compose_ids.concat(reply_ids);
console.log('Gathered open windows. Implementing changes...');
for (i = 0; i < windows.length; i++){
windows[i].innerHTML = Autolinker.link(windows[i].innerHTML,
{
replaceFn : function (autolinker, match) { // replace all
var final_value = HTTPResponse(match.getAnchorHref());
var tag = new Autolinker.HtmlTag();
if (final_value[1].indexOf("image") != -1){
console.log('Replacing image...');
// if URL is of an image, then insert img tag with appropriate source
tag.setTagName('img');
tag.setAttr('src', match.getAnchorHref());
tag.setAttr('alt',match.getAnchorHref());
} else { | console.log('Replacing link...');
// otherwise, replace with human-readable hyperlink
tag.setTagName('a');
tag.setAttr('href',match.getAnchorHref());
tag.setInnerHtml(final_value[0]);
}
return tag;
}
});
}
}
}
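// Note: HTTPResponse below performs a blocking, synchronous request. A
// non-blocking sketch (hypothetical helper, not wired in yet) would take a
// callback instead of returning:
//
//   function httpResponseAsync(url, apiKey, done) {
//     var xhr = new XMLHttpRequest();
//     xhr.open("GET", "https://akshatm-richgmaileditor-v1.p.mashape.com/?url=" + url, true); // async
//     xhr.setRequestHeader("X-Mashape-Key", apiKey);
//     xhr.onload = function () {
//       done([xhr.responseText, xhr.getResponseHeader("content-type")]);
//     };
//     xhr.send();
//   }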
function HTTPResponse(url){
console.log('Sending request');
var xmlHttp = new XMLHttpRequest(); //create XMLHttpRequest
var mashape_api_key = "9kXiCVHd85msh7Scl9CLKlBFDhU7p1YwgoWjsn9JtTN8RAGnF3"; // insecure, but no risk is attached - revealing this just means that people get to ping my API, which is fine.
var native_url = "https://akshatm-richgmaileditor-v1.p.mashape.com/?url=";
xmlHttp.open("GET",native_url+url, false); //currently synchronous; must change to asynchronous
xmlHttp.setRequestHeader("X-Mashape-Key", mashape_api_key);
xmlHttp.send();
if (xmlHttp.status == 400){
return [url,"none"];
}
	console.log('Request honoured.');
return [xmlHttp.responseText, xmlHttp.getResponseHeader("content-type")];
} | |
pixel_iter.rs | use crate::image::{Image, BaseImage, Number};
/// A struct representing a pixel iterator for an image. `next()` returns a tuple containing the
/// x-coordinate, y-coordinate, and a slice representing the pixel at that coordinate, in that
/// order.
///
/// # Examples
/// ```rust
/// # fn main() {
/// use imgproc_rs::image::{Image, BaseImage};
///
/// // Create an image
/// let img = Image::from_vec(2, 2, 3, false, vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
///
/// // Print pixels with corresponding coordinates using the pixel iterator
/// for vals in img.into_iter() {
/// print!("(x: {}, y: {}), pixel: (", vals.0, vals.1);
///
/// for i in 0..(img.info().channels as usize) {
/// print!("{}, ", vals.2[i]);
/// }
///
/// print!(")");
/// println!();
/// }
/// # }
/// ```
#[derive(Debug, Clone)]
pub struct PixelIter<'a, T: Number> {
image: &'a Image<T>,
x: u32,
y: u32,
x_max: u32,
y_max: u32,
}
impl<'a, T: Number> PixelIter<'a, T> {
pub fn new(image: &'a Image<T>) -> Self {
PixelIter {
image,
x: 0,
y: 0,
x_max: image.info().width - 1,
y_max: image.info().height - 1,
}
}
}
impl<'a, T: Number> Iterator for PixelIter<'a, T> {
type Item = (u32, u32, &'a [T]);
fn next(&mut self) -> Option<Self::Item> {
if self.x > self.x_max |
let temp_x = self.x;
let temp_y = self.y;
self.x += 1;
Some((temp_x, temp_y, self.image.get_pixel(temp_x, temp_y)))
}
fn size_hint(&self) -> (usize, Option<usize>) {
let size = ((self.x_max + 1) * (self.y_max + 1)) as usize;
(size, Some(size))
}
}
impl<'a, T: Number> IntoIterator for &'a Image<T> {
type Item = (u32, u32, &'a [T]);
type IntoIter = PixelIter<'a, T>;
fn into_iter(self) -> Self::IntoIter {
PixelIter::new(&self)
}
} | {
if self.y >= self.y_max {
return None;
} else {
self.x = 0;
self.y += 1;
}
} |
config.go | // Copyright 2021 Daniel Foehr
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"time"
"k8s.io/apimachinery/pkg/util/sets"
)
// StoreKind identifies a supported store kind - filesystem, vault, Gardener.
type StoreKind string
// ValidStoreKinds contains all valid store kinds
var ValidStoreKinds = sets.NewString(string(StoreKindVault), string(StoreKindFilesystem), string(StoreKindGardener))
// ValidConfigVersions contains all valid config versions
var ValidConfigVersions = sets.NewString("v1alpha1")
const (
// StoreKindFilesystem is an identifier for the filesystem store
StoreKindFilesystem StoreKind = "filesystem"
	// StoreKindVault is an identifier for the vault store
	StoreKindVault StoreKind = "vault"
	// StoreKindGardener is an identifier for the gardener store
StoreKindGardener StoreKind = "gardener"
)
type Config struct {
// Kind is the type of the config. Expects "SwitchConfig"
Kind string `yaml:"kind"`
// Version is the version of the config file.
// Possible values: "v1alpha1"
Version string `yaml:"version"`
// KubeconfigName is the global default for how the kubeconfig is
// identified in the backing store.
// Can be overridden in the individual kubeconfig store configuration
// + optional
KubeconfigName *string `yaml:"kubeconfigName"`
	// RefreshIndexAfter is the global default for how often
// the index for this kubeconfig store shall be refreshed.
// Not setting this field will cause kubeswitch to not use an index
// Can be overridden in the individual kubeconfig store configuration
// + optional
RefreshIndexAfter *time.Duration `yaml:"refreshIndexAfter"`
// Hooks defines configurations for commands that shall be executed prior to the search
Hooks []Hook `yaml:"hooks"`
// KubeconfigStores contains the configuration for kubeconfig stores
KubeconfigStores []KubeconfigStore `yaml:"kubeconfigStores"`
}
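// An illustrative SwitchConfig in YAML (values are examples only; the keys
// follow the yaml struct tags above):
//
//	kind: SwitchConfig
//	version: v1alpha1
//	kubeconfigStores:
//	  - kind: filesystem
//	    paths:
//	      - ~/.kube/config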
type KubeconfigStore struct {
// ID is the ID of the kubeconfig store.
// Used to write distinct index files for each store
// Not required if only one store of a store kind is configured
// + optional
ID *string `yaml:"id"`
// Kind identifies a supported store kind - filesystem, vault, Gardener.
Kind StoreKind `yaml:"kind"`
// KubeconfigName defines how the kubeconfig is identified in the backing store
// For the Filesystem store, this is the name of the file that contains the kubeconfig
// For the Vault store, this is the secret key
// For the Gardener store this field is not used
// + optional
KubeconfigName *string `yaml:"kubeconfigName"`
// Paths contains the paths to search for in the backing store
Paths []string `yaml:"paths"`
// RefreshIndexAfter defines how often the index for this kubeconfig store shall be refreshed.
// Not setting this field will cause kubeswitch to not use an index
// + optional
RefreshIndexAfter *time.Duration `yaml:"refreshIndexAfter"` | // defaults to true
// useful when configuring a kubeconfig store that is not always available
// However, when searching on an index and wanting to retrieve the kubeconfig from an unavailable store,
	// it will throw an error nonetheless
// + optional
Required *bool `yaml:"required"`
// Config is store-specific configuration.
	// Please check the documentation for each backing provider to see what configuration is
// possible here
Config interface{} `yaml:"config"`
}
type StoreConfigVault struct {
// VaultAPIAddress is the URL of the Vault API
VaultAPIAddress string `yaml:"vaultAPIAddress"`
}
type StoreConfigGardener struct {
// GardenerAPIKubeconfigPath is the path on the local filesystem pointing to the kubeconfig
// for the Gardener API server
GardenerAPIKubeconfigPath string `yaml:"gardenerAPIKubeconfigPath"`
// LandscapeName is a custom name for the Gardener landscape
// uses this name instead of the default ID from the Gardener API ConfigMap "cluster-identity"
// also used as the store ID if the kubeconfig store ID is not specified
// + optional
LandscapeName *string `yaml:"landscapeName"`
} | // Required defines if errors when initializing this store should be logged |
test_note_viewset.py | import os
from datetime import datetime
from django.conf import settings
from django.utils.timezone import make_aware
from django.test import RequestFactory
from guardian.shortcuts import assign_perm
from onadata.apps.api.viewsets.note_viewset import NoteViewSet
from onadata.apps.api.viewsets.xform_viewset import XFormViewSet
from onadata.apps.logger.models import Note
from onadata.apps.main.tests.test_base import TestBase
from onadata.libs.serializers.note_serializer import NoteSerializer
class TestNoteViewSet(TestBase):
"""
Test NoteViewSet
"""
def setUp(self):
super(TestNoteViewSet, self).setUp()
self._create_user_and_login()
self._publish_transportation_form()
self._make_submissions()
self.view = NoteViewSet.as_view({
'get': 'list',
'post': 'create',
'delete': 'destroy'
})
self.factory = RequestFactory()
self.extra = {'HTTP_AUTHORIZATION': 'Token %s' % self.user.auth_token}
@property
def _first_xform_instance(self):
return self.xform.instances.all().order_by('pk')[0]
def _add_notes_to_data_point(self):
# add a note to a specific data point
note = {'note': u"Road Warrior"}
dataid = self._first_xform_instance.pk
note['instance'] = dataid
request = self.factory.post('/', data=note, **self.extra)
self.assertTrue(self.xform.instances.count())
response = self.view(request)
self.assertEqual(response.status_code, 201)
self.pk = response.data['id']
note['id'] = self.pk
self.note = note
def test_note_list(self):
self._add_notes_to_data_point()
request = self.factory.get('/', **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 200)
self.assertTrue(len(response.data) > 0)
self.assertDictContainsSubset(self.note, response.data[0])
def test_note_get(self):
self._add_notes_to_data_point()
view = NoteViewSet.as_view({'get': 'retrieve'})
request = self.factory.get('/', **self.extra)
response = view(request, pk=self.pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['owner'], self.user.username)
self.assertDictContainsSubset(self.note, response.data)
def test_get_note_for_specific_instance(self):
self._add_notes_to_data_point()
view = NoteViewSet.as_view({'get': 'retrieve'})
instance = self.xform.instances.first()
query_params = {"instance": instance.id}
request = self.factory.get('/', data=query_params, **self.extra)
response = view(request, pk=self.pk)
self.assertEqual(response.status_code, 200)
self.assertDictContainsSubset(self.note, response.data)
second_instance = self.xform.instances.last()
query_params = {"instance": second_instance.id}
request = self.factory.get('/', data=query_params, **self.extra)
response = view(request, pk=self.pk)
self.assertEqual(response.status_code, 200)
self.assertListEqual(response.data, [])
def test_add_notes_to_data_point(self):
self._add_notes_to_data_point()
self.assertEquals(len(self._first_xform_instance.json["_notes"]), 1)
def test_other_user_notes_access(self):
self._create_user_and_login('lilly', '1234')
extra = {'HTTP_AUTHORIZATION': 'Token %s' % self.user.auth_token}
note = {'note': u"Road Warrior"}
dataid = self.xform.instances.first().pk
note['instance'] = dataid
# Other user 'lilly' should not be able to create notes
# to xform instance owned by 'bob'
request = self.factory.post('/', data=note)
self.assertTrue(self.xform.instances.count())
response = self.view(request)
self.assertEqual(response.status_code, 401)
# save some notes
self._add_notes_to_data_point()
# access to /notes endpoint,should be empty list
request = self.factory.get('/', **extra)
response = self.view(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, [])
# Other user 'lilly' sees an empty list when accessing bob's notes
view = NoteViewSet.as_view({'get': 'retrieve'})
query_params = {"instance": dataid}
request = self.factory.get('/', data=query_params, **extra)
response = view(request, pk=self.pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, [])
def test_collaborator_with_readonly_permission_can_add_comment(self):
self._create_user_and_login('lilly', '1234')
extra = {'HTTP_AUTHORIZATION': 'Token %s' % self.user.auth_token}
# save some notes
self._add_notes_to_data_point()
# post note to submission as lilly without permissions
note = {'note': u"Road Warrior"}
dataid = self._first_xform_instance.pk
note['instance'] = dataid
request = self.factory.post('/', data=note)
self.assertTrue(self.xform.instances.count())
response = self.view(request)
self.assertEqual(response.status_code, 401)
# post note to submission with permissions to form
assign_perm('view_xform', self.user, self._first_xform_instance.xform)
note = {'note': u"Road Warrior"}
dataid = self._first_xform_instance.pk
note['instance'] = dataid
request = self.factory.post('/', data=note, **extra)
self.assertTrue(self.xform.instances.count())
response = self.view(request)
self.assertEqual(response.status_code, 201)
def test_delete_note(self):
self._add_notes_to_data_point()
request = self.factory.delete('/', **self.extra)
response = self.view(request, pk=self.pk)
self.assertEqual(response.status_code, 204)
request = self.factory.get('/', **self.extra)
response = self.view(request)
self.assertEqual(response.status_code, 200)
self.assertEquals(response.data, [])
def test_question_level_notes(self):
field = "transport"
dataid = self.xform.instances.all()[0].pk
note = {
'note': "Road Warrior",
'instance': dataid,
'instance_field': field
}
request = self.factory.post('/', data=note, **self.extra)
self.assertTrue(self.xform.instances.count())
response = self.view(request)
self.assertEqual(response.status_code, 201)
instance = self.xform.instances.all()[0]
self.assertEquals(len(instance.json["_notes"]), 1)
note = instance.json["_notes"][0]
self.assertEquals(note['instance_field'], field)
def test_only_add_question_notes_to_existing_fields(self):
field = "bla"
dataid = self.xform.instances.all()[0].pk
note = {
'note': "Road Warrior",
'instance': dataid,
'instance_field': field
}
request = self.factory.post('/', data=note, **self.extra)
self.assertTrue(self.xform.instances.count())
response = self.view(request)
self.assertEqual(response.status_code, 400)
instance = self.xform.instances.all()[0]
self.assertEquals(len(instance.json["_notes"]), 0)
def test_csv_export_form_w_notes(self):
"""
Test CSV exports include notes for submissions that have notes.
"""
self._add_notes_to_data_point()
self._add_notes_to_data_point()
time = make_aware(datetime(2016, 7, 1))
for instance in self.xform.instances.all():
instance.date_created = time
instance.save()
instance.parsed_instance.save()
view = XFormViewSet.as_view({'get': 'retrieve'})
request = self.factory.get('/', **self.extra)
response = view(request, pk=self.xform.pk, format='csv')
self.assertTrue(response.status_code, 200)
test_file_path = os.path.join(settings.PROJECT_ROOT, 'apps', 'viewer', |
def test_attribute_error_bug(self):
"""NoteSerializer: Should not raise AttributeError exeption"""
note = Note(note='Hello', instance=self._first_xform_instance)
note.save()
data = NoteSerializer(note).data
self.assertDictContainsSubset({
'created_by': None,
'note': u'Hello',
'instance': note.instance_id,
'owner': None
}, data) | 'tests', 'fixtures',
'transportation_w_notes.csv')
self._test_csv_response(response, test_file_path) |
luma.rs | use core::{
any::TypeId,
convert::TryInto,
fmt,
marker::PhantomData,
ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Sub, SubAssign},
};
use approx::{AbsDiffEq, RelativeEq, UlpsEq};
#[cfg(feature = "random")]
use rand::{
distributions::{
uniform::{SampleBorrow, SampleUniform, Uniform, UniformSampler},
Distribution, Standard,
},
Rng,
};
use crate::{
blend::PreAlpha,
cast::{ComponentOrder, Packed, UintCast},
clamp, clamp_assign, contrast_ratio,
convert::FromColorUnclamped,
encoding::{linear::LinearFn, Linear, Srgb, TransferFn},
luma::LumaStandard,
num::{Arithmetics, IsValidDivisor, MinMax, One, Real, Sqrt, Zero},
stimulus::{FromStimulus, Stimulus},
Alpha, Blend, Clamp, ClampAssign, ComponentWise, IsWithinBounds, Lighten, LightenAssign, Mix,
MixAssign, RelativeContrast, Xyz, Yxy,
};
/// Luminance with an alpha component. See the [`Lumaa` implementation
/// in `Alpha`](crate::Alpha#Lumaa).
pub type Lumaa<S = Srgb, T = f32> = Alpha<Luma<S, T>, T>;
/// Luminance.
///
/// Luma is a purely gray scale color space, which is included more for
/// completeness than anything else, and represents how bright a color is
/// perceived to be. It's basically the `Y` component of [CIE
/// XYZ](crate::Xyz). The lack of any form of hue representation limits
/// the set of operations that can be performed on it.
#[derive(Debug, ArrayCast, FromColorUnclamped, WithAlpha)]
#[cfg_attr(feature = "serializing", derive(Serialize, Deserialize))]
#[palette(
palette_internal,
luma_standard = "S",
component = "T",
skip_derives(Xyz, Yxy, Luma)
)]
#[repr(C)]
#[doc(alias = "gray")]
#[doc(alias = "grey")]
pub struct Luma<S = Srgb, T = f32> {
/// The lightness of the color. 0.0 is black and 1.0 is white.
pub luma: T,
/// The kind of RGB standard. sRGB is the default.
#[cfg_attr(feature = "serializing", serde(skip))]
#[palette(unsafe_zero_sized)]
pub standard: PhantomData<S>,
}
impl<S, T: Copy> Copy for Luma<S, T> {}
impl<S, T: Clone> Clone for Luma<S, T> {
fn clone(&self) -> Luma<S, T> {
Luma {
luma: self.luma.clone(),
standard: PhantomData,
}
}
}
impl<S, T> Luma<S, T> {
/// Create a luminance color.
pub const fn new(luma: T) -> Luma<S, T> {
Luma {
luma,
standard: PhantomData,
}
}
/// Convert into another component type.
pub fn into_format<U>(self) -> Luma<S, U>
where
U: FromStimulus<T>,
{
Luma {
luma: U::from_stimulus(self.luma),
standard: PhantomData,
}
}
/// Convert from another component type.
pub fn from_format<U>(color: Luma<S, U>) -> Self
where
T: FromStimulus<U>,
{
color.into_format()
}
/// Convert to a `(luma,)` tuple.
pub fn into_components(self) -> (T,) {
(self.luma,)
}
/// Convert from a `(luma,)` tuple.
pub fn from_components((luma,): (T,)) -> Self {
Self::new(luma)
}
fn reinterpret_as<S2>(self) -> Luma<S2, T>
where
S: LumaStandard<T>,
S2: LumaStandard<T, WhitePoint = S::WhitePoint>,
{
Luma {
luma: self.luma,
standard: PhantomData,
}
}
}
impl<S, T> Luma<S, T>
where
T: Stimulus,
{
/// Return the `luma` value minimum.
pub fn min_luma() -> T {
T::zero()
}
/// Return the `luma` value maximum.
pub fn max_luma() -> T {
T::max_intensity()
}
}
impl<S> Luma<S, u8> {
    /// Convert to a packed `u16` with a specifiable component order.
///
/// ```
/// use palette::{luma, SrgbLuma};
///
/// let integer = SrgbLuma::new(96u8).into_u16::<luma::channels::La>();
/// assert_eq!(0x60FF, integer);
/// ```
///
/// It's also possible to use `From` and `Into`, which defaults to the
/// `0xAALL` component order:
///
/// ```
/// use palette::SrgbLuma;
///
/// let integer = u16::from(SrgbLuma::new(96u8));
/// assert_eq!(0xFF60, integer);
/// ```
///
/// See [Packed](crate::cast::Packed) for more details.
#[inline]
pub fn into_u16<O>(self) -> u16
where
O: ComponentOrder<Lumaa<S, u8>, u16>,
{
O::pack(Lumaa::from(self))
}
/// Convert from a packed `u16` with specifiable component order.
///
/// ```
/// use palette::{luma, SrgbLuma};
///
/// let luma = SrgbLuma::from_u16::<luma::channels::La>(0x60FF);
/// assert_eq!(SrgbLuma::new(96u8), luma);
/// ```
///
/// It's also possible to use `From` and `Into`, which defaults to the
/// `0xAALL` component order:
///
/// ```
/// use palette::SrgbLuma;
///
/// let luma = SrgbLuma::from(0x60u16);
/// assert_eq!(SrgbLuma::new(96u8), luma);
/// ```
///
/// See [Packed](crate::cast::Packed) for more details.
#[inline]
pub fn from_u16<O>(color: u16) -> Self
where
O: ComponentOrder<Lumaa<S, u8>, u16>,
{
O::unpack(color).color
}
}
impl<S, T> Luma<S, T>
where
S: LumaStandard<T>,
{
/// Convert the color to linear luminance.
pub fn into_linear(self) -> Luma<Linear<S::WhitePoint>, T> {
Luma::new(S::TransferFn::into_linear(self.luma))
}
/// Convert linear luminance to non-linear luminance.
pub fn from_linear(color: Luma<Linear<S::WhitePoint>, T>) -> Luma<S, T> {
Luma::new(S::TransferFn::from_linear(color.luma))
}
/// Convert the color to a different encoding.
pub fn into_encoding<St>(self) -> Luma<St, T>
where
St: LumaStandard<T, WhitePoint = S::WhitePoint>,
{
Luma::new(St::TransferFn::from_linear(S::TransferFn::into_linear(
self.luma,
)))
}
/// Convert luminance from a different encoding.
pub fn from_encoding<St>(color: Luma<St, T>) -> Luma<S, T>
where
St: LumaStandard<T, WhitePoint = S::WhitePoint>,
{
Luma::new(S::TransferFn::from_linear(St::TransferFn::into_linear(
color.luma,
)))
}
}
impl<S, T> PartialEq for Luma<S, T>
where
T: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
self.luma == other.luma
}
}
impl<S, T> Eq for Luma<S, T> where T: Eq {}
// Safety:
//
// Luma is a transparent wrapper around its component, which fulfills the
// requirements of UintCast.
unsafe impl<S> UintCast for Luma<S, u8> {
type Uint = u8;
}
// Safety:
//
// Luma is a transparent wrapper around its component, which fulfills the
// requirements of UintCast.
unsafe impl<S> UintCast for Luma<S, u16> {
type Uint = u16;
}
// Safety:
//
// Luma is a transparent wrapper around its component, which fulfills the
// requirements of UintCast.
unsafe impl<S> UintCast for Luma<S, u32> {
type Uint = u32;
}
// Safety:
//
// Luma is a transparent wrapper around its component, which fulfills the
// requirements of UintCast.
unsafe impl<S> UintCast for Luma<S, u64> {
type Uint = u64;
}
// Safety:
//
// Luma is a transparent wrapper around its component, which fulfills the
// requirements of UintCast.
unsafe impl<S> UintCast for Luma<S, u128> {
type Uint = u128;
}
///<span id="Lumaa"></span>[`Lumaa`](crate::luma::Lumaa) implementations.
impl<S, T, A> Alpha<Luma<S, T>, A> {
/// Create a luminance color with transparency.
pub const fn new(luma: T, alpha: A) -> Self {
Alpha {
color: Luma::new(luma),
alpha,
}
}
/// Convert into another component type.
pub fn into_format<U, B>(self) -> Alpha<Luma<S, U>, B>
where
U: FromStimulus<T>,
B: FromStimulus<A>,
{
Alpha {
color: self.color.into_format(),
alpha: B::from_stimulus(self.alpha),
}
}
/// Convert from another component type.
pub fn from_format<U, B>(color: Alpha<Luma<S, U>, B>) -> Self
where
T: FromStimulus<U>,
A: FromStimulus<B>,
{
color.into_format()
}
/// Convert to a `(luma, alpha)` tuple.
pub fn into_components(self) -> (T, A) {
(self.color.luma, self.alpha)
}
/// Convert from a `(luma, alpha)` tuple.
pub fn from_components((luma, alpha): (T, A)) -> Self {
Self::new(luma, alpha)
}
}
impl<S> Lumaa<S, u8> {
    /// Convert to a packed `u16` with a specific component order.
///
/// ```
/// use palette::{luma, SrgbLumaa};
///
/// let integer = SrgbLumaa::new(96u8, 255).into_u16::<luma::channels::Al>();
/// assert_eq!(0xFF60, integer);
/// ```
///
/// It's also possible to use `From` and `Into`, which defaults to the
/// `0xLLAA` component order:
///
/// ```
/// use palette::SrgbLumaa;
///
/// let integer = u16::from(SrgbLumaa::new(96u8, 255));
/// assert_eq!(0x60FF, integer);
/// ```
///
/// See [Packed](crate::cast::Packed) for more details.
#[inline]
pub fn into_u16<O>(self) -> u16
where
O: ComponentOrder<Lumaa<S, u8>, u16>,
{
O::pack(self)
}
/// Convert from a packed `u16` with a specific component order.
///
/// ```
/// use palette::{luma, SrgbLumaa};
///
/// let luma = SrgbLumaa::from_u16::<luma::channels::Al>(0xFF60);
/// assert_eq!(SrgbLumaa::new(96u8, 255), luma);
/// ```
///
/// It's also possible to use `From` and `Into`, which defaults to the
/// `0xLLAA` component order:
///
/// ```
/// use palette::SrgbLumaa;
///
/// let luma = SrgbLumaa::from(0x60FF);
/// assert_eq!(SrgbLumaa::new(96u8, 255), luma);
/// ```
///
/// See [Packed](crate::cast::Packed) for more details.
#[inline]
pub fn from_u16<O>(color: u16) -> Self
where
O: ComponentOrder<Lumaa<S, u8>, u16>,
{
O::unpack(color)
}
}
///[`Lumaa`](crate::luma::Lumaa) implementations.
impl<S, T, A> Alpha<Luma<S, T>, A>
where
S: LumaStandard<T>,
{
/// Convert the color to linear luminance with transparency.
pub fn into_linear(self) -> Alpha<Luma<Linear<S::WhitePoint>, T>, A> {
Alpha {
color: self.color.into_linear(),
alpha: self.alpha,
}
}
/// Convert linear luminance to non-linear luminance with transparency.
pub fn from_linear(color: Alpha<Luma<Linear<S::WhitePoint>, T>, A>) -> Alpha<Luma<S, T>, A> {
Alpha {
color: Luma::from_linear(color.color),
alpha: color.alpha,
}
}
/// Convert the color to a different encoding with transparency.
pub fn into_encoding<St>(self) -> Alpha<Luma<St, T>, A>
where
St: LumaStandard<T, WhitePoint = S::WhitePoint>,
{
Alpha {
color: Luma::from_linear(self.color.into_linear()),
alpha: self.alpha,
}
}
/// Convert luminance from a different encoding with transparency.
pub fn from_encoding<St>(color: Alpha<Luma<St, T>, A>) -> Alpha<Luma<S, T>, A>
where
St: LumaStandard<T, WhitePoint = S::WhitePoint>,
{
color.into_encoding()
}
}
impl<S1, S2, T> FromColorUnclamped<Luma<S2, T>> for Luma<S1, T>
where
S1: LumaStandard<T>,
S2: LumaStandard<T, WhitePoint = S1::WhitePoint>,
{
fn from_color_unclamped(color: Luma<S2, T>) -> Self {
if TypeId::of::<S1>() == TypeId::of::<S2>() {
color.reinterpret_as()
} else {
Self::from_linear(color.into_linear().reinterpret_as())
}
}
}
impl<S, T> FromColorUnclamped<Xyz<S::WhitePoint, T>> for Luma<S, T>
where
S: LumaStandard<T>,
{
fn from_color_unclamped(color: Xyz<S::WhitePoint, T>) -> Self {
Self::from_linear(Luma {
luma: color.y,
standard: PhantomData,
})
}
}
impl<S, T> FromColorUnclamped<Yxy<S::WhitePoint, T>> for Luma<S, T>
where
S: LumaStandard<T>,
{
fn from_color_unclamped(color: Yxy<S::WhitePoint, T>) -> Self {
Self::from_linear(Luma {
luma: color.luma,
standard: PhantomData,
})
}
}
impl<S, T> From<(T,)> for Luma<S, T> {
fn from(components: (T,)) -> Self {
Self::from_components(components)
}
}
impl<S, T> From<Luma<S, T>> for (T,) {
fn from(color: Luma<S, T>) -> (T,) {
color.into_components()
}
}
impl<S, T, A> From<(T, A)> for Alpha<Luma<S, T>, A> {
fn from(components: (T, A)) -> Self {
Self::from_components(components)
}
}
impl<S, T, A> From<Alpha<Luma<S, T>, A>> for (T, A) {
fn from(color: Alpha<Luma<S, T>, A>) -> (T, A) {
color.into_components()
}
}
impl<S, T> IsWithinBounds for Luma<S, T>
where
T: Stimulus + PartialOrd,
{
#[inline]
fn is_within_bounds(&self) -> bool {
self.luma >= Self::min_luma() && self.luma <= Self::max_luma()
}
}
impl<S, T> Clamp for Luma<S, T>
where
T: Stimulus + PartialOrd,
{
#[inline]
fn clamp(self) -> Self {
Self::new(clamp(self.luma, Self::min_luma(), Self::max_luma()))
}
}
impl<S, T> ClampAssign for Luma<S, T>
where
T: Stimulus + PartialOrd,
{
#[inline]
fn clamp_assign(&mut self) {
clamp_assign(&mut self.luma, Self::min_luma(), Self::max_luma());
}
}
impl_mix!(Luma<S> where S: LumaStandard<T, TransferFn = LinearFn>,);
impl_lighten!(Luma<S> increase {luma => [Self::min_luma(), Self::max_luma()]} other {} phantom: standard where T: Stimulus, S: LumaStandard<T, TransferFn = LinearFn>);
impl<S, T> Blend for Luma<S, T>
where
S: LumaStandard<T, TransferFn = LinearFn>,
T: Real + One + Zero + MinMax + Sqrt + IsValidDivisor + Arithmetics + PartialOrd + Clone,
Lumaa<S, T>: From<PreAlpha<Luma<S, T>, T>>,
{
type Color = Luma<S, T>;
fn into_premultiplied(self) -> PreAlpha<Luma<S, T>, T> {
Lumaa {
color: self,
alpha: T::one(),
}
.into_premultiplied()
}
fn from_premultiplied(color: PreAlpha<Luma<S, T>, T>) -> Self {
Lumaa::from_premultiplied(color).color
}
}
impl<S, T> ComponentWise for Luma<S, T>
where
T: Clone,
{
type Scalar = T;
fn component_wise<F: FnMut(T, T) -> T>(&self, other: &Luma<S, T>, mut f: F) -> Luma<S, T> {
Luma {
luma: f(self.luma.clone(), other.luma.clone()),
standard: PhantomData,
}
}
fn component_wise_self<F: FnMut(T) -> T>(&self, mut f: F) -> Luma<S, T> {
Luma {
luma: f(self.luma.clone()),
standard: PhantomData,
}
}
}
impl<S, T> Default for Luma<S, T>
where
T: Stimulus,
{
fn default() -> Luma<S, T> {
Luma::new(Self::min_luma())
}
}
impl<S, T> Add<Luma<S, T>> for Luma<S, T>
where
T: Add,
S: LumaStandard<T, TransferFn = LinearFn>,
{
type Output = Luma<S, <T as Add>::Output>;
fn add(self, other: Luma<S, T>) -> Self::Output {
Luma {
luma: self.luma + other.luma,
standard: PhantomData,
}
}
}
impl<S, T> Add<T> for Luma<S, T>
where
T: Add,
S: LumaStandard<T, TransferFn = LinearFn>,
{
type Output = Luma<S, <T as Add>::Output>;
fn add(self, c: T) -> Self::Output {
Luma {
luma: self.luma + c,
standard: PhantomData,
}
}
}
impl<S, T> AddAssign<Luma<S, T>> for Luma<S, T>
where
T: AddAssign,
S: LumaStandard<T, TransferFn = LinearFn>,
{
fn add_assign(&mut self, other: Luma<S, T>) {
self.luma += other.luma;
}
}
impl<S, T> AddAssign<T> for Luma<S, T>
where
T: AddAssign,
S: LumaStandard<T, TransferFn = LinearFn>,
{
fn add_assign(&mut self, c: T) {
self.luma += c;
}
}
impl<S, T> Sub<Luma<S, T>> for Luma<S, T>
where
T: Sub,
S: LumaStandard<T, TransferFn = LinearFn>,
{
type Output = Luma<S, <T as Sub>::Output>;
fn sub(self, other: Luma<S, T>) -> Self::Output {
Luma {
luma: self.luma - other.luma,
standard: PhantomData,
}
}
}
impl<S, T> Sub<T> for Luma<S, T>
where
T: Sub,
S: LumaStandard<T, TransferFn = LinearFn>,
{
type Output = Luma<S, <T as Sub>::Output>;
fn sub(self, c: T) -> Self::Output {
Luma {
luma: self.luma - c,
standard: PhantomData,
}
}
}
impl<S, T> SubAssign<Luma<S, T>> for Luma<S, T>
where
T: SubAssign,
S: LumaStandard<T, TransferFn = LinearFn>,
{
fn sub_assign(&mut self, other: Luma<S, T>) |
}
impl<S, T> SubAssign<T> for Luma<S, T>
where
T: SubAssign,
S: LumaStandard<T, TransferFn = LinearFn>,
{
fn sub_assign(&mut self, c: T) {
self.luma -= c;
}
}
impl<S, T> Mul<Luma<S, T>> for Luma<S, T>
where
T: Mul,
S: LumaStandard<T, TransferFn = LinearFn>,
{
type Output = Luma<S, <T as Mul>::Output>;
fn mul(self, other: Luma<S, T>) -> Self::Output {
Luma {
luma: self.luma * other.luma,
standard: PhantomData,
}
}
}
impl<S, T> Mul<T> for Luma<S, T>
where
T: Mul,
S: LumaStandard<T, TransferFn = LinearFn>,
{
type Output = Luma<S, <T as Mul>::Output>;
fn mul(self, c: T) -> Self::Output {
Luma {
luma: self.luma * c,
standard: PhantomData,
}
}
}
impl<S, T> MulAssign<Luma<S, T>> for Luma<S, T>
where
T: MulAssign,
S: LumaStandard<T, TransferFn = LinearFn>,
{
fn mul_assign(&mut self, other: Luma<S, T>) {
self.luma *= other.luma;
}
}
impl<S, T> MulAssign<T> for Luma<S, T>
where
T: MulAssign,
S: LumaStandard<T, TransferFn = LinearFn>,
{
fn mul_assign(&mut self, c: T) {
self.luma *= c;
}
}
impl<S, T> Div<Luma<S, T>> for Luma<S, T>
where
T: Div,
S: LumaStandard<T, TransferFn = LinearFn>,
{
type Output = Luma<S, <T as Div>::Output>;
fn div(self, other: Luma<S, T>) -> Self::Output {
Luma {
luma: self.luma / other.luma,
standard: PhantomData,
}
}
}
impl<S, T> Div<T> for Luma<S, T>
where
T: Div,
S: LumaStandard<T, TransferFn = LinearFn>,
{
type Output = Luma<S, <T as Div>::Output>;
fn div(self, c: T) -> Self::Output {
Luma {
luma: self.luma / c,
standard: PhantomData,
}
}
}
impl<S, T> DivAssign<Luma<S, T>> for Luma<S, T>
where
T: DivAssign,
S: LumaStandard<T, TransferFn = LinearFn>,
{
fn div_assign(&mut self, other: Luma<S, T>) {
self.luma /= other.luma;
}
}
impl<S, T> DivAssign<T> for Luma<S, T>
where
T: DivAssign,
S: LumaStandard<T, TransferFn = LinearFn>,
{
fn div_assign(&mut self, c: T) {
self.luma /= c;
}
}
impl_array_casts!(Luma<S, T>, [T; 1]);
impl<S, T> AsRef<T> for Luma<S, T> {
#[inline]
fn as_ref(&self) -> &T {
&self.luma
}
}
impl<S, T> AsMut<T> for Luma<S, T> {
#[inline]
fn as_mut(&mut self) -> &mut T {
&mut self.luma
}
}
impl<S, T> From<T> for Luma<S, T> {
#[inline]
fn from(luma: T) -> Self {
Self::new(luma)
}
}
macro_rules! impl_luma_cast_other {
($($other: ty),+) => {
$(
impl<'a, S> From<&'a $other> for &'a Luma<S, $other>
where
$other: AsRef<Luma<S, $other>>,
{
#[inline]
fn from(luma: &'a $other) -> Self {
luma.as_ref()
}
}
impl<'a, S> From<&'a mut $other> for &'a mut Luma<S, $other>
where
$other: AsMut<Luma<S, $other>>,
{
#[inline]
fn from(luma: &'a mut $other) -> Self {
luma.as_mut()
}
}
impl<S> AsRef<Luma<S, $other>> for $other {
#[inline]
fn as_ref(&self) -> &Luma<S, $other> {
core::slice::from_ref(self).try_into().unwrap()
}
}
impl<S> AsMut<Luma<S, $other>> for $other {
#[inline]
fn as_mut(&mut self) -> &mut Luma<S, $other> {
core::slice::from_mut(self).try_into().unwrap()
}
}
impl<S> From<Luma<S, $other>> for $other {
#[inline]
fn from(color: Luma<S, $other>) -> Self {
color.luma
}
}
impl<'a, S> From<&'a Luma<S, $other>> for &'a $other {
#[inline]
fn from(color: &'a Luma<S, $other>) -> Self {
color.as_ref()
}
}
impl<'a, S> From<&'a mut Luma<S, $other>> for &'a mut $other {
#[inline]
fn from(color: &'a mut Luma<S, $other>) -> Self {
color.as_mut()
}
}
)+
};
}
impl_luma_cast_other!(u8, u16, u32, u64, u128, f32, f64);
impl<S, T, P, O> From<Luma<S, T>> for Packed<O, P>
where
O: ComponentOrder<Lumaa<S, T>, P>,
Lumaa<S, T>: From<Luma<S, T>>,
{
#[inline]
fn from(color: Luma<S, T>) -> Self {
Self::from(Lumaa::from(color))
}
}
impl<S, T, O, P> From<Lumaa<S, T>> for Packed<O, P>
where
O: ComponentOrder<Lumaa<S, T>, P>,
{
#[inline]
fn from(color: Lumaa<S, T>) -> Self {
Packed::pack(color)
}
}
impl<S, O, P> From<Packed<O, P>> for Luma<S, u8>
where
O: ComponentOrder<Lumaa<S, u8>, P>,
{
#[inline]
fn from(packed: Packed<O, P>) -> Self {
Lumaa::from(packed).color
}
}
impl<S, T, O, P> From<Packed<O, P>> for Lumaa<S, T>
where
O: ComponentOrder<Lumaa<S, T>, P>,
{
#[inline]
fn from(packed: Packed<O, P>) -> Self {
packed.unpack()
}
}
impl<S> From<u16> for Luma<S, u8> {
#[inline]
fn from(color: u16) -> Self {
Self::from_u16::<super::channels::Al>(color)
}
}
impl<S> From<u16> for Lumaa<S, u8> {
#[inline]
fn from(color: u16) -> Self {
Self::from_u16::<super::channels::La>(color)
}
}
impl<S> From<Luma<S, u8>> for u16 {
#[inline]
fn from(color: Luma<S, u8>) -> Self {
Luma::into_u16::<super::channels::Al>(color)
}
}
impl<S> From<Lumaa<S, u8>> for u16 {
#[inline]
fn from(color: Lumaa<S, u8>) -> Self {
Lumaa::into_u16::<super::channels::La>(color)
}
}
impl<S, T> AbsDiffEq for Luma<S, T>
where
T: AbsDiffEq,
{
type Epsilon = T::Epsilon;
fn default_epsilon() -> Self::Epsilon {
T::default_epsilon()
}
fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
self.luma.abs_diff_eq(&other.luma, epsilon)
}
}
impl<S, T> RelativeEq for Luma<S, T>
where
T: RelativeEq,
{
fn default_max_relative() -> Self::Epsilon {
T::default_max_relative()
}
fn relative_eq(
&self,
other: &Self,
epsilon: Self::Epsilon,
max_relative: Self::Epsilon,
) -> bool {
self.luma.relative_eq(&other.luma, epsilon, max_relative)
}
}
impl<S, T> UlpsEq for Luma<S, T>
where
T: UlpsEq,
{
fn default_max_ulps() -> u32 {
T::default_max_ulps()
}
fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
self.luma.ulps_eq(&other.luma, epsilon, max_ulps)
}
}
impl<S, T> fmt::LowerHex for Luma<S, T>
where
T: fmt::LowerHex,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let size = f.width().unwrap_or(::core::mem::size_of::<T>() * 2);
write!(f, "{:0width$x}", self.luma, width = size)
}
}
impl<S, T> fmt::UpperHex for Luma<S, T>
where
T: fmt::UpperHex,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let size = f.width().unwrap_or(::core::mem::size_of::<T>() * 2);
write!(f, "{:0width$X}", self.luma, width = size)
}
}
impl<S, T> RelativeContrast for Luma<S, T>
where
T: Real + Arithmetics + PartialOrd,
S: LumaStandard<T>,
{
type Scalar = T;
#[inline]
fn get_contrast_ratio(self, other: Self) -> T {
let luma1 = self.into_linear();
let luma2 = other.into_linear();
contrast_ratio(luma1.luma, luma2.luma)
}
}
#[cfg(feature = "random")]
impl<S, T> Distribution<Luma<S, T>> for Standard
where
Standard: Distribution<T>,
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Luma<S, T> {
Luma {
luma: rng.gen(),
standard: PhantomData,
}
}
}
#[cfg(feature = "random")]
pub struct UniformLuma<S, T>
where
T: SampleUniform,
{
luma: Uniform<T>,
standard: PhantomData<S>,
}
#[cfg(feature = "random")]
impl<S, T> SampleUniform for Luma<S, T>
where
T: SampleUniform + Clone,
{
type Sampler = UniformLuma<S, T>;
}
#[cfg(feature = "random")]
impl<S, T> UniformSampler for UniformLuma<S, T>
where
T: SampleUniform + Clone,
{
type X = Luma<S, T>;
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
UniformLuma {
luma: Uniform::new::<_, T>(low.luma.clone(), high.luma.clone()),
standard: PhantomData,
}
}
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
UniformLuma {
luma: Uniform::new_inclusive::<_, T>(low.luma.clone(), high.luma.clone()),
standard: PhantomData,
}
}
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Luma<S, T> {
Luma {
luma: self.luma.sample(rng),
standard: PhantomData,
}
}
}
#[cfg(feature = "bytemuck")]
unsafe impl<S, T> bytemuck::Zeroable for Luma<S, T> where T: bytemuck::Zeroable {}
#[cfg(feature = "bytemuck")]
unsafe impl<S: 'static, T> bytemuck::Pod for Luma<S, T> where T: bytemuck::Pod {}
#[cfg(test)]
mod test {
use crate::encoding::Srgb;
use crate::Luma;
#[test]
fn ranges() {
assert_ranges! {
Luma<Srgb, f64>;
clamped {
luma: 0.0 => 1.0
}
clamped_min {}
unclamped {}
}
}
raw_pixel_conversion_tests!(Luma<Srgb>: luma);
#[test]
fn lower_hex() {
assert_eq!(format!("{:x}", Luma::<Srgb, u8>::new(161)), "a1");
}
#[test]
fn lower_hex_small_numbers() {
assert_eq!(format!("{:x}", Luma::<Srgb, u8>::new(1)), "01");
assert_eq!(format!("{:x}", Luma::<Srgb, u16>::new(1)), "0001");
assert_eq!(format!("{:x}", Luma::<Srgb, u32>::new(1)), "00000001");
assert_eq!(
format!("{:x}", Luma::<Srgb, u64>::new(1)),
"0000000000000001"
);
}
#[test]
fn lower_hex_custom_width() {
assert_eq!(format!("{:03x}", Luma::<Srgb, u8>::new(1)), "001");
assert_eq!(format!("{:03x}", Luma::<Srgb, u16>::new(1)), "001");
assert_eq!(format!("{:03x}", Luma::<Srgb, u32>::new(1)), "001");
assert_eq!(format!("{:03x}", Luma::<Srgb, u64>::new(1)), "001");
}
#[test]
fn upper_hex() {
assert_eq!(format!("{:X}", Luma::<Srgb, u8>::new(161)), "A1");
}
#[test]
fn upper_hex_small_numbers() {
assert_eq!(format!("{:X}", Luma::<Srgb, u8>::new(1)), "01");
assert_eq!(format!("{:X}", Luma::<Srgb, u16>::new(1)), "0001");
assert_eq!(format!("{:X}", Luma::<Srgb, u32>::new(1)), "00000001");
assert_eq!(
format!("{:X}", Luma::<Srgb, u64>::new(1)),
"0000000000000001"
);
}
#[test]
fn upper_hex_custom_width() {
assert_eq!(format!("{:03X}", Luma::<Srgb, u8>::new(1)), "001");
assert_eq!(format!("{:03X}", Luma::<Srgb, u16>::new(1)), "001");
assert_eq!(format!("{:03X}", Luma::<Srgb, u32>::new(1)), "001");
assert_eq!(format!("{:03X}", Luma::<Srgb, u64>::new(1)), "001");
}
#[test]
fn check_min_max_components() {
assert_relative_eq!(Luma::<Srgb, f32>::min_luma(), 0.0);
assert_relative_eq!(Luma::<Srgb, f32>::max_luma(), 1.0);
}
#[cfg(feature = "serializing")]
#[test]
fn serialize() {
let serialized = ::serde_json::to_string(&Luma::<Srgb>::new(0.3)).unwrap();
assert_eq!(serialized, r#"{"luma":0.3}"#);
}
#[cfg(feature = "serializing")]
#[test]
fn deserialize() {
let deserialized: Luma<Srgb> = ::serde_json::from_str(r#"{"luma":0.3}"#).unwrap();
assert_eq!(deserialized, Luma::<Srgb>::new(0.3));
}
#[cfg(feature = "random")]
test_uniform_distribution! {
Luma<Srgb, f32> {
luma: (0.0, 1.0)
},
min: Luma::new(0.0f32),
max: Luma::new(1.0)
}
}
| {
self.luma -= other.luma;
} |
main_test.go | // +build integration
package tests
import (
"os"
"testing"
)
func TestMain(m *testing.M) | {
st := m.Run()
if st != 0 {
os.Exit(st)
}
} |
|
ProofRequestTemplate.ts | import { AutoAcceptProof } from '@aries-framework/core'
import { Type } from 'class-transformer'
import { IsString, IsOptional, ValidateNested, IsEnum } from 'class-validator'
import { ProofRequest } from '../utils/ProofRequest'
export class ProofRequestTemplate {
@ValidateNested()
@Type(() => ProofRequest)
public proofRequest!: ProofRequest
@IsOptional()
@IsString()
public comment?: string
@IsOptional() | @IsEnum(AutoAcceptProof)
public autoAcceptProof?: AutoAcceptProof
} | |
loader.py | import os
import sys
import dataflow as df
import numpy as np
class LTRLoader(df.DataFlow):
"""
Data loader. Combines a dataset and a sampler, and provides
single- or multi-process iterators over the dataset.
Note: an additional option stack_dim is available to
select along which dimension the data should be stacked to form a batch.
Arguments:
dataset (Dataset): dataset from which to load the data.
batch_size (int, optional): how many samples per batch to load
(default: 1).
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: False).
sampler (Sampler, optional): defines the strategy to draw samples from
the dataset. If specified, ``shuffle`` must be False.
batch_sampler (Sampler, optional): like sampler, but returns a batch of
indices at a time. Mutually exclusive with batch_size, shuffle,
sampler, and drop_last.
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means that the data will be loaded in the main process.
(default: 0)
collate_fn (callable, optional): merges a list of samples to form a mini-batch.
stack_dim (int): Dimension along which to stack to form the batch. (default: 0)
pin_memory (bool, optional): If ``True``, the data loader will copy tensors
into CUDA pinned memory before returning them.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If ``False`` and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: False)
timeout (numeric, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative. (default: 0)
worker_init_fn (callable, optional): If not None, this will be called on each
worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
input, after seeding and before data loading. (default: None)
    .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an
        unpicklable object, e.g., a lambda function.
    Note: in this dataflow-based implementation ``shuffle``, ``sampler``,
        ``batch_sampler``, ``collate_fn``, ``pin_memory``, ``drop_last``,
        ``timeout`` and ``worker_init_fn`` are accepted but currently ignored.
    """
__initialized = False
def __init__(self,
name,
dataset,
training=True,
batch_size=1,
shuffle=False,
sampler=None,
batch_sampler=None,
num_workers=0,
epoch_interval=1,
collate_fn=None,
stack_dim=0,
pin_memory=False,
drop_last=False,
timeout=0,
worker_init_fn=None):
super().__init__()
ds = df.RepeatedData(dataset, -1)
ds = df.MultiProcessRunnerZMQ(ds, num_proc=num_workers, hwm=300)
# ds = df.MultiThreadRunner(lambda: ds, num_prefetch=1024, num_thread=num_workers)
ds = df.BatchData(ds, batch_size)
self.ds = ds
self.name = name
self.training = training
self.epoch_interval = epoch_interval
self.stack_dim = stack_dim
self.batches_per_epoch = len(dataset) // batch_size
def __len__(self):
return self.batches_per_epoch
def __iter__(self):
if not self.__initialized:
self.reset_state()
self.__initialized = True
for d in self.ds:
if self.stack_dim > 0:
for k, v in d.items():
if len(v.shape) >= self.stack_dim + 1:
d[k] = np.swapaxes(v, 0, self.stack_dim)
yield d
def reset_state(self):
| self.ds.reset_state() |
|
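# Usage sketch (editor's addition, not part of loader.py): a minimal,
# hedged example of driving LTRLoader; `MySeqDataset` is a hypothetical
# dataset with `__len__` and dataflow-style iteration, used only for
# illustration.
#
# loader = LTRLoader('train', MySeqDataset(), training=True, batch_size=32,
#                    num_workers=4, stack_dim=1)
# print(len(loader))            # batches per epoch = len(dataset) // batch_size
# for batch in loader:          # each batch is a dict of numpy arrays,
#     ...                       # stacked along dimension `stack_dim`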
main.js | const path = require('path')
const url = require('url')
const { app, BrowserWindow } = require('electron')
let mainWindow
let isDev = false
if (
process.env.NODE_ENV !== undefined &&
process.env.NODE_ENV === 'development'
) {
isDev = true
}
function createMainWindow() {
mainWindow = new BrowserWindow({
width: 1100,
height: 800,
show: false,
frame: false,
icon: `${__dirname}/assets/icon.png`,
webPreferences: {
nodeIntegration: true,
},
})
  let indexPath
  // In development (unless --noDevServer is passed) load from the local
  // dev server on port 8080; otherwise load the built bundle from dist/.
  if (isDev && process.argv.indexOf('--noDevServer') === -1) {
indexPath = url.format({
protocol: 'http:',
host: 'localhost:8080',
pathname: 'index.html',
slashes: true,
})
} else {
indexPath = url.format({
protocol: 'file:',
pathname: path.join(__dirname, 'dist', 'index.html'),
slashes: true,
})
}
mainWindow.loadURL(indexPath)
// Don't show until we are ready and loaded
mainWindow.once('ready-to-show', () => {
mainWindow.show()
// Open devtools if dev
if (isDev) {
const {
default: installExtension,
REACT_DEVELOPER_TOOLS,
} = require('electron-devtools-installer')
installExtension(REACT_DEVELOPER_TOOLS).catch((err) =>
console.log('Error loading React DevTools: ', err)
)
mainWindow.webContents.openDevTools()
}
})
mainWindow.on('closed', () => (mainWindow = null))
}
app.on('ready', createMainWindow)
app.on('window-all-closed', () => {
if (process.platform !== 'darwin') {
app.quit()
}
})
app.on('activate', () => {
if (mainWindow === null) { | createMainWindow()
}
})
// Explicitly opt in to renderer process reuse to silence the deprecation
// warning Electron emits about this setting's changing default
app.allowRendererProcessReuse = true |