file_name | prefix | suffix | middle |
---|---|---|---|
main_test.go | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License. | import (
"testing"
"github.com/pingcap/tidb/util/testbridge"
"go.uber.org/goleak"
)
func TestMain(m *testing.M) {
testbridge.SetupForCommonTest()
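// goleak fails the test run if goroutines are still alive after the tests
// finish; the options below ignore known long-lived background loops from
// glog, the etcd client, and opencensus rather than treating them as leaks.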
opts := []goleak.Option{
goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"),
goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"),
goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
}
goleak.VerifyTestMain(m, opts...)
} |
package linux_test
|
color.rs | use crate::prelude::*;
use crate::u8cpu;
use skia_bindings as sb;
use skia_bindings::{SkColor, SkColor4f, SkHSVToColor, SkPMColor, SkRGBToHSV};
use std::ops::{BitAnd, BitOr, Index, IndexMut, Mul};
// TODO: What should we do with SkAlpha?
// It does not seem to be used, but if we want to export it, we'd
// like to define Alpha::TRANSPARENT and Alpha::OPAQUE.
// pub type Alpha = u8;
// Note: SkColor _is_ a u32, and therefore its components are
// endian dependent, so we can't expose it as (transmuted) individual
// argb fields.
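// For example, Color::from_argb(0xff, 0x10, 0x20, 0x30) always yields the
// u32 value 0xff102030, because the shifts below operate on the integer
// value rather than on its in-memory byte order.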
#[derive(Copy, Clone, PartialEq, Default, Debug)]
#[repr(transparent)]
pub struct Color(SkColor);
impl NativeTransmutable<SkColor> for Color {}
#[test]
fn test_color_layout() {
Color::test_layout();
}
impl From<u32> for Color {
fn from(argb: u32) -> Self {
Color::new(argb)
}
}
impl From<RGB> for Color {
fn from(rgb: RGB) -> Self {
Color::from_rgb(rgb.r, rgb.g, rgb.b)
}
}
//
// Bitwise operators.
//
impl BitOr for Color {
type Output = Self;
fn bitor(self, rhs: Self) -> Self::Output {
Color::from_native(self.native() | rhs.native())
}
}
impl BitAnd for Color {
type Output = Self;
fn bitand(self, rhs: Self) -> Self::Output {
Color::from_native(self.native() & rhs.native())
}
}
impl BitOr<u32> for Color {
type Output = Self;
fn bitor(self, rhs: u32) -> Self::Output {
self | Color::from_native(rhs)
}
}
impl BitAnd<u32> for Color {
type Output = Self;
fn bitand(self, rhs: u32) -> Self::Output {
self & (Color::from_native(rhs))
}
}
impl Color {
pub const fn new(argb: u32) -> Self {
Self(argb)
}
// note: we don't use the u8cpu type here, because we trust the Rust
// compiler to optimize the storage type.
pub fn from_argb(a: u8, r: u8, g: u8, b: u8) -> Color {
Self(
(u8cpu::from(a) << 24)
| (u8cpu::from(r) << 16)
| (u8cpu::from(g) << 8)
| (u8cpu::from(b)),
)
}
pub fn from_rgb(r: u8, g: u8, b: u8) -> Color {
Self::from_argb(0xff, r, g, b)
}
pub fn a(self) -> u8 {
(self.into_native() >> 24) as _
}
pub fn r(self) -> u8 {
(self.into_native() >> 16) as _
}
pub fn g(self) -> u8 {
(self.into_native() >> 8) as _
}
pub fn b(self) -> u8 {
self.into_native() as _
}
#[must_use]
pub fn with_a(self, a: u8) -> Self {
Self::from_argb(a, self.r(), self.g(), self.b())
}
pub const TRANSPARENT: Self = Self(sb::SK_ColorTRANSPARENT);
pub const BLACK: Self = Self(sb::SK_ColorBLACK);
pub const DARK_GRAY: Self = Self(sb::SK_ColorDKGRAY);
pub const GRAY: Self = Self(sb::SK_ColorGRAY);
pub const LIGHT_GRAY: Self = Self(sb::SK_ColorLTGRAY);
pub const WHITE: Self = Self(sb::SK_ColorWHITE);
pub const RED: Self = Self(sb::SK_ColorRED);
pub const GREEN: Self = Self(sb::SK_ColorGREEN);
pub const BLUE: Self = Self(sb::SK_ColorBLUE);
pub const YELLOW: Self = Self(sb::SK_ColorYELLOW);
pub const CYAN: Self = Self(sb::SK_ColorCYAN);
pub const MAGENTA: Self = Self(sb::SK_ColorMAGENTA);
pub fn to_rgb(self) -> RGB {
(self.r(), self.g(), self.b()).into()
}
pub fn to_hsv(self) -> HSV {
self.to_rgb().to_hsv()
}
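// For reference: Color::RED.to_hsv() maps to approximately
// (h: 0.0, s: 1.0, v: 1.0) under the conventional HSV ranges.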
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct RGB {
pub r: u8,
pub g: u8,
pub b: u8,
}
impl From<(u8, u8, u8)> for RGB {
fn from((r, g, b): (u8, u8, u8)) -> Self {
Self { r, g, b }
}
}
impl RGB {
pub fn to_hsv(self) -> HSV {
let mut hsv: [f32; 3] = Default::default();
unsafe {
SkRGBToHSV(
self.r.into(),
self.g.into(),
self.b.into(),
hsv.as_mut_ptr(),
);
}
HSV {
h: hsv[0],
s: hsv[1],
v: hsv[2],
}
}
}
#[derive(Copy, Clone, PartialEq, Debug)]
pub struct HSV {
pub h: f32,
pub s: f32,
pub v: f32,
}
impl From<(f32, f32, f32)> for HSV {
fn from((h, s, v): (f32, f32, f32)) -> Self {
Self { h, s, v }
}
}
impl HSV {
pub fn to_color(&self, alpha: u8) -> Color {
Color::from_native(unsafe { SkHSVToColor(alpha.into(), [self.h, self.s, self.v].as_ptr()) })
}
}
pub type PMColor = SkPMColor;
pub fn pre_multiply_argb(a: u8cpu, r: u8cpu, g: u8cpu, b: u8cpu) -> PMColor {
unsafe { sb::SkPreMultiplyARGB(a, r, g, b) }
}
pub fn pre_multiply_color(c: impl Into<Color>) -> PMColor {
unsafe { sb::SkPreMultiplyColor(c.into().into_native()) }
}
pub use sb::SkColorChannel as ColorChannel;
#[test]
fn color_channel_naming() {
let _ = ColorChannel::R;
}
bitflags! {
pub struct ColorChannelFlag: u32 {
const RED = sb::SkColorChannelFlag::kRed_SkColorChannelFlag as _;
const GREEN = sb::SkColorChannelFlag::kGreen_SkColorChannelFlag as _;
const BLUE = sb::SkColorChannelFlag::kBlue_SkColorChannelFlag as _;
const ALPHA = sb::SkColorChannelFlag::kAlpha_SkColorChannelFlag as _;
const GRAY = sb::SkColorChannelFlag::kGray_SkColorChannelFlag as _;
const RG = Self::RED.bits | Self::GREEN.bits;
const RGB = Self::RG.bits | Self::BLUE.bits;
const RGBA = Self::RGB.bits | Self::ALPHA.bits;
}
}
// TODO: SkRGBA4f
#[derive(Clone, PartialEq, Debug)]
#[repr(C)]
pub struct Color4f {
pub r: f32,
pub g: f32,
pub b: f32,
pub a: f32,
}
impl NativeTransmutable<SkColor4f> for Color4f {}
#[test]
fn test_color4f_layout() |
impl AsRef<Self> for Color4f {
fn as_ref(&self) -> &Self {
self
}
}
impl Mul<f32> for Color4f {
type Output = Self;
fn mul(self, scale: f32) -> Self {
let r = self.r * scale;
let g = self.g * scale;
let b = self.b * scale;
let a = self.a * scale;
Self { r, g, b, a }
}
}
impl Mul for Color4f {
type Output = Self;
fn mul(self, scale: Self) -> Self {
self.mul(&scale)
}
}
impl Mul<&Self> for Color4f {
type Output = Self;
fn mul(self, scale: &Self) -> Self {
Self {
r: self.r * scale.r,
g: self.g * scale.g,
b: self.b * scale.b,
a: self.a * scale.a,
}
}
}
impl Index<usize> for Color4f {
type Output = f32;
fn index(&self, index: usize) -> &f32 {
&self.as_array()[index]
}
}
impl IndexMut<usize> for Color4f {
fn index_mut(&mut self, index: usize) -> &mut f32 {
&mut self.as_array_mut()[index]
}
}
impl From<Color> for Color4f {
fn from(color: Color) -> Self {
fn c(c: u8) -> f32 {
(f32::from(c)) * (1.0 / 255.0)
}
let r = c(color.r());
let g = c(color.g());
let b = c(color.b());
let a = c(color.a());
Self { r, g, b, a }
}
}
impl Color4f {
pub const fn new(r: f32, g: f32, b: f32, a: f32) -> Color4f {
Self { r, g, b, a }
}
// corresponding Skia function: vec()
pub fn as_array(&self) -> &[f32; 4] {
unsafe { transmute_ref(self) }
}
// corresponding Skia function: vec()
pub fn as_array_mut(&mut self) -> &mut [f32; 4] {
unsafe { transmute_ref_mut(self) }
}
#[allow(clippy::float_cmp)]
pub fn is_opaque(&self) -> bool {
self.a == 1.0
}
// TODO: This is a copied implementation; it would probably be better
// to call the Skia function.
pub fn fits_in_bytes(&self) -> bool {
debug_assert!(self.a >= 0.0 && self.a <= 1.0);
self.r >= 0.0
&& self.r <= 1.0
&& self.g >= 0.0
&& self.g <= 1.0
&& self.b >= 0.0
&& self.b <= 1.0
}
pub fn to_color(&self) -> Color {
fn c(f: f32) -> u8 {
(f.max(0.0).min(1.0) * 255.0) as u8
}
let a = c(self.a);
let r = c(self.r);
let g = c(self.g);
let b = c(self.b);
Color::from_argb(a, r, g, b)
}
// TODO: FromPMColor
// TODO: premul()
// TODO: unpremul()
// TODO: toBytes_RGBA()
// TODO: FromBytes_RGBA
pub fn to_opaque(&self) -> Self {
Self {
a: 1.0,
..self.clone()
}
}
}
pub mod colors {
use crate::Color4f;
pub const TRANSPARENT: Color4f = Color4f::new(0.0, 0.0, 0.0, 0.0);
pub const BLACK: Color4f = Color4f::new(0.0, 0.0, 0.0, 1.0);
pub const DARK_GREY: Color4f = Color4f::new(0.25, 0.25, 0.25, 1.0);
pub const GREY: Color4f = Color4f::new(0.5, 0.5, 0.5, 1.0);
pub const LIGHT_GREY: Color4f = Color4f::new(0.75, 0.75, 0.75, 1.0);
pub const WHITE: Color4f = Color4f::new(1.0, 1.0, 1.0, 1.0);
pub const RED: Color4f = Color4f::new(1.0, 0.0, 0.0, 1.0);
pub const GREEN: Color4f = Color4f::new(0.0, 1.0, 0.0, 1.0);
pub const BLUE: Color4f = Color4f::new(0.0, 0.0, 1.0, 1.0);
pub const YELLOW: Color4f = Color4f::new(1.0, 1.0, 0.0, 1.0);
pub const CYAN: Color4f = Color4f::new(0.0, 1.0, 1.0, 1.0);
pub const MAGENTA: Color4f = Color4f::new(1.0, 0.0, 1.0, 1.0);
}
#[test]
#[allow(clippy::float_cmp)]
pub fn color4f_array_access() {
let mut color = Color4f {
r: 0.1,
g: 0.2,
b: 0.3,
a: 0.4,
};
color[1] = 0.5;
assert_eq!(0.5, color.g);
}
#[test]
pub fn color_color4f_conversion() {
let c = Color::from_argb(1, 2, 3, 4);
let cf = Color4f::from(c);
let c2 = cf.to_color();
assert_eq!(c, c2);
}
| {
Color4f::test_layout();
} |
borrow.ts | import { Dec, LCDClient } from '@terra-money/terra.js';
import { AddressProvider, MARKET_DENOMS } from '../../address-provider';
import {
fabricateMarketBorrow,
fabricateMarketRepay,
fabricateProvideCollateral,
fabricateRedeemCollateral,
OmitAddress,
OptionType,
} from '../../fabricators';
import {
queryCustodyBorrower,
queryMarketBorrowerInfo,
queryOraclePrices,
queryOverseerBorrowLimit,
queryOverseerWhitelist,
} from '../../queries';
import { Operation, OperationImpl } from '../operation';
interface UserCollateral {
collateral: string;
balance: string;
}
export type BorrowBorrowOption = OmitAddress<
OptionType<typeof fabricateMarketBorrow>
>;
export type BorrowRepayOption = OmitAddress<
OptionType<typeof fabricateMarketRepay>
>;
export type BorrowProvideCollateralOption = OmitAddress<
OptionType<typeof fabricateProvideCollateral>
>;
export type BorrowWithdrawCollateralOption = OmitAddress<
OptionType<typeof fabricateRedeemCollateral>
>;
export interface BorrowQueriesOptions {
market: MARKET_DENOMS;
address: string;
}
export class | {
private _lcd!: LCDClient;
private _addressProvider!: AddressProvider;
constructor(lcd: LCDClient, addressProvider: AddressProvider) {
this._lcd = lcd;
this._addressProvider = addressProvider;
}
borrow(borrowOption: BorrowBorrowOption): Operation {
return new OperationImpl(
fabricateMarketBorrow,
borrowOption,
this._addressProvider,
);
}
repay(repayOption: BorrowRepayOption): Operation {
return new OperationImpl(
fabricateMarketRepay,
repayOption,
this._addressProvider,
);
}
provideCollateral(
provideCollateralOptions: BorrowProvideCollateralOption,
): Operation {
return new OperationImpl(
fabricateProvideCollateral,
provideCollateralOptions,
this._addressProvider,
);
}
withdrawCollateral(
withdrawCollateralOption: BorrowWithdrawCollateralOption,
): Operation {
return new OperationImpl(
fabricateRedeemCollateral,
withdrawCollateralOption,
this._addressProvider,
);
}
async getCollateralValue(
getCollateralValueOption: BorrowQueriesOptions,
): Promise<string> {
// only bLuna is supported now, and the below requests are only about bLuna
const oraclePrice = await queryOraclePrices({ lcd: this._lcd, limit: 30 })(
this._addressProvider,
);
const collaterals = await this.getCollaterals(getCollateralValueOption);
if (collaterals.length === 1 && collaterals[0] === null) {
return new Dec(0).toString();
}
const total = collaterals.reduce((sum, collateral) => {
const collateralPrice = oraclePrice.prices.find(
(p) => p.asset === collateral.collateral,
);
if (!collateralPrice || new Dec(collateralPrice.price).eq(0)) {
return sum;
}
return sum.add(new Dec(collateral.balance).mul(collateralPrice.price));
}, new Dec(0));
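// Note: on-chain amounts are denominated in micro units, so dividing by
// 1,000,000 converts the accumulated value to whole-unit terms.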
return total.div(1000000).toString();
}
async getCollaterals(
getCollateralsOption: BorrowQueriesOptions,
): Promise<UserCollateral[]> {
// get user balances of all COLLATERAL_DENOMS
const whitelistedCollaterals = await queryOverseerWhitelist({
lcd: this._lcd,
...getCollateralsOption,
})(this._addressProvider);
const collateralDenoms = await Promise.all(
whitelistedCollaterals.elems
.map(async (whitelist) => {
const userBalance = await queryCustodyBorrower({
lcd: this._lcd,
...getCollateralsOption,
custody: getCollateralsOption.market,
})(this._addressProvider);
if (userBalance.balance === '0') {
return null;
}
return {
collateral: whitelist.collateral_token,
balance: new Dec(userBalance.balance).toString(),
};
})
.filter(Boolean),
);
return collateralDenoms as UserCollateral[];
}
async getBorrowedValue(
getBorrowedValueOption: BorrowQueriesOptions,
): Promise<string> {
const { block } = await this._lcd.tendermint.blockInfo();
const loanAmount = await queryMarketBorrowerInfo({
lcd: this._lcd,
market: getBorrowedValueOption.market,
borrower: getBorrowedValueOption.address,
block_height: +block.header.height,
})(this._addressProvider);
return new Dec(loanAmount.loan_amount).div(1000000).toString();
}
async getBorrowLimit(
getBorrowLimitOption: BorrowQueriesOptions,
): Promise<string> {
const { block } = await this._lcd.tendermint.blockInfo();
const borrowLimitResponse = await queryOverseerBorrowLimit({
lcd: this._lcd,
overseer: getBorrowLimitOption.market,
borrower: getBorrowLimitOption.address,
block_time: +block.header.height,
})(this._addressProvider);
return new Dec(borrowLimitResponse.borrow_limit).div(1000000).toString();
}
}
| Borrow |
models.py | """ User account models """
from django.db import models
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.base_user import BaseUserManager
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, username, email, password, **extra_fields):
"""
Create and save a user with the given username, email, and password.
"""
if not username:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
username = self.model.normalize_username(username)
user = self.model(username=username, email=email, **extra_fields) | user.save(using=self._db)
return user
def create_user(self, username, email=None, password=None, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(username, email, password, **extra_fields)
def create_superuser(self, username, email=None, password=None, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(username, email, password, **extra_fields)
class User(AbstractUser):
email = models.EmailField(unique=True)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
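# USERNAME_FIELD switches authentication to the unique email field;
# REQUIRED_FIELDS lists the extra fields createsuperuser prompts for (none here).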
class Meta:
verbose_name = "user"
verbose_name_plural = "users"
app_label = "users" | user.set_password(password) |
logger.gen.go | // Code generated by github.com/doovemax/assh/contrib/generate-loggers.sh
package utils
import "go.uber.org/zap"
func logger() *zap.Logger | {
return zap.L().Named("assh.pkg.utils")
} |
|
app.component.ts | import {Component} from '@angular/core';
@Component({
selector: 'my-app',
template: `
<h1>{{title}}</h1>
<h2>My Heroes</h2>
<ul class="heroes">
<li *ngFor="let hero of heroes"
(click)="onSelect(hero)"
[class.selected]="hero === selectedHero">
<span class="badge">{{hero.id}}</span> {{hero.name}}
</li>
</ul>
<div *ngIf="selectedHero">
<h2>{{selectedHero.name}} details!</h2>
<div><label>id: </label>{{selectedHero.id}}</div>
<div>
<label>name: </label>
<input [(ngModel)]="selectedHero.name" placeholder="name"/>
</div>
</div>
`,
styles: [`
.selected {
background-color: #CFD8DC !important;
color: white;
}
.heroes {
margin: 0 0 2em 0;
list-style-type: none;
padding: 0;
width: 15em;
}
.heroes li {
cursor: pointer;
position: relative;
left: 0;
background-color: #EEE;
margin: .5em;
padding: .3em 0;
height: 1.6em;
border-radius: 4px;
}
.heroes li.selected:hover {
background-color: #BBD8DC !important;
color: white;
}
.heroes li:hover {
color: #607D8B;
background-color: #DDD;
left: .1em;
}
.heroes .text {
position: relative;
top: -3px;
}
.heroes .badge {
display: inline-block;
font-size: small;
color: white;
padding: 0.8em 0.7em 0 0.7em;
background-color: #607D8B;
line-height: 1em;
position: relative;
left: -1px;
top: -4px;
height: 1.8em;
margin-right: .8em;
border-radius: 4px 0 0 4px;
}
`]
})
export class AppComponent {
title = 'Tour of Heroes';
heroes = HEROES;
selectedHero: Hero;
onSelect(hero: Hero) { this.selectedHero = hero; }
}
export class | {
id: number;
name: string;
}
const HEROES: Hero[] = [
{ id: 11, name: 'Mr. Nice' },
{ id: 12, name: 'Narco' },
{ id: 13, name: 'Bombasto' },
{ id: 14, name: 'Celeritas' },
{ id: 15, name: 'Magneta' },
{ id: 16, name: 'RubberMan' },
{ id: 17, name: 'Dynama' },
{ id: 18, name: 'Dr IQ' },
{ id: 19, name: 'Magma' },
{ id: 20, name: 'Tornado' }
];
| Hero |
test.py | from __future__ import absolute_import, division
from unittest import TestCase
from binascii import hexlify
import inbloom
class InBloomTestCase(TestCase):
def | (self):
bf = inbloom.Filter(20, 0.01)
keys = ["foo", "bar", "foosdfsdfs", "fossdfsdfo", "foasdfasdfasdfasdfo", "foasdfasdfasdasdfasdfasdfasdfasdfo"]
faux = ["goo", "gar", "gaz"]
for k in keys:
bf.add(k)
for k in keys:
assert bf.contains(k)
for k in faux:
assert not bf.contains(k)
expected = '02000C0300C2246913049E040002002000017614002B0002'
actual = hexlify(bf.buffer()).upper()
assert expected == actual
def test_dump_load(self):
bf = inbloom.Filter(20, 0.01)
bf.add('abc')
expected = '620d006400000014000000000020001000080000000000002000100008000400'
actual = hexlify(inbloom.dump(bf))
assert expected == actual
bf = inbloom.load(inbloom.dump(bf))
actual = hexlify(inbloom.dump(bf))
assert expected == actual
data = inbloom.dump(bf)
data = str([0xff, 0xff]) + data[2:]
with self.assertRaisesRegexp(inbloom.error, "checksum mismatch"):
inbloom.load(data)
data = data[:4]
with self.assertRaisesRegexp(inbloom.error, "incomplete payload"):
inbloom.load(data)
| test_functionality |
system.go | package api
import (
"net/http"
"github.com/bleenco/abstruse/internal/version"
"github.com/bleenco/abstruse/pkg/render"
"go.uber.org/zap"
)
type system struct {
logger *zap.SugaredLogger
}
func | (logger *zap.Logger) system {
return system{
logger: logger.With(zap.String("api", "system")).Sugar(),
}
}
func (s *system) version() http.HandlerFunc {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
render.JSON(w, http.StatusOK, version.GetBuildInfo())
})
}
| newSystem |
cisco_fc_zone_client_cli.py | # (c) Copyright 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Script to push the zone configuration to Cisco SAN switches.
"""
import random
import re
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder import ssh_utils
from cinder import utils
import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant
LOG = logging.getLogger(__name__)
class CiscoFCZoneClientCLI(object):
"""Cisco FC zone client cli implementation.
OpenStack Fibre Channel zone client cli connector
to manage FC zoning in Cisco SAN fabrics.
Version history:
1.0 - Initial Cisco FC zone client cli
"""
switch_ip = None
switch_port = '22'
switch_user = 'admin'
switch_pwd = 'none'
def __init__(self, ipaddress, username, password, port, vsan):
"""initializing the client."""
self.switch_ip = ipaddress
self.switch_port = port
self.switch_user = username
self.switch_pwd = password
self.fabric_vsan = vsan
self.sshpool = None
def get_active_zone_set(self):
"""Return the active zone configuration.
Return active zoneset from fabric. When none of the configurations
are active then it will return empty map.
:returns: Map -- active zone set map in the following format
{
'zones':
{'openstack50060b0000c26604201900051ee8e329':
['50060b0000c26604', '201900051ee8e329']
},
'active_zone_config': 'OpenStack_Cfg'
}
"""
zone_set = {}
zone = {}
zone_member = None
zone_name = None
switch_data = None
zone_set_name = None
try:
switch_data = self._get_switch_info(
[ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan,
' | no-more'])
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed getting active zone set "
"from fabric %s"), self.switch_ip)
try:
for line in switch_data:
# Split on non-word characters,
line_split = re.split('[\s\[\]]+', line)
if ZoneConstant.CFG_ZONESET in line_split:
# zoneset name [name] vsan [vsan]
zone_set_name = \
line_split[line_split.index(ZoneConstant.CFG_ZONESET)
+ 2]
continue
if ZoneConstant.CFG_ZONE in line_split:
# zone name [name] vsan [vsan]
zone_name = \
line_split[line_split.index(ZoneConstant.CFG_ZONE) + 2]
zone[zone_name] = list()
continue
if ZoneConstant.CFG_ZONE_MEMBER in line_split:
# Examples:
# pwwn c0:50:76:05:15:9f:00:12
# * fcid 0x1e01c0 [pwwn 50:05:07:68:02:20:48:04] [V7K_N1P2]
zone_member = \
line_split[
line_split.index(ZoneConstant.CFG_ZONE_MEMBER) + 1]
zone_member_list = zone.get(zone_name)
zone_member_list.append(zone_member)
zone_set[ZoneConstant.CFG_ZONES] = zone
zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] = zone_set_name
except Exception as ex:
# In case of parsing error here, it should be malformed cli output.
msg = _("Malformed zone configuration: (switch=%(switch)s "
"zone_config=%(zone_config)s)."
) % {'switch': self.switch_ip,
'zone_config': switch_data}
LOG.error(msg)
exc_msg = _("Exception: %s") % six.text_type(ex)
LOG.exception(exc_msg)
raise exception.FCZoneDriverException(reason=msg)
return zone_set
def add_zones(self, zones, activate, fabric_vsan, active_zone_set,
zone_status):
"""Add zone configuration.
This method will add the zone configuration passed by user.
input params:
zones - zone names mapped to members and VSANs.
zone members are colon separated but case-insensitive
{ zonename1:[zonememeber1,zonemember2,...],
zonename2:[zonemember1, zonemember2,...]...}
e.g: {'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']
}
activate - True/False
"""
LOG.debug("Add Zones - Zones passed: %s", zones)
LOG.debug("Active zone set:%s", active_zone_set)
zone_list = active_zone_set[ZoneConstant.CFG_ZONES]
LOG.debug("zone list:%s", zone_list)
LOG.debug("zone status:%s", zone_status)
cfg_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG]
zone_cmds = [['conf'],
['zoneset', 'name', cfg_name, 'vsan', fabric_vsan]]
for zone in zones.keys():
# if zone exists, it's an update: delete & insert
LOG.debug("Update call")
if zone in zone_list:
# Response from get_active_zone_set strips colons from WWPNs
current_zone = set(zone_list[zone])
new_wwpns = map(lambda x: x.lower().replace(':', ''),
zones[zone])
new_zone = set(new_wwpns)
if current_zone != new_zone:
try:
self.delete_zones([zone], activate, fabric_vsan,
active_zone_set, zone_status)
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Deleting zone failed %s"), zone)
LOG.debug("Deleted Zone before insert : %s", zone)
zone_cmds.append(['zone', 'name', zone])
for member in zones[zone]:
zone_cmds.append(['member', 'pwwn', member])
zone_cmds.append(['end'])
try:
LOG.debug("Add zones: Config cmd to run:%s", zone_cmds)
self._ssh_execute(zone_cmds, True, 1)
if activate: | self._cfg_save()
except Exception as e:
msg = _("Creating and activating zone set failed: "
"(Zone set=%(zoneset)s error=%(err)s)."
) % {'zoneset': cfg_name, 'err': six.text_type(e)}
LOG.error(msg)
raise exception.CiscoZoningCliException(reason=msg)
def activate_zoneset(self, cfgname, fabric_vsan, zone_status):
"""Method to Activate the zone config. Param cfgname - ZonesetName."""
LOG.debug("zone status:%s", zone_status)
cmd_list = [['conf'],
['zoneset', 'activate', 'name', cfgname, 'vsan',
self.fabric_vsan]]
if zone_status['mode'] == 'enhanced':
cmd_list.append(['zone', 'commit', 'vsan', fabric_vsan])
cmd_list.append(['end'])
return self._ssh_execute(cmd_list, True, 1)
def get_zoning_status(self):
"""Return the zoning mode and session for a zoneset."""
zone_status = {}
try:
switch_data = self._get_switch_info(
[ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan])
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed getting zone status "
"from fabric %s"), self.switch_ip)
try:
for line in switch_data:
# Split on non-word characters,
line_split = re.split('[\s\[\]]+', line)
if 'mode:' in line_split:
# mode: <enhanced|basic>
zone_status['mode'] = line_split[line_split.index('mode:')
+ 1]
continue
if 'session:' in line_split:
# session: <none|a value other than none>
zone_status['session'] = \
line_split[line_split.index('session:') + 1]
continue
except Exception as ex:
# In case of parsing error here, it should be malformed cli output.
msg = _("Malformed zone status: (switch=%(switch)s "
"zone_config=%(zone_config)s)."
) % {'switch': self.switch_ip,
'zone_config': switch_data}
LOG.error(msg)
exc_msg = _("Exception: %s") % six.text_type(ex)
LOG.exception(exc_msg)
raise exception.FCZoneDriverException(reason=msg)
return zone_status
def delete_zones(self, zone_names, activate, fabric_vsan, active_zone_set,
zone_status):
"""Delete zones from fabric.
Method to delete the active zone config zones
params zone_names: zoneNames separated by semicolon
params activate: True/False
"""
LOG.debug("zone_names %s", zone_names)
active_zoneset_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG]
cmds = [['conf'],
['zoneset', 'name', active_zoneset_name, 'vsan',
fabric_vsan]]
try:
for zone in set(zone_names.split(';')):
cmds.append(['no', 'zone', 'name', zone])
cmds.append(['end'])
LOG.debug("Delete zones: Config cmd to run:%s", cmds)
self._ssh_execute(cmds, True, 1)
if activate:
self.activate_zoneset(active_zoneset_name, fabric_vsan,
zone_status)
self._cfg_save()
except Exception as e:
msg = _("Deleting zones failed: (command=%(cmd)s error=%(err)s)."
) % {'cmd': cmds, 'err': six.text_type(e)}
LOG.error(msg)
raise exception.CiscoZoningCliException(reason=msg)
def get_nameserver_info(self):
"""Get name server data from fabric.
This method will return the connected node port wwn list(local
and remote) for the given switch fabric
show fcns database
"""
cli_output = None
return_list = []
try:
cli_output = self._get_switch_info([ZoneConstant.FCNS_SHOW,
self.fabric_vsan])
except exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed collecting fcns database "
"info for fabric %s"), self.switch_ip)
if (cli_output):
return_list = self._parse_ns_output(cli_output)
LOG.info(_LI("Connector returning fcnsinfo-%s"), return_list)
return return_list
def _cfg_save(self):
cmd = ['copy', 'running-config', 'startup-config']
self._run_ssh(cmd, True, 1)
def _get_switch_info(self, cmd_list):
stdout, stderr, sw_data = None, None, None
try:
stdout, stderr = self._run_ssh(cmd_list, True, 1)
LOG.debug("CLI output from ssh - output:%s", stdout)
if (stdout):
sw_data = stdout.splitlines()
return sw_data
except processutils.ProcessExecutionError as e:
msg = _("Error while getting data via ssh: (command=%(cmd)s "
"error=%(err)s).") % {'cmd': cmd_list,
'err': six.text_type(e)}
LOG.error(msg)
raise exception.CiscoZoningCliException(reason=msg)
def _parse_ns_output(self, switch_data):
"""Parses name server data.
Parses nameserver raw data and adds the device port wwns to the list
:returns: List -- list of device port wwn from ns info
"""
return_list = []
for line in switch_data:
if not(" N " in line):
continue
linesplit = line.split()
if len(linesplit) > 2:
node_port_wwn = linesplit[2]
return_list.append(node_port_wwn)
else:
msg = _("Malformed show fcns database string: %s") % line
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
return return_list
def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
command = ' '.join(cmd_list)
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
min_size=1,
max_size=5)
last_exception = None
try:
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
return processutils.ssh_execute(
ssh,
command,
check_exit_code=check_exit_code)
except Exception as e:
msg = _("Exception: %s") % six.text_type(e)
LOG.error(msg)
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error running SSH command: %s") % command)
def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1):
"""Execute cli with status update.
Executes CLI commands where status return is expected.
cmd_list is a list of commands, where each command is itself
a list of parameters. We use utils.check_ssh_injection to check each
command, but then join then with " ; " to form a single command.
"""
# Check that each command is secure
for cmd in cmd_list:
utils.check_ssh_injection(cmd)
# Combine into a single command.
command = ' ; '.join(map(lambda x: ' '.join(x), cmd_list))
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
min_size=1,
max_size=5)
stdin, stdout, stderr = None, None, None
LOG.debug("Executing command via ssh: %s" % command)
last_exception = None
try:
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
stdin, stdout, stderr = ssh.exec_command(command)
greenthread.sleep(random.randint(20, 500) / 100.0)
channel = stdout.channel
exit_status = channel.recv_exit_status()
LOG.debug("Exit Status from ssh:%s", exit_status)
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug('Result was %s' % exit_status)
if check_exit_code and exit_status != 0:
raise processutils.ProcessExecutionError(
exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=command)
else:
return True
else:
return True
except Exception as e:
msg = _("Exception: %s") % six.text_type(e)
LOG.error(msg)
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
LOG.debug("Handling error case after SSH:%s", last_exception)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception as e:
with excutils.save_and_reraise_exception():
msg = (_("Error executing command via ssh: %s") %
six.text_type(e))
LOG.error(msg)
finally:
if stdin:
stdin.flush()
stdin.close()
if stdout:
stdout.close()
if stderr:
stderr.close()
def cleanup(self):
self.sshpool = None | self.activate_zoneset(cfg_name, fabric_vsan, zone_status) |
main.go | package server
import (
"github.com/gin-gonic/gin"
"heritago/backend/handlers"
log "heritago/backend/logger"
"heritago/backend/orm"
"heritago/backend/utils"
)
var host, port, gqlPath, gqlPgPath string
var isPgEnabled bool
func init() {
host = utils.MustGet("GQL_SERVER_HOST")
port = utils.MustGet("GQL_SERVER_PORT")
gqlPath = utils.MustGet("GQL_SERVER_GRAPHQL_PATH")
gqlPgPath = utils.MustGet("GQL_SERVER_GRAPHQL_PLAYGROUND_PATH")
isPgEnabled = utils.MustGetBool("GQL_SERVER_GRAPHQL_PLAYGROUND_ENABLED")
}
// Run spins up the server
func | (orm *orm.ORM) {
log.Info("GORM_CONNECTION_DSN: ", utils.MustGet("GORM_CONNECTION_DSN"))
endpoint := "http://" + host + ":" + port
r := gin.Default()
// Handlers
// Simple keep-alive/ping handler
r.GET("/ping", handlers.Ping())
// GraphQL handlers
// Playground handler
if isPgEnabled {
r.GET(gqlPgPath, handlers.PlaygroundHandler(gqlPath))
log.Info("GraphQL Playground @ " + endpoint + gqlPgPath)
}
r.POST(gqlPath, handlers.GraphqlHandler(orm))
log.Info("GraphQL @ " + endpoint + gqlPath)
// Run the server
// Inform the user where the server is listening
log.Info("Running @ " + endpoint)
// Print out and exit(1) to the OS if the server cannot run
log.Fatal(r.Run(host + ":" + port))
}
| Run |
typescriptPreprocessor.js | /**
* Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
* | * This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
const tsc = require('typescript');
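// Jest preprocessor: transpiles .ts/.tsx sources with the TypeScript compiler
// before Jest runs them; all other files pass through unchanged.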
module.exports = {
process(src, path) {
if (path.endsWith('.ts') || path.endsWith('.tsx')) {
return tsc.transpile(
src,
{
jsx: tsc.JsxEmit.React,
module: tsc.ModuleKind.CommonJS,
},
path,
[]
);
}
return src;
},
}; | |
Slicer.py | """
Tile real scn/svs files; used by Cutter.py
Created on 11/19/2018
*** Removed imlist storage to minimize memory usage 01/24/2019 ***
@author: RH
"""
from openslide import OpenSlide
import numpy as np
import pandas as pd
import multiprocessing as mp
import staintools
from PIL import Image
# check if a tile is background or not; return a blank pixel percentage score
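# (v_slide below keeps a tile only when this score falls between 0.01 and 0.4)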
def bgcheck(img, ts):
the_imagea = np.array(img)[:, :, :3]
the_imagea = np.nan_to_num(the_imagea)
mask = (the_imagea[:, :, :3] > 200).astype(np.uint8)
maskb = (the_imagea[:, :, :3] < 50).astype(np.uint8)
greya = ((np.ptp(the_imagea[0])) < 100).astype(np.uint8)
greyb = ((np.ptp(the_imagea[1])) < 100).astype(np.uint8)
greyc = ((np.ptp(the_imagea[2])) < 100).astype(np.uint8)
grey = greya * greyb * greyc
mask = mask[:, :, 0] * mask[:, :, 1] * mask[:, :, 2]
maskb = maskb[:, :, 0] * maskb[:, :, 1] * maskb[:, :, 2]
white = (np.sum(mask) + np.sum(maskb)) / (ts * ts) + grey
return white
# Tile color normalization
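# Uses staintools' Vahadane stain normalization: the normalizer is fitted to the
# reference image (sttd) and each tile is transformed to match its stain profile.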
def normalization(img, sttd):
img = np.array(img)[:, :, :3]
img = staintools.LuminosityStandardizer.standardize(img)
normalizer = staintools.StainNormalizer(method='vahadane')
normalizer.fit(sttd)
img = normalizer.transform(img)
img = Image.fromarray(img.astype('uint8'), 'RGB')
return img
# tile method; slp is the scn/svs image; n_y is the number of tiles that can be cut along the y column;
# x and y are the upper-left position of each tile; tile_size is the tile size; stepsize is the step size; x0 is the row to cut.
# outdir is the output directory for images;
# imloc records each tile's relative and absolute coordinates; imlist is a list of cut tiles (removed 01/24/2019).
def v_slide(slp, n_y, x, y, tile_size, stepsize, x0, outdir, level, dp, std):
# pid = os.getpid()
# print('{}: start working'.format(pid))
slide = OpenSlide(slp)
imloc = []
y0 = 0
target_x = x0 * stepsize
image_x = (target_x + x)*(4**level)
while y0 < n_y:
target_y = y0 * stepsize
image_y = (target_y + y)*(4**level)
img = slide.read_region((image_x, image_y), level, (tile_size, tile_size))
wscore = bgcheck(img, tile_size)
if 0.01 < wscore < 0.4:
img = img.resize((299, 299))
img = normalization(img, std)
if dp:
img.save(outdir + "/region_x-{}-y-{}_{}.png".format(image_x, image_y, str(dp)))
strr = outdir + "/region_x-{}-y-{}_{}.png".format(image_x, image_y, str(dp))
else:
img.save(outdir + "/region_x-{}-y-{}.png".format(image_x, image_y))
strr = outdir + "/region_x-{}-y-{}.png".format(image_x, image_y)
imloc.append([x0, y0, image_x, image_y, strr])
y0 += 1
slide.close()
return imloc
# image_file is the scn/svs name; outdir is the output directory; path_to_slide is where the scn/svs is stored.
# First open the slide, determine how many tiles can be cut, record the width of the residue edges,
# and calculate what the final output prediction heat map size should be. Then use multiprocessing to cut tiles and stack up
# the tiles and their position dictionaries.
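# Illustrative sizing (with the default ft=1): stepsize = 299 - 49 = 250, so a
# level that is 10,000 px wide gives n_x = int((10000 - 1) / 250) = 39 tile columns.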
def tile(image_file, outdir, level, std_img, path_to_slide="../images/", dp=None, ft=1):
slide = OpenSlide(path_to_slide+image_file)
slp = str(path_to_slide+image_file)
print(slp)
print(slide.level_dimensions)
bounds_width = slide.level_dimensions[level][0]
bounds_height = slide.level_dimensions[level][1]
x = 0
y = 0
half_width_region = 49*ft
full_width_region = 299*ft
stepsize = (full_width_region - half_width_region)
n_x = int((bounds_width - 1) / stepsize)
n_y = int((bounds_height - 1) / stepsize)
residue_x = int((bounds_width - n_x * stepsize)/50)
residue_y = int((bounds_height - n_y * stepsize)/50)
lowres = slide.read_region((x, y), 2, (int(n_x*stepsize/16), int(n_y*stepsize/16)))
lowres = np.array(lowres)[:,:,:3]
x0 = 0
# create multiprocessing pool
print(mp.cpu_count())
pool = mp.Pool(processes=mp.cpu_count())
tasks = [] | x0 += 1
# slice images with multiprocessing
temp = pool.starmap(v_slide, tasks)
tempdict = list(temp)
temp = None
pool.close()
pool.join()
tempdict = list(filter(None, tempdict))
imloc = []
list(map(imloc.extend, tempdict))
imlocpd = pd.DataFrame(imloc, columns = ["X_pos", "Y_pos", "X", "Y", "Loc"])
imlocpd = imlocpd.sort_values(["X_pos", "Y_pos"], ascending=[True, True])
imlocpd = imlocpd.reset_index(drop=True)
imlocpd = imlocpd.reset_index(drop=False)
imlocpd.columns = ["Num", "X_pos", "Y_pos", "X", "Y", "Loc"]
if dp:
imlocpd.to_csv(outdir + "/{}_dict.csv".format(dp), index=False)
else:
imlocpd.to_csv(outdir + "/dict.csv", index=False)
tempdict = None
ct = len(imloc)
print(ct)
return n_x, n_y, lowres, residue_x, residue_y, ct | while x0 < n_x:
task = tuple((slp, n_y, x, y, full_width_region, stepsize, x0, outdir, level, dp, std_img))
tasks.append(task) |
cnpmjs.js | 'use strict';
/** | * Desc:
*/
exports.name = 'https://r.cnpmjs.org/';
exports.cmd = 'npm.cmd config set registry="' + exports.name + '"'; | * Copyright (c) 2017 Copyright tj All Rights Reserved.
* Author: lipengxiang
* Date: 2018-06-02 16:36 |
node.go | // Copyright © 2017 ZhongAn Technology
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"fmt"
"net"
"net/http"
_ "net/http/pprof"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/spf13/viper"
"github.com/dappledger/AnnChain/chain/app"
"github.com/dappledger/AnnChain/chain/types"
etypes "github.com/dappledger/AnnChain/eth/core/types"
"github.com/dappledger/AnnChain/eth/core/vm"
"github.com/dappledger/AnnChain/eth/rlp"
"github.com/dappledger/AnnChain/gemmill"
"github.com/dappledger/AnnChain/gemmill/go-crypto"
"github.com/dappledger/AnnChain/gemmill/go-wire"
cmn "github.com/dappledger/AnnChain/gemmill/modules/go-common"
"github.com/dappledger/AnnChain/gemmill/modules/go-log"
"github.com/dappledger/AnnChain/gemmill/p2p"
"github.com/dappledger/AnnChain/gemmill/rpc/server"
gtypes "github.com/dappledger/AnnChain/gemmill/types"
)
type Node struct {
running int64
config *viper.Viper
privValidator *gtypes.PrivValidator
nodeInfo *p2p.NodeInfo
Angine *gemmill.Angine
AngineTune *gemmill.Tunes
Application gtypes.Application
GenesisDoc *gtypes.GenesisDoc
}
func queryPayLoadTxParser(txData []byte) ([]byte, error) {
btx := etypes.Transaction{}
err := rlp.DecodeBytes(txData, &btx)
if err != nil {
return nil, err
}
return btx.Data(), nil
}
func (nd *Node) ExecAdminTx(app *vm.AdminDBApp, tx []byte) error {
return nd.Angine.ExecAdminTx(app, tx)
}
func NewNode(conf *viper.Viper, runtime, appName string) (*Node, error) {
// new app
am, ok := app.AppMap[appName]
if !ok {
return nil, fmt.Errorf("app `%v` is not regiestered", appName)
}
initApp, err := am(conf)
if err != nil {
return nil, fmt.Errorf("create App instance error: %v", err)
}
// new angine
tune := &gemmill.Tunes{Conf: conf, Runtime: runtime}
newAngine, err := gemmill.NewAngine(initApp, tune)
if err != nil {
return nil, fmt.Errorf("new angine error: %v", err)
}
newAngine.SetQueryPayLoadTxParser(queryPayLoadTxParser)
newAngine.ConnectApp(initApp)
node := &Node{
Application: initApp,
Angine: newAngine,
AngineTune: tune,
GenesisDoc: newAngine.Genesis(),
nodeInfo: newAngine.GetNodeInfo(),
config: conf,
privValidator: newAngine.PrivValidator(),
}
vm.DefaultAdminContract.SetCallback(node.ExecAdminTx)
initApp.SetCore(newAngine)
return node, nil
}
func RunNode(config *viper.Viper, runtime, appName string) {
if err := RunNodeRet(config, runtime, appName); err != nil {
panic(err)
}
}
func RunNodeRet(config *viper.Viper, runtime, appName string) error {
node, err := NewNode(config, runtime, appName)
if err != nil {
return fmt.Errorf("failed to new node: %v", err)
}
if err := node.Start(); err != nil {
return fmt.Errorf("failed to start node: %v", err)
}
if config.GetString("rpc_laddr") != "" {
if _, err := node.StartRPC(); err != nil {
return fmt.Errorf("failed to start rpc: %v", err)
}
}
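// net/http/pprof is imported above for its side effects, so enabling this
// flag serves the standard /debug/pprof endpoints on :6060.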
if config.GetBool("pprof") {
go func() {
http.ListenAndServe(":6060", nil)
}()
}
fmt.Printf("node (%s) is running on %s:%d ......\n", node.Angine.Genesis().ChainID, node.NodeInfo().ListenHost(), node.NodeInfo().ListenPort())
cmn.TrapSignal(func() {
node.Stop()
})
return nil
}
// Call Start() after adding the listeners.
func (n *Node) Start() error {
if !atomic.CompareAndSwapInt64(&n.running, 0, 1) {
return fmt.Errorf("already started")
}
n.Application.Start()
if err := n.Angine.Start(); err != nil {
return fmt.Errorf("fail to start, error: %v", err)
}
// TODO timeout
for n.Angine.NoneGenesis() {
time.Sleep(500 * time.Millisecond)
}
return nil
}
func (n *Node) Stop() {
log.Info("Stopping Node")
if atomic.CompareAndSwapInt64(&n.running, 1, 0) {
n.Application.Stop()
n.Angine.Stop()
}
}
func (n *Node) IsRunning() bool {
return atomic.LoadInt64(&n.running) == 1
}
func makeNodeInfo(conf *viper.Viper, pubkey crypto.PubKey, p2pHost string, p2pPort uint16) *p2p.NodeInfo { |
func (n *Node) NodeInfo() *p2p.NodeInfo {
return n.nodeInfo
}
func (n *Node) StartRPC() ([]net.Listener, error) {
listenAddrs := strings.Split(n.config.GetString("rpc_laddr"), ",")
listeners := make([]net.Listener, len(listenAddrs))
for i, listenAddr := range listenAddrs {
mux := http.NewServeMux()
routes := n.rpcRoutes()
for _, v := range n.Angine.APIs() {
for n, h := range v {
routes[n] = h
}
}
server.RegisterRPCFuncs(mux, routes)
listener, err := server.StartHTTPServer(listenAddr, mux)
if err != nil {
return nil, err
}
listeners[i] = listener
}
return listeners, nil
}
func (n *Node) PrivValidator() *gtypes.PrivValidator {
return n.privValidator
}
func (n *Node) HealthStatus() int {
return n.Angine.HealthStatus()
}
//func (n *Node) GetAdminVote(data []byte, validator *gtypes.Validator) ([]byte, error) {
// clientJSON := client.NewClientJSONRPC(validator.RPCAddress) // all shard nodes share the same rpc address of the Node
// rpcResult := new(gtypes.RPCResult)
// _, err := clientJSON.Call("vote_admin_op", []interface{}{n.GenesisDoc.ChainID, data}, rpcResult)
// if err != nil {
// return nil, err
// }
// res := (*rpcResult).(*gtypes.ResultRequestAdminOP)
// if res.Code == gtypes.CodeType_OK {
// return res.Data, nil
// }
// return nil, fmt.Errorf(res.Log)
//}
func DefaultConf() *viper.Viper {
globalConf := viper.New()
// runtime, _ := cmd.Flags().GetString("runtime")
return globalConf
}
|
nodeInfo := &p2p.NodeInfo{
PubKey: pubkey,
Moniker: conf.GetString("moniker"),
Network: conf.GetString("chain_id"),
SigndPubKey: conf.GetString("signbyCA"),
Version: types.GetCommitVersion(),
Other: []string{
cmn.Fmt("wire_version=%v", wire.Version),
cmn.Fmt("p2p_version=%v", p2p.Version),
// cmn.Fmt("consensus_version=%v", n.StateMachine.Version()),
// cmn.Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version),
cmn.Fmt("node_start_at=%s", strconv.FormatInt(time.Now().Unix(), 10)),
cmn.Fmt("commit_version=%s", types.GetCommitVersion()),
},
RemoteAddr: conf.GetString("rpc_laddr"),
ListenAddr: cmn.Fmt("%v:%v", p2pHost, p2pPort),
}
// We assume that the rpcListener has the same ExternalAddress.
// This is probably true because both P2P and RPC listeners use UPnP,
// except of course if the rpc is only bound to localhost
return nodeInfo
}
|
test_network_performance_high_auto.py |
# Testing module network_performance.high
import pytest
import ec2_compare.internal.network_performance.high
def | ():
assert len(ec2_compare.internal.network_performance.high.get_instances_list()) > 0
def test_get_internal_data_network_performance_high_get():
assert len(ec2_compare.internal.network_performance.high.get) > 0
| test_get_internal_data_network_performance_high_get_instances_list |
elevatorEx.py | class Elevator:
occupancy_limit = 8
def __init__(self, occupants=0):
self.floor = 0 | print('too many occupants', occupants - Elevator.occupancy_limit, 'left outside')
def add_occupants(self,num):
self.occupants += num
if self.occupants > Elevator.occupancy_limit:
print('too many occupants', self.occupants - Elevator.occupancy_limit, 'left outside')
self.occupants = Elevator.occupancy_limit
def remove_occupants(self,num):
if self.occupants - num > 0:
self.occupants -= num
else:
print('elevator empty')
self.occupants = 0
def goto_floor(self,floor):
if floor < -5 or floor > 50:
print('floor',floor,'does not exist')
else:
self.floor = floor
elevator1 = Elevator(6)
elevator1.add_occupants(7)
elevator2 = Elevator(11)
elevator1.goto_floor(20)
elevator1.remove_occupants(99)
elevator2.goto_floor(99)
print(elevator1.__dict__)
print(elevator2.__dict__)
"""
ATTRIBUTES
Occupants attribute which is 0 by default
floor attribute which is 0 by default
METHODS:
Add_occupants
Go to floor
PROPERTIES:
Occupants can only be added if the occupants limit (8) has not been exceeded
Only floors from -5 to 50 exist
""" | if occupants <= Elevator.occupancy_limit:
self.occupants = occupants
else:
self.occupants = Elevator.occupancy_limit |
encoders_test.go | package merkletree2
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestBlindedSHA512_256v1Encoder(t *testing.T) |
func TestBlindedSHA512_256v1EncoderHardcodedValues(t *testing.T) {
// To prevent changes to go-codec or similar from breaking existing merkle
// proofs, this test hardcodes some example values for the encoder and
// hasher.
encoder := NewBlindedSHA512_256v1Encoder()
hashLength := 32
hashb1 := make([]byte, hashLength)
hashb1[0] = 0x01
hash1 := Hash(hashb1)
hashb2 := make([]byte, hashLength)
hashb2[0] = 0x02
hash2 := Hash(hashb2)
enc, err := encoder.Encode(Node{INodes: []Hash{hash1}})
require.NoError(t, err)
require.Equal(t, []byte{0x1, 0x91, 0xc4, 0x20, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, enc)
enc, err = encoder.Encode(Node{INodes: []Hash{hash1, hash2}})
require.NoError(t, err)
require.Equal(t, []byte{0x1, 0x92, 0xc4, 0x20, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc4, 0x20, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, enc)
enc, err = encoder.Encode(Node{LeafHashes: []KeyHashPair{{Key: []byte{0x01}, Hash: hash1}}})
require.NoError(t, err)
require.Equal(t, []byte{0x2, 0x91, 0x92, 0xc4, 0x1, 0x1, 0xc4, 0x20, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, enc)
enc, err = encoder.Encode(Node{LeafHashes: []KeyHashPair{{Key: []byte{0x01}, Hash: hash1}, {Key: []byte{0x03}, Hash: hash2}}})
require.NoError(t, err)
require.Equal(t, []byte{0x2, 0x92, 0x92, 0xc4, 0x1, 0x1, 0xc4, 0x20, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x92, 0xc4, 0x1, 0x3, 0xc4, 0x20, 0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, enc)
_, hash, err := encoder.EncodeAndHashGeneric(Node{INodes: []Hash{hash1}})
require.NoError(t, err)
require.EqualValues(t, []byte{0x1d, 0x66, 0xcd, 0x98, 0xa1, 0x36, 0x3a, 0x96, 0x8c, 0xdf, 0x5f, 0xff, 0xb1, 0x8d, 0xff, 0xfa, 0xcf, 0xdb, 0xfe, 0x6, 0xc7, 0xbd, 0xe0, 0xe5, 0x4d, 0x58, 0x5d, 0x79, 0xd, 0x57, 0x50, 0x6d}, hash)
_, hash, err = encoder.EncodeAndHashGeneric(Node{INodes: []Hash{hash1, hash2}})
require.NoError(t, err)
require.EqualValues(t, []byte{0x85, 0xd5, 0x4f, 0xf7, 0x4c, 0x11, 0x6f, 0x86, 0x46, 0x63, 0x25, 0xf9, 0xc8, 0x69, 0xfb, 0xb8, 0x26, 0x71, 0x50, 0x4f, 0x7d, 0xe9, 0x73, 0x9f, 0x2e, 0x66, 0x76, 0xa5, 0x1e, 0x1f, 0x16, 0x55}, hash)
_, hash, err = encoder.EncodeAndHashGeneric(Node{LeafHashes: []KeyHashPair{{Key: []byte{0x01}, Hash: hash1}}})
require.NoError(t, err)
require.EqualValues(t, []byte{0x59, 0xfb, 0x86, 0x3b, 0x3a, 0x83, 0xdd, 0x92, 0x22, 0xa3, 0x85, 0x4a, 0x8a, 0x9d, 0x18, 0xb, 0x31, 0x4a, 0x9c, 0xd6, 0xb, 0x5e, 0x20, 0x79, 0xa9, 0xb8, 0xae, 0x41, 0xc9, 0xa1, 0xd8, 0x5d}, hash)
_, hash, err = encoder.EncodeAndHashGeneric(Node{LeafHashes: []KeyHashPair{{Key: []byte{0x01}, Hash: hash1}, {Key: []byte{0x03}, Hash: hash2}}})
require.NoError(t, err)
require.EqualValues(t, []byte{0xf5, 0x19, 0xdb, 0x70, 0x5c, 0x4f, 0x84, 0x3f, 0xcc, 0x30, 0x62, 0x45, 0x7e, 0xfb, 0x91, 0x31, 0x59, 0x9, 0xca, 0x81, 0xc0, 0x78, 0x13, 0x82, 0xcf, 0x96, 0x37, 0xfd, 0xa7, 0xf3, 0x35, 0x81}, hash)
ms := MasterSecret([]byte{0x00, 0x01})
ks := encoder.ComputeKeySpecificSecret(ms, Key([]byte{0x00, 0x03}))
require.Len(t, ks, hashLength)
require.EqualValues(t, []byte{0xa1, 0x87, 0x3f, 0x2b, 0x2b, 0x1c, 0x76, 0xfe, 0x41, 0x64, 0x83, 0xef, 0x4d, 0xb5, 0x3f, 0x5d, 0xa2, 0x73, 0x84, 0x4d, 0x8d, 0x7e, 0x47, 0xc5, 0xd3, 0x53, 0x7e, 0xc4, 0x3, 0x2d, 0x56, 0xd5}, ks)
value := "pasta"
encValue, err := encoder.Encode(value)
require.NoError(t, err)
require.Equal(t, []byte{0xa5, 0x70, 0x61, 0x73, 0x74, 0x61}, encValue)
h, err := encoder.HashKeyEncodedValuePairWithKeySpecificSecret(KeyEncodedValuePair{Key: Key([]byte{0x00, 0x01}), Value: encValue}, ks)
require.NoError(t, err)
require.EqualValues(t, []byte{0xab, 0x6c, 0xfe, 0x18, 0xda, 0x5f, 0x43, 0x52, 0x66, 0x6f, 0x1e, 0x56, 0xbe, 0x64, 0x1b, 0xd3, 0xf, 0x7, 0xa6, 0x32, 0x1b, 0xbd, 0xfa, 0x6f, 0xd2, 0xa1, 0x5a, 0xfd, 0xcb, 0xd5, 0xd3, 0xd3}, h)
h, err = encoder.HashKeyValuePairWithKeySpecificSecret(KeyValuePair{Key: Key([]byte{0x00, 0x01}), Value: value}, ks)
require.NoError(t, err)
require.EqualValues(t, []byte{0xab, 0x6c, 0xfe, 0x18, 0xda, 0x5f, 0x43, 0x52, 0x66, 0x6f, 0x1e, 0x56, 0xbe, 0x64, 0x1b, 0xd3, 0xf, 0x7, 0xa6, 0x32, 0x1b, 0xbd, 0xfa, 0x6f, 0xd2, 0xa1, 0x5a, 0xfd, 0xcb, 0xd5, 0xd3, 0xd3}, h)
}
| {
encoder := NewBlindedSHA512_256v1Encoder()
hashLength := 32
val1 := "pizza"
val2 := "pasta"
enc0, err := encoder.Encode(val1)
require.NoError(t, err)
require.NotEmpty(t, enc0)
dec := ""
err = encoder.Decode(&dec, enc0)
require.NoError(t, err)
require.Equal(t, val1, dec)
enc1, h, err := encoder.EncodeAndHashGeneric(val1)
require.NoError(t, err)
require.Len(t, h, hashLength)
require.NotEmpty(t, enc1)
require.Equal(t, enc0, enc1)
enc2, h2, err := encoder.EncodeAndHashGeneric(val2)
require.NoError(t, err)
require.Len(t, h, hashLength)
require.NotEqual(t, h, h2)
require.NotEmpty(t, enc2)
require.NotEqual(t, enc1, enc2)
ms, err := encoder.GenerateMasterSecret(0)
require.NoError(t, err)
require.Len(t, ms, hashLength)
ms2, err := encoder.GenerateMasterSecret(0)
require.NoError(t, err)
require.Len(t, ms2, hashLength)
require.NotEqual(t, ms, ms2)
ks := encoder.ComputeKeySpecificSecret(ms, Key([]byte{0x00, 0x01}))
require.Len(t, ks, hashLength)
ks2 := encoder.ComputeKeySpecificSecret(ms, Key([]byte{0x00, 0x01}))
require.Len(t, ks, hashLength)
require.Equal(t, ks, ks2)
ks3 := encoder.ComputeKeySpecificSecret(ms, Key([]byte{0x00, 0x02}))
require.Len(t, ks3, hashLength)
require.NotEqual(t, ks, ks3)
h0, err := encoder.HashKeyEncodedValuePairWithKeySpecificSecret(KeyEncodedValuePair{Key: Key([]byte{0x00, 0x01}), Value: enc0}, ks)
require.NoError(t, err)
require.Len(t, h, hashLength)
h, err = encoder.HashKeyValuePairWithKeySpecificSecret(KeyValuePair{Key: Key([]byte{0x00, 0x01}), Value: val1}, ks)
require.NoError(t, err)
require.Len(t, h, hashLength)
require.Equal(t, h0, h)
h2, err = encoder.HashKeyValuePairWithKeySpecificSecret(KeyValuePair{Key: Key([]byte{0x00, 0x01}), Value: val1}, ks)
require.NoError(t, err)
require.Len(t, h2, hashLength)
require.Equal(t, h, h2)
h3, err := encoder.HashKeyValuePairWithKeySpecificSecret(KeyValuePair{Key: Key([]byte{0x00, 0x01}), Value: val1}, ks3)
require.NoError(t, err)
require.Len(t, h3, hashLength)
require.NotEqual(t, h, h3)
h4, err := encoder.HashKeyValuePairWithKeySpecificSecret(KeyValuePair{Key: Key([]byte{0x00, 0x02}), Value: val1}, ks)
require.NoError(t, err)
require.Len(t, h4, hashLength)
require.NotEqual(t, h, h3)
} |
on-impl.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test if the on_unimplemented message override works
#![feature(on_unimplemented)]
#![feature(rustc_attrs)]
#[rustc_on_unimplemented = "invalid"]
trait Index<Idx: ?Sized> {
type Output: ?Sized;
fn index(&self, index: Idx) -> &Self::Output;
}
#[rustc_on_unimplemented = "a usize is required to index into a slice"]
impl Index<usize> for [i32] {
type Output = i32;
fn index(&self, index: usize) -> &i32 {
&self[index]
}
}
#[rustc_error]
fn | () {
Index::<u32>::index(&[1, 2, 3] as &[i32], 2u32);
//~^ ERROR E0277
//~| NOTE a usize is required
//~| NOTE required by
}
| main |
routes.spec.ts | /*!
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License.
*/
import * as assert from "assert";
import {
ICreateBlobParams,
ICreateBlobResponse,
ICreateCommitParams,
ICreateRefParams,
ICreateTreeParams,
} from "@fluidframework/gitresources";
import {
ICreateRefParamsExternal,
IGetRefParamsExternal,
} from "@fluidframework/server-services-client";
import * as async from "async";
import lorem from "lorem-ipsum";
import * as moniker from "moniker";
import request from "supertest";
import * as app from "../app";
/* eslint-disable import/no-internal-modules */
import { createBlob as createBlobInternal } from "../routes/git/blobs";
import { createCommit as createCommitInternal } from "../routes/git/commits";
import { createTree as createTreeInternal } from "../routes/git/trees";
import { getCommits } from "../routes/repository/commits";
/* eslint-enable import/no-internal-modules */
import { ExternalStorageManager } from "../externalStorageManager";
import * as utils from "../utils";
import * as testUtils from "./utils";
// TODO: (issue logged): replace email & name
const commitEmail = "[email protected]";
const commitName = "Kurt Berglund";
async function createRepo(supertest: request.SuperTest<request.Test>, owner: string, name: string) {
console.log("Entered create repo");
return supertest
.post(`/${owner}/repos`)
.set("Accept", "application/json")
.set("Content-Type", "application/json")
.send({ name })
.expect(201);
}
async function createBlob(
supertest: request.SuperTest<request.Test>,
owner: string,
repoName: string,
blob: ICreateBlobParams) {
return supertest
.post(`/repos/${owner}/${repoName}/git/blobs`)
.set("Accept", "application/json")
.set("Content-Type", "application/json")
.send(blob)
.expect(201);
}
async function createTree(
supertest: request.SuperTest<request.Test>,
owner: string,
repoName: string,
tree: ICreateTreeParams) {
return supertest
.post(`/repos/${owner}/${repoName}/git/trees`)
.set("Accept", "application/json")
.set("Content-Type", "application/json")
.send(tree)
.expect(201);
}
async function createCommit(
supertest: request.SuperTest<request.Test>,
owner: string,
repoName: string,
commit: ICreateCommitParams) {
return supertest
.post(`/repos/${owner}/${repoName}/git/commits`)
.set("Accept", "application/json")
.set("Content-Type", "application/json")
.send(commit)
.expect(201);
}
async function createRef(
supertest: request.SuperTest<request.Test>,
owner: string,
repoName: string,
ref: ICreateRefParams) {
return supertest
.post(`/repos/${owner}/${repoName}/git/refs`)
.set("Accept", "application/json")
.set("Content-Type", "application/json")
.send(ref)
.expect(201);
}
async function initBaseRepo(
supertest: request.SuperTest<request.Test>,
owner: string,
repoName: string,
testBlob: ICreateBlobParams,
testTree: ICreateTreeParams,
testCommit: ICreateCommitParams,
testRef: ICreateRefParams) {
await createRepo(supertest, owner, repoName);
await createBlob(supertest, owner, repoName, testBlob);
await createTree(supertest, owner, repoName, testTree);
await createCommit(supertest, owner, repoName, testCommit);
await createRef(supertest, owner, repoName, testRef);
}
describe("GitRest", () => {
describe("Routes", () => {
const testOwnerName = "owner";
const testRepoName = "test";
const testBlob: ICreateBlobParams = {
content: "Hello, World!",
encoding: "utf-8",
};
const testTree: ICreateTreeParams = {
tree: [
{
mode: "100644",
path: "file.txt",
sha: "b45ef6fec89518d314f546fd6c3025367b721684",
type: "blob",
}],
};
const testCommit: ICreateCommitParams = {
author: {
date: "Thu Jul 13 2017 20:17:40 GMT-0700 (PDT)",
email: commitEmail,
name: commitName,
},
message: "first commit",
parents: [],
tree: "bf4db183cbd07f48546a5dde098b4510745d79a1",
};
const testRef: ICreateRefParamsExternal = {
ref: "refs/heads/main",
sha: "cf0b592907d683143b28edd64d274ca70f68998e",
config: { enabled: true },
};
const testReadParams: IGetRefParamsExternal = {
config: { enabled: true },
};
const testRefWriteDisabled: ICreateRefParams = {
ref: "refs/heads/main",
sha: "cf0b592907d683143b28edd64d274ca70f68998e",
};
const externalStorageManager = new ExternalStorageManager(testUtils.defaultProvider);
testUtils.initializeBeforeAfterTestHooks(testUtils.defaultProvider);
// Create the git repo before and after each test
let supertest: request.SuperTest<request.Test>;
beforeEach(() => {
const testApp = app.create(testUtils.defaultProvider, externalStorageManager);
supertest = request(testApp);
});
// Git data API tests
describe("Git", () => {
describe("Repos", () => {
it("Can create and get a new repo", async () => {
await createRepo(supertest, testOwnerName, testRepoName);
return supertest
.get(`/repos/${testOwnerName}/${testRepoName}`)
.expect(200);
});
it("Returns 400 for an unknown repo", async () => {
return supertest
.get(`/repos/${testOwnerName}/${testRepoName}`)
.expect(400);
});
it("Rejects invalid repo names", async () => {
return supertest
.post(`/${testOwnerName}/repos`)
.set("Accept", "application/json")
.set("Content-Type", "application/json")
.send({ name: "../evilrepo" })
.expect(400);
});
it("Rejects missing repo names", async () => {
return supertest
.post(`/${testOwnerName}/repos`)
.expect(400);
});
});
describe("Blobs", () => {
it("Can create and retrieve a blob", async () => {
await createRepo(supertest, testOwnerName, testRepoName);
const result = await createBlob(supertest, testOwnerName, testRepoName, testBlob);
assert.equal(result.body.sha, "b45ef6fec89518d314f546fd6c3025367b721684");
return supertest
.get(`/repos/${testOwnerName}/${testRepoName}/git/blobs/${result.body.sha}`)
.expect(200)
.expect((getResult) => {
assert.equal(getResult.body.sha, result.body.sha);
});
});
it("Can create an existing blob without error", async () => {
await createRepo(supertest, testOwnerName, testRepoName);
await createBlob(supertest, testOwnerName, testRepoName, testBlob);
await createBlob(supertest, testOwnerName, testRepoName, testBlob);
});
});
describe("Trees", () => {
it("Can create and retrieve a tree", async () => {
await createRepo(supertest, testOwnerName, testRepoName);
await createBlob(supertest, testOwnerName, testRepoName, testBlob);
const tree = await createTree(supertest, testOwnerName, testRepoName, testTree);
assert.equal(tree.body.sha, "bf4db183cbd07f48546a5dde098b4510745d79a1");
return supertest
.get(`/repos/${testOwnerName}/${testRepoName}/git/trees/${tree.body.sha}`)
.expect(200)
.expect((getResult) => {
assert.equal(getResult.body.sha, tree.body.sha);
});
});
it("Can recursively retrieve a tree", async () => {
// Create a tree with a single sub directory
await createRepo(supertest, testOwnerName, testRepoName);
await createBlob(supertest, testOwnerName, testRepoName, testBlob);
await createTree(supertest, testOwnerName, testRepoName, testTree);
const parentBlob = await createBlob(
supertest,
testOwnerName,
testRepoName,
{ content: "Parent", encoding: "utf-8" });
const parentTree = {
tree: [
{
mode: "100644",
path: "parentBlob.txt",
sha: parentBlob.body.sha,
type: "blob",
},
{
mode: "040000",
path: "subdir",
sha: "bf4db183cbd07f48546a5dde098b4510745d79a1",
type: "tree",
}],
};
const tree = await createTree(supertest, testOwnerName, testRepoName, parentTree);
// And then a commit to reference it
const treeCommit: ICreateCommitParams = {
author: {
date: "Thu Jul 13 2017 20:17:40 GMT-0700 (PDT)",
email: commitEmail,
name: commitName,
},
message: "complex tree",
parents: [],
tree: tree.body.sha,
};
const commit = await createCommit(supertest, testOwnerName, testRepoName, treeCommit);
return supertest
.get(`/repos/${testOwnerName}/${testRepoName}/git/trees/${commit.body.tree.sha}?recursive=1`)
.expect(200);
});
});
describe("Commits", () => {
it("Can create and retrieve a commit", async () => {
await createRepo(supertest, testOwnerName, testRepoName);
await createBlob(supertest, testOwnerName, testRepoName, testBlob);
await createTree(supertest, testOwnerName, testRepoName, testTree);
const commit = await createCommit(supertest, testOwnerName, testRepoName, testCommit);
assert.equal(commit.body.sha, "cf0b592907d683143b28edd64d274ca70f68998e");
return supertest
.get(`/repos/${testOwnerName}/${testRepoName}/git/commits/${commit.body.sha}`)
.expect(200)
.expect((getResult) => {
assert.equal(getResult.body.sha, commit.body.sha);
});
});
});
describe("Refs", () => {
it("Can create and retrieve a reference", async () => {
await createRepo(supertest, testOwnerName, testRepoName);
await createBlob(supertest, testOwnerName, testRepoName, testBlob);
await createTree(supertest, testOwnerName, testRepoName, testTree);
await createCommit(supertest, testOwnerName, testRepoName, testCommit);
const ref = await createRef(supertest, testOwnerName, testRepoName, testRef);
assert.equal(ref.body.ref, testRef.ref);
return supertest
.get(`/repos/${testOwnerName}/${testRepoName}/git/${testRef.ref}`)
.expect(200)
.expect((getResult) => {
assert.equal(getResult.body.ref, ref.body.ref);
});
});
it("Can retrieve all references", async () => {
await initBaseRepo(supertest, testOwnerName, testRepoName, testBlob, testTree, testCommit, testRef);
return supertest
.get(`/repos/${testOwnerName}/${testRepoName}/git/refs`)
.expect(200)
.expect((getResult) => {
assert.equal(getResult.body.length, 1);
assert.equal(getResult.body[0].ref, testRef.ref);
});
});
it("Can patch to create a reference", async () => {
await initBaseRepo(supertest, testOwnerName, testRepoName, testBlob, testTree, testCommit, testRef);
return supertest
.patch(`/repos/${testOwnerName}/${testRepoName}/git/refs/heads/patch`)
.set("Accept", "application/json")
.set("Content-Type", "application/json")
.send({
force: true,
sha: "cf0b592907d683143b28edd64d274ca70f68998e",
config : { enabled : true },
})
.expect(200);
});
it("Can't patch an existing reference without force flag set", async () => {
await initBaseRepo(supertest, testOwnerName, testRepoName, testBlob, testTree, testCommit, testRef);
return supertest
.patch(`/repos/${testOwnerName}/${testRepoName}/git/${testRef.ref}`)
.set("Accept", "application/json")
.set("Content-Type", "application/json")
.send({
force: false,
sha: "cf0b592907d683143b28edd64d274ca70f68998e",
config : { enabled : true },
})
.expect(400);
});
it("Can patch an existing reference with force flag set", async () => {
await initBaseRepo(supertest, testOwnerName, testRepoName, testBlob, testTree, testCommit, testRef);
return supertest
.patch(`/repos/${testOwnerName}/${testRepoName}/git/${testRef.ref}`)
.set("Accept", "application/json")
.set("Content-Type", "application/json")
.send({
force: true,
sha: "cf0b592907d683143b28edd64d274ca70f68998e",
config : { enabled : true },
})
.expect(200);
});
it("Can delete a reference", async () => {
await initBaseRepo(
supertest,
testOwnerName,
testRepoName,
testBlob,
testTree,
testCommit,
testRefWriteDisabled);
await supertest
.delete(`/repos/${testOwnerName}/${testRepoName}/git/${testRefWriteDisabled.ref}`)
.expect(204);
return supertest
.get(`/repos/${testOwnerName}/${testRepoName}/git/${testRefWriteDisabled.ref}`)
.expect(400);
});
});
describe("Tags", () => {
it("Can create and retrive an annotated tag", async () => {
const tagParams = {
message: "Hello, World!",
object: "cf0b592907d683143b28edd64d274ca70f68998e",
tag: "v1.0",
tagger: {
date: "Thu Jul 13 2017 20:17:40 GMT-0700 (PDT)",
email: commitEmail,
name: commitName,
},
type: "commit",
};
await initBaseRepo(supertest, testOwnerName, testRepoName, testBlob, testTree, testCommit, testRef);
const tag = await supertest
.post(`/repos/${testOwnerName}/${testRepoName}/git/tags`)
.set("Accept", "application/json")
.set("Content-Type", "application/json")
.send(tagParams)
.expect(201);
assert.equal(tag.body.sha, "a8588b3913aa692c3642697d6f136cec470dd82c");
return supertest
.get(`/repos/${testOwnerName}/${testRepoName}/git/tags/${tag.body.sha}`)
.expect(200)
.expect((result) => {
assert.equal(result.body.sha, tag.body.sha);
});
});
});
});
describe("Stress", () => {
it("Run a long time and break", async () => {
const MaxTreeLength = 10;
const MaxParagraphs = 200;
await initBaseRepo(supertest, testOwnerName, testRepoName, testBlob, testTree, testCommit, testRef);
const manager = new utils.RepositoryManager(testUtils.defaultProvider.get("storageDir"));
let lastCommit;
async function runRound() {
const total = Math.floor(Math.random() * MaxTreeLength);
const blobsP: Promise<ICreateBlobResponse>[] = [];
for (let i = 0; i < total; i++) {
const param: ICreateBlobParams = {
content: lorem({
count: Math.floor(Math.random() * MaxParagraphs),
units: "paragraphs",
}),
encoding: "utf-8",
};
blobsP.push(createBlobInternal(manager, testOwnerName, testRepoName, param));
}
const blobs = await Promise.all(blobsP);
const files = blobs.map((blob) => {
return {
mode: "100644",
path: `${moniker.choose()}.txt`,
sha: blob.sha,
type: "blob",
};
});
const createTreeParams: ICreateTreeParams = {
tree: files,
};
const tree = await createTreeInternal(manager, testOwnerName, testRepoName, createTreeParams);
const parents: string[] = [];
if (lastCommit) {
const commits = await getCommits(
manager,
testOwnerName,
testRepoName,
lastCommit,
1,
testReadParams,
externalStorageManager);
const parentCommit = commits[0];
assert.ok(parentCommit.commit);
parents.push(parentCommit.sha);
}
const commitParams: ICreateCommitParams = {
author: {
date: new Date().toISOString(),
email: commitEmail,
name: commitName,
},
message: lorem({ count: 1, units: "sentences" }),
parents,
tree: tree.sha,
};
const commit = await createCommitInternal(manager, testOwnerName, testRepoName, commitParams);
lastCommit = commit.sha;
}
const queue = async.queue(
(task, callback) => {
runRound().then(() => callback(), (error) => callback(error));
},
5);
const resultP = new Promise<void>((resolve, reject) => {
queue.drain = () => {
resolve();
};
queue.error = (error) => {
reject(error);
};
});
for (let i = 0; i < 100; i++) {
queue.push(i);
}
return resultP;
}).timeout(4000);
});
// Higher level repository tests
describe("Repository", () => {
describe("Commits", () => {
it("Can list recent commits", async () => {
await initBaseRepo(supertest, testOwnerName, testRepoName, testBlob, testTree, testCommit, testRef);
return supertest
.get(`/repos/${testOwnerName}/${testRepoName}/commits?sha=main`)
.expect(200)
.expect((result) => {
assert.equal(result.body.length, 1);
});
});
});
describe("Content", () => {
it("Can retrieve a stored object", async () => {
await initBaseRepo(supertest, testOwnerName, testRepoName, testBlob, testTree, testCommit, testRef);
const fullRepoPath = `${testOwnerName}/${testRepoName}`;
return supertest
.get(`/repos/${fullRepoPath}/contents/${testTree.tree[0].path}?ref=${testRef.sha}`)
.expect(200);
});
});
});
});
});
changefeed_test.go | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"math"
"testing"
"time"
filter "github.com/pingcap/tidb-tools/pkg/table-filter"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tiflow/pkg/config"
cerror "github.com/pingcap/tiflow/pkg/errors"
cerrors "github.com/pingcap/tiflow/pkg/errors"
"github.com/stretchr/testify/require"
"github.com/tikv/client-go/v2/oracle"
)
func TestFillV1(t *testing.T) {
t.Parallel()
v1Config := `
{
"sink-uri":"blackhole://",
"opts":{
},
"start-ts":417136892416622595,
"target-ts":0,
"admin-job-type":0,
"sort-engine":"memory",
"sort-dir":".",
"config":{
"case-sensitive":true,
"filter":{
"do-tables":[
{
"db-name":"test",
"tbl-name":"tbl1"
},
{
"db-name":"test",
"tbl-name":"tbl2"
}
],
"do-dbs":[
"test1",
"sys1"
],
"ignore-tables":[
{
"db-name":"test",
"tbl-name":"tbl3"
},
{
"db-name":"test",
"tbl-name":"tbl4"
}
],
"ignore-dbs":[
"test",
"sys"
],
"ignore-txn-start-ts":[
1,
2
],
"ddl-allow-list":"AQI="
},
"mounter":{
"worker-num":64
},
"sink":{
"dispatch-rules":[
{
"db-name":"test",
"tbl-name":"tbl3",
"rule":"ts"
},
{
"db-name":"test",
"tbl-name":"tbl4",
"rule":"rowid"
}
]
},
"cyclic-replication":{
"enable":true,
"replica-id":1,
"filter-replica-ids":[
2,
3
],
"id-buckets":4,
"sync-ddl":true
}
}
}
`
cfg := &ChangeFeedInfo{}
err := cfg.Unmarshal([]byte(v1Config))
require.Nil(t, err)
require.Equal(t, &ChangeFeedInfo{
SinkURI: "blackhole://",
Opts: map[string]string{
"_cyclic_relax_sql_mode": `{"enable":true,"replica-id":1,"filter-replica-ids":[2,3],"id-buckets":4,"sync-ddl":true}`,
},
StartTs: 417136892416622595,
Engine: "memory",
SortDir: ".",
Config: &config.ReplicaConfig{
CaseSensitive: true,
Filter: &config.FilterConfig{
MySQLReplicationRules: &filter.MySQLReplicationRules{
DoTables: []*filter.Table{{
Schema: "test",
Name: "tbl1",
}, {
Schema: "test",
Name: "tbl2",
}},
DoDBs: []string{"test1", "sys1"},
IgnoreTables: []*filter.Table{{
Schema: "test",
Name: "tbl3",
}, {
Schema: "test",
Name: "tbl4",
}},
IgnoreDBs: []string{"test", "sys"},
},
IgnoreTxnStartTs: []uint64{1, 2},
DDLAllowlist: []model.ActionType{1, 2},
},
Mounter: &config.MounterConfig{
WorkerNum: 64,
},
Sink: &config.SinkConfig{
DispatchRules: []*config.DispatchRule{
{Matcher: []string{"test.tbl3"}, Dispatcher: "ts"},
{Matcher: []string{"test.tbl4"}, Dispatcher: "rowid"},
},
},
Cyclic: &config.CyclicConfig{
Enable: true,
ReplicaID: 1,
FilterReplicaID: []uint64{2, 3},
IDBuckets: 4,
SyncDDL: true,
},
},
}, cfg)
}
func TestVerifyAndComplete(t *testing.T) {
t.Parallel()
info := &ChangeFeedInfo{
SinkURI: "blackhole://",
Opts: map[string]string{},
StartTs: 417257993615179777,
Config: &config.ReplicaConfig{
CaseSensitive: true,
EnableOldValue: true,
CheckGCSafePoint: true,
},
}
err := info.VerifyAndComplete()
require.Nil(t, err)
require.Equal(t, SortUnified, info.Engine)
marshalConfig1, err := info.Config.Marshal()
require.Nil(t, err)
defaultConfig := config.GetDefaultReplicaConfig()
marshalConfig2, err := defaultConfig.Marshal()
require.Nil(t, err)
require.Equal(t, marshalConfig2, marshalConfig1)
}
func TestFixStateIncompatible(t *testing.T) {
t.Parallel()
// Test to fix incompatible states.
testCases := []struct {
info *ChangeFeedInfo
expectedState FeedState
}{
{
info: &ChangeFeedInfo{
AdminJobType: AdminStop,
State: StateNormal,
Error: nil,
CreatorVersion: "",
SinkURI: "mysql://root:[email protected]:3306/",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedState: StateStopped,
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminStop,
State: StateNormal,
Error: nil,
CreatorVersion: "4.0.14",
SinkURI: "mysql://root:[email protected]:3306/",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedState: StateStopped,
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminStop,
State: StateNormal,
Error: nil,
CreatorVersion: "5.0.5",
SinkURI: "mysql://root:[email protected]:3306/",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedState: StateStopped,
},
}
for _, tc := range testCases {
tc.info.FixIncompatible()
require.Equal(t, tc.expectedState, tc.info.State)
}
}
func TestFixSinkProtocolIncompatible(t *testing.T) {
t.Parallel()
// Test to fix incompatible protocols.
configTestCases := []struct {
info *ChangeFeedInfo
expectedProtocol config.Protocol
}{
{
info: &ChangeFeedInfo{
AdminJobType: AdminStop,
State: StateStopped,
Error: nil,
CreatorVersion: "",
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolAvro.String()},
},
},
expectedProtocol: config.ProtocolAvro,
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminStop,
State: StateStopped,
Error: nil,
CreatorVersion: "",
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedProtocol: config.ProtocolOpen,
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminStop,
State: StateStopped,
Error: nil,
CreatorVersion: "",
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: "random"},
},
},
expectedProtocol: config.ProtocolOpen,
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminStop,
State: StateStopped,
Error: nil,
CreatorVersion: "5.3.0",
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedProtocol: config.ProtocolOpen,
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminStop,
State: StateStopped,
Error: nil,
CreatorVersion: "5.3.0",
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: "random"},
},
},
expectedProtocol: config.ProtocolOpen,
},
}
for _, tc := range configTestCases {
tc.info.FixIncompatible()
var protocol config.Protocol
err := protocol.FromString(tc.info.Config.Sink.Protocol)
require.Nil(t, err)
require.Equal(t, tc.expectedProtocol, protocol)
}
sinkURITestCases := []struct {
info *ChangeFeedInfo
expectedSinkURI string
}{
{
info: &ChangeFeedInfo{
AdminJobType: AdminStop,
State: StateStopped,
Error: nil,
CreatorVersion: "",
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2?protocol=canal",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedSinkURI: "kafka://127.0.0.1:9092/ticdc-test2?protocol=canal",
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminStop,
State: StateStopped,
Error: nil,
CreatorVersion: "",
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2?protocol=random",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedSinkURI: "kafka://127.0.0.1:9092/ticdc-test2?protocol=open-protocol",
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminStop,
State: StateStopped,
Error: nil,
CreatorVersion: "5.3.0",
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2?protocol=canal",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedSinkURI: "kafka://127.0.0.1:9092/ticdc-test2?protocol=canal",
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminStop,
State: StateStopped,
Error: nil,
CreatorVersion: "5.3.0",
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2?protocol=random",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedSinkURI: "kafka://127.0.0.1:9092/ticdc-test2?protocol=open-protocol",
},
}
for _, tc := range sinkURITestCases {
tc.info.FixIncompatible()
require.Equal(t, tc.expectedSinkURI, tc.info.SinkURI)
}
}
func TestFixState(t *testing.T) {
t.Parallel()
testCases := []struct {
info *ChangeFeedInfo
expectedState FeedState
}{
{
info: &ChangeFeedInfo{
AdminJobType: AdminNone,
State: StateNormal,
Error: nil,
},
expectedState: StateNormal,
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminResume,
State: StateNormal,
Error: nil,
},
expectedState: StateNormal,
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminNone,
State: StateNormal,
Error: &RunningError{
Code: string(cerrors.ErrGCTTLExceeded.RFCCode()),
},
},
expectedState: StateFailed,
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminResume,
State: StateNormal,
Error: &RunningError{
Code: string(cerrors.ErrGCTTLExceeded.RFCCode()),
},
},
expectedState: StateFailed,
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminNone,
State: StateNormal,
Error: &RunningError{
Code: string(cerrors.ErrClusterIDMismatch.RFCCode()),
},
},
expectedState: StateError,
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminResume,
State: StateNormal,
Error: &RunningError{
Code: string(cerrors.ErrClusterIDMismatch.RFCCode()),
},
},
expectedState: StateError,
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminStop,
State: StateNormal,
Error: nil,
},
expectedState: StateStopped,
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminFinish,
State: StateNormal,
Error: nil,
},
expectedState: StateFinished,
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminRemove,
State: StateNormal,
Error: nil,
},
expectedState: StateRemoved,
},
{
info: &ChangeFeedInfo{
AdminJobType: AdminRemove,
State: StateNormal,
Error: nil,
},
expectedState: StateRemoved,
},
}
for _, tc := range testCases {
tc.info.fixState()
require.Equal(t, tc.expectedState, tc.info.State)
}
}
func TestFixSinkProtocol(t *testing.T) {
t.Parallel()
// Test fixing the protocol in the configuration.
configTestCases := []struct {
info *ChangeFeedInfo
expectedProtocol config.Protocol
}{
{
info: &ChangeFeedInfo{
SinkURI: "mysql://root:[email protected]:3306/",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedProtocol: config.ProtocolOpen,
},
{
info: &ChangeFeedInfo{
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolCanal.String()},
},
},
expectedProtocol: config.ProtocolCanal,
},
{
info: &ChangeFeedInfo{
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedProtocol: config.ProtocolOpen,
},
{
info: &ChangeFeedInfo{
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: "random"},
},
},
expectedProtocol: config.ProtocolOpen,
},
}
for _, tc := range configTestCases {
tc.info.fixSinkProtocol()
var protocol config.Protocol
err := protocol.FromString(tc.info.Config.Sink.Protocol)
require.Nil(t, err)
require.Equal(t, tc.expectedProtocol, protocol)
}
// Test fixing the protocol in SinkURI.
sinkURITestCases := []struct {
info *ChangeFeedInfo
expectedSinkURI string
}{
{
info: &ChangeFeedInfo{
SinkURI: "mysql://root:[email protected]:3306/",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedSinkURI: "mysql://root:[email protected]:3306/",
},
{
info: &ChangeFeedInfo{
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolCanal.String()},
},
},
expectedSinkURI: "kafka://127.0.0.1:9092/ticdc-test2",
},
{
info: &ChangeFeedInfo{
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2?protocol=canal",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedSinkURI: "kafka://127.0.0.1:9092/ticdc-test2?protocol=canal",
},
{
info: &ChangeFeedInfo{
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2?protocol=random",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedSinkURI: "kafka://127.0.0.1:9092/ticdc-test2?protocol=open-protocol",
},
{
info: &ChangeFeedInfo{
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2?protocol=random&max-message-size=15",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedSinkURI: "kafka://127.0.0.1:9092/ticdc-test2?max-message-size=15&protocol=open-protocol",
},
{
info: &ChangeFeedInfo{
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2?protocol=default&max-message-size=15",
Config: &config.ReplicaConfig{
Sink: &config.SinkConfig{Protocol: config.ProtocolDefault.String()},
},
},
expectedSinkURI: "kafka://127.0.0.1:9092/ticdc-test2?max-message-size=15&protocol=open-protocol",
},
}
for _, tc := range sinkURITestCases {
tc.info.fixSinkProtocol()
require.Equal(t, tc.expectedSinkURI, tc.info.SinkURI)
}
}
func TestChangeFeedInfoClone(t *testing.T) {
t.Parallel()
info := &ChangeFeedInfo{
SinkURI: "blackhole://",
Opts: map[string]string{},
StartTs: 417257993615179777,
Config: &config.ReplicaConfig{
CaseSensitive: true,
EnableOldValue: true,
CheckGCSafePoint: true,
},
}
cloned, err := info.Clone()
require.Nil(t, err)
sinkURI := "mysql://unix:/var/run/tidb.sock"
cloned.SinkURI = sinkURI
cloned.Config.EnableOldValue = false
require.Equal(t, sinkURI, cloned.SinkURI)
require.False(t, cloned.Config.EnableOldValue)
require.Equal(t, "blackhole://", info.SinkURI)
require.True(t, info.Config.EnableOldValue)
}
func TestChangefeedInfoStringer(t *testing.T) {
t.Parallel()
testcases := []struct {
info *ChangeFeedInfo
expectedSinkURIRegexp string
}{
{
&ChangeFeedInfo{
SinkURI: "blackhole://",
StartTs: 418881574869139457,
},
`.*blackhole:.*`,
},
{
&ChangeFeedInfo{
SinkURI: "kafka://127.0.0.1:9092/ticdc-test2",
StartTs: 418881574869139457,
},
`.*kafka://\*\*\*/ticdc-test2.*`,
},
{
&ChangeFeedInfo{
SinkURI: "mysql://root:[email protected]:3306/",
StartTs: 418881574869139457,
},
`.*mysql://username:password@\*\*\*/.*`,
},
{
&ChangeFeedInfo{
SinkURI: "mysql://[email protected]:3306/",
StartTs: 418881574869139457,
},
`.*mysql://username:password@\*\*\*/.*`,
},
{
&ChangeFeedInfo{
SinkURI: "mysql://root:test%21%23%24%25%5E%26%[email protected]:3306/",
StartTs: 418881574869139457,
},
`.*mysql://username:password@\*\*\*/.*`,
},
}
for _, tc := range testcases {
require.Regexp(t, tc.expectedSinkURIRegexp, tc.info.String())
}
}
func TestValidateChangefeedID(t *testing.T) {
t.Parallel()
tests := []struct {
name string
id string
wantErr bool
}{
{
name: "alphabet", | {
name: "number",
id: "01131323",
wantErr: false,
},
{
name: "mixed",
id: "9ff52acaA-aea6-4022-8eVc4-fbee3fD2c7890",
wantErr: false,
},
{
name: "len==128",
id: "1234567890-1234567890-1234567890-1234567890-1234567890-1234567890-1234567890-1234567890-1234567890123456789012345678901234567890",
wantErr: false,
},
{
name: "empty string 1",
id: "",
wantErr: true,
},
{
name: "empty string 2",
id: " ",
wantErr: true,
},
{
name: "test_task",
id: "test_task ",
wantErr: true,
},
{
name: "job$",
id: "job$ ",
wantErr: true,
},
{
name: "test-",
id: "test-",
wantErr: true,
},
{
name: "-",
id: "-",
wantErr: true,
},
{
name: "-sfsdfdf1",
id: "-sfsdfdf1",
wantErr: true,
},
{
name: "len==129",
id: "1234567890-1234567890-1234567890-1234567890-1234567890-1234567890-1234567890-1234567890-1234567890-123456789012345678901234567890",
wantErr: true,
},
}
for _, tt := range tests {
err := ValidateChangefeedID(tt.id)
if !tt.wantErr {
require.Nil(t, err, fmt.Sprintf("case:%s", tt.name))
} else {
require.True(t, cerror.ErrInvalidChangefeedID.Equal(err), fmt.Sprintf("case:%s", tt.name))
}
}
}
func TestGetTs(t *testing.T) {
t.Parallel()
var (
startTs uint64 = 418881574869139457
targetTs uint64 = 420891571239139085
checkpointTs uint64 = 420874357546418177
createTime = time.Now()
info = &ChangeFeedInfo{
SinkURI: "blackhole://",
CreateTime: createTime,
}
)
require.Equal(t, info.GetStartTs(), oracle.GoTimeToTS(createTime))
info.StartTs = startTs
require.Equal(t, info.GetStartTs(), startTs)
require.Equal(t, info.GetTargetTs(), uint64(math.MaxUint64))
info.TargetTs = targetTs
require.Equal(t, info.GetTargetTs(), targetTs)
require.Equal(t, info.GetCheckpointTs(nil), startTs)
status := &ChangeFeedStatus{CheckpointTs: checkpointTs}
require.Equal(t, info.GetCheckpointTs(status), checkpointTs)
}
job.rs | use serde::{Deserialize, Serialize};
use crate::server::rpc::Backend;
use crate::stream::server::control::StreamServerControlMessage;
use crate::transfer::messages::{JobDescription, JobDetail, JobInfo};
use crate::worker::start::RunningTaskContext;
use crate::{JobId, JobTaskCount, JobTaskId, Map, TakoTaskId, WorkerId};
use chrono::{DateTime, Utc};
use std::path::PathBuf;
use tako::common::index::ItemId;
use tako::server::task::SerializedTaskContext;
use tako::transfer::auth::deserialize;
use tako::TaskId;
use tokio::sync::oneshot;
/// State of a task that has been started at least once.
/// It contains the last known state of the task.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct StartedTaskData {
pub start_date: DateTime<Utc>,
pub context: RunningTaskContext,
pub worker_id: WorkerId,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum JobTaskState {
Waiting,
Running {
started_data: StartedTaskData,
},
Finished {
started_data: StartedTaskData,
end_date: DateTime<Utc>,
},
Failed {
started_data: StartedTaskData,
end_date: DateTime<Utc>,
error: String,
},
Canceled {
started_data: Option<StartedTaskData>,
},
}
impl JobTaskState {
    pub fn started_data(&self) -> Option<&StartedTaskData> {
        match self {
            JobTaskState::Running { started_data, .. }
            | JobTaskState::Finished { started_data, .. }
| JobTaskState::Failed { started_data, .. } => Some(started_data),
JobTaskState::Canceled { started_data } => started_data.as_ref(),
_ => None,
}
}
pub fn get_worker(&self) -> Option<WorkerId> {
self.started_data().map(|data| data.worker_id)
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct JobTaskInfo {
pub state: JobTaskState,
pub task_id: JobTaskId,
}
#[derive(Serialize, Deserialize, Debug, Copy, Clone, Default)]
pub struct JobTaskCounters {
pub n_running_tasks: JobTaskCount,
pub n_finished_tasks: JobTaskCount,
pub n_failed_tasks: JobTaskCount,
pub n_canceled_tasks: JobTaskCount,
}
impl std::ops::Add<JobTaskCounters> for JobTaskCounters {
type Output = JobTaskCounters;
fn add(self, rhs: Self) -> Self::Output {
Self {
n_running_tasks: self.n_running_tasks + rhs.n_running_tasks,
n_finished_tasks: self.n_finished_tasks + rhs.n_finished_tasks,
n_failed_tasks: self.n_failed_tasks + rhs.n_failed_tasks,
n_canceled_tasks: self.n_canceled_tasks + rhs.n_canceled_tasks,
}
}
}
impl JobTaskCounters {
pub fn n_waiting_tasks(&self, n_tasks: JobTaskCount) -> JobTaskCount {
n_tasks
- self.n_running_tasks
- self.n_finished_tasks
- self.n_failed_tasks
- self.n_canceled_tasks
}
pub fn has_unsuccessful_tasks(&self) -> bool {
self.n_failed_tasks > 0 || self.n_canceled_tasks > 0
}
pub fn is_terminated(&self, n_tasks: JobTaskCount) -> bool {
self.n_running_tasks == 0 && self.n_waiting_tasks(n_tasks) == 0
}
}
pub struct Job {
pub job_id: JobId,
pub base_task_id: TakoTaskId,
pub max_fails: Option<JobTaskCount>,
pub counters: JobTaskCounters,
pub tasks: Map<TakoTaskId, JobTaskInfo>,
pub log: Option<PathBuf>,
pub job_desc: JobDescription,
pub name: String,
pub submission_date: DateTime<Utc>,
pub completion_date: Option<DateTime<Utc>>,
submit_dir: PathBuf,
    /// Holds channels that will receive information about the job after it finishes in any way.
/// You can subscribe to the completion message with [`Self::subscribe_to_completion`].
completion_callbacks: Vec<oneshot::Sender<JobId>>,
}
impl Job {
    // Probably we need some parameter struct for this in the future, but as it is called in
    // exactly one place, the clippy lint is silenced for now.
#[allow(clippy::too_many_arguments)]
pub fn new(
job_desc: JobDescription,
job_id: JobId,
base_task_id: TakoTaskId,
name: String,
max_fails: Option<JobTaskCount>,
log: Option<PathBuf>,
submit_dir: PathBuf,
) -> Self {
let base = base_task_id.as_num();
let tasks = match &job_desc {
JobDescription::Array { ids, .. } => ids
.iter()
.enumerate()
.map(|(i, task_id)| {
(
TakoTaskId::new(base + i as <TaskId as ItemId>::IdType),
JobTaskInfo {
state: JobTaskState::Waiting,
task_id: task_id.into(),
},
)
})
.collect(),
};
Job {
job_desc,
job_id,
counters: Default::default(),
base_task_id,
name,
tasks,
max_fails,
log,
submission_date: Utc::now(),
completion_date: None,
submit_dir,
completion_callbacks: Default::default(),
}
}
pub fn make_job_detail(&self, include_tasks: bool) -> JobDetail {
let tasks = if include_tasks {
self.tasks.values().cloned().collect()
} else {
Vec::new()
};
JobDetail {
info: self.make_job_info(),
job_desc: self.job_desc.clone(),
tasks,
max_fails: self.max_fails,
submission_date: self.submission_date,
submit_dir: self.submit_dir.clone(),
completion_date_or_now: self.completion_date.unwrap_or_else(Utc::now),
}
}
pub fn make_job_info(&self) -> JobInfo {
JobInfo {
id: self.job_id,
name: self.name.clone(),
n_tasks: self.n_tasks(),
counters: self.counters,
}
}
#[inline]
pub fn n_tasks(&self) -> JobTaskCount {
self.tasks.len() as JobTaskCount
}
pub fn is_terminated(&self) -> bool {
self.counters.is_terminated(self.n_tasks())
}
pub fn get_task_state_mut(
&mut self,
tako_task_id: TakoTaskId,
) -> (JobTaskId, &mut JobTaskState) {
let state = self.tasks.get_mut(&tako_task_id).unwrap();
(state.task_id, &mut state.state)
}
pub fn iter_task_states(
&self,
) -> impl Iterator<Item = (TakoTaskId, JobTaskId, &JobTaskState)> + '_ {
self.tasks.iter().map(|(k, v)| (*k, v.task_id, &v.state))
}
pub fn non_finished_task_ids(&self) -> Vec<TakoTaskId> {
let mut result = Vec::new();
for (tako_id, _task_id, state) in self.iter_task_states() {
match state {
JobTaskState::Waiting | JobTaskState::Running { .. } => result.push(tako_id),
JobTaskState::Finished { .. }
| JobTaskState::Failed { .. }
| JobTaskState::Canceled { .. } => { /* Do nothing */ }
}
}
result
}
pub fn set_running_state(
&mut self,
tako_task_id: TakoTaskId,
worker: WorkerId,
context: SerializedTaskContext,
) {
let (_, state) = self.get_task_state_mut(tako_task_id);
let context: RunningTaskContext =
deserialize(&context).expect("Could not deserialize task context");
if matches!(state, JobTaskState::Waiting) {
*state = JobTaskState::Running {
started_data: StartedTaskData {
start_date: Utc::now(),
context,
worker_id: worker,
},
};
self.counters.n_running_tasks += 1;
}
}
pub fn check_termination(&mut self, backend: &Backend, now: DateTime<Utc>) {
if self.is_terminated() {
self.completion_date = Some(now);
if self.log.is_some() {
backend
.send_stream_control(StreamServerControlMessage::UnregisterStream(self.job_id));
}
for handler in self.completion_callbacks.drain(..) {
handler.send(self.job_id).ok();
}
}
}
pub fn set_finished_state(&mut self, tako_task_id: TakoTaskId, backend: &Backend) {
let (_, state) = self.get_task_state_mut(tako_task_id);
let now = Utc::now();
match state {
JobTaskState::Running { started_data } => {
*state = JobTaskState::Finished {
started_data: started_data.clone(),
end_date: now,
};
self.counters.n_running_tasks -= 1;
self.counters.n_finished_tasks += 1;
}
_ => panic!("Invalid worker state, expected Running, got {:?}", state),
}
self.check_termination(backend, now);
}
pub fn set_waiting_state(&mut self, tako_task_id: TakoTaskId) {
let (_, state) = self.get_task_state_mut(tako_task_id);
assert!(matches!(state, JobTaskState::Running { .. }));
*state = JobTaskState::Waiting;
self.counters.n_running_tasks -= 1;
}
pub fn set_failed_state(&mut self, tako_task_id: TakoTaskId, error: String, backend: &Backend) {
let (_, state) = self.get_task_state_mut(tako_task_id);
let now = Utc::now();
match state {
JobTaskState::Running { started_data } => {
*state = JobTaskState::Failed {
error,
started_data: started_data.clone(),
end_date: now,
};
self.counters.n_running_tasks -= 1;
self.counters.n_failed_tasks += 1;
}
_ => panic!("Invalid worker state, expected Running, got {:?}", state),
}
self.check_termination(backend, now);
}
pub fn set_cancel_state(&mut self, tako_task_id: TakoTaskId, backend: &Backend) -> JobTaskId {
let now = Utc::now();
let (task_id, state) = self.get_task_state_mut(tako_task_id);
match state {
JobTaskState::Running { started_data, .. } => {
*state = JobTaskState::Canceled {
started_data: Some(started_data.clone()),
};
self.counters.n_running_tasks -= 1;
}
JobTaskState::Waiting => {
*state = JobTaskState::Canceled { started_data: None };
}
state => panic!(
"Invalid job state that is being canceled: {:?} {:?}",
task_id, state
),
}
self.counters.n_canceled_tasks += 1;
self.check_termination(backend, now);
task_id
}
/// Subscribes to the completion event of this job.
/// When the job finishes in any way (completion, failure, cancellation), the channel will
/// receive a single message.
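    ///
    /// A minimal usage sketch (illustrative only; the `job` binding and the surrounding
    /// async context are assumptions, not part of this module):
    ///
    /// ```ignore
    /// let completion = job.subscribe_to_completion();
    /// // ... later, e.g. while serving a "wait for this job" request:
    /// let finished_id = completion.await.expect("job was dropped before completing");
    /// ```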
pub fn subscribe_to_completion(&mut self) -> oneshot::Receiver<JobId> {
let (tx, rx) = oneshot::channel();
self.completion_callbacks.push(tx);
rx
}
}
validate_auth.go | // Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"context"
"fmt"
"net/http"
"strings"
authv1 "k8s.io/api/authorization/v1"
authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/controllers/common"
)
var alwaysAllowedKind = []string{
v1alpha1.KindAwsChaos,
v1alpha1.KindPodNetworkChaos,
v1alpha1.KindPodIOChaos,
v1alpha1.KindGcpChaos,
v1alpha1.KindPodHttpChaos,
// TODO: check the auth for Schedule
	// The resource will be created by the SA of controller-manager, so checking the auth of Schedule is needed.
v1alpha1.KindSchedule,
"Workflow",
"WorkflowNode",
}
var authLog = ctrl.Log.WithName("validate-auth")
// +kubebuilder:webhook:path=/validate-auth,mutating=false,failurePolicy=fail,groups=chaos-mesh.org,resources=*,verbs=create;update,versions=v1alpha1,name=vauth.kb.io
// AuthValidator validates the authority
type AuthValidator struct {
enabled bool
authCli *authorizationv1.AuthorizationV1Client
decoder *admission.Decoder
clusterScoped bool
targetNamespace string
enableFilterNamespace bool
}
// NewAuthValidator returns a new AuthValidator
func NewAuthValidator(enabled bool, authCli *authorizationv1.AuthorizationV1Client,
clusterScoped bool, targetNamespace string, enableFilterNamespace bool) *AuthValidator {
return &AuthValidator{
enabled: enabled,
authCli: authCli,
clusterScoped: clusterScoped,
targetNamespace: targetNamespace,
enableFilterNamespace: enableFilterNamespace,
}
}
// Handle checks whether the requesting user is authorized to create or update the chaos object it selects.
func (v *AuthValidator) Handle(ctx context.Context, req admission.Request) admission.Response {
if !v.enabled {
return admission.Allowed("")
}
username := req.UserInfo.Username
groups := req.UserInfo.Groups
requestKind := req.Kind.Kind
if contains(alwaysAllowedKind, requestKind) {
return admission.Allowed(fmt.Sprintf("skip the RBAC check for type %s", requestKind))
}
kind, ok := v1alpha1.AllKinds()[requestKind]
if !ok {
err := fmt.Errorf("kind %s is not support", requestKind)
return admission.Errored(http.StatusBadRequest, err)
}
chaos := kind.Chaos.DeepCopyObject().(common.InnerObjectWithSelector)
if chaos == nil {
err := fmt.Errorf("kind %s is not support", requestKind)
return admission.Errored(http.StatusBadRequest, err)
}
err := v.decoder.Decode(req, chaos)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
specs := chaos.GetSelectorSpecs()
requireClusterPrivileges := false
affectedNamespaces := make(map[string]struct{})
for _, spec := range specs {
var selector *v1alpha1.PodSelector
if s, ok := spec.(*v1alpha1.ContainerSelector); ok {
selector = &s.PodSelector
}
if p, ok := spec.(*v1alpha1.PodSelector); ok {
selector = p
}
if selector == nil {
return admission.Allowed("")
}
if selector.Selector.ClusterScoped() {
requireClusterPrivileges = true
}
for _, namespace := range selector.Selector.AffectedNamespaces() {
affectedNamespaces[namespace] = struct{}{}
}
}
if requireClusterPrivileges {
allow, err := v.auth(username, groups, "", requestKind)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
if !allow {
return admission.Denied(fmt.Sprintf("%s is forbidden on cluster", username))
}
authLog.Info("user have the privileges on cluster, auth validate passed", "user", username, "groups", groups, "namespace", affectedNamespaces)
} else {
for namespace := range affectedNamespaces {
allow, err := v.auth(username, groups, namespace, requestKind)
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
if !allow {
return admission.Denied(fmt.Sprintf("%s is forbidden on namespace %s", username, namespace))
}
}
authLog.Info("user have the privileges on namespace, auth validate passed", "user", username, "groups", groups, "namespace", affectedNamespaces)
}
return admission.Allowed("")
}
// AuthValidator implements admission.DecoderInjector.
// A decoder will be automatically injected.
// InjectDecoder injects the decoder.
func (v *AuthValidator) InjectDecoder(d *admission.Decoder) error {
v.decoder = d
return nil
}
func (v *AuthValidator) auth(username string, groups []string, namespace string, chaosKind string) (bool, error) {
resourceName, err := v.resourceFor(chaosKind)
if err != nil {
return false, err
}
sar := authv1.SubjectAccessReview{
Spec: authv1.SubjectAccessReviewSpec{
ResourceAttributes: &authv1.ResourceAttributes{
Namespace: namespace,
Verb: "create",
Group: "chaos-mesh.org",
Resource: resourceName,
},
User: username,
Groups: groups,
},
}
response, err := v.authCli.SubjectAccessReviews().Create(&sar)
if err != nil {
return false, err
}
return response.Status.Allowed, nil
}
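// For intuition, the review built above is roughly equivalent to the following check
// (illustrative values; the resource, namespace and user come from the admission request):
//
//	kubectl auth can-i create podchaos.chaos-mesh.org -n <namespace> --as <username>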
func (v *AuthValidator) resourceFor(name string) (string, error) {
// TODO: we should use RESTMapper, but it relates to many dependencies
return strings.ToLower(name), nil
}
func contains(arr []string, target string) bool {
for _, item := range arr {
if item == target {
return true
}
}
return false
}
evaluator.go | package manager
import (
"context"
"github.com/gobwas/glob"
"github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/services/accesscontrol"
)
const roleGrafanaAdmin = "Grafana Admin"
func (m *Manager) Evaluate(ctx context.Context, user *models.SignedInUser, permission string, scope ...string) (bool, error) {
roles := []string{string(user.OrgRole)}
for _, role := range user.OrgRole.Children() {
roles = append(roles, string(role))
}
if user.IsGrafanaAdmin {
roles = append(roles, roleGrafanaAdmin)
}
res, err := m.GetUserPermissions(ctx, accesscontrol.GetUserPermissionsQuery{
OrgID: user.OrgId,
UserID: user.UserId,
Roles: roles,
})
	if err != nil {
		return false, err
	}
ok, dbScopes := extractPermission(res, permission)
if !ok {
return false, nil
}
for _, s := range scope {
var match bool
for dbScope := range dbScopes {
rule, err := glob.Compile(dbScope, ':', '/')
if err != nil {
return false, err
}
match = rule.Match(s)
if match {
break
}
}
if !match {
return false, nil
}
}
return true, nil
}
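// Illustrative sketch of the scope matching performed above (the scope strings are
// assumptions, not actual Grafana permissions):
//
//	rule, _ := glob.Compile("datasources:*", ':', '/')
//	rule.Match("datasources:1")          // true
//	rule.Match("datasources:1/settings") // false: '*' does not cross a separator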
func extractPermission(permissions []*accesscontrol.Permission, permission string) (bool, map[string]struct{}) {
scopes := map[string]struct{}{}
ok := false
for _, p := range permissions {
if p == nil {
continue
}
if p.Permission == permission {
ok = true
scopes[p.Scope] = struct{}{}
}
}
return ok, scopes
}
complete_outfit.rs | use bdd::closet::Closet;
use bdd::node::Node;
use core::Family;
use core::Item;
use core::Outfit;
use core::OutfitError;
use core::OutfitError::IncompatibleSelections;
use core::OutfitError::MultipleItemsPerFamily;
use core::OutfitError::UnknownItems;
use std::collections::BTreeMap;
impl Closet {
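    /// A minimal usage sketch (illustrative only; how the `Closet` and the selected
    /// `Item`s are built is assumed to happen elsewhere in the crate):
    ///
    /// ```ignore
    /// let outfit = closet.complete_outfit(vec![shirt, jeans])?;
    /// // On success, `outfit` holds the validated selections completed into a full outfit.
    /// ```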
pub fn complete_outfit(&self, selections: Vec<Item>) -> Result<Outfit, OutfitError> {
validate(self, &selections)?;
let mut root: Node = selections.iter()
.fold(
self.root().clone(),
|new_root, selection| Node::restrict(&new_root, selection, true));
let mut outfit_items = selections;
loop {
match root {
Node::Branch(id, low, high) => {
let high = Node::from(high);
let low = Node::from(low);
match high {
Node::Leaf(false) => root = low,
_ => {
outfit_items.push(id);
root = high;
}
}
}
Node::Leaf(_val) => {
outfit_items.sort();
return Ok(Outfit::new(outfit_items));
}
}
}
}
}
fn validate(closet: &Closet, selections: &[Item]) -> Result<(), OutfitError> {
if let Some(items) = find_unknown_items(&closet, &selections) {
return Err(UnknownItems(items));
}
if let Some(items) = find_duplicate_items(&closet, &selections) {
return Err(MultipleItemsPerFamily(items));
}
if let Some(items) = find_conflicting_items(&closet, &selections) {
return Err(IncompatibleSelections(items));
}
Ok(())
}
fn find_unknown_items(closet: &Closet, selections: &[Item]) -> Option<Vec<Item>> {
let unknown_items = selections.iter()
.filter(|ref item| closet.get_family(item).is_none())
.cloned()
.collect::<Vec<Item>>();
if !unknown_items.is_empty() {
Some(unknown_items)
} else {
None
}
}
fn find_duplicate_items(closet: &Closet, selections: &[Item]) -> Option<BTreeMap<Family, Vec<Item>>> {
let duplicates: BTreeMap<Family, Vec<Item>> = selections.iter()
.map(|item| (closet.get_family(item), item))
.map(|(family, item): (Option<&Family>, &Item)| (family.unwrap(), item))
.fold(BTreeMap::new(), |mut duplicates: BTreeMap<Family, Vec<Item>>, (family, item): (&Family, &Item)| {
duplicates.entry(family.clone()).or_insert_with(|| vec![]).push(item.clone());
duplicates
})
.iter()
.filter(|&(_, items)| items.len() > 1)
.map(|(family, items)| (family.clone(), items.clone()))
.collect();
if !duplicates.is_empty() {
Some(duplicates)
} else {
None
}
}
fn find_conflicting_items(closet: &Closet, selections: &[Item]) -> Option<Vec<Item>> {
let root: Node = selections.iter()
.fold(closet.root().clone(), |new_root, selection| Node::restrict(&new_root, selection, true));
let mut outfit_items = selections.to_owned();
match root {
Node::Leaf(false) => {
outfit_items.sort();
Some(outfit_items)
}
_ => None,
}
}
post_learning_assignments_aggregates_query_responses.go | // Code generated by go-swagger; DO NOT EDIT.
package learning
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/freman/genesysapi/models"
)
// PostLearningAssignmentsAggregatesQueryReader is a Reader for the PostLearningAssignmentsAggregatesQuery structure.
type PostLearningAssignmentsAggregatesQueryReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *PostLearningAssignmentsAggregatesQueryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewPostLearningAssignmentsAggregatesQueryOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 400:
result := NewPostLearningAssignmentsAggregatesQueryBadRequest()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 401:
result := NewPostLearningAssignmentsAggregatesQueryUnauthorized()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 403:
result := NewPostLearningAssignmentsAggregatesQueryForbidden()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 404:
result := NewPostLearningAssignmentsAggregatesQueryNotFound()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 408:
result := NewPostLearningAssignmentsAggregatesQueryRequestTimeout()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 413:
result := NewPostLearningAssignmentsAggregatesQueryRequestEntityTooLarge()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 415:
result := NewPostLearningAssignmentsAggregatesQueryUnsupportedMediaType()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 429:
result := NewPostLearningAssignmentsAggregatesQueryTooManyRequests()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 500:
result := NewPostLearningAssignmentsAggregatesQueryInternalServerError()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 501:
result := NewPostLearningAssignmentsAggregatesQueryNotImplemented()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 503:
result := NewPostLearningAssignmentsAggregatesQueryServiceUnavailable()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 504:
result := NewPostLearningAssignmentsAggregatesQueryGatewayTimeout()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
}
}
// NewPostLearningAssignmentsAggregatesQueryOK creates a PostLearningAssignmentsAggregatesQueryOK with default headers values
func NewPostLearningAssignmentsAggregatesQueryOK() *PostLearningAssignmentsAggregatesQueryOK {
return &PostLearningAssignmentsAggregatesQueryOK{}
}
/*PostLearningAssignmentsAggregatesQueryOK handles this case with default header values.
Query completed successfully
*/
type PostLearningAssignmentsAggregatesQueryOK struct {
Payload *models.LearningAssignmentAggregateResponse
}
func (o *PostLearningAssignmentsAggregatesQueryOK) Error() string {
return fmt.Sprintf("[POST /api/v2/learning/assignments/aggregates/query][%d] postLearningAssignmentsAggregatesQueryOK %+v", 200, o.Payload)
}
func (o *PostLearningAssignmentsAggregatesQueryOK) GetPayload() *models.LearningAssignmentAggregateResponse {
return o.Payload
}
func (o *PostLearningAssignmentsAggregatesQueryOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.LearningAssignmentAggregateResponse)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewPostLearningAssignmentsAggregatesQueryBadRequest creates a PostLearningAssignmentsAggregatesQueryBadRequest with default headers values
func NewPostLearningAssignmentsAggregatesQueryBadRequest() *PostLearningAssignmentsAggregatesQueryBadRequest {
return &PostLearningAssignmentsAggregatesQueryBadRequest{}
}
/*PostLearningAssignmentsAggregatesQueryBadRequest handles this case with default header values.
The request could not be understood by the server due to malformed syntax.
*/
type PostLearningAssignmentsAggregatesQueryBadRequest struct {
Payload *models.ErrorBody
}
func (o *PostLearningAssignmentsAggregatesQueryBadRequest) Error() string {
return fmt.Sprintf("[POST /api/v2/learning/assignments/aggregates/query][%d] postLearningAssignmentsAggregatesQueryBadRequest %+v", 400, o.Payload)
}
func (o *PostLearningAssignmentsAggregatesQueryBadRequest) GetPayload() *models.ErrorBody {
return o.Payload
}
func (o *PostLearningAssignmentsAggregatesQueryBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ErrorBody)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewPostLearningAssignmentsAggregatesQueryUnauthorized creates a PostLearningAssignmentsAggregatesQueryUnauthorized with default headers values
func NewPostLearningAssignmentsAggregatesQueryUnauthorized() *PostLearningAssignmentsAggregatesQueryUnauthorized {
return &PostLearningAssignmentsAggregatesQueryUnauthorized{}
}
/*PostLearningAssignmentsAggregatesQueryUnauthorized handles this case with default header values.
No authentication bearer token specified in authorization header.
*/
type PostLearningAssignmentsAggregatesQueryUnauthorized struct {
Payload *models.ErrorBody
}
func (o *PostLearningAssignmentsAggregatesQueryUnauthorized) Error() string {
return fmt.Sprintf("[POST /api/v2/learning/assignments/aggregates/query][%d] postLearningAssignmentsAggregatesQueryUnauthorized %+v", 401, o.Payload)
}
func (o *PostLearningAssignmentsAggregatesQueryUnauthorized) GetPayload() *models.ErrorBody {
return o.Payload
}
func (o *PostLearningAssignmentsAggregatesQueryUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ErrorBody)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewPostLearningAssignmentsAggregatesQueryForbidden creates a PostLearningAssignmentsAggregatesQueryForbidden with default headers values
func NewPostLearningAssignmentsAggregatesQueryForbidden() *PostLearningAssignmentsAggregatesQueryForbidden {
return &PostLearningAssignmentsAggregatesQueryForbidden{}
}
/*PostLearningAssignmentsAggregatesQueryForbidden handles this case with default header values.
You are not authorized to perform the requested action.
*/
type PostLearningAssignmentsAggregatesQueryForbidden struct {
Payload *models.ErrorBody
}
func (o *PostLearningAssignmentsAggregatesQueryForbidden) Error() string {
return fmt.Sprintf("[POST /api/v2/learning/assignments/aggregates/query][%d] postLearningAssignmentsAggregatesQueryForbidden %+v", 403, o.Payload)
}
func (o *PostLearningAssignmentsAggregatesQueryForbidden) GetPayload() *models.ErrorBody {
return o.Payload
}
func (o *PostLearningAssignmentsAggregatesQueryForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ErrorBody)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewPostLearningAssignmentsAggregatesQueryNotFound creates a PostLearningAssignmentsAggregatesQueryNotFound with default headers values
func NewPostLearningAssignmentsAggregatesQueryNotFound() *PostLearningAssignmentsAggregatesQueryNotFound {
return &PostLearningAssignmentsAggregatesQueryNotFound{}
}
/*PostLearningAssignmentsAggregatesQueryNotFound handles this case with default header values.
The requested resource was not found.
*/
type PostLearningAssignmentsAggregatesQueryNotFound struct {
Payload *models.ErrorBody
}
func (o *PostLearningAssignmentsAggregatesQueryNotFound) Error() string {
return fmt.Sprintf("[POST /api/v2/learning/assignments/aggregates/query][%d] postLearningAssignmentsAggregatesQueryNotFound %+v", 404, o.Payload)
}
func (o *PostLearningAssignmentsAggregatesQueryNotFound) GetPayload() *models.ErrorBody {
return o.Payload
}
func (o *PostLearningAssignmentsAggregatesQueryNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ErrorBody)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewPostLearningAssignmentsAggregatesQueryRequestTimeout creates a PostLearningAssignmentsAggregatesQueryRequestTimeout with default headers values
func NewPostLearningAssignmentsAggregatesQueryRequestTimeout() *PostLearningAssignmentsAggregatesQueryRequestTimeout {
return &PostLearningAssignmentsAggregatesQueryRequestTimeout{}
}
/*PostLearningAssignmentsAggregatesQueryRequestTimeout handles this case with default header values.
The client did not produce a request within the server timeout limit. This can be caused by a slow network connection and/or large payloads.
*/
type PostLearningAssignmentsAggregatesQueryRequestTimeout struct {
Payload *models.ErrorBody
}
func (o *PostLearningAssignmentsAggregatesQueryRequestTimeout) Error() string {
return fmt.Sprintf("[POST /api/v2/learning/assignments/aggregates/query][%d] postLearningAssignmentsAggregatesQueryRequestTimeout %+v", 408, o.Payload)
}
func (o *PostLearningAssignmentsAggregatesQueryRequestTimeout) GetPayload() *models.ErrorBody {
return o.Payload
}
func (o *PostLearningAssignmentsAggregatesQueryRequestTimeout) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ErrorBody)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewPostLearningAssignmentsAggregatesQueryRequestEntityTooLarge creates a PostLearningAssignmentsAggregatesQueryRequestEntityTooLarge with default headers values
func NewPostLearningAssignmentsAggregatesQueryRequestEntityTooLarge() *PostLearningAssignmentsAggregatesQueryRequestEntityTooLarge {
return &PostLearningAssignmentsAggregatesQueryRequestEntityTooLarge{}
}
/*PostLearningAssignmentsAggregatesQueryRequestEntityTooLarge handles this case with default header values.
The request is over the size limit. Content-Length: %s
*/
type PostLearningAssignmentsAggregatesQueryRequestEntityTooLarge struct {
Payload *models.ErrorBody
}
func (o *PostLearningAssignmentsAggregatesQueryRequestEntityTooLarge) Error() string {
return fmt.Sprintf("[POST /api/v2/learning/assignments/aggregates/query][%d] postLearningAssignmentsAggregatesQueryRequestEntityTooLarge %+v", 413, o.Payload)
}
func (o *PostLearningAssignmentsAggregatesQueryRequestEntityTooLarge) GetPayload() *models.ErrorBody {
return o.Payload
}
func (o *PostLearningAssignmentsAggregatesQueryRequestEntityTooLarge) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ErrorBody)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewPostLearningAssignmentsAggregatesQueryUnsupportedMediaType creates a PostLearningAssignmentsAggregatesQueryUnsupportedMediaType with default headers values
func NewPostLearningAssignmentsAggregatesQueryUnsupportedMediaType() *PostLearningAssignmentsAggregatesQueryUnsupportedMediaType {
return &PostLearningAssignmentsAggregatesQueryUnsupportedMediaType{}
}
/*PostLearningAssignmentsAggregatesQueryUnsupportedMediaType handles this case with default header values.
Unsupported Media Type - Unsupported or incorrect media type, such as an incorrect Content-Type value in the header.
*/
type PostLearningAssignmentsAggregatesQueryUnsupportedMediaType struct {
Payload *models.ErrorBody
}
func (o *PostLearningAssignmentsAggregatesQueryUnsupportedMediaType) Error() string {
return fmt.Sprintf("[POST /api/v2/learning/assignments/aggregates/query][%d] postLearningAssignmentsAggregatesQueryUnsupportedMediaType %+v", 415, o.Payload)
}
func (o *PostLearningAssignmentsAggregatesQueryUnsupportedMediaType) GetPayload() *models.ErrorBody {
return o.Payload
}
func (o *PostLearningAssignmentsAggregatesQueryUnsupportedMediaType) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ErrorBody)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewPostLearningAssignmentsAggregatesQueryTooManyRequests creates a PostLearningAssignmentsAggregatesQueryTooManyRequests with default headers values
func NewPostLearningAssignmentsAggregatesQueryTooManyRequests() *PostLearningAssignmentsAggregatesQueryTooManyRequests {
return &PostLearningAssignmentsAggregatesQueryTooManyRequests{}
}
/*PostLearningAssignmentsAggregatesQueryTooManyRequests handles this case with default header values.
Rate limit exceeded the maximum. Retry the request in [%s] seconds
*/
type PostLearningAssignmentsAggregatesQueryTooManyRequests struct {
Payload *models.ErrorBody
}
func (o *PostLearningAssignmentsAggregatesQueryTooManyRequests) Error() string {
return fmt.Sprintf("[POST /api/v2/learning/assignments/aggregates/query][%d] postLearningAssignmentsAggregatesQueryTooManyRequests %+v", 429, o.Payload)
}
func (o *PostLearningAssignmentsAggregatesQueryTooManyRequests) GetPayload() *models.ErrorBody {
return o.Payload
}
func (o *PostLearningAssignmentsAggregatesQueryTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ErrorBody)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewPostLearningAssignmentsAggregatesQueryInternalServerError creates a PostLearningAssignmentsAggregatesQueryInternalServerError with default headers values
func NewPostLearningAssignmentsAggregatesQueryInternalServerError() *PostLearningAssignmentsAggregatesQueryInternalServerError {
return &PostLearningAssignmentsAggregatesQueryInternalServerError{}
}
/*PostLearningAssignmentsAggregatesQueryInternalServerError handles this case with default header values.
The server encountered an unexpected condition which prevented it from fulfilling the request.
*/
type PostLearningAssignmentsAggregatesQueryInternalServerError struct {
Payload *models.ErrorBody
}
func (o *PostLearningAssignmentsAggregatesQueryInternalServerError) Error() string {
return fmt.Sprintf("[POST /api/v2/learning/assignments/aggregates/query][%d] postLearningAssignmentsAggregatesQueryInternalServerError %+v", 500, o.Payload)
}
func (o *PostLearningAssignmentsAggregatesQueryInternalServerError) GetPayload() *models.ErrorBody {
return o.Payload
}
func (o *PostLearningAssignmentsAggregatesQueryInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ErrorBody)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF |
return nil
}
// NewPostLearningAssignmentsAggregatesQueryNotImplemented creates a PostLearningAssignmentsAggregatesQueryNotImplemented with default headers values
func NewPostLearningAssignmentsAggregatesQueryNotImplemented() *PostLearningAssignmentsAggregatesQueryNotImplemented {
return &PostLearningAssignmentsAggregatesQueryNotImplemented{}
}
/*PostLearningAssignmentsAggregatesQueryNotImplemented handles this case with default header values.
Not Implemented
*/
type PostLearningAssignmentsAggregatesQueryNotImplemented struct {
Payload *models.ErrorBody
}
func (o *PostLearningAssignmentsAggregatesQueryNotImplemented) Error() string {
return fmt.Sprintf("[POST /api/v2/learning/assignments/aggregates/query][%d] postLearningAssignmentsAggregatesQueryNotImplemented %+v", 501, o.Payload)
}
func (o *PostLearningAssignmentsAggregatesQueryNotImplemented) GetPayload() *models.ErrorBody {
return o.Payload
}
func (o *PostLearningAssignmentsAggregatesQueryNotImplemented) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ErrorBody)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewPostLearningAssignmentsAggregatesQueryServiceUnavailable creates a PostLearningAssignmentsAggregatesQueryServiceUnavailable with default headers values
func NewPostLearningAssignmentsAggregatesQueryServiceUnavailable() *PostLearningAssignmentsAggregatesQueryServiceUnavailable {
return &PostLearningAssignmentsAggregatesQueryServiceUnavailable{}
}
/*PostLearningAssignmentsAggregatesQueryServiceUnavailable handles this case with default header values.
Service Unavailable - The server is currently unavailable (because it is overloaded or down for maintenance).
*/
type PostLearningAssignmentsAggregatesQueryServiceUnavailable struct {
Payload *models.ErrorBody
}
func (o *PostLearningAssignmentsAggregatesQueryServiceUnavailable) Error() string {
return fmt.Sprintf("[POST /api/v2/learning/assignments/aggregates/query][%d] postLearningAssignmentsAggregatesQueryServiceUnavailable %+v", 503, o.Payload)
}
func (o *PostLearningAssignmentsAggregatesQueryServiceUnavailable) GetPayload() *models.ErrorBody {
return o.Payload
}
func (o *PostLearningAssignmentsAggregatesQueryServiceUnavailable) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ErrorBody)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewPostLearningAssignmentsAggregatesQueryGatewayTimeout creates a PostLearningAssignmentsAggregatesQueryGatewayTimeout with default headers values
func NewPostLearningAssignmentsAggregatesQueryGatewayTimeout() *PostLearningAssignmentsAggregatesQueryGatewayTimeout {
return &PostLearningAssignmentsAggregatesQueryGatewayTimeout{}
}
/*PostLearningAssignmentsAggregatesQueryGatewayTimeout handles this case with default header values.
The request timed out.
*/
type PostLearningAssignmentsAggregatesQueryGatewayTimeout struct {
Payload *models.ErrorBody
}
func (o *PostLearningAssignmentsAggregatesQueryGatewayTimeout) Error() string {
return fmt.Sprintf("[POST /api/v2/learning/assignments/aggregates/query][%d] postLearningAssignmentsAggregatesQueryGatewayTimeout %+v", 504, o.Payload)
}
func (o *PostLearningAssignmentsAggregatesQueryGatewayTimeout) GetPayload() *models.ErrorBody {
return o.Payload
}
func (o *PostLearningAssignmentsAggregatesQueryGatewayTimeout) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ErrorBody)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
| {
return err
} |
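// Illustrative helper, not part of the generated client: the typed responses
// above all implement the error interface via their Error() methods, so a
// caller can branch on them with a type switch. The retry policy below is an
// assumption chosen for this sketch, not something defined by the API.
func isRetryableAggregatesQueryError(err error) bool {
	switch err.(type) {
	case *PostLearningAssignmentsAggregatesQueryRequestTimeout,
		*PostLearningAssignmentsAggregatesQueryTooManyRequests,
		*PostLearningAssignmentsAggregatesQueryServiceUnavailable,
		*PostLearningAssignmentsAggregatesQueryGatewayTimeout:
		// 408, 429, 503 and 504 are usually safe to retry with backoff.
		return true
	default:
		return false
	}
}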
identity_user_flow_attribute_type.go | package graph
import (
"strings"
"errors"
)
//
type IdentityUserFlowAttributeType int
const (
BUILTIN_IDENTITYUSERFLOWATTRIBUTETYPE IdentityUserFlowAttributeType = iota
CUSTOM_IDENTITYUSERFLOWATTRIBUTETYPE
REQUIRED_IDENTITYUSERFLOWATTRIBUTETYPE
UNKNOWNFUTUREVALUE_IDENTITYUSERFLOWATTRIBUTETYPE
)
func (i IdentityUserFlowAttributeType) String() string {
return []string{"BUILTIN", "CUSTOM", "REQUIRED", "UNKNOWNFUTUREVALUE"}[i]
}
func ParseIdentityUserFlowAttributeType(v string) (interface{}, error) |
func SerializeIdentityUserFlowAttributeType(values []IdentityUserFlowAttributeType) []string {
result := make([]string, len(values))
for i, v := range values {
result[i] = v.String()
}
return result
}
| {
switch strings.ToUpper(v) {
case "BUILTIN":
return BUILTIN_IDENTITYUSERFLOWATTRIBUTETYPE, nil
case "CUSTOM":
return CUSTOM_IDENTITYUSERFLOWATTRIBUTETYPE, nil
case "REQUIRED":
return REQUIRED_IDENTITYUSERFLOWATTRIBUTETYPE, nil
case "UNKNOWNFUTUREVALUE":
return UNKNOWNFUTUREVALUE_IDENTITYUSERFLOWATTRIBUTETYPE, nil
}
return 0, errors.New("Unknown IdentityUserFlowAttributeType value: " + v)
} |
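// Illustrative round trip, not part of the generated file: parse a string into
// the enum and serialize it back to its canonical name. The "custom" input is
// just an example value.
func exampleIdentityUserFlowAttributeTypeRoundTrip() ([]string, error) {
	parsed, err := ParseIdentityUserFlowAttributeType("custom")
	if err != nil {
		return nil, err
	}
	attrType := parsed.(IdentityUserFlowAttributeType)
	// Returns []string{"CUSTOM"}.
	return SerializeIdentityUserFlowAttributeType([]IdentityUserFlowAttributeType{attrType}), nil
}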
sdpMalleables.go | package seed2sdp
import (
"fmt"
)
// SDPMalleables includes v, o, s, c, t, which "do not affect the WebRTC session" (WebRTC For The Curious, page 13).
type SDPMalleables struct {
Version uint32 // v=0
Origin SDPOrigin // o=- 0 0 IN IP4 0.0.0.0
SessionName string // s=-
ConnectionData SDPConnectionData // c=IN IP4 0.0.0.0
Timing SDPTiming // t=0 0
}
func NewSDPMalleables() SDPMalleables {
return SDPMalleables{
Version: 0,
Origin: NewSDPOrigin(),
SessionName: "-",
ConnectionData: NewSDPConnectionData(),
Timing: NewSDPTiming(),
}
}
// Reserved
func | (_ *HKDFParams) SDPMalleables {
sdpm := NewSDPMalleables()
return sdpm
}
// v=0
// o=- 0 0 IN IP4 0.0.0.0
// s=-
// c=IN IP4 0.0.0.0
// t=0 0
func (sm *SDPMalleables) String() string {
strsm := fmt.Sprintf(`v=%d\r\n`, sm.Version)
strsm += fmt.Sprintf(`o=%s\r\n`, sm.Origin.String())
strsm += fmt.Sprintf(`s=%s\r\n`, sm.SessionName)
strsm += fmt.Sprintf(`c=%s\r\n`, sm.ConnectionData.String())
strsm += fmt.Sprintf(`t=%s\r\n`, sm.Timing.String())
return strsm
}
| PredictSDPMalleables |
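// Illustrative usage, not part of the original file: render the default
// malleable fields. Note that String() builds its format strings with raw
// string literals, so the rendered output contains the literal characters
// `\r\n` rather than CR/LF control characters.
func exampleSDPMalleablesHeader() string {
	sdpm := NewSDPMalleables()
	// Starts with `v=0\r\n`, followed by the origin, session name,
	// connection data and timing lines in the order shown above.
	return sdpm.String()
}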
get_blob_properties_response.rs | use crate::{blob::blob::Blob, AzureStorageError};
use azure_core::headers::{date_from_headers, request_id_from_headers};
use azure_core::RequestId; | use chrono::{DateTime, Utc};
use http::HeaderMap;
#[derive(Debug, Clone)]
pub struct GetBlobPropertiesResponse {
pub blob: Blob,
pub request_id: RequestId,
pub date: DateTime<Utc>,
}
impl GetBlobPropertiesResponse {
pub(crate) fn from_response(
headers: &HeaderMap,
blob: Blob,
) -> Result<GetBlobPropertiesResponse, AzureStorageError> {
debug!("headers == {:#?}", headers);
let request_id = request_id_from_headers(headers)?;
let date = date_from_headers(headers)?;
Ok(GetBlobPropertiesResponse {
blob,
request_id,
date,
})
}
} | |
02-numbers-tasks.js | /* *******************************************************************************************
* *
* Please read the following tutorial before implementing tasks:                 *
* https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Numbers_and_dates *
* https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number *
* https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math *
* *
******************************************************************************************* */
/**
* Returns the area of a rectangle given by its width and height.
*
* @param {number} width
* @param {number} height
* @return {number}
*
* @example:
* 5, 10 => 50 | * 5, 5 => 25
*/
function getRectangleArea(width, height) {
return width * height;
}
/**
* Returns the circumference of a circle given by its radius.
*
* @param {number} radius
* @return {number}
*
* @example:
* 5 => 31.41592653589793
* 3.14 => 19.729201864543903
* 0 => 0
*/
function getCicleCircumference(radius) {
return 2 * Math.PI * radius;
}
/**
* Returns an average of two given numbers.
*
* @param {number} value1
* @param {number} value2
* @return {number}
*
* @example:
* 5, 5 => 5
* 10, 0 => 5
* -3, 3 => 0
*/
function getAverage(value1, value2) {
if (value1 === value2) {
return value1;
}
return (value1 + value2) / 2;
}
/**
* Returns the distance between two points given by their Cartesian coordinates.
*
* @param {number} x1
* @param {number} y1
* @param {number} x2
* @param {number} y2
*
* @return {number}
*
* @example:
* (0,0) (0,1) => 1
* (0,0) (1,0) => 1
* (-5,0) (10,-10) => 18.027756377319946
*/
function getDistanceBetweenPoints(x1, y1, x2, y2) {
return Math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2);
}
/**
* Returns a root of linear equation a*x + b = 0 given by coefficients a and b.
*
* @param {number} a
* @param {number} b
* @return {number}
*
* @example:
* 5*x - 10 = 0 => 2
* x + 8 = 0 => -8
* 5*x = 0 => 0
*/
function getLinearEquationRoot(a, b) {
return -b / a;
}
/**
* Returns the angle (in radians) between two vectors given by their xi and yi
* coordinates in the Cartesian plane.
* See details https://en.wikipedia.org/wiki/Euclidean_vector#Representations
*
* @param {number} x1
* @param {number} y1
* @param {number} x2
* @param {number} y2
* @return {number}
*
* @example:
* (1,0) (0,1) => π/2
* (0,1) (0,-1) => π
* (0,-1) (1,0) => π/2
* (0,1) (0,1) => 0
* (0,1) (1,2) => 0
*/
function getAngleBetweenVectors(x1, y1, x2, y2) {
return Math.abs(Math.atan2(y2, x2) - Math.atan2(y1, x1));
}
/**
* Returns the last digit of an integer number.
*
* @param {number} value
* @return {number}
*
* @example:
* 100 => 0
* 37 => 7
* 5 => 5
* 0 => 0
*/
function getLastDigit(value) {
return parseInt(value.toString().slice(-1), 10);
}
/**
* Returns a number parsed from its string representation.
*
* @param {string} value
* @return {number}
*
* @example:
* '100' => 100
* '37' => 37
* '-525.5' => -525.5
*/
function parseNumberFromString(value) {
return parseFloat(value);
}
/**
* Returns a diagonal length of the rectangular parallelepiped given by its sides a,b,c.
*
* @param {number} a
* @param {number} b
* @param {number} c
* @return {number}
*
* @example:
* 1,1,1 => 1.7320508075688772
* 3,3,3 => 5.196152422706632
* 1,2,3 => 3.741657386773941
*/
function getParallelipidedDiagonal(a, b, c) {
return Math.sqrt(a ** 2 + b ** 2 + c ** 2);
}
/**
* Returns the number rounded to the specified power of 10.
*
* @param {number} num
* @param {number} pow
* @return {number}
*
* @example:
* 1234, 0 => 1234
* 1234, 1 => 1230
* 1234, 2 => 1200
* 1234, 3 => 1000
* 1678, 0 => 1678
* 1678, 1 => 1680
* 1678, 2 => 1700
* 1678, 3 => 2000
*/
function roundToPowerOfTen(num, pow) {
return Math.round(num / (10 ** pow)) * (10 ** pow);
}
/**
* Returns true if the number is prime; otherwise false.
* See: https://en.wikipedia.org/wiki/Primality_test
*
* @param {number} n
* @return {boolean}
*
* @example:
* 4 => false
* 5 => true
* 6 => false
* 7 => true
* 11 => true
* 12 => false
* 16 => false
* 17 => true
*/
function isPrime(n) {
if (n <= 3) {
return n > 1;
}
if (n % 2 === 0 || n % 3 === 0) {
return false;
}
let i = 5;
while (i ** 2 <= n) {
if (n % i === 0 || n % (i + 2) === 0) {
return false;
}
i += 6;
}
return true;
}
/**
* Tries to convert value to a number and returns it if the conversion was successful;
* otherwise returns default value passed as a second argument.
*
* @param {any} value
* @param {any} def
* @return {number}
*
* @example
* toNumber(null, 0) => 0
* toNumber('test', 0) => 0
* toNumber('1', 0) => 1
* toNumber(42, 0) => 42
* toNumber(new Number(42), 0) => 42
*/
function toNumber(value, def) {
const number = Number(value);
if (!Number.isNaN(number)) {
return number;
}
return def;
}
module.exports = {
getRectangleArea,
getCicleCircumference,
getAverage,
getDistanceBetweenPoints,
getLinearEquationRoot,
getAngleBetweenVectors,
getLastDigit,
parseNumberFromString,
getParallelipidedDiagonal,
roundToPowerOfTen,
isPrime,
toNumber,
}; | |
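// Illustrative usage only, not part of the task file: load the module and call
// a few of the exported helpers (the relative require path is an assumption).
const tasks = require('./02-numbers-tasks');
console.log(tasks.getRectangleArea(5, 10)); // 50
console.log(tasks.getAverage(-3, 3)); // 0
console.log(tasks.isPrime(17)); // true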
main.go | package main
import (
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
"golang.org/x/net/websocket"
)
type Game struct {
Chanel1 chan ChessData
Chanel2 chan ChessData
}
func (game *Game) closeGame() {
close(game.Chanel1)
close(game.Chanel2)
}
type ChessData struct {
Castling bool `json:"castling"`
Checkmate bool `json:"checkmate"`
Promotion bool `json:"promotion"`
Finish bool `json:"finish"`
Turn string `json:"turn"`
PieceSquare string `json:"pieceSquare"`
DestinationSquare string `json:"destinationSquare"`
PromotionChoice string `json:"promotionChoice"`
PlayerColor string `json:"playerColor"`
}
func createGame(c echo.Context) error {
websocket.Handler(func(ws *websocket.Conn) {
defer ws.Close()
game := &Game{
Chanel1: make(chan ChessData),
Chanel2: make(chan ChessData),
}
defer game.closeGame()
token := c.QueryParam("token")
games[token] = game
playerColor := ChessData{
PlayerColor: "white",
}
err := websocket.JSON.Send(ws, &playerColor)
if err != nil {
c.Logger().Error(err)
return
}
playGame(c, ws, game.Chanel1, game.Chanel2)
}).ServeHTTP(c.Response(), c.Request())
return nil
}
func joinGame(c echo.Context) error {
websocket.Handler(func(ws *websocket.Conn) {
defer ws.Close()
token := c.QueryParam("token")
playerColor := ChessData{
PlayerColor: "black",
}
err := websocket.JSON.Send(ws, &playerColor)
if err != nil {
c.Logger().Error(err)
return
}
playGame(c, ws, games[token].Chanel2, games[token].Chanel1)
}).ServeHTTP(c.Response(), c.Request())
return nil
}
func playGame(c echo.Context, ws *websocket.Conn, canal1 chan ChessData, canal2 chan ChessData) |
var games = make(map[string]*Game)
func main() {
e := echo.New()
e.Use(middleware.Logger())
e.Use(middleware.Recover())
e.Static("/", "public")
e.GET("/ws", createGame)
e.GET("/ws2", joinGame)
e.Logger.Fatal(e.Start(":8080"))
}
| {
// Write: forward the opponent's moves from canal2 to this socket. Start the
// writer goroutine once, before the read loop, instead of spawning a new one
// on every iteration.
go func() {
for {
data := <-canal2
err := websocket.JSON.Send(ws, &data)
if err != nil {
c.Logger().Error(err)
return
}
}
}()
for {
// Read
data := ChessData{}
err := websocket.JSON.Receive(ws, &data)
if err != nil {
c.Logger().Error(err)
return
}
if data.Finish {
return
}
canal1 <- data
if data.Checkmate {
return
}
}
} |
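// Illustrative helper, not part of the original server: a sample opening move
// encoded as ChessData, matching the JSON tags above; the square values are
// arbitrary example data.
func exampleOpeningMove() ChessData {
	return ChessData{
		Turn:              "white",
		PieceSquare:       "e2",
		DestinationSquare: "e4",
		PlayerColor:       "white",
	}
}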
objstore.rs | use pyo3::prelude::*;
#[pyclass]
#[derive(Default)]
pub struct ObjStore {
obj: Vec<PyObject>,
}
#[pymethods]
impl ObjStore {
#[new]
fn new() -> Self {
ObjStore::default() | }
fn push(&mut self, py: Python<'_>, obj: &PyAny) {
self.obj.push(obj.to_object(py));
}
}
#[pymodule]
pub fn objstore(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
m.add_class::<ObjStore>()
} | |
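# Illustrative usage from Python, not part of the Rust source: this assumes the
# crate has been compiled into an importable extension module named `objstore`
# (for example with maturin). Module and class names come from the #[pymodule]
# and #[pyclass] definitions above.
import objstore

store = objstore.ObjStore()
store.push(42)        # any Python object can be appended to the store
store.push("hello")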
otherItemsParent-container.js | import React from "react";
import * as _ from "lodash";
import models from "../models";
import interpreter from "../interpreter";
import { evaluateObjectProperties } from "../interpreter";
import { calculateStateProjection } from "../reducer";
import { connect } from "react-redux"
import Checkbox from "@material-ui/core/Checkbox";
import Textfield from "@material-ui/core/TextField";
import FormControl from "@material-ui/core/FormControl";
import InputLabel from "@material-ui/core/InputLabel";
import Select from "@material-ui/core/Select";
import MenuItem from "@material-ui/core/MenuItem";
import List from "@material-ui/core/List";
import ListItem from "@material-ui/core/ListItem";
import Grid from "@material-ui/core/Grid";
import Button from "@material-ui/core/Button";
import OtherItemsList from './OtherItemsList';
import NewOtherItemContainer from './NewOtherItemContainer';
class ErrorBoundary extends React.Component {
constructor(props) {
super(props);
this.state = { hasError: false };
}
static getDerivedStateFromError(error){
return {
error: "There was an error in otherItemsParent",
hasError: true
};
}
componentDidCatch(error, errorInfo){
console.error(error, errorInfo);
}
render() {
if (this.state.hasError) {
// You can render any custom fallback UI
return <h1>{this.state.error}</h1>;
}
return this.props.children;
}
}
const defaultStyle = {
border: 0
};
class otherItemsParent extends React.Component {
render(){
if(!this.props.hidden){
return (<ErrorBoundary>
<Grid id="otherItemsParent" style={defaultStyle} container spacing={8} justify="space-around" direction={"vertical" == "vertical" ? "column" : "row"}>
<Grid item xs={12}>
<OtherItemsList value={this.props.value} index={this.props.index} />
</Grid>
<Grid item xs={12}>
<NewOtherItemContainer value={this.props.value} index={this.props.index} />
</Grid>
</Grid>
</ErrorBoundary>);
} else {
return null
}
}
shouldComponentUpdate(nextProps){
return this.props.hidden !== nextProps.hidden ||
this.props.index !== nextProps.index ||
this.props.value !== nextProps.value;
}
}
const Component = otherItemsParent;
const isBoundComponent = false
const boundProperty = '# $this'; // The expression used to determine the value of binding.
const readOnly = true; // If this component only calculates its value, or is bound and can update it.
const valueExpression = "";
const action = undefined;
const disabledWhenExpression = undefined;
const hiddenWhenExpression = undefined;
const selectionCountExpression = 0;
const connected = connect((state, ownProps) => {
const context = {
$state: calculateStateProjection(state),
$model: models,
$this: ownProps.value,
$index: ownProps.index
};
const interpretedBoundProperty = interpreter.interpret(boundProperty, context);
const value = (()=>{
if (isBoundComponent){
return _.get(context, interpretedBoundProperty);
} else {
return interpreter.interpret(interpretedBoundProperty, context);
}
})();
const maxSelections = interpreter.interpret(selectionCountExpression, context);
const options = (interpreter.interpret(valueExpression, context) || []).map(opt => {
if(_.isArray(opt)){
return opt.map(evaluateObjectProperties.bind(null, context));
} else if(_.isObject(opt)) {
return evaluateObjectProperties(context, opt);
} else {
return opt;
}
}); | const disabled = interpreter.interpret(disabledWhenExpression, context);
const hidden = interpreter.interpret(hiddenWhenExpression, context);
const clickAction = interpreter.interpret(action, context);
return {
clickAction,
disabled,
hidden,
options,
maxSelections,
boundProperty: interpretedBoundProperty,
value: _.isNil(value) ? "" : value
};
}, (dispatch) => {
return {dispatch};
})(Component);
function determineValues(expression, state, value){
if (!expression){
return [];
}
const expressionTokens = expression.split(".");
switch(expressionTokens[0]){
case "model":
return _.get(models, expressionTokens.slice(1).join("."));
case "character":
return _.get(state, expression, []);
}
}
function renderOption(value) {
return value.choices + " from " + value.options.join(", ");
}
export default connected; | |
process.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Working with processes.
#![stable(feature = "process", since = "1.0.0")]
#![allow(non_upper_case_globals)]
use prelude::v1::*;
use io::prelude::*;
use ffi::AsOsStr;
use fmt;
use io::{self, Error, ErrorKind};
use path::AsPath;
use libc;
use sync::mpsc::{channel, Receiver};
use sys::pipe2::{self, AnonPipe};
use sys::process2::Process as ProcessImp;
use sys::process2::Command as CommandImp;
use sys::process2::ExitStatus as ExitStatusImp;
use sys_common::{AsInner, AsInnerMut};
use thread;
/// Representation of a running or exited child process.
///
/// This structure is used to represent and manage child processes. A child
/// process is created via the `Command` struct, which configures the spawning
/// process and can itself be constructed using a builder-style interface.
///
/// # Example
///
/// ```should_fail
/// # #![feature(process)]
///
/// use std::process::Command;
///
/// let output = Command::new("/bin/cat").arg("file.txt").output().unwrap_or_else(|e| {
/// panic!("failed to execute child: {}", e)
/// });
/// let contents = output.stdout;
/// assert!(output.status.success());
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub struct Child {
handle: ProcessImp,
/// None until wait() or wait_with_output() is called.
status: Option<ExitStatusImp>,
/// The handle for writing to the child's stdin, if it has been captured
#[stable(feature = "process", since = "1.0.0")]
pub stdin: Option<ChildStdin>,
/// The handle for reading from the child's stdout, if it has been captured
#[stable(feature = "process", since = "1.0.0")]
pub stdout: Option<ChildStdout>,
/// The handle for reading from the child's stderr, if it has been captured
#[stable(feature = "process", since = "1.0.0")]
pub stderr: Option<ChildStderr>,
}
/// A handle to a child process's stdin
#[stable(feature = "process", since = "1.0.0")]
pub struct ChildStdin {
inner: AnonPipe
}
#[stable(feature = "process", since = "1.0.0")]
impl Write for ChildStdin {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.inner.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
/// A handle to a child process's stdout
#[stable(feature = "process", since = "1.0.0")]
pub struct ChildStdout {
inner: AnonPipe
}
#[stable(feature = "process", since = "1.0.0")]
impl Read for ChildStdout {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
}
/// A handle to a child process's stderr
#[stable(feature = "process", since = "1.0.0")]
pub struct ChildStderr {
inner: AnonPipe
}
#[stable(feature = "process", since = "1.0.0")]
impl Read for ChildStderr {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
}
/// The `Command` type acts as a process builder, providing fine-grained control
/// over how a new process should be spawned. A default configuration can be
/// generated using `Command::new(program)`, where `program` gives a path to the
/// program to be executed. Additional builder methods allow the configuration
/// to be changed (for example, by adding arguments) prior to spawning:
///
/// ```
/// use std::process::Command;
///
/// let output = Command::new("sh").arg("-c").arg("echo hello").output().unwrap_or_else(|e| {
/// panic!("failed to execute process: {}", e)
/// });
/// let hello = output.stdout;
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub struct Command {
inner: CommandImp,
// Details explained in the builder methods
stdin: Option<StdioImp>,
stdout: Option<StdioImp>,
stderr: Option<StdioImp>,
}
impl Command {
/// Constructs a new `Command` for launching the program at
/// path `program`, with the following default configuration:
///
/// * No arguments to the program
/// * Inherit the current process's environment
/// * Inherit the current process's working directory
/// * Inherit stdin/stdout/stderr for `run` or `status`, but create pipes for `output`
///
/// Builder methods are provided to change these defaults and
/// otherwise configure the process.
#[stable(feature = "process", since = "1.0.0")]
pub fn new<S: AsOsStr + ?Sized>(program: &S) -> Command {
Command {
inner: CommandImp::new(program.as_os_str()),
stdin: None,
stdout: None,
stderr: None,
}
}
/// Add an argument to pass to the program.
#[stable(feature = "process", since = "1.0.0")]
pub fn arg<S: AsOsStr + ?Sized>(&mut self, arg: &S) -> &mut Command {
self.inner.arg(arg.as_os_str());
self
}
/// Add multiple arguments to pass to the program.
#[stable(feature = "process", since = "1.0.0")]
pub fn args<S: AsOsStr>(&mut self, args: &[S]) -> &mut Command {
self.inner.args(args.iter().map(AsOsStr::as_os_str));
self
}
/// Inserts or updates an environment variable mapping.
///
/// Note that environment variable names are case-insensitive (but case-preserving) on Windows,
/// and case-sensitive on all other platforms.
#[stable(feature = "process", since = "1.0.0")]
pub fn env<K: ?Sized, V: ?Sized>(&mut self, key: &K, val: &V) -> &mut Command
where K: AsOsStr, V: AsOsStr
{
self.inner.env(key.as_os_str(), val.as_os_str());
self
}
/// Removes an environment variable mapping.
#[stable(feature = "process", since = "1.0.0")]
pub fn env_remove<K: ?Sized + AsOsStr>(&mut self, key: &K) -> &mut Command {
self.inner.env_remove(key.as_os_str());
self
}
/// Clears the entire environment map for the child process.
#[stable(feature = "process", since = "1.0.0")]
pub fn env_clear(&mut self) -> &mut Command {
self.inner.env_clear();
self
}
/// Set the working directory for the child process.
#[stable(feature = "process", since = "1.0.0")]
pub fn current_dir<P: AsPath + ?Sized>(&mut self, dir: &P) -> &mut Command {
self.inner.cwd(dir.as_path().as_os_str());
self
}
/// Configuration for the child process's stdin handle (file descriptor 0).
/// Defaults to `CreatePipe(true, false)` so the input can be written to.
#[stable(feature = "process", since = "1.0.0")]
pub fn stdin(&mut self, cfg: Stdio) -> &mut Command {
self.stdin = Some(cfg.0);
self
}
/// Configuration for the child process's stdout handle (file descriptor 1).
/// Defaults to `CreatePipe(false, true)` so the output can be collected.
#[stable(feature = "process", since = "1.0.0")]
pub fn stdout(&mut self, cfg: Stdio) -> &mut Command {
self.stdout = Some(cfg.0);
self
}
/// Configuration for the child process's stderr handle (file descriptor 2).
/// Defaults to `CreatePipe(false, true)` so the output can be collected.
#[stable(feature = "process", since = "1.0.0")]
pub fn stderr(&mut self, cfg: Stdio) -> &mut Command {
self.stderr = Some(cfg.0);
self
}
fn spawn_inner(&self, default_io: StdioImp) -> io::Result<Child> {
let (their_stdin, our_stdin) = try!(
setup_io(self.stdin.as_ref().unwrap_or(&default_io), 0, true)
);
let (their_stdout, our_stdout) = try!(
setup_io(self.stdout.as_ref().unwrap_or(&default_io), 1, false)
);
let (their_stderr, our_stderr) = try!(
setup_io(self.stderr.as_ref().unwrap_or(&default_io), 2, false)
);
match ProcessImp::spawn(&self.inner, their_stdin, their_stdout, their_stderr) {
Err(e) => Err(e),
Ok(handle) => Ok(Child {
handle: handle,
status: None,
stdin: our_stdin.map(|fd| ChildStdin { inner: fd }),
stdout: our_stdout.map(|fd| ChildStdout { inner: fd }),
stderr: our_stderr.map(|fd| ChildStderr { inner: fd }),
})
}
}
/// Executes the command as a child process, returning a handle to it.
///
/// By default, stdin, stdout and stderr are inherited by the parent.
#[stable(feature = "process", since = "1.0.0")]
pub fn spawn(&mut self) -> io::Result<Child> {
self.spawn_inner(StdioImp::Inherit)
}
/// Executes the command as a child process, waiting for it to finish and
/// collecting all of its output.
///
/// By default, stdin, stdout and stderr are captured (and used to
/// provide the resulting output).
///
/// # Example
///
/// ```
/// # #![feature(process)]
/// use std::process::Command;
///
/// let output = Command::new("cat").arg("foot.txt").output().unwrap_or_else(|e| {
/// panic!("failed to execute process: {}", e)
/// });
///
/// println!("status: {}", output.status);
/// println!("stdout: {}", String::from_utf8_lossy(output.stdout.as_slice()));
/// println!("stderr: {}", String::from_utf8_lossy(output.stderr.as_slice()));
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub fn output(&mut self) -> io::Result<Output> {
self.spawn_inner(StdioImp::Piped).and_then(|p| p.wait_with_output())
}
/// Executes a command as a child process, waiting for it to finish and
/// collecting its exit status.
///
/// By default, stdin, stdout and stderr are inherited by the parent.
///
/// # Example
///
/// ```
/// # #![feature(process)]
/// use std::process::Command;
///
/// let status = Command::new("ls").status().unwrap_or_else(|e| {
/// panic!("failed to execute process: {}", e)
/// });
///
/// println!("process exited with: {}", status);
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub fn status(&mut self) -> io::Result<ExitStatus> {
self.spawn().and_then(|mut p| p.wait())
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for Command {
/// Format the program and arguments of a Command for display. Any
/// non-utf8 data is lossily converted using the utf8 replacement
/// character.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f, "{:?}", self.inner.program));
for arg in &self.inner.args {
try!(write!(f, " {:?}", arg));
}
Ok(())
}
}
impl AsInner<CommandImp> for Command {
fn as_inner(&self) -> &CommandImp { &self.inner }
}
impl AsInnerMut<CommandImp> for Command {
fn as_inner_mut(&mut self) -> &mut CommandImp { &mut self.inner }
}
fn setup_io(io: &StdioImp, fd: libc::c_int, readable: bool)
-> io::Result<(Option<AnonPipe>, Option<AnonPipe>)>
{
use self::StdioImp::*;
Ok(match *io {
Null => {
(None, None)
}
Inherit => {
(Some(AnonPipe::from_fd(fd)), None)
}
Piped => {
let (reader, writer) = try!(unsafe { pipe2::anon_pipe() });
if readable {
(Some(reader), Some(writer))
} else {
(Some(writer), Some(reader))
}
}
})
}
/// The output of a finished process.
#[derive(PartialEq, Eq, Clone)]
#[stable(feature = "process", since = "1.0.0")]
pub struct Output {
/// The status (exit code) of the process.
#[stable(feature = "process", since = "1.0.0")]
pub status: ExitStatus,
/// The data that the process wrote to stdout.
#[stable(feature = "process", since = "1.0.0")]
pub stdout: Vec<u8>,
/// The data that the process wrote to stderr.
#[stable(feature = "process", since = "1.0.0")]
pub stderr: Vec<u8>,
}
/// Describes what to do with a standard io stream for a child process.
#[stable(feature = "process", since = "1.0.0")]
pub struct Stdio(StdioImp);
// The internal enum for stdio setup; see below for descriptions.
#[derive(Clone)]
enum StdioImp {
Piped,
Inherit,
Null,
}
impl Stdio {
/// A new pipe should be arranged to connect the parent and child processes.
#[unstable(feature = "process_capture")]
#[deprecated(since = "1.0.0", reason = "renamed to `Stdio::piped`")]
pub fn capture() -> Stdio { Stdio::piped() }
/// A new pipe should be arranged to connect the parent and child processes.
#[stable(feature = "process", since = "1.0.0")]
pub fn piped() -> Stdio { Stdio(StdioImp::Piped) }
/// The child inherits from the corresponding parent descriptor.
#[stable(feature = "process", since = "1.0.0")]
pub fn inherit() -> Stdio { Stdio(StdioImp::Inherit) }
/// This stream will be ignored. This is the equivalent of attaching the
/// stream to `/dev/null`
#[stable(feature = "process", since = "1.0.0")]
pub fn null() -> Stdio { Stdio(StdioImp::Null) }
}
/// Describes the result of a process after it has terminated.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
#[stable(feature = "process", since = "1.0.0")]
pub struct ExitStatus(ExitStatusImp);
impl ExitStatus {
/// Was termination successful? Signal termination is not considered a success,
/// and success is defined as a zero exit status.
#[stable(feature = "process", since = "1.0.0")]
pub fn success(&self) -> bool {
self.0.success()
}
/// Return the exit code of the process, if any.
///
/// On Unix, this will return `None` if the process was terminated
/// by a signal; `std::os::unix` provides an extension trait for
/// extracting the signal and other details from the `ExitStatus`.
#[stable(feature = "process", since = "1.0.0")]
pub fn code(&self) -> Option<i32> {
self.0.code()
}
}
impl AsInner<ExitStatusImp> for ExitStatus {
fn as_inner(&self) -> &ExitStatusImp { &self.0 }
}
#[stable(feature = "process", since = "1.0.0")]
impl fmt::Display for ExitStatus {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl Child {
/// Forces the child to exit. This is equivalent to sending a
/// SIGKILL on unix platforms.
#[stable(feature = "process", since = "1.0.0")]
pub fn kill(&mut self) -> io::Result<()> {
#[cfg(unix)] fn collect_status(p: &mut Child) {
// On Linux (and possibly other unices), a process that has exited will
// continue to accept signals because it is "defunct". The delivery of
// signals will only fail once the child has been reaped. For this
// reason, if the process hasn't exited yet, then we attempt to collect
// their status with WNOHANG.
if p.status.is_none() {
match p.handle.try_wait() {
Some(status) => { p.status = Some(status); }
None => {}
}
}
}
#[cfg(windows)] fn collect_status(_p: &mut Child) {}
collect_status(self);
// if the process has finished, and therefore had waitpid called,
// and we kill it, then on unix we might end up killing a
// newer process that happens to have the same (re-used) id
if self.status.is_some() {
return Err(Error::new(
ErrorKind::InvalidInput,
"invalid argument: can't kill an exited process",
None
))
}
unsafe { self.handle.kill() }
}
/// Wait for the child to exit completely, returning the status that it
/// exited with. This function will continue to have the same return value
/// after it has been called at least once.
///
/// The stdin handle to the child process, if any, will be closed
/// before waiting. This helps avoid deadlock: it ensures that the
/// child does not block waiting for input from the parent, while
/// the parent waits for the child to exit.
#[stable(feature = "process", since = "1.0.0")]
pub fn wait(&mut self) -> io::Result<ExitStatus> {
drop(self.stdin.take());
match self.status {
Some(code) => Ok(ExitStatus(code)),
None => {
let status = try!(self.handle.wait());
self.status = Some(status);
Ok(ExitStatus(status))
}
}
}
/// Simultaneously wait for the child to exit and collect all remaining
/// output on the stdout/stderr handles, returning a `Output`
/// instance.
///
/// The stdin handle to the child process, if any, will be closed
/// before waiting. This helps avoid deadlock: it ensures that the
/// child does not block waiting for input from the parent, while
/// the parent waits for the child to exit.
#[stable(feature = "process", since = "1.0.0")]
pub fn wait_with_output(mut self) -> io::Result<Output> {
drop(self.stdin.take());
fn read<T: Read + Send + 'static>(stream: Option<T>) -> Receiver<io::Result<Vec<u8>>> {
let (tx, rx) = channel();
match stream {
Some(stream) => {
thread::spawn(move || {
let mut stream = stream;
let mut ret = Vec::new();
let res = stream.read_to_end(&mut ret);
tx.send(res.map(|_| ret)).unwrap();
});
}
None => tx.send(Ok(Vec::new())).unwrap()
}
rx
}
let stdout = read(self.stdout.take());
let stderr = read(self.stderr.take());
let status = try!(self.wait());
Ok(Output {
status: status,
stdout: stdout.recv().unwrap().unwrap_or(Vec::new()),
stderr: stderr.recv().unwrap().unwrap_or(Vec::new()),
})
}
}
#[cfg(test)]
mod tests {
use io::ErrorKind;
use io::prelude::*;
use prelude::v1::{Ok, Err, drop, Some, Vec};
use prelude::v1::{String, Clone};
use prelude::v1::{SliceExt, Str, StrExt, AsSlice, ToString, GenericPath};
use old_path;
use old_io::fs::PathExtensions;
use rt::running_on_valgrind;
use str;
use super::{Command, Output, Stdio};
// FIXME(#10380) these tests should not all be ignored on android.
#[cfg(not(target_os="android"))]
#[test]
fn smoke() {
let p = Command::new("true").spawn();
assert!(p.is_ok());
let mut p = p.unwrap();
assert!(p.wait().unwrap().success());
}
#[cfg(not(target_os="android"))]
#[test]
fn smoke_failure() {
match Command::new("if-this-is-a-binary-then-the-world-has-ended").spawn() {
Ok(..) => panic!(),
Err(..) => {}
}
}
#[cfg(not(target_os="android"))]
#[test]
fn exit_reported_right() {
let p = Command::new("false").spawn();
assert!(p.is_ok());
let mut p = p.unwrap();
assert!(p.wait().unwrap().code() == Some(1));
drop(p.wait().clone());
}
#[cfg(all(unix, not(target_os="android")))]
#[test]
fn signal_reported_right() {
use os::unix::ExitStatusExt;
let p = Command::new("/bin/sh").arg("-c").arg("kill -9 $$").spawn();
assert!(p.is_ok());
let mut p = p.unwrap();
match p.wait().unwrap().signal() {
Some(9) => {},
result => panic!("not terminated by signal 9 (instead, {:?})", result),
}
}
pub fn run_output(mut cmd: Command) -> String {
let p = cmd.spawn();
assert!(p.is_ok());
let mut p = p.unwrap();
assert!(p.stdout.is_some());
let mut ret = String::new();
p.stdout.as_mut().unwrap().read_to_string(&mut ret).unwrap();
assert!(p.wait().unwrap().success());
return ret;
}
#[cfg(not(target_os="android"))]
#[test]
fn stdout_works() {
let mut cmd = Command::new("echo");
cmd.arg("foobar").stdout(Stdio::piped());
assert_eq!(run_output(cmd), "foobar\n");
}
#[cfg(all(unix, not(target_os="android")))]
#[test]
fn set_current_dir_works() {
let mut cmd = Command::new("/bin/sh");
cmd.arg("-c").arg("pwd")
.current_dir("/")
.stdout(Stdio::piped());
assert_eq!(run_output(cmd), "/\n");
}
#[cfg(all(unix, not(target_os="android")))]
#[test]
fn stdin_works() {
let mut p = Command::new("/bin/sh")
.arg("-c").arg("read line; echo $line")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn().unwrap();
p.stdin.as_mut().unwrap().write("foobar".as_bytes()).unwrap();
drop(p.stdin.take());
let mut out = String::new();
p.stdout.as_mut().unwrap().read_to_string(&mut out).unwrap();
assert!(p.wait().unwrap().success());
assert_eq!(out, "foobar\n");
}
#[cfg(all(unix, not(target_os="android")))]
#[test]
fn uid_works() {
use os::unix::*;
use libc;
let mut p = Command::new("/bin/sh")
.arg("-c").arg("true")
.uid(unsafe { libc::getuid() })
.gid(unsafe { libc::getgid() })
.spawn().unwrap();
assert!(p.wait().unwrap().success());
}
#[cfg(all(unix, not(target_os="android")))]
#[test]
fn uid_to_root_fails() {
use os::unix::*;
use libc;
// if we're already root, this isn't a valid test. Most of the bots run
// as non-root though (android is an exception).
if unsafe { libc::getuid() == 0 } { return }
assert!(Command::new("/bin/ls").uid(0).gid(0).spawn().is_err());
}
#[cfg(not(target_os="android"))]
#[test]
fn test_process_status() {
let mut status = Command::new("false").status().unwrap();
assert!(status.code() == Some(1));
status = Command::new("true").status().unwrap();
assert!(status.success());
}
#[test]
fn test_process_output_fail_to_start() {
match Command::new("/no-binary-by-this-name-should-exist").output() {
Err(e) => assert_eq!(e.kind(), ErrorKind::FileNotFound),
Ok(..) => panic!()
}
}
#[cfg(not(target_os="android"))]
#[test]
fn test_process_output_output() {
let Output {status, stdout, stderr}
= Command::new("echo").arg("hello").output().unwrap();
let output_str = str::from_utf8(stdout.as_slice()).unwrap();
assert!(status.success());
assert_eq!(output_str.trim().to_string(), "hello");
// FIXME #7224
if !running_on_valgrind() {
assert_eq!(stderr, Vec::new());
}
}
#[cfg(not(target_os="android"))]
#[test]
fn test_process_output_error() {
let Output {status, stdout, stderr}
= Command::new("mkdir").arg(".").output().unwrap();
assert!(status.code() == Some(1));
assert_eq!(stdout, Vec::new());
assert!(!stderr.is_empty());
}
#[cfg(not(target_os="android"))]
#[test]
fn test_finish_once() {
let mut prog = Command::new("false").spawn().unwrap();
assert!(prog.wait().unwrap().code() == Some(1));
}
#[cfg(not(target_os="android"))]
#[test]
fn test_finish_twice() {
let mut prog = Command::new("false").spawn().unwrap();
assert!(prog.wait().unwrap().code() == Some(1));
assert!(prog.wait().unwrap().code() == Some(1));
}
#[cfg(not(target_os="android"))]
#[test]
fn test_wait_with_output_once() {
let prog = Command::new("echo").arg("hello").stdout(Stdio::piped())
.spawn().unwrap();
let Output {status, stdout, stderr} = prog.wait_with_output().unwrap();
let output_str = str::from_utf8(stdout.as_slice()).unwrap();
assert!(status.success());
assert_eq!(output_str.trim().to_string(), "hello");
// FIXME #7224
if !running_on_valgrind() {
assert_eq!(stderr, Vec::new());
}
}
#[cfg(all(unix, not(target_os="android")))]
pub fn pwd_cmd() -> Command {
Command::new("pwd")
}
#[cfg(target_os="android")]
pub fn pwd_cmd() -> Command {
let mut cmd = Command::new("/system/bin/sh");
cmd.arg("-c").arg("pwd");
cmd
}
#[cfg(windows)]
pub fn pwd_cmd() -> Command {
let mut cmd = Command::new("cmd");
cmd.arg("/c").arg("cd");
cmd
}
#[test]
fn test_keep_current_working_dir() {
use os;
let prog = pwd_cmd().spawn().unwrap();
let output = String::from_utf8(prog.wait_with_output().unwrap().stdout).unwrap();
let parent_dir = os::getcwd().unwrap();
let child_dir = old_path::Path::new(output.trim());
let parent_stat = parent_dir.stat().unwrap();
let child_stat = child_dir.stat().unwrap();
assert_eq!(parent_stat.unstable.device, child_stat.unstable.device);
assert_eq!(parent_stat.unstable.inode, child_stat.unstable.inode);
}
#[test]
fn test_change_working_directory() {
use os;
// test changing to the parent of os::getcwd() because we know
// the path exists (and os::getcwd() is not expected to be root)
let parent_dir = os::getcwd().unwrap().dir_path();
let result = pwd_cmd().current_dir(&parent_dir).output().unwrap();
let output = String::from_utf8(result.stdout).unwrap();
let child_dir = old_path::Path::new(output.trim());
let parent_stat = parent_dir.stat().unwrap();
let child_stat = child_dir.stat().unwrap();
assert_eq!(parent_stat.unstable.device, child_stat.unstable.device);
assert_eq!(parent_stat.unstable.inode, child_stat.unstable.inode);
}
#[cfg(all(unix, not(target_os="android")))]
pub fn env_cmd() -> Command {
Command::new("env")
}
#[cfg(target_os="android")]
pub fn env_cmd() -> Command {
let mut cmd = Command::new("/system/bin/sh");
cmd.arg("-c").arg("set");
cmd
}
#[cfg(windows)]
pub fn env_cmd() -> Command {
let mut cmd = Command::new("cmd");
cmd.arg("/c").arg("set");
cmd
}
#[cfg(not(target_os="android"))]
#[test]
fn test_inherit_env() {
use std::env;
if running_on_valgrind() { return; }
let result = env_cmd().output().unwrap();
let output = String::from_utf8(result.stdout).unwrap();
for (ref k, ref v) in env::vars() {
// don't check windows magical empty-named variables
assert!(k.is_empty() ||
output.contains(format!("{}={}", *k, *v).as_slice()),
"output doesn't contain `{}={}`\n{}",
k, v, output);
}
}
#[cfg(target_os="android")]
#[test]
fn test_inherit_env() {
use os;
if running_on_valgrind() { return; }
let mut result = env_cmd().output().unwrap();
let output = String::from_utf8(result.stdout).unwrap();
let r = os::env();
for &(ref k, ref v) in &r {
// don't check android RANDOM variables
if *k != "RANDOM".to_string() {
assert!(output.contains(format!("{}={}",
*k,
*v).as_slice()) ||
output.contains(format!("{}=\'{}\'",
*k,
*v).as_slice()));
}
}
}
#[test]
fn test_override_env() {
use env;
// In some build environments (such as chrooted Nix builds), `env` can
// only be found in the explicitly-provided PATH env variable, not in
// default places such as /bin or /usr/bin. So we need to pass through
// PATH to our sub-process.
let mut cmd = env_cmd();
cmd.env_clear().env("RUN_TEST_NEW_ENV", "123");
if let Some(p) = env::var_os("PATH") {
cmd.env("PATH", &p);
}
let result = cmd.output().unwrap();
let output = String::from_utf8_lossy(result.stdout.as_slice()).to_string();
assert!(output.contains("RUN_TEST_NEW_ENV=123"),
"didn't find RUN_TEST_NEW_ENV inside of:\n\n{}", output);
}
#[test]
fn test_add_to_env() {
let result = env_cmd().env("RUN_TEST_NEW_ENV", "123").output().unwrap();
let output = String::from_utf8_lossy(result.stdout.as_slice()).to_string();
assert!(output.contains("RUN_TEST_NEW_ENV=123"),
"didn't find RUN_TEST_NEW_ENV inside of:\n\n{}", output); | }
} | |
user.py | class User():
| def __init__(self, id, username, password):
self.id = id
self.username = username
self.password = password
self.rooms = [] |
|
System.py | # Copyright (c) 2020 Club Raiders Project
# https://github.com/HausReport/ClubRaiders
#
# SPDX-License-Identifier: BSD-3-Clause
import math
from datetime import datetime, timedelta
from craid.eddb.base.Aware import Aware
class System(Aware):
def __init__(self, jsonLine):
super().__init__(jsonLine['name'], jsonLine['id'])
self.x: float = float(jsonLine['x'])
self.y: float = float(jsonLine['y'])
self.z: float = float(jsonLine['z'])
self.needs_permit: bool = jsonLine.get('needs_permit')
self.updated_at: datetime = datetime.utcfromtimestamp(jsonLine['updated_at'])
def getX(self) -> float:
return self.x
def getY(self) -> float:
return self.y
def getZ(self) -> float:
return self.z
#
# Octant of galaxy measured from Etionses
#
def getOctant(self) -> int:
tmp: int = 0
if self.getX() > 49.5:
tmp += 1
if self.getY() > -104:
tmp += 2
if self.getZ() > 6.3:
tmp += 4
return tmp
def needsPermit(self) -> bool:
return self.needs_permit
def getUpdatedDateTime(self) -> datetime:
return self.updated_at
def getUpdatedString(self) -> str:
days: int = self.getDaysSinceScouted()
if days <= 1:
return "Scouted within the last day."
if days <= 6:
return "Scouted within the last " + str(days) + " days."
weeks = math.ceil(days / 7)
if weeks <= 6:
return "*Scouted " + str(weeks) + " weeks ago.*"
return "**Really, really needs to be scouted.**"
def getDaysSinceScouted(self) -> int:
upd = self.getUpdatedDateTime()
now = datetime.utcnow()
time_elapsed: timedelta = now - upd
days = time_elapsed.days
return days
def getInaraNearestShipyardUrl(self):
return "https://inara.cz/galaxy-nearest/14/" + str(self.get_id())
def getInaraSystemUrl(self):
return "https://inara.cz/galaxy-starsystem/" + str(self.get_id()) + "/"
def getEddbSystemUrl(self):
return "https://eddb.io/system/" + str(self.get_id())
def getRoadToRichesUrl(self):
return "http://edtools.ddns.net/expl.php?s=" # + urllib.parse.quote(self.get_name())
def getRegionColor(self):
from craid.club.regions.RegionFactory import RegionFactory
return RegionFactory.getRegionColor(self)
def getRegionName(self):
from craid.club.regions.RegionFactory import RegionFactory
return RegionFactory.getRegionName(self)
def getNearestRegionMessage(self):
from craid.club.regions.RegionFactory import RegionFactory
return RegionFactory.getNearestRegionMessage(self)
def getRegionNumber(self):
| from craid.club.regions.RegionFactory import RegionFactory
return RegionFactory.getRegionNumber(self) |
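# Illustrative usage, not part of the original module: build a System from an
# EDDB-style record (the field values below are made-up examples).
example_system = System({'name': 'Etionses', 'id': 1, 'x': 49.5, 'y': -104.0,
                         'z': 6.3, 'needs_permit': False, 'updated_at': 1590000000})
example_system.getOctant()            # 0: no coordinate exceeds its threshold
example_system.needsPermit()          # False
example_system.getDaysSinceScouted()  # days elapsed since the 2020 timestamp above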
|
basic.rs | // Copyright (c) 2021 Via Technology Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use opencl3::command_queue::{CommandQueue, CL_QUEUE_PROFILING_ENABLE};
use opencl3::context::Context;
use opencl3::device::{get_all_devices, Device, CL_DEVICE_TYPE_GPU};
use opencl3::kernel::{ExecuteKernel, Kernel};
use opencl3::memory::{Buffer, CL_MEM_READ_ONLY, CL_MEM_WRITE_ONLY};
use opencl3::program::Program;
use opencl3::types::{cl_event, cl_float, CL_BLOCKING, CL_NON_BLOCKING};
use opencl3::Result;
use std::ptr;
const PROGRAM_SOURCE: &str = r#"
kernel void saxpy_float (global float* z,
global float const* x,
global float const* y,
float a)
{
const size_t i = get_global_id(0);
z[i] = a*x[i] + y[i];
}"#;
const KERNEL_NAME: &str = "saxpy_float";
fn | () -> Result<()> {
// Find a usable device for this application
let device_id = *get_all_devices(CL_DEVICE_TYPE_GPU)?
.first()
.expect("no device found in platform");
let device = Device::new(device_id);
// Create a Context on an OpenCL device
let context = Context::from_device(&device).expect("Context::from_device failed");
// Create a command_queue on the Context's device
let queue = CommandQueue::create(
&context,
context.default_device(),
CL_QUEUE_PROFILING_ENABLE,
)
.expect("CommandQueue::create failed");
// Build the OpenCL program source and create the kernel.
let program = Program::create_and_build_from_source(&context, PROGRAM_SOURCE, "")
.expect("Program::create_and_build_from_source failed");
let kernel = Kernel::create(&program, KERNEL_NAME).expect("Kernel::create failed");
/////////////////////////////////////////////////////////////////////
// Compute data
// The input data
const ARRAY_SIZE: usize = 1000;
let ones: [cl_float; ARRAY_SIZE] = [1.0; ARRAY_SIZE];
let mut sums: [cl_float; ARRAY_SIZE] = [0.0; ARRAY_SIZE];
for i in 0..ARRAY_SIZE {
sums[i] = 1.0 + 1.0 * i as cl_float;
}
// Create OpenCL device buffers
let mut x =
Buffer::<cl_float>::create(&context, CL_MEM_WRITE_ONLY, ARRAY_SIZE, ptr::null_mut())?;
let mut y =
Buffer::<cl_float>::create(&context, CL_MEM_WRITE_ONLY, ARRAY_SIZE, ptr::null_mut())?;
let z = Buffer::<cl_float>::create(&context, CL_MEM_READ_ONLY, ARRAY_SIZE, ptr::null_mut())?;
// Blocking write
let _x_write_event = queue.enqueue_write_buffer(&mut x, CL_BLOCKING, 0, &ones, &[])?;
// Non-blocking write, wait for y_write_event
let y_write_event = queue.enqueue_write_buffer(&mut y, CL_NON_BLOCKING, 0, &sums, &[])?;
// a value for the kernel function
let a: cl_float = 300.0;
// Use the ExecuteKernel builder to set the kernel buffer and
// cl_float value arguments, before setting the one dimensional
// global_work_size for the call to enqueue_nd_range.
// Unwraps the Result to get the kernel execution event.
let kernel_event = ExecuteKernel::new(&kernel)
.set_arg(&z)
.set_arg(&x)
.set_arg(&y)
.set_arg(&a)
.set_global_work_size(ARRAY_SIZE)
.set_wait_event(&y_write_event)
.enqueue_nd_range(&queue)?;
let mut events: Vec<cl_event> = Vec::default();
events.push(kernel_event.get());
// Create a results array to hold the results from the OpenCL device
// and enqueue a read command to read the device buffer into the array
// after the kernel event completes.
let mut results: [cl_float; ARRAY_SIZE] = [0.0; ARRAY_SIZE];
let read_event = queue.enqueue_read_buffer(&z, CL_NON_BLOCKING, 0, &mut results, &events)?;
// Wait for the read_event to complete.
read_event.wait()?;
// Output the first and last results
println!("results front: {}", results[0]);
println!("results back: {}", results[ARRAY_SIZE - 1]);
// Calculate the kernel duration, from the kernel_event
let start_time = kernel_event.profiling_command_start()?;
let end_time = kernel_event.profiling_command_end()?;
let duration = end_time - start_time;
println!("kernel execution duration (ns): {}", duration);
Ok(())
}
| main |
ADSref.py | import ads
ads.config.token = 'my token'
import numpy as np
# Filenames
## Enter the filename for first-author publications here:
first_author = "first_author.bib"
## Enter the filename for co-authored publications here:
co_author = "co_author.bib"
# Function Declarations
def extract_bibcodes(filename):
"""Takes a .bib filename, looks for bibcodes on the first line of each entry, and parses into a list."""
f = open(filename)
full_list = f.readlines()
bibcodes = []
# drop yCat, arxiv, PhDT, and other non-refereed entries
# Workaround: I couldn't get the API search to accept property:refereed (or property=refereed), so filter bibcodes manually instead
exclude = ['arXiv','tmp','yCat','PhDT','AAS','ASPC','BSRSL','conf','EPJWC','IAUFM','IAUGA','IAUS','hst','iue','jwst','spzr','prop']
for line in full_list:
if line[0] == "@":
if not any(x in line for x in exclude):
bibcodes.append(line.split("{")[1].replace(",\n",""))
return bibcodes
def author_format(authors):
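    # One author: the name alone; two: "A \& B"; three or more: "A et al."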
if len(authors) == 1:
a = authors[0]
elif len(authors) == 2:
a = authors[0] + " \& " + authors[1]
else:
a = authors[0] + ' et al.'
return a
def journal_name(bibcode):
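    # ADS bibcodes start with a 4-digit year followed by the journal abbreviation
    # (e.g. "2020ApJ..."), so drop the year and keep the rest of the first dot-separated field.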
return bibcode.split(".")[0][4:].replace("&","\&")
def adsurl(bibcode):
return 'https://ui.adsabs.harvard.edu/abs/' + bibcode
def latex_title_greek(title):
greek_dict = {"α":r"$\alpha$", "β":r"$\beta$", "γ":r"$\gamma$", "δ":r"$\delta$", "ε":r"$\epsilon$", "ζ":r"$\zeta$", "η":r"$\eta$", "ι":r"$\iota$", "θ":r"$\theta$", "κ":r"$\kappa$", "λ":r"$\lambda$", "μ":r"$\mu$", "ν":r"$\nu$", "ξ":r"$\xi$", "π":r"$\pi$", "ρ":r"$\rho$", "σ":r"$\sigma$", "τ":r"$\tau$", "φ":r"$\phi$", "χ":r"$\chi$", "ψ":r"$\psi$", "ω":r"$\omega$"}
for key in greek_dict.keys():
title = title.replace(key, greek_dict[key])
return title
def citation_formatter(cites):
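    # Pad single-digit counts with \phantom{1} so they line up with two-digit counts in the PDF.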
if cites is None:
return ""
elif cites == 0:
return ""
else:
if cites < 10:
            return f"Cited: \\phantom{{1}}{cites}"
else:
return f"Cited: {cites}"
def latex_strings(paper_list):
output = []
n = len(paper_list)
for i,p in enumerate(paper_list):
title = p.title[0]
entry = "\\textbf{" + f"{n-i}" + "}. " + '\\' + 'href{' + adsurl(p.bibcode) + "}{" + f"{latex_title_greek(title)}" + "}" + "\\\\"
entry += author_format(p.author)
entry += f" ({p.year}) "
entry += journal_name(p.bibcode)
entry += f" {p.volume},"
entry += f" {p.page[0]}."
entry += ' \\hspace*{\\fill}' + citation_formatter(p.citation_count) + "\\vspace{1mm}" + "\\\\"
output.append(entry)
output[0] = "\\noindent " + output[0]
return output
def export_latex(filename, | _list):
f = open(filename,"w")
for line in latex_string_list:
f.write(line+"\n")
f.close()
return "Saved."
# Parse bibcodes
print("Parsing bibcodes...")
bibcodes = extract_bibcodes(first_author)
co_bibcodes = extract_bibcodes(co_author)
# Search for papers and their attributes from bibcodes
print("Querying the ADS API for paper metadata... This may take a while if there are many entries...")
papers = [list(ads.SearchQuery(bibcode=bibcode, fl=['bibcode', 'title', 'author', 'year', 'volume', 'page', 'citation_count']))[0] for bibcode in bibcodes]
co_papers = [list(ads.SearchQuery(bibcode=bibcode, fl=['bibcode', 'title', 'author', 'year', 'volume', 'page', 'citation_count']))[0] for bibcode in co_bibcodes]
# Remove Errata
## Because Ew. And if anyone cares about the paper content they'll discover errata when they visit the ADS pages.
print("Dropping Errata, Corrigenda...")
# Filter rather than calling remove() while iterating, which would skip the entry after each removal
papers = [p for p in papers if "Erratum" not in p.title[0] and "Corrigendum" not in p.title[0]]
co_papers = [p for p in co_papers if "Erratum" not in p.title[0] and "Corrigendum" not in p.title[0]]
# Sum citations
first_author_cites = 0
co_author_cites = 0
for p in papers:
if p.citation_count is not None:
first_author_cites += p.citation_count
for p in co_papers:
if p.citation_count is not None:
co_author_cites += p.citation_count
# Compile LaTeX string
print("Compiling LaTeX strings...")
output = latex_strings(papers)
co_output = latex_strings(co_papers)
# Export to LaTeX
print("Exporting to LaTeX...")
export_latex("first_author.tex",output)
export_latex("co_author.tex",co_output)
print(f"\nThere are {len(papers)} first-author papers, and {len(co_papers)} co-authored papers.")
print(f"They have a total of {first_author_cites} and {co_author_cites} citations, respectively.")
print("\n\n.tex files prepared. Now run:\n")
print("\t pdflatex publications.tex\n\n\n") | latex_string |
mod.rs | //! Traits, helpers, and type definitions for core I/O functionality.
//!
//! The `std::io` module contains a number of common things you'll need
//! when doing input and output. The most core part of this module is
//! the [`Read`] and [`Write`] traits, which provide the
//! most general interface for reading and writing input and output.
//!
//! # Read and Write
//!
//! Because they are traits, [`Read`] and [`Write`] are implemented by a number
//! of other types, and you can implement them for your types too. As such,
//! you'll see a few different types of I/O throughout the documentation in
//! this module: [`File`]s, [`TcpStream`]s, and sometimes even [`Vec<T>`]s. For
//! example, [`Read`] adds a [`read`][`Read::read`] method, which we can use on
//! [`File`]s:
//!
//! ```no_run
//! use std::io;
//! use std::io::prelude::*;
//! use std::fs::File;
//!
//! fn main() -> io::Result<()> {
//! let mut f = File::open("foo.txt")?;
//! let mut buffer = [0; 10];
//!
//! // read up to 10 bytes
//! let n = f.read(&mut buffer)?;
//!
//! println!("The bytes: {:?}", &buffer[..n]);
//! Ok(())
//! }
//! ```
//!
//! [`Read`] and [`Write`] are so important, implementors of the two traits have a
//! nickname: readers and writers. So you'll sometimes see 'a reader' instead
//! of 'a type that implements the [`Read`] trait'. Much easier!
//!
//! ## Seek and BufRead
//!
//! Beyond that, there are two important traits that are provided: [`Seek`]
//! and [`BufRead`]. Both of these build on top of a reader to control
//! how the reading happens. [`Seek`] lets you control where the next byte is
//! coming from:
//!
//! ```no_run
//! use std::io;
//! use std::io::prelude::*;
//! use std::io::SeekFrom;
//! use std::fs::File;
//!
//! fn main() -> io::Result<()> {
//! let mut f = File::open("foo.txt")?;
//! let mut buffer = [0; 10];
//!
//! // skip to the last 10 bytes of the file
//! f.seek(SeekFrom::End(-10))?;
//!
//! // read up to 10 bytes
//! let n = f.read(&mut buffer)?;
//!
//! println!("The bytes: {:?}", &buffer[..n]);
//! Ok(())
//! }
//! ```
//!
//! [`BufRead`] uses an internal buffer to provide a number of other ways to read, but
//! to show it off, we'll need to talk about buffers in general. Keep reading!
//!
//! ## BufReader and BufWriter
//!
//! Byte-based interfaces are unwieldy and can be inefficient, as we'd need to be
//! making near-constant calls to the operating system. To help with this,
//! `std::io` comes with two structs, [`BufReader`] and [`BufWriter`], which wrap
//! readers and writers. The wrapper uses a buffer, reducing the number of
//! calls and providing nicer methods for accessing exactly what you want.
//!
//! For example, [`BufReader`] works with the [`BufRead`] trait to add extra
//! methods to any reader:
//!
//! ```no_run
//! use std::io;
//! use std::io::prelude::*;
//! use std::io::BufReader;
//! use std::fs::File;
//!
//! fn main() -> io::Result<()> {
//! let f = File::open("foo.txt")?;
//! let mut reader = BufReader::new(f);
//! let mut buffer = String::new();
//!
//! // read a line into buffer
//! reader.read_line(&mut buffer)?;
//!
//! println!("{}", buffer);
//! Ok(())
//! }
//! ```
//!
//! [`BufWriter`] doesn't add any new ways of writing; it just buffers every call
//! to [`write`][`Write::write`]:
//!
//! ```no_run
//! use std::io;
//! use std::io::prelude::*;
//! use std::io::BufWriter;
//! use std::fs::File;
//!
//! fn main() -> io::Result<()> {
//! let f = File::create("foo.txt")?;
//! {
//! let mut writer = BufWriter::new(f);
//!
//! // write a byte to the buffer
//! writer.write(&[42])?;
//!
//! } // the buffer is flushed once writer goes out of scope
//!
//! Ok(())
//! }
//! ```
//!
//! ## Standard input and output
//!
//! A very common source of input is standard input:
//!
//! ```no_run
//! use std::io;
//!
//! fn main() -> io::Result<()> {
//! let mut input = String::new();
//!
//! io::stdin().read_line(&mut input)?;
//!
//! println!("You typed: {}", input.trim());
//! Ok(())
//! }
//! ```
//!
//! Note that you cannot use the [`?` operator] in functions that do not return
//! a [`Result<T, E>`][`Result`]. Instead, you can call [`.unwrap()`]
//! or `match` on the return value to catch any possible errors:
//!
//! ```no_run
//! use std::io;
//!
//! let mut input = String::new();
//!
//! io::stdin().read_line(&mut input).unwrap();
//! ```
//!
//! And a very common source of output is standard output:
//!
//! ```no_run
//! use std::io;
//! use std::io::prelude::*;
//!
//! fn main() -> io::Result<()> {
//! io::stdout().write(&[42])?;
//! Ok(())
//! }
//! ```
//!
//! Of course, using [`io::stdout`] directly is less common than something like
//! [`println!`].
//!
//! ## Iterator types
//!
//! A large number of the structures provided by `std::io` are for various
//! ways of iterating over I/O. For example, [`Lines`] is used to split over
//! lines:
//!
//! ```no_run
//! use std::io;
//! use std::io::prelude::*;
//! use std::io::BufReader;
//! use std::fs::File;
//!
//! fn main() -> io::Result<()> {
//! let f = File::open("foo.txt")?;
//! let reader = BufReader::new(f);
//!
//! for line in reader.lines() {
//! println!("{}", line?);
//! }
//! Ok(())
//! }
//! ```
//!
//! ## Functions
//!
//! There are a number of [functions][functions-list] that offer access to various
//! features. For example, we can use three of these functions to copy everything
//! from standard input to standard output:
//!
//! ```no_run
//! use std::io;
//!
//! fn main() -> io::Result<()> {
//! io::copy(&mut io::stdin(), &mut io::stdout())?;
//! Ok(())
//! }
//! ```
//!
//! [functions-list]: #functions-1
//!
//! ## io::Result
//!
//! Last, but certainly not least, is [`io::Result`]. This type is used
//! as the return type of many `std::io` functions that can cause an error, and
//! can be returned from your own functions as well. Many of the examples in this
//! module use the [`?` operator]:
//!
//! ```
//! use std::io;
//!
//! fn read_input() -> io::Result<()> {
//! let mut input = String::new();
//!
//! io::stdin().read_line(&mut input)?;
//!
//! println!("You typed: {}", input.trim());
//!
//! Ok(())
//! }
//! ```
//!
//! The return type of `read_input()`, [`io::Result<()>`][`io::Result`], is a very
//! common type for functions which don't have a 'real' return value, but do want to
//! return errors if they happen. In this case, the only purpose of this function is
//! to read the line and print it, so we use `()`.
//!
//! ## Platform-specific behavior
//!
//! Many I/O functions throughout the standard library are documented to indicate
//! which library functions or syscalls they delegate to. This is done to help
//! applications both understand what's happening under the hood and investigate
//! any possibly unclear semantics. Note, however, that this is informative, not a binding
//! contract. The implementations of many of these functions are subject to change over
//! time and may call fewer or more syscalls/library functions.
//!
//! [`File`]: crate::fs::File
//! [`TcpStream`]: crate::net::TcpStream
//! [`Vec<T>`]: crate::vec::Vec
//! [`io::stdout`]: stdout
//! [`io::Result`]: crate::io::Result
//! [`?` operator]: ../../book/appendix-02-operators.html
//! [`Result`]: crate::result::Result
//! [`.unwrap()`]: crate::result::Result::unwrap
#![stable(feature = "rust1", since = "1.0.0")]
use crate::cmp;
use crate::fmt;
use crate::mem;
use crate::memchr;
use crate::ops::{Deref, DerefMut};
use crate::ptr;
use crate::slice;
use crate::str;
use crate::sys;
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::buffered::IntoInnerError;
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::buffered::{BufReader, BufWriter, LineWriter};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::cursor::Cursor;
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::error::{Error, ErrorKind, Result};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::stdio::{stderr, stdin, stdout, Stderr, Stdin, Stdout};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::stdio::{StderrLock, StdinLock, StdoutLock};
#[unstable(feature = "print_internals", issue = "none")]
pub use self::stdio::{_eprint, _print};
#[unstable(feature = "libstd_io_internals", issue = "42788")]
#[doc(no_inline, hidden)]
pub use self::stdio::{set_panic, set_print};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::util::{copy, empty, repeat, sink, Empty, Repeat, Sink};
mod buffered;
mod cursor;
mod error;
mod impls;
mod lazy;
pub mod prelude;
mod stdio;
mod util;
const DEFAULT_BUF_SIZE: usize = crate::sys_common::io::DEFAULT_BUF_SIZE;
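// RAII guard over a byte vector: when dropped it resets the vector's length to
// `len`, so a panic or early return in the readers below cannot leave the
// vector claiming bytes that were never validly written. See the comment above
// `append_to_string` for the full rationale.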
struct Guard<'a> {
buf: &'a mut Vec<u8>,
len: usize,
}
impl Drop for Guard<'_> {
fn drop(&mut self) {
unsafe {
self.buf.set_len(self.len);
}
}
}
// A few methods below (read_to_string, read_line) will append data into a
// `String` buffer, but we need to be pretty careful when doing this. The
// implementation will just call `.as_mut_vec()` and then delegate to a
// byte-oriented reading method, but we must ensure that when returning we never
// leave `buf` in a state such that it contains invalid UTF-8 in its bounds.
//
// To this end, we use an RAII guard (to protect against panics) which updates
// the length of the string when it is dropped. This guard initially truncates
// the string to the prior length and only after we've validated that the
// new contents are valid UTF-8 do we allow it to set a longer length.
//
// The unsafety in this function is twofold:
//
// 1. We're looking at the raw bytes of `buf`, so we take on the burden of UTF-8
// checks.
// 2. We're passing a raw buffer to the function `f`, and it is expected that
// the function only *appends* bytes to the buffer. We'll get undefined
// behavior if existing bytes are overwritten to have non-UTF-8 data.
fn append_to_string<F>(buf: &mut String, f: F) -> Result<usize>
where
F: FnOnce(&mut Vec<u8>) -> Result<usize>,
{
unsafe {
let mut g = Guard { len: buf.len(), buf: buf.as_mut_vec() };
let ret = f(g.buf);
if str::from_utf8(&g.buf[g.len..]).is_err() {
ret.and_then(|_| {
Err(Error::new(ErrorKind::InvalidData, "stream did not contain valid UTF-8"))
})
} else {
g.len = g.buf.len();
ret
}
}
}
// This uses an adaptive system to extend the vector when it fills. We want to
// avoid paying to allocate and zero a huge chunk of memory if the reader only
// has 4 bytes while still making large reads if the reader does have a ton
// of data to return. Simply tacking on an extra DEFAULT_BUF_SIZE space every
// time is 4,500 times (!) slower than a default reservation size of 32 if the
// reader has a very small amount of data to return.
//
// Because we're extending the buffer with uninitialized data for trusted
// readers, we need to make sure to truncate that if any of this panics.
fn read_to_end<R: Read + ?Sized>(r: &mut R, buf: &mut Vec<u8>) -> Result<usize> {
read_to_end_with_reservation(r, buf, |_| 32)
}
fn read_to_end_with_reservation<R, F>(
r: &mut R,
buf: &mut Vec<u8>,
mut reservation_size: F,
) -> Result<usize>
where
R: Read + ?Sized,
F: FnMut(&R) -> usize,
{
let start_len = buf.len();
let mut g = Guard { len: buf.len(), buf };
let ret;
loop {
if g.len == g.buf.len() {
unsafe {
// FIXME(danielhenrymantilla): #42788
//
// - This creates a (mut) reference to a slice of
// _uninitialized_ integers, which is **undefined behavior**
//
// - Only the standard library gets to soundly "ignore" this,
// based on its privileged knowledge of unstable rustc
// internals;
g.buf.reserve(reservation_size(r));
let capacity = g.buf.capacity();
g.buf.set_len(capacity);
r.initializer().initialize(&mut g.buf[g.len..]);
}
}
match r.read(&mut g.buf[g.len..]) {
Ok(0) => {
ret = Ok(g.len - start_len);
break;
}
Ok(n) => g.len += n,
Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
Err(e) => {
ret = Err(e);
break;
}
}
}
ret
}
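// Fallback used by the default `Read::read_vectored` implementation: perform a
// single `read` into the first non-empty buffer (or an empty one if all
// buffers are empty).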
pub(crate) fn default_read_vectored<F>(read: F, bufs: &mut [IoSliceMut<'_>]) -> Result<usize>
where
F: FnOnce(&mut [u8]) -> Result<usize>,
{
let buf = bufs.iter_mut().find(|b| !b.is_empty()).map_or(&mut [][..], |b| &mut **b);
read(buf)
}
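// Fallback used by the default `Write::write_vectored` implementation: perform
// a single `write` from the first non-empty buffer (or an empty one if all
// buffers are empty).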
pub(crate) fn default_write_vectored<F>(write: F, bufs: &[IoSlice<'_>]) -> Result<usize>
where
F: FnOnce(&[u8]) -> Result<usize>,
{
let buf = bufs.iter().find(|b| !b.is_empty()).map_or(&[][..], |b| &**b);
write(buf)
}
/// The `Read` trait allows for reading bytes from a source.
///
/// Implementors of the `Read` trait are called 'readers'.
///
/// Readers are defined by one required method, [`read()`]. Each call to [`read()`]
/// will attempt to pull bytes from this source into a provided buffer. A
/// number of other methods are implemented in terms of [`read()`], giving
/// implementors a number of ways to read bytes while only needing to implement
/// a single method.
///
/// Readers are intended to be composable with one another. Many implementors
/// throughout [`std::io`] take and provide types which implement the `Read`
/// trait.
///
/// Please note that each call to [`read()`] may involve a system call, and
/// therefore, using something that implements [`BufRead`], such as
/// [`BufReader`], will be more efficient.
///
/// # Examples
///
/// [`File`]s implement `Read`:
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut f = File::open("foo.txt")?;
/// let mut buffer = [0; 10];
///
/// // read up to 10 bytes
/// f.read(&mut buffer)?;
///
/// let mut buffer = Vec::new();
/// // read the whole file
/// f.read_to_end(&mut buffer)?;
///
/// // read into a String, so that you don't need to do the conversion.
/// let mut buffer = String::new();
/// f.read_to_string(&mut buffer)?;
///
/// // and more! See the other methods for more details.
/// Ok(())
/// }
/// ```
///
/// Read from [`&str`] because [`&[u8]`][slice] implements `Read`:
///
/// ```no_run
/// # use std::io;
/// use std::io::prelude::*;
///
/// fn main() -> io::Result<()> {
/// let mut b = "This string will be read".as_bytes();
/// let mut buffer = [0; 10];
///
/// // read up to 10 bytes
/// b.read(&mut buffer)?;
///
/// // etc... it works exactly as a File does!
/// Ok(())
/// }
/// ```
///
/// [`read()`]: Read::read
/// [`&str`]: str
/// [`std::io`]: self
/// [`File`]: crate::fs::File
/// [slice]: ../../std/primitive.slice.html
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(spotlight)]
pub trait Read {
/// Pull some bytes from this source into the specified buffer, returning
/// how many bytes were read.
///
/// This function does not provide any guarantees about whether it blocks
/// waiting for data, but if an object needs to block for a read and cannot,
/// it will typically signal this via an [`Err`] return value.
///
/// If the return value of this method is [`Ok(n)`], then it must be
/// guaranteed that `0 <= n <= buf.len()`. A nonzero `n` value indicates
/// that the buffer `buf` has been filled in with `n` bytes of data from this
/// source. If `n` is `0`, then it can indicate one of two scenarios:
///
/// 1. This reader has reached its "end of file" and will likely no longer
/// be able to produce bytes. Note that this does not mean that the
/// reader will *always* no longer be able to produce bytes.
/// 2. The buffer specified was 0 bytes in length.
///
/// It is not an error if the returned value `n` is smaller than the buffer size,
/// even when the reader is not at the end of the stream yet.
/// This may happen for example because fewer bytes are actually available right now
    /// (e.g. being close to end-of-file) or because read() was interrupted by a signal.
///
/// No guarantees are provided about the contents of `buf` when this
    /// function is called, so implementations cannot rely on any property of the
/// contents of `buf` being true. It is recommended that *implementations*
/// only write data to `buf` instead of reading its contents.
///
/// Correspondingly, however, *callers* of this method may not assume any guarantees
/// about how the implementation uses `buf`. The trait is safe to implement,
/// so it is possible that the code that's supposed to write to the buffer might also read
/// from it. It is your responsibility to make sure that `buf` is initialized
/// before calling `read`. Calling `read` with an uninitialized `buf` (of the kind one
/// obtains via [`MaybeUninit<T>`]) is not safe, and can lead to undefined behavior.
///
/// [`MaybeUninit<T>`]: crate::mem::MaybeUninit
///
/// # Errors
///
/// If this function encounters any form of I/O or other error, an error
/// variant will be returned. If an error is returned then it must be
/// guaranteed that no bytes were read.
///
/// An error of the [`ErrorKind::Interrupted`] kind is non-fatal and the read
/// operation should be retried if there is nothing else to do.
///
/// # Examples
///
/// [`File`]s implement `Read`:
///
/// [`Ok(n)`]: Ok
/// [`File`]: crate::fs::File
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut f = File::open("foo.txt")?;
/// let mut buffer = [0; 10];
///
/// // read up to 10 bytes
/// let n = f.read(&mut buffer[..])?;
///
/// println!("The bytes: {:?}", &buffer[..n]);
/// Ok(())
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn read(&mut self, buf: &mut [u8]) -> Result<usize>;
/// Like `read`, except that it reads into a slice of buffers.
///
/// Data is copied to fill each buffer in order, with the final buffer
/// written to possibly being only partially filled. This method must
/// behave equivalently to a single call to `read` with concatenated
/// buffers.
///
/// The default implementation calls `read` with either the first nonempty
/// buffer provided, or an empty one if none exists.
#[stable(feature = "iovec", since = "1.36.0")]
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> Result<usize> {
default_read_vectored(|b| self.read(b), bufs)
}
/// Determines if this `Read`er has an efficient `read_vectored`
/// implementation.
///
/// If a `Read`er does not override the default `read_vectored`
    /// implementation, code using it may want to avoid the method altogether
    /// and coalesce reads into a single buffer for higher performance.
///
/// The default implementation returns `false`.
#[unstable(feature = "can_vector", issue = "69941")]
fn is_read_vectored(&self) -> bool {
false
}
/// Determines if this `Read`er can work with buffers of uninitialized
/// memory.
///
/// The default implementation returns an initializer which will zero
/// buffers.
///
/// If a `Read`er guarantees that it can work properly with uninitialized
/// memory, it should call [`Initializer::nop()`]. See the documentation for
/// [`Initializer`] for details.
///
/// The behavior of this method must be independent of the state of the
/// `Read`er - the method only takes `&self` so that it can be used through
/// trait objects.
///
/// # Safety
///
/// This method is unsafe because a `Read`er could otherwise return a
/// non-zeroing `Initializer` from another `Read` type without an `unsafe`
/// block.
#[unstable(feature = "read_initializer", issue = "42788")]
#[inline]
unsafe fn initializer(&self) -> Initializer {
Initializer::zeroing()
}
/// Read all bytes until EOF in this source, placing them into `buf`.
///
/// All bytes read from this source will be appended to the specified buffer
/// `buf`. This function will continuously call [`read()`] to append more data to
/// `buf` until [`read()`] returns either [`Ok(0)`] or an error of
/// non-[`ErrorKind::Interrupted`] kind.
///
/// If successful, this function will return the total number of bytes read.
///
/// # Errors
///
/// If this function encounters an error of the kind
/// [`ErrorKind::Interrupted`] then the error is ignored and the operation
/// will continue.
///
/// If any other read error is encountered then this function immediately
/// returns. Any bytes which have already been read will be appended to
/// `buf`.
///
/// # Examples
///
/// [`File`]s implement `Read`:
///
/// [`read()`]: Read::read
/// [`Ok(0)`]: Ok
/// [`File`]: crate::fs::File
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut f = File::open("foo.txt")?;
/// let mut buffer = Vec::new();
///
/// // read the whole file
/// f.read_to_end(&mut buffer)?;
/// Ok(())
/// }
/// ```
///
/// (See also the [`std::fs::read`] convenience function for reading from a
/// file.)
///
/// [`std::fs::read`]: crate::fs::read
#[stable(feature = "rust1", since = "1.0.0")]
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> Result<usize> {
read_to_end(self, buf)
}
/// Read all bytes until EOF in this source, appending them to `buf`.
///
/// If successful, this function returns the number of bytes which were read
/// and appended to `buf`.
///
/// # Errors
///
/// If the data in this stream is *not* valid UTF-8 then an error is
/// returned and `buf` is unchanged.
///
/// See [`read_to_end`][readtoend] for other error semantics.
///
/// [readtoend]: Self::read_to_end
///
/// # Examples
///
/// [`File`][file]s implement `Read`:
///
/// [file]: crate::fs::File
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut f = File::open("foo.txt")?;
/// let mut buffer = String::new();
///
/// f.read_to_string(&mut buffer)?;
/// Ok(())
/// }
/// ```
///
/// (See also the [`std::fs::read_to_string`] convenience function for
/// reading from a file.)
///
/// [`std::fs::read_to_string`]: crate::fs::read_to_string
#[stable(feature = "rust1", since = "1.0.0")]
fn read_to_string(&mut self, buf: &mut String) -> Result<usize> {
// Note that we do *not* call `.read_to_end()` here. We are passing
// `&mut Vec<u8>` (the raw contents of `buf`) into the `read_to_end`
// method to fill it up. An arbitrary implementation could overwrite the
// entire contents of the vector, not just append to it (which is what
// we are expecting).
//
// To prevent extraneously checking the UTF-8-ness of the entire buffer
// we pass it to our hardcoded `read_to_end` implementation which we
// know is guaranteed to only read data into the end of the buffer.
append_to_string(buf, |b| read_to_end(self, b))
}
/// Read the exact number of bytes required to fill `buf`.
///
/// This function reads as many bytes as necessary to completely fill the
/// specified buffer `buf`.
///
/// No guarantees are provided about the contents of `buf` when this
    /// function is called, so implementations cannot rely on any property of the
/// contents of `buf` being true. It is recommended that implementations
/// only write data to `buf` instead of reading its contents. The
/// documentation on [`read`] has a more detailed explanation on this
/// subject.
///
/// # Errors
///
/// If this function encounters an error of the kind
/// [`ErrorKind::Interrupted`] then the error is ignored and the operation
/// will continue.
///
/// If this function encounters an "end of file" before completely filling
/// the buffer, it returns an error of the kind [`ErrorKind::UnexpectedEof`].
/// The contents of `buf` are unspecified in this case.
///
/// If any other read error is encountered then this function immediately
/// returns. The contents of `buf` are unspecified in this case.
///
/// If this function returns an error, it is unspecified how many bytes it
/// has read, but it will never read more than would be necessary to
/// completely fill the buffer.
///
/// # Examples
///
/// [`File`]s implement `Read`:
///
/// [`read`]: Read::read
/// [`File`]: crate::fs::File
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut f = File::open("foo.txt")?;
/// let mut buffer = [0; 10];
///
/// // read exactly 10 bytes
/// f.read_exact(&mut buffer)?;
/// Ok(())
/// }
/// ```
#[stable(feature = "read_exact", since = "1.6.0")]
fn read_exact(&mut self, mut buf: &mut [u8]) -> Result<()> {
while !buf.is_empty() {
match self.read(buf) {
Ok(0) => break,
Ok(n) => {
let tmp = buf;
buf = &mut tmp[n..];
}
Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
Err(e) => return Err(e),
}
}
if !buf.is_empty() {
Err(Error::new(ErrorKind::UnexpectedEof, "failed to fill whole buffer"))
} else {
Ok(())
}
}
/// Creates a "by reference" adaptor for this instance of `Read`.
///
/// The returned adaptor also implements `Read` and will simply borrow this
/// current reader.
///
/// # Examples
///
/// [`File`][file]s implement `Read`:
///
/// [file]: crate::fs::File
///
/// ```no_run
/// use std::io;
/// use std::io::Read;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut f = File::open("foo.txt")?;
/// let mut buffer = Vec::new();
/// let mut other_buffer = Vec::new();
///
/// {
/// let reference = f.by_ref();
///
/// // read at most 5 bytes
/// reference.take(5).read_to_end(&mut buffer)?;
///
/// } // drop our &mut reference so we can use f again
///
/// // original file still usable, read the rest
/// f.read_to_end(&mut other_buffer)?;
/// Ok(())
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn by_ref(&mut self) -> &mut Self
where
Self: Sized,
{
self
}
/// Transforms this `Read` instance to an [`Iterator`] over its bytes.
///
/// The returned type implements [`Iterator`] where the `Item` is
/// [`Result`]`<`[`u8`]`, `[`io::Error`]`>`.
/// The yielded item is [`Ok`] if a byte was successfully read and [`Err`]
/// otherwise. EOF is mapped to returning [`None`] from this iterator.
///
/// # Examples
///
/// [`File`][file]s implement `Read`:
///
/// [file]: crate::fs::File
/// [`Iterator`]: crate::iter::Iterator
/// [`Result`]: crate::result::Result
/// [`io::Error`]: self::Error
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut f = File::open("foo.txt")?;
///
/// for byte in f.bytes() {
/// println!("{}", byte.unwrap());
/// }
/// Ok(())
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn bytes(self) -> Bytes<Self>
where
Self: Sized,
{
Bytes { inner: self }
}
/// Creates an adaptor which will chain this stream with another.
///
/// The returned `Read` instance will first read all bytes from this object
/// until EOF is encountered. Afterwards the output is equivalent to the
/// output of `next`.
///
/// # Examples
///
/// [`File`][file]s implement `Read`:
///
/// [file]: crate::fs::File
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut f1 = File::open("foo.txt")?;
/// let mut f2 = File::open("bar.txt")?;
///
/// let mut handle = f1.chain(f2);
/// let mut buffer = String::new();
///
/// // read the value into a String. We could use any Read method here,
/// // this is just one example.
/// handle.read_to_string(&mut buffer)?;
/// Ok(())
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn chain<R: Read>(self, next: R) -> Chain<Self, R>
where
Self: Sized,
{
Chain { first: self, second: next, done_first: false }
}
/// Creates an adaptor which will read at most `limit` bytes from it.
///
/// This function returns a new instance of `Read` which will read at most
/// `limit` bytes, after which it will always return EOF ([`Ok(0)`]). Any
/// read errors will not count towards the number of bytes read and future
/// calls to [`read()`] may succeed.
///
/// # Examples
///
/// [`File`]s implement `Read`:
///
/// [`File`]: crate::fs::File
/// [`Ok(0)`]: Ok
/// [`read()`]: Read::read
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut f = File::open("foo.txt")?;
/// let mut buffer = [0; 5];
///
/// // read at most five bytes
/// let mut handle = f.take(5);
///
/// handle.read(&mut buffer)?;
/// Ok(())
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn take(self, limit: u64) -> Take<Self>
where
Self: Sized,
{
Take { inner: self, limit }
}
}
/// A buffer type used with `Read::read_vectored`.
///
/// It is semantically a wrapper around an `&mut [u8]`, but is guaranteed to be
/// ABI compatible with the `iovec` type on Unix platforms and `WSABUF` on
/// Windows.
#[stable(feature = "iovec", since = "1.36.0")]
#[repr(transparent)]
pub struct IoSliceMut<'a>(sys::io::IoSliceMut<'a>);
#[stable(feature = "iovec-send-sync", since = "1.44.0")]
unsafe impl<'a> Send for IoSliceMut<'a> {}
#[stable(feature = "iovec-send-sync", since = "1.44.0")]
unsafe impl<'a> Sync for IoSliceMut<'a> {}
#[stable(feature = "iovec", since = "1.36.0")]
impl<'a> fmt::Debug for IoSliceMut<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(self.0.as_slice(), fmt)
}
}
impl<'a> IoSliceMut<'a> {
/// Creates a new `IoSliceMut` wrapping a byte slice.
///
/// # Panics
///
/// Panics on Windows if the slice is larger than 4GB.
#[stable(feature = "iovec", since = "1.36.0")]
#[inline]
pub fn new(buf: &'a mut [u8]) -> IoSliceMut<'a> {
IoSliceMut(sys::io::IoSliceMut::new(buf))
}
/// Advance the internal cursor of the slice.
///
/// # Notes
///
/// Elements in the slice may be modified if the cursor is not advanced to
    /// the end of the slice. For example, if we have a slice of buffers with 2
    /// `IoSliceMut`s, both of length 8, and we advance the cursor by 10 bytes,
    /// the first `IoSliceMut` will be untouched; however, the second will be
    /// modified to remove the first 2 bytes (10 - 8).
///
/// # Examples
///
/// ```
/// #![feature(io_slice_advance)]
///
/// use std::io::IoSliceMut;
/// use std::ops::Deref;
///
/// let mut buf1 = [1; 8];
/// let mut buf2 = [2; 16];
/// let mut buf3 = [3; 8];
/// let mut bufs = &mut [
/// IoSliceMut::new(&mut buf1),
/// IoSliceMut::new(&mut buf2),
/// IoSliceMut::new(&mut buf3),
/// ][..];
///
/// // Mark 10 bytes as read.
/// bufs = IoSliceMut::advance(bufs, 10);
/// assert_eq!(bufs[0].deref(), [2; 14].as_ref());
/// assert_eq!(bufs[1].deref(), [3; 8].as_ref());
/// ```
#[unstable(feature = "io_slice_advance", issue = "62726")]
#[inline]
pub fn advance<'b>(bufs: &'b mut [IoSliceMut<'a>], n: usize) -> &'b mut [IoSliceMut<'a>] {
// Number of buffers to remove.
let mut remove = 0;
// Total length of all the to be removed buffers.
let mut accumulated_len = 0;
for buf in bufs.iter() {
if accumulated_len + buf.len() > n {
break;
} else {
accumulated_len += buf.len();
remove += 1;
}
}
let bufs = &mut bufs[remove..];
if !bufs.is_empty() {
bufs[0].0.advance(n - accumulated_len)
}
bufs
}
}
#[stable(feature = "iovec", since = "1.36.0")]
impl<'a> Deref for IoSliceMut<'a> {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
self.0.as_slice()
}
}
#[stable(feature = "iovec", since = "1.36.0")]
impl<'a> DerefMut for IoSliceMut<'a> {
#[inline]
fn deref_mut(&mut self) -> &mut [u8] {
self.0.as_mut_slice()
}
}
/// A buffer type used with `Write::write_vectored`.
///
/// It is semantically a wrapper around an `&[u8]`, but is guaranteed to be
/// ABI compatible with the `iovec` type on Unix platforms and `WSABUF` on
/// Windows.
#[stable(feature = "iovec", since = "1.36.0")]
#[derive(Copy, Clone)]
#[repr(transparent)]
pub struct IoSlice<'a>(sys::io::IoSlice<'a>);
#[stable(feature = "iovec-send-sync", since = "1.44.0")]
unsafe impl<'a> Send for IoSlice<'a> {}
#[stable(feature = "iovec-send-sync", since = "1.44.0")]
unsafe impl<'a> Sync for IoSlice<'a> {}
#[stable(feature = "iovec", since = "1.36.0")]
impl<'a> fmt::Debug for IoSlice<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(self.0.as_slice(), fmt)
}
}
impl<'a> IoSlice<'a> {
/// Creates a new `IoSlice` wrapping a byte slice.
///
/// # Panics
///
/// Panics on Windows if the slice is larger than 4GB.
#[stable(feature = "iovec", since = "1.36.0")]
#[inline]
pub fn new(buf: &'a [u8]) -> IoSlice<'a> {
IoSlice(sys::io::IoSlice::new(buf))
}
/// Advance the internal cursor of the slice.
///
/// # Notes
///
/// Elements in the slice may be modified if the cursor is not advanced to
    /// the end of the slice. For example, if we have a slice of buffers with 2
    /// `IoSlice`s, both of length 8, and we advance the cursor by 10 bytes, the
    /// first `IoSlice` will be untouched; however, the second will be modified
    /// to remove the first 2 bytes (10 - 8).
///
/// # Examples
///
/// ```
/// #![feature(io_slice_advance)]
///
/// use std::io::IoSlice;
/// use std::ops::Deref;
///
/// let buf1 = [1; 8];
/// let buf2 = [2; 16];
/// let buf3 = [3; 8];
/// let mut bufs = &mut [
/// IoSlice::new(&buf1),
/// IoSlice::new(&buf2),
/// IoSlice::new(&buf3),
/// ][..];
///
/// // Mark 10 bytes as written.
/// bufs = IoSlice::advance(bufs, 10);
/// assert_eq!(bufs[0].deref(), [2; 14].as_ref());
    /// assert_eq!(bufs[1].deref(), [3; 8].as_ref());
    /// ```
#[unstable(feature = "io_slice_advance", issue = "62726")]
#[inline]
pub fn advance<'b>(bufs: &'b mut [IoSlice<'a>], n: usize) -> &'b mut [IoSlice<'a>] {
// Number of buffers to remove.
let mut remove = 0;
// Total length of all the to be removed buffers.
let mut accumulated_len = 0;
for buf in bufs.iter() {
if accumulated_len + buf.len() > n {
break;
} else {
accumulated_len += buf.len();
remove += 1;
}
}
let bufs = &mut bufs[remove..];
if !bufs.is_empty() {
bufs[0].0.advance(n - accumulated_len)
}
bufs
}
}
#[stable(feature = "iovec", since = "1.36.0")]
impl<'a> Deref for IoSlice<'a> {
type Target = [u8];
#[inline]
fn deref(&self) -> &[u8] {
self.0.as_slice()
}
}
/// A type used to conditionally initialize buffers passed to `Read` methods.
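///
/// As a rough sketch (the `Zeroes` reader below is a made-up example, and the
/// `read_initializer` feature is unstable), a reader that writes every byte it
/// reports as read can opt out of the zeroing pass:
///
/// ```ignore
/// use std::io::{Initializer, Read, Result};
///
/// struct Zeroes;
///
/// impl Read for Zeroes {
///     fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
///         // Every byte reported as read is written, so an uninitialized
///         // buffer is never observed.
///         for b in buf.iter_mut() {
///             *b = 0;
///         }
///         Ok(buf.len())
///     }
///
///     unsafe fn initializer(&self) -> Initializer {
///         Initializer::nop()
///     }
/// }
/// ```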
#[unstable(feature = "read_initializer", issue = "42788")]
#[derive(Debug)]
pub struct Initializer(bool);
impl Initializer {
/// Returns a new `Initializer` which will zero out buffers.
#[unstable(feature = "read_initializer", issue = "42788")]
#[inline]
pub fn zeroing() -> Initializer {
Initializer(true)
}
/// Returns a new `Initializer` which will not zero out buffers.
///
/// # Safety
///
/// This may only be called by `Read`ers which guarantee that they will not
/// read from buffers passed to `Read` methods, and that the return value of
/// the method accurately reflects the number of bytes that have been
/// written to the head of the buffer.
#[unstable(feature = "read_initializer", issue = "42788")]
#[inline]
pub unsafe fn nop() -> Initializer {
Initializer(false)
}
/// Indicates if a buffer should be initialized.
#[unstable(feature = "read_initializer", issue = "42788")]
#[inline]
pub fn should_initialize(&self) -> bool {
self.0
}
/// Initializes a buffer if necessary.
#[unstable(feature = "read_initializer", issue = "42788")]
#[inline]
pub fn initialize(&self, buf: &mut [u8]) {
if self.should_initialize() {
unsafe { ptr::write_bytes(buf.as_mut_ptr(), 0, buf.len()) }
}
}
}
/// A trait for objects which are byte-oriented sinks.
///
/// Implementors of the `Write` trait are sometimes called 'writers'.
///
/// Writers are defined by two required methods, [`write`] and [`flush`]:
///
/// * The [`write`] method will attempt to write some data into the object,
/// returning how many bytes were successfully written.
///
/// * The [`flush`] method is useful for adaptors and explicit buffers
/// themselves for ensuring that all buffered data has been pushed out to the
/// 'true sink'.
///
/// Writers are intended to be composable with one another. Many implementors
/// throughout [`std::io`] take and provide types which implement the `Write`
/// trait.
///
/// [`write`]: Self::write
/// [`flush`]: Self::flush
/// [`std::io`]: index.html
///
/// # Examples
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> std::io::Result<()> {
/// let data = b"some bytes";
///
/// let mut pos = 0;
/// let mut buffer = File::create("foo.txt")?;
///
/// while pos < data.len() {
/// let bytes_written = buffer.write(&data[pos..])?;
/// pos += bytes_written;
/// }
/// Ok(())
/// }
/// ```
///
/// The trait also provides convenience methods like [`write_all`], which calls
/// `write` in a loop until its entire input has been written.
///
/// [`write_all`]: Self::write_all
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(spotlight)]
pub trait Write {
/// Write a buffer into this writer, returning how many bytes were written.
///
/// This function will attempt to write the entire contents of `buf`, but
/// the entire write may not succeed, or the write may also generate an
/// error. A call to `write` represents *at most one* attempt to write to
/// any wrapped object.
///
/// Calls to `write` are not guaranteed to block waiting for data to be
/// written, and a write which would otherwise block can be indicated through
/// an [`Err`] variant.
///
/// If the return value is [`Ok(n)`] then it must be guaranteed that
/// `n <= buf.len()`. A return value of `0` typically means that the
/// underlying object is no longer able to accept bytes and will likely not
    /// be able to in the future either, or that the buffer provided is empty.
///
/// # Errors
///
/// Each call to `write` may generate an I/O error indicating that the
/// operation could not be completed. If an error is returned then no bytes
/// in the buffer were written to this writer.
///
/// It is **not** considered an error if the entire buffer could not be
/// written to this writer.
///
/// An error of the [`ErrorKind::Interrupted`] kind is non-fatal and the
/// write operation should be retried if there is nothing else to do.
///
/// # Examples
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> std::io::Result<()> {
/// let mut buffer = File::create("foo.txt")?;
///
/// // Writes some prefix of the byte string, not necessarily all of it.
/// buffer.write(b"some bytes")?;
/// Ok(())
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn write(&mut self, buf: &[u8]) -> Result<usize>;
/// Like `write`, except that it writes from a slice of buffers.
///
/// Data is copied from each buffer in order, with the final buffer
/// read from possibly being only partially consumed. This method must
/// behave as a call to `write` with the buffers concatenated would.
///
/// The default implementation calls `write` with either the first nonempty
/// buffer provided, or an empty one if none exists.
#[stable(feature = "iovec", since = "1.36.0")]
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> Result<usize> {
default_write_vectored(|b| self.write(b), bufs)
}
/// Determines if this `Write`er has an efficient `write_vectored`
/// implementation.
///
/// If a `Write`er does not override the default `write_vectored`
    /// implementation, code using it may want to avoid the method altogether
/// and coalesce writes into a single buffer for higher performance.
///
/// The default implementation returns `false`.
#[unstable(feature = "can_vector", issue = "69941")]
fn is_write_vectored(&self) -> bool {
false
}
/// Flush this output stream, ensuring that all intermediately buffered
/// contents reach their destination.
///
/// # Errors
///
/// It is considered an error if not all bytes could be written due to
/// I/O errors or EOF being reached.
///
/// # Examples
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::io::BufWriter;
/// use std::fs::File;
///
/// fn main() -> std::io::Result<()> {
/// let mut buffer = BufWriter::new(File::create("foo.txt")?);
///
/// buffer.write_all(b"some bytes")?;
/// buffer.flush()?;
/// Ok(())
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn flush(&mut self) -> Result<()>;
/// Attempts to write an entire buffer into this writer.
///
/// This method will continuously call [`write`] until there is no more data
/// to be written or an error of non-[`ErrorKind::Interrupted`] kind is
/// returned. This method will not return until the entire buffer has been
/// successfully written or such an error occurs. The first error that is
/// not of [`ErrorKind::Interrupted`] kind generated from this method will be
/// returned.
///
/// If the buffer contains no data, this will never call [`write`].
///
/// # Errors
///
/// This function will return the first error of
/// non-[`ErrorKind::Interrupted`] kind that [`write`] returns.
///
/// [`write`]: Self::write
///
/// # Examples
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> std::io::Result<()> {
/// let mut buffer = File::create("foo.txt")?;
///
/// buffer.write_all(b"some bytes")?;
/// Ok(())
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn write_all(&mut self, mut buf: &[u8]) -> Result<()> {
while !buf.is_empty() {
match self.write(buf) {
Ok(0) => {
return Err(Error::new(ErrorKind::WriteZero, "failed to write whole buffer"));
}
Ok(n) => buf = &buf[n..],
Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
Err(e) => return Err(e),
}
}
Ok(())
}
/// Attempts to write multiple buffers into this writer.
///
/// This method will continuously call [`write_vectored`] until there is no
/// more data to be written or an error of non-[`ErrorKind::Interrupted`]
/// kind is returned. This method will not return until all buffers have
/// been successfully written or such an error occurs. The first error that
/// is not of [`ErrorKind::Interrupted`] kind generated from this method
/// will be returned.
///
/// If the buffer contains no data, this will never call [`write_vectored`].
///
/// [`write_vectored`]: Self::write_vectored
///
/// # Notes
    ///
/// Unlike `io::Write::write_vectored`, this takes a *mutable* reference to
/// a slice of `IoSlice`s, not an immutable one. That's because we need to
/// modify the slice to keep track of the bytes already written.
///
/// Once this function returns, the contents of `bufs` are unspecified, as
/// this depends on how many calls to `write_vectored` were necessary. It is
/// best to understand this function as taking ownership of `bufs` and to
/// not use `bufs` afterwards. The underlying buffers, to which the
/// `IoSlice`s point (but not the `IoSlice`s themselves), are unchanged and
/// can be reused.
///
/// # Examples
///
/// ```
/// #![feature(write_all_vectored)]
/// # fn main() -> std::io::Result<()> {
///
/// use std::io::{Write, IoSlice};
///
/// let mut writer = Vec::new();
/// let bufs = &mut [
/// IoSlice::new(&[1]),
/// IoSlice::new(&[2, 3]),
/// IoSlice::new(&[4, 5, 6]),
/// ];
///
/// writer.write_all_vectored(bufs)?;
/// // Note: the contents of `bufs` is now undefined, see the Notes section.
///
/// assert_eq!(writer, &[1, 2, 3, 4, 5, 6]);
/// # Ok(()) }
/// ```
#[unstable(feature = "write_all_vectored", issue = "70436")]
fn write_all_vectored(&mut self, mut bufs: &mut [IoSlice<'_>]) -> Result<()> {
while !bufs.is_empty() {
match self.write_vectored(bufs) {
Ok(0) => {
return Err(Error::new(ErrorKind::WriteZero, "failed to write whole buffer"));
}
Ok(n) => bufs = IoSlice::advance(mem::take(&mut bufs), n),
Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
Err(e) => return Err(e),
}
}
Ok(())
}
/// Writes a formatted string into this writer, returning any error
/// encountered.
///
/// This method is primarily used to interface with the
/// [`format_args!()`] macro, but it is rare that this should
/// explicitly be called. The [`write!()`] macro should be favored to
/// invoke this method instead.
///
/// This function internally uses the [`write_all`][writeall] method on
/// this trait and hence will continuously write data so long as no errors
/// are received. This also means that partial writes are not indicated in
/// this signature.
///
/// [writeall]: Self::write_all
///
/// # Errors
///
/// This function will return any I/O error reported while formatting.
///
/// # Examples
///
/// ```no_run
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> std::io::Result<()> {
/// let mut buffer = File::create("foo.txt")?;
///
/// // this call
/// write!(buffer, "{:.*}", 2, 1.234567)?;
/// // turns into this:
/// buffer.write_fmt(format_args!("{:.*}", 2, 1.234567))?;
/// Ok(())
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> Result<()> {
// Create a shim which translates a Write to a fmt::Write and saves
        // off I/O errors instead of discarding them.
struct Adaptor<'a, T: ?Sized + 'a> {
inner: &'a mut T,
error: Result<()>,
}
impl<T: Write + ?Sized> fmt::Write for Adaptor<'_, T> {
fn write_str(&mut self, s: &str) -> fmt::Result {
match self.inner.write_all(s.as_bytes()) {
Ok(()) => Ok(()),
Err(e) => {
self.error = Err(e);
Err(fmt::Error)
}
}
}
}
let mut output = Adaptor { inner: self, error: Ok(()) };
match fmt::write(&mut output, fmt) {
Ok(()) => Ok(()),
Err(..) => {
// check if the error came from the underlying `Write` or not
if output.error.is_err() {
output.error
} else {
Err(Error::new(ErrorKind::Other, "formatter error"))
}
}
}
}
/// Creates a "by reference" adaptor for this instance of `Write`.
///
/// The returned adaptor also implements `Write` and will simply borrow this
/// current writer.
///
/// # Examples
///
/// ```no_run
/// use std::io::Write;
/// use std::fs::File;
///
/// fn main() -> std::io::Result<()> {
/// let mut buffer = File::create("foo.txt")?;
///
/// let reference = buffer.by_ref();
///
/// // we can use reference just like our original buffer
/// reference.write_all(b"some bytes")?;
/// Ok(())
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn by_ref(&mut self) -> &mut Self
where
Self: Sized,
{
self
}
}
/// The `Seek` trait provides a cursor which can be moved within a stream of
/// bytes.
///
/// The stream typically has a fixed size, allowing seeking relative to either
/// end or the current offset.
///
/// # Examples
///
/// [`File`][file]s implement `Seek`:
///
/// [file]: crate::fs::File
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
/// use std::io::SeekFrom;
///
/// fn main() -> io::Result<()> {
/// let mut f = File::open("foo.txt")?;
///
/// // move the cursor 42 bytes from the start of the file
/// f.seek(SeekFrom::Start(42))?;
/// Ok(())
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Seek {
/// Seek to an offset, in bytes, in a stream.
///
/// A seek beyond the end of a stream is allowed, but behavior is defined
/// by the implementation.
///
/// If the seek operation completed successfully,
/// this method returns the new position from the start of the stream.
/// That position can be used later with [`SeekFrom::Start`].
///
/// # Errors
///
/// Seeking to a negative offset is considered an error.
///
/// [`SeekFrom::Start`]: enum.SeekFrom.html#variant.Start
#[stable(feature = "rust1", since = "1.0.0")]
fn seek(&mut self, pos: SeekFrom) -> Result<u64>;
/// Returns the length of this stream (in bytes).
///
/// This method is implemented using up to three seek operations. If this
/// method returns successfully, the seek position is unchanged (i.e. the
/// position before calling this method is the same as afterwards).
/// However, if this method returns an error, the seek position is
/// unspecified.
///
/// If you need to obtain the length of *many* streams and you don't care
/// about the seek position afterwards, you can reduce the number of seek
/// operations by simply calling `seek(SeekFrom::End(0))` and using its
/// return value (it is also the stream length).
///
    /// Note that the length of a stream can change over time (for example, when
/// data is appended to a file). So calling this method multiple times does
/// not necessarily return the same length each time.
    ///
/// # Example
///
/// ```no_run
/// #![feature(seek_convenience)]
/// use std::{
/// io::{self, Seek},
/// fs::File,
/// };
///
/// fn main() -> io::Result<()> {
/// let mut f = File::open("foo.txt")?;
///
/// let len = f.stream_len()?;
/// println!("The file is currently {} bytes long", len);
/// Ok(())
/// }
/// ```
#[unstable(feature = "seek_convenience", issue = "59359")]
fn stream_len(&mut self) -> Result<u64> {
let old_pos = self.stream_position()?;
let len = self.seek(SeekFrom::End(0))?;
// Avoid seeking a third time when we were already at the end of the
// stream. The branch is usually way cheaper than a seek operation.
if old_pos != len {
self.seek(SeekFrom::Start(old_pos))?;
}
Ok(len)
}
/// Returns the current seek position from the start of the stream.
///
/// This is equivalent to `self.seek(SeekFrom::Current(0))`.
    ///
/// # Example
///
/// ```no_run
/// #![feature(seek_convenience)]
/// use std::{
/// io::{self, BufRead, BufReader, Seek},
/// fs::File,
/// };
///
/// fn main() -> io::Result<()> {
/// let mut f = BufReader::new(File::open("foo.txt")?);
///
/// let before = f.stream_position()?;
/// f.read_line(&mut String::new())?;
/// let after = f.stream_position()?;
///
/// println!("The first line was {} bytes long", after - before);
/// Ok(())
/// }
/// ```
#[unstable(feature = "seek_convenience", issue = "59359")]
fn stream_position(&mut self) -> Result<u64> {
self.seek(SeekFrom::Current(0))
}
}
/// Enumeration of possible methods to seek within an I/O object.
///
/// It is used by the [`Seek`] trait.
///
/// [`Seek`]: trait.Seek.html
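///
/// # Examples
///
/// A minimal sketch of the three variants (assuming a readable `foo.txt`, as in
/// the other examples in this module):
///
/// ```no_run
/// use std::io::{self, Seek, SeekFrom};
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
///     let mut f = File::open("foo.txt")?;
///
///     f.seek(SeekFrom::Start(8))?;    // 8 bytes from the beginning
///     f.seek(SeekFrom::Current(-4))?; // 4 bytes back from the current position
///     f.seek(SeekFrom::End(-2))?;     // 2 bytes before the end
///     Ok(())
/// }
/// ```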
#[derive(Copy, PartialEq, Eq, Clone, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub enum SeekFrom {
/// Sets the offset to the provided number of bytes.
#[stable(feature = "rust1", since = "1.0.0")]
Start(#[stable(feature = "rust1", since = "1.0.0")] u64),
/// Sets the offset to the size of this object plus the specified number of
/// bytes.
///
/// It is possible to seek beyond the end of an object, but it's an error to
/// seek before byte 0.
#[stable(feature = "rust1", since = "1.0.0")]
End(#[stable(feature = "rust1", since = "1.0.0")] i64),
/// Sets the offset to the current position plus the specified number of
/// bytes.
///
/// It is possible to seek beyond the end of an object, but it's an error to
/// seek before byte 0.
#[stable(feature = "rust1", since = "1.0.0")]
Current(#[stable(feature = "rust1", since = "1.0.0")] i64),
}
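// Shared helper behind `BufRead::read_until`: repeatedly fill the reader's
// internal buffer and append bytes to `buf` until the delimiter (included in
// `buf`) is found or EOF is reached, returning the total number of bytes read.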
fn read_until<R: BufRead + ?Sized>(r: &mut R, delim: u8, buf: &mut Vec<u8>) -> Result<usize> {
let mut read = 0;
loop {
let (done, used) = {
let available = match r.fill_buf() {
Ok(n) => n,
Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
};
match memchr::memchr(delim, available) {
Some(i) => {
buf.extend_from_slice(&available[..=i]);
(true, i + 1)
}
None => {
buf.extend_from_slice(available);
(false, available.len())
}
}
};
r.consume(used);
read += used;
if done || used == 0 {
return Ok(read);
}
}
}
/// A `BufRead` is a type of `Read`er which has an internal buffer, allowing it
/// to offer additional ways of reading.
///
/// For example, reading line-by-line is inefficient without using a buffer, so
/// if you want to read by line, you'll need `BufRead`, which includes a
/// [`read_line`] method as well as a [`lines`] iterator.
///
/// # Examples
///
/// A locked standard input implements `BufRead`:
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
///
/// let stdin = io::stdin();
/// for line in stdin.lock().lines() {
/// println!("{}", line.unwrap());
/// }
/// ```
///
/// If you have something that implements [`Read`], you can use the [`BufReader`
/// type][`BufReader`] to turn it into a `BufRead`.
///
/// For example, [`File`] implements [`Read`], but not `BufRead`.
/// [`BufReader`] to the rescue!
///
/// [`BufReader`]: struct.BufReader.html
/// [`File`]: crate::fs::File
/// [`read_line`]: Self::read_line
/// [`lines`]: Self::lines
/// [`Read`]: trait.Read.html
///
/// ```no_run
/// use std::io::{self, BufReader};
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let f = File::open("foo.txt")?;
/// let f = BufReader::new(f);
///
/// for line in f.lines() {
/// println!("{}", line.unwrap());
/// }
///
/// Ok(())
/// }
/// ```
///
#[stable(feature = "rust1", since = "1.0.0")]
pub trait BufRead: Read {
/// Returns the contents of the internal buffer, filling it with more data
/// from the inner reader if it is empty.
///
/// This function is a lower-level call. It needs to be paired with the
/// [`consume`] method to function properly. When calling this
/// method, none of the contents will be "read" in the sense that later
/// calling `read` may return the same contents. As such, [`consume`] must
/// be called with the number of bytes that are consumed from this buffer to
/// ensure that the bytes are never returned twice.
///
/// [`consume`]: Self::consume
///
/// An empty buffer returned indicates that the stream has reached EOF.
/// | ///
/// This function will return an I/O error if the underlying reader was
/// read, but returned an error.
///
/// # Examples
///
/// A locked standard input implements `BufRead`:
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
///
/// let stdin = io::stdin();
/// let mut stdin = stdin.lock();
///
/// let buffer = stdin.fill_buf().unwrap();
///
/// // work with buffer
/// println!("{:?}", buffer);
///
/// // ensure the bytes we worked with aren't returned again later
/// let length = buffer.len();
/// stdin.consume(length);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn fill_buf(&mut self) -> Result<&[u8]>;
/// Tells this buffer that `amt` bytes have been consumed from the buffer,
/// so they should no longer be returned in calls to `read`.
///
/// This function is a lower-level call. It needs to be paired with the
/// [`fill_buf`] method to function properly. This function does
/// not perform any I/O, it simply informs this object that some amount of
/// its buffer, returned from [`fill_buf`], has been consumed and should
/// no longer be returned. As such, this function may do odd things if
/// [`fill_buf`] isn't called before calling it.
///
/// The `amt` must be `<=` the number of bytes in the buffer returned by
/// [`fill_buf`].
///
/// # Examples
///
/// Since `consume()` is meant to be used with [`fill_buf`],
/// that method's example includes an example of `consume()`.
///
/// [`fill_buf`]: Self::fill_buf
#[stable(feature = "rust1", since = "1.0.0")]
fn consume(&mut self, amt: usize);
/// Read all bytes into `buf` until the delimiter `byte` or EOF is reached.
///
/// This function will read bytes from the underlying stream until the
/// delimiter or EOF is found. Once found, all bytes up to, and including,
/// the delimiter (if found) will be appended to `buf`.
///
/// If successful, this function will return the total number of bytes read.
///
/// This function is blocking and should be used carefully: it is possible for
/// an attacker to continuously send bytes without ever sending the delimiter
/// or EOF.
///
/// # Errors
///
/// This function will ignore all instances of [`ErrorKind::Interrupted`] and
/// will otherwise return any errors returned by [`fill_buf`].
///
/// If an I/O error is encountered then all bytes read so far will be
/// present in `buf` and its length will have been adjusted appropriately.
///
/// [`fill_buf`]: Self::fill_buf
/// [`ErrorKind::Interrupted`]: enum.ErrorKind.html#variant.Interrupted
///
/// # Examples
///
/// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In
/// this example, we use [`Cursor`] to read all the bytes in a byte slice
/// in hyphen delimited segments:
///
/// [`Cursor`]: struct.Cursor.html
///
/// ```
/// use std::io::{self, BufRead};
///
/// let mut cursor = io::Cursor::new(b"lorem-ipsum");
/// let mut buf = vec![];
///
/// // cursor is at 'l'
/// let num_bytes = cursor.read_until(b'-', &mut buf)
/// .expect("reading from cursor won't fail");
/// assert_eq!(num_bytes, 6);
/// assert_eq!(buf, b"lorem-");
/// buf.clear();
///
/// // cursor is at 'i'
/// let num_bytes = cursor.read_until(b'-', &mut buf)
/// .expect("reading from cursor won't fail");
/// assert_eq!(num_bytes, 5);
/// assert_eq!(buf, b"ipsum");
/// buf.clear();
///
/// // cursor is at EOF
/// let num_bytes = cursor.read_until(b'-', &mut buf)
/// .expect("reading from cursor won't fail");
/// assert_eq!(num_bytes, 0);
/// assert_eq!(buf, b"");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> Result<usize> {
read_until(self, byte, buf)
}
/// Read all bytes until a newline (the 0xA byte) is reached, and append
/// them to the provided buffer.
///
/// This function will read bytes from the underlying stream until the
/// newline delimiter (the 0xA byte) or EOF is found. Once found, all bytes
/// up to, and including, the delimiter (if found) will be appended to
/// `buf`.
///
/// If successful, this function will return the total number of bytes read.
///
/// If this function returns `Ok(0)`, the stream has reached EOF.
///
/// This function is blocking and should be used carefully: it is possible for
/// an attacker to continuously send bytes without ever sending a newline
/// or EOF.
///
/// # Errors
///
/// This function has the same error semantics as [`read_until`] and will
/// also return an error if the read bytes are not valid UTF-8. If an I/O
/// error is encountered then `buf` may contain some bytes already read in
/// the event that all data read so far was valid UTF-8.
///
/// [`read_until`]: Self::read_until
///
/// # Examples
///
/// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In
/// this example, we use [`Cursor`] to read all the lines in a byte slice:
///
/// [`Cursor`]: struct.Cursor.html
///
/// ```
/// use std::io::{self, BufRead};
///
/// let mut cursor = io::Cursor::new(b"foo\nbar");
/// let mut buf = String::new();
///
/// // cursor is at 'f'
/// let num_bytes = cursor.read_line(&mut buf)
/// .expect("reading from cursor won't fail");
/// assert_eq!(num_bytes, 4);
/// assert_eq!(buf, "foo\n");
/// buf.clear();
///
/// // cursor is at 'b'
/// let num_bytes = cursor.read_line(&mut buf)
/// .expect("reading from cursor won't fail");
/// assert_eq!(num_bytes, 3);
/// assert_eq!(buf, "bar");
/// buf.clear();
///
/// // cursor is at EOF
/// let num_bytes = cursor.read_line(&mut buf)
/// .expect("reading from cursor won't fail");
/// assert_eq!(num_bytes, 0);
/// assert_eq!(buf, "");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn read_line(&mut self, buf: &mut String) -> Result<usize> {
// Note that we are not calling the `.read_until` method here, but
// rather our hardcoded implementation. For more details as to why, see
// the comments in `read_to_end`.
append_to_string(buf, |b| read_until(self, b'\n', b))
}
/// Returns an iterator over the contents of this reader split on the byte
/// `byte`.
///
/// The iterator returned from this function will return instances of
/// [`io::Result`]`<`[`Vec<u8>`]`>`. Each vector returned will *not* have
/// the delimiter byte at the end.
///
/// This function will yield errors whenever [`read_until`] would have
/// also yielded an error.
///
/// [`io::Result`]: self::Result
/// [`Vec<u8>`]: crate::vec::Vec
/// [`read_until`]: Self::read_until
///
/// # Examples
///
/// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In
/// this example, we use [`Cursor`] to iterate over all hyphen delimited
/// segments in a byte slice
///
/// [`Cursor`]: struct.Cursor.html
///
/// ```
/// use std::io::{self, BufRead};
///
/// let cursor = io::Cursor::new(b"lorem-ipsum-dolor");
///
/// let mut split_iter = cursor.split(b'-').map(|l| l.unwrap());
/// assert_eq!(split_iter.next(), Some(b"lorem".to_vec()));
/// assert_eq!(split_iter.next(), Some(b"ipsum".to_vec()));
/// assert_eq!(split_iter.next(), Some(b"dolor".to_vec()));
/// assert_eq!(split_iter.next(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn split(self, byte: u8) -> Split<Self>
where
Self: Sized,
{
Split { buf: self, delim: byte }
}
/// Returns an iterator over the lines of this reader.
///
/// The iterator returned from this function will yield instances of
/// [`io::Result`]`<`[`String`]`>`. Each string returned will *not* have a newline
/// byte (the 0xA byte) or CRLF (0xD, 0xA bytes) at the end.
///
/// [`io::Result`]: self::Result
///
/// # Examples
///
/// [`std::io::Cursor`][`Cursor`] is a type that implements `BufRead`. In
/// this example, we use [`Cursor`] to iterate over all the lines in a byte
/// slice.
///
/// ```
/// use std::io::{self, BufRead};
///
/// let cursor = io::Cursor::new(b"lorem\nipsum\r\ndolor");
///
/// let mut lines_iter = cursor.lines().map(|l| l.unwrap());
/// assert_eq!(lines_iter.next(), Some(String::from("lorem")));
/// assert_eq!(lines_iter.next(), Some(String::from("ipsum")));
/// assert_eq!(lines_iter.next(), Some(String::from("dolor")));
/// assert_eq!(lines_iter.next(), None);
/// ```
///
/// # Errors
///
/// Each line of the iterator has the same error semantics as [`BufRead::read_line`].
///
/// [`BufRead::read_line`]: trait.BufRead.html#method.read_line
#[stable(feature = "rust1", since = "1.0.0")]
fn lines(self) -> Lines<Self>
where
Self: Sized,
{
Lines { buf: self }
}
}
/// Adaptor to chain together two readers.
///
/// This struct is generally created by calling [`chain`] on a reader.
/// Please see the documentation of [`chain`] for more details.
///
/// [`chain`]: trait.Read.html#method.chain
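///
/// # Examples
///
/// A minimal sketch (illustrative only) chaining two in-memory readers:
///
/// ```
/// use std::io::Read;
///
/// let mut chain = (&b"hello "[..]).chain(&b"world"[..]);
/// let mut buffer = String::new();
/// chain.read_to_string(&mut buffer).unwrap();
/// assert_eq!(buffer, "hello world");
/// ```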
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Chain<T, U> {
first: T,
second: U,
done_first: bool,
}
impl<T, U> Chain<T, U> {
/// Consumes the `Chain`, returning the wrapped readers.
///
/// # Examples
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut foo_file = File::open("foo.txt")?;
/// let mut bar_file = File::open("bar.txt")?;
///
/// let chain = foo_file.chain(bar_file);
/// let (foo_file, bar_file) = chain.into_inner();
/// Ok(())
/// }
/// ```
#[stable(feature = "more_io_inner_methods", since = "1.20.0")]
pub fn into_inner(self) -> (T, U) {
(self.first, self.second)
}
/// Gets references to the underlying readers in this `Chain`.
///
/// # Examples
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut foo_file = File::open("foo.txt")?;
/// let mut bar_file = File::open("bar.txt")?;
///
/// let chain = foo_file.chain(bar_file);
/// let (foo_file, bar_file) = chain.get_ref();
/// Ok(())
/// }
/// ```
#[stable(feature = "more_io_inner_methods", since = "1.20.0")]
pub fn get_ref(&self) -> (&T, &U) {
(&self.first, &self.second)
}
/// Gets mutable references to the underlying readers in this `Chain`.
///
/// Care should be taken to avoid modifying the internal I/O state of the
/// underlying readers as doing so may corrupt the internal state of this
/// `Chain`.
///
/// # Examples
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut foo_file = File::open("foo.txt")?;
/// let mut bar_file = File::open("bar.txt")?;
///
/// let mut chain = foo_file.chain(bar_file);
/// let (foo_file, bar_file) = chain.get_mut();
/// Ok(())
/// }
/// ```
#[stable(feature = "more_io_inner_methods", since = "1.20.0")]
pub fn get_mut(&mut self) -> (&mut T, &mut U) {
(&mut self.first, &mut self.second)
}
}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<T: fmt::Debug, U: fmt::Debug> fmt::Debug for Chain<T, U> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Chain").field("t", &self.first).field("u", &self.second).finish()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Read, U: Read> Read for Chain<T, U> {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
if !self.done_first {
match self.first.read(buf)? {
0 if !buf.is_empty() => self.done_first = true,
n => return Ok(n),
}
}
self.second.read(buf)
}
fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> Result<usize> {
if !self.done_first {
match self.first.read_vectored(bufs)? {
0 if bufs.iter().any(|b| !b.is_empty()) => self.done_first = true,
n => return Ok(n),
}
}
self.second.read_vectored(bufs)
}
unsafe fn initializer(&self) -> Initializer {
let initializer = self.first.initializer();
if initializer.should_initialize() { initializer } else { self.second.initializer() }
}
}
#[stable(feature = "chain_bufread", since = "1.9.0")]
impl<T: BufRead, U: BufRead> BufRead for Chain<T, U> {
fn fill_buf(&mut self) -> Result<&[u8]> {
if !self.done_first {
match self.first.fill_buf()? {
buf if buf.is_empty() => {
self.done_first = true;
}
buf => return Ok(buf),
}
}
self.second.fill_buf()
}
fn consume(&mut self, amt: usize) {
if !self.done_first { self.first.consume(amt) } else { self.second.consume(amt) }
}
}
/// Reader adaptor which limits the bytes read from an underlying reader.
///
/// This struct is generally created by calling [`take`] on a reader.
/// Please see the documentation of [`take`] for more details.
///
/// [`take`]: trait.Read.html#method.take
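///
/// # Examples
///
/// A minimal sketch (illustrative only) limiting a reader to its first five
/// bytes:
///
/// ```
/// use std::io::Read;
///
/// let mut handle = (&b"hello world"[..]).take(5);
/// let mut buffer = String::new();
/// handle.read_to_string(&mut buffer).unwrap();
/// assert_eq!(buffer, "hello");
/// ```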
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct Take<T> {
inner: T,
limit: u64,
}
impl<T> Take<T> {
/// Returns the number of bytes that can be read before this instance will
/// return EOF.
///
/// # Note
///
/// This instance may reach `EOF` after reading fewer bytes than indicated by
/// this method if the underlying [`Read`] instance reaches EOF.
///
/// # Examples
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let f = File::open("foo.txt")?;
///
/// // read at most five bytes
/// let handle = f.take(5);
///
/// println!("limit: {}", handle.limit());
/// Ok(())
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn limit(&self) -> u64 {
self.limit
}
/// Sets the number of bytes that can be read before this instance will
/// return EOF. This is the same as constructing a new `Take` instance, so
    /// the number of bytes already read and the previous limit value don't matter when
/// calling this method.
///
/// # Examples
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let f = File::open("foo.txt")?;
///
/// // read at most five bytes
/// let mut handle = f.take(5);
/// handle.set_limit(10);
///
/// assert_eq!(handle.limit(), 10);
/// Ok(())
/// }
/// ```
#[stable(feature = "take_set_limit", since = "1.27.0")]
pub fn set_limit(&mut self, limit: u64) {
self.limit = limit;
}
/// Consumes the `Take`, returning the wrapped reader.
///
/// # Examples
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut file = File::open("foo.txt")?;
///
/// let mut buffer = [0; 5];
/// let mut handle = file.take(5);
/// handle.read(&mut buffer)?;
///
/// let file = handle.into_inner();
/// Ok(())
/// }
/// ```
#[stable(feature = "io_take_into_inner", since = "1.15.0")]
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying reader.
///
/// # Examples
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut file = File::open("foo.txt")?;
///
/// let mut buffer = [0; 5];
/// let mut handle = file.take(5);
/// handle.read(&mut buffer)?;
///
/// let file = handle.get_ref();
/// Ok(())
/// }
/// ```
#[stable(feature = "more_io_inner_methods", since = "1.20.0")]
pub fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying reader.
///
/// Care should be taken to avoid modifying the internal I/O state of the
/// underlying reader as doing so may corrupt the internal limit of this
/// `Take`.
///
/// # Examples
///
/// ```no_run
/// use std::io;
/// use std::io::prelude::*;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut file = File::open("foo.txt")?;
///
/// let mut buffer = [0; 5];
/// let mut handle = file.take(5);
/// handle.read(&mut buffer)?;
///
/// let file = handle.get_mut();
/// Ok(())
/// }
/// ```
#[stable(feature = "more_io_inner_methods", since = "1.20.0")]
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Read> Read for Take<T> {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
// Don't call into inner reader at all at EOF because it may still block
if self.limit == 0 {
return Ok(0);
}
let max = cmp::min(buf.len() as u64, self.limit) as usize;
let n = self.inner.read(&mut buf[..max])?;
self.limit -= n as u64;
Ok(n)
}
unsafe fn initializer(&self) -> Initializer {
self.inner.initializer()
}
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> Result<usize> {
// Pass in a reservation_size closure that respects the current value
// of limit for each read. If we hit the read limit, this prevents the
// final zero-byte read from allocating again.
read_to_end_with_reservation(self, buf, |self_| cmp::min(self_.limit, 32) as usize)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: BufRead> BufRead for Take<T> {
fn fill_buf(&mut self) -> Result<&[u8]> {
// Don't call into inner reader at all at EOF because it may still block
if self.limit == 0 {
return Ok(&[]);
}
let buf = self.inner.fill_buf()?;
let cap = cmp::min(buf.len() as u64, self.limit) as usize;
Ok(&buf[..cap])
}
fn consume(&mut self, amt: usize) {
// Don't let callers reset the limit by passing an overlarge value
let amt = cmp::min(amt as u64, self.limit) as usize;
self.limit -= amt as u64;
self.inner.consume(amt);
}
}
/// An iterator over `u8` values of a reader.
///
/// This struct is generally created by calling [`bytes`] on a reader.
/// Please see the documentation of [`bytes`] for more details.
///
/// [`bytes`]: trait.Read.html#method.bytes
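///
/// # Examples
///
/// A minimal sketch (illustrative only) collecting the bytes of an in-memory
/// reader:
///
/// ```
/// use std::io::Read;
///
/// let bytes: Vec<u8> = (&b"abc"[..]).bytes().map(|b| b.unwrap()).collect();
/// assert_eq!(bytes, b"abc".to_vec());
/// ```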
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct Bytes<R> {
inner: R,
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<R: Read> Iterator for Bytes<R> {
type Item = Result<u8>;
fn next(&mut self) -> Option<Result<u8>> {
let mut byte = 0;
loop {
return match self.inner.read(slice::from_mut(&mut byte)) {
Ok(0) => None,
Ok(..) => Some(Ok(byte)),
Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
Err(e) => Some(Err(e)),
};
}
}
}
/// An iterator over the contents of an instance of `BufRead` split on a
/// particular byte.
///
/// This struct is generally created by calling [`split`] on a `BufRead`.
/// Please see the documentation of [`split`] for more details.
///
/// [`split`]: trait.BufRead.html#method.split
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct Split<B> {
buf: B,
delim: u8,
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: BufRead> Iterator for Split<B> {
type Item = Result<Vec<u8>>;
fn next(&mut self) -> Option<Result<Vec<u8>>> {
let mut buf = Vec::new();
match self.buf.read_until(self.delim, &mut buf) {
Ok(0) => None,
Ok(_n) => {
if buf[buf.len() - 1] == self.delim {
buf.pop();
}
Some(Ok(buf))
}
Err(e) => Some(Err(e)),
}
}
}
/// An iterator over the lines of an instance of `BufRead`.
///
/// This struct is generally created by calling [`lines`] on a `BufRead`.
/// Please see the documentation of [`lines`] for more details.
///
/// [`lines`]: trait.BufRead.html#method.lines
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct Lines<B> {
buf: B,
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: BufRead> Iterator for Lines<B> {
type Item = Result<String>;
fn next(&mut self) -> Option<Result<String>> {
let mut buf = String::new();
match self.buf.read_line(&mut buf) {
Ok(0) => None,
Ok(_n) => {
if buf.ends_with('\n') {
buf.pop();
if buf.ends_with('\r') {
buf.pop();
}
}
Some(Ok(buf))
}
Err(e) => Some(Err(e)),
}
}
}
#[cfg(test)]
mod tests {
use super::{repeat, Cursor, SeekFrom};
use crate::cmp::{self, min};
use crate::io::prelude::*;
use crate::io::{self, IoSlice, IoSliceMut};
use crate::ops::Deref;
#[test]
#[cfg_attr(target_os = "emscripten", ignore)]
fn read_until() {
let mut buf = Cursor::new(&b"12"[..]);
let mut v = Vec::new();
assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 2);
assert_eq!(v, b"12");
let mut buf = Cursor::new(&b"1233"[..]);
let mut v = Vec::new();
assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 3);
assert_eq!(v, b"123");
v.truncate(0);
assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 1);
assert_eq!(v, b"3");
v.truncate(0);
assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 0);
assert_eq!(v, []);
}
#[test]
fn split() {
let buf = Cursor::new(&b"12"[..]);
let mut s = buf.split(b'3');
assert_eq!(s.next().unwrap().unwrap(), vec![b'1', b'2']);
assert!(s.next().is_none());
let buf = Cursor::new(&b"1233"[..]);
let mut s = buf.split(b'3');
assert_eq!(s.next().unwrap().unwrap(), vec![b'1', b'2']);
assert_eq!(s.next().unwrap().unwrap(), vec![]);
assert!(s.next().is_none());
}
#[test]
fn read_line() {
let mut buf = Cursor::new(&b"12"[..]);
let mut v = String::new();
assert_eq!(buf.read_line(&mut v).unwrap(), 2);
assert_eq!(v, "12");
let mut buf = Cursor::new(&b"12\n\n"[..]);
let mut v = String::new();
assert_eq!(buf.read_line(&mut v).unwrap(), 3);
assert_eq!(v, "12\n");
v.truncate(0);
assert_eq!(buf.read_line(&mut v).unwrap(), 1);
assert_eq!(v, "\n");
v.truncate(0);
assert_eq!(buf.read_line(&mut v).unwrap(), 0);
assert_eq!(v, "");
}
#[test]
fn lines() {
let buf = Cursor::new(&b"12\r"[..]);
let mut s = buf.lines();
assert_eq!(s.next().unwrap().unwrap(), "12\r".to_string());
assert!(s.next().is_none());
let buf = Cursor::new(&b"12\r\n\n"[..]);
let mut s = buf.lines();
assert_eq!(s.next().unwrap().unwrap(), "12".to_string());
assert_eq!(s.next().unwrap().unwrap(), "".to_string());
assert!(s.next().is_none());
}
#[test]
fn read_to_end() {
let mut c = Cursor::new(&b""[..]);
let mut v = Vec::new();
assert_eq!(c.read_to_end(&mut v).unwrap(), 0);
assert_eq!(v, []);
let mut c = Cursor::new(&b"1"[..]);
let mut v = Vec::new();
assert_eq!(c.read_to_end(&mut v).unwrap(), 1);
assert_eq!(v, b"1");
let cap = 1024 * 1024;
let data = (0..cap).map(|i| (i / 3) as u8).collect::<Vec<_>>();
let mut v = Vec::new();
let (a, b) = data.split_at(data.len() / 2);
assert_eq!(Cursor::new(a).read_to_end(&mut v).unwrap(), a.len());
assert_eq!(Cursor::new(b).read_to_end(&mut v).unwrap(), b.len());
assert_eq!(v, data);
}
#[test]
fn read_to_string() {
let mut c = Cursor::new(&b""[..]);
let mut v = String::new();
assert_eq!(c.read_to_string(&mut v).unwrap(), 0);
assert_eq!(v, "");
let mut c = Cursor::new(&b"1"[..]);
let mut v = String::new();
assert_eq!(c.read_to_string(&mut v).unwrap(), 1);
assert_eq!(v, "1");
let mut c = Cursor::new(&b"\xff"[..]);
let mut v = String::new();
assert!(c.read_to_string(&mut v).is_err());
}
#[test]
fn read_exact() {
let mut buf = [0; 4];
let mut c = Cursor::new(&b""[..]);
assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
let mut c = Cursor::new(&b"123"[..]).chain(Cursor::new(&b"456789"[..]));
c.read_exact(&mut buf).unwrap();
assert_eq!(&buf, b"1234");
c.read_exact(&mut buf).unwrap();
assert_eq!(&buf, b"5678");
assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
}
#[test]
fn read_exact_slice() {
let mut buf = [0; 4];
let mut c = &b""[..];
assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
let mut c = &b"123"[..];
assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), io::ErrorKind::UnexpectedEof);
// make sure the optimized (early returning) method is being used
assert_eq!(&buf, &[0; 4]);
let mut c = &b"1234"[..];
c.read_exact(&mut buf).unwrap();
assert_eq!(&buf, b"1234");
let mut c = &b"56789"[..];
c.read_exact(&mut buf).unwrap();
assert_eq!(&buf, b"5678");
assert_eq!(c, b"9");
}
#[test]
fn take_eof() {
struct R;
impl Read for R {
fn read(&mut self, _: &mut [u8]) -> io::Result<usize> {
Err(io::Error::new(io::ErrorKind::Other, ""))
}
}
impl BufRead for R {
fn fill_buf(&mut self) -> io::Result<&[u8]> {
Err(io::Error::new(io::ErrorKind::Other, ""))
}
fn consume(&mut self, _amt: usize) {}
}
let mut buf = [0; 1];
assert_eq!(0, R.take(0).read(&mut buf).unwrap());
assert_eq!(b"", R.take(0).fill_buf().unwrap());
}
fn cmp_bufread<Br1: BufRead, Br2: BufRead>(mut br1: Br1, mut br2: Br2, exp: &[u8]) {
let mut cat = Vec::new();
loop {
let consume = {
let buf1 = br1.fill_buf().unwrap();
let buf2 = br2.fill_buf().unwrap();
let minlen = if buf1.len() < buf2.len() { buf1.len() } else { buf2.len() };
assert_eq!(buf1[..minlen], buf2[..minlen]);
cat.extend_from_slice(&buf1[..minlen]);
minlen
};
if consume == 0 {
break;
}
br1.consume(consume);
br2.consume(consume);
}
assert_eq!(br1.fill_buf().unwrap().len(), 0);
assert_eq!(br2.fill_buf().unwrap().len(), 0);
assert_eq!(&cat[..], &exp[..])
}
#[test]
fn chain_bufread() {
let testdata = b"ABCDEFGHIJKL";
let chain1 =
(&testdata[..3]).chain(&testdata[3..6]).chain(&testdata[6..9]).chain(&testdata[9..]);
let chain2 = (&testdata[..4]).chain(&testdata[4..8]).chain(&testdata[8..]);
cmp_bufread(chain1, chain2, &testdata[..]);
}
#[test]
fn chain_zero_length_read_is_not_eof() {
let a = b"A";
let b = b"B";
let mut s = String::new();
let mut chain = (&a[..]).chain(&b[..]);
chain.read(&mut []).unwrap();
chain.read_to_string(&mut s).unwrap();
assert_eq!("AB", s);
}
#[bench]
#[cfg_attr(target_os = "emscripten", ignore)]
fn bench_read_to_end(b: &mut test::Bencher) {
b.iter(|| {
let mut lr = repeat(1).take(10000000);
let mut vec = Vec::with_capacity(1024);
super::read_to_end(&mut lr, &mut vec)
});
}
#[test]
fn seek_len() -> io::Result<()> {
let mut c = Cursor::new(vec![0; 15]);
assert_eq!(c.stream_len()?, 15);
c.seek(SeekFrom::End(0))?;
let old_pos = c.stream_position()?;
assert_eq!(c.stream_len()?, 15);
assert_eq!(c.stream_position()?, old_pos);
c.seek(SeekFrom::Start(7))?;
c.seek(SeekFrom::Current(2))?;
let old_pos = c.stream_position()?;
assert_eq!(c.stream_len()?, 15);
assert_eq!(c.stream_position()?, old_pos);
Ok(())
}
#[test]
fn seek_position() -> io::Result<()> {
// All `asserts` are duplicated here to make sure the method does not
// change anything about the seek state.
let mut c = Cursor::new(vec![0; 15]);
assert_eq!(c.stream_position()?, 0);
assert_eq!(c.stream_position()?, 0);
c.seek(SeekFrom::End(0))?;
assert_eq!(c.stream_position()?, 15);
assert_eq!(c.stream_position()?, 15);
c.seek(SeekFrom::Start(7))?;
c.seek(SeekFrom::Current(2))?;
assert_eq!(c.stream_position()?, 9);
assert_eq!(c.stream_position()?, 9);
c.seek(SeekFrom::End(-3))?;
c.seek(SeekFrom::Current(1))?;
c.seek(SeekFrom::Current(-5))?;
assert_eq!(c.stream_position()?, 8);
assert_eq!(c.stream_position()?, 8);
Ok(())
}
// A simple example reader which uses the default implementation of
// read_to_end.
struct ExampleSliceReader<'a> {
slice: &'a [u8],
}
impl<'a> Read for ExampleSliceReader<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let len = cmp::min(self.slice.len(), buf.len());
buf[..len].copy_from_slice(&self.slice[..len]);
self.slice = &self.slice[len..];
Ok(len)
}
}
#[test]
fn test_read_to_end_capacity() -> io::Result<()> {
let input = &b"foo"[..];
// read_to_end() generally needs to over-allocate, both for efficiency
// and so that it can distinguish EOF. Assert that this is the case
// with this simple ExampleSliceReader struct, which uses the default
// implementation of read_to_end. Even though vec1 is allocated with
// exactly enough capacity for the read, read_to_end will allocate more
// space here.
let mut vec1 = Vec::with_capacity(input.len());
ExampleSliceReader { slice: input }.read_to_end(&mut vec1)?;
assert_eq!(vec1.len(), input.len());
assert!(vec1.capacity() > input.len(), "allocated more");
// However, std::io::Take includes an implementation of read_to_end
// that will not allocate when the limit has already been reached. In
// this case, vec2 never grows.
let mut vec2 = Vec::with_capacity(input.len());
ExampleSliceReader { slice: input }.take(input.len() as u64).read_to_end(&mut vec2)?;
assert_eq!(vec2.len(), input.len());
assert_eq!(vec2.capacity(), input.len(), "did not allocate more");
Ok(())
}
#[test]
fn io_slice_mut_advance() {
let mut buf1 = [1; 8];
let mut buf2 = [2; 16];
let mut buf3 = [3; 8];
let mut bufs = &mut [
IoSliceMut::new(&mut buf1),
IoSliceMut::new(&mut buf2),
IoSliceMut::new(&mut buf3),
][..];
// Only in a single buffer..
bufs = IoSliceMut::advance(bufs, 1);
assert_eq!(bufs[0].deref(), [1; 7].as_ref());
assert_eq!(bufs[1].deref(), [2; 16].as_ref());
assert_eq!(bufs[2].deref(), [3; 8].as_ref());
// Removing a buffer, leaving others as is.
bufs = IoSliceMut::advance(bufs, 7);
assert_eq!(bufs[0].deref(), [2; 16].as_ref());
assert_eq!(bufs[1].deref(), [3; 8].as_ref());
// Removing a buffer and removing from the next buffer.
bufs = IoSliceMut::advance(bufs, 18);
assert_eq!(bufs[0].deref(), [3; 6].as_ref());
}
#[test]
fn io_slice_mut_advance_empty_slice() {
let empty_bufs = &mut [][..];
// Shouldn't panic.
IoSliceMut::advance(empty_bufs, 1);
}
#[test]
fn io_slice_mut_advance_beyond_total_length() {
let mut buf1 = [1; 8];
let mut bufs = &mut [IoSliceMut::new(&mut buf1)][..];
// Going beyond the total length should be ok.
bufs = IoSliceMut::advance(bufs, 9);
assert!(bufs.is_empty());
}
#[test]
fn io_slice_advance() {
let buf1 = [1; 8];
let buf2 = [2; 16];
let buf3 = [3; 8];
let mut bufs = &mut [IoSlice::new(&buf1), IoSlice::new(&buf2), IoSlice::new(&buf3)][..];
// Only in a single buffer..
bufs = IoSlice::advance(bufs, 1);
assert_eq!(bufs[0].deref(), [1; 7].as_ref());
assert_eq!(bufs[1].deref(), [2; 16].as_ref());
assert_eq!(bufs[2].deref(), [3; 8].as_ref());
// Removing a buffer, leaving others as is.
bufs = IoSlice::advance(bufs, 7);
assert_eq!(bufs[0].deref(), [2; 16].as_ref());
assert_eq!(bufs[1].deref(), [3; 8].as_ref());
// Removing a buffer and removing from the next buffer.
bufs = IoSlice::advance(bufs, 18);
assert_eq!(bufs[0].deref(), [3; 6].as_ref());
}
#[test]
fn io_slice_advance_empty_slice() {
let empty_bufs = &mut [][..];
// Shouldn't panic.
IoSlice::advance(empty_bufs, 1);
}
#[test]
fn io_slice_advance_beyond_total_length() {
let buf1 = [1; 8];
let mut bufs = &mut [IoSlice::new(&buf1)][..];
// Going beyond the total length should be ok.
bufs = IoSlice::advance(bufs, 9);
assert!(bufs.is_empty());
}
    /// Creates a new test writer that reads from at most `n_bufs` of the
    /// provided buffers and writes at most `per_call` bytes (in total) per
    /// call to `write`.
fn test_writer(n_bufs: usize, per_call: usize) -> TestWriter {
TestWriter { n_bufs, per_call, written: Vec::new() }
}
struct TestWriter {
n_bufs: usize,
per_call: usize,
written: Vec<u8>,
}
impl Write for TestWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.write_vectored(&[IoSlice::new(buf)])
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
let mut left = self.per_call;
let mut written = 0;
for buf in bufs.iter().take(self.n_bufs) {
let n = min(left, buf.len());
self.written.extend_from_slice(&buf[0..n]);
left -= n;
written += n;
}
Ok(written)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
#[test]
fn test_writer_read_from_one_buf() {
let mut writer = test_writer(1, 2);
assert_eq!(writer.write(&[]).unwrap(), 0);
assert_eq!(writer.write_vectored(&[]).unwrap(), 0);
// Read at most 2 bytes.
assert_eq!(writer.write(&[1, 1, 1]).unwrap(), 2);
let bufs = &[IoSlice::new(&[2, 2, 2])];
assert_eq!(writer.write_vectored(bufs).unwrap(), 2);
// Only read from first buf.
let bufs = &[IoSlice::new(&[3]), IoSlice::new(&[4, 4])];
assert_eq!(writer.write_vectored(bufs).unwrap(), 1);
assert_eq!(writer.written, &[1, 1, 2, 2, 3]);
}
#[test]
fn test_writer_read_from_multiple_bufs() {
let mut writer = test_writer(3, 3);
// Read at most 3 bytes from two buffers.
let bufs = &[IoSlice::new(&[1]), IoSlice::new(&[2, 2, 2])];
assert_eq!(writer.write_vectored(bufs).unwrap(), 3);
// Read at most 3 bytes from three buffers.
let bufs = &[IoSlice::new(&[3]), IoSlice::new(&[4]), IoSlice::new(&[5, 5])];
assert_eq!(writer.write_vectored(bufs).unwrap(), 3);
assert_eq!(writer.written, &[1, 2, 2, 3, 4, 5]);
}
#[test]
fn test_write_all_vectored() {
#[rustfmt::skip] // Becomes unreadable otherwise.
let tests: Vec<(_, &'static [u8])> = vec![
(vec![], &[]),
(vec![IoSlice::new(&[1])], &[1]),
(vec![IoSlice::new(&[1, 2])], &[1, 2]),
(vec![IoSlice::new(&[1, 2, 3])], &[1, 2, 3]),
(vec![IoSlice::new(&[1, 2, 3, 4])], &[1, 2, 3, 4]),
(vec![IoSlice::new(&[1, 2, 3, 4, 5])], &[1, 2, 3, 4, 5]),
(vec![IoSlice::new(&[1]), IoSlice::new(&[2])], &[1, 2]),
(vec![IoSlice::new(&[1]), IoSlice::new(&[2, 2])], &[1, 2, 2]),
(vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2])], &[1, 1, 2, 2]),
(vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2, 2])], &[1, 1, 2, 2, 2]),
(vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2, 2])], &[1, 1, 2, 2, 2]),
(vec![IoSlice::new(&[1, 1, 1]), IoSlice::new(&[2, 2, 2])], &[1, 1, 1, 2, 2, 2]),
(vec![IoSlice::new(&[1, 1, 1]), IoSlice::new(&[2, 2, 2, 2])], &[1, 1, 1, 2, 2, 2, 2]),
(vec![IoSlice::new(&[1, 1, 1, 1]), IoSlice::new(&[2, 2, 2, 2])], &[1, 1, 1, 1, 2, 2, 2, 2]),
(vec![IoSlice::new(&[1]), IoSlice::new(&[2]), IoSlice::new(&[3])], &[1, 2, 3]),
(vec![IoSlice::new(&[1, 1]), IoSlice::new(&[2, 2]), IoSlice::new(&[3, 3])], &[1, 1, 2, 2, 3, 3]),
(vec![IoSlice::new(&[1]), IoSlice::new(&[2, 2]), IoSlice::new(&[3, 3, 3])], &[1, 2, 2, 3, 3, 3]),
(vec![IoSlice::new(&[1, 1, 1]), IoSlice::new(&[2, 2, 2]), IoSlice::new(&[3, 3, 3])], &[1, 1, 1, 2, 2, 2, 3, 3, 3]),
];
let writer_configs = &[(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)];
for (n_bufs, per_call) in writer_configs.iter().copied() {
for (mut input, wanted) in tests.clone().into_iter() {
let mut writer = test_writer(n_bufs, per_call);
assert!(writer.write_all_vectored(&mut *input).is_ok());
assert_eq!(&*writer.written, &*wanted);
}
}
}
} | /// # Errors |
vgg16.py | import torch
import torchvision
from torchvision import models
from collections import namedtuple
class Vgg16(torch.nn.Module):
def __init__(self, requires_grad=False):
super(Vgg16, self).__init__()
        vgg_pretrained_features = models.vgg16(pretrained=True).features  # grab the pretrained VGG feature layers
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
for x in range(5):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(5, 10):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(10, 17):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(17, 23):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X) | h = self.slice1(X)
h_relu1_2 = h
h = self.slice2(h)
h_relu2_2 = h
h = self.slice3(h)
h_relu3_3 = h
h = self.slice4(h)
h_relu4_3 = h
vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3'])
        # define a namedtuple type holding the intermediate activations listed above
out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3)
        return out  # the feature maps produced after each group of layers | :
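# A minimal usage sketch (illustrative only): it assumes the torchvision
# pretrained weights can be fetched and feeds a random dummy batch through the
# extractor to show how the named feature maps are accessed.
if __name__ == "__main__":
    extractor = Vgg16(requires_grad=False)
    dummy = torch.randn(1, 3, 224, 224)  # NCHW batch with one RGB image
    features = extractor(dummy)
    print(features.relu1_2.shape, features.relu4_3.shape)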
|
policies.rs | use super::digits::*;
/// A policy for inserting separators into numbers.
///
/// The configurable aspects are:
///
/// - The separator character to insert.
///
/// - How to group the separators.
///
/// - What characters are considered digits (for skipping non-digits such as
/// a minus sign).
#[derive(Debug, Clone, Copy)]
pub struct | <'a> {
/// The separator to insert.
pub separator: &'a str,
/// The grouping. The numbers in this array give the size of the groups, from
/// right to left, with the last number in the array giving the size of all
/// subsequent groups.
///
/// So to group by threes, as is typical in many places,
/// this array should be `&[3]`. However, to get a grouping like `1,23,45,678`,
/// where the last group has size three and the others size two, you would use
/// `&[3, 2]`.
pub groups: &'a [u8],
/// The characters that are considered digits. If there are multiple groups of
/// digits separated by non-digits, we only add separators to the first group.
/// This means, for example, that the number `-12345.67` will only have separators
/// inserted into the `12345` portion.
pub digits: &'a [char],
}
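/// An illustrative policy (a sketch, not part of the crate's built-in set):
/// Indian-style digit grouping, where the rightmost group has three digits and
/// every earlier group has two, e.g. `12,34,567`.
pub const INDIAN_COMMA_SEPARATOR: SeparatorPolicy = SeparatorPolicy {
    separator: ",",
    groups: &[3, 2],
    digits: ASCII_DECIMAL,
};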
/// Policy for placing a comma every three decimal digits.
pub const COMMA_SEPARATOR: SeparatorPolicy = SeparatorPolicy {
separator: ",",
groups: &[3],
digits: ASCII_DECIMAL,
};
/// Policy for placing a space every three decimal digits.
pub const SPACE_SEPARATOR: SeparatorPolicy = SeparatorPolicy {
separator: " ",
groups: &[3],
digits: ASCII_DECIMAL,
};
/// Policy for placing a period every three decimal digits.
pub const DOT_SEPARATOR: SeparatorPolicy = SeparatorPolicy {
separator: ".",
groups: &[3],
digits: ASCII_DECIMAL,
};
/// Policy for placing an underscore every three decimal digits.
pub const UNDERSCORE_SEPARATOR: SeparatorPolicy = SeparatorPolicy {
separator: "_",
groups: &[3],
digits: ASCII_DECIMAL,
};
/// Policy for placing a space every four hexadecimal digits.
pub const HEX_FOUR: SeparatorPolicy = SeparatorPolicy {
separator: " ",
groups: &[4],
digits: ASCII_HEXADECIMAL,
};
| SeparatorPolicy |
router.go | package api
import (
"context"
"encoding/json"
"net/http"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/go-chi/chi"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"m1k1o/ioth/internal/config"
"m1k1o/ioth/internal/utils"
)
type ApiManagerCtx struct {
logger zerolog.Logger
conf *config.API
}
func | (conf *config.API) *ApiManagerCtx {
return &ApiManagerCtx{
logger: log.With().Str("module", "router").Logger(),
conf: conf,
}
}
func (a *ApiManagerCtx) Mount(r chi.Router) {
// Register
r.Mount("/nodes", a.nodes())
r.Mount("/services", a.services())
r.Mount("/proxies", a.proxies())
cli, err := client.NewEnvClient()
if err != nil {
panic(err)
}
r.Get("/images", func(w http.ResponseWriter, r *http.Request) {
// List all images available locally
images, err := cli.ImageList(context.Background(), types.ImageListOptions{})
if err != nil {
utils.HttpInternalServer(w, err)
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(images)
})
r.Get("/containers", func(w http.ResponseWriter, r *http.Request) {
// Retrieve a list of containers
containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
if err != nil {
utils.HttpInternalServer(w, err)
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(containers)
})
r.Get("/networks", func(w http.ResponseWriter, r *http.Request) {
// List all networks
networks, err := cli.NetworkList(context.Background(), types.NetworkListOptions{})
if err != nil {
utils.HttpInternalServer(w, err)
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(networks)
})
// ref.
// https://godoc.org/github.com/docker/docker/client
// services
// [x] func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error)
// [ ] func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error)
// [x] func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
// [ ] func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
// [x] func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error
// [ ] func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error)
// swarm
// [ ] func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error)
// [ ] func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
// [ ] func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error)
// [ ] func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
// [ ] func (cli *Client) SwarmLeave(ctx context.Context, force bool) error
// [ ] func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error
// [ ] func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error
// nodes
// [ ] func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error)
// [x] func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
// [ ] func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error
// [ ] func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
}
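// imageName prefixes the given image name with the configured registry, e.g. a
// registry of "registry.example.com/" and a name of "app:latest" yield
// "registry.example.com/app:latest".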
func (a *ApiManagerCtx) imageName(name string) string {
registry := strings.TrimRight(a.conf.Registry, "/")
return registry + "/" + name
}
| New |
lib.rs | pub fn brackets_are_balanced(string: &str) -> bool | {
unimplemented!("Check if the string \"{}\" contains balanced brackets", string);
} |
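// One possible implementation, sketched here as a comment: track the expected
// closing brackets on a stack and check that every closer matches the most
// recent opener.
//
// pub fn brackets_are_balanced(string: &str) -> bool {
//     let mut expected = Vec::new();
//     for c in string.chars() {
//         match c {
//             '(' => expected.push(')'),
//             '[' => expected.push(']'),
//             '{' => expected.push('}'),
//             ')' | ']' | '}' => {
//                 if expected.pop() != Some(c) {
//                     return false;
//                 }
//             }
//             _ => {}
//         }
//     }
//     expected.is_empty()
// }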
|
flow_mgr.rs | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tracks possibly-redundant flow control signals from other code and converts
// into flow control frames needing to be sent to the remote.
use std::collections::HashMap;
use std::mem;
use neqo_common::{qinfo, qtrace, qwarn, Encoder};
use crate::frame::{Frame, StreamType};
use crate::recovery::RecoveryToken;
use crate::recv_stream::RecvStreams;
use crate::send_stream::SendStreams;
use crate::stream_id::{StreamId, StreamIndex, StreamIndexes};
use crate::tracking::PNSpace;
use crate::AppError;
pub type FlowControlRecoveryToken = Frame;
#[derive(Debug, Default)]
pub struct FlowMgr {
// Discriminant as key ensures only 1 of every frame type will be queued.
from_conn: HashMap<mem::Discriminant<Frame>, Frame>,
// (id, discriminant) as key ensures only 1 of every frame type per stream
// will be queued.
from_streams: HashMap<(StreamId, mem::Discriminant<Frame>), Frame>,
// (stream_type, discriminant) as key ensures only 1 of every frame type
// per stream type will be queued.
from_stream_types: HashMap<(StreamType, mem::Discriminant<Frame>), Frame>,
used_data: u64,
max_data: u64,
}
impl FlowMgr {
pub fn conn_credit_avail(&self) -> u64 {
self.max_data - self.used_data
}
pub fn conn_increase_credit_used(&mut self, amount: u64) {
self.used_data += amount;
assert!(self.used_data <= self.max_data)
}
// Dummy DataBlocked frame for discriminant use below
/// Returns whether max credit was actually increased.
pub fn conn_increase_max_credit(&mut self, new: u64) -> bool {
if new > self.max_data {
self.max_data = new;
const DB_FRAME: Frame = Frame::DataBlocked { data_limit: 0 };
self.from_conn.remove(&mem::discriminant(&DB_FRAME));
true
} else {
false
}
}
// -- frames scoped on connection --
pub fn data_blocked(&mut self) {
let frame = Frame::DataBlocked {
data_limit: self.max_data,
};
self.from_conn.insert(mem::discriminant(&frame), frame);
}
pub fn path_response(&mut self, data: [u8; 8]) {
let frame = Frame::PathResponse { data };
self.from_conn.insert(mem::discriminant(&frame), frame);
}
pub fn max_data(&mut self, maximum_data: u64) {
let frame = Frame::MaxData { maximum_data };
self.from_conn.insert(mem::discriminant(&frame), frame);
}
// -- frames scoped on stream --
/// Indicate to receiving remote the stream is reset
pub fn stream_reset(
&mut self,
stream_id: StreamId,
application_error_code: AppError,
final_size: u64,
) {
let frame = Frame::ResetStream {
stream_id,
application_error_code,
final_size,
};
self.from_streams
.insert((stream_id, mem::discriminant(&frame)), frame);
}
/// Indicate to sending remote we are no longer interested in the stream
pub fn stop_sending(&mut self, stream_id: StreamId, application_error_code: AppError) {
let frame = Frame::StopSending {
stream_id,
application_error_code,
};
self.from_streams
.insert((stream_id, mem::discriminant(&frame)), frame);
}
/// Update sending remote with more credits
pub fn max_stream_data(&mut self, stream_id: StreamId, maximum_stream_data: u64) {
let frame = Frame::MaxStreamData {
stream_id,
maximum_stream_data,
};
self.from_streams
.insert((stream_id, mem::discriminant(&frame)), frame);
}
/// Don't send stream data updates if no more data is coming
pub fn clear_max_stream_data(&mut self, stream_id: StreamId) {
let frame = Frame::MaxStreamData {
stream_id,
maximum_stream_data: 0,
};
self.from_streams
.remove(&(stream_id, mem::discriminant(&frame)));
}
/// Indicate to receiving remote we need more credits
pub fn stream_data_blocked(&mut self, stream_id: StreamId, stream_data_limit: u64) {
let frame = Frame::StreamDataBlocked {
stream_id,
stream_data_limit,
};
self.from_streams
.insert((stream_id, mem::discriminant(&frame)), frame);
}
// -- frames scoped on stream type --
pub fn max_streams(&mut self, stream_limit: StreamIndex, stream_type: StreamType) {
let frame = Frame::MaxStreams {
stream_type,
maximum_streams: stream_limit,
};
self.from_stream_types
.insert((stream_type, mem::discriminant(&frame)), frame);
}
pub fn streams_blocked(&mut self, stream_limit: StreamIndex, stream_type: StreamType) {
let frame = Frame::StreamsBlocked {
stream_type,
stream_limit,
};
self.from_stream_types
.insert((stream_type, mem::discriminant(&frame)), frame);
}
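    /// Returns the next queued frame, if any, without removing it. Connection-
    /// scoped frames are checked first, then per-stream frames, then
    /// per-stream-type frames, matching the order used by `next`.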
pub fn peek(&self) -> Option<&Frame> {
if let Some(key) = self.from_conn.keys().next() {
self.from_conn.get(key)
} else if let Some(key) = self.from_streams.keys().next() {
self.from_streams.get(key)
} else if let Some(key) = self.from_stream_types.keys().next() {
self.from_stream_types.get(key)
} else {
None
}
}
pub(crate) fn acked(
&mut self,
token: &FlowControlRecoveryToken,
send_streams: &mut SendStreams,
) {
const RESET_STREAM: &Frame = &Frame::ResetStream {
stream_id: StreamId::new(0),
application_error_code: 0,
final_size: 0,
};
if let Frame::ResetStream { stream_id, .. } = token {
qinfo!("Reset received stream={}", stream_id.as_u64());
if self
.from_streams
.remove(&(*stream_id, mem::discriminant(RESET_STREAM)))
.is_some()
{
qinfo!("Removed RESET_STREAM frame for {}", stream_id.as_u64());
}
send_streams.reset_acked(*stream_id);
}
}
pub(crate) fn lost(
&mut self,
token: &FlowControlRecoveryToken,
send_streams: &mut SendStreams,
recv_streams: &mut RecvStreams,
indexes: &mut StreamIndexes,
) {
match *token {
// Always resend ResetStream if lost
Frame::ResetStream {
stream_id,
application_error_code,
final_size,
} => {
qinfo!(
"Reset lost stream={} err={} final_size={}",
stream_id.as_u64(),
application_error_code,
final_size
);
if send_streams.get(stream_id).is_ok() {
self.stream_reset(stream_id, application_error_code, final_size);
}
}
// Resend MaxStreams if lost (with updated value)
Frame::MaxStreams { stream_type, .. } => {
let local_max = match stream_type {
StreamType::BiDi => &mut indexes.local_max_stream_bidi,
StreamType::UniDi => &mut indexes.local_max_stream_uni,
};
self.max_streams(*local_max, stream_type)
}
// Only resend "*Blocked" frames if still blocked
Frame::DataBlocked { .. } => {
if self.conn_credit_avail() == 0 {
self.data_blocked()
}
}
Frame::StreamDataBlocked { stream_id, .. } => {
if let Ok(ss) = send_streams.get(stream_id) {
if ss.credit_avail() == 0 {
self.stream_data_blocked(stream_id, ss.max_stream_data())
}
}
}
Frame::StreamsBlocked { stream_type, .. } => match stream_type {
StreamType::UniDi => {
if indexes.remote_next_stream_uni >= indexes.remote_max_stream_uni {
self.streams_blocked(indexes.remote_max_stream_uni, StreamType::UniDi);
}
}
StreamType::BiDi => {
if indexes.remote_next_stream_bidi >= indexes.remote_max_stream_bidi {
self.streams_blocked(indexes.remote_max_stream_bidi, StreamType::BiDi);
}
} | stream_id,
application_error_code,
} => self.stop_sending(stream_id, application_error_code),
Frame::MaxStreamData { stream_id, .. } => {
if let Some(rs) = recv_streams.get_mut(&stream_id) {
if let Some(msd) = rs.max_stream_data() {
self.max_stream_data(stream_id, msd)
}
}
}
Frame::PathResponse { .. } => qinfo!("Path Response lost, not re-sent"),
_ => qwarn!("Unexpected Flow frame {:?} lost, not re-sent", token),
}
}
pub(crate) fn get_frame(
&mut self,
space: PNSpace,
remaining: usize,
) -> Option<(Frame, Option<RecoveryToken>)> {
if space != PNSpace::ApplicationData {
return None;
}
if let Some(frame) = self.peek() {
// A suboptimal way to figure out if the frame fits within remaining
// space.
let mut d = Encoder::default();
frame.marshal(&mut d);
if d.len() > remaining {
qtrace!("flowc frame doesn't fit in remaining");
return None;
}
} else {
return None;
}
// There is enough space we can add this frame to the packet.
let frame = self.next().expect("just peeked this");
Some((frame.clone(), Some(RecoveryToken::Flow(frame))))
}
}
impl Iterator for FlowMgr {
type Item = Frame;
/// Used by generator to get a flow control frame.
fn next(&mut self) -> Option<Frame> {
let first_key = self.from_conn.keys().next();
if let Some(&first_key) = first_key {
return self.from_conn.remove(&first_key);
}
let first_key = self.from_streams.keys().next();
if let Some(&first_key) = first_key {
return self.from_streams.remove(&first_key);
}
let first_key = self.from_stream_types.keys().next();
if let Some(&first_key) = first_key {
return self.from_stream_types.remove(&first_key);
}
None
}
} | },
// Resend StopSending
Frame::StopSending { |
page_setup.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use glib;
use glib::translate::*;
use gtk_sys;
use std;
use std::fmt;
use std::ptr;
use PageOrientation;
use PaperSize;
use Unit;
glib_wrapper! {
pub struct PageSetup(Object<gtk_sys::GtkPageSetup, PageSetupClass>);
match fn {
get_type => || gtk_sys::gtk_page_setup_get_type(),
}
}
impl PageSetup {
pub fn new() -> PageSetup {
assert_initialized_main_thread!();
unsafe { from_glib_full(gtk_sys::gtk_page_setup_new()) }
}
pub fn new_from_file<P: AsRef<std::path::Path>>(
file_name: P,
) -> Result<PageSetup, glib::Error> {
assert_initialized_main_thread!();
unsafe {
let mut error = ptr::null_mut();
let ret = gtk_sys::gtk_page_setup_new_from_file(
file_name.as_ref().to_glib_none().0,
&mut error,
);
if error.is_null() {
Ok(from_glib_full(ret))
} else {
Err(from_glib_full(error))
}
}
}
pub fn new_from_gvariant(variant: &glib::Variant) -> PageSetup {
assert_initialized_main_thread!();
unsafe {
from_glib_full(gtk_sys::gtk_page_setup_new_from_gvariant(
variant.to_glib_none().0,
))
}
}
pub fn new_from_key_file(
key_file: &glib::KeyFile,
group_name: Option<&str>,
) -> Result<PageSetup, glib::Error> {
assert_initialized_main_thread!();
unsafe {
let mut error = ptr::null_mut();
let ret = gtk_sys::gtk_page_setup_new_from_key_file(
key_file.to_glib_none().0,
group_name.to_glib_none().0,
&mut error,
);
if error.is_null() {
Ok(from_glib_full(ret))
} else {
Err(from_glib_full(error))
}
}
}
pub fn copy(&self) -> Option<PageSetup> {
unsafe { from_glib_full(gtk_sys::gtk_page_setup_copy(self.to_glib_none().0)) }
}
pub fn get_bottom_margin(&self, unit: Unit) -> f64 {
unsafe { gtk_sys::gtk_page_setup_get_bottom_margin(self.to_glib_none().0, unit.to_glib()) }
}
pub fn get_left_margin(&self, unit: Unit) -> f64 {
unsafe { gtk_sys::gtk_page_setup_get_left_margin(self.to_glib_none().0, unit.to_glib()) }
}
pub fn get_orientation(&self) -> PageOrientation |
pub fn get_page_height(&self, unit: Unit) -> f64 {
unsafe { gtk_sys::gtk_page_setup_get_page_height(self.to_glib_none().0, unit.to_glib()) }
}
pub fn get_page_width(&self, unit: Unit) -> f64 {
unsafe { gtk_sys::gtk_page_setup_get_page_width(self.to_glib_none().0, unit.to_glib()) }
}
pub fn get_paper_height(&self, unit: Unit) -> f64 {
unsafe { gtk_sys::gtk_page_setup_get_paper_height(self.to_glib_none().0, unit.to_glib()) }
}
pub fn get_paper_size(&self) -> PaperSize {
unsafe {
from_glib_none(gtk_sys::gtk_page_setup_get_paper_size(
self.to_glib_none().0,
))
}
}
pub fn get_paper_width(&self, unit: Unit) -> f64 {
unsafe { gtk_sys::gtk_page_setup_get_paper_width(self.to_glib_none().0, unit.to_glib()) }
}
pub fn get_right_margin(&self, unit: Unit) -> f64 {
unsafe { gtk_sys::gtk_page_setup_get_right_margin(self.to_glib_none().0, unit.to_glib()) }
}
pub fn get_top_margin(&self, unit: Unit) -> f64 {
unsafe { gtk_sys::gtk_page_setup_get_top_margin(self.to_glib_none().0, unit.to_glib()) }
}
pub fn load_file<P: AsRef<std::path::Path>>(&self, file_name: P) -> Result<(), glib::Error> {
unsafe {
let mut error = ptr::null_mut();
let _ = gtk_sys::gtk_page_setup_load_file(
self.to_glib_none().0,
file_name.as_ref().to_glib_none().0,
&mut error,
);
if error.is_null() {
Ok(())
} else {
Err(from_glib_full(error))
}
}
}
pub fn load_key_file(
&self,
key_file: &glib::KeyFile,
group_name: Option<&str>,
) -> Result<(), glib::Error> {
unsafe {
let mut error = ptr::null_mut();
let _ = gtk_sys::gtk_page_setup_load_key_file(
self.to_glib_none().0,
key_file.to_glib_none().0,
group_name.to_glib_none().0,
&mut error,
);
if error.is_null() {
Ok(())
} else {
Err(from_glib_full(error))
}
}
}
pub fn set_bottom_margin(&self, margin: f64, unit: Unit) {
unsafe {
gtk_sys::gtk_page_setup_set_bottom_margin(
self.to_glib_none().0,
margin,
unit.to_glib(),
);
}
}
pub fn set_left_margin(&self, margin: f64, unit: Unit) {
unsafe {
gtk_sys::gtk_page_setup_set_left_margin(self.to_glib_none().0, margin, unit.to_glib());
}
}
pub fn set_orientation(&self, orientation: PageOrientation) {
unsafe {
gtk_sys::gtk_page_setup_set_orientation(self.to_glib_none().0, orientation.to_glib());
}
}
pub fn set_paper_size(&self, size: &PaperSize) {
unsafe {
gtk_sys::gtk_page_setup_set_paper_size(
self.to_glib_none().0,
mut_override(size.to_glib_none().0),
);
}
}
pub fn set_paper_size_and_default_margins(&self, size: &PaperSize) {
unsafe {
gtk_sys::gtk_page_setup_set_paper_size_and_default_margins(
self.to_glib_none().0,
mut_override(size.to_glib_none().0),
);
}
}
pub fn set_right_margin(&self, margin: f64, unit: Unit) {
unsafe {
gtk_sys::gtk_page_setup_set_right_margin(self.to_glib_none().0, margin, unit.to_glib());
}
}
pub fn set_top_margin(&self, margin: f64, unit: Unit) {
unsafe {
gtk_sys::gtk_page_setup_set_top_margin(self.to_glib_none().0, margin, unit.to_glib());
}
}
pub fn to_file<P: AsRef<std::path::Path>>(&self, file_name: P) -> Result<(), glib::Error> {
unsafe {
let mut error = ptr::null_mut();
let _ = gtk_sys::gtk_page_setup_to_file(
self.to_glib_none().0,
file_name.as_ref().to_glib_none().0,
&mut error,
);
if error.is_null() {
Ok(())
} else {
Err(from_glib_full(error))
}
}
}
pub fn to_gvariant(&self) -> Option<glib::Variant> {
unsafe { from_glib_none(gtk_sys::gtk_page_setup_to_gvariant(self.to_glib_none().0)) }
}
pub fn to_key_file(&self, key_file: &glib::KeyFile, group_name: Option<&str>) {
unsafe {
gtk_sys::gtk_page_setup_to_key_file(
self.to_glib_none().0,
key_file.to_glib_none().0,
group_name.to_glib_none().0,
);
}
}
}
impl Default for PageSetup {
fn default() -> Self {
Self::new()
}
}
impl fmt::Display for PageSetup {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "PageSetup")
}
}
| {
unsafe {
from_glib(gtk_sys::gtk_page_setup_get_orientation(
self.to_glib_none().0,
))
}
} |
minimal.rs | use bevy::{
app::AppExit,
diagnostic::{EntityCountDiagnosticsPlugin, FrameTimeDiagnosticsPlugin, LogDiagnosticsPlugin},
prelude::*,
window::WindowResizeConstraints,
};
use bevy_pixels::prelude::*;
use rand::prelude::*;
const WIDTH: u32 = 320;
const HEIGHT: u32 = 240;
#[derive(Bundle, Debug)]
struct ObjectBundle {
position: Position,
velocity: Velocity,
size: Size,
color: Color,
}
#[derive(Component, Debug)]
struct Position {
x: u32,
y: u32,
}
#[derive(Component, Debug)]
struct Velocity {
x: i16,
y: i16,
}
#[derive(Component, Debug)]
struct Size {
width: u32,
height: u32,
}
#[derive(Component, Debug)]
struct Color(u8, u8, u8, u8);
#[derive(Debug, Hash, PartialEq, Eq, Clone, StageLabel)]
enum | {
DrawBackground,
DrawObjects,
}
fn main() {
App::new()
.insert_resource(WindowDescriptor {
title: "Hello Bevy Pixels".to_string(),
width: WIDTH as f32,
height: HEIGHT as f32,
resize_constraints: WindowResizeConstraints {
min_width: WIDTH as f32,
min_height: HEIGHT as f32,
..Default::default()
},
..Default::default()
})
.insert_resource(PixelsOptions {
width: WIDTH,
height: HEIGHT,
})
.add_plugins(DefaultPlugins)
.add_plugin(PixelsPlugin)
.add_plugin(FrameTimeDiagnosticsPlugin::default())
.add_plugin(EntityCountDiagnosticsPlugin::default())
.add_plugin(LogDiagnosticsPlugin::default())
.add_startup_system(setup_system)
.add_system(bounce_system)
.add_system(movement_system)
.add_system(exit_on_escape_system)
.add_stage_after(
PixelsStage::Draw,
AppStage::DrawBackground,
SystemStage::parallel(),
)
.add_stage_after(
AppStage::DrawBackground,
AppStage::DrawObjects,
SystemStage::parallel(),
)
.add_system_to_stage(AppStage::DrawBackground, draw_background_system)
.add_system_to_stage(AppStage::DrawObjects, draw_objects_system)
.run();
}
fn setup_system(mut commands: Commands) {
let box_object = ObjectBundle {
position: Position { x: 24, y: 16 },
velocity: Velocity { x: 1, y: 1 },
size: Size {
width: 64,
height: 64,
},
color: Color(0x5e, 0x48, 0xe8, 0xff),
};
commands.spawn().insert_bundle(box_object);
}
fn bounce_system(mut query: Query<(&Position, &mut Velocity, &Size, &mut Color)>) {
for (position, mut velocity, size, mut color) in query.iter_mut() {
let mut bounce = false;
if position.x == 0 || position.x + size.width > WIDTH {
velocity.x *= -1;
bounce = true;
}
if position.y == 0 || position.y + size.height > HEIGHT {
velocity.y *= -1;
bounce = true;
}
if bounce {
color.0 = random();
color.1 = random();
color.2 = random();
}
}
}
fn movement_system(mut query: Query<(&mut Position, &Velocity)>) {
for (mut position, velocity) in query.iter_mut() {
position.x = (position.x as i16 + velocity.x) as u32;
position.y = (position.y as i16 + velocity.y) as u32;
}
}
fn exit_on_escape_system(
keyboard_input: Res<Input<KeyCode>>,
mut app_exit_events: EventWriter<AppExit>,
) {
if keyboard_input.just_pressed(KeyCode::Escape) {
app_exit_events.send(AppExit);
}
}
fn draw_background_system(mut pixels_resource: ResMut<PixelsResource>) {
let frame = pixels_resource.pixels.get_frame();
frame.copy_from_slice(&[0x48, 0xb2, 0xe8, 0xff].repeat(frame.len() / 4));
}
fn draw_objects_system(
mut pixels_resource: ResMut<PixelsResource>,
query: Query<(&Position, &Size, &Color)>,
) {
let frame = pixels_resource.pixels.get_frame();
let frame_width_bytes = (WIDTH * 4) as usize;
for (position, size, color) in query.iter() {
let x_offset = (position.x * 4) as usize;
let width_bytes = (size.width * 4) as usize;
let object_row = &[color.0, color.1, color.2, color.3].repeat(size.width as usize);
for y in position.y..(position.y + size.height - 1) {
let y_offset = y as usize * frame_width_bytes;
let i = y_offset + x_offset;
let j = i + width_bytes;
frame[i..j].copy_from_slice(object_row);
}
}
}
| AppStage |
main.py | import http | from typing import Optional
from fastapi import FastAPI, Path, Query
app = FastAPI()
@app.api_route("/api_route")
def non_operation():
return {"message": "Hello World"}
def non_decorated_route():
return {"message": "Hello World"}
app.add_api_route("/non_decorated_route", non_decorated_route)
@app.get("/text")
def get_text():
return "Hello World"
@app.get("/path/{item_id}")
def get_id(item_id):
return item_id
@app.get("/path/str/{item_id}")
def get_str_id(item_id: str):
return item_id
@app.get("/path/int/{item_id}")
def get_int_id(item_id: int):
return item_id
@app.get("/path/float/{item_id}")
def get_float_id(item_id: float):
return item_id
@app.get("/path/bool/{item_id}")
def get_bool_id(item_id: bool):
return item_id
@app.get("/path/param/{item_id}")
def get_path_param_id(item_id: str = Path()):
return item_id
@app.get("/path/param-required/{item_id}")
def get_path_param_required_id(item_id: str = Path()):
return item_id
@app.get("/path/param-minlength/{item_id}")
def get_path_param_min_length(item_id: str = Path(min_length=3)):
return item_id
@app.get("/path/param-maxlength/{item_id}")
def get_path_param_max_length(item_id: str = Path(max_length=3)):
return item_id
@app.get("/path/param-min_maxlength/{item_id}")
def get_path_param_min_max_length(item_id: str = Path(max_length=3, min_length=2)):
return item_id
@app.get("/path/param-gt/{item_id}")
def get_path_param_gt(item_id: float = Path(gt=3)):
return item_id
@app.get("/path/param-gt0/{item_id}")
def get_path_param_gt0(item_id: float = Path(gt=0)):
return item_id
@app.get("/path/param-ge/{item_id}")
def get_path_param_ge(item_id: float = Path(ge=3)):
return item_id
@app.get("/path/param-lt/{item_id}")
def get_path_param_lt(item_id: float = Path(lt=3)):
return item_id
@app.get("/path/param-lt0/{item_id}")
def get_path_param_lt0(item_id: float = Path(lt=0)):
return item_id
@app.get("/path/param-le/{item_id}")
def get_path_param_le(item_id: float = Path(le=3)):
return item_id
@app.get("/path/param-lt-gt/{item_id}")
def get_path_param_lt_gt(item_id: float = Path(lt=3, gt=1)):
return item_id
@app.get("/path/param-le-ge/{item_id}")
def get_path_param_le_ge(item_id: float = Path(le=3, ge=1)):
return item_id
@app.get("/path/param-lt-int/{item_id}")
def get_path_param_lt_int(item_id: int = Path(lt=3)):
return item_id
@app.get("/path/param-gt-int/{item_id}")
def get_path_param_gt_int(item_id: int = Path(gt=3)):
return item_id
@app.get("/path/param-le-int/{item_id}")
def get_path_param_le_int(item_id: int = Path(le=3)):
return item_id
@app.get("/path/param-ge-int/{item_id}")
def get_path_param_ge_int(item_id: int = Path(ge=3)):
return item_id
@app.get("/path/param-lt-gt-int/{item_id}")
def get_path_param_lt_gt_int(item_id: int = Path(lt=3, gt=1)):
return item_id
@app.get("/path/param-le-ge-int/{item_id}")
def get_path_param_le_ge_int(item_id: int = Path(le=3, ge=1)):
return item_id
@app.get("/query")
def get_query(query):
return f"foo bar {query}"
@app.get("/query/optional")
def get_query_optional(query=None):
if query is None:
return "foo bar"
return f"foo bar {query}"
@app.get("/query/int")
def get_query_type(query: int):
return f"foo bar {query}"
@app.get("/query/int/optional")
def get_query_type_optional(query: Optional[int] = None):
if query is None:
return "foo bar"
return f"foo bar {query}"
@app.get("/query/int/default")
def get_query_type_int_default(query: int = 10):
return f"foo bar {query}"
@app.get("/query/param")
def get_query_param(query=Query(default=None)):
if query is None:
return "foo bar"
return f"foo bar {query}"
@app.get("/query/param-required")
def get_query_param_required(query=Query()):
return f"foo bar {query}"
@app.get("/query/param-required/int")
def get_query_param_required_type(query: int = Query()):
return f"foo bar {query}"
@app.get("/enum-status-code", status_code=http.HTTPStatus.CREATED)
def get_enum_status_code():
return "foo bar" | |
decodescript.py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The vhkdCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test decoding scripts via decodescript RPC command."""
from test_framework.test_framework import vhkdCoinTestFramework
from test_framework.util import *
from test_framework.mininode import *
from io import BytesIO
class DecodeScriptTest(vhkdCoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def decodescript_script_sig(self):
signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
push_signature = '48' + signature
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
# below are test cases for all of the standard transaction types
# 1) P2PK scriptSig
# the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
rpc_result = self.nodes[0].decodescript(push_signature)
assert_equal(signature, rpc_result['asm'])
# 2) P2PKH scriptSig
rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
assert_equal(signature + ' ' + public_key, rpc_result['asm'])
# 3) multisig scriptSig
# this also tests the leading portion of a P2SH multisig scriptSig
# OP_0 <A sig> <B sig>
rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
# 4) P2SH scriptSig
# an empty P2SH redeemScript is valid and makes for a very simple test case.
# thus, such a spending scriptSig would just need to pass the outer redeemScript
# hash test and leave true on the top of the stack.
rpc_result = self.nodes[0].decodescript('5100')
assert_equal('1 0', rpc_result['asm'])
# 5) null data scriptSig - no such thing because null data scripts can not be spent.
# thus, no test case for that standard transaction type is here.
def decodescript_script_pub_key(self):
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777'
push_public_key_hash = '14' + public_key_hash
# below are test cases for all of the standard transaction types
# 1) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
# 2) P2PKH scriptPubKey
# OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
# 3) multisig scriptPubKey
# <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_public_key + push_public_key + '53ae')
assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
# 4) P2SH scriptPubKey
# OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
# push_public_key_hash here should actually be the hash of a redeem script.
# but this works the same for purposes of this test.
rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
# 5) null data scriptPubKey
# use a signature look-alike here to make sure that we do not decode random data as a signature.
# this matters if/when signature sighash decoding comes along.
# would want to make sure that no such decoding takes place in this case.
signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
# OP_RETURN <data>
rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
# 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
# OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
#
# OP_IF
# <receiver-pubkey> OP_CHECKSIGVERIFY
# OP_ELSE
# <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
# OP_ENDIF
# <sender-pubkey> OP_CHECKSIG
#
# lock until block 500,000
rpc_result = self.nodes[0].decodescript('63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac')
assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
def decoderawtransaction_asm_sighashtype(self):
"""Test decoding scripts via RPC command "decoderawtransaction".
This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
"""
# this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
# this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
# it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
# verify that we have not altered scriptPubKey decoding.
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
txSave = CTransaction()
txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))
# make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
# verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
# some more full transaction tests of varying specific scriptSigs. used instead of
# tests in decodescript_script_sig because the decodescript RPC is specifically
# for working on scriptPubKeys (argh!).
push_signature = bytes_to_hex_str(txSave.vin[0].scriptSig)[2:(0x48*2+4)]
signature = push_signature[2:]
der_signature = signature[:-2]
signature_sighash_decoded = der_signature + '[ALL]'
signature_2 = der_signature + '82'
push_signature_2 = '48' + signature_2
signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
# 1) P2PK scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# make sure that the sighash decodes come out correctly for a more complex / lesser used case.
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 2) multisig scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 3) test a scriptSig that contains more than push operations.
# in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it. |
def run_test(self):
self.decodescript_script_sig()
self.decodescript_script_pub_key()
self.decoderawtransaction_asm_sighashtype()
if __name__ == '__main__':
DecodeScriptTest().main() | txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
rpc_result = self.nodes[0].decoderawtransaction(bytes_to_hex_str(txSave.serialize()))
assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm']) |
test_spm.py | # Author: Christian Brodbeck <[email protected]>
import pickle
from nose.tools import eq_
import numpy as np
from numpy.testing import assert_array_equal
from eelbrain import datasets
from eelbrain._stats.spm import LM, LMGroup
def test_lm():
|
def test_random_lm():
# dummy coding
ds = datasets.get_uts()
lms = []
for i in range(5):
ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape)
lms.append(LM('uts', 'A*B*Y', ds))
rlm = LMGroup(lms)
eq_(repr(rlm), '<LMGroup: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y, n=5>')
# coefficients
ds = rlm.coefficients_dataset(('A', 'A x B'))
eq_(ds['term'].cells, ('A', 'A x B'))
# tests
res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025)
eq_(res.clusters.n_cases, 1)
# effect coding
ds = datasets.get_uts()
lms = []
for i in range(5):
ds['uts'].x += np.random.normal(0, 2, ds['uts'].shape)
lms.append(LM('uts', 'A*B*Y', ds, 'effect'))
rlm = LMGroup(lms)
res = rlm.column_ttest('A x B', samples=100, pmin=0.05, mintime=0.025)
eq_(res.clusters.n_cases, 6)
# persistence
rlm_p = pickle.loads(pickle.dumps(rlm, pickle.HIGHEST_PROTOCOL))
eq_(rlm_p.dims, rlm.dims)
| ds = datasets.get_uts()
model = ds.eval("A*B*Y")
coeffs = ds['uts'].ols(model)
lm = LM('uts', 'A*B*Y', ds, 'effect')
eq_(repr(lm), "<LM: uts ~ A + B + A x B + Y + A x Y + B x Y + A x B x Y>")
for i, effect in enumerate(model.effects):
assert_array_equal(lm.coefficient(effect.name).x, coeffs.x[i]) |
light.py | """Support for Fibaro lights."""
import asyncio
from functools import partial
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
DOMAIN,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.const import CONF_WHITE_VALUE
import homeassistant.util.color as color_util
from . import CONF_COLOR, CONF_DIMMING, CONF_RESET_COLOR, FIBARO_DEVICES, FibaroDevice
_LOGGER = logging.getLogger(__name__)
def scaleto255(value):
|
def scaleto100(value):
"""Scale the input value from 0-255 to 0-100."""
# Make sure a low but non-zero value is not rounded down to zero
if 0 < value < 3:
return 1
return max(0, min(100, ((value * 100.0) / 255.0)))
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Perform the setup for Fibaro controller devices."""
if discovery_info is None:
return
async_add_entities(
[FibaroLight(device) for device in hass.data[FIBARO_DEVICES]["light"]], True
)
class FibaroLight(FibaroDevice, LightEntity):
"""Representation of a Fibaro Light, including dimmable."""
def __init__(self, fibaro_device):
"""Initialize the light."""
self._brightness = None
self._color = (0, 0)
self._last_brightness = 0
self._supported_flags = 0
self._update_lock = asyncio.Lock()
self._white = 0
devconf = fibaro_device.device_config
self._reset_color = devconf.get(CONF_RESET_COLOR, False)
supports_color = (
"color" in fibaro_device.properties and "setColor" in fibaro_device.actions
)
supports_dimming = "levelChange" in fibaro_device.interfaces
supports_white_v = "setW" in fibaro_device.actions
# Configuration can override default capability detection
if devconf.get(CONF_DIMMING, supports_dimming):
self._supported_flags |= SUPPORT_BRIGHTNESS
if devconf.get(CONF_COLOR, supports_color):
self._supported_flags |= SUPPORT_COLOR
if devconf.get(CONF_WHITE_VALUE, supports_white_v):
self._supported_flags |= SUPPORT_WHITE_VALUE
super().__init__(fibaro_device)
self.entity_id = f"{DOMAIN}.{self.ha_id}"
@property
def brightness(self):
"""Return the brightness of the light."""
return scaleto255(self._brightness)
@property
def hs_color(self):
"""Return the color of the light."""
return self._color
@property
def white_value(self):
"""Return the white value of this light between 0..255."""
return self._white
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_flags
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
async with self._update_lock:
await self.hass.async_add_executor_job(partial(self._turn_on, **kwargs))
def _turn_on(self, **kwargs):
"""Really turn the light on."""
if self._supported_flags & SUPPORT_BRIGHTNESS:
target_brightness = kwargs.get(ATTR_BRIGHTNESS)
# No brightness specified, so we either restore it to
# last brightness or switch it on at maximum level
if target_brightness is None:
if self._brightness == 0:
if self._last_brightness:
self._brightness = self._last_brightness
else:
self._brightness = 100
else:
# We set it to the target brightness and turn it on
self._brightness = scaleto100(target_brightness)
if self._supported_flags & SUPPORT_COLOR:
if (
self._reset_color
and kwargs.get(ATTR_WHITE_VALUE) is None
and kwargs.get(ATTR_HS_COLOR) is None
and kwargs.get(ATTR_BRIGHTNESS) is None
):
self._color = (100, 0)
# Update based on parameters
self._white = kwargs.get(ATTR_WHITE_VALUE, self._white)
self._color = kwargs.get(ATTR_HS_COLOR, self._color)
rgb = color_util.color_hs_to_RGB(*self._color)
self.call_set_color(
round(rgb[0] * self._brightness / 100.0),
round(rgb[1] * self._brightness / 100.0),
round(rgb[2] * self._brightness / 100.0),
round(self._white * self._brightness / 100.0),
)
if self.state == "off":
self.set_level(int(self._brightness))
return
if self._reset_color:
bri255 = scaleto255(self._brightness)
self.call_set_color(bri255, bri255, bri255, bri255)
if self._supported_flags & SUPPORT_BRIGHTNESS:
self.set_level(int(self._brightness))
return
# The simplest case is left for last. No dimming, just switch on
self.call_turn_on()
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
async with self._update_lock:
await self.hass.async_add_executor_job(partial(self._turn_off, **kwargs))
def _turn_off(self, **kwargs):
"""Really turn the light off."""
# Let's save the last brightness level before we switch it off
if (
(self._supported_flags & SUPPORT_BRIGHTNESS)
and self._brightness
and self._brightness > 0
):
self._last_brightness = self._brightness
self._brightness = 0
self.call_turn_off()
@property
def is_on(self):
"""Return true if device is on."""
return self.current_binary_state
async def async_update(self):
"""Update the state."""
async with self._update_lock:
await self.hass.async_add_executor_job(self._update)
def _update(self):
"""Really update the state."""
# Brightness handling
if self._supported_flags & SUPPORT_BRIGHTNESS:
self._brightness = float(self.fibaro_device.properties.value)
# Fibaro might report 0-99 or 0-100 for brightness,
# based on device type, so we round up here
if self._brightness > 99:
self._brightness = 100
# Color handling
if (
self._supported_flags & SUPPORT_COLOR
and "color" in self.fibaro_device.properties
and "," in self.fibaro_device.properties.color
):
# Fibaro communicates the color as an 'R, G, B, W' string
rgbw_s = self.fibaro_device.properties.color
if rgbw_s == "0,0,0,0" and "lastColorSet" in self.fibaro_device.properties:
rgbw_s = self.fibaro_device.properties.lastColorSet
rgbw_list = [int(i) for i in rgbw_s.split(",")][:4]
if rgbw_list[0] or rgbw_list[1] or rgbw_list[2]:
self._color = color_util.color_RGB_to_hs(*rgbw_list[:3])
if (self._supported_flags & SUPPORT_WHITE_VALUE) and self.brightness != 0:
self._white = min(255, max(0, rgbw_list[3] * 100.0 / self._brightness))
| """Scale the input value from 0-100 to 0-255."""
# Fibaro has a funny way of storing brightness either 0-100 or 0-99
# depending on device type (e.g. dimmer vs led)
if value > 98:
value = 100
return max(0, min(255, ((value * 255.0) / 100.0))) |
sessions.py | """Session implementation for CherryPy.
You need to edit your config file to use sessions. Here's an example::
[/]
tools.sessions.on = True
tools.sessions.storage_class = cherrypy.lib.sessions.FileSession
tools.sessions.storage_path = "/home/site/sessions"
tools.sessions.timeout = 60
This sets the session to be stored in files in the directory
/home/site/sessions, and the session timeout to 60 minutes. If you omit
``storage_class``, the sessions will be saved in RAM.
``tools.sessions.on`` is the only required line for working sessions,
the rest are optional.
By default, the session ID is passed in a cookie, so the client's browser must
have cookies enabled for your site.
To set data for the current session, use
``cherrypy.session['fieldname'] = 'fieldvalue'``;
to get data use ``cherrypy.session.get('fieldname')``.
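For example, a minimal handler might look like this sketch (the ``count``
field name and handler are just placeholders)::
    @cherrypy.expose
    def index(self):
        # Read the counter from the session (defaulting to 0) and store it back.
        count = cherrypy.session.get('count', 0) + 1
        cherrypy.session['count'] = count
        return 'You have viewed this page %d times.' % count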
================
Locking sessions
================
By default, the ``'locking'`` mode of sessions is ``'implicit'``, which means
the session is locked early and unlocked late. Be mindful of this default mode
for any requests that take a long time to process (streaming responses,
expensive calculations, database lookups, API calls, etc), as other concurrent
requests that also utilize sessions will hang until the session is unlocked.
If you want to control when the session data is locked and unlocked,
set ``tools.sessions.locking = 'explicit'``. Then call
``cherrypy.session.acquire_lock()`` and ``cherrypy.session.release_lock()``.
Regardless of which mode you use, the session is guaranteed to be unlocked when
the request is complete.
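A sketch of explicit locking, assuming ``tools.sessions.locking = 'explicit'``
is configured for this handler (``expensive_calculation`` is a placeholder)::
    @cherrypy.expose
    def report(self):
        # Do the slow work before touching the session so that other
        # requests are not blocked waiting on the session lock.
        data = expensive_calculation()
        cherrypy.session.acquire_lock()
        try:
            cherrypy.session['last_report'] = data
        finally:
            cherrypy.session.release_lock()
        return 'done'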
=================
Expiring Sessions
=================
You can force a session to expire with :func:`cherrypy.lib.sessions.expire`.
Simply call that function at the point you want the session to expire, and it
will cause the session cookie to expire client-side.
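For example, a logout handler could be as simple as this sketch::
    @cherrypy.expose
    def logout(self):
        cherrypy.lib.sessions.expire()
        return 'Logged out.'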
===========================
Session Fixation Protection
===========================
If CherryPy receives, via a request cookie, a session id that it does not
recognize, it will reject that id and create a new one to return in the
response cookie. This `helps prevent session fixation attacks
<http://en.wikipedia.org/wiki/Session_fixation#Regenerate_SID_on_each_request>`_.
However, CherryPy "recognizes" a session id by looking up the saved session
data for that id. Therefore, if you never save any session data,
**you will get a new session id for every request**.
A side effect of CherryPy overwriting unrecognised session ids is that if you
have multiple, separate CherryPy applications running on a single domain (e.g.
on different ports), each app will overwrite the other's session id because by
default they use the same cookie name (``"session_id"``) but do not recognise
each other's sessions. It is therefore a good idea to use a different name for
each, for example::
[/]
...
tools.sessions.name = "my_app_session_id"
================
Sharing Sessions
================
If you run multiple instances of CherryPy (for example via mod_python behind
Apache prefork), you most likely cannot use the RAM session backend, since each
instance of CherryPy will have its own memory space. Use a different backend
instead, and verify that all instances are pointing at the same file or db
location. Alternately, you might try a load balancer which makes sessions
"sticky". Google is your friend, there.
================
Expiration Dates
================
The response cookie will possess an expiration date to inform the client at
which point to stop sending the cookie back in requests. If the server time
and client time differ, expect sessions to be unreliable. **Make sure the
system time of your server is accurate**.
CherryPy defaults to a 60-minute session timeout, which also applies to the
cookie which is sent to the client. Unfortunately, some versions of Safari
("4 public beta" on Windows XP at least) appear to have a bug in their parsing
of the GMT expiration date--they appear to interpret the date as one hour in
the past. Sixty minutes minus one hour is pretty close to zero, so you may
experience this bug as a new session id for every request, unless the requests
are less than one second apart. To fix, try increasing the session.timeout.
On the other extreme, some users report Firefox sending cookies after their
expiration date, although this was on a system with an inaccurate system time.
Maybe FF doesn't trust system time.
"""
import sys
import datetime
import os
import time
import threading
import binascii
import pickle
import contextlib
import zc.lockfile
import cherrypy
from cherrypy.lib import httputil
from cherrypy.lib import locking
from cherrypy.lib import is_iterator
missing = object()
class Session(object):
"""A CherryPy dict-like Session object (one per request)."""
_id = None
id_observers = None
"A list of callbacks to which to pass new id's."
@property
def id(self):
"""Return the current session id."""
return self._id
@id.setter
def id(self, value):
self._id = value
for o in self.id_observers:
o(value)
timeout = 60
'Number of minutes after which to delete session data.'
locked = False
"""
If True, this session instance has exclusive read/write access
to session data."""
loaded = False
"""
If True, data has been retrieved from storage. This should happen
automatically on the first attempt to access session data."""
clean_thread = None
'Class-level Monitor which calls self.clean_up.'
clean_freq = 5
'The poll rate for expired session cleanup in minutes.'
originalid = None
'The session id passed by the client. May be missing or unsafe.'
missing = False
'True if the session requested by the client did not exist.'
regenerated = False
"""
True if the application called session.regenerate(). This is not set by
internal calls to regenerate the session id."""
debug = False
'If True, log debug information.'
# --------------------- Session management methods --------------------- #
def __init__(self, id=None, **kwargs):
self.id_observers = []
self._data = {}
for k, v in kwargs.items():
setattr(self, k, v)
self.originalid = id
self.missing = False
if id is None:
if self.debug:
cherrypy.log('No id given; making a new one', 'TOOLS.SESSIONS')
self._regenerate()
else:
self.id = id
if self._exists():
if self.debug:
cherrypy.log('Set id to %s.' % id, 'TOOLS.SESSIONS')
else:
if self.debug:
cherrypy.log('Expired or malicious session %r; '
'making a new one' % id, 'TOOLS.SESSIONS')
# Expired or malicious session. Make a new one.
# See https://github.com/cherrypy/cherrypy/issues/709.
self.id = None
self.missing = True
self._regenerate()
def now(self):
"""Generate the session specific concept of 'now'.
Other session providers can override this to use alternative,
possibly timezone aware, versions of 'now'.
"""
return datetime.datetime.now()
def regenerate(self):
"""Replace the current session (with a new id)."""
self.regenerated = True
self._regenerate()
def _regenerate(self):
if self.id is not None:
if self.debug:
cherrypy.log(
'Deleting the existing session %r before '
'regeneration.' % self.id,
'TOOLS.SESSIONS')
self.delete()
old_session_was_locked = self.locked
if old_session_was_locked:
self.release_lock()
if self.debug:
cherrypy.log('Old lock released.', 'TOOLS.SESSIONS')
self.id = None
while self.id is None:
self.id = self.generate_id()
# Assert that the generated id is not already stored.
if self._exists():
self.id = None
if self.debug:
cherrypy.log('Set id to generated %s.' % self.id,
'TOOLS.SESSIONS')
if old_session_was_locked:
self.acquire_lock()
if self.debug:
cherrypy.log('Regenerated lock acquired.', 'TOOLS.SESSIONS')
def clean_up(self):
"""Clean up expired sessions."""
pass
def generate_id(self):
"""Return a new session id."""
return binascii.hexlify(os.urandom(20)).decode('ascii')
def save(self):
"""Save session data."""
try:
# If session data has never been loaded then it's never been
# accessed: no need to save it
if self.loaded:
t = datetime.timedelta(seconds=self.timeout * 60)
expiration_time = self.now() + t
if self.debug:
cherrypy.log('Saving session %r with expiry %s' %
(self.id, expiration_time),
'TOOLS.SESSIONS')
self._save(expiration_time)
else:
if self.debug:
cherrypy.log(
'Skipping save of session %r (no session loaded).' %
self.id, 'TOOLS.SESSIONS')
finally:
if self.locked:
# Always release the lock if the user didn't release it
self.release_lock()
if self.debug:
cherrypy.log('Lock released after save.', 'TOOLS.SESSIONS')
def load(self):
"""Copy stored session data into this session instance."""
data = self._load()
# data is either None or a tuple (session_data, expiration_time)
if data is None or data[1] < self.now():
if self.debug:
cherrypy.log('Expired session %r, flushing data.' % self.id,
'TOOLS.SESSIONS')
self._data = {}
else:
if self.debug:
cherrypy.log('Data loaded for session %r.' % self.id,
'TOOLS.SESSIONS')
self._data = data[0]
self.loaded = True
# Stick the clean_thread in the class, not the instance.
# The instances are created and destroyed per-request.
cls = self.__class__
if self.clean_freq and not cls.clean_thread:
# clean_up is an instancemethod and not a classmethod,
# so that tool config can be accessed inside the method.
t = cherrypy.process.plugins.Monitor(
cherrypy.engine, self.clean_up, self.clean_freq * 60,
name='Session cleanup')
t.subscribe()
cls.clean_thread = t
t.start()
if self.debug:
cherrypy.log('Started cleanup thread.', 'TOOLS.SESSIONS')
def delete(self):
"""Delete stored session data."""
self._delete()
if self.debug:
cherrypy.log('Deleted session %s.' % self.id,
'TOOLS.SESSIONS')
# -------------------- Application accessor methods -------------------- #
def __getitem__(self, key):
if not self.loaded:
self.load()
return self._data[key]
def __setitem__(self, key, value):
if not self.loaded:
self.load()
self._data[key] = value
def __delitem__(self, key):
if not self.loaded:
self.load()
del self._data[key]
def pop(self, key, default=missing):
"""Remove the specified key and return the corresponding value.
If key is not found, default is returned if given,
otherwise KeyError is raised.
"""
if not self.loaded:
self.load()
if default is missing:
return self._data.pop(key)
else:
return self._data.pop(key, default)
def __contains__(self, key):
if not self.loaded:
self.load()
return key in self._data
def get(self, key, default=None):
"""D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None."""
if not self.loaded:
self.load()
return self._data.get(key, default)
def update(self, d):
"""D.update(E) -> None. Update D from E: for k in E: D[k] = E[k]."""
if not self.loaded:
self.load()
self._data.update(d)
def setdefault(self, key, default=None):
"""D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D."""
if not self.loaded:
self.load()
return self._data.setdefault(key, default)
def clear(self):
"""D.clear() -> None. Remove all items from D."""
if not self.loaded:
self.load()
self._data.clear()
def keys(self):
"""D.keys() -> list of D's keys."""
if not self.loaded:
self.load()
return self._data.keys()
def items(self):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples."""
if not self.loaded:
self.load()
return self._data.items()
def values(self):
"""D.values() -> list of D's values."""
if not self.loaded:
self.load()
return self._data.values()
class RamSession(Session):
# Class-level objects. Don't rebind these!
cache = {}
locks = {}
def clean_up(self):
"""Clean up expired sessions."""
now = self.now()
for _id, (data, expiration_time) in self.cache.copy().items():
if expiration_time <= now:
try:
del self.cache[_id]
except KeyError:
pass
try:
if self.locks[_id].acquire(blocking=False):
lock = self.locks.pop(_id)
lock.release()
except KeyError:
pass
# added to remove obsolete lock objects
for _id in list(self.locks):
locked = (
_id not in self.cache
and self.locks[_id].acquire(blocking=False)
)
if locked:
lock = self.locks.pop(_id)
lock.release()
def _exists(self):
return self.id in self.cache
def _load(self):
return self.cache.get(self.id)
def _save(self, expiration_time):
self.cache[self.id] = (self._data, expiration_time)
def _delete(self):
self.cache.pop(self.id, None)
def acquire_lock(self):
"""Acquire an exclusive lock on the currently-loaded session data."""
self.locked = True
self.locks.setdefault(self.id, threading.RLock()).acquire()
def release_lock(self):
"""Release the lock on the currently-loaded session data."""
self.locks[self.id].release()
self.locked = False
def __len__(self):
"""Return the number of active sessions."""
return len(self.cache)
class FileSession(Session):
"""Implementation of the File backend for sessions
storage_path
The folder where session data will be saved. Each session
will be saved as pickle.dump(data, expiration_time) in its own file;
the filename will be self.SESSION_PREFIX + self.id.
lock_timeout
A timedelta or numeric seconds indicating how long
to block acquiring a lock. If None (default), acquiring a lock
will block indefinitely.
"""
SESSION_PREFIX = 'session-'
LOCK_SUFFIX = '.lock'
pickle_protocol = pickle.HIGHEST_PROTOCOL
def __init__(self, id=None, **kwargs):
# The 'storage_path' arg is required for file-based sessions.
kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])
kwargs.setdefault('lock_timeout', None)
Session.__init__(self, id=id, **kwargs)
# validate self.lock_timeout
if isinstance(self.lock_timeout, (int, float)):
self.lock_timeout = datetime.timedelta(seconds=self.lock_timeout)
if not isinstance(self.lock_timeout, (datetime.timedelta, type(None))):
raise ValueError(
'Lock timeout must be numeric seconds or a timedelta instance.'
)
@classmethod
def setup(cls, **kwargs):
"""Set up the storage system for file-based sessions.
This should only be called once per process; this will be done
automatically when using sessions.init (as the built-in Tool does).
"""
# The 'storage_path' arg is required for file-based sessions.
kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])
for k, v in kwargs.items():
setattr(cls, k, v)
def _get_file_path(self):
|
def _exists(self):
path = self._get_file_path()
return os.path.exists(path)
def _load(self, path=None):
        assert self.locked, ('The session was loaded without being locked. '
"Check your tools' priority levels.")
if path is None:
path = self._get_file_path()
try:
f = open(path, 'rb')
try:
return pickle.load(f)
finally:
f.close()
except (IOError, EOFError):
e = sys.exc_info()[1]
if self.debug:
cherrypy.log('Error loading the session pickle: %s' %
e, 'TOOLS.SESSIONS')
return None
def _save(self, expiration_time):
assert self.locked, ('The session was saved without being locked. '
"Check your tools' priority levels.")
f = open(self._get_file_path(), 'wb')
try:
pickle.dump((self._data, expiration_time), f, self.pickle_protocol)
finally:
f.close()
def _delete(self):
        assert self.locked, ('The session was deleted without being locked. '
"Check your tools' priority levels.")
try:
os.unlink(self._get_file_path())
except OSError:
pass
def acquire_lock(self, path=None):
"""Acquire an exclusive lock on the currently-loaded session data."""
if path is None:
path = self._get_file_path()
path += self.LOCK_SUFFIX
checker = locking.LockChecker(self.id, self.lock_timeout)
while not checker.expired():
try:
self.lock = zc.lockfile.LockFile(path)
except zc.lockfile.LockError:
time.sleep(0.1)
else:
break
self.locked = True
if self.debug:
cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS')
def release_lock(self, path=None):
"""Release the lock on the currently-loaded session data."""
self.lock.close()
with contextlib.suppress(FileNotFoundError):
os.remove(self.lock._path)
self.locked = False
def clean_up(self):
"""Clean up expired sessions."""
now = self.now()
# Iterate over all session files in self.storage_path
for fname in os.listdir(self.storage_path):
have_session = (
fname.startswith(self.SESSION_PREFIX)
and not fname.endswith(self.LOCK_SUFFIX)
)
if have_session:
# We have a session file: lock and load it and check
# if it's expired. If it fails, nevermind.
path = os.path.join(self.storage_path, fname)
self.acquire_lock(path)
if self.debug:
# This is a bit of a hack, since we're calling clean_up
# on the first instance rather than the entire class,
# so depending on whether you have "debug" set on the
# path of the first session called, this may not run.
cherrypy.log('Cleanup lock acquired.', 'TOOLS.SESSIONS')
try:
contents = self._load(path)
# _load returns None on IOError
if contents is not None:
data, expiration_time = contents
if expiration_time < now:
# Session expired: deleting it
os.unlink(path)
finally:
self.release_lock(path)
def __len__(self):
"""Return the number of active sessions."""
return len([fname for fname in os.listdir(self.storage_path)
if (fname.startswith(self.SESSION_PREFIX) and
not fname.endswith(self.LOCK_SUFFIX))])
class MemcachedSession(Session):
# The most popular memcached client for Python isn't thread-safe.
# Wrap all .get and .set operations in a single lock.
mc_lock = threading.RLock()
# This is a separate set of locks per session id.
locks = {}
servers = ['localhost:11211']
@classmethod
def setup(cls, **kwargs):
"""Set up the storage system for memcached-based sessions.
This should only be called once per process; this will be done
automatically when using sessions.init (as the built-in Tool does).
"""
for k, v in kwargs.items():
setattr(cls, k, v)
import memcache
cls.cache = memcache.Client(cls.servers)
def _exists(self):
self.mc_lock.acquire()
try:
return bool(self.cache.get(self.id))
finally:
self.mc_lock.release()
def _load(self):
self.mc_lock.acquire()
try:
return self.cache.get(self.id)
finally:
self.mc_lock.release()
def _save(self, expiration_time):
# Send the expiration time as "Unix time" (seconds since 1/1/1970)
td = int(time.mktime(expiration_time.timetuple()))
self.mc_lock.acquire()
try:
if not self.cache.set(self.id, (self._data, expiration_time), td):
raise AssertionError(
'Session data for id %r not set.' % self.id)
finally:
self.mc_lock.release()
def _delete(self):
self.cache.delete(self.id)
def acquire_lock(self):
"""Acquire an exclusive lock on the currently-loaded session data."""
self.locked = True
self.locks.setdefault(self.id, threading.RLock()).acquire()
if self.debug:
cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS')
def release_lock(self):
"""Release the lock on the currently-loaded session data."""
self.locks[self.id].release()
self.locked = False
def __len__(self):
"""Return the number of active sessions."""
raise NotImplementedError
# Hook functions (for CherryPy tools)
def save():
"""Save any changed session data."""
if not hasattr(cherrypy.serving, 'session'):
return
request = cherrypy.serving.request
response = cherrypy.serving.response
# Guard against running twice
if hasattr(request, '_sessionsaved'):
return
request._sessionsaved = True
if response.stream:
# If the body is being streamed, we have to save the data
# *after* the response has been written out
request.hooks.attach('on_end_request', cherrypy.session.save)
else:
# If the body is not being streamed, we save the data now
# (so we can release the lock).
if is_iterator(response.body):
response.collapse_body()
cherrypy.session.save()
save.failsafe = True
def close():
"""Close the session object for this request."""
sess = getattr(cherrypy.serving, 'session', None)
if getattr(sess, 'locked', False):
# If the session is still locked we release the lock
sess.release_lock()
if sess.debug:
cherrypy.log('Lock released on close.', 'TOOLS.SESSIONS')
close.failsafe = True
close.priority = 90
def init(storage_type=None, path=None, path_header=None, name='session_id',
timeout=60, domain=None, secure=False, clean_freq=5,
persistent=True, httponly=False, debug=False,
# Py27 compat
# *, storage_class=RamSession,
**kwargs):
"""Initialize session object (using cookies).
storage_class
The Session subclass to use. Defaults to RamSession.
storage_type
(deprecated)
        One of 'ram', 'file', 'memcached'. This will be
used to look up the corresponding class in cherrypy.lib.sessions
globals. For example, 'file' will use the FileSession class.
path
The 'path' value to stick in the response cookie metadata.
path_header
If 'path' is None (the default), then the response
cookie 'path' will be pulled from request.headers[path_header].
name
The name of the cookie.
timeout
The expiration timeout (in minutes) for the stored session data.
If 'persistent' is True (the default), this is also the timeout
for the cookie.
domain
The cookie domain.
secure
If False (the default) the cookie 'secure' value will not
be set. If True, the cookie 'secure' value will be set (to 1).
clean_freq (minutes)
The poll rate for expired session cleanup.
persistent
If True (the default), the 'timeout' argument will be used
to expire the cookie. If False, the cookie will not have an expiry,
and the cookie will be a "session cookie" which expires when the
browser is closed.
httponly
If False (the default) the cookie 'httponly' value will not be set.
If True, the cookie 'httponly' value will be set (to 1).
Any additional kwargs will be bound to the new Session instance,
and may be specific to the storage type. See the subclass of Session
you're using for more information.
"""
# Py27 compat
storage_class = kwargs.pop('storage_class', RamSession)
request = cherrypy.serving.request
# Guard against running twice
if hasattr(request, '_session_init_flag'):
return
request._session_init_flag = True
# Check if request came with a session ID
id = None
if name in request.cookie:
id = request.cookie[name].value
if debug:
cherrypy.log('ID obtained from request.cookie: %r' % id,
'TOOLS.SESSIONS')
first_time = not hasattr(cherrypy, 'session')
if storage_type:
if first_time:
msg = 'storage_type is deprecated. Supply storage_class instead'
cherrypy.log(msg)
storage_class = storage_type.title() + 'Session'
storage_class = globals()[storage_class]
# call setup first time only
if first_time:
if hasattr(storage_class, 'setup'):
storage_class.setup(**kwargs)
# Create and attach a new Session instance to cherrypy.serving.
# It will possess a reference to (and lock, and lazily load)
# the requested session data.
kwargs['timeout'] = timeout
kwargs['clean_freq'] = clean_freq
cherrypy.serving.session = sess = storage_class(id, **kwargs)
sess.debug = debug
def update_cookie(id):
"""Update the cookie every time the session id changes."""
cherrypy.serving.response.cookie[name] = id
sess.id_observers.append(update_cookie)
# Create cherrypy.session which will proxy to cherrypy.serving.session
if not hasattr(cherrypy, 'session'):
cherrypy.session = cherrypy._ThreadLocalProxy('session')
if persistent:
cookie_timeout = timeout
else:
# See http://support.microsoft.com/kb/223799/EN-US/
# and http://support.mozilla.com/en-US/kb/Cookies
cookie_timeout = None
set_response_cookie(path=path, path_header=path_header, name=name,
timeout=cookie_timeout, domain=domain, secure=secure,
httponly=httponly)
def set_response_cookie(path=None, path_header=None, name='session_id',
timeout=60, domain=None, secure=False, httponly=False):
"""Set a response cookie for the client.
path
the 'path' value to stick in the response cookie metadata.
path_header
if 'path' is None (the default), then the response
cookie 'path' will be pulled from request.headers[path_header].
name
the name of the cookie.
timeout
the expiration timeout for the cookie. If 0 or other boolean
False, no 'expires' param will be set, and the cookie will be a
"session cookie" which expires when the browser is closed.
domain
the cookie domain.
secure
if False (the default) the cookie 'secure' value will not
be set. If True, the cookie 'secure' value will be set (to 1).
httponly
If False (the default) the cookie 'httponly' value will not be set.
If True, the cookie 'httponly' value will be set (to 1).
"""
# Set response cookie
cookie = cherrypy.serving.response.cookie
cookie[name] = cherrypy.serving.session.id
cookie[name]['path'] = (
path or
cherrypy.serving.request.headers.get(path_header) or
'/'
)
if timeout:
cookie[name]['max-age'] = timeout * 60
_add_MSIE_max_age_workaround(cookie[name], timeout)
if domain is not None:
cookie[name]['domain'] = domain
if secure:
cookie[name]['secure'] = 1
if httponly:
if not cookie[name].isReservedKey('httponly'):
raise ValueError('The httponly cookie token is not supported.')
cookie[name]['httponly'] = 1
def _add_MSIE_max_age_workaround(cookie, timeout):
"""
We'd like to use the "max-age" param as indicated in
http://www.faqs.org/rfcs/rfc2109.html but IE doesn't
save it to disk and the session is lost if people close
the browser. So we have to use the old "expires" ... sigh ...
"""
expires = time.time() + timeout * 60
cookie['expires'] = httputil.HTTPDate(expires)
def expire():
"""Expire the current session cookie."""
name = cherrypy.serving.request.config.get(
'tools.sessions.name', 'session_id')
one_year = 60 * 60 * 24 * 365
e = time.time() - one_year
cherrypy.serving.response.cookie[name]['expires'] = httputil.HTTPDate(e)
cherrypy.serving.response.cookie[name].pop('max-age', None)
| f = os.path.join(self.storage_path, self.SESSION_PREFIX + self.id)
if not os.path.abspath(f).startswith(self.storage_path):
raise cherrypy.HTTPError(400, 'Invalid session id in cookie.')
return f |
sol_8.py | import typing
import numpy as np
import numba as nb
@nb.njit
def find_divisors(
n: int,
) -> np.array:
i = np.arange(int(n ** .5))
i += 1
i = i[n % i == 0]
i = np.hstack((i, n // i))
return np.unique(i)
@nb.njit
def | (
n: int = 1 << 20,
) -> np.array:
s = np.arange(n)
s[:2] = -1
i = 0
while i * i < n - 1:
i += 1
if s[i] == i: s[i::i] = i
return s
@nb.njit
def lpf(
n: int = 1 << 20,
) -> np.array:
s = np.arange(n)
s[:2] = -1
i = 0
while i * i < n - 1:
i += 1
if s[i] != i: continue
        # note: s[j][mask] = i would only write into a fancy-indexed copy, so update s directly
        for j in range(i, n, i):
            if s[j] == j: s[j] = i
return s
@nb.njit
def sieve_of_eratosthenes(
n: int = 1 << 20,
) -> np.array:
return gpf(n) == np.arange(n)
@nb.njit
def prime_numbers(
n: int = 1 << 20,
) -> np.array:
s = sieve_of_eratosthenes(n)
return np.flatnonzero(s)
@nb.njit
def euler_totient(
n: int,
prime_numbers: np.array,
) -> int:
c = n
for p in prime_numbers:
if p * p > n: break
if n % p: continue
c = c // p * (p - 1)
while not n % p: n //= p
if n > 1:
c = c // n * (n - 1)
return c
@nb.njit(
(nb.i8, ),
cache=True,
)
def solve(
p: int,
) -> typing.NoReturn:
n = p - 1
divs = find_divisors(n)
pn = prime_numbers(1 << 20)
mod = 998244353
c = 1
for d in divs:
e = euler_totient(d, pn)
e %= mod
d %= mod
c += e * d % mod
c %= mod
print(c)
def main() -> typing.NoReturn:
p = int(input())
solve(p)
main() | gpf |
cyclonedx.py | """
SPDX-License-Identifier: MIT
Copyright (c) 2021, SCANOSS
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import json
import os.path
import sys
import hashlib
import time
class CycloneDx:
"""
CycloneDX management class
Handle all interaction with CycloneDX formatting
"""
def __init__(self, debug: bool = False, output_file: str = None):
"""
Initialise the CycloneDX class
"""
self.output_file = output_file
        self.debug = debug
        self.quiet = False  # referenced by print_msg(); default to printing messages
@staticmethod
def print_stderr(*args, **kwargs):
|
def print_msg(self, *args, **kwargs):
"""
        Print message if quiet mode is not enabled
"""
if not self.quiet:
self.print_stderr(*args, **kwargs)
def print_debug(self, *args, **kwargs):
"""
Print debug message if enabled
"""
if self.debug:
self.print_stderr(*args, **kwargs)
def parse(self, data: json):
"""
        Parse the given input (raw/plain) JSON object and return a CycloneDX summary
:param data: json - JSON object
:return: CycloneDX dictionary
"""
if not data:
self.print_stderr('ERROR: No JSON data provided to parse.')
return None
self.print_debug(f'Processing raw results into CycloneDX format...')
cdx = {}
for f in data:
file_details = data.get(f)
# print(f'File: {f}: {file_details}')
for d in file_details:
id_details = d.get("id")
if not id_details or id_details == 'none':
# print(f'No ID for {f}')
continue
purl = None
purls = d.get('purl')
if not purls:
self.print_stderr(f'Purl block missing for {f}: {file_details}')
continue
for p in purls:
self.print_debug(f'Purl: {p}')
purl = p
break
if not purl:
self.print_stderr(f'Warning: No PURL found for {f}: {file_details}')
continue
if cdx.get(purl):
self.print_debug(f'Component {purl} already stored: {cdx.get(purl)}')
continue
fd = {}
# print(f'Vendor: {d.get("vendor")}, Comp: {d.get("component")}, Ver: {d.get("version")},'
# f' Latest: {d.get("latest")} ID: {d.get("id")}')
for field in ['id', 'vendor', 'component', 'version', 'latest']:
fd[field] = d.get(field)
licenses = d.get('licenses')
fdl = []
for lic in licenses:
# print(f'License: {lic.get("name")}')
fdl.append({'id':lic.get("name")})
fd['licenses'] = fdl
cdx[p] = fd
# print(f'License summary: {cdx}')
return cdx
def produce_from_file(self, json_file: str, output_file: str = None) -> bool:
"""
Parse plain/raw input JSON file and produce CycloneDX output
:param json_file:
:param output_file:
:return: True if successful, False otherwise
"""
if not json_file:
self.print_stderr('ERROR: No JSON file provided to parse.')
return False
if not os.path.isfile(json_file):
self.print_stderr(f'ERROR: JSON file does not exist or is not a file: {json_file}')
return False
success = True
with open(json_file, 'r') as f:
success = self.produce_from_str(f.read(), output_file)
return success
def produce_from_json(self, data: json, output_file: str = None) -> bool:
"""
Produce the CycloneDX output from the input JSON object
:param data: JSON object
:param output_file: Output file (optional)
:return: True if successful, False otherwise
"""
cdx = self.parse(data)
if not cdx:
self.print_stderr('ERROR: No CycloneDX data returned for the JSON string provided.')
return False
md5hex = hashlib.md5(f'{time.time()}'.encode('utf-8')).hexdigest()
data = {}
data['bomFormat'] = 'CycloneDX'
data['specVersion'] = '1.2'
data['serialNumber'] = f'scanoss:SCANOSS-PY - SCANOSS CLI-{md5hex}'
data['version'] = '1'
data['components'] = []
for purl in cdx:
comp = cdx.get(purl)
lic = []
licenses = comp.get('licenses')
if licenses:
for l in licenses:
lic.append({'license': { 'id': l.get('id')}})
m_type = 'Snippet' if comp.get('id') == 'snippet' else 'Library'
data['components'].append({
'type': m_type,
'name': comp.get('component'),
'publisher': comp.get('vendor'),
'version': comp.get('version'),
'purl': purl,
'licenses': lic
# 'licenses': [{
# 'license': {
# 'id': comp.get('license')
# }
# }]
})
# End for loop
file = sys.stdout
if not output_file and self.output_file:
output_file = self.output_file
if output_file:
file = open(output_file, 'w')
print(json.dumps(data, indent=2), file=file)
if output_file:
file.close()
return True
def produce_from_str(self, json_str: str, output_file: str = None) -> bool:
"""
Produce CycloneDX output from input JSON string
:param json_str: input JSON string
:param output_file: Output file (optional)
:return: True if successful, False otherwise
"""
if not json_str:
self.print_stderr('ERROR: No JSON string provided to parse.')
return False
data = None
try:
data = json.loads(json_str)
except Exception as e:
self.print_stderr(f'ERROR: Problem parsing input JSON: {e}')
return False
else:
return self.produce_from_json(data, output_file)
return False
    def print_stderr(self, *args, **kwargs):
        """
        Print the given message to STDERR
        """
        print(*args, file=sys.stderr, **kwargs)

#
# End of CycloneDX Class
#
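# Illustrative usage sketch (not part of the original module): the class name `CycloneDx`
# and the constructor arguments below are assumptions; only `produce_from_file()` and the
# `debug`/`output_file` attributes are taken from the code above.
#
#   cdx = CycloneDx(debug=True, output_file='sbom.json')
#   cdx.produce_from_file('scan-results.json')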
video_jpg_ucf101_hmdb51.py | from __future__ import print_function, division
import os
import sys
import subprocess
def class_process(dir_path, dst_dir_path, class_name):
class_path = os.path.join(dir_path, class_name)
if not os.path.isdir(class_path):
return
dst_class_path = os.path.join(dst_dir_path, class_name)
if not os.path.exists(dst_class_path):
os.mkdir(dst_class_path)
for file_name in os.listdir(class_path):
# if '.avi' not in file_name:
# continue
name, ext = os.path.splitext(file_name)
dst_directory_path = os.path.join(dst_class_path, name)
video_file_path = os.path.join(class_path, file_name)
try:
if os.path.exists(dst_directory_path):
if not os.path.exists(os.path.join(dst_directory_path, 'image_00001.jpg')):
subprocess.call('rm -r \"{}\"'.format(dst_directory_path), shell=True)
print('remove {}'.format(dst_directory_path))
os.mkdir(dst_directory_path)
else:
continue
else:
os.mkdir(dst_directory_path)
except:
print(dst_directory_path)
continue
cmd = 'ffmpeg -i \"{}\" -vf scale=-1:240 \"{}/image_%05d.jpg\"'.format(video_file_path, dst_directory_path)
print(cmd)
subprocess.call(cmd, shell=True)
print('\n')
if __name__=="__main__":
dir_path = './data/trimmed_data'
dst_dir_path = './data/image_data'
for class_name in os.listdir(dir_path):
        class_process(dir_path, dst_dir_path, class_name)
main.py | from typing import (Any, Union, Type) # noqa: F401
from newchain_keys.datatypes import (
LazyBackend,
PublicKey,
PrivateKey,
Signature,
)
from newchain_keys.exceptions import (
ValidationError,
)
from newchain_keys.validation import (
validate_message_hash,
)
# These must be aliased due to a scoping issue in mypy
# https://github.com/python/mypy/issues/1775
_PublicKey = PublicKey
_PrivateKey = PrivateKey
_Signature = Signature
class KeyAPI(LazyBackend):
#
# datatype shortcuts
#
PublicKey = PublicKey # type: Type[_PublicKey]
PrivateKey = PrivateKey # type: Type[_PrivateKey]
Signature = Signature # type: Type[_Signature]
#
# Proxy method calls to the backends
#
def ecdsa_sign(self,
message_hash: bytes,
private_key: _PrivateKey) -> _Signature:
validate_message_hash(message_hash)
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `newchain_keys.datatypes.PrivateKey`"
)
signature = self.backend.ecdsa_sign(message_hash, private_key)
if not isinstance(signature, Signature):
raise ValidationError(
"Backend returned an invalid signature. Return value must be "
"an instance of `newchain_keys.datatypes.Signature`"
)
return signature
def ecdsa_verify(self,
message_hash: bytes,
                     signature: _Signature,
                     public_key: _PublicKey) -> bool:
        if not isinstance(public_key, PublicKey):
raise ValidationError(
"The `public_key` must be an instance of `newchain_keys.datatypes.PublicKey`"
)
return self.ecdsa_recover(message_hash, signature) == public_key
def ecdsa_recover(self,
message_hash: bytes,
signature: _Signature) -> _PublicKey:
validate_message_hash(message_hash)
if not isinstance(signature, Signature):
raise ValidationError(
"The `signature` must be an instance of `newchain_keys.datatypes.Signature`"
)
public_key = self.backend.ecdsa_recover(message_hash, signature)
if not isinstance(public_key, _PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `newchain_keys.datatypes.PublicKey`"
)
return public_key
def private_key_to_public_key(self, private_key: _PrivateKey) -> _PublicKey:
if not isinstance(private_key, PrivateKey):
raise ValidationError(
"The `private_key` must be an instance of `newchain_keys.datatypes.PrivateKey`"
)
public_key = self.backend.private_key_to_public_key(private_key)
if not isinstance(public_key, PublicKey):
raise ValidationError(
"Backend returned an invalid public_key. Return value must be "
"an instance of `newchain_keys.datatypes.PublicKey`"
)
return public_key
# This creates an easy to import backend which will lazily fetch whatever
# backend has been configured at runtime (as opposed to import or instantiation time).
lazy_key_api = KeyAPI(backend=None)
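# Illustrative sketch of the proxying API above (assumes a concrete ECC backend has been
# configured for the lazy lookup; the key bytes and the 32-byte message hash below are
# placeholders, not real values):
#
#   private_key = lazy_key_api.PrivateKey(b'\x01' * 32)
#   public_key = lazy_key_api.private_key_to_public_key(private_key)
#   signature = lazy_key_api.ecdsa_sign(b'\x00' * 32, private_key)
#   assert lazy_key_api.ecdsa_verify(b'\x00' * 32, signature, public_key)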
index.ts | import { BackupServiceService } from 'cloud/mdb/clickhouse/v1/backup_service';
import { ClusterServiceService } from 'cloud/mdb/clickhouse/v1/cluster_service';
import { DatabaseServiceService } from 'cloud/mdb/clickhouse/v1/database_service';
import { FormatSchemaServiceService } from 'cloud/mdb/clickhouse/v1/format_schema_service';
import { MlModelServiceService } from 'cloud/mdb/clickhouse/v1/ml_model_service';
import { ResourcePresetServiceService } from 'cloud/mdb/clickhouse/v1/resource_preset_service';
import { UserServiceService } from 'cloud/mdb/clickhouse/v1/user_service';
import { VersionsServiceService } from 'cloud/mdb/clickhouse/v1/versions_service';
import { Client } from 'nice-grpc';
import { SdkServiceDefinition, Session } from 'src/index';
export const BackupServiceDef: SdkServiceDefinition<
typeof BackupServiceService
> = {
...BackupServiceService,
__endpointId: 'managed-clickhouse',
};
export function BackupService(
session?: Session
): Client<typeof BackupServiceService, {}> {
if (session === undefined) {
session = new Session();
}
return session.client(BackupServiceDef);
}
export const ClusterServiceDef: SdkServiceDefinition<
typeof ClusterServiceService
> = {
...ClusterServiceService,
__endpointId: 'managed-clickhouse',
};
export function ClusterService(
session?: Session
): Client<typeof ClusterServiceService, {}> {
if (session === undefined) {
session = new Session();
}
return session.client(ClusterServiceDef);
}
export const DatabaseServiceDef: SdkServiceDefinition<
typeof DatabaseServiceService
> = {
...DatabaseServiceService,
__endpointId: 'managed-clickhouse',
};
export function DatabaseService(
session?: Session
): Client<typeof DatabaseServiceService, {}> {
if (session === undefined) {
session = new Session();
}
return session.client(DatabaseServiceDef);
}
export const FormatSchemaServiceDef: SdkServiceDefinition<
typeof FormatSchemaServiceService
> = {
...FormatSchemaServiceService,
__endpointId: 'managed-clickhouse',
};
export function FormatSchemaService(
session?: Session
): Client<typeof FormatSchemaServiceService, {}> {
if (session === undefined) {
session = new Session();
}
return session.client(FormatSchemaServiceDef);
}
export const MlModelServiceDef: SdkServiceDefinition<
typeof MlModelServiceService
> = {
...MlModelServiceService,
__endpointId: 'managed-clickhouse',
};
export function MlModelService(
session?: Session
): Client<typeof MlModelServiceService, {}> {
if (session === undefined) {
session = new Session();
}
return session.client(MlModelServiceDef);
}
export const ResourcePresetServiceDef: SdkServiceDefinition<
typeof ResourcePresetServiceService
> = {
...ResourcePresetServiceService,
__endpointId: 'managed-clickhouse',
};
export function ResourcePresetService(
session?: Session
): Client<typeof ResourcePresetServiceService, {}> {
if (session === undefined) {
session = new Session();
}
return session.client(ResourcePresetServiceDef);
}
export const UserServiceDef: SdkServiceDefinition<typeof UserServiceService> = {
...UserServiceService,
__endpointId: 'managed-clickhouse',
};
export function UserService(
session?: Session
): Client<typeof UserServiceService, {}> {
if (session === undefined) {
session = new Session();
}
return session.client(UserServiceDef);
}
export const VersionsServiceDef: SdkServiceDefinition<
typeof VersionsServiceService
> = {
...VersionsServiceService,
__endpointId: 'managed-clickhouse',
};
export function VersionsService(
session?: Session
): Client<typeof VersionsServiceService, {}> {
if (session === undefined) {
session = new Session();
}
return session.client(VersionsServiceDef);
}
rnn_test.py | # -*- coding: utf-8 -*-
"""
NLP From Scratch: Translation with a Sequence to Sequence Network and Attention
*******************************************************************************
**Author**: `Sean Robertson <https://github.com/spro/practical-pytorch>`_
This is the third and final tutorial on doing "NLP From Scratch", where we
write our own classes and functions to preprocess the data to do our NLP
modeling tasks. We hope after you complete this tutorial that you'll proceed to
learn how `torchtext` can handle much of this preprocessing for you in the
three tutorials immediately following this one.
In this project we will be teaching a neural network to translate from
French to English.
::
[KEY: > input, = target, < output]
> il est en train de peindre un tableau .
= he is painting a picture .
< he is painting a picture .
> pourquoi ne pas essayer ce vin delicieux ?
= why not try that delicious wine ?
< why not try that delicious wine ?
> elle n est pas poete mais romanciere .
= she is not a poet but a novelist .
< she not not a poet but a novelist .
> vous etes trop maigre .
= you re too skinny .
< you re all alone .
... to varying degrees of success.
This is made possible by the simple but powerful idea of the `sequence
to sequence network <https://arxiv.org/abs/1409.3215>`__, in which two
recurrent neural networks work together to transform one sequence to
another. An encoder network condenses an input sequence into a vector,
and a decoder network unfolds that vector into a new sequence.
.. figure:: /_static/img/seq-seq-images/seq2seq.png
:alt:
To improve upon this model we'll use an `attention
mechanism <https://arxiv.org/abs/1409.0473>`__, which lets the decoder
learn to focus over a specific range of the input sequence.
**Recommended Reading:**
I assume you have at least installed PyTorch, know Python, and
understand Tensors:
- https://pytorch.org/ For installation instructions
- :doc:`/beginner/deep_learning_60min_blitz` to get started with PyTorch in general
- :doc:`/beginner/pytorch_with_examples` for a wide and deep overview
- :doc:`/beginner/former_torchies_tutorial` if you are former Lua Torch user
It would also be useful to know about Sequence to Sequence networks and
how they work:
- `Learning Phrase Representations using RNN Encoder-Decoder for
Statistical Machine Translation <https://arxiv.org/abs/1406.1078>`__
- `Sequence to Sequence Learning with Neural
Networks <https://arxiv.org/abs/1409.3215>`__
- `Neural Machine Translation by Jointly Learning to Align and
Translate <https://arxiv.org/abs/1409.0473>`__
- `A Neural Conversational Model <https://arxiv.org/abs/1506.05869>`__
You will also find the previous tutorials on
:doc:`/intermediate/char_rnn_classification_tutorial`
and :doc:`/intermediate/char_rnn_generation_tutorial`
helpful as those concepts are very similar to the Encoder and Decoder
models, respectively.
**Requirements**
"""
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
######################################################################
# Loading data files
# ==================
#
# The data for this project is a set of many thousands of English to
# French translation pairs.
#
# `This question on Open Data Stack
# Exchange <https://opendata.stackexchange.com/questions/3888/dataset-of-sentences-translated-into-many-languages>`__
# pointed me to the open translation site https://tatoeba.org/ which has
# downloads available at https://tatoeba.org/eng/downloads - and better
# yet, someone did the extra work of splitting language pairs into
# individual text files here: https://www.manythings.org/anki/
#
# The English to French pairs are too big to include in the repo, so
# download to ``data/eng-fra.txt`` before continuing. The file is a tab
# separated list of translation pairs:
#
# ::
#
# I am cold. J'ai froid.
#
# .. Note::
# Download the data from
# `here <https://download.pytorch.org/tutorial/data.zip>`_
# and extract it to the current directory.
######################################################################
# Similar to the character encoding used in the character-level RNN
# tutorials, we will be representing each word in a language as a one-hot
# vector, or giant vector of zeros except for a single one (at the index
# of the word). Compared to the dozens of characters that might exist in a
# language, there are many many more words, so the encoding vector is much
# larger. We will however cheat a bit and trim the data to only use a few
# thousand words per language.
#
# .. figure:: /_static/img/seq-seq-images/word-encoding.png
# :alt:
#
#
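######################################################################
# A minimal illustration of the one-hot idea above (a sketch only; the rest of this
# tutorial works directly with word indices, and the vocabulary size here is an
# arbitrary stand-in):
#

example_vocab_size = 10
example_one_hot = torch.zeros(example_vocab_size)
example_one_hot[3] = 1  # one-hot vector for the word whose index is 3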
######################################################################
# We'll need a unique index per word to use as the inputs and targets of
# the networks later. To keep track of all this we will use a helper class
# called ``Lang`` which has word → index (``word2index``) and index → word
# (``index2word``) dictionaries, as well as a count of each word
# ``word2count`` which will be used to replace rare words later.
#
SOS_token = 0
EOS_token = 1
class Lang:
    def __init__(self, name):
        self.name = name
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = 2  # Count SOS and EOS

    def addSentence(self, sentence):
        for word in sentence.split(' '):
            self.addWord(word)

    def addWord(self, word):
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
        else:
            self.word2count[word] += 1


######################################################################
# The files are all in Unicode, to simplify we will turn Unicode
# characters to ASCII, make everything lowercase, and trim most
# punctuation.
#
# Turn a Unicode string to plain ASCII, thanks to
# https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
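######################################################################
# A quick illustrative check of the helpers above (the sample sentence is arbitrary):
# accents are stripped, punctuation is spaced out, and everything is lowercased.
#

print(normalizeString("Elle est très heureuse!"))  # -> "elle est tres heureuse !"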
######################################################################
# To read the data file we will split the file into lines, and then split
# lines into pairs. The files are all English → Other Language, so if we
# want to translate from Other Language → English I added the ``reverse``
# flag to reverse the pairs.
#
def readLangs(lang1, lang2, reverse=False):
print("Reading lines...")
# Read the file and split into lines
lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8').\
read().strip().split('\n')
# Split every line into pairs and normalize
pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
# Reverse pairs, make Lang instances
if reverse:
pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
######################################################################
# Since there are a *lot* of example sentences and we want to train
# something quickly, we'll trim the data set to only relatively short and
# simple sentences. Here the maximum length is 10 words (that includes
# ending punctuation) and we're filtering to sentences that translate to
# the form "I am" or "He is" etc. (accounting for apostrophes replaced
# earlier).
#
MAX_LENGTH = 10
eng_prefixes = (
"i am ", "i m ",
"he is", "he s ",
"she is", "she s ",
"you are", "you re ",
"we are", "we re ",
"they are", "they re "
)
def filterPair(p):
return len(p[0].split(' ')) < MAX_LENGTH and \
len(p[1].split(' ')) < MAX_LENGTH and \
p[1].startswith(eng_prefixes)
def filterPairs(pairs):
return [pair for pair in pairs if filterPair(pair)]
######################################################################
# The full process for preparing the data is:
#
# - Read text file and split into lines, split lines into pairs
# - Normalize text, filter by length and content
# - Make word lists from sentences in pairs
#
def prepareData(lang1, lang2, reverse=False):
input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
print("Read %s sentence pairs" % len(pairs))
pairs = filterPairs(pairs)
print("Trimmed to %s sentence pairs" % len(pairs))
print("Counting words...")
for pair in pairs:
input_lang.addSentence(pair[0])
output_lang.addSentence(pair[1])
print("Counted words:")
print(input_lang.name, input_lang.n_words)
print(output_lang.name, output_lang.n_words)
return input_lang, output_lang, pairs
input_lang, output_lang, pairs = prepareData('eng', 'fra', True)
print(random.choice(pairs))
######################################################################
# The Seq2Seq Model
# =================
#
# A Recurrent Neural Network, or RNN, is a network that operates on a
# sequence and uses its own output as input for subsequent steps.
#
# A `Sequence to Sequence network <https://arxiv.org/abs/1409.3215>`__, or
# seq2seq network, or `Encoder Decoder
# network <https://arxiv.org/pdf/1406.1078v3.pdf>`__, is a model
# consisting of two RNNs called the encoder and decoder. The encoder reads
# an input sequence and outputs a single vector, and the decoder reads
# that vector to produce an output sequence.
#
# .. figure:: /_static/img/seq-seq-images/seq2seq.png
# :alt:
#
# Unlike sequence prediction with a single RNN, where every input
# corresponds to an output, the seq2seq model frees us from sequence
# length and order, which makes it ideal for translation between two
# languages.
#
# Consider the sentence "Je ne suis pas le chat noir" → "I am not the
# black cat". Most of the words in the input sentence have a direct
# translation in the output sentence, but are in slightly different
# orders, e.g. "chat noir" and "black cat". Because of the "ne/pas"
# construction there is also one more word in the input sentence. It would
# be difficult to produce a correct translation directly from the sequence
# of input words.
#
# With a seq2seq model the encoder creates a single vector which, in the
# ideal case, encodes the "meaning" of the input sequence into a single
# vector — a single point in some N dimensional space of sentences.
#
######################################################################
# The Encoder
# -----------
#
# The encoder of a seq2seq network is a RNN that outputs some value for
# every word from the input sentence. For every input word the encoder
# outputs a vector and a hidden state, and uses the hidden state for the
# next input word.
#
# .. figure:: /_static/img/seq-seq-images/encoder-network.png
# :alt:
#
#
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size):
super(EncoderRNN, self).__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
def forward(self, input, hidden):
embedded = self.embedding(input).view(1, 1, -1)
output = embedded
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
######################################################################
# The Decoder
# -----------
#
# The decoder is another RNN that takes the encoder output vector(s) and
# outputs a sequence of words to create the translation.
#
######################################################################
# Simple Decoder
# ^^^^^^^^^^^^^^
#
# In the simplest seq2seq decoder we use only last output of the encoder.
# This last output is sometimes called the *context vector* as it encodes
# context from the entire sequence. This context vector is used as the
# initial hidden state of the decoder.
#
# At every step of decoding, the decoder is given an input token and
# hidden state. The initial input token is the start-of-string ``<SOS>``
# token, and the first hidden state is the context vector (the encoder's
# last hidden state).
#
# .. figure:: /_static/img/seq-seq-images/decoder-network.png
# :alt:
#
#
class DecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size):
super(DecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(output_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, input, hidden):
output = self.embedding(input).view(1, 1, -1)
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = self.softmax(self.out(output[0]))
return output, hidden
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
######################################################################
# I encourage you to train and observe the results of this model, but to
# save space we'll be going straight for the gold and introducing the
# Attention Mechanism.
#
######################################################################
# Attention Decoder
# ^^^^^^^^^^^^^^^^^
#
# If only the context vector is passed between the encoder and decoder,
# that single vector carries the burden of encoding the entire sentence.
#
# Attention allows the decoder network to "focus" on a different part of
# the encoder's outputs for every step of the decoder's own outputs. First
# we calculate a set of *attention weights*. These will be multiplied by
# the encoder output vectors to create a weighted combination. The result
# (called ``attn_applied`` in the code) should contain information about
# that specific part of the input sequence, and thus help the decoder
# choose the right output words.
#
# .. figure:: https://i.imgur.com/1152PYf.png
# :alt:
#
# Calculating the attention weights is done with another feed-forward
# layer ``attn``, using the decoder's input and hidden state as inputs.
# Because there are sentences of all sizes in the training data, to
# actually create and train this layer we have to choose a maximum
# sentence length (input length, for encoder outputs) that it can apply
# to. Sentences of the maximum length will use all the attention weights,
# while shorter sentences will only use the first few.
#
# .. figure:: /_static/img/seq-seq-images/attention-decoder-network.png
# :alt:
#
#
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.dropout_p = dropout_p
self.max_length = max_length
self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.gru = nn.GRU(self.hidden_size, self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.output_size)
def forward(self, input, hidden, encoder_outputs):
embedded = self.embedding(input).view(1, 1, -1)
embedded = self.dropout(embedded)
attn_weights = F.softmax(
self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
attn_applied = torch.bmm(attn_weights.unsqueeze(0),
encoder_outputs.unsqueeze(0))
output = torch.cat((embedded[0], attn_applied[0]), 1)
output = self.attn_combine(output).unsqueeze(0)
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = F.log_softmax(self.out(output[0]), dim=1)
return output, hidden, attn_weights
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
######################################################################
# .. note:: There are other forms of attention that work around the length
# limitation by using a relative position approach. Read about "local
# attention" in `Effective Approaches to Attention-based Neural Machine
# Translation <https://arxiv.org/abs/1508.04025>`__.
#
# Training
# ========
#
# Preparing Training Data
# -----------------------
#
# To train, for each pair we will need an input tensor (indexes of the
# words in the input sentence) and target tensor (indexes of the words in
# the target sentence). While creating these vectors we will append the
# EOS token to both sequences.
#
def indexesFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
def tensorFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
def tensorsFromPair(pair):
input_tensor = tensorFromSentence(input_lang, pair[0])
target_tensor = tensorFromSentence(output_lang, pair[1])
return (input_tensor, target_tensor)
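######################################################################
# Illustrative check (sketch): each tensor has shape (sentence_length + 1, 1)
# because of the appended EOS token.
#

example_input, example_target = tensorsFromPair(random.choice(pairs))
print(example_input.size(), example_target.size())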
######################################################################
# Training the Model
# ------------------
#
# To train we run the input sentence through the encoder, and keep track
# of every output and the latest hidden state. Then the decoder is given
# the ``<SOS>`` token as its first input, and the last hidden state of the
# encoder as its first hidden state.
#
# "Teacher forcing" is the concept of using the real target outputs as
# each next input, instead of using the decoder's guess as the next input.
# Using teacher forcing causes it to converge faster but `when the trained
# network is exploited, it may exhibit
# instability <http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.378.4095&rep=rep1&type=pdf>`__.
#
# You can observe outputs of teacher-forced networks that read with
# coherent grammar but wander far from the correct translation -
# intuitively it has learned to represent the output grammar and can "pick
# up" the meaning once the teacher tells it the first few words, but it
# has not properly learned how to create the sentence from the translation
# in the first place.
#
# Because of the freedom PyTorch's autograd gives us, we can randomly
# choose to use teacher forcing or not with a simple if statement. Turn
# ``teacher_forcing_ratio`` up to use more of it.
#
teacher_forcing_ratio = 0.5
def train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
encoder_hidden = encoder.initHidden()
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
input_length = input_tensor.size(0)
target_length = target_tensor.size(0)
encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
loss = 0
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(
input_tensor[ei], encoder_hidden)
encoder_outputs[ei] = encoder_output[0, 0]
decoder_input = torch.tensor([[SOS_token]], device=device)
decoder_hidden = encoder_hidden
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
if use_teacher_forcing:
# Teacher forcing: Feed the target as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
loss += criterion(decoder_output, target_tensor[di])
decoder_input = target_tensor[di] # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
topv, topi = decoder_output.topk(1)
decoder_input = topi.squeeze().detach() # detach from history as input
loss += criterion(decoder_output, target_tensor[di])
if decoder_input.item() == EOS_token:
break
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
return loss.item() / target_length
######################################################################
# This is a helper function to print time elapsed and estimated time
# remaining given the current time and progress %.
#
import time
import math
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
######################################################################
# The whole training process looks like this:
#
# - Start a timer
# - Initialize optimizers and criterion
# - Create set of training pairs
# - Start empty losses array for plotting
#
# Then we call ``train`` many times and occasionally print the progress (%
# of examples, time so far, estimated time) and average loss.
#
def trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
training_pairs = [tensorsFromPair(random.choice(pairs))
for i in range(n_iters)]
criterion = nn.NLLLoss()
for iter in range(1, n_iters + 1):
training_pair = training_pairs[iter - 1]
input_tensor = training_pair[0]
target_tensor = training_pair[1]
loss = train(input_tensor, target_tensor, encoder,
decoder, encoder_optimizer, decoder_optimizer, criterion)
print_loss_total += loss
plot_loss_total += loss
if iter % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
iter, iter / n_iters * 100, print_loss_avg))
if iter % plot_every == 0:
plot_loss_avg = plot_loss_total / plot_every
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
showPlot(plot_losses)
######################################################################
# Plotting results
# ----------------
#
# Plotting is done with matplotlib, using the array of loss values
# ``plot_losses`` saved while training.
#
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.ticker as ticker
import numpy as np
def showPlot(points):
plt.figure()
fig, ax = plt.subplots()
# this locator puts ticks at regular intervals
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
######################################################################
# Evaluation
# ==========
#
# Evaluation is mostly the same as training, but there are no targets so
# we simply feed the decoder's predictions back to itself for each step.
# Every time it predicts a word we add it to the output string, and if it
# predicts the EOS token we stop there. We also store the decoder's
# attention outputs for display later.
#
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
with torch.no_grad():
input_tensor = tensorFromSentence(input_lang, sentence)
input_length = input_tensor.size()[0]
encoder_hidden = encoder.initHidden()
encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_tensor[ei],
encoder_hidden)
encoder_outputs[ei] += encoder_output[0, 0]
decoder_input = torch.tensor([[SOS_token]], device=device) # SOS
decoder_hidden = encoder_hidden
decoded_words = []
decoder_attentions = torch.zeros(max_length, max_length)
for di in range(max_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
decoder_attentions[di] = decoder_attention.data
topv, topi = decoder_output.data.topk(1)
if topi.item() == EOS_token:
decoded_words.append('<EOS>')
break
else:
decoded_words.append(output_lang.index2word[topi.item()])
decoder_input = topi.squeeze().detach()
return decoded_words, decoder_attentions[:di + 1]
######################################################################
# We can evaluate random sentences from the training set and print out the
# input, target, and output to make some subjective quality judgements:
#
def evaluateRandomly(encoder, decoder, n=10):
for i in range(n):
pair = random.choice(pairs)
print('>', pair[0])
print('=', pair[1])
output_words, attentions = evaluate(encoder, decoder, pair[0])
output_sentence = ' '.join(output_words)
print('<', output_sentence)
print('')
######################################################################
# Training and Evaluating
# =======================
#
# With all these helper functions in place (it looks like extra work, but
# it makes it easier to run multiple experiments) we can actually
# initialize a network and start training.
#
# Remember that the input sentences were heavily filtered. For this small
# dataset we can use relatively small networks of 256 hidden nodes and a
# single GRU layer. After about 40 minutes on a MacBook CPU we'll get some
# reasonable results.
#
# .. Note::
# If you run this notebook you can train, interrupt the kernel,
# evaluate, and continue training later. Comment out the lines where the
# encoder and decoder are initialized and run ``trainIters`` again.
#
hidden_size = 256
encoder1 = EncoderRNN(input_lang.n_words, hidden_size).to(device)
attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words, dropout_p=0.1).to(device)
trainIters(encoder1, attn_decoder1, 75000, print_every=5000)
######################################################################
#
evaluateRandomly(encoder1, attn_decoder1)
######################################################################
# Visualizing Attention
# ---------------------
#
# A useful property of the attention mechanism is its highly interpretable
# outputs. Because it is used to weight specific encoder outputs of the
# input sequence, we can imagine looking where the network is focused most
# at each time step.
#
# You could simply run ``plt.matshow(attentions)`` to see attention output
# displayed as a matrix, with the columns being input steps and rows being
# output steps:
#
output_words, attentions = evaluate(
encoder1, attn_decoder1, "je suis trop froid .")
plt.matshow(attentions.numpy())
######################################################################
# For a better viewing experience we will do the extra work of adding axes
# and labels:
#
def showAttention(input_sentence, output_words, attentions):
# Set up figure with colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(attentions.numpy(), cmap='bone')
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + input_sentence.split(' ') +
['<EOS>'], rotation=90)
ax.set_yticklabels([''] + output_words)
# Show label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
def evaluateAndShowAttention(input_sentence):
output_words, attentions = evaluate(
encoder1, attn_decoder1, input_sentence)
print('input =', input_sentence)
print('output =', ' '.join(output_words))
showAttention(input_sentence, output_words, attentions)
evaluateAndShowAttention("elle a cinq ans de moins que moi .")
evaluateAndShowAttention("elle est trop petit .")
evaluateAndShowAttention("je ne crains pas de mourir .")
evaluateAndShowAttention("c est un jeune directeur plein de talent .")
######################################################################
# Exercises
# =========
#
# - Try with a different dataset
#
# - Another language pair
# - Human → Machine (e.g. IOT commands)
# - Chat → Response
# - Question → Answer
#
# - Replace the embeddings with pre-trained word embeddings such as word2vec or
# GloVe
# - Try with more layers, more hidden units, and more sentences. Compare
# the training time and results.
# - If you use a translation file where pairs have two of the same phrase
# (``I am test \t I am test``), you can use this as an autoencoder. Try
# this:
#
# - Train as an autoencoder
# - Save only the Encoder network
# - Train a new Decoder for translation from there
#
leases_pb2_grpc.py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from containerd.services.leases.v1 import leases_pb2 as containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class LeasesStub(object):
"""Leases service manages resources leases within the metadata store.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Create = channel.unary_unary(
'/containerd.services.leases.v1.Leases/Create',
request_serializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.CreateRequest.SerializeToString,
response_deserializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.CreateResponse.FromString,
)
self.Delete = channel.unary_unary(
'/containerd.services.leases.v1.Leases/Delete',
request_serializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.DeleteRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.List = channel.unary_unary(
'/containerd.services.leases.v1.Leases/List',
request_serializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.ListRequest.SerializeToString,
response_deserializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.ListResponse.FromString,
)
self.AddResource = channel.unary_unary(
'/containerd.services.leases.v1.Leases/AddResource',
request_serializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.AddResourceRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.DeleteResource = channel.unary_unary(
'/containerd.services.leases.v1.Leases/DeleteResource',
request_serializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.DeleteResourceRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ListResources = channel.unary_unary(
'/containerd.services.leases.v1.Leases/ListResources',
request_serializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.ListResourcesRequest.SerializeToString,
response_deserializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.ListResourcesResponse.FromString,
)
class LeasesServicer(object):
"""Leases service manages resources leases within the metadata store.
"""
def Create(self, request, context):
"""Create creates a new lease for managing changes to metadata. A lease
can be used to protect objects from being removed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Delete deletes the lease and makes any unreferenced objects created
during the lease eligible for garbage collection if not referenced
or retained by other resources during the lease.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
"""List lists all active leases, returning the full list of
leases and optionally including the referenced resources.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddResource(self, request, context):
"""AddResource references the resource by the provided lease.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteResource(self, request, context):
"""DeleteResource dereferences the resource by the provided lease.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListResources(self, request, context):
"""ListResources lists all the resources referenced by the lease.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_LeasesServicer_to_server(servicer, server):
rpc_method_handlers = {
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.CreateRequest.FromString,
response_serializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.CreateResponse.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.DeleteRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.ListRequest.FromString,
response_serializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.ListResponse.SerializeToString,
),
'AddResource': grpc.unary_unary_rpc_method_handler(
servicer.AddResource,
request_deserializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.AddResourceRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'DeleteResource': grpc.unary_unary_rpc_method_handler(
servicer.DeleteResource,
request_deserializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.DeleteResourceRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'ListResources': grpc.unary_unary_rpc_method_handler(
servicer.ListResources,
request_deserializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.ListResourcesRequest.FromString,
response_serializer=containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.ListResourcesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'containerd.services.leases.v1.Leases', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Leases(object):
    """Leases service manages resources leases within the metadata store.
"""
@staticmethod
def Create(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/containerd.services.leases.v1.Leases/Create',
containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.CreateRequest.SerializeToString,
containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.CreateResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/containerd.services.leases.v1.Leases/Delete',
containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.DeleteRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/containerd.services.leases.v1.Leases/List',
containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.ListRequest.SerializeToString,
containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.ListResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AddResource(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/containerd.services.leases.v1.Leases/AddResource',
containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.AddResourceRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteResource(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/containerd.services.leases.v1.Leases/DeleteResource',
containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.DeleteResourceRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListResources(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/containerd.services.leases.v1.Leases/ListResources',
containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.ListResourcesRequest.SerializeToString,
containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.ListResourcesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata) |
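# Illustrative client-side sketch (the socket address is an assumption, and a real
# containerd deployment also expects namespace metadata on each call):
#
#   channel = grpc.insecure_channel('unix:///run/containerd/containerd.sock')
#   leases = LeasesStub(channel)
#   response = leases.List(
#       containerd_dot_services_dot_leases_dot_v1_dot_leases__pb2.ListRequest())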
|
ex034.py | salário = float(input('Digite o valor do seu salário: R$'))
if salário <= 1250:
novo = salário + (salário * 15 / 100)
else:
    novo = salário + (salário * 10 / 100)
print('Quem ganhava R$ {:.2f} passou a ganhar R$ {:.2f}'.format(salário, novo))
vrs.go | /*
Copyright (c) 2015, Alcatel-Lucent Inc
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package vspk
import "github.com/nuagenetworks/go-bambou/bambou"
// VRSIdentity represents the Identity of the object
var VRSIdentity = bambou.Identity{
Name: "vrs",
Category: "vrss",
}
// VRSsList represents a list of VRSs
type VRSsList []*VRS
// VRSsAncestor is the interface that an ancestor of a VRS must implement.
// An Ancestor is defined as an entity that has VRS as a descendant.
// An Ancestor can get a list of its child VRSs, but not necessarily create one.
type VRSsAncestor interface {
VRSs(*bambou.FetchingInfo) (VRSsList, *bambou.Error)
}
// VRSsParent is the interface that a parent of a VRS must implement.
// A Parent is defined as an entity that has VRS as a child.
// A Parent is an Ancestor which can create a VRS.
type VRSsParent interface {
VRSsAncestor
CreateVRS(*VRS) *bambou.Error
}
// VRS represents the model of a vrs
type VRS struct {
ID string `json:"ID,omitempty"`
ParentID string `json:"parentID,omitempty"`
ParentType string `json:"parentType,omitempty"`
Owner string `json:"owner,omitempty"`
Name string `json:"name,omitempty"`
ManagementIP string `json:"managementIP,omitempty"`
ParentIDs []interface{} `json:"parentIDs,omitempty"`
LastEventName string `json:"lastEventName,omitempty"`
LastEventObject string `json:"lastEventObject,omitempty"`
LastEventTimestamp int `json:"lastEventTimestamp,omitempty"`
LastStateChange int `json:"lastStateChange,omitempty"`
LastUpdatedBy string `json:"lastUpdatedBy,omitempty"`
GatewayUUID string `json:"gatewayUUID,omitempty"`
DbSynced bool `json:"dbSynced"`
Address string `json:"address,omitempty"`
PeakCPUUsage float64 `json:"peakCPUUsage,omitempty"`
PeakMemoryUsage float64 `json:"peakMemoryUsage,omitempty"`
Personality string `json:"personality,omitempty"`
Description string `json:"description,omitempty"`
Messages []interface{} `json:"messages,omitempty"`
RevertBehaviorEnabled bool `json:"revertBehaviorEnabled"`
RevertCompleted bool `json:"revertCompleted"`
RevertCount int `json:"revertCount,omitempty"`
RevertFailedCount int `json:"revertFailedCount,omitempty"`
LicensedState string `json:"licensedState,omitempty"`
Disks []interface{} `json:"disks,omitempty"`
EmbeddedMetadata []interface{} `json:"embeddedMetadata,omitempty"`
EntityScope string `json:"entityScope,omitempty"`
Location string `json:"location,omitempty"`
Role string `json:"role,omitempty"`
Uptime int `json:"uptime,omitempty"`
PrimaryVSCConnectionLost bool `json:"primaryVSCConnectionLost"`
ProductVersion string `json:"productVersion,omitempty"`
IsResilient bool `json:"isResilient"`
Status string `json:"status,omitempty"`
MultiNICVPortEnabled bool `json:"multiNICVPortEnabled"`
NumberOfBridgeInterfaces int `json:"numberOfBridgeInterfaces,omitempty"`
NumberOfContainers int `json:"numberOfContainers,omitempty"`
NumberOfHostInterfaces int `json:"numberOfHostInterfaces,omitempty"`
NumberOfVirtualMachines int `json:"numberOfVirtualMachines,omitempty"`
CurrentCPUUsage float64 `json:"currentCPUUsage,omitempty"`
CurrentMemoryUsage float64 `json:"currentMemoryUsage,omitempty"`
AverageCPUUsage float64 `json:"averageCPUUsage,omitempty"`
AverageMemoryUsage float64 `json:"averageMemoryUsage,omitempty"`
ExternalID string `json:"externalID,omitempty"`
HypervisorConnectionState string `json:"hypervisorConnectionState,omitempty"`
HypervisorIdentifier string `json:"hypervisorIdentifier,omitempty"`
HypervisorName string `json:"hypervisorName,omitempty"`
HypervisorType string `json:"hypervisorType,omitempty"`
}
// NewVRS returns a new *VRS
func NewVRS() *VRS {
	return &VRS{}
}
// Identity returns the Identity of the object.
func (o *VRS) Identity() bambou.Identity {
return VRSIdentity
}
// Identifier returns the value of the object's unique identifier.
func (o *VRS) Identifier() string {
return o.ID
}
// SetIdentifier sets the value of the object's unique identifier.
func (o *VRS) SetIdentifier(ID string) {
o.ID = ID
}
// Fetch retrieves the VRS from the server
func (o *VRS) Fetch() *bambou.Error {
return bambou.CurrentSession().FetchEntity(o)
}
// Save saves the VRS into the server
func (o *VRS) Save() *bambou.Error {
return bambou.CurrentSession().SaveEntity(o)
}
// Delete deletes the VRS from the server
func (o *VRS) Delete() *bambou.Error {
return bambou.CurrentSession().DeleteEntity(o)
}
// Metadatas retrieves the list of child Metadatas of the VRS
func (o *VRS) Metadatas(info *bambou.FetchingInfo) (MetadatasList, *bambou.Error) {
var list MetadatasList
err := bambou.CurrentSession().FetchChildren(o, MetadataIdentity, &list, info)
return list, err
}
// CreateMetadata creates a new child Metadata under the VRS
func (o *VRS) CreateMetadata(child *Metadata) *bambou.Error {
return bambou.CurrentSession().CreateChild(o, child)
}
// Alarms retrieves the list of child Alarms of the VRS
func (o *VRS) Alarms(info *bambou.FetchingInfo) (AlarmsList, *bambou.Error) {
var list AlarmsList
err := bambou.CurrentSession().FetchChildren(o, AlarmIdentity, &list, info)
return list, err
}
// GlobalMetadatas retrieves the list of child GlobalMetadatas of the VRS
func (o *VRS) GlobalMetadatas(info *bambou.FetchingInfo) (GlobalMetadatasList, *bambou.Error) {
var list GlobalMetadatasList
err := bambou.CurrentSession().FetchChildren(o, GlobalMetadataIdentity, &list, info)
return list, err
}
// CreateGlobalMetadata creates a new child GlobalMetadata under the VRS
func (o *VRS) CreateGlobalMetadata(child *GlobalMetadata) *bambou.Error {
return bambou.CurrentSession().CreateChild(o, child)
}
// VMs retrieves the list of child VMs of the VRS
func (o *VRS) VMs(info *bambou.FetchingInfo) (VMsList, *bambou.Error) {
var list VMsList
err := bambou.CurrentSession().FetchChildren(o, VMIdentity, &list, info)
return list, err
}
// CreateJob creates a new child Job under the VRS
func (o *VRS) CreateJob(child *Job) *bambou.Error {
return bambou.CurrentSession().CreateChild(o, child)
}
// MonitoringPorts retrieves the list of child MonitoringPorts of the VRS
func (o *VRS) MonitoringPorts(info *bambou.FetchingInfo) (MonitoringPortsList, *bambou.Error) {
var list MonitoringPortsList
err := bambou.CurrentSession().FetchChildren(o, MonitoringPortIdentity, &list, info)
return list, err
}
// Containers retrieves the list of child Containers of the VRS
func (o *VRS) Containers(info *bambou.FetchingInfo) (ContainersList, *bambou.Error) {
var list ContainersList
err := bambou.CurrentSession().FetchChildren(o, ContainerIdentity, &list, info)
return list, err
}
// ControllerVRSLinks retrieves the list of child ControllerVRSLinks of the VRS
func (o *VRS) ControllerVRSLinks(info *bambou.FetchingInfo) (ControllerVRSLinksList, *bambou.Error) {
var list ControllerVRSLinksList
err := bambou.CurrentSession().FetchChildren(o, ControllerVRSLinkIdentity, &list, info)
return list, err
}
// VPorts retrieves the list of child VPorts of the VRS
func (o *VRS) VPorts(info *bambou.FetchingInfo) (VPortsList, *bambou.Error) {
var list VPortsList
err := bambou.CurrentSession().FetchChildren(o, VPortIdentity, &list, info)
return list, err
}
// Statistics retrieves the list of child Statistics of the VRS
func (o *VRS) Statistics(info *bambou.FetchingInfo) (StatisticsList, *bambou.Error) {
var list StatisticsList
err := bambou.CurrentSession().FetchChildren(o, StatisticsIdentity, &list, info)
return list, err
}
// MultiNICVPorts retrieves the list of child MultiNICVPorts of the VRS
func (o *VRS) MultiNICVPorts(info *bambou.FetchingInfo) (MultiNICVPortsList, *bambou.Error) {
var list MultiNICVPortsList
err := bambou.CurrentSession().FetchChildren(o, MultiNICVPortIdentity, &list, info)
return list, err
}
// EventLogs retrieves the list of child EventLogs of the VRS
func (o *VRS) EventLogs(info *bambou.FetchingInfo) (EventLogsList, *bambou.Error) {
var list EventLogsList
err := bambou.CurrentSession().FetchChildren(o, EventLogIdentity, &list, info)
return list, err
}
| {
return &VRS{}
} |
comment.js | (function(d, s, id) {
  var js, fjs = d.getElementsByTagName(s)[0];
  if (d.getElementById(id)) return;
js = d.createElement(s); js.id = id;
js.src = 'https://connect.facebook.net/vi_VN/sdk.js#xfbml=1&version=v3.0&appId=154259861915442&autoLogAppEvents=1';
fjs.parentNode.insertBefore(js, fjs);
}(document, 'script', 'facebook-jssdk'));
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-121882571-1');
(adsbygoogle = window.adsbygoogle || []).push({
google_ad_client: "ca-pub-3145178805367189",
enable_page_level_ads: true
});
index.js | module.exports = {
// YOUR CODE GOES HERE
User: require("./User")
};
// Exporting an object containing all of our models
|
|
gachiGASM.py | import logging
import os
import traceback
from datetime import datetime, time, timezone
from random import Random, choice
import disnake
from disnake.ext import tasks
from disnake.ext.commands import BucketType, cooldown, guild_only
from bot.bot import command, group, has_permissions
from bot.globals import PLAYLISTS
from cogs.cog import Cog
from utils.utilities import read_lines
logger = logging.getLogger('terminal')
class WrestlingGif:
def __init__(self, url, text):
self.url = url
self.text = text
| description = self.text.format(author=author, recipient=recipient)
embed = disnake.Embed(description=description)
embed.set_image(url=self.url)
return embed
wrestling_gifs = [
WrestlingGif('https://i.imgur.com/xUi2Vq1.gif', "**{recipient.name}** tries to grab but it fails. **{author.name}** grabs **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/osDWTHG.gif', "**{recipient.name}** tries to escape but **{author.name}** pins them down"),
WrestlingGif('https://i.imgur.com/HS6R463.gif', "**{author.name}** lifts **{recipient.name}** up. **{recipient.name}** is powerless to do anything"),
WrestlingGif('https://i.imgur.com/jbE2XVt.gif', "**{author.name}** challenges **{recipient.name}** to a friendly wrestling match"),
WrestlingGif('https://i.imgur.com/XVUjH9x.gif', "**{recipient.name}** tries to attack but **{author.name}** counters"),
WrestlingGif('https://i.imgur.com/vTeoYAE.gif', "**{author.name}** and **{recipient.name}** engage in a battle of strength"),
WrestlingGif('https://i.imgur.com/iu2kiVy.gif', "**{author.name}** gets a hold of **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/BulkVW1.gif', "**{author.name}** gets **{recipient.name}** with a knee strike"),
WrestlingGif('https://i.imgur.com/zXaIYLp.gif', "**{author.name}** beats **{recipient.name}** down"),
WrestlingGif('https://i.imgur.com/XNOMUcg.gif', "**{author.name}** delivers a low blow to **{recipient.name}**. Nasty strategy"),
WrestlingGif('https://i.imgur.com/oSG0V6a.gif', "**{recipient.name}** gets beaten by **{author.name}**"),
WrestlingGif('https://i.imgur.com/u0H0ZSA.gif', "**{author.name}** grabs **{recipient.name}**s fucking pants <:GWjojoGachiGASM:363025405562585088>"),
WrestlingGif('https://i.imgur.com/VFruiTR.gif', "**{author.name}** flexes on **{recipient.name}** after kicking their ass. WOO"),
WrestlingGif('https://i.imgur.com/YCd1aSo.gif', "**{author.name}** beats **{recipient.name}** up"),
WrestlingGif('https://i.imgur.com/M3sAu23.gif', "**{author.name}** chokes **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/inEROy3.gif', "**{author.name}** throws **{recipient.name}** on the ground"),
WrestlingGif('https://i.imgur.com/8qI8f1M.gif', "**{author.name}** battles **{recipient.name}** in a feat of pure strength"),
WrestlingGif('https://i.imgur.com/xhVIjIt.gif', "**{author.name}** lifts **{recipient.name}** up"),
WrestlingGif('https://i.imgur.com/RW07zr0.gif', "**{author.name}** escapes the choke of **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/g6wVGpG.gif', "**{author.name}** escapes **{recipient.name}**s grab and begins a counter-attack"),
WrestlingGif('https://i.imgur.com/LKHtUeo.gif', "**{author.name}** gets a hold of **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/eCCAKoA.gif', "It's time to wrestle"),
WrestlingGif('https://i.imgur.com/ZFiT5Ew.gif', "**{author.name}** lifts **{recipient.name}** up"),
WrestlingGif('https://i.imgur.com/A4Oo0Tp.gif', "**{author.name}** puts **{recipient.name}** down"),
WrestlingGif('https://i.imgur.com/COQlI5t.gif', "**{author.name}** swaps positions with **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/pIaErDy.gif', "**{author.name}** pulls **{recipient.name}**s arms"),
WrestlingGif('https://i.imgur.com/hThhSrl.gif', "**{author.name}** locks **{recipient.name}**s leg"),
WrestlingGif('https://i.imgur.com/goMZvRE.gif', "**{author.name}** turns the tables on **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/3A9eMu0.gif', "**{author.name}** slams **{recipient.name}** on the floor"),
WrestlingGif('https://i.imgur.com/G9Iklxu.gif', "**{author.name}** and **{recipient.name}** are in the middle of an intense battle"),
WrestlingGif('https://i.imgur.com/c1CQBnJ.gif', "**{recipient.name}** gets elbow struck by **{author.name}**"),
WrestlingGif('https://i.imgur.com/cKcOJo0.gif', "**{author.name}** pulls **{recipient.name}**s leg"),
WrestlingGif('https://i.imgur.com/Q41oEne.gif', "**{recipient.name}** gets elbow struck by **{author.name}**"),
WrestlingGif('https://i.imgur.com/AP7MRnF.gif', "**{author.name}** escapes the hold of **{recipient.name}** and is ready for more"),
WrestlingGif('https://i.imgur.com/6khggL1.gif', "**{author.name}** pulls the hair of **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/bq0Bjbl.gif', "**{author.name}** got the moves"),
WrestlingGif('https://i.imgur.com/aIVoytr.gif', "**{author.name}** throws **{recipient.name}** on the ground"),
WrestlingGif('https://i.imgur.com/l137Zzh.gif', "**{recipient.name}** gets elbow struck by **{author.name}**"),
WrestlingGif('https://i.imgur.com/tFZv2j9.gif', "**{recipient.name}** and **{author.name}** engage in a fight. **{author.name}** makes the first move"),
WrestlingGif('https://i.imgur.com/kVXjE3Q.gif', "**{author.name}** pulls **{recipient.name}**'s hands"),
WrestlingGif('https://i.imgur.com/4IsfXSD.gif', "**{author.name}** has **{recipient.name}** locked down"),
WrestlingGif('https://i.imgur.com/HnLRl26.gif', "**{author.name}** spins **{recipient.name}** right round baby right round"),
WrestlingGif('https://i.imgur.com/uJtuZ4V.gif', "**{author.name}** beats **{recipient.name}** up and locks him down"),
WrestlingGif('https://i.imgur.com/ZgXNVIb.gif', "**{recipient.name}** flails his arms around helplessly"),
WrestlingGif('https://i.imgur.com/Jcu4NyL.gif', "**{author.name}** manages to get a quick jab in at **{recipient.name}**"),
WrestlingGif('https://i.imgur.com/XUpxidH.gif', "**{author.name}** pulls on **{recipient.name}**'s leg"),
WrestlingGif('https://i.imgur.com/pTBy6ap.gif', "**{recipient.name}** and **{author.name}** engage in a hugging competition"),
WrestlingGif('https://i.imgur.com/ggTj4xI.gif', "**{author.name}** escapes **{recipient.name}**'s hold and counters"),
WrestlingGif('https://i.imgur.com/lS2zZre.gif', "**{author.name}** locks **{recipient.name}**'s legs"),
WrestlingGif('https://i.imgur.com/fdgI1Br.gif', "**{recipient.name}** gets choked by **{author.name}** and tries to escape but fails"),
]
class gachiGASM(Cog):
def __init__(self, bot):
super().__init__(bot)
self.gachilist = self.bot.gachilist
if not self.gachilist:
self.reload_gachilist()
self._start_task = self._reload_and_post.start()
logger.info(f'Starting gachi loop.\n{"".join(traceback.format_stack()[-8:])}')
def cog_unload(self):
self._reload_and_post.cancel()
@tasks.loop(time=time(tzinfo=timezone.utc), reconnect=False)
async def _reload_and_post(self):
logger.info(f'Start task is {self._start_task}, '
f'current task is {self._reload_and_post.get_task()}, '
f'fail status: {self._reload_and_post._last_iteration_failed}, '
f'next iter {self._reload_and_post.next_iteration}.\n{"".join(traceback.format_stack()[-8:])}')
self.reload_gachilist()
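        # Post today's deterministic pick to every guild that has a dailygachi channel configured.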
for guild in self.bot.guilds:
channel = self.bot.guild_cache.dailygachi(guild.id)
if not channel:
continue
channel = guild.get_channel(channel)
if not channel:
continue
vid = Random(self.get_day()+guild.id).choice(self.gachilist)
try:
await channel.send(f'Daily gachi {vid}')
except disnake.HTTPException:
pass
def reload_gachilist(self):
self.bot.gachilist = read_lines(os.path.join(PLAYLISTS, 'gachi.txt'))
self.gachilist = self.bot.gachilist
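    # Days since datetime.min give a stable per-day seed for the daily random picks.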
@staticmethod
def get_day():
return (datetime.utcnow() - datetime.min).days
@command()
@cooldown(1, 2, BucketType.channel)
async def gachify(self, ctx, *, words):
"""Gachify a string"""
if ' ' not in words:
# We need to undo the string view or it will skip the first word
ctx.view.undo()
await self.gachify2.invoke(ctx)
else:
return await ctx.send(words.replace(' ', r' \♂ ').upper()[:2000])
@command()
@cooldown(1, 2, BucketType.channel)
async def gachify2(self, ctx, *, words):
"""An alternative way of gachifying"""
s = r'\♂ ' + words.replace(' ', r' \♂ ').upper() + r' \♂'
return await ctx.send(s[:2000])
@command(aliases=['rg'])
@cooldown(1, 5, BucketType.channel)
async def randomgachi(self, ctx):
await ctx.send(choice(self.gachilist))
@group(invoke_without_command=True, aliases=['dg'])
@guild_only()
@cooldown(1, 5, BucketType.channel)
async def dailygachi(self, ctx):
await ctx.send(Random(self.get_day()+ctx.guild.id).choice(self.gachilist))
@dailygachi.command(np_pm=True)
@cooldown(1, 5)
@has_permissions(manage_guild=True)
async def subscribe(self, ctx, *, channel: disnake.TextChannel=None):
if channel:
await self.bot.guild_cache.set_dailygachi(ctx.guild.id, channel.id)
return await ctx.send(f'New dailygachi channel set to {channel}')
channel = self.bot.guild_cache.dailygachi(ctx.guild.id)
channel = ctx.guild.get_channel(channel)
if channel:
await ctx.send(f'Current dailygachi channel is {channel}')
else:
await ctx.send('No dailygachi channel set')
@dailygachi.command()
@cooldown(1, 5)
@has_permissions(manage_guild=True)
@guild_only()
async def unsubscribe(self, ctx):
await self.bot.guild_cache.set_dailygachi(ctx.guild.id, None)
await ctx.send('Dailygachi channel no longer set')
@command()
@cooldown(1, 5, BucketType.member)
@guild_only()
async def wrestle(self, ctx, *, user: disnake.User):
if user == ctx.author:
await ctx.send('Wrestling against yourself...')
return
wrestling_gif = choice(wrestling_gifs)
await ctx.send(embed=wrestling_gif.build_embed(ctx.author, user))
def setup(bot):
bot.add_cog(gachiGASM(bot)) | def build_embed(self, author, recipient): |
generate:module.ts | import { GluegunToolbox } from 'gluegun'
module.exports = {
name: 'generate:module',
description: 'create a new module inside the app',
run: async (toolbox: GluegunToolbox) => {
const { parameters, print: { success, error } } = toolbox;
const name = parameters.first;
if(!name) {
error('ERROR: module name must be provided!');
return;
}
success('Created module not implemented yet');
// create folder with module name
// create screens folder with an example screen
    // create navigation folder, with stack navigator configured
// create redux folder, with examples
// create interface folder, with interface example
// alert about export module on index.ts (modules)
// alert about import reducer on root reducer
// alert about import sagas on root saga
// alert about import navigation on root navigation
}
} | ||
sortCategories.ts | /*
* Power BI Visual CLI
*
* Copyright (c) Microsoft Corporation
* All rights reserved.
* MIT License
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the ""Software""), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
'use strict'
import { ICategory } from '@essex/react-heat-streams'
import { ICategoryDataMap, IVisualDataOptions } from '../chart/interfaces'
type Sorter = (cat1: ICategory, cat2: ICategory) => number
const invert = (sortComparator: Sorter) => (cat1: ICategory, cat2: ICategory) =>
-1 * sortComparator(cat1, cat2)
function | (
categories: ICategory[],
categoryData: ICategoryDataMap,
options: IVisualDataOptions,
): Sorter {
const { sortBy, sortInvert: isInverted } = options
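	// Precompute per-category aggregates (average, density, max, sum) used by the value comparators.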
categories.forEach((cat: ICategory) => {
const data = categoryData[cat.id]
const count = data.length
const sum = data.reduce((prev, current) => prev + current.value, 0)
const max = Math.max(...data.map(c => c.value))
cat.metadata = {
average: sum / count,
density: data.length,
max,
sum,
}
})
const valueCompare = (field: string) => (
cat1: ICategory,
cat2: ICategory,
) => {
const v1 = cat1.metadata[field]
const v2 = cat2.metadata[field]
return v2 - v1
}
const SORT_COMPARATORS: { [key: string]: Sorter } = {
average: valueCompare('average'),
density: valueCompare('density'),
max: valueCompare('max'),
name: (cat1: ICategory, cat2: ICategory) => {
if (cat1.name < cat2.name) {
return -1
} else if (cat1.name > cat2.name) {
return 1
} else {
return 0
}
},
}
const comparator: Sorter = SORT_COMPARATORS[sortBy]
return isInverted ? invert(comparator) : comparator
}
export default function sortCategories(
categories: ICategory[],
categoryData: ICategoryDataMap,
options: IVisualDataOptions,
) {
const comparator = getSortComparator(categories, categoryData, options)
return categories.sort(comparator)
}
| getSortComparator |
metric_name.go | package storage
import (
"bytes"
"fmt"
"sort"
"strings"
"sync"
"sync/atomic"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
)
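// Control bytes used when marshaling tag keys and values. escapeChar escapes
// occurrences of the separator bytes that appear inside the data itself.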
const (
escapeChar = 0
tagSeparatorChar = 1
kvSeparatorChar = 2
)
// Tag represents a (key, value) tag for metric.
type Tag struct {
Key []byte
Value []byte
}
// Reset resets the tag.
func (tag *Tag) Reset() {
tag.Key = tag.Key[:0]
tag.Value = tag.Value[:0]
}
// Equal returns true if tag equals t
func (tag *Tag) Equal(t *Tag) bool {
return string(tag.Key) == string(t.Key) && string(tag.Value) == string(t.Value)
}
// Marshal appends marshaled tag to dst and returns the result.
func (tag *Tag) Marshal(dst []byte) []byte {
dst = marshalTagValue(dst, tag.Key)
dst = marshalTagValue(dst, tag.Value)
return dst
}
// Unmarshal unmarshals tag from src and returns the remaining data from src.
func (tag *Tag) Unmarshal(src []byte) ([]byte, error) {
var err error
src, tag.Key, err = unmarshalTagValue(tag.Key[:0], src)
if err != nil {
return src, fmt.Errorf("cannot unmarshal key: %w", err)
}
src, tag.Value, err = unmarshalTagValue(tag.Value[:0], src)
if err != nil {
return src, fmt.Errorf("cannot unmarshal value: %w", err)
}
return src, nil
}
func (tag *Tag) copyFrom(src *Tag) {
tag.Key = append(tag.Key[:0], src.Key...)
tag.Value = append(tag.Value[:0], src.Value...)
}
func marshalTagValueNoTrailingTagSeparator(dst, src []byte) []byte {
dst = marshalTagValue(dst, src)
// Remove trailing tagSeparatorChar
return dst[:len(dst)-1]
}
func marshalTagValue(dst, src []byte) []byte {
n1 := bytes.IndexByte(src, escapeChar)
n2 := bytes.IndexByte(src, tagSeparatorChar)
n3 := bytes.IndexByte(src, kvSeparatorChar)
if n1 < 0 && n2 < 0 && n3 < 0 {
// Fast path.
dst = append(dst, src...)
dst = append(dst, tagSeparatorChar)
return dst
}
// Slow path.
for _, ch := range src {
switch ch {
case escapeChar:
dst = append(dst, escapeChar, '0')
case tagSeparatorChar:
dst = append(dst, escapeChar, '1')
case kvSeparatorChar:
dst = append(dst, escapeChar, '2')
default:
dst = append(dst, ch)
}
}
dst = append(dst, tagSeparatorChar)
return dst
}
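// unmarshalTagValue reads an escaped value from src up to tagSeparatorChar,
// appends the unescaped bytes to dst and returns the remaining src.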
func unmarshalTagValue(dst, src []byte) ([]byte, []byte, error) {
n := bytes.IndexByte(src, tagSeparatorChar)
if n < 0 {
return src, dst, fmt.Errorf("cannot find the end of tag value")
}
b := src[:n]
src = src[n+1:]
for {
n := bytes.IndexByte(b, escapeChar)
if n < 0 {
dst = append(dst, b...)
return src, dst, nil
}
dst = append(dst, b[:n]...)
b = b[n+1:]
if len(b) == 0 {
return src, dst, fmt.Errorf("missing escaped char")
}
switch b[0] {
case '0':
dst = append(dst, escapeChar)
case '1':
dst = append(dst, tagSeparatorChar)
case '2':
dst = append(dst, kvSeparatorChar)
default:
return src, dst, fmt.Errorf("unsupported escaped char: %c", b[0])
}
b = b[1:]
}
}
// MetricName represents a metric name.
type MetricName struct {
MetricGroup []byte
// Tags are optional. They must be sorted by tag Key for canonical view.
// Use sortTags method.
Tags []Tag
}
// GetMetricName returns a MetricName from pool.
func GetMetricName() *MetricName {
v := mnPool.Get()
if v == nil {
return &MetricName{}
}
return v.(*MetricName)
}
// PutMetricName returns mn to the pool.
func PutMetricName(mn *MetricName) {
mn.Reset()
mnPool.Put(mn)
}
var mnPool sync.Pool
// Reset resets the mn.
func (mn *MetricName) Reset() {
mn.MetricGroup = mn.MetricGroup[:0]
mn.Tags = mn.Tags[:0]
}
// CopyFrom copies src to mn.
func (mn *MetricName) CopyFrom(src *MetricName) {
if cap(mn.MetricGroup) > 0 {
mn.MetricGroup = append(mn.MetricGroup[:0], src.MetricGroup...)
mn.Tags = copyTags(mn.Tags[:0], src.Tags)
return
}
// Pre-allocate a single byte slice for MetricGroup + all the tags.
// This reduces the number of memory allocations for zero mn.
size := len(src.MetricGroup)
for i := range src.Tags {
tag := &src.Tags[i]
size += len(tag.Key)
size += len(tag.Value)
}
b := make([]byte, 0, size)
b = append(b, src.MetricGroup...)
mn.MetricGroup = b[:len(b):len(b)]
mn.Tags = make([]Tag, len(src.Tags))
for i := range src.Tags {
st := &src.Tags[i]
dt := &mn.Tags[i]
b = append(b, st.Key...)
dt.Key = b[len(b)-len(st.Key) : len(b) : len(b)]
b = append(b, st.Value...)
dt.Value = b[len(b)-len(st.Value) : len(b) : len(b)]
}
}
// AddTag adds new tag to mn with the given key and value.
func (mn *MetricName) AddTag(key, value string) {
if key == string(metricGroupTagKey) {
mn.MetricGroup = append(mn.MetricGroup, value...)
return
}
tag := mn.addNextTag()
tag.Key = append(tag.Key[:0], key...)
tag.Value = append(tag.Value[:0], value...)
}
// AddTagBytes adds new tag to mn with the given key and value.
func (mn *MetricName) AddTagBytes(key, value []byte) {
if string(key) == string(metricGroupTagKey) {
mn.MetricGroup = append(mn.MetricGroup, value...)
return
}
tag := mn.addNextTag()
tag.Key = append(tag.Key[:0], key...)
tag.Value = append(tag.Value[:0], value...)
}
func (mn *MetricName) addNextTag() *Tag {
if len(mn.Tags) < cap(mn.Tags) {
mn.Tags = mn.Tags[:len(mn.Tags)+1]
} else {
mn.Tags = append(mn.Tags, Tag{})
}
return &mn.Tags[len(mn.Tags)-1]
}
// ResetMetricGroup resets mn.MetricGroup
func (mn *MetricName) ResetMetricGroup() {
mn.MetricGroup = mn.MetricGroup[:0]
}
var metricGroupTagKey = []byte("__name__")
// RemoveTagsOn removes all the tags not included to onTags.
func (mn *MetricName) RemoveTagsOn(onTags []string) {
if !hasTag(onTags, metricGroupTagKey) {
mn.ResetMetricGroup()
}
tags := mn.Tags
mn.Tags = mn.Tags[:0]
if len(onTags) == 0 {
return
}
for i := range tags {
tag := &tags[i]
if hasTag(onTags, tag.Key) {
mn.AddTagBytes(tag.Key, tag.Value)
}
}
}
// RemoveTag removes a tag with the given tagKey
func (mn *MetricName) RemoveTag(tagKey string) {
if tagKey == "__name__" {
mn.ResetMetricGroup()
return
}
tags := mn.Tags
mn.Tags = mn.Tags[:0]
for i := range tags {
tag := &tags[i]
if string(tag.Key) != tagKey {
mn.AddTagBytes(tag.Key, tag.Value)
}
}
}
// RemoveTagsIgnoring removes all the tags included in ignoringTags.
func (mn *MetricName) RemoveTagsIgnoring(ignoringTags []string) {
if len(ignoringTags) == 0 {
return
}
if hasTag(ignoringTags, metricGroupTagKey) {
mn.ResetMetricGroup()
}
tags := mn.Tags
mn.Tags = mn.Tags[:0]
for i := range tags {
tag := &tags[i]
if !hasTag(ignoringTags, tag.Key) {
mn.AddTagBytes(tag.Key, tag.Value)
}
}
}
// GetTagValue returns tag value for the given tagKey.
func (mn *MetricName) GetTagValue(tagKey string) []byte {
if tagKey == "__name__" {
return mn.MetricGroup
}
tags := mn.Tags
for i := range tags {
tag := &tags[i]
if string(tag.Key) == tagKey {
return tag.Value
}
}
return nil
}
// SetTags sets tags from src with keys matching addTags.
func (mn *MetricName) SetTags(addTags []string, src *MetricName) {
for _, tagName := range addTags {
if tagName == string(metricGroupTagKey) {
mn.MetricGroup = append(mn.MetricGroup[:0], src.MetricGroup...)
continue
}
var srcTag *Tag
for i := range src.Tags {
t := &src.Tags[i]
if string(t.Key) == tagName {
srcTag = t
break
}
}
if srcTag == nil {
mn.RemoveTag(tagName)
continue
}
found := false
for i := range mn.Tags {
t := &mn.Tags[i]
if string(t.Key) == tagName {
t.Value = append(t.Value[:0], srcTag.Value...)
found = true
break
}
}
if !found {
mn.AddTagBytes(srcTag.Key, srcTag.Value)
}
}
}
func hasTag(tags []string, key []byte) bool {
for _, t := range tags {
if t == string(key) {
return true
}
}
return false
}
// String returns user-readable representation of the metric name.
//
// Use this function only for debug logging.
func (mn *MetricName) String() string {
mn.sortTags()
var tags []string
for i := range mn.Tags {
t := &mn.Tags[i]
tags = append(tags, fmt.Sprintf("%q=%q", t.Key, t.Value))
}
tagsStr := strings.Join(tags, ", ")
return fmt.Sprintf("MetricGroup=%q, tags=[%s]", mn.MetricGroup, tagsStr)
}
// Marshal appends marshaled mn to dst and returns the result.
//
// Tags must be sorted before calling this function.
func (mn *MetricName) Marshal(dst []byte) []byte {
// Calculate the required size and pre-allocate space in dst
dstLen := len(dst)
requiredSize := len(mn.MetricGroup) + 1
for i := range mn.Tags {
tag := &mn.Tags[i]
requiredSize += len(tag.Key) + len(tag.Value) + 2
}
dst = bytesutil.Resize(dst, requiredSize)
dst = dst[:dstLen]
// Marshal MetricGroup
dst = marshalTagValue(dst, mn.MetricGroup)
// Marshal tags.
dst = marshalTags(dst, mn.Tags)
return dst
}
// Unmarshal unmarshals mn from src.
func (mn *MetricName) Unmarshal(src []byte) error {
// Unmarshal MetricGroup.
var err error
src, mn.MetricGroup, err = unmarshalTagValue(mn.MetricGroup[:0], src)
if err != nil {
return fmt.Errorf("cannot unmarshal MetricGroup: %w", err)
}
mn.Tags = mn.Tags[:0]
for len(src) > 0 {
tag := mn.addNextTag()
var err error
src, err = tag.Unmarshal(src)
if err != nil {
return fmt.Errorf("cannot unmarshal tag: %w", err)
}
}
	// There is no need to verify identical tag keys here,
	// since they are handled in MetricName.Marshal inside marshalTags.
return nil
}
// The maximum length of label name.
//
// Longer names are truncated.
const maxLabelNameLen = 256
// The maximum length of label value.
//
// Longer values are truncated.
const maxLabelValueLen = 16 * 1024
// The maximum number of labels per each timeseries.
var maxLabelsPerTimeseries = 30
// SetMaxLabelsPerTimeseries sets the limit on the number of labels
// per each time series.
//
// Superfluous labels are dropped.
func SetMaxLabelsPerTimeseries(maxLabels int) {
if maxLabels <= 0 {
logger.Panicf("BUG: maxLabels must be positive; got %d", maxLabels)
}
maxLabelsPerTimeseries = maxLabels
}
// MarshalMetricNameRaw marshals labels to dst and returns the result.
//
// The result must be unmarshaled with MetricName.unmarshalRaw
func MarshalMetricNameRaw(dst []byte, labels []prompb.Label) []byte {
// Calculate the required space for dst.
dstLen := len(dst)
dstSize := dstLen
for i := range labels {
if i >= maxLabelsPerTimeseries {
atomic.AddUint64(&MetricsWithDroppedLabels, 1)
break
}
label := &labels[i]
if len(label.Name) > maxLabelNameLen {
atomic.AddUint64(&TooLongLabelNames, 1)
label.Name = label.Name[:maxLabelNameLen]
}
if len(label.Value) > maxLabelValueLen {
atomic.AddUint64(&TooLongLabelValues, 1)
label.Value = label.Value[:maxLabelValueLen]
}
if len(label.Value) == 0 {
			// Skip labels without values, since they make no sense in Prometheus.
continue
}
if string(label.Name) == "__name__" {
label.Name = label.Name[:0]
}
dstSize += len(label.Name)
dstSize += len(label.Value)
dstSize += 4
}
dst = bytesutil.Resize(dst, dstSize)[:dstLen]
// Marshal labels to dst.
for i := range labels {
if i >= maxLabelsPerTimeseries {
break
}
label := &labels[i]
if len(label.Value) == 0 {
			// Skip labels without values, since they make no sense in Prometheus.
continue
}
dst = marshalBytesFast(dst, label.Name)
dst = marshalBytesFast(dst, label.Value)
}
return dst
}
var (
// MetricsWithDroppedLabels is the number of metrics with at least a single dropped label
MetricsWithDroppedLabels uint64
// TooLongLabelNames is the number of too long label names
TooLongLabelNames uint64
// TooLongLabelValues is the number of too long label values
TooLongLabelValues uint64
)
// marshalRaw marshals mn to dst and returns the result.
//
// The results may be unmarshaled with MetricName.unmarshalRaw.
//
// This function is for testing purposes. MarshalMetricNameRaw must be used
// in prod instead.
func (mn *MetricName) marshalRaw(dst []byte) []byte {
dst = marshalBytesFast(dst, nil)
dst = marshalBytesFast(dst, mn.MetricGroup)
mn.sortTags()
for i := range mn.Tags {
tag := &mn.Tags[i]
dst = marshalBytesFast(dst, tag.Key)
dst = marshalBytesFast(dst, tag.Value)
}
return dst
}
// unmarshalRaw unmarshals mn encoded with MarshalMetricNameRaw.
func (mn *MetricName) unmarshalRaw(src []byte) error {
mn.Reset()
for len(src) > 0 {
tail, key, err := unmarshalBytesFast(src)
if err != nil {
return fmt.Errorf("cannot decode key: %w", err)
}
src = tail
tail, value, err := unmarshalBytesFast(src)
if err != nil {
return fmt.Errorf("cannot decode value: %w", err)
}
src = tail
if len(key) == 0 {
mn.MetricGroup = append(mn.MetricGroup[:0], value...)
} else {
mn.AddTagBytes(key, value)
}
}
return nil
}
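// marshalBytesFast appends s prefixed with its 16-bit length to dst.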
func marshalBytesFast(dst []byte, s []byte) []byte {
dst = encoding.MarshalUint16(dst, uint16(len(s)))
dst = append(dst, s...)
return dst
}
func | (src []byte) ([]byte, []byte, error) {
if len(src) < 2 {
return src, nil, fmt.Errorf("cannot decode size form src=%X; it must be at least 2 bytes", src)
}
n := encoding.UnmarshalUint16(src)
src = src[2:]
if len(src) < int(n) {
return src, nil, fmt.Errorf("too short src=%X; it must be at least %d bytes", src, n)
}
return src[n:], src[:n], nil
}
// sortTags sorts tags in mn.
//
// Sorting tags is quite slow, so try to avoid it by caching mn
// with sorted tags.
func (mn *MetricName) sortTags() {
if len(mn.Tags) == 0 {
return
}
cts := getCanonicalTags()
if n := len(mn.Tags) - cap(cts.tags); n > 0 {
cts.tags = append(cts.tags[:cap(cts.tags)], make([]canonicalTag, n)...)
}
dst := cts.tags[:len(mn.Tags)]
for i := range mn.Tags {
tag := &mn.Tags[i]
ct := &dst[i]
ct.key = normalizeTagKey(tag.Key)
ct.tag.copyFrom(tag)
}
cts.tags = dst
// Use sort.Sort instead of sort.Slice, since sort.Slice allocates a lot.
sort.Sort(&cts.tags)
for i := range cts.tags {
mn.Tags[i].copyFrom(&cts.tags[i].tag)
}
putCanonicalTags(cts)
}
func getCanonicalTags() *canonicalTags {
v := canonicalTagsPool.Get()
if v == nil {
return &canonicalTags{}
}
return v.(*canonicalTags)
}
func putCanonicalTags(cts *canonicalTags) {
cts.tags = cts.tags[:0]
canonicalTagsPool.Put(cts)
}
var canonicalTagsPool sync.Pool
type canonicalTags struct {
tags canonicalTagsSort
}
type canonicalTag struct {
key []byte
tag Tag
}
type canonicalTagsSort []canonicalTag
func (ts *canonicalTagsSort) Len() int { return len(*ts) }
func (ts *canonicalTagsSort) Less(i, j int) bool {
x := *ts
return string(x[i].key) < string(x[j].key)
}
func (ts *canonicalTagsSort) Swap(i, j int) {
x := *ts
x[i], x[j] = x[j], x[i]
}
func marshalTags(dst []byte, tags []Tag) []byte {
var prevKey []byte
for i := range tags {
t := &tags[i]
if string(prevKey) == string(t.Key) {
// Skip duplicate keys, since they aren't allowed in Prometheus data model.
continue
}
prevKey = t.Key
dst = t.Marshal(dst)
}
return dst
}
func copyTags(dst, src []Tag) []Tag {
dstLen := len(dst)
if n := dstLen + len(src) - cap(dst); n > 0 {
dst = append(dst[:cap(dst)], make([]Tag, n)...)
}
dst = dst[:dstLen+len(src)]
for i := range src {
dst[dstLen+i].copyFrom(&src[i])
}
return dst
}
var commonTagKeys = func() map[string][]byte {
lcm := map[string][]byte{
// job-like tags must go first in MetricName.Tags.
// This should improve data locality.
// They start with \x00\x00.
// Do not change values!
//
// TODO: add more job-like tags.
"namespace": []byte("\x00\x00\x00"),
"ns": []byte("\x00\x00\x01"),
"datacenter": []byte("\x00\x00\x08"),
"dc": []byte("\x00\x00\x09"),
"environment": []byte("\x00\x00\x0c"),
"env": []byte("\x00\x00\x0d"),
"cluster": []byte("\x00\x00\x10"),
"service": []byte("\x00\x00\x18"),
"job": []byte("\x00\x00\x20"),
"model": []byte("\x00\x00\x28"),
"type": []byte("\x00\x00\x30"),
"sensor_type": []byte("\x00\x00\x38"),
"SensorType": []byte("\x00\x00\x38"),
"db": []byte("\x00\x00\x40"),
// instance-like tags must go second in MetricName.Tags.
// This should improve data locality.
// They start with \x00\x01.
// Do not change values!
//
// TODO: add more instance-like tags.
"instance": []byte("\x00\x01\x00"),
"host": []byte("\x00\x01\x08"),
"server": []byte("\x00\x01\x10"),
"pod": []byte("\x00\x01\x18"),
"node": []byte("\x00\x01\x20"),
"device": []byte("\x00\x01\x28"),
"tenant": []byte("\x00\x01\x30"),
"client": []byte("\x00\x01\x38"),
"name": []byte("\x00\x01\x40"),
"measurement": []byte("\x00\x01\x48"),
}
	// Generate upper-case variants of the lcm keys.
m := make(map[string][]byte, len(lcm)*2)
for k, v := range lcm {
s := strings.ToUpper(k[:1]) + k[1:]
m[k] = v
m[s] = v
}
return m
}()
func normalizeTagKey(key []byte) []byte {
tagKey := commonTagKeys[string(key)]
if tagKey == nil {
return key
}
return tagKey
}
| unmarshalBytesFast |
lib.rs | mod schemas;
pub use schemas::*;
pub use csv;
use std::io;
#[derive(Debug)]
pub enum Error {
CSV(csv::Error),
UnsupportedHeaders(csv::StringRecord),
}
impl From<csv::Error> for Error {
fn from(e: ::csv::Error) -> Self {
Self::CSV(e)
}
}
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Debug)]
pub struct CSVBlock<Attrs> {
/// Center of the block.
pub center: [f32; 3],
/// Half-extents of the block.
pub half_shape: [f32; 3],
/// Attributes for this block.
pub attributes: Attrs,
}
/// Detects a supported schema based on the headers and returns an iterator over [`CSVBlock`]s.
///
/// See [`SchemaClass`] for supported schemas.
pub fn read_csv_blocks<Src>(
mut csv_reader: csv::Reader<Src>,
) -> Result<Box<dyn Iterator<Item = CSVBlock<f32>>>>
where
Src: io::Read + 'static,
{
let headers = csv_reader.headers()?.clone();
SchemaClass::matching_schema(&headers)
.map(|class| { | SchemaClass::Class1 => Box::new(read_csv_records::<Schema1, _>(csv_reader)),
};
iter
})
.ok_or_else(|| Error::UnsupportedHeaders(headers.clone()))
}
/// Reads the records from `csv_reader` assuming they match `Schema`.
///
/// # Panics
///
/// If any errors occur while reading the file. We panic to provide an iterator over values instead of [`Result`]s.
pub fn read_csv_records<Schema, Src>(
csv_reader: csv::Reader<Src>,
) -> impl Iterator<Item = Schema::Repr>
where
Schema: RecordReader,
Src: io::Read,
{
csv_reader.into_records().map(|record_result| {
let mut record = record_result.unwrap();
Schema::read_record(&mut record)
})
} | let iter: Box<dyn Iterator<Item = CSVBlock<f32>>> = match class {
SchemaClass::Class0 => Box::new(read_csv_records::<Schema0, _>(csv_reader)), |
new0.go | package main
func main() {
a := new(int)
*a = 3 | // 3 | println(*a)
}
// Output: |
test_helpers.py | """
Unit test for multiple modules
This module illustrates what a proper unit test should look like.
Each function being tested has its own test procedure.
It also has a segment of "script code" that invokes the test
procedure when this module is run as an script.
Author: Walker M. White
Date: February 14, 2019
"""
import introcs # introcs assert functions
import helpers # function to be tested
def test_first_name():
"""
Test procedure for first_name(n)
"""
print('Testing first_name')
# Test case 1
result = helpers.first_name('Walker White')
introcs.assert_equals('Walker',result)
# Test case 2
result = helpers.first_name('Walker White')
introcs.assert_equals('Walker',result)
def test_last_name():
|
def test_last_name_first():
"""
Test procedure for last_name_first(n)
"""
print('Testing last_name_first')
# Test case 1
result = helpers.last_name_first('Walker White')
introcs.assert_equals('White, Walker',result)
# Test case 2
result = helpers.last_name_first('Walker White')
introcs.assert_equals('White, Walker',result)
# Script code
if __name__ == '__main__':
test_first_name()
test_last_name()
test_last_name_first()
print('The module helpers passed all tests')
| """
Test procedure for last_name(n)
"""
print('Testing last_name')
# Test case 1
result = helpers.last_name('Walker White')
introcs.assert_equals('White',result)
# Test case 2
result = helpers.last_name('Walker White')
introcs.assert_equals('White',result) |
main.py | def sum(x, y, z):
|
print(sum(1, 1, 1))
print(sum(1, 3, 3))
print(sum(3, 1, 3))
| if x == y == z:
calc = 0
else:
calc = x + y + z
return calc |
DeleteDatabaseCommand.ts | // smithy-typescript generated code
import { getEndpointDiscoveryPlugin } from "@aws-sdk/middleware-endpoint-discovery";
import { getSerdePlugin } from "@aws-sdk/middleware-serde";
import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
import { Command as $Command } from "@aws-sdk/smithy-client";
import {
FinalizeHandlerArguments,
Handler,
HandlerExecutionContext,
HttpHandlerOptions as __HttpHandlerOptions,
MetadataBearer as __MetadataBearer,
MiddlewareStack,
SerdeContext as __SerdeContext,
} from "@aws-sdk/types";
import { DeleteDatabaseRequest } from "../models/models_0";
import {
deserializeAws_json1_0DeleteDatabaseCommand,
serializeAws_json1_0DeleteDatabaseCommand,
} from "../protocols/Aws_json1_0";
import { ServiceInputTypes, ServiceOutputTypes, TimestreamWriteClientResolvedConfig } from "../TimestreamWriteClient";
export interface DeleteDatabaseCommandInput extends DeleteDatabaseRequest {}
export interface DeleteDatabaseCommandOutput extends __MetadataBearer {}
/**
* <p>Deletes a given Timestream database. <i>This is an irreversible operation.
* After a database is deleted, the time series data from its tables cannot be recovered.</i>
* </p>
*
* <note>
* <p>All tables in the database must be deleted first, or a ValidationException error will be thrown.
* </p>
*
* <p>Due to the nature of distributed retries,
* the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent.</p>
* </note>
*
* <p>See
* <a href="https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.delete-db.html">code sample</a> for details.</p>
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
* import { TimestreamWriteClient, DeleteDatabaseCommand } from "@aws-sdk/client-timestream-write"; // ES Modules import
* // const { TimestreamWriteClient, DeleteDatabaseCommand } = require("@aws-sdk/client-timestream-write"); // CommonJS import
* const client = new TimestreamWriteClient(config);
* const command = new DeleteDatabaseCommand(input);
* const response = await client.send(command);
* ```
*
* @see {@link DeleteDatabaseCommandInput} for command's `input` shape.
* @see {@link DeleteDatabaseCommandOutput} for command's `response` shape.
* @see {@link TimestreamWriteClientResolvedConfig | config} for TimestreamWriteClient's `config` shape.
*
*/
export class | extends $Command<
DeleteDatabaseCommandInput,
DeleteDatabaseCommandOutput,
TimestreamWriteClientResolvedConfig
> {
// Start section: command_properties
// End section: command_properties
constructor(readonly input: DeleteDatabaseCommandInput) {
// Start section: command_constructor
super();
// End section: command_constructor
}
/**
* @internal
*/
resolveMiddleware(
clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
configuration: TimestreamWriteClientResolvedConfig,
options?: __HttpHandlerOptions
): Handler<DeleteDatabaseCommandInput, DeleteDatabaseCommandOutput> {
this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize));
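    // Endpoint discovery is required for this operation (isDiscoveredEndpointRequired), so the endpoint is resolved before the request is sent.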
this.middlewareStack.use(
getEndpointDiscoveryPlugin(configuration, { clientStack, options, isDiscoveredEndpointRequired: true })
);
const stack = clientStack.concat(this.middlewareStack);
const { logger } = configuration;
const clientName = "TimestreamWriteClient";
const commandName = "DeleteDatabaseCommand";
const handlerExecutionContext: HandlerExecutionContext = {
logger,
clientName,
commandName,
inputFilterSensitiveLog: DeleteDatabaseRequest.filterSensitiveLog,
outputFilterSensitiveLog: (output: any) => output,
};
const { requestHandler } = configuration;
return stack.resolve(
(request: FinalizeHandlerArguments<any>) =>
requestHandler.handle(request.request as __HttpRequest, options || {}),
handlerExecutionContext
);
}
private serialize(input: DeleteDatabaseCommandInput, context: __SerdeContext): Promise<__HttpRequest> {
return serializeAws_json1_0DeleteDatabaseCommand(input, context);
}
private deserialize(output: __HttpResponse, context: __SerdeContext): Promise<DeleteDatabaseCommandOutput> {
return deserializeAws_json1_0DeleteDatabaseCommand(output, context);
}
// Start section: command_body_extra
// End section: command_body_extra
}
| DeleteDatabaseCommand |
types.rs | use std::io;
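/// CQL versions supported by the client; `to_str` yields the version string (e.g. "3.0.0").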
pub enum CqlVersion {
V_3_0_0,
}
impl CqlVersion {
pub fn to_str(&self) -> &'static str {
match *self {
CqlVersion::V_3_0_0 => "3.0.0",
}
}
}
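/// Supported compression algorithms ("lz4", "snappy").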
pub enum Compression {
LZ4,
Snappy,
}
impl Compression {
pub fn to_str(&self) -> &'static str {
match *self {
Compression::LZ4 => "lz4",
Compression::Snappy => "snappy",
}
}
}
pub enum CqlValue {
Int(i32),
}
pub struct CqlOption {
id: i16,
value: CqlValue,
}
pub enum CqlTypeCode {
Custom = 0x0000,
Ascii = 0x0001,
Bigint = 0x0002,
Blob = 0x0003,
Boolean = 0x0004,
Counter = 0x0005,
Decimal = 0x0006,
Double = 0x0007,
Float = 0x0008, | Varint = 0x000E,
TimeUuid = 0x000F,
Inet = 0x0010,
List = 0x0020,
Map = 0x0021,
Set = 0x0022,
Udt = 0x0030,
Tuple = 0x0031,
}
pub enum CqlType {
Custom(String),
Ascii,
Bigint,
Blob,
Boolean,
Counter,
Decimal,
Double,
Float,
Int,
Timestamp,
Uuid,
Varchar,
Varint,
Timeuuid,
Inet,
//List(CqlOption),
//Map(CqlOption, CqlOption),
//Set(CqlOption),
//UDT(CqlUDT),
//Tuple(CqlTuple),
}
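/// Values that can serialize themselves into a byte buffer.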
pub trait ToCql {
fn write(&self, buf: &mut Vec<u8>) -> io::Result<()>;
} | Int = 0x0009,
Timestamp = 0x000B,
Uuid = 0x000C,
Varchar = 0x000D, |
client.go | package twitchpubsub
import (
"errors"
"log"
"sync"
"time"
)
const (
reconnectInterval = 5 * time.Second
pingInterval = 4 * time.Minute
pongDeadlineTime = 9 * time.Second
writerBufferLength = 100
readerBufferLength = 100
messageBusBufferLength = 50
// maximum number of connections to open
defaultConnectionLimit = 10
// maximum number of topics one connection can listen to
defaultTopicLimit = 50
)
var (
	// ErrNotConnected is returned if an action is attempted on a Client that is not connected
ErrNotConnected = errors.New("go-twitch-pubsub: Not connected")
// ErrDisconnectedByUser is returned from Connect after the user calls Disconnect()
ErrDisconnectedByUser = errors.New("go-twitch-pubsub: Disconnected by user")
// DefaultHost is the default host to connect to Twitch's pubsub servers
DefaultHost = "wss://pubsub-edge.twitch.tv"
)
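// messageBusType carries parsed pubsub messages from the connections to the Client's main loop.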
type messageBusType chan sharedMessage
// Client is the client that connects to Twitch's pubsub servers
type Client struct {
// Callbacks
onModerationAction func(channelID string, data *ModerationAction)
onBitsEvent func(channelID string, data *BitsEvent)
onPointsEvent func(channelID string, data *PointsEvent)
connectionManager *connectionManager
topics *topicManager
messageBus chan sharedMessage
quitChannel chan struct{}
}
// NewClient creates a client struct and fills it in with some default values
func | (host string) *Client {
c := &Client{
messageBus: make(chan sharedMessage, messageBusBufferLength),
quitChannel: make(chan struct{}),
topics: newTopicManager(),
}
c.connectionManager = &connectionManager{
host: host,
connectionLimit: defaultConnectionLimit,
connectionLimitMutex: &sync.RWMutex{},
topicLimit: defaultTopicLimit,
topicLimitMutex: &sync.RWMutex{},
messageBus: c.messageBus,
quitChannel: c.quitChannel,
}
return c
}
func (c *Client) SetConnectionLimit(connectionLimit int) {
c.connectionManager.setConnectionLimit(connectionLimit)
}
func (c *Client) SetTopicLimit(topicLimit int) {
c.connectionManager.setTopicLimit(topicLimit)
}
// OnModerationAction attaches the given callback to the moderation action event
func (c *Client) OnModerationAction(callback func(channelID string, data *ModerationAction)) {
c.onModerationAction = callback
}
// OnBitsEvent attaches the given callback to the bits event
func (c *Client) OnBitsEvent(callback func(channelID string, data *BitsEvent)) {
c.onBitsEvent = callback
}
// OnPointsEvent attaches the given callback to the points event
func (c *Client) OnPointsEvent(callback func(channelID string, data *PointsEvent)) {
c.onPointsEvent = callback
}
// Connect starts attempting to connect to the pubsub host
func (c *Client) Start() error {
go c.connectionManager.run()
for {
select {
case msg := <-c.messageBus:
switch msg.Message.(type) {
case *ModerationAction:
d := msg.Message.(*ModerationAction)
channelID, err := parseChannelIDFromModerationTopic(msg.Topic)
if err != nil {
log.Println("Error parsing channel id from moderation topic:", err)
continue
}
c.onModerationAction(channelID, d)
case *BitsEvent:
d := msg.Message.(*BitsEvent)
channelID, err := parseChannelIDFromBitsTopic(msg.Topic)
if err != nil {
log.Println("Error parsing channel id from bits topic:", err)
continue
}
c.onBitsEvent(channelID, d)
case *PointsEvent:
d := msg.Message.(*PointsEvent)
channelID, err := parseChannelIDFromPointsTopic(msg.Topic)
if err != nil {
log.Println("Error parsing channel id from points topic:", err)
continue
}
c.onPointsEvent(channelID, d)
default:
log.Println("unknown message in message bus")
}
case <-c.quitChannel:
return ErrDisconnectedByUser
}
}
}
// Disconnect disconnects from Twitch's pubsub servers and leaves the client in an idle state
func (c *Client) Disconnect() {
c.connectionManager.disconnect()
close(c.quitChannel)
}
// Listen sends a message to Twitch's pubsub servers telling them we're interested in a specific topic
// Some topics require authentication, and for those you will need to pass a valid authentication token
func (c *Client) Listen(topicName string, authToken string) {
topic := newTopic(topicName, authToken)
if !c.topics.Add(topic) {
// We were already subscribed to this topic
return
}
c.connectionManager.refreshTopic(topic)
}
| NewClient |
tap.rs | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
use net_gen::ifreq;
use std::fs::File;
use std::io::{Error as IoError, Read, Result as IoResult, Write};
use std::os::raw::*;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use utils::ioctl::{ioctl_with_mut_ref, ioctl_with_ref, ioctl_with_val};
use utils::{ioctl_expr, ioctl_ioc_nr, ioctl_iow_nr};
// As defined in the Linux UAPI:
// https://elixir.bootlin.com/linux/v4.17/source/include/uapi/linux/if.h#L33
const IFACE_NAME_MAX_LEN: usize = 16;
/// List of errors the tap implementation can throw.
#[derive(Debug)]
pub enum Error {
/// Unable to create tap interface.
CreateTap(IoError),
/// Invalid interface name.
InvalidIfname,
/// ioctl failed.
IoctlError(IoError),
/// Couldn't open /dev/net/tun.
OpenTun(IoError),
}
pub type Result<T> = ::std::result::Result<T, Error>;
const TUNTAP: ::std::os::raw::c_uint = 84;
ioctl_iow_nr!(TUNSETIFF, TUNTAP, 202, ::std::os::raw::c_int);
ioctl_iow_nr!(TUNSETOFFLOAD, TUNTAP, 208, ::std::os::raw::c_uint);
ioctl_iow_nr!(TUNSETVNETHDRSZ, TUNTAP, 216, ::std::os::raw::c_int);
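/// Builder for the `ifreq` structures handed to the tap-related ioctls.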
struct IfReqBuilder(ifreq);
impl IfReqBuilder {
fn new() -> Self {
Self(Default::default())
}
fn if_name(mut self, if_name: &[u8; IFACE_NAME_MAX_LEN]) -> Self {
// Since we don't call as_mut on the same union field more than once, this block is safe.
let ifrn_name = unsafe { self.0.ifr_ifrn.ifrn_name.as_mut() };
ifrn_name.copy_from_slice(if_name.as_ref());
self
}
fn flags(mut self, flags: i16) -> Self {
// Since we don't call as_mut on the same union field more than once, this block is safe.
let ifru_flags = unsafe { self.0.ifr_ifru.ifru_flags.as_mut() };
*ifru_flags = flags;
self
}
fn execute<F: AsRawFd>(mut self, socket: &F, ioctl: u64) -> Result<ifreq> {
// ioctl is safe. Called with a valid socket fd, and we check the return.
let ret = unsafe { ioctl_with_mut_ref(socket, ioctl, &mut self.0) };
if ret < 0 {
return Err(Error::IoctlError(IoError::last_os_error()));
}
Ok(self.0)
}
}
/// Handle for a network tap interface.
///
/// For now, this simply wraps the file descriptor for the tap device so methods
/// can run ioctls on the interface. The tap interface fd will be closed when
/// Tap goes out of scope, and the kernel will clean up the interface automatically.
#[derive(Debug)]
pub struct Tap {
tap_file: File,
if_name: [u8; IFACE_NAME_MAX_LEN],
}
// Returns a fixed-size byte array containing if_name as a
// null-terminated C string.
fn build_terminated_if_name(if_name: &str) -> Result<[u8; IFACE_NAME_MAX_LEN]> {
// Convert the string slice to bytes, and shadow the variable,
// since we no longer need the &str version.
let if_name = if_name.as_bytes();
if if_name.len() >= IFACE_NAME_MAX_LEN {
return Err(Error::InvalidIfname);
}
let mut terminated_if_name = [b'\0'; IFACE_NAME_MAX_LEN];
terminated_if_name[..if_name.len()].copy_from_slice(if_name);
Ok(terminated_if_name)
}
impl Tap {
/// Create a TUN/TAP device given the interface name.
/// # Arguments
///
/// * `if_name` - the name of the interface.
pub fn open_named(if_name: &str) -> Result<Tap> {
let terminated_if_name = build_terminated_if_name(if_name)?;
let fd = unsafe {
// Open calls are safe because we give a constant null-terminated
// string and verify the result.
libc::open(
b"/dev/net/tun\0".as_ptr() as *const c_char,
libc::O_RDWR | libc::O_NONBLOCK | libc::O_CLOEXEC,
)
};
if fd < 0 {
return Err(Error::OpenTun(IoError::last_os_error()));
}
// We just checked that the fd is valid.
let tuntap = unsafe { File::from_raw_fd(fd) };
let ifreq = IfReqBuilder::new()
.if_name(&terminated_if_name)
.flags((net_gen::IFF_TAP | net_gen::IFF_NO_PI | net_gen::IFF_VNET_HDR) as i16)
.execute(&tuntap, TUNSETIFF())?;
// Safe since only the name is accessed, and it's cloned out.
Ok(Tap {
tap_file: tuntap,
if_name: unsafe { *ifreq.ifr_ifrn.ifrn_name.as_ref() },
})
}
pub fn if_name_as_str(&self) -> &str {
let len = self
.if_name
.iter()
.position(|x| *x == 0)
.unwrap_or(IFACE_NAME_MAX_LEN);
std::str::from_utf8(&self.if_name[..len]).unwrap_or("")
}
/// Set the offload flags for the tap interface.
pub fn set_offload(&self, flags: c_uint) -> Result<()> {
// ioctl is safe. Called with a valid tap fd, and we check the return.
let ret = unsafe { ioctl_with_val(&self.tap_file, TUNSETOFFLOAD(), c_ulong::from(flags)) };
if ret < 0 {
return Err(Error::IoctlError(IoError::last_os_error()));
}
Ok(())
}
/// Set the size of the vnet hdr.
pub fn set_vnet_hdr_size(&self, size: c_int) -> Result<()> {
// ioctl is safe. Called with a valid tap fd, and we check the return.
let ret = unsafe { ioctl_with_ref(&self.tap_file, TUNSETVNETHDRSZ(), &size) };
if ret < 0 {
return Err(Error::IoctlError(IoError::last_os_error()));
}
Ok(())
}
}
impl Read for Tap {
fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
self.tap_file.read(buf)
}
}
impl Write for Tap {
fn write(&mut self, buf: &[u8]) -> IoResult<usize> |
fn flush(&mut self) -> IoResult<()> {
Ok(())
}
}
impl AsRawFd for Tap {
fn as_raw_fd(&self) -> RawFd {
self.tap_file.as_raw_fd()
}
}
#[cfg(test)]
pub mod tests {
use std::mem;
use std::os::unix::ffi::OsStrExt;
use std::process::Command;
use super::*;
use net_gen::ETH_HLEN;
// The size of the virtio net header
const VNET_HDR_SIZE: usize = 10;
const PAYLOAD_SIZE: usize = 512;
const PACKET_SIZE: usize = 1024;
fn create_socket() -> File {
// This is safe since we check the return value.
let socket = unsafe {
libc::socket(
libc::AF_PACKET,
libc::SOCK_RAW,
libc::ETH_P_ALL.to_be() as i32,
)
};
if socket < 0 {
panic!("Unable to create tap socket");
}
// This is safe; nothing else will use or hold onto the raw socket fd.
unsafe { File::from_raw_fd(socket) }
}
impl Tap {
pub fn if_index(&self) -> i32 {
let sock = create_socket();
let ifreq = IfReqBuilder::new()
.if_name(&self.if_name)
.execute(&sock, c_ulong::from(net_gen::sockios::SIOCGIFINDEX))
.unwrap();
unsafe { *ifreq.ifr_ifru.ifru_ivalue.as_ref() }
}
/// Enable the tap interface.
pub fn enable(&self) {
		// Disable IPv6 router advertisement requests
Command::new("sh")
.arg("-c")
.arg(format!(
"echo 0 > /proc/sys/net/ipv6/conf/{}/accept_ra",
self.if_name_as_str()
))
.output()
.unwrap();
let sock = create_socket();
IfReqBuilder::new()
.if_name(&self.if_name)
.flags(
(net_gen::net_device_flags_IFF_UP
| net_gen::net_device_flags_IFF_RUNNING
| net_gen::net_device_flags_IFF_NOARP) as i16,
)
.execute(&sock, c_ulong::from(net_gen::sockios::SIOCSIFFLAGS))
.unwrap();
}
}
pub struct TapTrafficSimulator {
socket: File,
send_addr: libc::sockaddr_ll,
}
impl TapTrafficSimulator {
pub fn new(tap_index: i32) -> Self {
// Create sockaddr_ll struct.
let send_addr_ptr = &unsafe { mem::zeroed() } as *const libc::sockaddr_storage;
unsafe {
let sock_addr: *mut libc::sockaddr_ll = send_addr_ptr as *mut libc::sockaddr_ll;
(*sock_addr).sll_family = libc::AF_PACKET as libc::sa_family_t;
(*sock_addr).sll_protocol = (libc::ETH_P_ALL as u16).to_be();
(*sock_addr).sll_halen = libc::ETH_ALEN as u8;
(*sock_addr).sll_ifindex = tap_index;
}
// Bind socket to tap interface.
let socket = create_socket();
let ret = unsafe {
libc::bind(
socket.as_raw_fd(),
send_addr_ptr as *const _,
mem::size_of::<libc::sockaddr_ll>() as libc::socklen_t,
)
};
if ret == -1 {
panic!("Can't create TapChannel");
}
// Enable nonblocking
let ret = unsafe { libc::fcntl(socket.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK) };
if ret == -1 {
panic!("Couldn't make TapChannel non-blocking");
}
Self {
socket,
send_addr: unsafe { *(send_addr_ptr as *const _) },
}
}
pub fn push_tx_packet(&self, buf: &[u8]) {
let res = unsafe {
libc::sendto(
self.socket.as_raw_fd(),
buf.as_ptr() as *const _,
buf.len(),
0,
(&self.send_addr as *const libc::sockaddr_ll) as *const _,
mem::size_of::<libc::sockaddr_ll>() as libc::socklen_t,
)
};
if res == -1 {
panic!("Can't inject tx_packet");
}
}
pub fn pop_rx_packet(&self, buf: &mut [u8]) -> bool {
let ret = unsafe {
libc::recvfrom(
self.socket.as_raw_fd(),
buf.as_ptr() as *mut _,
buf.len(),
0,
(&mut mem::zeroed() as *mut libc::sockaddr_storage) as *mut _,
&mut (mem::size_of::<libc::sockaddr_storage>() as libc::socklen_t),
)
};
if ret == -1 {
return false;
}
true
}
}
#[test]
fn test_tap_name() {
// Sanity check that the assumed max iface name length is correct.
assert_eq!(
IFACE_NAME_MAX_LEN,
net_gen::ifreq__bindgen_ty_1::default()
.bindgen_union_field
.len()
);
// Empty name - The tap should be named "tap0" by default
let tap = Tap::open_named("").unwrap();
assert_eq!(b"tap0\0\0\0\0\0\0\0\0\0\0\0\0", &tap.if_name);
assert_eq!("tap0", tap.if_name_as_str());
// 16 characters - too long.
let name = "a123456789abcdef";
match Tap::open_named(name) {
Err(Error::InvalidIfname) => (),
_ => panic!("Expected Error::InvalidIfname"),
};
// 15 characters - OK.
let name = "a123456789abcde";
let tap = Tap::open_named(name).unwrap();
assert_eq!(&format!("{}\0", name).as_bytes(), &tap.if_name);
assert_eq!(name, tap.if_name_as_str());
}
#[test]
fn test_tap_exclusive_open() {
let _tap1 = Tap::open_named("exclusivetap").unwrap();
// Opening same tap device a second time should not be permitted.
Tap::open_named("exclusivetap").unwrap_err();
}
#[test]
fn test_set_options() {
// This line will fail to provide an initialized FD if the test is not run as root.
let tap = Tap::open_named("").unwrap();
tap.set_vnet_hdr_size(16).unwrap();
tap.set_offload(0).unwrap();
let faulty_tap = Tap {
tap_file: unsafe { File::from_raw_fd(-1) },
if_name: [0x01; 16],
};
assert!(faulty_tap.set_vnet_hdr_size(16).is_err());
assert!(faulty_tap.set_offload(0).is_err());
}
#[test]
fn test_raw_fd() {
let tap = Tap::open_named("").unwrap();
assert_eq!(tap.as_raw_fd(), tap.tap_file.as_raw_fd());
}
#[test]
fn test_read() {
let mut tap = Tap::open_named("").unwrap();
tap.enable();
let tap_traffic_simulator = TapTrafficSimulator::new(tap.if_index());
let packet = utils::rand::rand_alphanumerics(PAYLOAD_SIZE);
tap_traffic_simulator.push_tx_packet(packet.as_bytes());
let mut buf = [0u8; PACKET_SIZE];
assert!(tap.read(&mut buf).is_ok());
assert_eq!(
&buf[VNET_HDR_SIZE..packet.len() + VNET_HDR_SIZE],
packet.as_bytes()
);
}
#[test]
fn test_write() {
let mut tap = Tap::open_named("").unwrap();
tap.enable();
let tap_traffic_simulator = TapTrafficSimulator::new(tap.if_index());
let mut packet = [0u8; PACKET_SIZE];
let payload = utils::rand::rand_alphanumerics(PAYLOAD_SIZE);
packet[ETH_HLEN as usize..payload.len() + ETH_HLEN as usize]
.copy_from_slice(payload.as_bytes());
assert!(tap.write(&packet).is_ok());
let mut read_buf = [0u8; PACKET_SIZE];
assert!(tap_traffic_simulator.pop_rx_packet(&mut read_buf));
assert_eq!(
&read_buf[..PACKET_SIZE - VNET_HDR_SIZE],
&packet[VNET_HDR_SIZE..]
);
}
}
| {
self.tap_file.write(&buf)
} |
lib.rs | /*!
[](https://travis-ci.org/jaemk/cached)
[](https://crates.io/crates/cached)
[](https://docs.rs/cached)
> Caching structures and simplified function memoization
`cached` provides implementations of several caching structures as well as a handy macro
for defining memoized functions.
## Defining memoized functions using `cached!`
`cached!` defined functions will have their results cached using the function's arguments as a key
(or a specific expression when using `cached_key!`).
When a `cached!` defined function is called, the function's cache is first checked for an already
computed (and still valid) value before evaluating the function body.
Due to the requirements of storing arguments and return values in a global cache:
- Function return types must be owned and implement `Clone`
- Function arguments must either be owned and implement `Hash + Eq + Clone` OR the `cached_key!`
macro must be used to convert arguments into an owned + `Hash + Eq + Clone` type.
- Arguments and return values will be `cloned` in the process of insertion and retrieval.
- `cached!` functions should not be used to produce side-effectual results!
**NOTE**: Any custom cache that implements `cached::Cached` can be used with the `cached` macros in place of the built-ins.
See [`examples`](https://github.com/jaemk/cached/tree/master/examples) for basic usage and
an example of implementing a custom cache-store.
### `cached!` and `cached_key!` Usage & Options:
There are several options depending on how explicit you want to be. See below for a full syntax breakdown.
1.) Using the shorthand will use an unbounded cache.
```rust,no_run
#[macro_use] extern crate cached;
#[macro_use] extern crate lazy_static;
/// Defines a function named `fib` that uses a cache named `FIB`
cached!{
FIB;
fn fib(n: u64) -> u64 = {
if n == 0 || n == 1 { return n }
fib(n-1) + fib(n-2)
}
}
# pub fn main() { }
```
2.) Using the full syntax requires specifying the full cache type and providing
an instance of the cache to use. Note that the cache's key-type is a tuple
of the function argument types. If you would like fine grained control over
the key, you can use the `cached_key!` macro.
The following example uses a `SizedCache` (LRU):
```rust,no_run
#[macro_use] extern crate cached;
#[macro_use] extern crate lazy_static;
use std::thread::sleep;
use std::time::Duration;
use cached::SizedCache;
/// Defines a function `fib` that uses an LRU cache named `FIB` which has a
/// size limit of 50 items. The `cached!` macro will implicitly combine
/// the function arguments into a tuple to be used as the cache key.
cached!{
FIB: SizedCache<(u64, u64), u64> = SizedCache::with_size(50);
fn fib(a: u64, b: u64) -> u64 = {
sleep(Duration::new(2, 0));
return a * b;
}
}
# pub fn main() { }
```
3.) The `cached_key` macro functions identically, but allows you define the
cache key as an expression.
```rust,no_run
#[macro_use] extern crate cached;
#[macro_use] extern crate lazy_static;
use std::thread::sleep;
use std::time::Duration;
use cached::SizedCache;
/// Defines a function named `fib` that uses an LRU cache named `FIB`.
/// The `Key = ` expression is used to explicitly define the value that
/// should be used as the cache key. Here the borrowed arguments are converted
/// to an owned string that can be stored in the global function cache.
cached_key!{
FIB: SizedCache<String, usize> = SizedCache::with_size(50);
Key = { format!("{}{}", a, b) };
fn fib(a: &str, b: &str) -> usize = {
let size = a.len() + b.len();
sleep(Duration::new(size as u64, 0));
size
}
}
# pub fn main() { }
```
4.) The `cached_result` and `cached_key_result` macros function similarly to `cached`
and `cached_key` respectively but the cached function needs to return `Result`
(or some type alias like `io::Result`). If the function returns `Ok(val)` then `val`
is cached, but errors are not. Note that the error type does _not_ need to implement `Clone`,
only the success type however the cache type cannot be derived and must always be
explicitly specified.
```rust,no_run
#[macro_use] extern crate cached;
#[macro_use] extern crate lazy_static;
use cached::UnboundCache;
/// Cache the successes of a function.
/// To use `cached_key_result` add a key function as in `cached_key`.
cached_result!{
FIB: UnboundCache<(u64, u64), u64> = UnboundCache::new(); // Type must always be specified
fn fib(a: u64, b: u64) -> Result<u64, ()> = {
if a == 0 || b == 0 {
return Err(());
} else {
return Ok(a * b);
}
}
}
# pub fn main() { }
```
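A sketch of `cached_key_result!`, combining the `Key = ` line from `cached_key!` with a `Result`-returning function; the exact syntax is inferred from the two variants above, and the `LEN`/`combined_len` names are made up:
```rust,no_run
#[macro_use] extern crate cached;
#[macro_use] extern crate lazy_static;
use cached::UnboundCache;
/// Cache the successful results keyed by an explicitly built `String`.
cached_key_result!{
    LEN: UnboundCache<String, usize> = UnboundCache::new(); // Type must always be specified
    Key = { format!("{}{}", a, b) };
    fn combined_len(a: &str, b: &str) -> Result<usize, ()> = {
        if a.is_empty() && b.is_empty() {
            Err(())
        } else {
            Ok(a.len() + b.len())
        }
    }
}
# pub fn main() { }
```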
## Syntax
The complete macro syntax is:
```rust,ignore
cached_key!{
CACHE_NAME: CacheType = CacheInstance;
Key = KeyExpression;
fn func_name(arg1: arg_type, arg2: arg_type) -> return_type = {
// do stuff like normal
return_type
}
}
```
Where:
- `CACHE_NAME` is the unique name used to hold a `static ref` to the cache
- `CacheType` is the full type of the cache
- `CacheInstance` is any expression that yields an instance of `CacheType` to be used
as the cache-store, followed by `;`
- When using the `cached_key!` macro, the "Key" line must be specified. This line must start with
the literal tokens `Key = `, followed by an expression that evaluates to the key, followed by `;`
- `fn func_name(arg1: arg_type, arg2: arg_type) -> return_type` is the same form as a regular function signature, with the exception
  that functions with no return value must state the unit return type explicitly (e.g. `fn func_name(arg: arg_type) -> ()`)
- The expression following `=` is the function body assigned to `func_name`. Note, the function
body can make recursive calls to its cached-self (`func_name`).
*/
pub mod macros;
pub mod stores;
pub use stores::{
UnboundCache, SizedCache, TimedCache,
};
/// Cache operations
pub trait Cached<K, V> {
/// Attempt to retrieve a cached value
fn cache_get(&mut self, k: &K) -> Option<&V>;
/// Insert a key, value pair
fn cache_set(&mut self, k: K, v: V);
/// Return the current cache size (number of elements)
fn cache_size(&self) -> usize;
/// Return the number of times a cached value was successfully retrieved
fn cache_hits(&self) -> Option<u32> { None }
/// Return the number of times a cached value was unable to be retrieved
fn cache_misses(&self) -> Option<u32> { None }
/// Return the cache capacity
fn cache_capacity(&self) -> Option<usize> |
/// Return the lifespan of cached values (time to eviction)
fn cache_lifespan(&self) -> Option<u64> { None }
}
| { None } |
main.rs | #![feature(plugin)]
#![plugin(rocket_codegen)]
extern crate cloudflare;
extern crate dotenv;
extern crate hmac;
#[macro_use]
extern crate lazy_static;
extern crate rayon;
extern crate reqwest;
extern crate rocket;
extern crate rocket_contrib;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate sha1;
extern crate toml;
use cloudflare::Cloudflare;
use rocket::http::RawStr;
use rocket::response::{NamedFile, Redirect};
use rocket::State;
use rocket_contrib::Template;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::{Mutex, RwLock};
mod errors;
mod github_event;
mod redirect_utils;
pub use errors::{Error, Result};
use github_event::{PushEvent, SignedPushEvent};
type RedirectMap = RwLock<RedirectData>;
type CloudflareApi = Mutex<Cloudflare>;
lazy_static! {
static ref GH_SECRET: String = dotenv::var("github_secret").expect("github secret ENV not found!");
}
#[derive(Debug, Serialize)]
pub struct RedirectData {
map: HashMap<String, String>,
commit_hash: String,
commit_url: String,
}
/// Update the server's redirect map whenever `redirects.toml` is updated in the
/// master branch on Github.
///
/// Called by Github's servers whenever there is a `push` event in the Github repository.
/// Returns 200 with a message if everything went OK, otherwise a 500 internal error if
/// something went wrong while updating the redirect map
#[post("/github/webhook", data = "<event>")]
fn webhook(
event: SignedPushEvent,
redirs: State<RedirectMap>,
cf: State<CloudflareApi>,
) -> Result<&'static str> {
let push: PushEvent = event.0;
// check if this is a push to master. if not, return early
if push.refs != "refs/heads/master" {
return Ok("Event not on master branch, ignoring\n");
}
// check that the redirects file was actually modified
if !push.file_modified("redirects.toml") {
return Ok("redirects.toml was not modified, ignoring\n");
}
redirect_utils::update_redirect_map(redirs, cf).map(|_| Ok("Redirects Updated!\n"))?
}
/// Return a page listing all current redirects in alphabetic order
#[get("/")]
fn index(redirs: State<RedirectMap>) -> Template {
let data: &RedirectData = &*redirs.read().expect("rlock failed");
Template::render("index", data)
}
/// Redirect a subdomain to its matching page via 302 redirect.
/// If `key` is not in the redirect map return 404.
///
/// Example: cook.rustref.com => https://doc.rust-lang.org/cargo/
#[get("/redirect/<key>")]
fn redirect_bare(key: String, redirs: State<RedirectMap>) -> Option<Redirect> {
let map: &HashMap<String, String> = &redirs.read().expect("could not lock rlock").map;
match map.get(&key) {
Some(url) => Some(Redirect::found(url)),
None => None,
}
}
/// Redirect a subdomain to its matching page via 302 redirect, preserving path.
/// If `key` is not in the redirect map return 404.
///
/// Example: ex.rustref.com/primitives.html =>
/// https://doc.rust-lang.org/stable/rust-by-example/primitives.html
#[get("/redirect/<key>/<path>")]
fn redirect(key: String, path: &RawStr, redirs: State<RedirectMap>) -> Option<Redirect> {
let map = &redirs.read().expect("could not lock rlock").map;
match map.get(&key) {
Some(url) => Some(Redirect::found(&format!("{}/{}", url, path))),
None => None,
}
}
#[get("/<file..>", rank = 2)]
fn files(file: PathBuf) -> Option<NamedFile> {
NamedFile::open(Path::new("static/").join(file)).ok()
}
fn rocket() -> rocket::Rocket {
let redirects = redirect_utils::redirects_from_file("redirects.toml")
.expect("error reading redirects from file");
let redirect_data = RedirectData {
map: redirects,
commit_hash: ".toml".into(),
commit_url: "".into(),
};
let cf_api_key: String = dotenv::var("cloudflare_key").expect("no cloudflare key found!");
let cf_email: String = dotenv::var("cloudflare_email").expect("no cloudflare email found!");
let cf_api = Cloudflare::new(
&cf_api_key,
&cf_email,
"https://api.cloudflare.com/client/v4/",
).expect("failed to create cloudflare client");
rocket::ignite()
.mount("/", routes![index, files, redirect, redirect_bare, webhook])
.manage(RwLock::new(redirect_data))
.manage(Mutex::new(cf_api))
.attach(Template::fairing())
}
fn main() {
rocket().launch();
}
#[cfg(test)]
mod tests {
extern crate serde_json;
use super::*;
#[test]
fn parse_readme_webhook() {
let json_str = include_str!("../test_data/readme_updated.json");
let parsed = serde_json::from_str::<PushEvent>(&json_str);
// println!("{:?}", parsed);
assert!(parsed.is_ok());
let push = parsed.unwrap();
assert!(push.refs == "refs/heads/rocket");
assert!(push.commits.len() > 0);
assert!(push.commits[0].modified.len() > 0);
assert!(push.commits[0].modified[0] == "Readme.md");
assert!(!push.file_modified("redirects.toml"));
}
#[test]
fn | () {
let json_str = include_str!("../test_data/multiple_commits.json");
let parsed = serde_json::from_str::<PushEvent>(&json_str);
assert!(parsed.is_ok());
let push = parsed.unwrap();
assert!(push.refs == "refs/heads/master");
assert!(push.file_modified("redirects.toml"));
}
}
| parse_webhook_multiple_commits |
reverse.js | import assert from "assert";
import reverse from "../basics/reverse.js";
describe("reverse.js in js/exercise/basics ", function () {
const qna = [
["cat", "tac"],
["alphabet", "tebahpla"],
["niknok", "konkin"],
["javascript", "tpircsavaj"],
];
for (let index = 0; index < qna.length; index++) { | const a = qna[index][1];
it(`should return ${a} for input ${q}`, function () {
assert.equal(reverse(q), a);
});
}
}); | const q = qna[index][0]; |
common.rs | use std::path::Path;
/// Extension trait for `Vec` collection.
pub trait VecExt<T> {
/// Sorts the collection.
/// Returns the modified collection.
fn into_sorted(self) -> Self;
/// Appends an element to the back of a collection.
/// Returns the modified collection.
fn push_inplace(self, item: T) -> Self;
/// Extends a collection with the contents of the iterator.
/// Returns the modified collection.
fn extend_inplace<I: IntoIterator<Item = T>>(self, iter: I) -> Self;
}
impl<T: Ord> VecExt<T> for Vec<T> {
#[inline(always)]
fn into_sorted(mut self) -> Self {
self.sort();
self
}
#[inline(always)]
fn push_inplace(mut self, item: T) -> Self {
self.push(item);
self
}
#[inline(always)]
fn extend_inplace<I: IntoIterator<Item = T>>(mut self, iter: I) -> Self {
self.extend(iter);
self
}
}
/// Extension trait for `Path`-like structs.
pub trait PathExt {
/// Yields a [`&str`] slice.
/// Panics if the path is not valid utf-8. | fn to_path_str(&self) -> &str;
/// Returns the final component of the Path.
/// Panics if the name is not valid utf-8.
fn to_filename_str(&self) -> &str;
}
impl<P: AsRef<Path>> PathExt for P {
#[inline(always)]
fn to_path_str(&self) -> &str {
self.as_ref().to_str().unwrap()
}
#[inline(always)]
fn to_filename_str(&self) -> &str {
self.as_ref().file_name().unwrap().to_str().unwrap()
}
}
#[cfg(test)]
mod test {
use std::path::Path;
use super::{PathExt, VecExt};
#[test]
fn should_sort_vec_integers() {
assert_eq!([2, 1, 3].to_vec().into_sorted(), [1, 2, 3]);
}
#[test]
fn should_get_unicode_str_for_path() {
let path = Path::new("test").join("path.txt");
assert_eq!(path.to_path_str(), "test/path.txt");
}
#[test]
fn should_get_filename_str_for_path() {
let path = Path::new("test").join("path.txt");
assert_eq!(path.to_filename_str(), "path.txt");
}
} | |
update.js |
function setEventHandler(name, handler, dom, tag, item) {
dom[name] = function(e) {
// cross browser event fix
e = e || window.event
e.which = e.which || e.charCode || e.keyCode
e.target = e.target || e.srcElement
e.currentTarget = dom
e.item = item
// prevent default behaviour (by default)
if (handler.call(tag, e) !== true) {
e.preventDefault && e.preventDefault()
e.returnValue = false
}
var el = item ? tag.parent : tag
el.update()
}
}
// used by if- attribute
function insertTo(root, node, before) {
if (root) {
root.insertBefore(before, node)
root.removeChild(node)
}
}
// item = currently looped item
function | (expressions, tag, item) {
each(expressions, function(expr) {
var dom = expr.dom,
attr_name = expr.attr,
value = tmpl(expr.expr, tag),
parent = expr.dom.parentNode
if (value == null) value = ''
// leave out riot- prefixes from strings inside textarea
if (parent && parent.tagName == 'TEXTAREA') value = value.replace(/riot-/g, '')
// no change
if (expr.value === value) return
expr.value = value
// text node
if (!attr_name) return dom.nodeValue = value
// remove original attribute
remAttr(dom, attr_name)
// event handler
if (typeof value == 'function') {
setEventHandler(attr_name, value, dom, tag, item)
// if- conditional
} else if (attr_name == 'if') {
var stub = expr.stub
// add to DOM
if (value) {
stub && insertTo(stub.parentNode, stub, dom)
// remove from DOM
} else {
stub = expr.stub = stub || document.createTextNode('')
insertTo(dom.parentNode, dom, stub)
}
// show / hide
} else if (/^(show|hide)$/.test(attr_name)) {
if (attr_name == 'hide') value = !value
dom.style.display = value ? '' : 'none'
// field value
} else if (attr_name == 'value') {
dom.value = value
// <img src="{ expr }">
} else if (attr_name.slice(0, 5) == 'riot-') {
attr_name = attr_name.slice(5)
value ? dom.setAttribute(attr_name, value) : remAttr(dom, attr_name)
} else {
if (expr.bool) {
dom[attr_name] = value
if (!value) return
value = attr_name
}
if (typeof value != 'object') dom.setAttribute(attr_name, value)
}
})
} | update |
test758.js | var callbackArguments = [];
var argument1 = function() {
callbackArguments.push(arguments)
return undefined; };
var argument2 = false;
var argument3 = ";'";
var argument4 = function() {
callbackArguments.push(arguments)
return "x£["; };
var argument5 = function() {
callbackArguments.push(arguments)
return undefined; };
var argument6 = function() {
callbackArguments.push(arguments)
return [null,[],true,8]; };
var base_0 = [49]
var r_0= undefined
try {
r_0 = base_0.every(argument1,argument2,argument3)
}
catch(e) {
r_0= "Error"
}
var base_1 = [49]
var r_1= undefined
try {
r_1 = base_1.every(argument4)
}
catch(e) {
r_1= "Error"
}
var base_2 = [49]
var r_2= undefined
try {
r_2 = base_2.every(argument5)
}
catch(e) {
r_2= "Error"
}
var base_3 = [49] | try {
r_3 = base_3.every(argument6)
}
catch(e) {
r_3= "Error"
}
function serialize(array){
return array.map(function(a){
if (a === null || a == undefined) return a;
var name = a.constructor.name;
if (name==='Object' || name=='Boolean'|| name=='Array'||name=='Number'||name=='String')
return JSON.stringify(a);
return name;
});
}
setTimeout(function(){
require("fs").writeFileSync("./experiments/every/everyQC/test758.json",JSON.stringify({"baseObjects":serialize([base_0,base_1,base_2,base_3]),"returnObjects":serialize([r_0,r_1,r_2,r_3]),"callbackArgs":callbackArguments}))
},300) | var r_3= undefined |
projects.ts | const baseUrl = "/api/projects";
export function createFetchProjectsRequest(id?: number): PortfolioAPIRequest {
return {
method: "GET",
url: `${baseUrl}${id ? `?id=${id}` : ""}`,
};
}
export function createProjectRequest(
name: string,
description: string,
content: string,
date: Date,
projectImages: { imageId: number; priority: number }[],
projectVideos: { videoId: number; priority: number }[]
): PortfolioAPIRequest {
return {
method: "PUT",
url: baseUrl,
body: { name, description, content, date, projectImages, projectVideos },
};
}
export function | (
id: number,
name: string,
description: string,
content: string,
date: Date,
projectImages: { id?: number; imageId?: number; priority: number }[],
projectVideos: { id?: number; videoId?: number; priority: number }[]
): PortfolioAPIRequest {
return {
method: "POST",
url: baseUrl,
body: {
id,
name,
description,
content,
date,
projectImages,
projectVideos,
},
};
}
export function createDeleteProjectRequest(id: number): PortfolioAPIRequest {
return {
method: "DELETE",
url: baseUrl,
body: { id },
};
}
| createUpdateProjectRequest |
test_biadjacent.py | import numpy as np
from graphidx.idx import BiAdjacent
def square():
head = np.array([0, 0, 1, 2])
tail = np.array([1, 2, 3, 3])
return BiAdjacent(head, tail)
def test_square():
neigh = square()
assert repr(neigh) == "BiAdjacent[m = 4, n = 4]"
assert set(neigh[0]) == {1, 2}
assert set(neigh[1]) == {0, 3}
assert set(neigh[2]) == {0, 3}
assert set(neigh[3]) == {1, 2}
def | ():
head = np.array([0, 1, 2, 3], dtype=np.int32)
tail = np.array([1, 3, 1, 2], dtype=np.int32)
index = BiAdjacent(head, tail)
assert repr(index) == "BiAdjacent[m = 4, n = 4]"
i2 = index[2]
assert len(i2) == 2
assert list(i2) == [1, 3]
assert list(index[0]) == [1]
assert list(index[1]) == [0, 3, 2]
| test_1 |
list_backends_request_response.go | // Copyright (c) 2016, 2018, 2021, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Code generated. DO NOT EDIT.
package networkloadbalancer
import (
"github.com/oracle/oci-go-sdk/v54/common"
"net/http"
)
// ListBackendsRequest wrapper for the ListBackends operation
//
// See also
//
// Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/networkloadbalancer/ListBackends.go.html to see an example of how to use ListBackendsRequest.
type ListBackendsRequest struct {
// The OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the network load balancer to update.
NetworkLoadBalancerId *string `mandatory:"true" contributesTo:"path" name:"networkLoadBalancerId"`
// The name of the backend set associated with the backend servers.
// Example: `example_backend_set`
BackendSetName *string `mandatory:"true" contributesTo:"path" name:"backendSetName"`
// The unique Oracle-assigned identifier for the request. If you must contact Oracle about a
// particular request, then provide the request identifier.
OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`
// The system returns the requested resource, with a 200 status, only if the resource has no etag
// matching the one specified. If the condition fails for the GET and HEAD methods, then the system returns the
// HTTP status code `304 (Not Modified)`.
// Example: `example-etag`
IfNoneMatch *string `mandatory:"false" contributesTo:"header" name:"if-none-match"`
// For list pagination. The maximum number of results per page or items to return, in a paginated "List" call.
// For important details about how pagination works, see List Pagination (https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine).
Limit *int `mandatory:"false" contributesTo:"query" name:"limit"`
// The page token representing the page from which to start retrieving results.
// For list pagination. The value of the `opc-next-page` response header from the previous "List" call.
// For important details about how pagination works, see List Pagination (https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine).
Page *string `mandatory:"false" contributesTo:"query" name:"page"`
// The sort order to use, either 'asc' (ascending) or 'desc' (descending).
SortOrder ListBackendsSortOrderEnum `mandatory:"false" contributesTo:"query" name:"sortOrder" omitEmpty:"true"`
// Metadata about the request. This information will not be transmitted to the service, but
// represents information that the SDK will consume to drive retry behavior.
RequestMetadata common.RequestMetadata
}
func (request ListBackendsRequest) String() string {
return common.PointerString(request)
}
// HTTPRequest implements the OCIRequest interface
func (request ListBackendsRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error) {
return common.MakeDefaultHTTPRequestWithTaggedStructAndExtraHeaders(method, path, request, extraHeaders)
}
// BinaryRequestBody implements the OCIRequest interface
func (request ListBackendsRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool) {
return nil, false
}
// RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.
func (request ListBackendsRequest) RetryPolicy() *common.RetryPolicy {
return request.RequestMetadata.RetryPolicy
}
// ListBackendsResponse wrapper for the ListBackends operation
type ListBackendsResponse struct {
// The underlying http response
RawResponse *http.Response
// A list of BackendCollection instances
BackendCollection `presentIn:"body"`
// Unique Oracle-assigned identifier for the request. If you must contact
// Oracle about a particular request, then provide the request identifier.
OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
// For pagination of a list of items. When paging through a list, if this header appears in the response,
// then a partial list might have been returned. Include this value as the `page` parameter for the
// subsequent GET request to get the next batch of items.
OpcNextPage *string `presentIn:"header" name:"opc-next-page"`
}
func (response ListBackendsResponse) String() string {
return common.PointerString(response)
}
// HTTPResponse implements the OCIResponse interface
func (response ListBackendsResponse) HTTPResponse() *http.Response {
return response.RawResponse
}
// ListBackendsSortOrderEnum Enum with underlying type: string
type ListBackendsSortOrderEnum string
// Set of constants representing the allowable values for ListBackendsSortOrderEnum
const (
ListBackendsSortOrderAsc ListBackendsSortOrderEnum = "ASC"
ListBackendsSortOrderDesc ListBackendsSortOrderEnum = "DESC"
)
var mappingListBackendsSortOrder = map[string]ListBackendsSortOrderEnum{
"ASC": ListBackendsSortOrderAsc,
"DESC": ListBackendsSortOrderDesc,
}
// GetListBackendsSortOrderEnumValues Enumerates the set of values for ListBackendsSortOrderEnum
func | () []ListBackendsSortOrderEnum {
values := make([]ListBackendsSortOrderEnum, 0)
for _, v := range mappingListBackendsSortOrder {
values = append(values, v)
}
return values
}
| GetListBackendsSortOrderEnumValues |
webhooks.py | # -*- coding: utf-8 -*-
"""Webex Teams Webhooks API wrapper.
Copyright (c) 2016-2020 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ..generator_containers import generator_container
from ..restsession import RestSession
from ..utils import (
check_type,
dict_from_items_with_values,
)
API_ENDPOINT = 'webhooks'
OBJECT_TYPE = 'webhook'
class WebhooksAPI(object):
"""Webex Teams Webhooks API.
Wraps the Webex Teams Webhooks API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory):
"""Initialize a new WebhooksAPI object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the Webex Teams service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(WebhooksAPI, self).__init__()
self._session = session
self._object_factory = object_factory
@generator_container
def list(self, max=100, **request_parameters):
"""List all of the authenticated user's webhooks.
This method supports Webex Teams's implementation of RFC5988 Web
Linking to provide pagination support. It returns a generator
container that incrementally yields all webhooks returned by the
query. The generator will automatically request additional 'pages' of
responses from Webex as needed until all responses have been returned.
The container makes the generator safe for reuse. A new API call will
be made, using the same parameters that were specified when the
generator was created, every time a new iterator is requested from the
container.
Args:
max(int): Limit the maximum number of items returned from the Webex
Teams service per request.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
GeneratorContainer: A GeneratorContainer which, when iterated,
yields the webhooks returned by the Webex Teams query.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
"""
check_type(max, int, optional=True)
params = dict_from_items_with_values(
request_parameters,
max=max,
)
# API request - get items
items = self._session.get_items(API_ENDPOINT, params=params)
# Yield webhook objects created from the returned items JSON objects
for item in items:
yield self._object_factory(OBJECT_TYPE, item)
def create(self, name, targetUrl, resource, event,
filter=None, secret=None, **request_parameters):
"""Create a webhook.
Args:
name(basestring): A user-friendly name for this webhook.
targetUrl(basestring): The URL that receives POST requests for
each event.
resource(basestring): The resource type for the webhook.
event(basestring): The event type for the webhook.
filter(basestring): The filter that defines the webhook scope.
secret(basestring): The secret used to generate payload signature.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
Webhook: A Webhook object with the details of the created webhook.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
"""
check_type(name, basestring)
check_type(targetUrl, basestring)
check_type(resource, basestring)
check_type(event, basestring)
check_type(filter, basestring, optional=True)
check_type(secret, basestring, optional=True)
post_data = dict_from_items_with_values(
request_parameters,
name=name,
targetUrl=targetUrl,
resource=resource,
event=event,
filter=filter,
secret=secret,
)
# API request
json_data = self._session.post(API_ENDPOINT, json=post_data)
# Return a webhook object created from the response JSON data
return self._object_factory(OBJECT_TYPE, json_data)
def get(self, webhookId):
"""Get the details of a webhook, by ID.
Args:
webhookId(basestring): The ID of the webhook to be retrieved.
Returns:
Webhook: A Webhook object with the details of the requested
webhook.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
"""
check_type(webhookId, basestring)
# API request
json_data = self._session.get(API_ENDPOINT + '/' + webhookId)
# Return a webhook object created from the response JSON data
return self._object_factory(OBJECT_TYPE, json_data)
def | (self, webhookId, name=None, targetUrl=None,
**request_parameters):
"""Update a webhook, by ID.
Args:
webhookId(basestring): The webhook ID.
name(basestring): A user-friendly name for this webhook.
targetUrl(basestring): The URL that receives POST requests for
each event.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
Webhook: A Webhook object with the updated Webex Teams webhook
details.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
"""
check_type(webhookId, basestring)
check_type(name, basestring, optional=True)
check_type(targetUrl, basestring, optional=True)
put_data = dict_from_items_with_values(
request_parameters,
name=name,
targetUrl=targetUrl,
)
# API request
json_data = self._session.put(API_ENDPOINT + '/' + webhookId,
json=put_data)
# Return a webhook object created from the response JSON data
return self._object_factory(OBJECT_TYPE, json_data)
def delete(self, webhookId):
"""Delete a webhook, by ID.
Args:
webhookId(basestring): The ID of the webhook to be deleted.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
"""
check_type(webhookId, basestring)
# API request
self._session.delete(API_ENDPOINT + '/' + webhookId)
| update |
layouts.py | import numpy as np
import numba
import umap.distances as dist
from umap.utils import tau_rand_int
@numba.njit()
def clip(val):
"""Standard clamping of a value into a fixed range (in this case -4.0 to
4.0)
Parameters
----------
val: float
The value to be clamped.
Returns
-------
The clamped value, now fixed to be in the range -4.0 to 4.0.
"""
if val > 4.0:
return 4.0
elif val < -4.0:
return -4.0
else:
return val
@numba.njit(
"f4(f4[::1],f4[::1])",
fastmath=True,
cache=True,
locals={
"result": numba.types.float32,
"diff": numba.types.float32,
"dim": numba.types.int32,
},
)
def | (x, y):
"""Reduced Euclidean distance.
Parameters
----------
x: array of shape (embedding_dim,)
y: array of shape (embedding_dim,)
Returns
-------
The squared euclidean distance between x and y
"""
result = 0.0
dim = x.shape[0]
for i in range(dim):
diff = x[i] - y[i]
result += diff * diff
return result
def _optimize_layout_euclidean_single_epoch(
head_embedding,
tail_embedding,
head,
tail,
n_vertices,
epochs_per_sample,
a,
b,
rng_state,
gamma,
dim,
move_other,
alpha,
epochs_per_negative_sample,
epoch_of_next_negative_sample,
epoch_of_next_sample,
n,
):
for i in numba.prange(epochs_per_sample.shape[0]):
if epoch_of_next_sample[i] <= n:
j = head[i]
k = tail[i]
current = head_embedding[j]
other = tail_embedding[k]
dist_squared = rdist(current, other)
if dist_squared > 0.0:
grad_coeff = -2.0 * a * b * pow(dist_squared, b - 1.0)
grad_coeff /= a * pow(dist_squared, b) + 1.0
else:
grad_coeff = 0.0
for d in range(dim):
grad_d = clip(grad_coeff * (current[d] - other[d]))
current[d] += grad_d * alpha
if move_other:
other[d] += -grad_d * alpha
epoch_of_next_sample[i] += epochs_per_sample[i]
n_neg_samples = int(
(n - epoch_of_next_negative_sample[i]) / epochs_per_negative_sample[i]
)
for p in range(n_neg_samples):
k = tau_rand_int(rng_state) % n_vertices
other = tail_embedding[k]
dist_squared = rdist(current, other)
if dist_squared > 0.0:
grad_coeff = 2.0 * gamma * b
grad_coeff /= (0.001 + dist_squared) * (
a * pow(dist_squared, b) + 1
)
elif j == k:
continue
else:
grad_coeff = 0.0
for d in range(dim):
if grad_coeff > 0.0:
grad_d = clip(grad_coeff * (current[d] - other[d]))
else:
grad_d = 4.0
current[d] += grad_d * alpha
epoch_of_next_negative_sample[i] += (
n_neg_samples * epochs_per_negative_sample[i]
)
def optimize_layout_euclidean(
head_embedding,
tail_embedding,
head,
tail,
n_epochs,
n_vertices,
epochs_per_sample,
a,
b,
rng_state,
gamma=1.0,
initial_alpha=1.0,
negative_sample_rate=5.0,
parallel=False,
verbose=False,
):
"""Improve an embedding using stochastic gradient descent to minimize the
fuzzy set cross entropy between the 1-skeletons of the high dimensional
and low dimensional fuzzy simplicial sets. In practice this is done by
sampling edges based on their membership strength (with the (1-p) terms
coming from negative sampling similar to word2vec).
Parameters
----------
head_embedding: array of shape (n_samples, n_components)
The initial embedding to be improved by SGD.
tail_embedding: array of shape (source_samples, n_components)
The reference embedding of embedded points. If not embedding new
previously unseen points with respect to an existing embedding this
is simply the head_embedding (again); otherwise it provides the
existing embedding to embed with respect to.
head: array of shape (n_1_simplices)
The indices of the heads of 1-simplices with non-zero membership.
tail: array of shape (n_1_simplices)
The indices of the tails of 1-simplices with non-zero membership.
n_epochs: int
The number of training epochs to use in optimization.
n_vertices: int
The number of vertices (0-simplices) in the dataset.
epochs_per_sample: array of shape (n_1_simplices)
A float value of the number of epochs per 1-simplex. 1-simplices with
weaker membership strength will have more epochs between being sampled.
a: float
Parameter of differentiable approximation of right adjoint functor
b: float
Parameter of differentiable approximation of right adjoint functor
rng_state: array of int64, shape (3,)
The internal state of the rng
gamma: float (optional, default 1.0)
Weight to apply to negative samples.
initial_alpha: float (optional, default 1.0)
Initial learning rate for the SGD.
negative_sample_rate: int (optional, default 5)
Number of negative samples to use per positive sample.
parallel: bool (optional, default False)
Whether to run the computation using numba parallel.
Running in parallel is non-deterministic, and is not used
if a random seed has been set, to ensure reproducibility.
verbose: bool (optional, default False)
Whether to report information on the current progress of the algorithm.
Returns
-------
embedding: array of shape (n_samples, n_components)
The optimized embedding.
"""
dim = head_embedding.shape[1]
move_other = head_embedding.shape[0] == tail_embedding.shape[0]
alpha = initial_alpha
epochs_per_negative_sample = epochs_per_sample / negative_sample_rate
epoch_of_next_negative_sample = epochs_per_negative_sample.copy()
epoch_of_next_sample = epochs_per_sample.copy()
optimize_fn = numba.njit(
_optimize_layout_euclidean_single_epoch, fastmath=True, parallel=parallel
)
for n in range(n_epochs):
optimize_fn(
head_embedding,
tail_embedding,
head,
tail,
n_vertices,
epochs_per_sample,
a,
b,
rng_state,
gamma,
dim,
move_other,
alpha,
epochs_per_negative_sample,
epoch_of_next_negative_sample,
epoch_of_next_sample,
n,
)
alpha = initial_alpha * (1.0 - (float(n) / float(n_epochs)))
if verbose and n % int(n_epochs / 10) == 0:
print("\tcompleted ", n, " / ", n_epochs, "epochs")
return head_embedding
@numba.njit(fastmath=True)
def optimize_layout_generic(
head_embedding,
tail_embedding,
head,
tail,
n_epochs,
n_vertices,
epochs_per_sample,
a,
b,
rng_state,
gamma=1.0,
initial_alpha=1.0,
negative_sample_rate=5.0,
output_metric=dist.euclidean,
output_metric_kwds=(),
verbose=False,
):
"""Improve an embedding using stochastic gradient descent to minimize the
fuzzy set cross entropy between the 1-skeletons of the high dimensional
and low dimensional fuzzy simplicial sets. In practice this is done by
sampling edges based on their membership strength (with the (1-p) terms
coming from negative sampling similar to word2vec).
Parameters
----------
head_embedding: array of shape (n_samples, n_components)
The initial embedding to be improved by SGD.
tail_embedding: array of shape (source_samples, n_components)
The reference embedding of embedded points. If not embedding new
previously unseen points with respect to an existing embedding this
is simply the head_embedding (again); otherwise it provides the
existing embedding to embed with respect to.
head: array of shape (n_1_simplices)
The indices of the heads of 1-simplices with non-zero membership.
tail: array of shape (n_1_simplices)
The indices of the tails of 1-simplices with non-zero membership.
weight: array of shape (n_1_simplices)
The membership weights of the 1-simplices.
n_epochs: int
The number of training epochs to use in optimization.
n_vertices: int
The number of vertices (0-simplices) in the dataset.
epochs_per_sample: array of shape (n_1_simplices)
A float value of the number of epochs per 1-simplex. 1-simplices with
weaker membership strength will have more epochs between being sampled.
a: float
Parameter of differentiable approximation of right adjoint functor
b: float
Parameter of differentiable approximation of right adjoint functor
rng_state: array of int64, shape (3,)
The internal state of the rng
gamma: float (optional, default 1.0)
Weight to apply to negative samples.
initial_alpha: float (optional, default 1.0)
Initial learning rate for the SGD.
negative_sample_rate: int (optional, default 5)
Number of negative samples to use per positive sample.
verbose: bool (optional, default False)
Whether to report information on the current progress of the algorithm.
Returns
-------
embedding: array of shape (n_samples, n_components)
The optimized embedding.
"""
dim = head_embedding.shape[1]
move_other = head_embedding.shape[0] == tail_embedding.shape[0]
alpha = initial_alpha
epochs_per_negative_sample = epochs_per_sample / negative_sample_rate
epoch_of_next_negative_sample = epochs_per_negative_sample.copy()
epoch_of_next_sample = epochs_per_sample.copy()
for n in range(n_epochs):
for i in range(epochs_per_sample.shape[0]):
if epoch_of_next_sample[i] <= n:
j = head[i]
k = tail[i]
current = head_embedding[j]
other = tail_embedding[k]
dist_output, grad_dist_output = output_metric(
current, other, *output_metric_kwds
)
_, rev_grad_dist_output = output_metric(
other, current, *output_metric_kwds
)
if dist_output > 0.0:
w_l = pow((1 + a * pow(dist_output, 2 * b)), -1)
else:
w_l = 1.0
grad_coeff = 2 * b * (w_l - 1) / (dist_output + 1e-6)
for d in range(dim):
grad_d = clip(grad_coeff * grad_dist_output[d])
current[d] += grad_d * alpha
if move_other:
grad_d = clip(grad_coeff * rev_grad_dist_output[d])
other[d] += grad_d * alpha
epoch_of_next_sample[i] += epochs_per_sample[i]
n_neg_samples = int(
(n - epoch_of_next_negative_sample[i])
/ epochs_per_negative_sample[i]
)
for p in range(n_neg_samples):
k = tau_rand_int(rng_state) % n_vertices
other = tail_embedding[k]
dist_output, grad_dist_output = output_metric(
current, other, *output_metric_kwds
)
if dist_output > 0.0:
w_l = pow((1 + a * pow(dist_output, 2 * b)), -1)
elif j == k:
continue
else:
w_l = 1.0
grad_coeff = gamma * 2 * b * w_l / (dist_output + 1e-6)
for d in range(dim):
grad_d = clip(grad_coeff * grad_dist_output[d])
current[d] += grad_d * alpha
epoch_of_next_negative_sample[i] += (
n_neg_samples * epochs_per_negative_sample[i]
)
alpha = initial_alpha * (1.0 - (float(n) / float(n_epochs)))
if verbose and n % int(n_epochs / 10) == 0:
print("\tcompleted ", n, " / ", n_epochs, "epochs")
return head_embedding
@numba.njit(fastmath=True)
def optimize_layout_inverse(
head_embedding,
tail_embedding,
head,
tail,
weight,
sigmas,
rhos,
n_epochs,
n_vertices,
epochs_per_sample,
a,
b,
rng_state,
gamma=1.0,
initial_alpha=1.0,
negative_sample_rate=5.0,
output_metric=dist.euclidean,
output_metric_kwds=(),
verbose=False,
):
"""Improve an embedding using stochastic gradient descent to minimize the
fuzzy set cross entropy between the 1-skeletons of the high dimensional
and low dimensional fuzzy simplicial sets. In practice this is done by
sampling edges based on their membership strength (with the (1-p) terms
coming from negative sampling similar to word2vec).
Parameters
----------
head_embedding: array of shape (n_samples, n_components)
The initial embedding to be improved by SGD.
tail_embedding: array of shape (source_samples, n_components)
The reference embedding of embedded points. If not embedding new
previously unseen points with respect to an existing embedding this
is simply the head_embedding (again); otherwise it provides the
existing embedding to embed with respect to.
head: array of shape (n_1_simplices)
The indices of the heads of 1-simplices with non-zero membership.
tail: array of shape (n_1_simplices)
The indices of the tails of 1-simplices with non-zero membership.
weight: array of shape (n_1_simplices)
The membership weights of the 1-simplices.
n_epochs: int
The number of training epochs to use in optimization.
n_vertices: int
The number of vertices (0-simplices) in the dataset.
epochs_per_sample: array of shape (n_1_simplices)
A float value of the number of epochs per 1-simplex. 1-simplices with
weaker membership strength will have more epochs between being sampled.
a: float
Parameter of differentiable approximation of right adjoint functor
b: float
Parameter of differentiable approximation of right adjoint functor
rng_state: array of int64, shape (3,)
The internal state of the rng
gamma: float (optional, default 1.0)
Weight to apply to negative samples.
initial_alpha: float (optional, default 1.0)
Initial learning rate for the SGD.
negative_sample_rate: int (optional, default 5)
Number of negative samples to use per positive sample.
verbose: bool (optional, default False)
Whether to report information on the current progress of the algorithm.
Returns
-------
embedding: array of shape (n_samples, n_components)
The optimized embedding.
"""
dim = head_embedding.shape[1]
move_other = head_embedding.shape[0] == tail_embedding.shape[0]
alpha = initial_alpha
epochs_per_negative_sample = epochs_per_sample / negative_sample_rate
epoch_of_next_negative_sample = epochs_per_negative_sample.copy()
epoch_of_next_sample = epochs_per_sample.copy()
for n in range(n_epochs):
for i in range(epochs_per_sample.shape[0]):
if epoch_of_next_sample[i] <= n:
j = head[i]
k = tail[i]
current = head_embedding[j]
other = tail_embedding[k]
dist_output, grad_dist_output = output_metric(
current, other, *output_metric_kwds
)
w_l = weight[i]
grad_coeff = -(1 / (w_l * sigmas[j] + 1e-6))
for d in range(dim):
grad_d = clip(grad_coeff * grad_dist_output[d])
current[d] += grad_d * alpha
if move_other:
other[d] += -grad_d * alpha
epoch_of_next_sample[i] += epochs_per_sample[i]
n_neg_samples = int(
(n - epoch_of_next_negative_sample[i])
/ epochs_per_negative_sample[i]
)
for p in range(n_neg_samples):
k = tau_rand_int(rng_state) % n_vertices
other = tail_embedding[k]
dist_output, grad_dist_output = output_metric(
current, other, *output_metric_kwds
)
# w_l = 0.0 # for negative samples, the edge does not exist
w_h = np.exp(-max(dist_output - rhos[k], 1e-6) / (sigmas[k] + 1e-6))
grad_coeff = -gamma * ((0 - w_h) / ((1 - w_h) * sigmas[k] + 1e-6))
for d in range(dim):
grad_d = clip(grad_coeff * grad_dist_output[d])
current[d] += grad_d * alpha
epoch_of_next_negative_sample[i] += (
n_neg_samples * epochs_per_negative_sample[i]
)
alpha = initial_alpha * (1.0 - (float(n) / float(n_epochs)))
if verbose and n % int(n_epochs / 10) == 0:
print("\tcompleted ", n, " / ", n_epochs, "epochs")
return head_embedding
| rdist |
GLOrtho.py | from pygame import Rect
# noinspection PyPackageRequirements
from OpenGL import GL
from albow.openGL.GLViewport import GLViewport
class GLOrtho(GLViewport):
"""
GLOrtho provides an OpenGL drawing area with an orthographic projection.
Using a GLOrtho widget is the same as using a GLViewport, except that you do not need to
provide a `setup_projection()` method.
------
------
"""
def __init__(self, rect: Rect=None, xmin=-1, xmax=1, ymin=-1, ymax=1, near=-1, far=1, **kwds):
"""
Creates a GLOrtho instance with the given initial values for its projection parameters.
|
xmin: Specify the coordinates for the left vertical clipping planes.
xmax: Specify the coordinates for the right vertical clipping planes.
ymin: Specify the coordinates for the bottom horizontal clipping planes.
ymax: Specify the coordinates for the top horizontal clipping planes.
near: Specify the distances to the nearer clipping planes.
These distances are negative if the plane is to be behind the viewer.
far: Specify the distances to the depth clipping planes.
These distances are negative if the plane is to be behind the viewer.
**kwds:
"""
#
# Python 3 update
#
# GLViewport.__init__(self, rect, **kwds)
super().__init__(rect, **kwds)
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.near = near
self.far = far
def setup_projection(self):
GL.glOrtho(self.xmin, self.xmax, self.ymin, self.ymax, self.near, self.far) | Args:
rect: A pygame Rect |
spinner.service.ts | import { Injectable, EventEmitter } from '@angular/core';
import { Ng4LoadingSpinnerService } from 'ng4-loading-spinner';
@Injectable()
export class | {
public spinnerActive: EventEmitter<Boolean>;
constructor(
private spinnerService: Ng4LoadingSpinnerService
) {
this.spinnerActive = new EventEmitter();
}
activate() {
this.spinnerService.show();
}
deactivate() {
this.spinnerService.hide();
}
} | SpinnerService |
test.py | import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
|
if __name__ == '__main__':
unittest.main()
| @patch('builtins.input', side_effect=[
'1',
'5',
'3 2 3 1 2',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(),
'3\n' +
'1 2 4\n') |
util.js | System.register(["./Logger", "./Config", "./TimeUtils", "./math/Intersection", "./math/MathUtils", "./math/Plane", "./math/Primitive", "./math/Quaternion", "./math/Ray", "./math/Sphere", "./math/Vec3f"], function(exports_1) {
function exportStar_1(m) {
var exports = {};
for(var n in m) {
if (n !== "default") exports[n] = m[n];
}
exports_1(exports);
}
return {
setters:[
function (Logger_1_1) {
exportStar_1(Logger_1_1);
},
function (Config_1_1) {
exportStar_1(Config_1_1);
},
function (TimeUtils_1_1) {
exportStar_1(TimeUtils_1_1);
},
function (Intersection_1_1) {
exportStar_1(Intersection_1_1);
},
function (MathUtils_1_1) {
exportStar_1(MathUtils_1_1);
},
function (Plane_1_1) {
exportStar_1(Plane_1_1);
},
function (Primitive_1_1) {
exportStar_1(Primitive_1_1);
},
function (Quaternion_1_1) {
exportStar_1(Quaternion_1_1); | },
function (Ray_1_1) {
exportStar_1(Ray_1_1);
},
function (Sphere_1_1) {
exportStar_1(Sphere_1_1);
},
function (Vec3f_1_1) {
exportStar_1(Vec3f_1_1);
}],
execute: function() {
}
}
});
//# sourceMappingURL=util.js.map | |
context.js | import React from "react";
const AuthUserContext = React.createContext(null); // default starting value
/**
* Higher order component for consuming auth user
| return function(props) {
return (
<AuthUserContext.Consumer>
{context => (
<Component
{...props}
authUser={context.authUser}
authDataFetched={context.authDataFetched}
/>
)}
</AuthUserContext.Consumer>
);
};
};
export default AuthUserContext; | */
export const withAuthUser = function(Component) {
|
env.py | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
|
else:
run_migrations_online()
| run_migrations_offline() |
model.py | """ Class for the Sequence to sequence model for ATIS."""
import os
import torch
import torch.nn.functional as F
from . import torch_utils
from . import utils_bert
from data_util.vocabulary import DEL_TOK, UNK_TOK
from .encoder import Encoder, Encoder_Gnn
from .embedder import Embedder
from .token_predictor import construct_token_predictor
import numpy as np
from data_util.atis_vocab import ATISVocabulary
from .gated_graph_conv import GatedGraphConv
def get_token_indices(token, index_to_token):
""" Maps from a gold token (string) to a list of indices.
Inputs:
token (string): String to look up.
index_to_token (list of tokens): Ordered list of tokens.
Returns:
list of int, representing the indices of the token in the probability
distribution.
"""
if token in index_to_token:
if len(set(index_to_token)) == len(index_to_token): # no duplicates
return [index_to_token.index(token)]
else:
indices = []
for index, other_token in enumerate(index_to_token):
if token == other_token:
indices.append(index)
assert len(indices) == len(set(indices))
return indices
else:
return [index_to_token.index(UNK_TOK)]
def flatten_utterances(utterances):
""" Gets a flat sequence from a sequence of utterances.
Inputs:
utterances (list of list of str): Utterances to concatenate.
Returns:
list of str, representing the flattened sequence with separating
delimiter tokens.
"""
sequence = []
for i, utterance in enumerate(utterances):
sequence.extend(utterance)
if i < len(utterances) - 1:
sequence.append(DEL_TOK)
return sequence
def encode_snippets_with_states(snippets, states):
""" Encodes snippets by using previous query states instead.
Inputs:
snippets (list of Snippet): Input snippets.
states (list of dy.Expression): Previous hidden states to use.
TODO: should this be dy.Expression or vector values?
"""
for snippet in snippets:
snippet.set_embedding(torch.cat([states[snippet.startpos],states[snippet.endpos]], dim=0))
return snippets
def load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params):
# print(output_vocabulary.inorder_tokens)
# print()
def read_glove_embedding(embedding_filename, embedding_size):
|
print('Loading Glove Embedding from', params.embedding_filename)
glove_embedding_size = 300
glove_embeddings = read_glove_embedding(params.embedding_filename, glove_embedding_size)
print('Done')
input_embedding_size = glove_embedding_size
def create_word_embeddings(vocab):
vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32)
vocabulary_tokens = vocab.inorder_tokens
glove_oov = 0
para_oov = 0
for token in vocabulary_tokens:
token_id = vocab.token_to_id(token)
if token in glove_embeddings:
vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token]
else:
glove_oov += 1
print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab))
return vocabulary_embeddings
input_vocabulary_embeddings = create_word_embeddings(input_vocabulary)
output_vocabulary_embeddings = create_word_embeddings(output_vocabulary)
output_vocabulary_schema_embeddings = None
if output_vocabulary_schema:
output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema)
return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size
class ATISModel(torch.nn.Module):
""" Sequence-to-sequence model for predicting a SQL query given an utterance
and an interaction prefix.
"""
def __init__(
self,
params,
input_vocabulary,
output_vocabulary,
output_vocabulary_schema,
anonymizer):
super().__init__()
self.params = params
if params.use_bert:
self.model_bert, self.tokenizer, self.bert_config = utils_bert.get_bert(params)
self.gnn=None
if 'atis' not in params.data_directory:
if params.use_bert:
if params.use_gnn:
encoder_input_size = self.bert_config.hidden_size
encoder_output_size = params.encoder_state_size
self.gnn = GatedGraphConv(encoder_output_size, 2, 3) #input_dim, num_timesteps, num_edge_types,
input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)
# Create the output embeddings
self.output_embedder = Embedder(params.output_embedding_size,
name="output-embedding",
initializer=output_vocabulary_embeddings,
vocabulary=output_vocabulary,
anonymizer=anonymizer,
freeze=False)
self.column_name_token_embedder = None
else:
input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)
params.input_embedding_size = input_embedding_size
self.params.input_embedding_size = input_embedding_size
# Create the input embeddings
self.input_embedder = Embedder(params.input_embedding_size,
name="input-embedding",
initializer=input_vocabulary_embeddings,
vocabulary=input_vocabulary,
anonymizer=anonymizer,
freeze=params.freeze)
# Create the output embeddings
self.output_embedder = Embedder(params.output_embedding_size,
name="output-embedding",
initializer=output_vocabulary_embeddings,
vocabulary=output_vocabulary,
anonymizer=anonymizer,
freeze=False)
self.column_name_token_embedder = Embedder(params.input_embedding_size,
name="schema-embedding",
initializer=output_vocabulary_schema_embeddings,
vocabulary=output_vocabulary_schema,
anonymizer=anonymizer,
freeze=params.freeze)
else:
# Create the input embeddings
self.input_embedder = Embedder(params.input_embedding_size,
name="input-embedding",
vocabulary=input_vocabulary,
anonymizer=anonymizer,
freeze=False)
# Create the output embeddings
self.output_embedder = Embedder(params.output_embedding_size,
name="output-embedding",
vocabulary=output_vocabulary,
anonymizer=anonymizer,
freeze=False)
self.column_name_token_embedder = None
# Create the encoder
encoder_input_size = params.input_embedding_size
encoder_output_size = params.encoder_state_size
if params.use_bert:
encoder_input_size = self.bert_config.hidden_size
if params.discourse_level_lstm:
encoder_input_size += params.encoder_state_size / 2
self.utterance_encoder = Encoder(params.encoder_num_layers, encoder_input_size, encoder_output_size)
# Positional embedder for utterances
attention_key_size = params.encoder_state_size
self.schema_attention_key_size = attention_key_size
if params.state_positional_embeddings:
attention_key_size += params.positional_embedding_size
self.positional_embedder = Embedder(
params.positional_embedding_size,
name="positional-embedding",
num_tokens=params.maximum_utterances)
self.utterance_attention_key_size = attention_key_size
# Create the discourse-level LSTM parameters
if params.discourse_level_lstm:
self.discourse_lstms = torch_utils.create_multilayer_lstm_params(1, params.encoder_state_size, params.encoder_state_size / 2, "LSTM-t")
self.initial_discourse_state = torch_utils.add_params(tuple([params.encoder_state_size / 2]), "V-turn-state-0")
# Snippet encoder
final_snippet_size = 0
if params.use_snippets and not params.previous_decoder_snippet_encoding:
snippet_encoding_size = int(params.encoder_state_size / 2)
final_snippet_size = params.encoder_state_size
if params.snippet_age_embedding:
snippet_encoding_size -= int(
params.snippet_age_embedding_size / 4)
self.snippet_age_embedder = Embedder(
params.snippet_age_embedding_size,
name="snippet-age-embedding",
num_tokens=params.max_snippet_age_embedding)
final_snippet_size = params.encoder_state_size + params.snippet_age_embedding_size / 2
self.snippet_encoder = Encoder(params.snippet_num_layers,
params.output_embedding_size,
snippet_encoding_size)
# Previous query Encoder
if params.use_previous_query:
self.query_encoder = Encoder(params.encoder_num_layers, params.output_embedding_size, params.encoder_state_size)
self.final_snippet_size = final_snippet_size
self.dropout = 0.
def _encode_snippets(self, previous_query, snippets, input_schema):
""" Computes a single vector representation for each snippet.
Inputs:
previous_query (list of str): Previous query in the interaction.
snippets (list of Snippet): Snippets extracted from the previous
Returns:
list of Snippets, where the embedding is set to a vector.
"""
startpoints = [snippet.startpos for snippet in snippets]
endpoints = [snippet.endpos for snippet in snippets]
assert len(startpoints) == 0 or min(startpoints) >= 0
if input_schema:
assert len(endpoints) == 0 or max(endpoints) <= len(previous_query)
else:
assert len(endpoints) == 0 or max(endpoints) < len(previous_query)
snippet_embedder = lambda query_token: self.get_query_token_embedding(query_token, input_schema)
if previous_query and snippets:
_, previous_outputs = self.snippet_encoder(
previous_query, snippet_embedder, dropout_amount=self.dropout)
assert len(previous_outputs) == len(previous_query)
for snippet in snippets:
if input_schema:
embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos-1]], dim=0)
else:
embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos]], dim=0)
if self.params.snippet_age_embedding:
embedding = torch.cat([embedding, self.snippet_age_embedder(min(snippet.age, self.params.max_snippet_age_embedding - 1))], dim=0)
snippet.set_embedding(embedding)
return snippets
def _initialize_discourse_states(self):
discourse_state = self.initial_discourse_state
discourse_lstm_states = []
for lstm in self.discourse_lstms:
hidden_size = lstm.weight_hh.size()[1]
if lstm.weight_hh.is_cuda:
h_0 = torch.cuda.FloatTensor(1,hidden_size).fill_(0)
c_0 = torch.cuda.FloatTensor(1,hidden_size).fill_(0)
else:
h_0 = torch.zeros(1,hidden_size)
c_0 = torch.zeros(1,hidden_size)
discourse_lstm_states.append((h_0, c_0))
return discourse_state, discourse_lstm_states
def _add_positional_embeddings(self, hidden_states, utterances, group=False):
grouped_states = []
start_index = 0
for utterance in utterances:
grouped_states.append(hidden_states[start_index:start_index + len(utterance)])
start_index += len(utterance)
assert len(hidden_states) == sum([len(seq) for seq in grouped_states]) == sum([len(utterance) for utterance in utterances])
new_states = []
flat_sequence = []
num_utterances_to_keep = min(self.params.maximum_utterances, len(utterances))
for i, (states, utterance) in enumerate(zip(
grouped_states[-num_utterances_to_keep:], utterances[-num_utterances_to_keep:])):
positional_sequence = []
index = num_utterances_to_keep - i - 1
for state in states:
positional_sequence.append(torch.cat([state, self.positional_embedder(index)], dim=0))
assert len(positional_sequence) == len(utterance), \
"Expected utterance and state sequence length to be the same, " \
+ "but they were " + str(len(utterance)) \
+ " and " + str(len(positional_sequence))
if group:
new_states.append(positional_sequence)
else:
new_states.extend(positional_sequence)
flat_sequence.extend(utterance)
return new_states, flat_sequence
def build_optim(self):
params_trainer = []
params_bert_trainer = []
for name, param in self.named_parameters():
if param.requires_grad:
if 'model_bert' in name:
params_bert_trainer.append(param)
else:
params_trainer.append(param)
self.trainer = torch.optim.Adam(params_trainer, lr=self.params.initial_learning_rate)
if self.params.fine_tune_bert:
self.bert_trainer = torch.optim.Adam(params_bert_trainer, lr=self.params.lr_bert)
def set_dropout(self, value):
""" Sets the dropout to a specified value.
Inputs:
value (float): Value to set dropout to.
"""
self.dropout = value
def set_learning_rate(self, value):
""" Sets the learning rate for the trainer.
Inputs:
value (float): The new learning rate.
"""
for param_group in self.trainer.param_groups:
param_group['lr'] = value
def save(self, filename):
""" Saves the model to the specified filename.
Inputs:
filename (str): The filename to save to.
"""
torch.save(self.state_dict(), filename)
def load(self, filename):
""" Loads saved parameters into the parameter collection.
Inputs:
filename (str): Name of file containing parameters.
"""
self.load_state_dict(torch.load(filename))
print("Loaded model from file " + filename)
| glove_embeddings = {}
with open(embedding_filename) as f:
cnt = 1
for line in f:
cnt += 1
if params.debug or not params.train:
if cnt == 1000:
print('Read 1000 word embeddings')
break
l_split = line.split()
word = " ".join(l_split[0:len(l_split) - embedding_size])
embedding = np.array([float(val) for val in l_split[-embedding_size:]])
glove_embeddings[word] = embedding
return glove_embeddings |
reverse.rs | use crate::commands::WholeStreamCommand;
use crate::context::CommandRegistry;
use crate::prelude::*;
use nu_errors::ShellError;
use nu_protocol::Signature;
pub struct Reverse;
impl WholeStreamCommand for Reverse {
fn name(&self) -> &str {
"reverse"
}
fn signature(&self) -> Signature |
fn usage(&self) -> &str {
"Reverses the table."
}
fn run(
&self,
args: CommandArgs,
registry: &CommandRegistry,
) -> Result<OutputStream, ShellError> {
reverse(args, registry)
}
}
fn reverse(args: CommandArgs, registry: &CommandRegistry) -> Result<OutputStream, ShellError> {
let args = args.evaluate_once(registry)?;
let (input, _args) = args.parts();
let input = input.values.collect::<Vec<_>>();
let output = input.map(move |mut vec| {
vec.reverse();
futures::stream::iter(vec)
});
Ok(output.flatten_stream().from_input_stream())
}
| {
Signature::build("reverse")
} |
index.js | import React from 'react'
import PropTypes from 'prop-types'
import { connect } from 'dva'
import { Info } from './components'
function InfoView ({ agentInfo }) {
const { info } = agentInfo
return (<div className="content-inner">
<Info {...info} />
</div>)
}
InfoView.propTypes = { | info: PropTypes.object,
}
export default connect(({ agentInfo }) => ({ agentInfo }))(InfoView) | agentInfo: PropTypes.object, |
meteors.py | import os
import sys
import random
import pygame
def load_image(name, colorkey=None): # not sure if this method is needed
fullname = os.path.join('data', name)
# if the file does not exist, exit
if not os.path.isfile(fullname):
print(f"Image file '{fullname}' not found")
sys.exit()
image = pygame.image.load(fullname) # we can just use this one, cuz we know that pics are ok
return image
enemies = pygame.sprite.Group()
bullets = pygame.sprite.Group()
class Meteor(pygame.sprite.Sprite):
def __init__(self):
super().__init__()
self.frames = []
self.cut_sheet(load_image("meteors1.png"), 5, 1)
self.cur_frame = 0
self.image = self.frames[self.cur_frame]
self.count = 0
self.mask = pygame.mask.from_surface(self.image)
self.rect.x = random.randrange(width)
self.rect.y = -1 * self.image.get_height()
while pygame.sprite.spritecollideany(self, enemies, pygame.sprite.collide_mask) or\
self.rect.x < 0 or self.rect.right > width:
self.rect.x = random.randrange(width)
self.life = 1
def cut_sheet(self, sheet, columns, rows):
self.rect = pygame.Rect(0, 0, sheet.get_width() // columns,
sheet.get_height() // rows)
for j in range(rows):
for i in range(columns):
frame_location = (self.rect.w * i, self.rect.h * j)
self.frames.append(sheet.subsurface(pygame.Rect(
frame_location, self.rect.size)))
def update(self):
if pygame.sprite.spritecollideany(self, bullets, pygame.sprite.collide_mask):
self.life -= 1
if self.life > 0 and self.rect.y <= height:
self.rect = self.rect.move(0, 1)
self.count += 1
if self.count % 7 == 0:
self.cur_frame = (self.cur_frame + 1) % len(self.frames)
self.image = self.frames[self.cur_frame]
else:
self.kill()
def except_hook(cls, exception, traceback):
sys.__excepthook__(cls, exception, traceback)
if __name__ == '__main__':
pygame.init()
size = width, height = 500, 700 # other parameters may be set in the main game
screen = pygame.display.set_mode(size)
clock = pygame.time.Clock()
fps = 60
MYEVENTTYPE = pygame.USEREVENT + 1
pygame.time.set_timer(MYEVENTTYPE, 3000)
for _ in range(random.randrange(1, 4)):
enemies.add(Meteor())
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == MYEVENTTYPE: # every 3000 ms new enemies are created
for _ in range(random.randrange(1, 4)):
enemies.add(Meteor())
screen.fill(pygame.Color('blue')) # in the main game, there will be a background(animated?)
enemies.draw(screen)
enemies.update()
clock.tick(fps) | pygame.display.flip()
pygame.quit()
sys.excepthook = except_hook |
|
async_crud.py | import warnings
from typing import Union, Iterable, TextIO, Dict, Optional
import numpy as np
from ...clients.base import InputType, CallbackFnType
from ...enums import DataInputType
from ...helper import deprecated_alias
class AsyncCRUDFlowMixin:
"""The asynchronous version of the Mixin for CRUD in Flow"""
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def train(
self,
inputs: InputType,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Do training on the current Flow
:param inputs: An iterator of bytes. If not given, then you have to specify it in **kwargs**.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
warnings.warn(f'{self.train} is under heavy refactoring', FutureWarning)
async for r in self._get_client(**kwargs).train(
inputs, on_done, on_error, on_always, **kwargs
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def index_ndarray(
self,
array: 'np.ndarray',
axis: int = 0,
size: Optional[int] = None,
shuffle: bool = False,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Using numpy ndarray as the index source for the current Flow
:param array: the numpy ndarray data source
:param axis: iterate over that axis
:param size: the maximum number of the sub arrays
:param shuffle: shuffle the numpy data source beforehand
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_ndarray
async for r in self._get_client(**kwargs).index(
_input_ndarray(array, axis, size, shuffle),
on_done,
on_error,
on_always,
data_type=DataInputType.CONTENT,
**kwargs,
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def search_ndarray(
self,
array: 'np.ndarray',
axis: int = 0,
size: Optional[int] = None,
shuffle: bool = False,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a numpy ndarray as the query source for searching on the current Flow
:param array: the numpy ndarray data source
:param axis: iterate over that axis
:param size: the maximum number of the sub arrays
:param shuffle: shuffle the numpy data source beforehand
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_ndarray
async for r in self._get_client(**kwargs).search(
_input_ndarray(array, axis, size, shuffle),
on_done,
on_error,
on_always,
data_type=DataInputType.CONTENT,
**kwargs,
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def index_lines(
self,
lines: Optional[Union[Iterable[str], TextIO]] = None,
filepath: Optional[str] = None,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
read_mode: str = 'r',
line_format: str = 'json',
field_resolver: Optional[Dict[str, str]] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a list of lines as the index source for indexing on the current Flow
:param lines: a list of strings, each is considered as a document
:param filepath: a text file that each line contains a document
:param size: the maximum number of the documents
:param sampling_rate: the sampling rate between [0, 1]
:param read_mode: specifies the mode in which the file
is opened. 'r' for reading in text mode, 'rb' for reading in binary
:param line_format: the format of each line: ``json`` or ``csv``
:param field_resolver: a map from field names defined in ``document`` (JSON, dict) to the field
names defined in Protobuf. This is only used when the given ``document`` is
a JSON string or a Python dict.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_lines
async for r in self._get_client(**kwargs).index(
_input_lines(
lines,
filepath,
size=size,
sampling_rate=sampling_rate,
read_mode=read_mode,
line_format=line_format,
field_resolver=field_resolver,
),
on_done,
on_error,
on_always,
data_type=DataInputType.AUTO,
**kwargs,
):
yield r
async def | (
self,
lines: Union[Iterable[str], TextIO],
field_resolver: Dict[str, str] = None,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a list of lines as the index source for indexing on the current Flow
:param lines: a list of strings, each is considered as a document
:param size: the maximum number of the documents
:param sampling_rate: the sampling rate between [0, 1]
:param field_resolver: a map from field names defined in ``document`` (JSON, dict) to the field
names defined in Protobuf. This is only used when the given ``document`` is
a JSON string or a Python dict.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_csv
async for r in self._get_client(**kwargs).index(
_input_csv(
lines,
size=size,
sampling_rate=sampling_rate,
field_resolver=field_resolver,
),
on_done,
on_error,
on_always,
data_type=DataInputType.AUTO,
**kwargs,
):
yield r
async def index_ndjson(
self,
lines: Union[Iterable[str], TextIO],
field_resolver: Optional[Dict[str, str]] = None,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a list of lines as the index source for indexing on the current Flow
:param lines: a list of strings, each is considered as a document
:param size: the maximum number of the documents
:param sampling_rate: the sampling rate between [0, 1]
:param field_resolver: a map from field names defined in ``document`` (JSON, dict) to the field
names defined in Protobuf. This is only used when the given ``document`` is
a JSON string or a Python dict.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_ndjson
async for r in self._get_client(**kwargs).index(
_input_ndjson(
lines,
size=size,
sampling_rate=sampling_rate,
field_resolver=field_resolver,
),
on_done,
on_error,
on_always,
data_type=DataInputType.AUTO,
**kwargs,
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def index_files(
self,
patterns: Union[str, Iterable[str]],
recursive: bool = True,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
read_mode: Optional[str] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a set of files as the index source for indexing on the current Flow
:param patterns: The pattern may contain simple shell-style wildcards, e.g. '\*.py', '[\*.zip, \*.gz]'
:param recursive: If recursive is true, the pattern '**' will match any files and
zero or more directories and subdirectories.
:param size: the maximum number of the files
:param sampling_rate: the sampling rate between [0, 1]
:param read_mode: specifies the mode in which the file
is opened. 'r' for reading in text mode, 'rb' for reading in binary mode
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_files
async for r in self._get_client(**kwargs).index(
_input_files(patterns, recursive, size, sampling_rate, read_mode),
on_done,
on_error,
on_always,
data_type=DataInputType.CONTENT,
**kwargs,
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def search_files(
self,
patterns: Union[str, Iterable[str]],
recursive: bool = True,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
read_mode: Optional[str] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a set of files as the query source for searching on the current Flow
:param patterns: The pattern may contain simple shell-style wildcards, e.g. '\*.py', '[\*.zip, \*.gz]'
:param recursive: If recursive is true, the pattern '**' will match any files and
zero or more directories and subdirectories.
:param size: the maximum number of the files
:param sampling_rate: the sampling rate between [0, 1]
:param read_mode: specifies the mode in which the file
is opened. 'r' for reading in text mode, 'rb' for reading in binary mode
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_files
async for r in self._get_client(**kwargs).search(
_input_files(patterns, recursive, size, sampling_rate, read_mode),
on_done,
on_error,
on_always,
data_type=DataInputType.CONTENT,
**kwargs,
):
yield r
async def search_ndjson(
self,
lines: Union[Iterable[str], TextIO],
field_resolver: Optional[Dict[str, str]] = None,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a list of files as the query source for searching on the current Flow
:param lines: a list of strings, each is considered as a document
:param size: the maximum number of the documents
:param sampling_rate: the sampling rate between [0, 1]
:param field_resolver: a map from field names defined in ``document`` (JSON, dict) to the field
names defined in Protobuf. This is only used when the given ``document`` is
a JSON string or a Python dict.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_ndjson
async for r in self._get_client(**kwargs).search(
_input_ndjson(
lines,
size=size,
sampling_rate=sampling_rate,
field_resolver=field_resolver,
),
on_done,
on_error,
on_always,
data_type=DataInputType.AUTO,
**kwargs,
):
yield r
async def search_csv(
self,
lines: Union[Iterable[str], TextIO],
field_resolver: Optional[Dict[str, str]] = None,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a list of lines as the index source for indexing on the current Flow
:param lines: a list of strings, each is considered as a document
:param size: the maximum number of the documents
:param sampling_rate: the sampling rate between [0, 1]
:param field_resolver: a map from field names defined in ``document`` (JSON, dict) to the field
names defined in Protobuf. This is only used when the given ``document`` is
a JSON string or a Python dict.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_csv
async for r in self._get_client(**kwargs).search(
_input_csv(
lines,
size=size,
sampling_rate=sampling_rate,
field_resolver=field_resolver,
),
on_done,
on_error,
on_always,
data_type=DataInputType.AUTO,
**kwargs,
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def search_lines(
self,
lines: Optional[Union[Iterable[str], TextIO]] = None,
filepath: Optional[str] = None,
size: Optional[int] = None,
sampling_rate: Optional[float] = None,
read_mode: str = 'r',
line_format: str = 'json',
field_resolver: Optional[Dict[str, str]] = None,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Use a list of files as the query source for searching on the current Flow
:param filepath: a text file that each line contains a document
:param lines: a list of strings, each is considered as a document
:param size: the maximum number of the documents
:param sampling_rate: the sampling rate between [0, 1]
:param read_mode: specifies the mode in which the file
is opened. 'r' for reading in text mode, 'rb' for reading in binary
:param line_format: the format of each line: ``json`` or ``csv``
:param field_resolver: a map from field names defined in ``document`` (JSON, dict) to the field
names defined in Protobuf. This is only used when the given ``document`` is
a JSON string or a Python dict.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
from ...clients.sugary_io import _input_lines
async for r in self._get_client(**kwargs).search(
_input_lines(
lines,
filepath,
size=size,
sampling_rate=sampling_rate,
read_mode=read_mode,
line_format=line_format,
field_resolver=field_resolver,
),
on_done,
on_error,
on_always,
data_type=DataInputType.CONTENT,
**kwargs,
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def index(
self,
inputs: InputType,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Do indexing on the current Flow
It will start a :py:class:`CLIClient` and call :py:func:`index`.
:param inputs: An iterator of bytes. If not given, then you have to specify it in **kwargs**.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
async for r in self._get_client(**kwargs).index(
inputs, on_done, on_error, on_always, **kwargs
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def update(
self,
inputs: InputType,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Do updates on the current Flow
It will start a :py:class:`CLIClient` and call :py:func:`update`.
:param inputs: An iterator of bytes. If not given, then you have to specify it in **kwargs**.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
async for r in self._get_client(**kwargs).update(
inputs, on_done, on_error, on_always, **kwargs
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def delete(
self,
ids: Iterable[str],
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Do deletion on the current Flow
:param ids: An iterable of ids
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
async for r in self._get_client(**kwargs).delete(
ids, on_done, on_error, on_always, **kwargs
):
yield r
@deprecated_alias(
input_fn=('inputs', 0),
buffer=('inputs', 1),
callback=('on_done', 1),
output_fn=('on_done', 1),
)
async def search(
self,
inputs: InputType,
on_done: CallbackFnType = None,
on_error: CallbackFnType = None,
on_always: CallbackFnType = None,
**kwargs,
):
"""Do searching on the current Flow
It will start a :py:class:`CLIClient` and call :py:func:`search`.
:param inputs: An iterator of bytes. If not given, then you have to specify it in **kwargs**.
:param on_done: the function to be called when the :class:`Request` object is resolved.
:param on_error: the function to be called when the :class:`Request` object is rejected.
:param on_always: the function to be called when the :class:`Request` object is either resolved or rejected.
:param kwargs: accepts all keyword arguments of `jina client` CLI
:yields: results
"""
async for r in self._get_client(**kwargs).search(
inputs, on_done, on_error, on_always, **kwargs
):
yield r
| index_csv |
component_test.go | // Copyright 2020 The Lokomotive Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dex_test
import (
"testing"
"github.com/kinvolk/lokomotive/pkg/components"
"github.com/kinvolk/lokomotive/pkg/components/util"
)
const name = "dex"
//nolint:funlen
func TestRenderManifest(t *testing.T) | {
tests := []struct {
desc string
hcl string
wantErr bool
}{
{
desc: "Valid config",
hcl: `
component "dex" {
ingress_host = "foo"
issuer_host = "bar"
connector "github" {
id = "github"
name = "Github"
config {
client_id = "clientid"
client_secret = "clientsecret"
redirect_uri = "redirecturi"
team_name_field = "slug"
org {
name = "kinvolk"
teams = [
"lokomotive-developers",
]
}
}
}
static_client {
name = "gangway"
id = "gangway id"
secret = "gangway secret"
redirect_uris = ["redirecturis"]
}
}
`,
},
{
desc: "invalid config",
hcl: `
component "dex" {
ingress_host = "NodePort"
}
`,
wantErr: true,
},
}
for _, tc := range tests {
b, d := util.GetComponentBody(tc.hcl, name)
if d != nil {
t.Errorf("%s - Error getting component body: %v", tc.desc, d)
}
c, err := components.Get(name)
if err != nil {
t.Fatalf("failed getting component: %v", err)
}
d = c.LoadConfig(b, nil)
if !tc.wantErr && d.HasErrors() {
t.Errorf("%s - Valid config should not return error, got: %s", tc.desc, d)
}
if tc.wantErr && !d.HasErrors() {
t.Errorf("%s - Wrong config should have returned an error", tc.desc)
}
m, err := c.RenderManifests()
if err != nil {
t.Errorf("%s - Rendering manifests with valid config should succeed, got: %s", tc.desc, err)
}
if len(m) == 0 {
t.Errorf("%s - Rendered manifests shouldn't be empty", tc.desc)
}
}
} |
|
0008_auto_20170530_1548.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import pipedrive.fields
class | (migrations.Migration):
dependencies = [
('pipedrive', '0007_auto_20170519_0052'),
]
operations = [
migrations.AddField(
model_name='deal',
name='stage',
field=models.ForeignKey(to_field=b'external_id', blank=True, to='pipedrive.Stage', null=True),
),
migrations.AddField(
model_name='deal',
name='status',
field=pipedrive.fields.TruncatingCharField(max_length=500, null=True, blank=True),
),
]
| Migration |
gpio_blinkled.rs | // Copyright (c) 2017-2019 Rene van der Meer
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
// gpio_blinkled.rs - Blinks an LED connected to a GPIO pin in a loop.
//
// Remember to add a resistor of an appropriate value in series, to prevent
// exceeding the maximum current rating of the GPIO pin and the LED.
//
// Interrupting the process by pressing Ctrl-C causes the application to exit
// immediately without resetting the pin's state, so the LED might stay lit.
// Check out the gpio_blinkled_signals.rs example to learn how to properly
// handle incoming signals to prevent an abnormal termination.
use std::error::Error;
use std::thread;
use std::time::Duration;
use rppal::gpio::Gpio;
// Gpio uses BCM pin numbering. BCM GPIO 23 is tied to physical pin 16.
const GPIO_LED: u8 = 23;
fn | () -> Result<(), Box<dyn Error>> {
// Retrieve the GPIO pin and configure it as an output.
let mut pin = Gpio::new()?.get(GPIO_LED)?.into_output();
loop {
pin.toggle();
thread::sleep(Duration::from_millis(500));
}
}
| main |
markdownItTasklist.js | function attrSet(token, name, value) {
const index = token.attrIndex(name);
const attr = [name, value];
if (index < 0) {
token.attrPush(attr);
} else {
token.attrs[index] = attr;
}
}
| if (token.content
&& token.content.charCodeAt(0) === 0x5b /* [ */
&& token.content.charCodeAt(2) === 0x5d /* ] */
&& token.content.charCodeAt(3) === 0x20 /* space */
&& token.type === 'inline'
&& tokens[i - 1].type === 'paragraph_open'
&& tokens[i - 2].type === 'list_item_open'
) {
const cross = token.content[1].toLowerCase();
if (cross === ' ' || cross === 'x') {
const checkbox = new Token('html_inline', '', 0);
if (cross === ' ') {
checkbox.content = '<span class="task-list-item-checkbox" type="checkbox">☐</span>';
} else {
checkbox.content = '<span class="task-list-item-checkbox checked" type="checkbox">☑</span>';
}
token.children.unshift(checkbox);
token.children[1].content = token.children[1].content.slice(3);
token.content = token.content.slice(3);
attrSet(tokens[i - 2], 'class', 'task-list-item');
}
}
}
});
}; | module.exports = (md) => {
md.core.ruler.after('inline', 'tasklist', ({ tokens, Token }) => {
for (let i = 2; i < tokens.length; i += 1) {
const token = tokens[i]; |
lib.rs | //! # $name$
//!
//! - [`$name_pascal_case$::Config`](./trait.Config.html)
//!
//! ## Overview
//!
//! $param.description$
//!
//! ## Interface
//!
//! ### Dispatchable Functions
//!
//! * `$param.method1_snake_case$` -
//! * `$param.method2_snake_case$` -
//! * `$param.method3_snake_case$` -
//!
#![cfg_attr(not(feature = "std"), no_std)]
use frame_support::{
dispatch::DispatchError,
ensure,
traits::{Currency, EnsureOrigin, Get, OnUnbalanced, ReservableCurrency, UnixTime},
};
use frame_system::ensure_signed;
use sp_runtime::RuntimeDebug;
use sp_runtime::{
traits::{StaticLookup, Zero},
SaturatedConversion,
};
use sp_std::{fmt::Debug, prelude::*, vec};
pub use pallet::*;
#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;
pub mod weights;
pub use weights::WeightInfo;
use codec::{Decode, Encode, HasCompact};
type $param.object_name_pascal_case$Id = u32;
#[frame_support::pallet]
pub mod pallet {
use super::*; |
#[pallet::pallet]
#[pallet::generate_store(pub(super) trait Store)]
pub struct Pallet<T>(_);
#[pallet::config]
pub trait Config: frame_system::Config {
/// The overarching event type.
type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
/// The origin which may forcibly set or remove a name. Root can always do this.
type ForceOrigin: EnsureOrigin<Self::Origin>;
/// Min $param.object_name_lower_case$ name length
type Min$param.object_name_pascal_case$NameLength: Get<usize>;
/// Max $param.object_name_lower_case$ name length
type Max$param.object_name_pascal_case$NameLength: Get<usize>;
/// Weight information
type WeightInfo: WeightInfo;
}
#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)]
pub struct $param.object_name_pascal_case$<AccountId: Encode + Decode + Clone + Debug + Eq + PartialEq> {
/// $param.object_name$ name
name: Vec<u8>,
/// owner of the $param.object_name$
owner: AccountId,
}
#[pallet::error]
pub enum Error<T> {
/// The $param.object_name$ already exists
AlreadyExists,
/// Name too long
TooLong,
/// Name too short
TooShort,
/// $param.object_name$ doesn't exist.
NotExists,
/// Origin has no authorization to do this operation
PermissionDenied,
/// ID already exists
IdAlreadyExists,
/// Unknown error occurred
Unknown,
}
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
#[pallet::metadata(T::AccountId = "AccountId", T::Balance = "Balance", $param.object_name_pascal_case$Id = "$param.object_name_pascal_case$Id")]
pub enum Event<T: Config> {
/// Some $param.object_name$ added inside the system.
$param.object_name_pascal_case$Added($param.object_name_pascal_case$Id, T::AccountId),
/// When a $param.object_name$ is deleted
$param.object_name_pascal_case$Deleted($param.object_name_pascal_case$Id)
}
/// Index of id -> data
#[pallet::storage]
pub type $param.object_name_pascal_case$s<T: Config> =
StorageMap<_, Blake2_128Concat, $param.object_name_pascal_case$Id, $param.object_name_pascal_case$<T::AccountId>>;
#[pallet::storage]
pub type $param.object_name_pascal_case$Link<T: Config> = StorageMap<
_,
Blake2_128Concat,
$param.object_name_pascal_case$Id,
u32, // change me
>;
#[pallet::storage]
pub type $param.object_name_pascal_case$IdIndex<T> = StorageValue<_, u32>;
/// $name_pascal_case$ module declaration.
// pub struct Module<T: Config> for enum Call where origin: T::Origin {
#[pallet::call]
impl<T: Config> Pallet<T> {
/// Add new object.
///
/// The dispatch origin for this call must be _Signed_.
///
/// # <weight>
/// # </weight>
#[pallet::weight(T::WeightInfo::$param.method1_snake_case$())]
fn $param.method1_snake_case$(
origin: OriginFor<T>,
name: Vec<u8>,
owner: <T::Lookup as StaticLookup>::Source,
) -> DispatchResultWithPostInfo {
let _origin = T::ForceOrigin::ensure_origin(origin)?;
ensure!(
name.len() >= T::Min$param.object_name_pascal_case$NameLength::get(),
Error::<T>::TooShort
);
ensure!(
name.len() <= T::Max$param.object_name_pascal_case$NameLength::get(),
Error::<T>::TooLong
);
let id = Self::next_id();
ensure!(
!$param.object_name_pascal_case$s::<T>::contains_key(id),
Error::<T>::IdAlreadyExists
);
let owner = T::Lookup::lookup(owner)?;
$param.object_name_pascal_case$s::<T>::insert(
id as $param.object_name_pascal_case$Id,
$param.object_name_pascal_case$ {
name: name.clone(),
owner: owner.clone(),
},
);
Self::deposit_event(Event::$param.object_name_pascal_case$Added(id, owner));
Ok(().into())
}
/// $param.method2$
///
#[pallet::weight(100_000)]
fn $param.method2_snake_case$(origin: OriginFor<T>) -> DispatchResultWithPostInfo {
Ok(().into())
}
/// $param.method3$
///
#[pallet::weight(100_000)]
fn $param.method3_snake_case$(origin: OriginFor<T>) -> DispatchResultWithPostInfo {
Ok(().into())
}
}
// ----------------------------------------------------------------
// HOOKS
// ----------------------------------------------------------------
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
// fn offchain_worker(n: T::BlockNumber){
// // @TODO(you): Your off-chain logic here
// }
}
// -------------------------------------------------------------------
// GENESIS CONFIGURATION
// -------------------------------------------------------------------
// The genesis config type.
#[pallet::genesis_config]
pub struct GenesisConfig<T: Config> {
pub dummy: u32,
pub bar: Vec<(T::AccountId, u32)>,
pub foo: u32,
}
// The default value for the genesis config type.
#[cfg(feature = "std")]
impl<T: Config> Default for GenesisConfig<T> {
fn default() -> Self {
Self {
dummy: Default::default(),
bar: Default::default(),
foo: Default::default(),
}
}
}
// The build of genesis for the pallet.
#[pallet::genesis_build]
impl<T: Config> GenesisBuild<T> for GenesisConfig<T> {
fn build(&self) {
// <Dummy<T>>::put(&self.dummy);
// for (a, b) in &self.bar {
// <Bar<T>>::insert(a, b);
// }
// <Foo<T>>::put(&self.foo);
}
}
}
/// The main implementation of this $name_pascal_case$ pallet.
impl<T: Config> Pallet<T> {
/// Get the $name$ detail
pub fn $name_snake_case$(id: $param.object_name_pascal_case$Id) -> Option<$param.object_name_pascal_case$<T::AccountId>> {
$param.object_name_pascal_case$s::<T>::get(id)
}
/// Get next $name$ ID
pub fn next_id() -> u32 {
let next_id = <$param.object_name_pascal_case$IdIndex<T>>::try_get().unwrap_or(0).saturating_add(1);
<$param.object_name_pascal_case$IdIndex<T>>::put(next_id);
next_id
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate as pallet_$name_snake_case$;
use frame_support::{assert_noop, assert_ok, ord_parameter_types, parameter_types};
use frame_system::EnsureSignedBy;
use sp_core::H256;
use sp_runtime::{
testing::Header,
traits::{BlakeTwo256, IdentityLookup},
};
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
type Block = frame_system::mocking::MockBlock<Test>;
frame_support::construct_runtime!(
pub enum Test where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic,
{
System: frame_system::{Module, Call, Config, Storage, Event<T>},
Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>},
$name_pascal_case$: pallet_$name_snake_case$::{Module, Call, Storage, Event<T>},
}
);
parameter_types! {
pub const BlockHashCount: u64 = 250;
pub BlockWeights: frame_system::limits::BlockWeights =
frame_system::limits::BlockWeights::simple_max(1024);
}
impl frame_system::Config for Test {
type BaseCallFilter = ();
type BlockWeights = ();
type BlockLength = ();
type DbWeight = ();
type Origin = Origin;
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Call = Call;
type Hashing = BlakeTwo256;
type AccountId = u64;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type Event = Event;
type BlockHashCount = BlockHashCount;
type Version = ();
type PalletInfo = PalletInfo;
type AccountData = pallet_balances::AccountData<u64>;
type OnNewAccount = ();
type OnKilledAccount = ();
type SystemWeightInfo = ();
type SS58Prefix = ();
}
parameter_types! {
pub const ExistentialDeposit: u64 = 1;
}
impl pallet_balances::Config for Test {
type MaxLocks = ();
type Balance = u64;
type Event = Event;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = System;
type WeightInfo = ();
}
parameter_types! {
pub const Min$param.object_name_pascal_case$NameLength: usize = 3;
pub const Max$param.object_name_pascal_case$NameLength: usize = 16;
}
ord_parameter_types! {
pub const One: u64 = 1;
}
impl Config for Test {
type Event = Event;
type ForceOrigin = EnsureSignedBy<One, u64>;
type Min$param.object_name_pascal_case$NameLength = Min$param.object_name_pascal_case$NameLength;
type Max$param.object_name_pascal_case$NameLength = Max$param.object_name_pascal_case$NameLength;
type WeightInfo = weights::SubstrateWeight<Test>;
}
fn new_test_ext() -> sp_io::TestExternalities {
let mut t = frame_system::GenesisConfig::default()
.build_storage::<Test>()
.unwrap();
pallet_balances::GenesisConfig::<Test> {
balances: vec![(1, 10), (2, 10)],
}
.assimilate_storage(&mut t)
.unwrap();
t.into()
}
#[test]
fn force_origin_able_to_create_$param.object_name_snake_case$() {
new_test_ext().execute_with(|| {
assert_ok!($name_pascal_case$::$param.method1_snake_case$(
Origin::signed(1), b"ORG1".to_vec(), 2));
});
}
#[test]
fn non_force_origin_cannot_create_$param.object_name_snake_case$() {
new_test_ext().execute_with(|| {
assert_noop!($name_pascal_case$::$param.method1_snake_case$(
Origin::signed(2), b"ORG1".to_vec(), 2
), DispatchError::BadOrigin);
});
}
} | use frame_support::{dispatch::DispatchResultWithPostInfo, pallet_prelude::*};
use frame_system::pallet_prelude::*; |