file_name | prefix | suffix | middle
---|---|---|---
stringlengths 3–137 | stringlengths 0–918k | stringlengths 0–962k | stringlengths 0–812k
show-edge-login.js
import { loginWithEdge } from "modules/auth/actions/login-with-edge";
import {
updateAuthStatus,
EDGE_CONTEXT,
EDGE_LOADING
} from "modules/auth/actions/update-auth-status";
import {
selectEdgeContextState,
selectEdgeLoadingState
} from "src/select-state";
import { makeEdgeUiContext } from "edge-login-ui-web";
export const BEGIN_EDGE_LOADING = "BEGIN_EDGE_LOADING";
export const UPDATE_EDGE_CONTEXT = "UPDATE_EDGE_CONTEXT";
export const showEdgeLogin = history => (dispatch, getState) => {
const state = getState();
const edgeContext = selectEdgeContextState(state);
const edgeLoading = selectEdgeLoadingState(state);
if (edgeContext) {
edgeContext.on("login", edgeAccount =>
dispatch(loginWithEdge(edgeAccount, history))
);
edgeContext.showLoginWindow();
} else if (!edgeLoading) {
dispatch(updateAuthStatus(EDGE_LOADING, true));
makeEdgeUiContext({
apiKey: "e239ec875955ec7474628a1dc3d449c8ea8e1b48",
appId: "net.augur.app",
hideKeys: true,
vendorName: "Augur",
vendorImageUrl:
"https://airbitz.co/go/wp-content/uploads/2016/08/augur_logo_100.png"
}).then(edgeContext => {
dispatch(updateAuthStatus(EDGE_LOADING, false));
dispatch(updateAuthStatus(EDGE_CONTEXT, edgeContext));
edgeContext.on("login", edgeAccount =>
dispatch(loginWithEdge(edgeAccount, history))
);
edgeContext.showLoginWindow();
});
}
};
styler.js
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { contrastBorder, listFocusBackground, listFocusForeground, listActiveSelectionBackground, listActiveSelectionForeground, listInactiveSelectionForeground, listInactiveSelectionBackground, listInactiveFocusBackground, listHoverBackground, listHoverForeground, listDropBackground, widgetShadow, activeContrastBorder, badgeBackground, badgeForeground, menuForeground, menuBackground, menuSelectionForeground, menuSelectionBackground, menuSelectionBorder, menuBorder, menuSeparatorBackground, listFilterWidgetOutline, listFilterWidgetNoMatchesOutline, listFilterWidgetBackground, treeIndentGuidesStroke, resolveColorValue, listFocusOutline, listInactiveFocusOutline, tableColumnsBorder } from './colorRegistry.js';
export function computeStyles(theme, styleMap) {
const styles = Object.create(null);
for (let key in styleMap) {
const value = styleMap[key];
if (value) {
styles[key] = resolveColorValue(value, theme);
}
}
return styles;
}
export function attachStyler(themeService, styleMap, widgetOrCallback) {
function applyStyles(theme) {
const styles = computeStyles(themeService.getColorTheme(), styleMap);
if (typeof widgetOrCallback === 'function') {
widgetOrCallback(styles);
}
else {
widgetOrCallback.style(styles);
}
}
applyStyles(themeService.getColorTheme());
return themeService.onDidColorThemeChange(applyStyles);
}
export function attachBadgeStyler(widget, themeService, style) {
return attachStyler(themeService, {
badgeBackground: (style === null || style === void 0 ? void 0 : style.badgeBackground) || badgeBackground,
badgeForeground: (style === null || style === void 0 ? void 0 : style.badgeForeground) || badgeForeground,
badgeBorder: contrastBorder
}, widget);
}
export function attachListStyler(widget, themeService, overrides) {
return attachStyler(themeService, Object.assign(Object.assign({}, defaultListStyles), (overrides || {})), widget);
}
export const defaultListStyles = {
listFocusBackground,
listFocusForeground,
listFocusOutline,
listActiveSelectionBackground,
listActiveSelectionForeground,
listFocusAndSelectionBackground: listActiveSelectionBackground,
listFocusAndSelectionForeground: listActiveSelectionForeground,
listInactiveSelectionBackground,
listInactiveSelectionForeground,
listInactiveFocusBackground,
listInactiveFocusOutline,
listHoverBackground,
listHoverForeground,
listDropBackground,
listSelectionOutline: activeContrastBorder,
listHoverOutline: activeContrastBorder,
listFilterWidgetBackground,
listFilterWidgetOutline,
listFilterWidgetNoMatchesOutline,
listMatchesShadow: widgetShadow,
treeIndentGuidesStroke,
tableColumnsBorder
};
export const defaultMenuStyles = {
shadowColor: widgetShadow,
borderColor: menuBorder,
foregroundColor: menuForeground,
backgroundColor: menuBackground,
selectionForegroundColor: menuSelectionForeground,
selectionBackgroundColor: menuSelectionBackground,
selectionBorderColor: menuSelectionBorder,
separatorColor: menuSeparatorBackground
};
export function attachMenuStyler(widget, themeService, style) {
return attachStyler(themeService, Object.assign(Object.assign({}, defaultMenuStyles), style), widget);
}
tx.go
package cli
import (
"github.com/cosmos/cosmos-sdk/client/context"
"github.com/cosmos/cosmos-sdk/client/utils"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
authcmd "github.com/cosmos/cosmos-sdk/x/auth/client/cli"
authtxb "github.com/cosmos/cosmos-sdk/x/auth/client/txbuilder"
"github.com/cosmos/cosmos-sdk/x/slashing"
"github.com/spf13/cobra"
)
// GetCmdUnjail implements the create unjail validator command.
func GetCmdUnjail(cdc *codec.Codec) *cobra.Command {
cmd := &cobra.Command{
Use: "unjail",
Args: cobra.NoArgs,
Short: "unjail validator previously jailed for downtime",
RunE: func(cmd *cobra.Command, args []string) error {
txBldr := authtxb.NewTxBuilderFromCLI().WithCodec(cdc)
cliCtx := context.NewCLIContext().
WithCodec(cdc).
WithAccountDecoder(authcmd.GetAccountDecoder(cdc))
valAddr, err := cliCtx.GetFromAddress()
if err != nil {
return err
}
msg := slashing.NewMsgUnjail(sdk.ValAddress(valAddr))
if cliCtx.GenerateOnly {
return utils.PrintUnsignedStdTx(txBldr, cliCtx, []sdk.Msg{msg}, false)
}
return utils.CompleteAndBroadcastTxCli(txBldr, cliCtx, []sdk.Msg{msg})
},
}
return cmd
}
tvm-build.rs
use structopt::StructOpt;
use tracing_subscriber;
use tvm_build::{self, build, BuildConfig, UserSettings};
#[derive(StructOpt, Debug)]
#[structopt()]
struct InstallCommand {
revision: String,
repository: Option<String>,
#[structopt(short, long)]
/// The directory to build TVM in.
output_path: Option<String>,
#[structopt(short, long)]
debug: bool,
#[structopt(short, long)]
clean: bool,
#[structopt(short, long)]
verbose: bool,
#[structopt(flatten)]
settings: UserSettings,
}
#[derive(StructOpt, Debug)]
#[structopt()]
struct UninstallCommand {
revision: String,
#[structopt(short, long)]
/// The directory that TVM was built in.
output_path: Option<String>
}
#[derive(StructOpt, Debug)]
#[structopt()]
struct VersionCommand {
revision: String,
}
#[derive(StructOpt, Debug)]
#[structopt(about = "A CLI for maintaining TVM installations.")]
enum TVMBuildArgs {
/// Install a revision of TVM on your machine.
Install(InstallCommand),
/// Remove a revision of TVM on your machine.
Uninstall(UninstallCommand),
/// Get the configuration of the version.
VersionConfig(VersionCommand),
}
fn main() -> anyhow::Result<()> {
tracing_subscriber::fmt::init();
let args = TVMBuildArgs::from_args();
match args {
TVMBuildArgs::Install(install_cmd) => {
let mut config = BuildConfig::default();
config.verbose = true;
config.branch = Some(install_cmd.revision);
config.clean = install_cmd.clean;
config.repository = install_cmd.repository;
config.verbose = install_cmd.verbose;
config.output_path = install_cmd.output_path;
config.settings = install_cmd.settings;
build(config)?;
Ok(())
}
TVMBuildArgs::Uninstall(uninstall_cmd) => {
tvm_build::uninstall(uninstall_cmd.revision, uninstall_cmd.output_path)?;
Ok(())
}
TVMBuildArgs::VersionConfig(version_cmd) => {
let config = tvm_build::version_config(version_cmd.revision)?;
println!("{}", serde_json::to_string(&config).unwrap());
Ok(())
}
}
}
tail_test.go
package dendrite
import (
"encoding/json"
"io/ioutil"
"os"
"path"
"testing"
"time"
)
type FixedTimeProvider struct{}
func (*FixedTimeProvider) Now() time.Time {
return time.Unix(1234567890, 0)
}
var parser Parser
var offsetFile string
var tail *Tail
var output chan Record
var line = "{\"_file\":{\"Type\":0,\"Treatment\":0,\"Value\":\"solr.txt\"},\"_group\":{\"Type\":0,\"Treatment\":0,\"Value\":\"foo\"},\"_hostname\":{\"Type\":0,\"Treatment\":0,\"Value\":\"host.local\"},\"_offset\":{\"Type\":1,\"Treatment\":0,\"Value\":0},\"_time\":{\"Type\":3,\"Treatment\":0,\"Value\":1234567890},\"line\":{\"Type\":0,\"Treatment\":0,\"Value\":\"INFO: [1234567898765] webapp=/solr path=/select params={start=0&q=*:*&wt=ruby&fq=type:User&rows=30} hits=3186235 status=0 QTime=1\"}}"
func _tail_init() {
StandardTimeProvider = new(FixedTimeProvider)
output = make(chan Record, 100)
offsetFile = path.Join(os.TempDir(), "test.txt")
_ = os.Remove(offsetFile)
parser = NewRegexpParser("host.local", "foo", "solr.txt", output, "(?P<line>.*[^\r])\r?\n", nil, 32768)
tail = NewTail(parser, -1, "testdata/solr.txt", offsetFile, 0)
}
func TestStartsAtZero(t *testing.T) {
_tail_init()
if tail.Offset() != 0 {
t.Error("initial offset wasn't zero")
}
}
func TestStartsAtOffset(t *testing.T) {
_tail_init()
ioutil.WriteFile(offsetFile, []byte("747\n"), 0777)
tail = NewTail(parser, -1, "testdata/solr.txt", offsetFile, 0)
if tail.Offset() != 747 {
t.Errorf("initial offset was %d, not 747", tail.Offset())
}
}
func TestReading(t *testing.T) {
_tail_init()
go tail.Poll()
rec := <-output
json, _ := json.Marshal(rec)
if string(json) != line {
t.Errorf("Oops, diff between\n %s\n %s", string(json), line)
}
}
func TestOffsetUpdated(t *testing.T) {
_tail_init()
go tail.Poll()
_ = <-output
if tail.Offset() == 0 {
t.Error("offset was zero")
}
}
color_test.rs
use egui::{color::*, widgets::color_picker::show_color, *};
use std::collections::HashMap;
const GRADIENT_SIZE: Vec2 = vec2(256.0, 24.0);
const BLACK: Color32 = Color32::BLACK;
const GREEN: Color32 = Color32::GREEN;
const RED: Color32 = Color32::RED;
const TRANSPARENT: Color32 = Color32::TRANSPARENT;
const WHITE: Color32 = Color32::WHITE;
#[cfg_attr(feature = "persistence", derive(serde::Deserialize, serde::Serialize))]
pub struct ColorTest {
#[cfg_attr(feature = "persistence", serde(skip))]
tex_mngr: TextureManager,
vertex_gradients: bool,
texture_gradients: bool,
srgb: bool,
}
impl Default for ColorTest {
fn default() -> Self {
Self {
tex_mngr: Default::default(),
vertex_gradients: true,
texture_gradients: true,
srgb: false,
}
}
}
impl epi::App for ColorTest {
fn name(&self) -> &str {
"🎨 Color test"
}
fn update(&mut self, ctx: &egui::CtxRef, frame: &mut epi::Frame<'_>) {
egui::CentralPanel::default().show(ctx, |ui| {
if frame.is_web() {
ui.colored_label(
RED,
"NOTE: The WebGL backend does NOT pass the color test."
);
ui.small("This is because WebGL does not support a linear framebuffer blending (not even WebGL2!).\nMaybe when WebGL3 becomes mainstream in 2030 the web can finally get colors right?");
ui.separator();
}
ScrollArea::auto_sized().show(ui, |ui| {
self.ui(ui, &mut Some(frame.tex_allocator()));
});
});
}
}
impl ColorTest {
pub fn ui(
&mut self,
ui: &mut Ui,
mut tex_allocator: &mut Option<&mut dyn epi::TextureAllocator>,
) {
ui.vertical_centered(|ui| {
ui.add(crate::__egui_github_link_file!());
});
ui.label("This is made to test that the egui painter backend is set up correctly, so that all colors are interpolated and blended in linear space with premultiplied alpha.");
ui.label("If everything is set up correctly, all groups of gradients will look uniform");
ui.checkbox(&mut self.vertex_gradients, "Vertex gradients");
ui.checkbox(&mut self.texture_gradients, "Texture gradients");
ui.checkbox(&mut self.srgb, "Show naive sRGBA horror");
ui.heading("sRGB color test");
ui.label("Use a color picker to ensure this color is (255, 165, 0) / #ffa500");
ui.scope(|ui| {
ui.spacing_mut().item_spacing.y = 0.0; // No spacing between gradients
let g = Gradient::one_color(Color32::from_rgb(255, 165, 0));
self.vertex_gradient(ui, "orange rgb(255, 165, 0) - vertex", WHITE, &g);
self.tex_gradient(
ui,
tex_allocator,
"orange rgb(255, 165, 0) - texture",
WHITE,
&g,
);
});
ui.separator();
ui.label("Test that vertex color times texture color is done in linear space:");
ui.scope(|ui| {
ui.spacing_mut().item_spacing.y = 0.0; // No spacing between gradients
let tex_color = Rgba::from_rgb(1.0, 0.25, 0.25);
let vertex_color = Rgba::from_rgb(0.5, 0.75, 0.75);
ui.horizontal(|ui| {
let color_size = ui.spacing().interact_size;
ui.label("texture");
show_color(ui, tex_color, color_size);
ui.label(" * ");
show_color(ui, vertex_color, color_size);
ui.label(" vertex color =");
});
{
let g = Gradient::one_color(Color32::from(tex_color * vertex_color));
self.vertex_gradient(ui, "Ground truth (vertices)", WHITE, &g);
self.tex_gradient(ui, tex_allocator, "Ground truth (texture)", WHITE, &g);
}
if let Some(tex_allocator) = &mut tex_allocator {
ui.horizontal(|ui| {
let g = Gradient::one_color(Color32::from(tex_color));
let tex = self.tex_mngr.get(*tex_allocator, &g);
let texel_offset = 0.5 / (g.0.len() as f32);
let uv =
Rect::from_min_max(pos2(texel_offset, 0.0), pos2(1.0 - texel_offset, 1.0));
ui.add(Image::new(tex, GRADIENT_SIZE).tint(vertex_color).uv(uv))
.on_hover_text(format!("A texture that is {} texels wide", g.0.len()));
ui.label("GPU result");
});
}
});
ui.separator();
// TODO: test color multiplication (image tint),
// to make sure vertex and texture color multiplication is done in linear space.
self.show_gradients(ui, tex_allocator, WHITE, (RED, GREEN));
if self.srgb {
ui.label("Notice the darkening in the center of the naive sRGB interpolation.");
}
ui.separator();
self.show_gradients(ui, tex_allocator, RED, (TRANSPARENT, GREEN));
ui.separator();
self.show_gradients(ui, tex_allocator, WHITE, (TRANSPARENT, GREEN));
if self.srgb {
ui.label(
"Notice how the linear blend stays green while the naive sRGBA interpolation looks gray in the middle.",
);
}
ui.separator();
// TODO: another ground truth where we do the alpha-blending against the background also.
// TODO: exactly the same thing, but with vertex colors (no textures)
self.show_gradients(ui, tex_allocator, WHITE, (TRANSPARENT, BLACK));
ui.separator();
self.show_gradients(ui, tex_allocator, BLACK, (TRANSPARENT, WHITE));
ui.separator();
ui.label("Additive blending: add more and more blue to the red background:");
self.show_gradients(
ui,
tex_allocator,
RED,
(TRANSPARENT, Color32::from_rgba_premultiplied(0, 0, 255, 0)),
);
ui.separator();
pixel_test(ui);
}
fn show_gradients(
&mut self,
ui: &mut Ui,
tex_allocator: &mut Option<&mut dyn epi::TextureAllocator>,
bg_fill: Color32,
(left, right): (Color32, Color32),
) {
let is_opaque = left.is_opaque() && right.is_opaque();
ui.horizontal(|ui| {
let color_size = ui.spacing().interact_size;
if !is_opaque {
ui.label("Background:");
show_color(ui, bg_fill, color_size);
}
ui.label("gradient");
show_color(ui, left, color_size);
ui.label("-");
show_color(ui, right, color_size);
});
ui.scope(|ui| {
ui.spacing_mut().item_spacing.y = 0.0; // No spacing between gradients
if is_opaque {
let g = Gradient::ground_truth_linear_gradient(left, right);
self.vertex_gradient(ui, "Ground Truth (CPU gradient) - vertices", bg_fill, &g);
self.tex_gradient(
ui,
tex_allocator,
"Ground Truth (CPU gradient) - texture",
bg_fill,
&g,
);
} else {
let g = Gradient::ground_truth_linear_gradient(left, right).with_bg_fill(bg_fill);
self.vertex_gradient(
ui,
"Ground Truth (CPU gradient, CPU blending) - vertices",
bg_fill,
&g,
);
self.tex_gradient(
ui,
tex_allocator,
"Ground Truth (CPU gradient, CPU blending) - texture",
bg_fill,
&g,
);
let g = Gradient::ground_truth_linear_gradient(left, right);
self.vertex_gradient(ui, "CPU gradient, GPU blending - vertices", bg_fill, &g);
self.tex_gradient(
ui,
tex_allocator,
"CPU gradient, GPU blending - texture",
bg_fill,
&g,
);
}
let g = Gradient::texture_gradient(left, right);
self.vertex_gradient(
ui,
"Triangle mesh of width 2 (test vertex decode and interpolation)",
bg_fill,
&g,
);
self.tex_gradient(
ui,
tex_allocator,
"Texture of width 2 (test texture sampler)",
bg_fill,
&g,
);
if self.srgb {
let g =
Gradient::ground_truth_bad_srgba_gradient(left, right).with_bg_fill(bg_fill);
self.vertex_gradient(
ui,
"Triangle mesh with naive sRGBA interpolation (WRONG)",
bg_fill,
&g,
);
self.tex_gradient(
ui,
tex_allocator,
"Naive sRGBA interpolation (WRONG)",
bg_fill,
&g,
);
}
});
}
fn tex_gradient(
&mut self,
ui: &mut Ui,
tex_allocator: &mut Option<&mut dyn epi::TextureAllocator>,
label: &str,
bg_fill: Color32,
gradient: &Gradient,
) {
if !self.texture_gradients {
return;
}
if let Some(tex_allocator) = tex_allocator {
ui.horizontal(|ui| {
let tex = self.tex_mngr.get(*tex_allocator, gradient);
let texel_offset = 0.5 / (gradient.0.len() as f32);
let uv = Rect::from_min_max(pos2(texel_offset, 0.0), pos2(1.0 - texel_offset, 1.0));
ui.add(Image::new(tex, GRADIENT_SIZE).bg_fill(bg_fill).uv(uv))
.on_hover_text(format!(
"A texture that is {} texels wide",
gradient.0.len()
));
ui.label(label);
});
}
}
fn vertex_gradient(&mut self, ui: &mut Ui, label: &str, bg_fill: Color32, gradient: &Gradient) {
if !self.vertex_gradients {
return;
}
ui.horizontal(|ui| {
vertex_gradient(ui, bg_fill, gradient).on_hover_text(format!(
"A triangle mesh that is {} vertices wide",
gradient.0.len()
));
ui.label(label);
});
}
}
fn vertex_gradient(ui: &mut Ui, bg_fill: Color32, gradient: &Gradient) -> Response {
use egui::epaint::*;
let (rect, response) = ui.allocate_at_least(GRADIENT_SIZE, Sense::hover());
if bg_fill != Default::default() {
let mut mesh = Mesh::default();
mesh.add_colored_rect(rect, bg_fill);
ui.painter().add(Shape::mesh(mesh));
}
{
let n = gradient.0.len();
assert!(n >= 2);
let mut mesh = Mesh::default();
for (i, &color) in gradient.0.iter().enumerate() {
let t = i as f32 / (n as f32 - 1.0);
let x = lerp(rect.x_range(), t);
mesh.colored_vertex(pos2(x, rect.top()), color);
mesh.colored_vertex(pos2(x, rect.bottom()), color);
if i < n - 1 {
let i = i as u32;
mesh.add_triangle(2 * i, 2 * i + 1, 2 * i + 2);
mesh.add_triangle(2 * i + 1, 2 * i + 2, 2 * i + 3);
}
}
ui.painter().add(Shape::mesh(mesh));
}
response
}
#[derive(Clone, Hash, PartialEq, Eq)]
struct Gradient(pub Vec<Color32>);
impl Gradient {
pub fn one_color(srgba: Color32) -> Self {
Self(vec![srgba, srgba])
}
pub fn texture_gradient(left: Color32, right: Color32) -> Self {
Self(vec![left, right])
}
pub fn ground_truth_linear_gradient(left: Color32, right: Color32) -> Self {
let left = Rgba::from(left);
let right = Rgba::from(right);
let n = 255;
Self(
(0..=n)
.map(|i| {
let t = i as f32 / n as f32;
Color32::from(lerp(left..=right, t))
})
.collect(),
)
}
/// This is how a bad person blends `sRGBA`
pub fn ground_truth_bad_srgba_gradient(left: Color32, right: Color32) -> Self {
let n = 255;
Self(
(0..=n)
.map(|i| {
let t = i as f32 / n as f32;
Color32::from_rgba_premultiplied(
lerp((left[0] as f32)..=(right[0] as f32), t).round() as u8, // Don't ever do this please!
lerp((left[1] as f32)..=(right[1] as f32), t).round() as u8, // Don't ever do this please!
lerp((left[2] as f32)..=(right[2] as f32), t).round() as u8, // Don't ever do this please!
lerp((left[3] as f32)..=(right[3] as f32), t).round() as u8, // Don't ever do this please!
)
})
.collect(),
)
}
/// Do premultiplied alpha-aware blending of the gradient on top of the fill color
pub fn with_bg_fill(self, bg: Color32) -> Self {
let bg = Rgba::from(bg);
Self(
self.0
.into_iter()
.map(|fg| {
let fg = Rgba::from(fg);
Color32::from(bg * (1.0 - fg.a()) + fg)
})
.collect(),
)
}
pub fn to_pixel_row(&self) -> Vec<Color32> {
self.0.clone()
}
}
#[derive(Default)]
struct TextureManager(HashMap<Gradient, TextureId>);
impl TextureManager {
fn get(
&mut self,
tex_allocator: &mut dyn epi::TextureAllocator,
gradient: &Gradient,
) -> TextureId {
*self.0.entry(gradient.clone()).or_insert_with(|| {
let pixels = gradient.to_pixel_row();
let width = pixels.len();
let height = 1;
tex_allocator.alloc_srgba_premultiplied((width, height), &pixels)
})
}
}
fn pixel_test(ui: &mut Ui) {
ui.label("Each subsequent square should be one physical pixel larger than the previous. They should be exactly one physical pixel apart. They should be perfectly aligned to the pixel grid.");
let pixels_per_point = ui.ctx().pixels_per_point();
let num_squares: u32 = 8;
let size_pixels = Vec2::new(
((num_squares + 1) * (num_squares + 2) / 2) as f32,
num_squares as f32,
);
let size_points = size_pixels / pixels_per_point + Vec2::splat(2.0);
let (response, painter) = ui.allocate_painter(size_points, Sense::hover());
let mut cursor_pixel = Pos2::new(
response.rect.min.x * pixels_per_point,
response.rect.min.y * pixels_per_point,
)
.ceil();
for size in 1..=num_squares {
let rect_points = Rect::from_min_size(
Pos2::new(
cursor_pixel.x / pixels_per_point,
cursor_pixel.y / pixels_per_point,
),
Vec2::splat(size as f32) / pixels_per_point,
);
painter.rect_filled(rect_points, 0.0, egui::Color32::WHITE);
cursor_pixel.x += (1 + size) as f32;
}
}
mouse_position.rs
use specs::prelude::*;
use specs_derive::*;
use crate::atlas::prelude::*;
#[derive(Component, Clone)] // NotConvertSaveload
pub struct MousePositionComponent {
pub position: Point,
}
impl MousePositionComponent {
pub fn init() -> MousePositionComponent {
MousePositionComponent { position: Point::init(0, 0) }
}
}
utils.rs
// Copyright 2019 Liebi Technologies.
// This file is part of Bifrost.
// Bifrost is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Bifrost is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Bifrost. If not, see <http://www.gnu.org/licenses/>.
use hex::FromHexError;
use primitive_types::U256;
use primitives::{blake2_256, H256 as Hash, twox_128};
fn storage_key_hash_vec(module: &str, storage_key_name: &str, param: Option<Vec<u8>>) -> Vec<u8> {
let mut key = [module, storage_key_name].join(" ").as_bytes().to_vec();
match param {
Some(par) => {
key.extend(&par);
blake2_256(&key).to_vec()
},
_ => {
twox_128(&key).to_vec()
},
}
}
pub fn storage_key_hash(module: &str, storage_key_name: &str, param: Option<Vec<u8>>) -> String {
let mut keyhash_str = hex::encode(
storage_key_hash_vec(module, storage_key_name, param));
keyhash_str.insert_str(0, "0x");
keyhash_str
}
pub fn storage_key_hash_double_map(module: &str, storage_key_name: &str, first: Vec<u8>, second: Vec<u8>) -> String {
let mut keyhash = storage_key_hash_vec(module, storage_key_name, Some(first));
keyhash.extend(&blake2_256(&second).to_vec());
let mut keyhash_str = hex::encode(keyhash);
keyhash_str.insert_str(0, "0x");
keyhash_str
}
pub fn hexstr_to_vec(hexstr: String) -> Result<Vec<u8>, FromHexError> {
let hexstr = hexstr
.trim_matches('\"')
.to_string()
.trim_start_matches("0x")
.to_string();
match hexstr.as_str() {
"null" => Ok(vec!(0u8)),
_ => hex::decode(&hexstr),
}
}
pub fn hexstr_to_u64(hexstr: String) -> Result<u64, FromHexError> {
let unhex = hexstr_to_vec(hexstr);
match unhex {
Ok(vec) => {
match vec.len() {
1|2|4|8 => {
let mut h: [u8; 8] = Default::default();
h[..vec.len()].copy_from_slice(&vec);
Ok(u64::from_le_bytes(h))
},
_ => {
match vec.iter().sum() {
0 => Ok(0u64),
_ => Err(hex::FromHexError::InvalidStringLength)
}
}
}},
Err(err) => Err(err),
}
}
pub fn hexstr_to_u256(hexstr: String) -> Result<U256, FromHexError> {
let unhex = hexstr_to_vec(hexstr);
match unhex {
Ok(vec) => {
match vec.len() {
1|2|4|8|16|32 => {
Ok(U256::from_little_endian(&vec[..]))
},
_ => {
match vec.iter().sum() {
0 => Ok(U256::from(0)),
_ => Err(hex::FromHexError::InvalidStringLength)
}
}
}},
Err(err) => Err(err),
}
}
pub fn hexstr_to_hash(hexstr: String) -> Result<Hash, FromHexError> {
let unhex = hexstr_to_vec(hexstr);
match unhex {
Ok(vec) => {
match vec.len() {
32 => {
let mut gh: [u8; 32] = Default::default();
gh.copy_from_slice(&vec[..]);
Ok(Hash::from(gh))
},
_ => Err(hex::FromHexError::InvalidStringLength)
}},
Err(err) => Err(err),
}
}
#[cfg(test)]
mod tests {
// Note this useful idiom: importing names from outer (for mod tests) scope.
use super::*;
#[test]
fn test_hextstr_to_vec() {
assert_eq!(hexstr_to_vec("0x01020a".to_string()), Ok(vec!(1,2,10)));
assert_eq!(hexstr_to_vec("null".to_string()), Ok(vec!(0u8)));
assert_eq!(hexstr_to_vec("0x0q".to_string()), Err(hex::FromHexError::InvalidHexCharacter{c: 'q', index:1}));
}
#[test]
fn test_hextstr_to_u64() {
assert_eq!(hexstr_to_u64("0x0100000000000000".to_string()), Ok(1u64));
assert_eq!(hexstr_to_u64("0x01000000".to_string()), Ok(1u64));
assert_eq!(hexstr_to_u64("null".to_string()), Ok(0u64));
assert_eq!(hexstr_to_u64("0x010000000000000000".to_string()), Err(hex::FromHexError::InvalidStringLength));
assert_eq!(hexstr_to_u64("0x0q".to_string()), Err(hex::FromHexError::InvalidHexCharacter{c: 'q', index:1}));
}
#[test]
fn test_hextstr_to_u256() {
assert_eq!(hexstr_to_u256("0x0100000000000000000000000000000000000000000000000000000000000000".to_string()), Ok(U256::from(1)));
assert_eq!(hexstr_to_u256("0x01000000".to_string()), Ok(U256::from(1)));
assert_eq!(hexstr_to_u256("null".to_string()), Ok(U256::from(0)));
assert_eq!(hexstr_to_u256("0x010000000000000000".to_string()), Err(hex::FromHexError::InvalidStringLength));
assert_eq!(hexstr_to_u256("0x0q".to_string()), Err(hex::FromHexError::InvalidHexCharacter{c: 'q', index:1}));
}
#[test]
fn test_hextstr_to_hash() {
assert_eq!(hexstr_to_hash("0x0000000000000000000000000000000000000000000000000000000000000000".to_string()), Ok(Hash::from([0u8;32])));
assert_eq!(hexstr_to_hash("0x010000000000000000".to_string()), Err(hex::FromHexError::InvalidStringLength));
assert_eq!(hexstr_to_hash("0x0q".to_string()), Err(hex::FromHexError::InvalidHexCharacter{c: 'q', index:1}));
}
}
TableDisplay.ts
/*
* Copyright 2017 TWO SIGMA OPEN SOURCE, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import widgets from './widgets';
import { DataGridScope } from './tableDisplay/dataGrid';
export class TableDisplayModel extends widgets.DOMWidgetModel {
defaults() {
return {
...super.defaults(),
_model_name: 'TableDisplayModel',
_view_name: 'TableDisplayView',
_model_module: 'beakerx',
_view_module: 'beakerx',
_model_module_version: BEAKERX_MODULE_VERSION,
_view_module_version: BEAKERX_MODULE_VERSION
};
}
}
// Custom View. Renders the widget model.
export class TableDisplayView extends widgets.DOMWidgetView {
private _currentScope: DataGridScope;
render(): void {
this._currentScope = null;
this.$el.addClass('beaker-table-display');
this.displayed.then(() => {
const tableModel = this.model.get('model');
if (tableModel.tooManyRows) {
this.showWarning(tableModel);
}
this.initDataGridTable(tableModel);
this.listenTo(this.model, 'beakerx-tabSelected', () => {
this._currentScope && this._currentScope.setInitialSize();
});
this.listenTo(this.model, 'change:updateData', this.handleUpdateData);
this.listenTo(this.model, 'change:model', this.handleModellUpdate);
});
}
handleModellUpdate(): void {
this._currentScope.updateModelData(this.model.get('model'));
this._currentScope.doResetAll();
}
handleUpdateData(): void {
const change = this.model.get('updateData');
const currentModel = this.model.get('model');
this.model.set('model', { ...currentModel, ...change }, { updated_view: this });
this.handleModellUpdate();
}
showWarning(data): void {
const rowLimitMsg = data.rowLimitMsg;
const modal = document.createElement('div');
modal.setAttribute('id', this.wrapperId);
modal.innerHTML = `<p class="ansired">${rowLimitMsg}</p>`;
this.el.appendChild(modal);
}
initDataGridTable(data: any): void {
this._currentScope = new DataGridScope({
element: this.el,
data: data,
widgetModel: this.model,
widgetView: this
});
this._currentScope.render();
}
remove(): void {
this._currentScope && this._currentScope.doDestroy();
if (this.pWidget) {
this.pWidget.dispose();
}
setTimeout(() => { this._currentScope = null; });
return super.remove.call(this);
}
}
export default {
TableDisplayModel,
TableDisplayView
};
index.js
import React, { Component, PropTypes } from 'react'
import classnames from 'classnames'
import './index.less'
import echarts from 'echarts/lib/echarts';
import 'echarts/lib/chart/pie';
import 'echarts/lib/component/tooltip';
import 'echarts/lib/component/title';
class Piechat extends Component {
componentDidMount() {
var myChart = echarts.init(document.getElementById('main'));
myChart.setOption(this.props.data)
}
render() {
return (
<div id="main" style={this.props.style} className={this.props.className}></div>
);
}
}
Piechat.propTypes = {
/**
* List of series. Each series sets its own chart type via type.
* ```js
* [{
* name: series name, used for tooltip display and legend filtering, and to match the corresponding series when updating data and options via setOption.
* type: [ default: 'pie' ].
* data: array of data items in the series; each entry is usually a concrete data point.
* }]
* }]
* ```
*/
series: PropTypes.array,
}
export default Piechat
save_amplitudes.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Simulator instruction to save statevector amplitudes and amplitudes squared.
"""
from qiskit.circuit import QuantumCircuit
from qiskit.extensions.exceptions import ExtensionError
from .save_data import SaveSingleData, SaveAverageData, default_qubits
class SaveAmplitudes(SaveSingleData):
"""Save complex statevector amplitudes."""
def __init__(self,
key,
num_qubits,
params,
pershot=False,
conditional=False):
"""Instruction to save complex statevector amplitudes.
Args:
key (str): the key for retrieving saved data from results.
num_qubits (int): the number of qubits for the snapshot type.
params (list): the basis states to return amplitudes for.
pershot (bool): if True save a list of amplitude vectors for each
shot of the simulation rather than a single
amplitude vector [Default: False].
conditional (bool): if True save the amplitudes vector conditional
on the current classical register values
[Default: False].
Raises:
ExtensionError: if params is invalid for the specified number of qubits.
"""
params = _format_amplitude_params(params, num_qubits)
super().__init__("save_amplitudes",
key,
num_qubits,
pershot=pershot,
conditional=conditional,
params=params)
class SaveAmplitudesSquared(SaveAverageData):
"""Save squared statevector amplitudes (probabilities)."""
def __init__(self,
key,
num_qubits,
params,
unnormalized=False,
pershot=False,
conditional=False):
"""Instruction to save squared statevector amplitudes (probabilities).
Args:
key (str): the key for retrieving saved data from results.
num_qubits (int): the number of qubits for the snapshot type.
params (list): the basis states to return probabilities for.
unnormalized (bool): if True, save the unnormalized accumulated
probabilities over all shots [Default: False].
pershot (bool): if True save a list of probability vectors for each
shot of the simulation rather than a single
probability vector [Default: False].
conditional (bool): if True save the probability vector conditional
on the current classical register values
[Default: False].
Raises:
ExtensionError: if params is invalid for the specified number of qubits.
"""
params = _format_amplitude_params(params, num_qubits)
super().__init__("save_amplitudes_sq",
key,
num_qubits,
unnormalized=unnormalized,
pershot=pershot,
conditional=conditional,
params=params)
def save_amplitudes(self, key, params, pershot=False, conditional=False):
"""Save complex statevector amplitudes.
Args:
key (str): the key for retrieving saved data from results.
params (List[int] or List[str]): the basis states to return amplitudes for.
pershot (bool): if True save a list of amplitude vectors for each
shot of the simulation rather than a single
amplitude vector [Default: False].
conditional (bool): if True save the amplitudes vector conditional
on the current classical register values
[Default: False].
Returns:
QuantumCircuit: with attached instruction.
Raises:
ExtensionError: if params is invalid for the specified number of qubits.
"""
qubits = default_qubits(self)
instr = SaveAmplitudes(key, len(qubits), params,
pershot=pershot, conditional=conditional)
return self.append(instr, qubits)
def save_amplitudes_squared(self, key, params,
unnormalized=False,
pershot=False,
conditional=False):
"""Save squared statevector amplitudes (probabilities).
Args:
key (str): the key for retrieving saved data from results.
params (List[int] or List[str]): the basis states to return amplitudes for.
unnormalized (bool): if True, save the unnormalized accumulated
probabilities over all shots [Default: False].
pershot (bool): if True save a list of probability vectors for each
shot of the simulation rather than a single
probability vector [Default: False].
conditional (bool): if True save the probability vector conditional
on the current classical register values
[Default: False].
Returns:
QuantumCircuit: with attached instruction.
Raises:
ExtensionError: if params is invalid for the specified number of qubits.
"""
qubits = default_qubits(self)
instr = SaveAmplitudesSquared(key, len(qubits), params,
unnormalized=unnormalized,
pershot=pershot,
conditional=conditional)
return self.append(instr, qubits)
def _format_amplitude_params(params, num_qubits=None):
"""Format amplitude params as a interger list."""
if isinstance(params[0], str):
if params[0].find('0x') == 0:
params = [int(i, 16) for i in params]
else:
params = [int(i, 2) for i in params]
if num_qubits and max(params) >= 2 ** num_qubits:
raise ExtensionError(
"Param values contain a state larger than the number of qubits")
return params
QuantumCircuit.save_amplitudes = save_amplitudes
QuantumCircuit.save_amplitudes_squared = save_amplitudes_squared
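A minimal usage sketch of the attached methods; the Bell circuit, the key names, and the basis-state values below are illustrative, and it assumes this module has been imported so the monkey-patched methods are registered on QuantumCircuit:

from qiskit import QuantumCircuit

# Hypothetical example: a Bell circuit whose |00> and |11> amplitudes we save.
circ = QuantumCircuit(2)
circ.h(0)
circ.cx(0, 1)
# Basis states may be passed as integers, hex strings ("0x..."), or bit
# strings; string params are normalized by _format_amplitude_params above.
circ.save_amplitudes("bell_amps", ["0x0", "0x3"])
circ.save_amplitudes_squared("bell_probs", [0, 3])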
hstdmaaddress2.rs
#[doc = "Register `HSTDMAADDRESS2` reader"]
pub struct R(crate::R<HSTDMAADDRESS2_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<HSTDMAADDRESS2_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::convert::From<crate::R<HSTDMAADDRESS2_SPEC>> for R {
fn from(reader: crate::R<HSTDMAADDRESS2_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `HSTDMAADDRESS2` writer"]
pub struct W(crate::W<HSTDMAADDRESS2_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<HSTDMAADDRESS2_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl core::convert::From<crate::W<HSTDMAADDRESS2_SPEC>> for W {
fn from(writer: crate::W<HSTDMAADDRESS2_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `BUFF_ADD` reader - Buffer Address"]
pub struct BUFF_ADD_R(crate::FieldReader<u32, u32>);
impl BUFF_ADD_R {
pub(crate) fn new(bits: u32) -> Self {
BUFF_ADD_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for BUFF_ADD_R {
type Target = crate::FieldReader<u32, u32>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `BUFF_ADD` writer - Buffer Address"]
pub struct BUFF_ADD_W<'a> {
w: &'a mut W,
}
impl<'a> BUFF_ADD_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u32) -> &'a mut W {
self.w.bits = (self.w.bits & !0xffff_ffff) | (value as u32 & 0xffff_ffff);
self.w
}
}
impl R {
#[doc = "Bits 0:31 - Buffer Address"]
#[inline(always)]
pub fn buff_add(&self) -> BUFF_ADD_R {
BUFF_ADD_R::new((self.bits & 0xffff_ffff) as u32)
}
}
impl W {
#[doc = "Bits 0:31 - Buffer Address"]
#[inline(always)]
pub fn buff_add(&mut self) -> BUFF_ADD_W {
BUFF_ADD_W { w: self }
}
#[doc = "Writes raw bits to the register."]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Host DMA Channel Address Register (n = 2)\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [hstdmaaddress2](index.html) module"]
pub struct HSTDMAADDRESS2_SPEC;
impl crate::RegisterSpec for HSTDMAADDRESS2_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [hstdmaaddress2::R](R) reader structure"]
impl crate::Readable for HSTDMAADDRESS2_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [hstdmaaddress2::W](W) writer structure"]
impl crate::Writable for HSTDMAADDRESS2_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets HSTDMAADDRESS2 to value 0"]
impl crate::Resettable for HSTDMAADDRESS2_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
install.go
// Copyright © 2016 David Cuadrado
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/dcu/capn-hook/core"
"github.com/spf13/cobra"
)
var (
hookTemplate = `#!/usr/bin/env bash
capn-hook run -s {hook} "$@"<<<"$(cat)"
`
)
// installCmd represents the install command
var installCmd = &cobra.Command{
Use: "install",
Short: "Installs capn-hook in your git hooks",
Long: `The install command replaces all your hooks with a capn-hook version.
If you have hooks that you are currently using please back them up before running this command.
`,
Aliases: []string{"i"},
Run: func(cmd *cobra.Command, args []string) {
gitDir, err := core.FindGitDir()
if err != nil {
fmt.Printf("Error: %s\n", err)
return
}
for _, hookName := range core.SupportedHooks {
hookPath := filepath.Join(gitDir, "hooks", hookName)
os.Remove(hookPath) // In case there's a symlink
tmpl := core.Template{Text: hookTemplate}
tmpl.Apply(core.Vars{"hook": hookName})
ioutil.WriteFile(hookPath, []byte(tmpl.Text), 0755)
}
},
}
func init() {
RootCmd.AddCommand(installCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// installCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// installCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
sensor.py
"""Support for TMB (Transports Metropolitans de Barcelona) Barcelona public transport."""
from datetime import timedelta
import logging
from requests import HTTPError
from tmb import IBus
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME, TIME_MINUTES
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Transport Metropolitans de Barcelona"
ICON = "mdi:bus-clock"
CONF_APP_ID = "app_id"
CONF_APP_KEY = "app_key"
CONF_LINE = "line"
CONF_BUS_STOP = "stop"
CONF_BUS_STOPS = "stops"
ATTR_BUS_STOP = "stop"
ATTR_LINE = "line"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
LINE_STOP_SCHEMA = vol.Schema(
{
vol.Required(CONF_BUS_STOP): cv.string,
vol.Required(CONF_LINE): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_APP_ID): cv.string,
vol.Required(CONF_APP_KEY): cv.string,
vol.Required(CONF_BUS_STOPS): vol.All(cv.ensure_list, [LINE_STOP_SCHEMA]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensors."""
ibus_client = IBus(config[CONF_APP_ID], config[CONF_APP_KEY])
sensors = []
for line_stop in config.get(CONF_BUS_STOPS):
line = line_stop[CONF_LINE]
stop = line_stop[CONF_BUS_STOP]
if line_stop.get(CONF_NAME):
name = f"{line} - {line_stop[CONF_NAME]} ({stop})"
else:
name = f"{line} - {stop}"
sensors.append(TMBSensor(ibus_client, stop, line, name))
add_entities(sensors, True)
class TMBSensor(Entity):
"""Implementation of a TMB line/stop Sensor."""
def __init__(self, ibus_client, stop, line, name):
"""Initialize the sensor."""
self._ibus_client = ibus_client
self._stop = stop
self._line = line.upper()
self._name = name
self._unit = TIME_MINUTES
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon for the frontend."""
return ICON
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
@property
def unique_id(self):
"""Return a unique, HASS-friendly identifier for this entity."""
return f"{self._stop}_{self._line}"
@property
def state(self):
"""Return the next departure time."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes of the last update."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_BUS_STOP: self._stop,
ATTR_LINE: self._line,
}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the next bus information."""
try:
self._state = self._ibus_client.get_stop_forecast(self._stop, self._line)
except HTTPError:
_LOGGER.error(
"Unable to fetch data from TMB API. Please check your API keys are valid"
)
setup.py
#! /usr/bin/env python
"""
@author: maedbhking
based heavily on flexible functionality of nilearn `setup.py`
"""
descr = """A python package for cerebellar neuroimaging..."""
import sys
import os
from setuptools import setup, find_packages
def load_version():
"""Executes SUITPy/version.py in a globals dictionary and return it.
Note: importing SUITPy is not an option because there may be
dependencies like nibabel which are not installed and
setup.py is supposed to install them.
"""
# load all vars into globals, otherwise
# the later function call using global vars doesn't work.
globals_dict = {}
with open(os.path.join('SUITPy', 'version.py')) as fp:
exec(fp.read(), globals_dict)
return globals_dict
def is_installing():
# Allow command-lines such as "python setup.py build install"
install_commands = set(['install', 'develop'])
return install_commands.intersection(set(sys.argv))
def list_required_packages():
required_packages = []
required_packages_orig = ['%s>=%s' % (mod, meta['min_version'])
for mod, meta
in _VERSION_GLOBALS['REQUIRED_MODULE_METADATA']
]
for package in required_packages_orig:
required_packages.append(package)
return required_packages
# Make sources available using relative paths from this file's directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
_VERSION_GLOBALS = load_version()
DISTNAME = 'SUITPy'
DESCRIPTION = 'Mapping and plotting cerebellar fMRI data in Python'
with open('README.rst') as fp:
LONG_DESCRIPTION = fp.read()
MAINTAINER = 'Maedbh King'
MAINTAINER_EMAIL = '[email protected]'
URL = 'https://github.com/DiedrichsenLab/SUITPy'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/DiedrichsenLab/SUITPy/archive/refs/tags/v1.0.3.tar.gz'
VERSION = _VERSION_GLOBALS['__version__']
if __name__ == "__main__":
if is_installing():
module_check_fn = _VERSION_GLOBALS['_check_module_dependencies']
module_check_fn(is_SUITPy_installing=True)
setup(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
zip_safe=False, # the package can run out of an .egg file
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
packages=find_packages(),
package_data={
'SUITPy.surfaces': ['*.surf.gii', '*.C.scene', '*.shape.gii', '*.txt'],
},
install_requires=list_required_packages(),
python_requires='>=3.6',
)
models.go
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.13.0
package datatype
import (
"database/sql"
"time"
"github.com/jackc/pgtype"
)
type DtCharacter struct {
A sql.NullString
B sql.NullString
C sql.NullString
D sql.NullString
E sql.NullString
}
type DtCharacterNotNull struct {
A string
B string
C string
D string
E string
}
type DtDatetime struct {
A sql.NullTime
B sql.NullTime
C sql.NullTime
D sql.NullTime
E sql.NullTime
F sql.NullTime
G sql.NullTime
H sql.NullTime
}
type DtDatetimeNotNull struct {
A time.Time
B time.Time
C time.Time
D time.Time
E time.Time
F time.Time
G time.Time
H time.Time
}
type DtNetType struct {
A pgtype.Inet
B pgtype.CIDR
C pgtype.Macaddr
}
type DtNetTypesNotNull struct {
A pgtype.Inet
B pgtype.CIDR
C pgtype.Macaddr
}
type DtNumeric struct {
A sql.NullInt16
B sql.NullInt32
C sql.NullInt64
D pgtype.Numeric
E pgtype.Numeric
F sql.NullFloat64
G sql.NullFloat64
H sql.NullInt16
I sql.NullInt32
J sql.NullInt64
K sql.NullInt16
L sql.NullInt32
M sql.NullInt64
}
type DtNumericNotNull struct {
A int16
B int32
C int64
D pgtype.Numeric
E pgtype.Numeric
F float32
G float64
H int16
I int32
J int64
K int16
L int32
M int64
}
type DtRange struct {
A pgtype.Int4range
B pgtype.Int8range
C pgtype.Numrange
D pgtype.Tsrange
E pgtype.Tstzrange
F pgtype.Daterange
}
type DtRangeNotNull struct {
A pgtype.Int4range
B pgtype.Int8range
C pgtype.Numrange
D pgtype.Tsrange
E pgtype.Tstzrange
F pgtype.Daterange
}
location_service_pb2_grpc.py
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from yandex.cloud.ydb.v1 import location_pb2 as yandex_dot_cloud_dot_ydb_dot_v1_dot_location__pb2
from yandex.cloud.ydb.v1 import location_service_pb2 as yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2
class LocationServiceStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Get = channel.unary_unary(
'/yandex.cloud.ydb.v1.LocationService/Get',
request_serializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.GetLocationRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__pb2.Location.FromString,
)
self.List = channel.unary_unary(
'/yandex.cloud.ydb.v1.LocationService/List',
request_serializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsResponse.FromString,
)
class LocationServiceServicer(object):
"""Missing associated documentation comment in .proto file."""
def Get(self, request, context):
"""Returns the specified location.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
"""Returns the list of available locations.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_LocationServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.GetLocationRequest.FromString,
response_serializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__pb2.Location.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'yandex.cloud.ydb.v1.LocationService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class LocationService(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.ydb.v1.LocationService/Get',
yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.GetLocationRequest.SerializeToString,
yandex_dot_cloud_dot_ydb_dot_v1_dot_location__pb2.Location.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.ydb.v1.LocationService/List',
yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsRequest.SerializeToString,
yandex_dot_cloud_dot_ydb_dot_v1_dot_location__service__pb2.ListLocationsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
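For reference, a minimal client-side sketch. The endpoint address is an assumption (real calls against Yandex Cloud also need IAM-token call credentials), and the `locations` field name is assumed from the ListLocationsResponse message wired up above:

import grpc
from yandex.cloud.ydb.v1 import location_service_pb2
from yandex.cloud.ydb.v1 import location_service_pb2_grpc

# "ydb.api.cloud.yandex.net:443" is an assumed endpoint, not taken from this file.
channel = grpc.secure_channel("ydb.api.cloud.yandex.net:443",
                              grpc.ssl_channel_credentials())
stub = location_service_pb2_grpc.LocationServiceStub(channel)
# ListLocationsRequest comes from location_service_pb2, as used in the stub above.
response = stub.List(location_service_pb2.ListLocationsRequest())
for location in response.locations:  # assumes a repeated `locations` field
    print(location.id)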
util.ts
import isAfter from 'date-fns/esm/isAfter';
import isBefore from 'date-fns/esm/isBefore';
import isSameDay from 'date-fns/esm/isSameDay';
import isSameYear from 'date-fns/esm/isSameYear';
import getDaysInMonth from 'date-fns/esm/getDaysInMonth';
export const weekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
export const monthNames = [
'January',
'February',
'March',
'April',
'May',
'June',
'July',
'August',
'September',
'October',
'November',
'December',
];
export function removeTimeIfAvailable(date: Date) {
return date ? removeTime(date) : date;
}
export function removeTime(date: Date) {
const d = new Date(date.getTime());
d.setHours(0);
d.setMinutes(0);
d.setSeconds(0);
d.setMilliseconds(0);
return d;
}
export function isSameDate(date1: Date, date2: Date) {
if (date2 && isSameDay(date2, date1) && isSameYear(date2, date1)) {
return true;
} else {
return false;
}
}
export function isBetweenDateRange(date: Date, range: [Date, Date]) {
const isDateRange = range[0] && range[1];
if (isDateRange && isAfter(date, range[0]) && isBefore(date, range[1])) {
return true;
} else {
return false;
}
}
export function isStartOfDateRange(date: Date, range: [Date, Date]) {
if (range && isSameDay(range[0], date) && isSameYear(range[0], date)) {
return true;
} else {
return false;
}
}
export function isEndOfDateRange(date: Date, range: [Date, Date]) {
if (range && isSameDay(range[1], date) && isSameYear(range[1], date)) {
return true;
} else {
return false;
}
}
export function getDayOfWeekOffset(date: Date) {
const firstDay = new Date(date.getFullYear(), date.getMonth(), 1);
const dayOfWeek = firstDay.getDay();
return Array.from(Array(dayOfWeek), (_, i) => i);
}
export function daysInMonth(date: Date) {
const totalDays = getDaysInMonth(date);
return Array.from(Array(totalDays), (_, i) => {
const day = new Date(date.getTime());
day.setDate(i + 1);
return day;
});
}
ManageAccountViewModel.ts
/// <reference path="../exceptionless.ts" />
module exceptionless.account {
export class ManageAccountViewModel extends ViewModelBase {
private _navigationViewModel: NavigationViewModel;
emailNotificationsEnabled = ko.observable<boolean>(false);
isVerified = ko.observable<boolean>(false);
updatePasswordCommand: KoliteCommand;
saveCommand: KoliteCommand;
constructor(elementId: string, navigationElementId: string, tabElementId: string, nameEmailFormSelector: string, passwordFormSelector: string, emailNotificationsEnabled: boolean, isVerified: boolean) {
super(elementId);
this._navigationViewModel = new NavigationViewModel(navigationElementId);
TabUtil.init(tabElementId);
this.updatePasswordCommand = ko.asyncCommand({
canExecute: (isExecuting) => {
return !isExecuting;
},
execute: (complete) => {
if (!$(passwordFormSelector).valid()) {
complete();
return;
}
DataUtil.submitForm($(passwordFormSelector),
(data) => {
App.showSuccessNotification('Your password has been successfully updated!');
complete();
$(passwordFormSelector + ' input').each((index:number, element: any) => element.value = null);
}, (jqXHR: JQueryXHR, status: string, errorThrown: string) => {
if (jqXHR.status != 400)
App.showErrorNotification('An error occurred while updating your password.');
complete();
});
}
});
this.saveCommand = ko.asyncCommand({
canExecute: (isExecuting) => {
return !isExecuting;
},
execute: (complete) => {
if (!$(nameEmailFormSelector).valid()) {
complete();
return;
}
DataUtil.submitForm($(nameEmailFormSelector),
(data) => {
this.isVerified(data.IsVerified);
App.showSuccessNotification('Saved!');
complete();
}, (jqXHR: JQueryXHR, status: string, errorThrown: string) => {
if (jqXHR.status != 400)
App.showErrorNotification('An error occurred while saving your changes.');
complete();
});
}
});
this.isVerified(isVerified);
this.emailNotificationsEnabled.subscribe((value: boolean) => $('#EmailNotificationsEnabled').val(value.toString()));
this.emailNotificationsEnabled(emailNotificationsEnabled);
this.applyBindings();
}
public resendVerificationEmail(): void {
$.ajax('/account/resend-verification-email', {
dataType: 'json',
success: () => App.showSuccessNotification('Your verification email has been successfully sent!'),
error: () => App.showErrorNotification('An error occurred while resending the verification email.')
});
}
}
}
response.rs
use std::mem;
use std::fmt;
use std::io::{self, Read};
use std::net::SocketAddr;
use std::time::Duration;
use std::borrow::Cow;
use encoding_rs::{Encoding, UTF_8};
use futures::{Async, Poll, Stream};
use http;
use mime::Mime;
use serde::de::DeserializeOwned;
use serde_json;
use cookie;
use client::KeepCoreThreadAlive;
use hyper::header::HeaderMap;
use {async_impl, StatusCode, Url, Version, wait};
/// A Response to a submitted `Request`.
pub struct Response {
inner: async_impl::Response,
body: async_impl::ReadableChunks<WaitBody>,
content_length: Option<u64>,
_thread_handle: KeepCoreThreadAlive,
}
impl fmt::Debug for Response {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&self.inner, f)
}
}
impl Response {
pub(crate) fn new(mut res: async_impl::Response, timeout: Option<Duration>, thread: KeepCoreThreadAlive) -> Response {
let body = mem::replace(res.body_mut(), async_impl::Decoder::empty());
let len = body.content_length();
let body = async_impl::ReadableChunks::new(WaitBody {
inner: wait::stream(body, timeout)
});
Response {
inner: res,
body: body,
content_length: len,
_thread_handle: thread,
}
}
/// Get the `StatusCode` of this `Response`.
///
/// # Examples
///
/// Checking for general status class:
///
/// ```rust
/// # fn run() -> Result<(), Box<::std::error::Error>> {
/// let resp = reqwest::get("http://httpbin.org/get")?;
/// if resp.status().is_success() {
/// println!("success!");
/// } else if resp.status().is_server_error() {
/// println!("server error!");
/// } else {
/// println!("Something else happened. Status: {:?}", resp.status());
/// }
/// # Ok(())
/// # }
/// ```
///
/// Checking for specific status codes:
///
/// ```rust
/// use reqwest::Client;
/// use reqwest::StatusCode;
/// # fn run() -> Result<(), Box<::std::error::Error>> {
/// let client = Client::new();
///
/// let resp = client.post("http://httpbin.org/post")
/// .body("possibly too large")
/// .send()?;
///
/// match resp.status() {
/// StatusCode::OK => println!("success!"),
/// StatusCode::PAYLOAD_TOO_LARGE => {
/// println!("Request payload is too large!");
/// }
/// s => println!("Received response status: {:?}", s),
/// };
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn status(&self) -> StatusCode {
self.inner.status()
}
/// Get the `Headers` of this `Response`.
///
/// # Example
///
/// Saving an etag when caching a file:
///
/// ```
/// use reqwest::Client;
/// use reqwest::header::ETAG;
///
/// # fn run() -> Result<(), Box<::std::error::Error>> {
/// let client = Client::new();
///
/// let mut resp = client.get("http://httpbin.org/cache").send()?;
/// if resp.status().is_success() {
/// if let Some(etag) = resp.headers().get(ETAG) {
/// std::fs::write("etag", etag.as_bytes());
/// }
/// let mut file = std::fs::File::create("file")?;
/// resp.copy_to(&mut file)?;
/// }
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn headers(&self) -> &HeaderMap {
self.inner.headers()
}
/// Retrieve the cookies contained in the response.
///
/// Note that invalid 'Set-Cookie' headers will be ignored.
pub fn cookies<'a>(&'a self) -> impl Iterator< Item = cookie::Cookie<'a> > + 'a {
cookie::extract_response_cookies(self.headers())
.filter_map(Result::ok)
}
/// Get the HTTP `Version` of this `Response`.
#[inline]
pub fn version(&self) -> Version {
self.inner.version()
}
/// Get the final `Url` of this `Response`.
///
/// # Example
///
/// ```rust
/// # fn run() -> Result<(), Box<::std::error::Error>> {
/// let resp = reqwest::get("http://httpbin.org/redirect/1")?;
/// assert_eq!(resp.url().as_str(), "http://httpbin.org/get");
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn url(&self) -> &Url {
self.inner.url()
}
/// Get the remote address used to get this `Response`.
///
/// # Example
///
/// ```rust
/// # fn run() -> Result<(), Box<::std::error::Error>> {
/// let resp = reqwest::get("http://httpbin.org/redirect/1")?;
/// println!("httpbin.org address: {:?}", resp.remote_addr());
/// # Ok(())
/// # }
/// ```
pub fn remote_addr(&self) -> Option<SocketAddr> {
self.inner.remote_addr()
}
/// Get the content-length of the response, if it is known.
///
/// Reasons it may not be known:
///
/// - The server didn't send a `content-length` header.
/// - The response is gzipped and automatically decoded (thus changing
/// the actual decoded length).
pub fn content_length(&self) -> Option<u64> {
self.inner.content_length()
}
/// Try and deserialize the response body as JSON using `serde`.
///
/// # Examples
///
/// ```rust
/// # extern crate reqwest;
/// # #[macro_use] extern crate serde_derive;
/// #
/// # use reqwest::Error;
/// #
/// #[derive(Deserialize)]
/// struct Ip {
/// origin: String,
/// }
///
/// # fn run() -> Result<(), Error> {
/// let json: Ip = reqwest::get("http://httpbin.org/ip")?.json()?;
/// # Ok(())
/// # }
/// #
/// # fn main() { }
/// ```
///
/// # Errors
///
/// This method fails whenever the response body is not in JSON format
/// or it cannot be properly deserialized to target type `T`. For more
/// details please see [`serde_json::from_reader`].
/// [`serde_json::from_reader`]: https://docs.serde.rs/serde_json/fn.from_reader.html
#[inline]
pub fn json<T: DeserializeOwned>(&mut self) -> ::Result<T> {
// There are two ways we could implement this:
//
// 1. Just using from_reader(self), making use of our blocking read adapter
// 2. Just use self.inner.json().wait()
//
// Doing 1 is pretty easy, but it means we have the `serde_json` code
// in more than one place, doing basically the same thing.
//
// Doing 2 would mean `serde_json` is only in one place, but we'd
// need to update the sync Response to lazily make a blocking read
// adapter, so that our `inner` could possibly still have the original
// body.
//
// Went for easier for now, just to get it working.
serde_json::from_reader(self).map_err(::error::from)
}
/// Get the response text.
///
/// This method decodes the response body with BOM sniffing
/// and with malformed sequences replaced with the REPLACEMENT CHARACTER.
/// Encoding is determined from the `charset` parameter of the `Content-Type` header,
/// and defaults to `utf-8` if not present.
///
/// # Example
///
/// ```rust
/// # extern crate reqwest;
/// # fn run() -> Result<(), Box<::std::error::Error>> {
/// let content = reqwest::get("http://httpbin.org/range/26")?.text()?;
/// # Ok(())
/// # }
/// ```
///
/// # Note
///
/// This consumes the body. Trying to read more, or calling `response.json()`,
/// will return empty values.
pub fn text(&mut self) -> ::Result<String> {
let len = self.content_length.unwrap_or(0);
let mut content = Vec::with_capacity(len as usize);
self.read_to_end(&mut content).map_err(::error::from)?;
let content_type = self.headers().get(::header::CONTENT_TYPE)
.and_then(|value| {
value.to_str().ok()
})
.and_then(|value| {
value.parse::<Mime>().ok()
});
let encoding_name = content_type
.as_ref()
.and_then(|mime| {
mime
.get_param("charset")
.map(|charset| charset.as_str())
})
.unwrap_or("utf-8");
let encoding = Encoding::for_label(encoding_name.as_bytes()).unwrap_or(UTF_8);
// a block because of borrow checker
{
let (text, _, _) = encoding.decode(&content);
match text {
Cow::Owned(s) => return Ok(s),
_ => (),
}
}
unsafe {
// decoding returned Cow::Borrowed, meaning these bytes
// are already valid utf8
Ok(String::from_utf8_unchecked(content))
}
}
/// Copy the response body into a writer.
///
/// This function internally uses [`std::io::copy`] and hence will continuously read data from
/// the body and then write it into writer in a streaming fashion until EOF is met.
///
/// On success, the total number of bytes that were copied to `writer` is returned.
///
/// [`std::io::copy`]: https://doc.rust-lang.org/std/io/fn.copy.html
///
/// # Example
///
/// ```rust
/// # fn run() -> Result<(), Box<::std::error::Error>> {
/// let mut resp = reqwest::get("http://httpbin.org/range/5")?;
/// let mut buf: Vec<u8> = vec![];
/// resp.copy_to(&mut buf)?;
/// assert_eq!(b"abcde", buf.as_slice());
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn copy_to<W: ?Sized>(&mut self, w: &mut W) -> ::Result<u64>
where W: io::Write
{
io::copy(self, w).map_err(::error::from)
}
/// Turn a response into an error if the server returned an error.
///
/// # Example
///
/// ```rust,no_run
/// # extern crate reqwest;
/// # fn run() -> Result<(), Box<::std::error::Error>> {
/// let res = reqwest::get("http://httpbin.org/status/400")?
/// .error_for_status();
/// if let Err(err) = res {
/// assert_eq!(err.status(), Some(reqwest::StatusCode::BAD_REQUEST));
/// }
/// # Ok(())
/// # }
/// # fn main() {}
/// ```
#[inline]
pub fn error_for_status(self) -> ::Result<Self> {
let Response { body, content_length, inner, _thread_handle } = self;
inner.error_for_status().map(move |inner| {
Response {
inner,
body,
content_length,
_thread_handle,
}
})
}
/// Turn a reference to a response into an error if the server returned an error.
///
/// # Example
///
/// ```rust,no_run
/// # extern crate reqwest;
/// # fn run() -> Result<(), Box<::std::error::Error>> {
/// let res = reqwest::get("http://httpbin.org/status/400")?;
/// let res = res.error_for_status_ref();
/// if let Err(err) = res {
/// assert_eq!(err.status(), Some(reqwest::StatusCode::BAD_REQUEST));
/// }
/// # Ok(())
/// # }
/// # fn main() {}
/// ```
#[inline]
pub fn error_for_status_ref(&self) -> ::Result<&Self> {
self.inner.error_for_status_ref().and_then(|_| Ok(self))
}
}
impl Read for Response {
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.body.read(buf)
}
}
struct WaitBody {
inner: wait::WaitStream<async_impl::Decoder>
}
impl Stream for WaitBody {
type Item = <async_impl::Decoder as Stream>::Item;
type Error = <async_impl::Decoder as Stream>::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
match self.inner.next() {
Some(Ok(chunk)) => Ok(Async::Ready(Some(chunk))),
Some(Err(e)) => {
let req_err = match e {
wait::Waited::TimedOut => ::error::timedout(None),
wait::Waited::Err(e) => e,
};
Err(req_err)
},
None => Ok(Async::Ready(None)),
}
}
}
impl<T: Into<async_impl::body::Body>> From<http::Response<T>> for Response {
fn from(r: http::Response<T>) -> Response {
let response = async_impl::Response::from(r);
Response::new(response, None, KeepCoreThreadAlive::empty())
}
}
|
new
|
click_view_service.pb.go
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.12.3
// source: google/ads/googleads/v4/services/click_view_service.proto
package services
import (
context "context"
reflect "reflect"
sync "sync"
proto "github.com/golang/protobuf/proto"
resources "google.golang.org/genproto/googleapis/ads/googleads/v4/resources"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// Request message for [ClickViewService.GetClickView][google.ads.googleads.v4.services.ClickViewService.GetClickView].
type GetClickViewRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Required. The resource name of the click view to fetch.
ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
}
func (x *GetClickViewRequest) Reset() {
*x = GetClickViewRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v4_services_click_view_service_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *GetClickViewRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetClickViewRequest) ProtoMessage() {}
func (x *GetClickViewRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v4_services_click_view_service_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetClickViewRequest.ProtoReflect.Descriptor instead.
func (*GetClickViewRequest) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v4_services_click_view_service_proto_rawDescGZIP(), []int{0}
}
func (x *GetClickViewRequest) GetResourceName() string {
if x != nil {
return x.ResourceName
}
return ""
}
var File_google_ads_googleads_v4_services_click_view_service_proto protoreflect.FileDescriptor
var file_google_ads_googleads_v4_services_click_view_service_proto_rawDesc = []byte{
0x0a, 0x39, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x34, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x20, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64,
0x73, 0x2e, 0x76, 0x34, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x32, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x61, 0x64, 0x73, 0x2f, 0x76, 0x34, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
0x2f, 0x63, 0x6c, 0x69, 0x63, 0x6b, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65,
0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76,
0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x22, 0x66, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x63, 0x6b,
0x56, 0x69, 0x65, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x0d, 0x72,
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x56, 0x69, 0x65, 0x77, 0x52, 0x0c,
0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x32, 0xeb, 0x01, 0x0a,
0x10, 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x56, 0x69, 0x65, 0x77, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x12, 0xb9, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x56, 0x69,
0x65, 0x77, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x34, 0x2e, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x56, 0x69,
0x65, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73,
0x2e, 0x76, 0x34, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x43, 0x6c,
0x69, 0x63, 0x6b, 0x56, 0x69, 0x65, 0x77, 0x22, 0x44, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12,
0x2c, 0x2f, 0x76, 0x34, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e,
0x61, 0x6d, 0x65, 0x3d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f,
0x63, 0x6c, 0x69, 0x63, 0x6b, 0x56, 0x69, 0x65, 0x77, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x0d,
0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0x1b, 0xca,
0x41, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0xfc, 0x01, 0x0a, 0x24, 0x63,
0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x34, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x73, 0x42, 0x15, 0x43, 0x6c, 0x69, 0x63, 0x6b, 0x56, 0x69, 0x65, 0x77, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x48, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
0x70, 0x69, 0x73, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64,
0x73, 0x2f, 0x76, 0x34, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x3b, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0xa2, 0x02, 0x03, 0x47, 0x41, 0x41, 0xaa, 0x02, 0x20, 0x47,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x41, 0x64, 0x73, 0x2e, 0x56, 0x34, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0xca,
0x02, 0x20, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x73, 0x5c, 0x47, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x5c, 0x56, 0x34, 0x5c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x73, 0xea, 0x02, 0x24, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x73,
0x3a, 0x3a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x3a, 0x3a, 0x56, 0x34, 0x3a,
0x3a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var (
file_google_ads_googleads_v4_services_click_view_service_proto_rawDescOnce sync.Once
file_google_ads_googleads_v4_services_click_view_service_proto_rawDescData = file_google_ads_googleads_v4_services_click_view_service_proto_rawDesc
)
func
|
() []byte {
file_google_ads_googleads_v4_services_click_view_service_proto_rawDescOnce.Do(func() {
file_google_ads_googleads_v4_services_click_view_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_ads_googleads_v4_services_click_view_service_proto_rawDescData)
})
return file_google_ads_googleads_v4_services_click_view_service_proto_rawDescData
}
var file_google_ads_googleads_v4_services_click_view_service_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_google_ads_googleads_v4_services_click_view_service_proto_goTypes = []interface{}{
(*GetClickViewRequest)(nil), // 0: google.ads.googleads.v4.services.GetClickViewRequest
(*resources.ClickView)(nil), // 1: google.ads.googleads.v4.resources.ClickView
}
var file_google_ads_googleads_v4_services_click_view_service_proto_depIdxs = []int32{
0, // 0: google.ads.googleads.v4.services.ClickViewService.GetClickView:input_type -> google.ads.googleads.v4.services.GetClickViewRequest
1, // 1: google.ads.googleads.v4.services.ClickViewService.GetClickView:output_type -> google.ads.googleads.v4.resources.ClickView
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_google_ads_googleads_v4_services_click_view_service_proto_init() }
func file_google_ads_googleads_v4_services_click_view_service_proto_init() {
if File_google_ads_googleads_v4_services_click_view_service_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_google_ads_googleads_v4_services_click_view_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetClickViewRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_ads_googleads_v4_services_click_view_service_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_google_ads_googleads_v4_services_click_view_service_proto_goTypes,
DependencyIndexes: file_google_ads_googleads_v4_services_click_view_service_proto_depIdxs,
MessageInfos: file_google_ads_googleads_v4_services_click_view_service_proto_msgTypes,
}.Build()
File_google_ads_googleads_v4_services_click_view_service_proto = out.File
file_google_ads_googleads_v4_services_click_view_service_proto_rawDesc = nil
file_google_ads_googleads_v4_services_click_view_service_proto_goTypes = nil
file_google_ads_googleads_v4_services_click_view_service_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// ClickViewServiceClient is the client API for ClickViewService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ClickViewServiceClient interface {
// Returns the requested click view in full detail.
GetClickView(ctx context.Context, in *GetClickViewRequest, opts ...grpc.CallOption) (*resources.ClickView, error)
}
type clickViewServiceClient struct {
cc grpc.ClientConnInterface
}
func NewClickViewServiceClient(cc grpc.ClientConnInterface) ClickViewServiceClient {
return &clickViewServiceClient{cc}
}
func (c *clickViewServiceClient) GetClickView(ctx context.Context, in *GetClickViewRequest, opts ...grpc.CallOption) (*resources.ClickView, error) {
out := new(resources.ClickView)
err := c.cc.Invoke(ctx, "/google.ads.googleads.v4.services.ClickViewService/GetClickView", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ClickViewServiceServer is the server API for ClickViewService service.
type ClickViewServiceServer interface {
// Returns the requested click view in full detail.
GetClickView(context.Context, *GetClickViewRequest) (*resources.ClickView, error)
}
// UnimplementedClickViewServiceServer can be embedded to have forward compatible implementations.
type UnimplementedClickViewServiceServer struct {
}
func (*UnimplementedClickViewServiceServer) GetClickView(context.Context, *GetClickViewRequest) (*resources.ClickView, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetClickView not implemented")
}
func RegisterClickViewServiceServer(s *grpc.Server, srv ClickViewServiceServer) {
s.RegisterService(&_ClickViewService_serviceDesc, srv)
}
func _ClickViewService_GetClickView_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetClickViewRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ClickViewServiceServer).GetClickView(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/google.ads.googleads.v4.services.ClickViewService/GetClickView",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ClickViewServiceServer).GetClickView(ctx, req.(*GetClickViewRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ClickViewService_serviceDesc = grpc.ServiceDesc{
ServiceName: "google.ads.googleads.v4.services.ClickViewService",
HandlerType: (*ClickViewServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetClickView",
Handler: _ClickViewService_GetClickView_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "google/ads/googleads/v4/services/click_view_service.proto",
}
|
file_google_ads_googleads_v4_services_click_view_service_proto_rawDescGZIP
|
gen_XrBoundedReferenceSpace.rs
|
#![allow(unused_imports)]
use super::*;
use wasm_bindgen::prelude::*;
#[cfg(web_sys_unstable_apis)]
#[wasm_bindgen]
extern "C" {
# [wasm_bindgen (extends = XrReferenceSpace , extends = XrSpace , extends = EventTarget , extends = :: js_sys :: Object , js_name = XRBoundedReferenceSpace , typescript_type = "XRBoundedReferenceSpace")]
#[derive(Debug, Clone, PartialEq, Eq)]
#[doc = "The `XrBoundedReferenceSpace` class."]
#[doc = ""]
#[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/XRBoundedReferenceSpace)"]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `XrBoundedReferenceSpace`*"]
#[doc = ""]
#[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"]
#[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"]
pub type XrBoundedReferenceSpace;
#[cfg(web_sys_unstable_apis)]
# [wasm_bindgen (structural , method , getter , js_class = "XRBoundedReferenceSpace" , js_name = boundsGeometry)]
#[doc = "Getter for the `boundsGeometry` field of this object."]
#[doc = ""]
#[doc = "[MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/API/XRBoundedReferenceSpace/boundsGeometry)"]
#[doc = ""]
#[doc = "*This API requires the following crate features to be activated: `XrBoundedReferenceSpace`*"]
#[doc = ""]
#[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"]
#[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"]
pub fn bounds_geometry(this: &XrBoundedReferenceSpace) -> ::js_sys::Array;
|
}
|
|
uhyve.rs
|
//! This file contains the entry point to the hypervisor. Uhyve uses KVM to
//! create a virtual machine and load the kernel.
use error::*;
use kvm_bindings::*;
use kvm_ioctls::VmFd;
use linux::vcpu::*;
use linux::{MemoryRegion, KVM};
use nix::sys::mman::*;
use std;
use std::convert::TryInto;
use std::ffi::c_void;
use std::ptr;
use std::ptr::read_volatile;
use std::sync::{Arc, Mutex};
use vm::{BootInfo, VirtualCPU, Vm, VmParameter};
use debug_manager::DebugManager;
const KVM_32BIT_MAX_MEM_SIZE: usize = 1 << 32;
const KVM_32BIT_GAP_SIZE: usize = 768 << 20;
const KVM_32BIT_GAP_START: usize = KVM_32BIT_MAX_MEM_SIZE - KVM_32BIT_GAP_SIZE;
pub struct Uhyve {
vm: VmFd,
entry_point: u64,
mem: MmapMemory,
num_cpus: u32,
path: String,
boot_info: *const BootInfo,
verbose: bool,
dbg: Option<Arc<Mutex<DebugManager>>>,
}
impl Uhyve {
pub fn new(kernel_path: String, specs: &VmParameter, dbg: Option<DebugManager>) -> Result<Uhyve> {
let vm = KVM.create_vm().or_else(to_error)?;
let mut cap: kvm_enable_cap = Default::default();
cap.cap = KVM_CAP_SET_TSS_ADDR;
if vm.enable_cap(&cap).is_ok() {
debug!("Setting TSS address");
vm.set_tss_address(0xfffbd000).or_else(to_error)?;
}
let mem = MmapMemory::new(0, specs.mem_size, 0, specs.hugepage, specs.mergeable);
let sz = if specs.mem_size < KVM_32BIT_GAP_START {
specs.mem_size
} else {
KVM_32BIT_GAP_START
};
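// Map guest memory below the 32-bit gap in slot 0; any memory beyond the
// gap (a 768 MiB hole just below 4 GiB) is mapped separately in slot 1 below.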
let kvm_mem = kvm_userspace_memory_region {
slot: 0,
flags: mem.flags(),
memory_size: sz as u64,
guest_phys_addr: mem.guest_address() as u64,
userspace_addr: mem.host_address() as u64,
};
unsafe { vm.set_user_memory_region(kvm_mem) }.or_else(to_error)?;
if specs.mem_size > KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE {
let kvm_mem = kvm_userspace_memory_region {
slot: 1,
flags: mem.flags(),
memory_size: (specs.mem_size - KVM_32BIT_GAP_START - KVM_32BIT_GAP_SIZE) as u64,
guest_phys_addr: (mem.guest_address() + KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE)
as u64,
userspace_addr: (mem.host_address() + KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE)
as u64,
};
unsafe { vm.set_user_memory_region(kvm_mem) }.or_else(to_error)?;
}
let mut hyve = Uhyve {
vm: vm,
entry_point: 0,
mem: mem,
num_cpus: specs.num_cpus,
path: kernel_path,
boot_info: ptr::null(),
verbose: specs.verbose,
dbg: dbg.map(|g| Arc::new(Mutex::new(g))),
};
hyve.init()?;
Ok(hyve)
}
fn init(&mut self) -> Result<()> {
self.init_guest_mem();
debug!("Initialize interrupt controller");
// create basic interrupt controller
self.vm.create_irq_chip().or_else(to_error)?;
let pit_config = kvm_pit_config::default();
self.vm.create_pit2(pit_config).or_else(to_error)?;
// currently, we only support systems that provide the
// TSC_DEADLINE cpu feature
let mut cap: kvm_enable_cap = Default::default();
cap.cap = KVM_CAP_TSC_DEADLINE_TIMER;
if self.vm.enable_cap(&cap).is_ok() {
panic!("Processor feature \"tsc deadline\" isn't supported!")
}
Ok(())
}
}
impl Vm for Uhyve {
fn verbose(&self) -> bool {
self.verbose
}
fn set_entry_point(&mut self, entry: u64) {
self.entry_point = entry;
|
}
fn get_entry_point(&self) -> u64 {
self.entry_point
}
fn num_cpus(&self) -> u32 {
self.num_cpus
}
fn guest_mem(&self) -> (*mut u8, usize) {
(self.mem.host_address() as *mut u8, self.mem.memory_size())
}
fn kernel_path(&self) -> &str {
&self.path
}
fn create_cpu(&self, id: u32) -> Result<Box<dyn VirtualCPU>> {
let vm_start = self.mem.host_address() as usize;
Ok(Box::new(UhyveCPU::new(
id,
self.path.clone(),
self.vm
.create_vcpu(id.try_into().unwrap())
.or_else(to_error)?,
vm_start,
self.dbg.as_ref().cloned(),
)))
}
fn set_boot_info(&mut self, header: *const BootInfo) {
self.boot_info = header;
}
fn cpu_online(&self) -> u32 {
if self.boot_info.is_null() {
0
} else {
unsafe { read_volatile(&(*self.boot_info).cpu_online) }
}
}
}
impl Drop for Uhyve {
fn drop(&mut self) {
debug!("Drop virtual machine");
}
}
unsafe impl Send for Uhyve {}
unsafe impl Sync for Uhyve {}
#[derive(Debug)]
struct MmapMemory {
flags: u32,
memory_size: usize,
guest_address: usize,
host_address: usize,
}
impl MmapMemory {
pub fn new(
flags: u32,
memory_size: usize,
guest_address: u64,
huge_pages: bool,
mergeable: bool,
) -> MmapMemory {
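// Reserve anonymous, private memory for the guest; MAP_NORESERVE avoids
// committing backing store for the whole (possibly huge) region up front.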
let host_address = unsafe {
mmap(
std::ptr::null_mut(),
memory_size,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS | MapFlags::MAP_NORESERVE,
-1,
0,
)
.expect("mmap failed")
};
if mergeable {
debug!("Enable kernel feature to merge same pages");
unsafe {
if madvise(host_address, memory_size, MmapAdvise::MADV_MERGEABLE).is_err() {
panic!("madvise failed");
}
}
}
if huge_pages {
debug!("Uhyve uses huge pages");
unsafe {
if madvise(host_address, memory_size, MmapAdvise::MADV_HUGEPAGE).is_err() {
panic!("madvise failed");
}
}
}
MmapMemory {
flags: flags,
memory_size: memory_size,
guest_address: guest_address as usize,
host_address: host_address as usize,
}
}
#[allow(dead_code)]
fn as_slice_mut(&mut self) -> &mut [u8] {
unsafe { std::slice::from_raw_parts_mut(self.host_address as *mut u8, self.memory_size) }
}
}
impl MemoryRegion for MmapMemory {
fn flags(&self) -> u32 {
self.flags
}
fn memory_size(&self) -> usize {
self.memory_size
}
fn guest_address(&self) -> usize {
self.guest_address
}
fn host_address(&self) -> usize {
self.host_address
}
}
impl Drop for MmapMemory {
fn drop(&mut self) {
if self.memory_size() > 0 {
unsafe {
if munmap(self.host_address() as *mut c_void, self.memory_size()).is_err() {
panic!("munmap failed");
}
}
}
}
}
| |
TodoList.js
|
/**
* @author Mahmud Ahsan <https://github.com/mahmudahsan>
*/
import React from 'react';
import { View, FlatList } from 'react-native';
import TodoListItem from '../components/TodoListItem';
export default class
|
extends React.Component {
_keyExtractor = (item) => item.id
rgbToHex = (rgb) => {
// % 256 keeps each component within a single hex byte (0x00-0xff)
var hex = (Number(rgb) % 256).toString(16);
if (hex.length < 2) {
hex = "0" + hex;
}
return hex;
};
fullColorHex = (r,g,b) => {
var red = this.rgbToHex(r);
var green = this.rgbToHex(g);
var blue = this.rgbToHex(b);
return red+green+blue; // e.g. "1098c2": 6 hex digits
};
_renderItem = ({item, index}) => {
/**
* Start Color to Target Color
* 1098c2 : R 16, G 152, B 194
* a8dae9 : R 168, G 218, B 233
* Increase rate = 5
* R: 16/255*100 = 6.27 | 6.27/100 * 5 = 0.3135
* G: 152/255*100 = 59.6 | 59.6/100 * 5 = 2.98
* B: 194/255*100 = 76.0 | 76.0/100 * 5 = 3.8
*/
const r = Math.floor(0.3135 * index);
const g = Math.floor(2.98 * index);
const b = Math.floor(3.8 * index);
let startColor = this.fullColorHex(Number(16+r), Number(152+g), Number(194+b));
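// e.g. (hypothetical) index 5: r=1, g=14, b=19 -> rgb(17, 166, 213) -> "#11a6d5"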
return (
<TodoListItem
todo={item}
startColor={`#${startColor}`}
endColor={'#e0ecf0'}
onItemPress={()=>{this.onTodoItemPressed(index)}}
/>
)
};
render(){
return (
<View>
<FlatList
data={this.props.data}
keyExtractor={this._keyExtractor}
renderItem={this._renderItem}
/>
</View>
)
}
// When user pressed todo item
onTodoItemPressed = (indexOfTodoItem) => {
this.props.onTodoUpdate(indexOfTodoItem);
this.startColor = this.props.startColor;
}
}
|
TodoList
|
goop_test.go
|
package goop_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func
|
(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "goop")
}
|
Test
|
config.rs
|
// Copyright 2020-2021 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
#![cfg(feature = "full")]
use crate::alias;
use libp2p::{multiaddr::Protocol, Multiaddr, PeerId};
use serde::Deserialize;
use std::{borrow::Cow, collections::HashSet};
const DEFAULT_BIND_MULTIADDR: &str = "/ip4/0.0.0.0/tcp/15600";
pub const DEFAULT_RECONNECT_INTERVAL_SECS: u64 = 30;
const MIN_RECONNECT_INTERVAL_SECS: u64 = 1;
pub const DEFAULT_MAX_UNKNOWN_PEERS: usize = 4;
pub const DEFAULT_MAX_DISCOVERED_PEERS: usize = 4;
/// [`NetworkConfigBuilder`] errors.
#[derive(Debug, thiserror::Error)]
pub enum Error {
/// The provided [`Multiaddr`] has too few protocols in it.
#[error("Multiaddr is underspecified.")]
MultiaddrUnderspecified,
/// The provided [`Multiaddr`] has too many protocols in it.
#[error("Multiaddr is overspecified.")]
MultiaddrOverspecified,
/// The provided [`Protocol`] is invalid.
#[error("Invalid Multiaddr protocol at {}.", .0)]
InvalidProtocol(usize),
/// The provided address is invalid.
#[error("Invalid address protocol.")]
InvalidAddressProtocol,
/// The provided port is invalid.
#[error("Invalid port protocol.")]
InvalidPortProtocol,
/// The peer was already added.
#[error("Static peer {} already added.", alias!(.0))]
DuplicateStaticPeer(PeerId),
/// The domain was unresolvable.
#[error("Domain name '{}' couldn't be resolved to an IP address", .0)]
UnresolvableDomain(String),
/// Parsing of a [`Multiaddr`] failed.
#[error("Parsing of '{}' to a Multiaddr failed.", 0)]
ParsingFailed(String),
/// The provided [`Multiaddr`] lacks the P2p [`Protocol`].
#[error("Invalid P2p Multiaddr. Did you forget to add '.../p2p/12D3Koo...'?")]
MissingP2pProtocol,
}
/// The network configuration.
#[derive(Clone)]
pub struct NetworkConfig {
pub(crate) bind_multiaddr: Multiaddr,
pub(crate) reconnect_interval_secs: u64,
pub(crate) max_unknown_peers: usize,
pub(crate) max_discovered_peers: usize,
pub(crate) static_peers: HashSet<Peer>,
}
impl NetworkConfig {
/// Creates a new [`NetworkConfig`].
pub fn new() -> Self {
Self::default()
}
/// Returns a [`NetworkConfigBuilder`] to construct a [`NetworkConfig`] iteratively.
pub fn build() -> NetworkConfigBuilder {
NetworkConfigBuilder::new()
}
/// Returns an in-memory config builder to construct a [`NetworkConfig`] iteratively.
#[cfg(test)]
pub fn build_in_memory() -> InMemoryNetworkConfigBuilder {
InMemoryNetworkConfigBuilder::new()
}
/// Replaces the address, but keeps the port of the bind address.
///
/// The argument `addr` must be either the `Ip4`, `Ip6`, or `Dns` variant of [`Protocol`].
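///
/// A minimal sketch (the address value is illustrative):
///
/// ```ignore
/// let mut config = NetworkConfig::new();
/// config.replace_addr(Protocol::Ip4("127.0.0.1".parse().unwrap()))?;
/// ```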
pub fn replace_addr(&mut self, mut addr: Protocol) -> Result<(), Error> {
if !matches!(addr, Protocol::Ip4(_) | Protocol::Ip6(_) | Protocol::Dns(_)) {
return Err(Error::InvalidAddressProtocol);
}
if let Protocol::Dns(dns) = addr {
addr = resolve_dns_multiaddr(dns)?;
}
// Panic:
// The builder ensures that the following unwraps are fine.
let port = self.bind_multiaddr.pop().unwrap();
let _ = self.bind_multiaddr.pop().unwrap();
self.bind_multiaddr.push(addr);
self.bind_multiaddr.push(port);
Ok(())
}
/// Replaces the port of the bind address.
///
/// The argument `port` must be the TCP variant of [`Protocol`].
pub fn replace_port(&mut self, port: Protocol) -> Result<(), Error> {
if !matches!(port, Protocol::Tcp(_)) {
return Err(Error::InvalidPortProtocol);
}
self.bind_multiaddr.pop();
self.bind_multiaddr.push(port);
Ok(())
}
/// Adds a static peer.
pub fn add_static_peer(
&mut self,
peer_id: PeerId,
multiaddr: Multiaddr,
alias: Option<String>,
) -> Result<(), Error> {
if !self.static_peers.insert(Peer {
peer_id,
multiaddr,
alias,
}) {
return Err(Error::DuplicateStaticPeer(peer_id));
}
Ok(())
}
/// Returns the configured bind address as a [`Multiaddr`].
pub fn bind_multiaddr(&self) -> &Multiaddr {
&self.bind_multiaddr
}
/// Returns the interval, in seconds, at which reconnect attempts occur.
pub fn reconnect_interval_secs(&self) -> u64 {
self.reconnect_interval_secs
}
/// Returns the maximum number of unknown peers that are allowed to connect.
pub fn max_unknown_peers(&self) -> usize {
self.max_unknown_peers
}
/// Returns the maximum number of discovered peers that are allowed to connect.
pub fn max_discovered_peers(&self) -> usize {
self.max_discovered_peers
}
/// Returns the statically configured peers.
pub fn static_peers(&self) -> &HashSet<Peer> {
&self.static_peers
}
}
fn resolve_dns_multiaddr(dns: Cow<'_, str>) -> Result<Protocol, Error> {
use std::net::{IpAddr, ToSocketAddrs};
match dns
.to_socket_addrs()
.map_err(|_| Error::UnresolvableDomain(dns.to_string()))?
.next()
.ok_or_else(|| Error::UnresolvableDomain(dns.to_string()))?
.ip()
{
IpAddr::V4(ip4) => return Ok(Protocol::Ip4(ip4)),
IpAddr::V6(ip6) => return Ok(Protocol::Ip6(ip6)),
}
}
impl Default for NetworkConfig {
fn default() -> Self {
Self {
// Panic:
// Unwrapping is fine, because we made sure that the default is parsable.
bind_multiaddr: DEFAULT_BIND_MULTIADDR.parse().unwrap(),
reconnect_interval_secs: DEFAULT_RECONNECT_INTERVAL_SECS,
max_unknown_peers: DEFAULT_MAX_UNKNOWN_PEERS,
max_discovered_peers: DEFAULT_MAX_DISCOVERED_PEERS,
static_peers: Default::default(),
}
}
}
/// A network configuration builder.
#[derive(Default, Deserialize)]
#[must_use]
pub struct NetworkConfigBuilder {
#[serde(rename = "bind_address")]
bind_multiaddr: Option<Multiaddr>,
reconnect_interval_secs: Option<u64>,
max_unknown_peers: Option<usize>,
max_discovered_peers: Option<usize>,
peering: ManualPeeringConfigBuilder,
}
impl NetworkConfigBuilder {
/// Creates a new default builder.
pub fn new() -> Self {
Self::default()
}
/// Specifies the bind address.
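///
/// A minimal sketch (the address value is illustrative):
///
/// ```ignore
/// let config = NetworkConfig::build()
///     .with_bind_multiaddr("/ip4/127.0.0.1/tcp/1337".parse().unwrap())?
///     .finish()?;
/// ```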
pub fn with_bind_multiaddr(mut self, mut multiaddr: Multiaddr) -> Result<Self, Error> {
let mut valid = false;
let mut is_dns = false;
for (i, p) in multiaddr.iter().enumerate() {
match i {
0 => {
if !matches!(p, Protocol::Ip4(_) | Protocol::Ip6(_) | Protocol::Dns(_)) {
return Err(Error::InvalidProtocol(0));
}
if matches!(p, Protocol::Dns(_)) {
is_dns = true;
}
}
1 => {
if !matches!(p, Protocol::Tcp(_)) {
return Err(Error::InvalidProtocol(1));
}
valid = true;
}
_ => return Err(Error::MultiaddrOverspecified),
}
}
if !valid {
return Err(Error::MultiaddrUnderspecified);
}
if is_dns {
let port = multiaddr.pop().unwrap();
let port = if let Protocol::Tcp(port) = port {
port
} else {
unreachable!("already checked");
};
// Panic:
// We know at this point, that `multiaddr` is valid, so unwrapping is fine.
let ip = if let Protocol::Dns(dns) = multiaddr.pop().unwrap() {
let socket_dns = {
let mut socket_addr = String::with_capacity(16);
socket_addr.push_str(&dns);
socket_addr.push(':');
socket_addr.push_str(&port.to_string());
socket_addr
};
resolve_dns_multiaddr(socket_dns.into())?
} else {
unreachable!("already checked");
};
multiaddr.push(ip);
multiaddr.push(Protocol::Tcp(port));
}
self.bind_multiaddr.replace(multiaddr);
Ok(self)
}
/// Specifies the interval (in seconds) at which known peers are automatically reconnected if possible.
///
/// The allowed minimum value for the `secs` argument is `1`.
pub fn with_reconnect_interval_secs(mut self, secs: u64) -> Self {
let secs = secs.max(MIN_RECONNECT_INTERVAL_SECS);
self.reconnect_interval_secs.replace(secs);
self
}
/// Specifies the maximum number of gossip connections with unknown peers.
pub fn with_max_unknown_peers(mut self, n: usize) -> Self {
self.max_unknown_peers.replace(n);
self
}
/// Specifies the maximum number of gossip connections with discovered peers.
pub fn with_max_discovered_peers(mut self, n: usize) -> Self {
self.max_discovered_peers.replace(n);
self
}
/// Builds the network config.
pub fn finish(self) -> Result<NetworkConfig, Error> {
Ok(NetworkConfig {
bind_multiaddr: self
.bind_multiaddr
// Panic:
// We made sure that the default is parsable.
.unwrap_or_else(|| DEFAULT_BIND_MULTIADDR.parse().unwrap()),
reconnect_interval_secs: self.reconnect_interval_secs.unwrap_or(DEFAULT_RECONNECT_INTERVAL_SECS),
max_unknown_peers: self.max_unknown_peers.unwrap_or(DEFAULT_MAX_UNKNOWN_PEERS),
max_discovered_peers: self.max_discovered_peers.unwrap_or(DEFAULT_MAX_DISCOVERED_PEERS),
static_peers: self.peering.finish()?.peers,
})
}
}
/// An in-memory network config builder, useful for integration testing.
#[cfg(test)]
#[derive(Default)]
#[must_use]
pub struct InMemoryNetworkConfigBuilder {
bind_multiaddr: Option<Multiaddr>,
}
#[cfg(test)]
impl InMemoryNetworkConfigBuilder {
/// Creates a new default builder.
pub fn new() -> Self {
Self::default()
}
/// Specifies the bind address.
pub fn with_bind_multiaddr(mut self, multiaddr: Multiaddr) -> Self {
for (i, p) in multiaddr.iter().enumerate() {
match i {
0 => {
if !matches!(p, Protocol::Memory(_)) {
panic!("Invalid Multiaddr")
}
}
_ => panic!("Invalid Multiaddr"),
}
}
self.bind_multiaddr.replace(multiaddr);
self
}
/// Builds the in-memory network config.
#[must_use]
pub fn finish(self) -> NetworkConfig {
const DEFAULT_BIND_MULTIADDR_MEM: &str = "/memory/0";
NetworkConfig {
bind_multiaddr: self
.bind_multiaddr
.unwrap_or_else(|| DEFAULT_BIND_MULTIADDR_MEM.parse().unwrap()),
reconnect_interval_secs: DEFAULT_RECONNECT_INTERVAL_SECS,
max_unknown_peers: DEFAULT_MAX_UNKNOWN_PEERS,
max_discovered_peers: DEFAULT_MAX_DISCOVERED_PEERS,
static_peers: Default::default(),
}
}
}
#[derive(Clone)]
pub struct ManualPeeringConfig {
pub peers: HashSet<Peer>,
}
#[derive(Clone)]
pub struct Peer {
pub peer_id: PeerId,
pub multiaddr: Multiaddr,
pub alias: Option<String>,
}
impl Eq for Peer {}
impl PartialEq for Peer {
fn eq(&self, other: &Self) -> bool {
self.peer_id.eq(&other.peer_id)
}
}
impl std::hash::Hash for Peer {
fn hash<H: std::hash::Hasher>(&self, state: &mut H)
|
}
#[derive(Default, Deserialize)]
#[must_use]
pub struct ManualPeeringConfigBuilder {
pub peers: Option<Vec<PeerBuilder>>,
}
impl ManualPeeringConfigBuilder {
pub fn finish(self) -> Result<ManualPeeringConfig, Error> {
let peers = match self.peers {
None => Default::default(),
Some(peer_builders) => {
// NOTE: Switch back to combinators once `map_while` is stable.
let mut peers = HashSet::with_capacity(peer_builders.len());
for builder in peer_builders {
let (multiaddr, peer_id) = split_multiaddr(&builder.multiaddr)?;
if !peers.insert(Peer {
peer_id,
multiaddr,
alias: builder.alias,
}) {
return Err(Error::DuplicateStaticPeer(peer_id));
}
}
peers
}
};
Ok(ManualPeeringConfig { peers })
}
}
fn split_multiaddr(multiaddr: &str) -> Result<(Multiaddr, PeerId), Error> {
let mut multiaddr: Multiaddr = multiaddr
.parse()
.map_err(|_| Error::ParsingFailed(multiaddr.to_string()))?;
if let Protocol::P2p(multihash) = multiaddr.pop().ok_or(Error::MultiaddrUnderspecified)? {
Ok((
multiaddr,
PeerId::from_multihash(multihash).expect("Invalid peer Multiaddr: Make sure your peer's Id is complete."),
))
} else {
Err(Error::MissingP2pProtocol)
}
}
#[derive(Deserialize)]
#[must_use]
pub struct PeerBuilder {
#[serde(rename = "address")]
multiaddr: String,
alias: Option<String>,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn create_default_network_config() {
let config = NetworkConfig::default();
assert_eq!(
config.bind_multiaddr(),
&DEFAULT_BIND_MULTIADDR.parse::<Multiaddr>().unwrap()
);
}
#[test]
#[should_panic]
fn create_with_builder_and_too_short_bind_address() {
let _config = NetworkConfig::build()
.with_bind_multiaddr("/ip4/127.0.0.1".parse().unwrap())
.unwrap()
.finish();
}
#[test]
#[should_panic]
fn create_with_builder_and_too_long_bind_address() {
let _config = NetworkConfig::build()
.with_bind_multiaddr(
"/ip4/127.0.0.1/p2p/12D3KooWJWEKvSFbben74C7H4YtKjhPMTDxd7gP7zxWSUEeF27st"
.parse()
.unwrap(),
)
.unwrap()
.finish();
}
#[test]
fn create_with_builder_and_valid_ip_bind_address() {
let _config = NetworkConfig::build()
.with_bind_multiaddr("/ip4/127.0.0.1/tcp/1337".parse().unwrap())
.unwrap()
.finish();
}
#[test]
fn create_with_builder_and_valid_dns_bind_address() {
let _config = NetworkConfig::build()
.with_bind_multiaddr("/dns/localhost/tcp/1337".parse().unwrap())
.unwrap()
.finish();
}
#[test]
#[should_panic]
fn create_with_mem_builder_and_non_mem_multiaddr() {
let _config = NetworkConfig::build_in_memory()
.with_bind_multiaddr("/ip4/127.0.0.1/tcp/1337".parse().unwrap())
.finish();
}
#[test]
fn create_with_mem_builder_and_valid_mem_multiaddr() {
let _config = NetworkConfig::build_in_memory()
.with_bind_multiaddr("/memory/1337".parse().unwrap())
.finish();
}
}
|
{
self.peer_id.hash(state)
}
|
array.go
|
package arrays
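// Contains reports whether str is present in slice.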
func Contains(slice []string, str string) bool {
for _, value := range slice {
if value == str
|
}
return false
}
|
{
return true
}
|
response_policy_decision.rs
|
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/wusyong/gir-files)
// DO NOT EDIT
use crate::{PolicyDecision, URIRequest, URIResponse};
use glib::{
|
translate::*,
};
use std::{boxed::Box as Box_, fmt, mem::transmute};
glib::wrapper! {
#[doc(alias = "WebKitResponsePolicyDecision")]
pub struct ResponsePolicyDecision(Object<ffi::WebKitResponsePolicyDecision, ffi::WebKitResponsePolicyDecisionClass>) @extends PolicyDecision;
match fn {
type_ => || ffi::webkit_response_policy_decision_get_type(),
}
}
pub const NONE_RESPONSE_POLICY_DECISION: Option<&ResponsePolicyDecision> = None;
pub trait ResponsePolicyDecisionExt: 'static {
#[doc(alias = "webkit_response_policy_decision_get_request")]
#[doc(alias = "get_request")]
fn request(&self) -> Option<URIRequest>;
#[doc(alias = "webkit_response_policy_decision_get_response")]
#[doc(alias = "get_response")]
fn response(&self) -> Option<URIResponse>;
#[cfg(any(feature = "v2_4", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v2_4")))]
#[doc(alias = "webkit_response_policy_decision_is_mime_type_supported")]
fn is_mime_type_supported(&self) -> bool;
#[doc(alias = "request")]
fn connect_request_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
#[doc(alias = "response")]
fn connect_response_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<ResponsePolicyDecision>> ResponsePolicyDecisionExt for O {
fn request(&self) -> Option<URIRequest> {
unsafe {
from_glib_none(ffi::webkit_response_policy_decision_get_request(
self.as_ref().to_glib_none().0,
))
}
}
fn response(&self) -> Option<URIResponse> {
unsafe {
from_glib_none(ffi::webkit_response_policy_decision_get_response(
self.as_ref().to_glib_none().0,
))
}
}
#[cfg(any(feature = "v2_4", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v2_4")))]
fn is_mime_type_supported(&self) -> bool {
unsafe {
from_glib(ffi::webkit_response_policy_decision_is_mime_type_supported(
self.as_ref().to_glib_none().0,
))
}
}
fn connect_request_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_request_trampoline<
P: IsA<ResponsePolicyDecision>,
F: Fn(&P) + 'static,
>(
this: *mut ffi::WebKitResponsePolicyDecision,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(ResponsePolicyDecision::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::request\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_request_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_response_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_response_trampoline<
P: IsA<ResponsePolicyDecision>,
F: Fn(&P) + 'static,
>(
this: *mut ffi::WebKitResponsePolicyDecision,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) {
let f: &F = &*(f as *const F);
f(ResponsePolicyDecision::from_glib_borrow(this).unsafe_cast_ref())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::response\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_response_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
}
impl fmt::Display for ResponsePolicyDecision {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("ResponsePolicyDecision")
}
}
|
object::{Cast, IsA},
signal::{connect_raw, SignalHandlerId},
|
handler.go
|
package rpc
import (
"encoding/hex"
"encoding/json"
"io/ioutil"
"net/http"
"strconv"
"github.com/s1na/nano/account"
"github.com/s1na/nano/blocks"
"github.com/s1na/nano/types"
"github.com/s1na/nano/types/uint128"
"github.com/s1na/nano/wallet"
"github.com/pkg/errors"
"github.com/tidwall/gjson"
)
type handlerFn func(http.ResponseWriter, *gjson.Result) error
type Handler struct {
fns map[string]handlerFn
}
func NewHandler() *Handler {
h := new(Handler)
h.registerHandlers()
return h
}
func (h *Handler) registerHandlers() {
h.fns = map[string]handlerFn{
"account_get": handlerFn(accountGet),
"wallet_create": handlerFn(walletCreate),
"wallet_add": handlerFn(walletAdd),
"send": handlerFn(send),
}
}
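// Example request body for the "account_get" action (key value is hypothetical):
//
//	{"action": "account_get", "key": "ab12...hex-encoded public key"}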
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
respErr(w, "Error in reading request")
return
}
bs := string(b)
if !gjson.Valid(bs) {
respErr(w, "Invalid json")
return
}
body := gjson.Parse(bs)
action := body.Get("action").String()
handler, ok := h.fns[action]
if !ok {
respErr(w, "Action not found")
return
}
if err := handler(w, &body); err != nil {
respErr(w, err.Error())
}
}
func accountGet(w http.ResponseWriter, body *gjson.Result) error {
res := make(map[string]string)
v := body.Get("key").String()
pubBytes, err := hex.DecodeString(v)
if err != nil {
return errors.New("invalid key")
}
res["account"] = types.PubKey(pubBytes).Address()
json.NewEncoder(w).Encode(res)
return nil
}
func walletCreate(w http.ResponseWriter, body *gjson.Result) error {
res := make(map[string]string)
wal := wallet.NewWallet()
id, err := wal.Init()
if err != nil
|
ws := wallet.NewWalletStore(db)
if err = ws.SetWallet(wal); err != nil {
return errors.New("internal error")
}
walletsCh <- wal
res["wallet"] = id.Hex()
json.NewEncoder(w).Encode(res)
return nil
}
func walletAdd(w http.ResponseWriter, body *gjson.Result) error {
res := make(map[string]string)
wid, err := types.PubKeyFromHex(body.Get("wallet").String())
if err != nil {
return err
}
key, err := types.PrvKeyFromString(body.Get("key").String())
if err != nil {
return err
}
ws := wallet.NewWalletStore(db)
wal, err := ws.GetWallet(wid)
if err != nil {
return err
}
pub, prv, err := types.KeypairFromPrvKey(key)
if err != nil {
return err
}
wal.InsertAdhoc(pub, prv)
if err = ws.SetWallet(wal); err != nil {
return errors.New("internal error")
}
walletsCh <- wal
res["account"] = pub.Address()
json.NewEncoder(w).Encode(res)
return nil
}
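// Example request body for the "send" action (all values are hypothetical):
//
//	{"action": "send", "wallet": "<hex wallet id>", "source": "xrb_1...", "destination": "xrb_3...", "amount": "1000000"}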
func send(w http.ResponseWriter, body *gjson.Result) error {
res := make(map[string]string)
wid, err := types.PubKeyFromHex(body.Get("wallet").String())
if err != nil {
return err
}
source, err := types.PubKeyFromAddress(body.Get("source").String())
if err != nil {
return err
}
dest, err := types.PubKeyFromAddress(body.Get("destination").String())
if err != nil {
return err
}
auint64, err := strconv.ParseUint(body.Get("amount").String(), 10, 64)
if err != nil {
return err
}
amount := uint128.FromInts(0, auint64)
ws := wallet.NewWalletStore(db)
wal, err := ws.GetWallet(wid)
if err != nil {
return errors.New("wallet not found")
}
ok := wal.HasAccount(source.Address())
if !ok {
return errors.New("account not found")
}
as := account.NewAccountStore(db)
acc, err := as.GetAccount(source)
if err != nil {
return errors.New("account info not found")
}
b := &blocks.SendBlock{
Previous: acc.Head,
Destination: dest,
Balance: acc.Balance.Sub(amount),
CommonBlock: blocks.CommonBlock{
Account: source,
},
}
b.Work = types.GenerateWorkForHash(acc.Head)
b.Signature = wal.Accounts[source.Address()].Sign(b.Hash().Slice())
blocksCh <- b
res["sent"] = "true"
json.NewEncoder(w).Encode(res)
return nil
}
|
{
return errors.New("internal error")
}
|
version.go
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"encoding/json"
"fmt"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"sigs.k8s.io/cluster-api/version"
"sigs.k8s.io/yaml"
)
// Version provides the version information of clusterctl
type Version struct {
ClientVersion *version.Info `json:"clusterctl"`
}
type versionOptions struct {
output string
}
var vo = &versionOptions{}
var versionCmd = &cobra.Command{
Use: "version",
Short: "Print clusterctl version.",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runVersion()
},
}
func init() {
versionCmd.Flags().StringVarP(&vo.output, "output", "o", "", "Output format; available options are 'yaml', 'json' and 'short'")
|
clientVersion := version.Get()
v := Version{
ClientVersion: &clientVersion,
}
switch vo.output {
case "":
fmt.Printf("clusterctl version: %#v\n", v.ClientVersion)
case "short":
fmt.Printf("%s\n", v.ClientVersion.GitVersion)
case "yaml":
y, err := yaml.Marshal(&v)
if err != nil {
return err
}
fmt.Print(string(y))
case "json":
y, err := json.MarshalIndent(&v, "", " ")
if err != nil {
return err
}
fmt.Println(string(y))
default:
return errors.Errorf("invalid output format: %s", vo.output)
}
return nil
}
|
RootCmd.AddCommand(versionCmd)
}
func runVersion() error {
|
metrics.go
|
/*
Copyright 2019 Red Hat Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package migmigration
import (
"time"
migapi "github.com/konveyor/mig-controller/pkg/apis/migration/v1alpha1"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
// 'status' - [ idle, running, completed, error ]
// 'type' - [ stage, final ]
migrationGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "cam_app_workload_migrations",
Help: "Count of MigMigrations sorted by status and type",
},
[]string{"type", "status"},
)
)
func recordMetrics(client client.Client) {
const (
// Metrics const values
// Separate from mig-controller consts to keep a stable interface for metrics systems
// configured to pull from static metrics endpoints.
// Migration Type
stage = "stage"
final = "final"
// Migration Status
idle = "idle"
running = "running"
completed = "completed"
failed = "failed"
)
go func() {
for {
time.Sleep(10 * time.Second)
// get all migmigration objects
migrations, err := migapi.ListMigrations(client)
// if error occurs, retry 10 seconds later
if err != nil {
continue
}
// Holding counter vars used to make gauge update "atomic"
var stageIdle, stageRunning, stageCompleted, stageFailed float64
|
// for all migmigrations, count # in idle, running, completed, failed
for _, m := range migrations {
// Stage
if m.Spec.Stage && m.Status.HasCondition(Running) {
stageRunning++
continue
}
if m.Spec.Stage && m.Status.HasCondition(Succeeded) {
stageCompleted++
continue
}
if m.Spec.Stage && m.Status.HasCondition(Failed) {
stageFailed++
continue
}
if m.Spec.Stage {
stageIdle++
continue
}
// Final
if !m.Spec.Stage && m.Status.HasCondition(Running) {
finalRunning++
continue
}
if !m.Spec.Stage && m.Status.HasCondition(Succeeded) {
finalCompleted++
continue
}
if !m.Spec.Stage && m.Status.HasCondition(Failed) {
finalFailed++
continue
}
if !m.Spec.Stage {
finalIdle++
continue
}
}
// Stage
migrationGauge.With(
prometheus.Labels{"type": stage, "status": idle}).Set(stageIdle)
migrationGauge.With(
prometheus.Labels{"type": stage, "status": running}).Set(stageRunning)
migrationGauge.With(
prometheus.Labels{"type": stage, "status": completed}).Set(stageCompleted)
migrationGauge.With(
prometheus.Labels{"type": stage, "status": failed}).Set(stageFailed)
// Final
migrationGauge.With(
prometheus.Labels{"type": final, "status": idle}).Set(finalIdle)
migrationGauge.With(
prometheus.Labels{"type": final, "status": running}).Set(finalRunning)
migrationGauge.With(
prometheus.Labels{"type": final, "status": completed}).Set(finalCompleted)
migrationGauge.With(
prometheus.Labels{"type": final, "status": failed}).Set(finalFailed)
}
}()
}
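// Hypothetical wiring sketch (not part of this file): promauto registers the
// gauge with prometheus.DefaultRegisterer, so exposing it is typically just
//
//   http.Handle("/metrics", promhttp.Handler())
//   _ = http.ListenAndServe(":2112", nil)
//
// with promhttp from github.com/prometheus/client_golang/prometheus/promhttp.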
|
var finalIdle, finalRunning, finalCompleted, finalFailed float64
|
resnet.py
|
import torch
import torch.nn as nn
from typing import Dict, List
from functools import partial
from fvcore.common.config import CfgNode
from giung2.layers import *
__all__ = [
"build_resnet_backbone",
]
class IdentityShortcut(nn.Module):
def __init__(
self,
in_planes: int,
planes: int,
stride: int,
expansion: int,
conv: nn.Module = Conv2d,
norm: nn.Module = BatchNorm2d,
relu: nn.Module = ReLU,
**kwargs
) -> None:
super(IdentityShortcut, self).__init__()
self.identity = MaxPool2d(kernel_size=1, stride=stride)
self.pad_size = expansion * planes - in_planes
def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
out = self.identity(x)
out = nn.functional.pad(out, (0, 0, 0, 0, 0, self.pad_size), mode="constant", value=0)
return out
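# Shape sketch (illustrative numbers, not from this repo): with in_planes=16,
# planes=32, stride=2, expansion=1, a (N, 16, 32, 32) input becomes
# (N, 16, 16, 16) after the stride-2 max-pool, and zero-padding the channel
# dim then yields (N, 32, 16, 16) -- the parameter-free "option A" shortcut.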
class ProjectionShortcut(nn.Module):
def __init__(
self,
in_planes: int,
planes: int,
stride: int,
expansion: int,
conv: nn.Module = Conv2d,
norm: nn.Module = BatchNorm2d,
relu: nn.Module = ReLU,
**kwargs
) -> None:
super(ProjectionShortcut, self).__init__()
self.conv = conv(in_channels=in_planes, out_channels=expansion*planes,
kernel_size=1, stride=stride, padding=0, **kwargs)
self.norm = norm(num_features=expansion*planes)
def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
out = self.norm(self.conv(x, **kwargs), **kwargs)
return out
class FirstBlock(nn.Module):
def
|
(
self,
in_planes: int,
planes: int,
conv: nn.Module,
conv_ksp: List[int],
norm: nn.Module,
relu: nn.Module,
pool: nn.Module,
pool_ksp: List[int],
**kwargs
) -> None:
super(FirstBlock, self).__init__()
self.conv1 = conv(in_channels=in_planes, out_channels=planes,
kernel_size=conv_ksp[0], stride=conv_ksp[1], padding=conv_ksp[2], **kwargs)
self.norm1 = norm(num_features=planes)
self.relu1 = relu()
self.pool1 = pool(kernel_size=pool_ksp[0], stride=pool_ksp[1], padding=pool_ksp[2])
def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
out = self.pool1(self.relu1(self.norm1(self.conv1(x, **kwargs), **kwargs), **kwargs), **kwargs)
return out
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
in_planes: int,
planes: int,
stride: int,
shortcut: nn.Module,
conv: nn.Module = Conv2d,
norm: nn.Module = BatchNorm2d,
relu: nn.Module = ReLU,
**kwargs
) -> None:
super(BasicBlock,self).__init__()
self.conv1 = conv(in_channels=in_planes, out_channels=planes,
kernel_size=3, stride=stride, padding=1, **kwargs)
self.norm1 = norm(num_features=planes)
self.relu1 = relu()
self.conv2 = conv(in_channels=planes, out_channels=self.expansion*planes,
kernel_size=3, stride=1, padding=1, **kwargs)
self.norm2 = norm(num_features=self.expansion*planes)
self.relu2 = relu()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = shortcut(
in_planes, planes, stride, self.expansion, conv, norm, **kwargs
)
else:
self.shortcut = Identity()
def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
out = self.relu1(self.norm1(self.conv1(x, **kwargs), **kwargs), **kwargs)
out = self.relu2(self.norm2(self.conv2(out, **kwargs), **kwargs) + self.shortcut(x, **kwargs), **kwargs)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
in_planes: int,
planes: int,
stride: int,
shortcut: nn.Module,
conv: nn.Module = Conv2d,
norm: nn.Module = BatchNorm2d,
relu: nn.Module = ReLU,
**kwargs
) -> None:
super(Bottleneck,self).__init__()
self.conv1 = conv(in_channels=in_planes, out_channels=planes,
kernel_size=1, stride=1, padding=0, **kwargs)
self.norm1 = norm(num_features=planes)
self.relu1 = relu()
self.conv2 = conv(in_channels=planes, out_channels=planes,
kernel_size=3, stride=stride, padding=1, **kwargs)
self.norm2 = norm(num_features=planes)
self.relu2 = relu()
self.conv3 = conv(in_channels=planes, out_channels=self.expansion*planes,
kernel_size=1, stride=1, padding=0, **kwargs)
self.norm3 = norm(num_features=self.expansion*planes)
self.relu3 = relu()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = shortcut(
in_planes, planes, stride, self.expansion, conv, norm, **kwargs
)
else:
self.shortcut = Identity()
def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
out = self.relu1(self.norm1(self.conv1(x, **kwargs), **kwargs), **kwargs)
out = self.relu2(self.norm2(self.conv2(out, **kwargs), **kwargs), **kwargs)
out = self.relu3(self.norm3(self.conv3(out, **kwargs), **kwargs) + self.shortcut(x, **kwargs), **kwargs)
return out
class ResNet(nn.Module):
def __init__(
self,
channels: int,
in_planes: int,
first_block: nn.Module,
block: nn.Module,
shortcut: nn.Module,
num_blocks: List[int],
widen_factor: int,
conv: nn.Module = Conv2d,
norm: nn.Module = BatchNorm2d,
relu: nn.Module = ReLU,
**kwargs
) -> None:
super(ResNet, self).__init__()
self.channels = channels
self.in_planes = in_planes
self._in_planes = in_planes
self.first_block = first_block
self.block = block
self.shortcut = shortcut
self.num_blocks = num_blocks
self.widen_factor = widen_factor
self.conv = conv
self.norm = norm
self.relu = relu
_layers = [self.first_block(in_planes=self.channels, planes=self.in_planes, **kwargs)]
_layers += self._make_layer(
self.in_planes * self.widen_factor, self.num_blocks[0], stride=1, **kwargs
)
for idx, num_block in enumerate(self.num_blocks[1:], start=1):
_layers += self._make_layer(
self.in_planes * (2 ** idx) * self.widen_factor, num_block, stride=2, **kwargs
)
self.layers = nn.Sequential(*_layers)
def _make_layer(self, planes: int, num_block: int, stride: int, **kwargs) -> List[nn.Module]:
strides = [stride] + [1] * (num_block - 1)
_layers = []
for stride in strides:
_layers.append(self.block(self._in_planes, planes, stride,
self.shortcut, self.conv, self.norm, self.relu, **kwargs))
self._in_planes = planes * self.block.expansion
return _layers
def forward(self, x: torch.Tensor, **kwargs) -> Dict[str, torch.Tensor]:
outputs = dict()
# intermediate feature maps
for layer_idx, layer in enumerate(self.layers):
x = layer(x, **kwargs)
outputs[f"layer{layer_idx}"] = x
# final feature vector
x = nn.functional.adaptive_avg_pool2d(x, (1, 1))
x = x.view(x.size(0), -1)
outputs["features"] = x
return outputs
def build_resnet_backbone(cfg: CfgNode) -> nn.Module:
# Conv2d layers may be replaced by its variations
_conv_layers = cfg.MODEL.BACKBONE.RESNET.CONV_LAYERS
kwargs = {
"bias": cfg.MODEL.BACKBONE.RESNET.CONV_LAYERS_BIAS,
"same_padding": cfg.MODEL.BACKBONE.RESNET.CONV_LAYERS_SAME_PADDING,
}
if _conv_layers == "Conv2d":
conv_layers = Conv2d
elif _conv_layers == "Conv2d_Bezier":
conv_layers = Conv2d_Bezier
elif _conv_layers in ["Conv2d_BatchEnsemble", "Conv2d_BatchEnsembleV2",]:
if cfg.MODEL.BATCH_ENSEMBLE.ENABLED is False:
raise AssertionError(
f"Set MODEL.BATCH_ENSEMBLE.ENABLED=True to use {_conv_layers}"
)
if _conv_layers == "Conv2d_BatchEnsemble":
conv_layers = Conv2d_BatchEnsemble
if _conv_layers == "Conv2d_BatchEnsembleV2":
conv_layers = Conv2d_BatchEnsembleV2
kwargs.update({
"ensemble_size": cfg.MODEL.BATCH_ENSEMBLE.ENSEMBLE_SIZE,
"use_ensemble_bias": cfg.MODEL.BATCH_ENSEMBLE.USE_ENSEMBLE_BIAS,
"alpha_initializer": {
"initializer": cfg.MODEL.BATCH_ENSEMBLE.ALPHA_INITIALIZER.NAME,
"init_values": cfg.MODEL.BATCH_ENSEMBLE.ALPHA_INITIALIZER.VALUES,
},
"gamma_initializer": {
"initializer": cfg.MODEL.BATCH_ENSEMBLE.GAMMA_INITIALIZER.NAME,
"init_values": cfg.MODEL.BATCH_ENSEMBLE.GAMMA_INITIALIZER.VALUES,
},
})
elif _conv_layers == "Conv2d_Dropout":
if cfg.MODEL.DROPOUT.ENABLED is False:
raise AssertionError(
f"Set MODEL.DROPOUT.ENABLED=True to use {_conv_layers}"
)
conv_layers = Conv2d_Dropout
kwargs.update({
"drop_p": cfg.MODEL.DROPOUT.DROP_PROBABILITY,
})
elif _conv_layers == "Conv2d_SpatialDropout":
if cfg.MODEL.SPATIAL_DROPOUT.ENABLED is False:
raise AssertionError(
f"Set MODEL.SPATIAL_DROPOUT.ENABLED=True to use {_conv_layers}"
)
conv_layers = Conv2d_SpatialDropout
kwargs.update({
"drop_p": cfg.MODEL.SPATIAL_DROPOUT.DROP_PROBABILITY,
})
elif _conv_layers == "Conv2d_DropBlock":
if cfg.MODEL.DROP_BLOCK.ENABLED is False:
raise AssertionError(
f"Set MODEL.DROP_BLOCK.ENABLED=True to use {_conv_layers}"
)
conv_layers = Conv2d_DropBlock
kwargs.update({
"drop_p": cfg.MODEL.DROP_BLOCK.DROP_PROBABILITY,
"block_size": cfg.MODEL.DROP_BLOCK.BLOCK_SIZE,
"use_shared_masks": cfg.MODEL.DROP_BLOCK.USE_SHARED_MASKS,
})
else:
raise NotImplementedError(
f"Unknown MODEL.BACKBONE.RESNET.CONV_LAYERS: {_conv_layers}"
)
# BatchNorm2d layers may be replaced by its variations
_norm_layers = cfg.MODEL.BACKBONE.RESNET.NORM_LAYERS
if _norm_layers == "NONE":
norm_layers = Identity
elif _norm_layers == "BatchNorm2d":
norm_layers = BatchNorm2d
elif _norm_layers == "GroupNorm2d":
norm_layers = partial(GroupNorm2d, num_groups=cfg.MODEL.BACKBONE.RESNET.IN_PLANES // 2)
elif _norm_layers == "FilterResponseNorm2d":
norm_layers = FilterResponseNorm2d
elif _norm_layers == "FilterResponseNorm2d_Bezier":
norm_layers = FilterResponseNorm2d_Bezier
else:
raise NotImplementedError(
f"Unknown MODEL.BACKBONE.RESNET.NORM_LAYERS: {_norm_layers}"
)
# ReLU layers may be replaced by its variations
_activations = cfg.MODEL.BACKBONE.RESNET.ACTIVATIONS
if _activations == "NONE":
activations = Identity
elif _activations == "ReLU":
activations = ReLU
elif _activations == "SiLU":
activations = SiLU
else:
raise NotImplementedError(
f"Unknown MODEL.BACKBONE.RESNET.ACTIVATIONS: {_activations}"
)
# specify the first block
first_block = partial(
FirstBlock,
conv = conv_layers,
conv_ksp = cfg.MODEL.BACKBONE.RESNET.FIRST_BLOCK.CONV_KSP,
norm = norm_layers if cfg.MODEL.BACKBONE.RESNET.FIRST_BLOCK.USE_NORM_LAYER else Identity,
relu = activations if cfg.MODEL.BACKBONE.RESNET.FIRST_BLOCK.USE_ACTIVATION else Identity,
pool = MaxPool2d if cfg.MODEL.BACKBONE.RESNET.FIRST_BLOCK.USE_POOL_LAYER else Identity,
pool_ksp = cfg.MODEL.BACKBONE.RESNET.FIRST_BLOCK.POOL_KSP,
)
# specify block
_block = cfg.MODEL.BACKBONE.RESNET.BLOCK
if _block == "BasicBlock":
block = BasicBlock
elif _block == "Bottleneck":
block = Bottleneck
else:
raise NotImplementedError(
f"Unknown MODEL.BACKBONE.RESNET.BLOCK: {_block}"
)
# specify shortcut
_shortcut = cfg.MODEL.BACKBONE.RESNET.SHORTCUT
if _shortcut == "IdentityShortcut":
shortcut = IdentityShortcut
elif _shortcut == "ProjectionShortcut":
shortcut = ProjectionShortcut
else:
raise NotImplementedError(
f"Unknown MODEL.BACKBONE.RESNET.SHORTCUT: {_shortcut}"
)
# build backbone
backbone = ResNet(
channels = cfg.MODEL.BACKBONE.RESNET.CHANNELS,
in_planes = cfg.MODEL.BACKBONE.RESNET.IN_PLANES,
first_block = first_block,
block = block,
shortcut = shortcut,
num_blocks = cfg.MODEL.BACKBONE.RESNET.NUM_BLOCKS,
widen_factor = cfg.MODEL.BACKBONE.RESNET.WIDEN_FACTOR,
conv = conv_layers,
norm = norm_layers,
relu = activations,
**kwargs
)
# initialize weights
for m in backbone.modules():
if isinstance(m, Conv2d):
if isinstance(m.weight, nn.ParameterList):
for idx in range(len(m.weight)):
nn.init.kaiming_normal_(m.weight[idx], mode="fan_out", nonlinearity="relu")
else:
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
return backbone
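# Usage sketch (hypothetical cfg values; the real keys live in giung2's config
# defaults, only the ones read above are assumed):
#   backbone = build_resnet_backbone(cfg)
#   out = backbone(torch.randn(2, 3, 32, 32))
#   out["features"]  # pooled (2, C) feature vectors, alongside "layer{i}" maps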
|
__init__
|
utils.ts
|
import { utilsBr } from '../index';
import { expect } from 'chai';
describe('Utils test', () => {
it('Utils currencyToNumber Currency', () => {
const currency = utilsBr.currencyToNumber(' R$ 1.234.456,44 ');
expect(currency).to.be.equal(1234456.44);
});
|
const percent2 = utilsBr.currencyToNumber(' 12 % ');
expect(percent2).to.be.equal(12);
});
it('Utils numberToCurrency R$ 12,00', () => {
const currencyNumber = utilsBr.numberToCurrency(12);
expect(currencyNumber).to.be.equal(' R$ 12,00 ');
});
it('Utils numberToCurrency R$ 0.95 ', () => {
const currencyNumber = utilsBr.numberToCurrency(0.95);
expect(currencyNumber).to.be.equal(' R$ 0,95 ');
});
it('Utils numberToCurrency R$ 0,10 ', () => {
const currencyNumber = utilsBr.numberToCurrency(0.1);
expect(currencyNumber).to.be.equal(' R$ 0,10 ');
})
});
|
it('Utils currencyToNumber Porcentagem', () => {
const percent = utilsBr.currencyToNumber(' 1.234.456,44%');
expect(percent).to.be.equal(1234456.44);
|
graph_wsd_test_v1.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 7 17:13:25 2018
@author: dorgham
"""
import networkx as nx
from nltk.corpus import wordnet as wn
from nltk.corpus import wordnet_ic
from nltk.stem import WordNetLemmatizer
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
from collections import OrderedDict
import codecs
import string
from nltk.corpus import stopwords
from sklearn.metrics import f1_score, precision_score, recall_score
#algorithm parameters
USE_POS_INFO = True
USE_LESK = False
USE_PAGERANK = True
AVG_METHOD = 'micro'
MAX_DEPTH = 3
LESK_NORM_FACTOR = 20 # this value is empirical
senseval_fpath = 'WSD_Unified_Evaluation_Datasets/senseval2/senseval2.data.xml'
gold_tags_fpath = 'WSD_Unified_Evaluation_Datasets/senseval2/senseval2.gold.key.txt'
info_content = wordnet_ic.ic('ic-semcor.dat')
wnlemmatizer = WordNetLemmatizer()
pywsd_stopwords = [u"'s", u"``", u"`"]
STOPWORDS = set(stopwords.words('english') + list(string.punctuation) + pywsd_stopwords)
def lch_similarity(synset1, synset2):
return wn.lch_similarity(synset1, synset2)
def jcn_similarity(synset1, synset2):
|
def lesk_similarity(synset1, synset2):
str1 = str(synset1.definition()).translate(str.maketrans('','',string.punctuation))
for example in synset1.examples():
str1 += ' ' + str(example).translate(str.maketrans('','',string.punctuation))
lemmatized_str1=''
for word in set(str1.split()):
lemmatized_str1 += wnlemmatizer.lemmatize(word) + ' '
for lemma in synset1.lemma_names():
lemmatized_str1 += ' ' + lemma
hyper_hypo = set(synset1.hyponyms() + synset1.hypernyms() + synset1.instance_hyponyms() + synset1.instance_hypernyms())
for hh in hyper_hypo:
for lemma in hh.lemma_names():
lemmatized_str1 += ' ' + lemma
current_set = set(lemmatized_str1.split())
current_set = set(cs.lower() for cs in current_set)
current_set = current_set.difference(STOPWORDS)
#print (current_set)
str2 = str(synset2.definition()).translate(str.maketrans('','',string.punctuation))
for example in synset2.examples():
str2 += ' ' + str(example).translate(str.maketrans('','',string.punctuation))
lemmatized_str2=''
for word in set(str2.split()):
lemmatized_str2 += wnlemmatizer.lemmatize(word) + ' '
for lemma in synset2.lemma_names():
lemmatized_str2 += ' ' + lemma
hyper_hypo = set(synset2.hyponyms() + synset2.hypernyms() + synset2.instance_hyponyms() + synset2.instance_hypernyms())
for hh in hyper_hypo:
for lemma in hh.lemma_names():
lemmatized_str2 += ' ' + lemma
neighbor_set = set(lemmatized_str2.split())
neighbor_set = set(ns.lower() for ns in neighbor_set)
neighbor_set = neighbor_set.difference(STOPWORDS)
#print (neighbor_set)
return len(current_set.intersection(neighbor_set))
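# Illustrative example (WordNet 3.0 glosses): wn.synset('bank.n.01'), "sloping
# land (especially the slope beside a body of water)", and
# wn.synset('river.n.01'), "a large natural stream of water (larger than a
# creek)", share non-stopword tokens such as 'water', so the returned overlap
# count is small but nonzero.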
def convert_to_wordnet_pos(senseval_pos):
if senseval_pos == 'VERB':
return wn.VERB
elif senseval_pos == 'NOUN':
return wn.NOUN
elif senseval_pos == 'ADV':
return wn.ADV
elif senseval_pos == 'ADJ':
return wn.ADJ
else:
return None
def sentence_wsd(sentences, poses):
counter=0
output_dict = dict()
for sentence in sentences:
G=nx.Graph()
sent_len = len(sentence.keys())
G_pos = dict() #used for aligning the nodes when drawing the graph
pos_idx=1
token_nodeNames_map = dict()
pos_dict = poses[counter]
#construct the nodes of the graph
for i, _id in enumerate(sentence.keys()):
if USE_POS_INFO: #restrict the retrieved synsets from wordnet to the target pos
wn_pos = convert_to_wordnet_pos(pos_dict[_id])
else:
wn_pos = None
synsets_list = list(wn.synsets(sentence[_id], pos=wn_pos))
if len(synsets_list) > 0:
node_names = []
for synset in synsets_list:
node_name = str(i) + ' ' + synset.name()
#adding the index to the node name is important in the case of
#having a word that is repeated in the sentence but with
#different sense each time, so we want a unique node for each one.
G.add_node(node_name)
node_names.append(node_name)
token_nodeNames_map[_id] = node_names
G_pos.update( (label, (pos_idx, j)) for j, label in enumerate(node_names) )
pos_idx+=1
#compute word similarity
ids_list = list(sentence.keys())
lch_sim_dict = dict()
jcn_sim_dict = dict()
lesk_sim_dict = dict()
#print sentence.values()
for idx, key in enumerate(ids_list):
if USE_POS_INFO:
wn_pos = convert_to_wordnet_pos(pos_dict[ids_list[idx]])
else:
wn_pos = None
synsets_list = list(wn.synsets(sentence[ids_list[idx]], pos=wn_pos))
if len(synsets_list) > 0:
i = 1
while i<=MAX_DEPTH and idx+i<sent_len:
if USE_POS_INFO:
wn_pos = convert_to_wordnet_pos(pos_dict[ids_list[idx+i]])
else:
wn_pos = None
next_synsets_list = list(wn.synsets(sentence[ids_list[idx+i]], pos=wn_pos))
if len(next_synsets_list) > 0:
for current_synset in synsets_list:
for neighbor_synset in next_synsets_list:
nodes = str(idx) + ' ' + current_synset.name() + ';'
nodes += str(idx+i) + ' ' + neighbor_synset.name()
if current_synset.pos() == 'v' and neighbor_synset.pos() == 'v':
sim_weight = lch_similarity(current_synset, neighbor_synset)
lch_sim_dict[nodes] = sim_weight
elif current_synset.pos() == 'n' and neighbor_synset.pos() == 'n':
sim_weight = jcn_similarity(current_synset, neighbor_synset)
jcn_sim_dict[nodes] = sim_weight
elif USE_LESK:
sim_weight = lesk_similarity(current_synset, neighbor_synset)
lesk_sim_dict[nodes] = sim_weight
i+=1
#normalize the similarity weights and build edges
if lch_sim_dict:
max_lch_score = max(lch_sim_dict.values())
for key in lch_sim_dict:
nodeIds = key.split(';')
G.add_edge(nodeIds[0],nodeIds[1], weight=(lch_sim_dict[key]/max_lch_score))
if jcn_sim_dict:
max_jcn_score = max(jcn_sim_dict.values())
for key in jcn_sim_dict:
nodeIds = key.split(';')
G.add_edge(nodeIds[0],nodeIds[1], weight=(jcn_sim_dict[key]/max_jcn_score))
if USE_LESK:
if lesk_sim_dict:
max_lesk_score = max(lesk_sim_dict.values())
if max_lesk_score > 0:
for key in lesk_sim_dict:
nodeIds = key.split(';')
G.add_edge(nodeIds[0],nodeIds[1], weight=(lesk_sim_dict[key]/LESK_NORM_FACTOR))
#compute graph centrality
node_scores = dict()
if USE_PAGERANK:
node_scores = nx.pagerank(G)
else:
node_scores = G.degree(G.nodes(), "weight")
for token_id in ids_list:
nodeNames = token_nodeNames_map.get(token_id)
scores = []
max_label = ""
wordnet_key = ""
if nodeNames:
for nodeName in nodeNames:
scores.append(node_scores[nodeName])
if scores:
max_index = max(range(len(scores)), key=scores.__getitem__)
max_label = nodeNames[max_index]
if max_label:
i = max_label.find(' ')
lemmas = wn.synset(max_label[i+1:]).lemmas()
for lemma in lemmas:
wordnet_key += lemma.key()+';'
wordnet_key = wordnet_key[0:-1]
output_dict[token_id] = wordnet_key
#add the weight as attribute to the nodes of the graph
#for node in node_scores.keys():
# G.node[node]['weight']=node_scores[node]
counter += 1
if counter==1: #draw the graph of the first sentence
plt.close()
nx.draw(G, pos=G_pos, with_labels = True)
plt.show()
G.clear()
return output_dict
def load_senseval_data(file_path):
tokens_dict = OrderedDict()
pos_dict = OrderedDict()
sentences = []
pos_list = []
tree = ET.parse(file_path)
root = tree.getroot()
for text in root:
for sentence in text:
for word in sentence:
if word.tag == 'instance' and word.attrib['id']: #only include words with the <instance> tag
tokens_dict[word.attrib['id']] = word.text
pos_dict[word.attrib['id']] = word.attrib['pos']
if tokens_dict:
sentences.append(tokens_dict)
pos_list.append(pos_dict)
tokens_dict = dict()
pos_dict = dict()
return sentences, pos_list
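# Input sketch (abridged; the element structure is assumed from the parsing
# above -- only <instance> words are collected):
#   <corpus><text><sentence>
#     <instance id="d000.s000.t000" pos="NOUN">art</instance>
#     <wf pos="ADP">of</wf>
#   </sentence></text></corpus>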
if __name__ == "__main__":
sents, poses = load_senseval_data(senseval_fpath)
output_dict = sentence_wsd(sents, poses)
#load the gold results
with codecs.open(gold_tags_fpath, 'r', 'utf-8') as f:
lines = f.readlines()
wsd_output = []
gold_output = []
for line in lines:
id_key_pair = line.split()
predicted_keys = output_dict[id_key_pair[0]].split(';')
gold_keys_set = set(id_key_pair[1:])
predicted_keys_set = set(predicted_keys)
if len(predicted_keys_set.intersection(gold_keys_set)) > 0:
wsd_output.append(predicted_keys[0])
gold_output.append(predicted_keys[0])
else:
wsd_output.append(predicted_keys[0])
gold_output.append(id_key_pair[1])
assert len(wsd_output) == len(gold_output)
f1 = f1_score(gold_output, wsd_output, average=AVG_METHOD)
precision = precision_score(gold_output, wsd_output, average=AVG_METHOD)
recall = recall_score(gold_output, wsd_output, average=AVG_METHOD)
print ('F-score: %1.4f' % f1, ' Precision: %1.4f' % precision, ' Recall: %1.4f' % recall)
|
return wn.jcn_similarity(synset1, synset2, info_content)
|
mod.rs
|
/*!
# typeck: check phase
Within the check phase of type check, we check each item one at a time
(bodies of function expressions are checked as part of the containing
function). Inference is used to supply types wherever they are unknown.
By far the most complex case is checking the body of a function. This
can be broken down into several distinct phases:
- gather: creates type variables to represent the type of each local
variable and pattern binding.
- main: the main pass does the lion's share of the work: it
determines the types of all expressions, resolves
methods, checks for most invalid conditions, and so forth. In
some cases, where a type is unknown, it may create a type or region
variable and use that as the type of an expression.
In the process of checking, various constraints will be placed on
these type variables through the subtyping relationships requested
through the `demand` module. The `infer` module is in charge
of resolving those constraints.
- regionck: after main is complete, the regionck pass goes over all
types looking for regions and making sure that they did not escape
into places where they are not in scope. This may also influence the
final assignments of the various region variables if there is some
flexibility.
- vtable: finds and records the impls to use for each trait bound that
appears on a type parameter.
- writeback: writes the final types within a function body, replacing
type variables with their final inferred types. These final types
are written into the `tcx.node_types` table, which should *never* contain
any reference to a type variable.
## Intermediate types
While type checking a function, the intermediate types for the
expressions, blocks, and so forth contained within the function are
stored in `fcx.node_types` and `fcx.node_substs`. These types
may contain unresolved type variables. After type checking is
complete, the functions in the writeback module are used to take the
types from this table, resolve them, and then write them into their
permanent home in the type context `tcx`.
This means that during inferencing you should use `fcx.write_ty()`
and `fcx.expr_ty()` / `fcx.node_ty()` to write/obtain the types of
nodes within the function.
The types of top-level items, which never contain unbound type
variables, are stored directly into the `tcx` tables.
N.B., a type variable is not the same thing as a type parameter. A
type variable is rather an "instance" of a type parameter: that is,
given a generic function `fn foo<T>(t: T)`: while checking the
function `foo`, the type `ty_param(0)` refers to the type `T`, which
is treated in abstract. When `foo()` is called, however, `T` will be
substituted for a fresh type variable `N`. This variable will
eventually be resolved to some concrete type (which might itself be
a type parameter).
*/
mod autoderef;
pub mod dropck;
pub mod _match;
pub mod writeback;
mod regionck;
pub mod coercion;
pub mod demand;
pub mod method;
mod upvar;
mod wfcheck;
mod cast;
mod closure;
mod callee;
mod compare_method;
mod generator_interior;
pub mod intrinsic;
mod op;
use crate::astconv::{AstConv, PathSeg};
use errors::{Applicability, DiagnosticBuilder, DiagnosticId};
use rustc::hir::{self, ExprKind, GenericArg, ItemKind, Node, PatKind, QPath};
use rustc::hir::def::{CtorOf, CtorKind, Def};
use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap};
use rustc::hir::itemlikevisit::ItemLikeVisitor;
use crate::middle::lang_items;
use crate::namespace::Namespace;
use rustc::infer::{self, InferCtxt, InferOk, InferResult};
use rustc::infer::canonical::{Canonical, OriginalQueryValues, QueryResponse};
use rustc_data_structures::indexed_vec::Idx;
use rustc_data_structures::sync::Lrc;
use rustc_target::spec::abi::Abi;
use rustc::infer::opaque_types::OpaqueTypeDecl;
use rustc::infer::type_variable::{TypeVariableOrigin};
use rustc::middle::region;
use rustc::mir::interpret::{ConstValue, GlobalId};
use rustc::traits::{self, ObligationCause, ObligationCauseCode, TraitEngine};
use rustc::ty::{
self, AdtKind, CanonicalUserType, Ty, TyCtxt, GenericParamDefKind, Visibility,
ToPolyTraitRef, ToPredicate, RegionKind, UserType
};
use rustc::ty::adjustment::{
Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability, PointerCast
};
use rustc::ty::fold::TypeFoldable;
use rustc::ty::query::Providers;
use rustc::ty::subst::{UnpackedKind, Subst, InternalSubsts, SubstsRef, UserSelfTy, UserSubsts};
use rustc::ty::util::{Representability, IntTypeExt, Discr};
use rustc::ty::layout::VariantIdx;
use syntax_pos::{self, BytePos, Span, MultiSpan};
use syntax::ast;
use syntax::attr;
use syntax::feature_gate::{GateIssue, emit_feature_err};
use syntax::ptr::P;
use syntax::source_map::{DUMMY_SP, original_sp};
use syntax::symbol::{Symbol, LocalInternedString, keywords};
use syntax::util::lev_distance::find_best_match_for_name;
use std::cell::{Cell, RefCell, Ref, RefMut};
use std::collections::hash_map::Entry;
use std::cmp;
use std::fmt::Display;
use std::iter;
use std::mem::replace;
use std::ops::{self, Deref};
use std::slice;
use crate::require_c_abi_if_c_variadic;
use crate::session::Session;
use crate::session::config::EntryFnType;
use crate::TypeAndSubsts;
use crate::lint;
use crate::util::captures::Captures;
use crate::util::common::{ErrorReported, indenter};
use crate::util::nodemap::{DefIdMap, DefIdSet, FxHashMap, FxHashSet, HirIdMap};
pub use self::Expectation::*;
use self::autoderef::Autoderef;
use self::callee::DeferredCallResolution;
use self::coercion::{CoerceMany, DynamicCoerceMany};
pub use self::compare_method::{compare_impl_method, compare_const_impl};
use self::method::{MethodCallee, SelfSource};
use self::TupleArgumentsFlag::*;
/// The type of a local binding, including the revealed type for anon types.
#[derive(Copy, Clone)]
pub struct LocalTy<'tcx> {
decl_ty: Ty<'tcx>,
revealed_ty: Ty<'tcx>
}
/// A wrapper for `InferCtxt`'s `in_progress_tables` field.
#[derive(Copy, Clone)]
struct MaybeInProgressTables<'a, 'tcx: 'a> {
maybe_tables: Option<&'a RefCell<ty::TypeckTables<'tcx>>>,
}
impl<'a, 'tcx> MaybeInProgressTables<'a, 'tcx> {
fn borrow(self) -> Ref<'a, ty::TypeckTables<'tcx>> {
match self.maybe_tables {
Some(tables) => tables.borrow(),
None => {
bug!("MaybeInProgressTables: inh/fcx.tables.borrow() with no tables")
}
}
}
fn borrow_mut(self) -> RefMut<'a, ty::TypeckTables<'tcx>> {
match self.maybe_tables {
Some(tables) => tables.borrow_mut(),
None => {
bug!("MaybeInProgressTables: inh/fcx.tables.borrow_mut() with no tables")
}
}
}
}
/// Closures defined within the function. For example:
///
/// fn foo() {
/// bar(move|| { ... })
/// }
///
/// Here, the function `foo()` and the closure passed to
/// `bar()` will each have their own `FnCtxt`, but they will
/// share the inherited fields.
pub struct Inherited<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
infcx: InferCtxt<'a, 'gcx, 'tcx>,
tables: MaybeInProgressTables<'a, 'tcx>,
locals: RefCell<HirIdMap<LocalTy<'tcx>>>,
fulfillment_cx: RefCell<Box<dyn TraitEngine<'tcx>>>,
// Some additional `Sized` obligations badly affect type inference.
// These obligations are added in a later stage of typeck.
deferred_sized_obligations: RefCell<Vec<(Ty<'tcx>, Span, traits::ObligationCauseCode<'tcx>)>>,
// When we process a call like `c()` where `c` is a closure type,
// we may not have decided yet whether `c` is a `Fn`, `FnMut`, or
// `FnOnce` closure. In that case, we defer full resolution of the
// call until upvar inference can kick in and make the
// decision. We keep these deferred resolutions grouped by the
// def-id of the closure, so that once we decide, we can easily go
// back and process them.
deferred_call_resolutions: RefCell<DefIdMap<Vec<DeferredCallResolution<'gcx, 'tcx>>>>,
deferred_cast_checks: RefCell<Vec<cast::CastCheck<'tcx>>>,
deferred_generator_interiors: RefCell<Vec<(hir::BodyId, Ty<'tcx>)>>,
// Opaque types found in explicit return types and their
// associated fresh inference variable. Writeback resolves these
// variables to get the concrete type, which can be used to
// 'de-opaque' OpaqueTypeDecl, after typeck is done with all functions.
opaque_types: RefCell<DefIdMap<OpaqueTypeDecl<'tcx>>>,
/// Each type parameter has an implicit region bound that
/// indicates it must outlive at least the function body (the user
/// may specify stronger requirements). This field indicates the
/// region of the callee. If it is `None`, then the parameter
/// environment is for an item or something where the "callee" is
/// not clear.
implicit_region_bound: Option<ty::Region<'tcx>>,
body_id: Option<hir::BodyId>,
}
impl<'a, 'gcx, 'tcx> Deref for Inherited<'a, 'gcx, 'tcx> {
type Target = InferCtxt<'a, 'gcx, 'tcx>;
fn deref(&self) -> &Self::Target {
&self.infcx
}
}
/// When type-checking an expression, we propagate downward
/// whatever type hint we are able in the form of an `Expectation`.
#[derive(Copy, Clone, Debug)]
pub enum Expectation<'tcx> {
/// We know nothing about what type this expression should have.
NoExpectation,
/// This expression should have the type given (or some subtype).
ExpectHasType(Ty<'tcx>),
/// This expression will be cast to the `Ty`.
ExpectCastableToType(Ty<'tcx>),
/// This rvalue expression will be wrapped in `&` or `Box` and coerced
/// to `&Ty` or `Box<Ty>`, respectively. `Ty` is `[A]` or `Trait`.
ExpectRvalueLikeUnsized(Ty<'tcx>),
}
impl<'a, 'gcx, 'tcx> Expectation<'tcx> {
// Disregard "castable to" expectations because they
// can lead us astray. Consider for example `if cond
// {22} else {c} as u8` -- if we propagate the
// "castable to u8" constraint to 22, it will pick the
// type 22u8, which is overly constrained (c might not
// be a u8). In effect, the problem is that the
// "castable to" expectation is not the tightest thing
// we can say, so we want to drop it in this case.
// The tightest thing we can say is "must unify with
// else branch". Note that in the case of a "has type"
// constraint, this limitation does not hold.
// If the expected type is just a type variable, then don't use
// an expected type. Otherwise, we might write parts of the type
// when checking the 'then' block which are incompatible with the
// 'else' branch.
fn adjust_for_branches(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Expectation<'tcx> {
match *self {
ExpectHasType(ety) => {
let ety = fcx.shallow_resolve(ety);
if !ety.is_ty_var() {
ExpectHasType(ety)
} else {
NoExpectation
}
}
ExpectRvalueLikeUnsized(ety) => {
ExpectRvalueLikeUnsized(ety)
}
_ => NoExpectation
}
}
/// Provides an expectation for an rvalue expression given an *optional*
/// hint, which is not required for type safety (the resulting type might
/// be checked higher up, as is the case with `&expr` and `box expr`), but
/// is useful in determining the concrete type.
///
/// The primary use case is where the expected type is a fat pointer,
/// like `&[isize]`. For example, consider the following statement:
///
/// let x: &[isize] = &[1, 2, 3];
///
/// In this case, the expected type for the `&[1, 2, 3]` expression is
/// `&[isize]`. If however we were to say that `[1, 2, 3]` has the
/// expectation `ExpectHasType([isize])`, that would be too strong --
/// `[1, 2, 3]` does not have the type `[isize]` but rather `[isize; 3]`.
/// It is only the `&[1, 2, 3]` expression as a whole that can be coerced
/// to the type `&[isize]`. Therefore, we propagate this more limited hint,
/// which still is useful, because it informs integer literals and the like.
/// See the test case `test/run-pass/coerce-expect-unsized.rs` and #20169
/// for examples of where this comes up.
fn rvalue_hint(fcx: &FnCtxt<'a, 'gcx, 'tcx>, ty: Ty<'tcx>) -> Expectation<'tcx> {
match fcx.tcx.struct_tail(ty).sty {
ty::Slice(_) | ty::Str | ty::Dynamic(..) => {
ExpectRvalueLikeUnsized(ty)
}
_ => ExpectHasType(ty)
}
}
// Resolves `expected` by a single level if it is a variable. If
// there is no expected type or resolution is not possible (e.g.,
// no constraints yet present), just returns `None`.
fn resolve(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Expectation<'tcx> {
match self {
NoExpectation => NoExpectation,
ExpectCastableToType(t) => {
ExpectCastableToType(fcx.resolve_type_vars_if_possible(&t))
}
ExpectHasType(t) => {
ExpectHasType(fcx.resolve_type_vars_if_possible(&t))
}
ExpectRvalueLikeUnsized(t) => {
ExpectRvalueLikeUnsized(fcx.resolve_type_vars_if_possible(&t))
}
}
}
fn to_option(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Option<Ty<'tcx>> {
match self.resolve(fcx) {
NoExpectation => None,
ExpectCastableToType(ty) |
ExpectHasType(ty) |
ExpectRvalueLikeUnsized(ty) => Some(ty),
}
}
/// It sometimes happens that we want to turn an expectation into
/// a **hard constraint** (i.e., something that must be satisfied
/// for the program to type-check). `only_has_type` will return
/// such a constraint, if it exists.
fn only_has_type(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Option<Ty<'tcx>> {
match self.resolve(fcx) {
ExpectHasType(ty) => Some(ty),
NoExpectation | ExpectCastableToType(_) | ExpectRvalueLikeUnsized(_) => None,
}
}
/// Like `only_has_type`, but instead of returning `None` if no
/// hard constraint exists, creates a fresh type variable.
fn coercion_target_type(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>, span: Span) -> Ty<'tcx> {
self.only_has_type(fcx)
.unwrap_or_else(|| fcx.next_ty_var(TypeVariableOrigin::MiscVariable(span)))
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Needs {
MutPlace,
None
}
impl Needs {
fn maybe_mut_place(m: hir::Mutability) -> Self {
match m {
hir::MutMutable => Needs::MutPlace,
hir::MutImmutable => Needs::None,
}
}
}
#[derive(Copy, Clone)]
pub struct UnsafetyState {
pub def: hir::HirId,
pub unsafety: hir::Unsafety,
pub unsafe_push_count: u32,
from_fn: bool
}
impl UnsafetyState {
pub fn function(unsafety: hir::Unsafety, def: hir::HirId) -> UnsafetyState {
UnsafetyState { def: def, unsafety: unsafety, unsafe_push_count: 0, from_fn: true }
}
pub fn recurse(&mut self, blk: &hir::Block) -> UnsafetyState {
match self.unsafety {
// If this is unsafe, then if the outer function was already marked as
// unsafe we shouldn't attribute the unsafeness to the block. This
// way the block can be warned about instead of ignoring this
// extraneous block (functions are never warned about).
hir::Unsafety::Unsafe if self.from_fn => *self,
unsafety => {
let (unsafety, def, count) = match blk.rules {
hir::PushUnsafeBlock(..) =>
(unsafety, blk.hir_id, self.unsafe_push_count.checked_add(1).unwrap()),
hir::PopUnsafeBlock(..) =>
(unsafety, blk.hir_id, self.unsafe_push_count.checked_sub(1).unwrap()),
hir::UnsafeBlock(..) =>
(hir::Unsafety::Unsafe, blk.hir_id, self.unsafe_push_count),
hir::DefaultBlock =>
(unsafety, self.def, self.unsafe_push_count),
};
UnsafetyState{ def,
unsafety,
unsafe_push_count: count,
from_fn: false }
}
}
}
}
#[derive(Debug, Copy, Clone)]
pub enum PlaceOp {
Deref,
Index
}
/// Tracks whether executing a node may exit normally (versus
/// return/break/panic, which "diverge", leaving dead code in their
/// wake). Tracked semi-automatically (through type variables marked
/// as diverging), with some manual adjustments for control-flow
/// primitives (approximating a CFG).
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum Diverges {
/// Potentially unknown, some cases converge,
/// others require a CFG to determine them.
Maybe,
/// Definitely known to diverge and therefore
/// not reach the next sibling or its parent.
Always,
/// Same as `Always` but with a reachability
/// warning already emitted.
WarnedAlways
}
// Convenience impls for combining `Diverges`.
impl ops::BitAnd for Diverges {
type Output = Self;
fn bitand(self, other: Self) -> Self {
cmp::min(self, other)
}
}
impl ops::BitOr for Diverges {
type Output = Self;
fn bitor(self, other: Self) -> Self {
cmp::max(self, other)
}
}
impl ops::BitAndAssign for Diverges {
fn bitand_assign(&mut self, other: Self) {
*self = *self & other;
}
}
impl ops::BitOrAssign for Diverges {
fn bitor_assign(&mut self, other: Self) {
*self = *self | other;
}
}
impl Diverges {
fn always(self) -> bool {
self >= Diverges::Always
}
}
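// Illustrative semantics: since `Maybe < Always < WarnedAlways`, `&` (min)
// models branching (`Maybe & Always == Maybe`: both arms must diverge for the
// whole `if` to), while `|` (max) models sequencing (`Maybe | Always ==
// Always`: one diverging statement makes the rest of the block unreachable).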
pub struct BreakableCtxt<'gcx: 'tcx, 'tcx> {
may_break: bool,
// this is `null` for loops where break with a value is illegal,
// such as `while`, `for`, and `while let`
coerce: Option<DynamicCoerceMany<'gcx, 'tcx>>,
}
pub struct EnclosingBreakables<'gcx: 'tcx, 'tcx> {
stack: Vec<BreakableCtxt<'gcx, 'tcx>>,
by_id: HirIdMap<usize>,
}
impl<'gcx, 'tcx> EnclosingBreakables<'gcx, 'tcx> {
fn find_breakable(&mut self, target_id: hir::HirId) -> &mut BreakableCtxt<'gcx, 'tcx> {
let ix = *self.by_id.get(&target_id).unwrap_or_else(|| {
bug!("could not find enclosing breakable with id {}", target_id);
});
&mut self.stack[ix]
}
}
pub struct FnCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
body_id: hir::HirId,
/// The parameter environment used for proving trait obligations
/// in this function. This can change when we descend into
/// closures (as they bring new things into scope), hence it is
/// not part of `Inherited` (as of the time of this writing,
/// closures do not yet change the environment, but they will
/// eventually).
param_env: ty::ParamEnv<'tcx>,
// Number of errors that had been reported when we started
// checking this function. On exit, if we find that *more* errors
// have been reported, we will skip regionck and other work that
// expects the types within the function to be consistent.
err_count_on_creation: usize,
ret_coercion: Option<RefCell<DynamicCoerceMany<'gcx, 'tcx>>>,
ret_coercion_span: RefCell<Option<Span>>,
yield_ty: Option<Ty<'tcx>>,
ps: RefCell<UnsafetyState>,
/// Whether the last checked node generates a divergence (e.g.,
/// `return` will set this to `Always`). In general, when entering
/// an expression or other node in the tree, the initial value
/// indicates whether prior parts of the containing expression may
/// have diverged. It is then typically set to `Maybe` (and the
/// old value remembered) for processing the subparts of the
/// current expression. As each subpart is processed, they may set
/// the flag to `Always`, etc. Finally, at the end, we take the
/// result and "union" it with the original value, so that when we
/// return the flag indicates if any subpart of the parent
/// expression (up to and including this part) has diverged. So,
/// if you read it after evaluating a subexpression `X`, the value
/// you get indicates whether any subexpression that was
/// evaluating up to and including `X` diverged.
///
/// We currently use this flag only for diagnostic purposes:
///
/// - To warn about unreachable code: if, after processing a
/// sub-expression but before we have applied the effects of the
/// current node, we see that the flag is set to `Always`, we
/// can issue a warning. This corresponds to something like
/// `foo(return)`; we warn on the `foo()` expression. (We then
/// update the flag to `WarnedAlways` to suppress duplicate
/// reports.) Similarly, if we traverse to a fresh statement (or
/// tail expression) from a `Always` setting, we will issue a
/// warning. This corresponds to something like `{return;
/// foo();}` or `{return; 22}`, where we would warn on the
/// `foo()` or `22`.
///
/// An expression represents dead code if, after checking it,
/// the diverges flag is set to something other than `Maybe`.
diverges: Cell<Diverges>,
/// Whether any child nodes have any type errors.
has_errors: Cell<bool>,
enclosing_breakables: RefCell<EnclosingBreakables<'gcx, 'tcx>>,
inh: &'a Inherited<'a, 'gcx, 'tcx>,
}
impl<'a, 'gcx, 'tcx> Deref for FnCtxt<'a, 'gcx, 'tcx> {
type Target = Inherited<'a, 'gcx, 'tcx>;
fn deref(&self) -> &Self::Target {
&self.inh
}
}
/// Helper type of a temporary returned by `Inherited::build(...)`.
/// Necessary because we can't write the following bound:
/// `F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(Inherited<'b, 'gcx, 'tcx>)`.
pub struct InheritedBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
infcx: infer::InferCtxtBuilder<'a, 'gcx, 'tcx>,
def_id: DefId,
}
impl<'a, 'gcx, 'tcx> Inherited<'a, 'gcx, 'tcx> {
pub fn build(tcx: TyCtxt<'a, 'gcx, 'gcx>, def_id: DefId)
-> InheritedBuilder<'a, 'gcx, 'tcx> {
let hir_id_root = if def_id.is_local() {
let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap();
DefId::local(hir_id.owner)
} else {
def_id
};
InheritedBuilder {
infcx: tcx.infer_ctxt().with_fresh_in_progress_tables(hir_id_root),
def_id,
}
}
}
impl<'a, 'gcx, 'tcx> InheritedBuilder<'a, 'gcx, 'tcx> {
fn enter<F, R>(&'tcx mut self, f: F) -> R
where F: for<'b> FnOnce(Inherited<'b, 'gcx, 'tcx>) -> R
{
let def_id = self.def_id;
self.infcx.enter(|infcx| f(Inherited::new(infcx, def_id)))
}
}
impl<'a, 'gcx, 'tcx> Inherited<'a, 'gcx, 'tcx> {
fn new(infcx: InferCtxt<'a, 'gcx, 'tcx>, def_id: DefId) -> Self {
let tcx = infcx.tcx;
let item_id = tcx.hir().as_local_hir_id(def_id);
let body_id = item_id.and_then(|id| tcx.hir().maybe_body_owned_by_by_hir_id(id));
let implicit_region_bound = body_id.map(|body_id| {
let body = tcx.hir().body(body_id);
tcx.mk_region(ty::ReScope(region::Scope {
id: body.value.hir_id.local_id,
data: region::ScopeData::CallSite
}))
});
Inherited {
tables: MaybeInProgressTables {
maybe_tables: infcx.in_progress_tables,
},
infcx,
fulfillment_cx: RefCell::new(TraitEngine::new(tcx)),
locals: RefCell::new(Default::default()),
deferred_sized_obligations: RefCell::new(Vec::new()),
deferred_call_resolutions: RefCell::new(Default::default()),
deferred_cast_checks: RefCell::new(Vec::new()),
deferred_generator_interiors: RefCell::new(Vec::new()),
opaque_types: RefCell::new(Default::default()),
implicit_region_bound,
body_id,
}
}
fn register_predicate(&self, obligation: traits::PredicateObligation<'tcx>) {
debug!("register_predicate({:?})", obligation);
if obligation.has_escaping_bound_vars() {
span_bug!(obligation.cause.span, "escaping bound vars in predicate {:?}",
obligation);
}
self.fulfillment_cx
.borrow_mut()
.register_predicate_obligation(self, obligation);
}
fn register_predicates<I>(&self, obligations: I)
where I: IntoIterator<Item = traits::PredicateObligation<'tcx>>
{
for obligation in obligations {
self.register_predicate(obligation);
}
}
fn register_infer_ok_obligations<T>(&self, infer_ok: InferOk<'tcx, T>) -> T {
self.register_predicates(infer_ok.obligations);
infer_ok.value
}
fn normalize_associated_types_in<T>(&self,
span: Span,
body_id: hir::HirId,
param_env: ty::ParamEnv<'tcx>,
value: &T) -> T
where T : TypeFoldable<'tcx>
{
let ok = self.partially_normalize_associated_types_in(span, body_id, param_env, value);
self.register_infer_ok_obligations(ok)
}
}
struct CheckItemTypesVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx> }
impl<'a, 'tcx> ItemLikeVisitor<'tcx> for CheckItemTypesVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &'tcx hir::Item) {
check_item_type(self.tcx, i);
}
fn visit_trait_item(&mut self, _: &'tcx hir::TraitItem) { }
fn visit_impl_item(&mut self, _: &'tcx hir::ImplItem) { }
}
pub fn check_wf_new<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Result<(), ErrorReported> {
tcx.sess.track_errors(|| {
let mut visit = wfcheck::CheckTypeWellFormedVisitor::new(tcx);
tcx.hir().krate().par_visit_all_item_likes(&mut visit);
})
}
fn check_mod_item_types<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>, module_def_id: DefId) {
tcx.hir().visit_item_likes_in_module(module_def_id, &mut CheckItemTypesVisitor { tcx });
}
fn typeck_item_bodies<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) {
debug_assert!(crate_num == LOCAL_CRATE);
tcx.par_body_owners(|body_owner_def_id| {
tcx.ensure().typeck_tables_of(body_owner_def_id);
});
}
fn check_item_well_formed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) {
wfcheck::check_item_well_formed(tcx, def_id);
}
fn check_trait_item_well_formed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) {
wfcheck::check_trait_item(tcx, def_id);
}
fn check_impl_item_well_formed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) {
wfcheck::check_impl_item(tcx, def_id);
}
pub fn provide(providers: &mut Providers<'_>) {
method::provide(providers);
*providers = Providers {
typeck_item_bodies,
typeck_tables_of,
has_typeck_tables,
adt_destructor,
used_trait_imports,
check_item_well_formed,
check_trait_item_well_formed,
check_impl_item_well_formed,
check_mod_item_types,
..*providers
};
}
fn adt_destructor<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> Option<ty::Destructor> {
tcx.calculate_dtor(def_id, &mut dropck::check_drop_impl)
}
/// If this `DefId` is a "primary tables entry", returns `Some((body_id, decl))`
/// with information about its body-id and fn-decl (if any). Otherwise,
/// returns `None`.
///
/// If this function returns "some", then `typeck_tables(def_id)` will
/// succeed; if it returns `None`, then `typeck_tables(def_id)` may or
/// may not succeed. In some cases where this function returns `None`
/// (notably closures), `typeck_tables(def_id)` would wind up
/// redirecting to the owning function.
fn primary_body_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
id: hir::HirId)
-> Option<(hir::BodyId, Option<&'tcx hir::FnDecl>)>
{
match tcx.hir().get_by_hir_id(id) {
Node::Item(item) => {
match item.node {
hir::ItemKind::Const(_, body) |
hir::ItemKind::Static(_, _, body) =>
Some((body, None)),
hir::ItemKind::Fn(ref decl, .., body) =>
Some((body, Some(decl))),
_ =>
None,
}
}
Node::TraitItem(item) => {
match item.node {
hir::TraitItemKind::Const(_, Some(body)) =>
Some((body, None)),
hir::TraitItemKind::Method(ref sig, hir::TraitMethod::Provided(body)) =>
Some((body, Some(&sig.decl))),
_ =>
None,
}
}
Node::ImplItem(item) => {
match item.node {
hir::ImplItemKind::Const(_, body) =>
Some((body, None)),
hir::ImplItemKind::Method(ref sig, body) =>
Some((body, Some(&sig.decl))),
_ =>
None,
}
}
Node::AnonConst(constant) => Some((constant.body, None)),
_ => None,
}
}
fn has_typeck_tables<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> bool {
// Closures' tables come from their outermost function,
// as they are part of the same "inference environment".
let outer_def_id = tcx.closure_base_def_id(def_id);
if outer_def_id != def_id {
return tcx.has_typeck_tables(outer_def_id);
}
let id = tcx.hir().as_local_hir_id(def_id).unwrap();
primary_body_of(tcx, id).is_some()
}
fn used_trait_imports<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> Lrc<DefIdSet> {
tcx.typeck_tables_of(def_id).used_trait_imports.clone()
}
fn typeck_tables_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> &'tcx ty::TypeckTables<'tcx> {
// Closures' tables come from their outermost function,
// as they are part of the same "inference environment".
let outer_def_id = tcx.closure_base_def_id(def_id);
if outer_def_id != def_id {
return tcx.typeck_tables_of(outer_def_id);
}
let id = tcx.hir().as_local_hir_id(def_id).unwrap();
let span = tcx.hir().span_by_hir_id(id);
// Figure out what primary body this item has.
let (body_id, fn_decl) = primary_body_of(tcx, id).unwrap_or_else(|| {
span_bug!(span, "can't type-check body of {:?}", def_id);
});
let body = tcx.hir().body(body_id);
let tables = Inherited::build(tcx, def_id).enter(|inh| {
let param_env = tcx.param_env(def_id);
let fcx = if let Some(decl) = fn_decl {
let fn_sig = tcx.fn_sig(def_id);
check_abi(tcx, span, fn_sig.abi());
// Compute the fty from point of view of inside the fn.
let fn_sig =
tcx.liberate_late_bound_regions(def_id, &fn_sig);
let fn_sig =
inh.normalize_associated_types_in(body.value.span,
body_id.hir_id,
param_env,
&fn_sig);
let fcx = check_fn(&inh, param_env, fn_sig, decl, id, body, None).0;
fcx
} else {
let fcx = FnCtxt::new(&inh, param_env, body.value.hir_id);
let expected_type = tcx.type_of(def_id);
let expected_type = fcx.normalize_associated_types_in(body.value.span, &expected_type);
fcx.require_type_is_sized(expected_type, body.value.span, traits::ConstSized);
let revealed_ty = if tcx.features().impl_trait_in_bindings {
fcx.instantiate_opaque_types_from_value(
id,
&expected_type
)
} else {
expected_type
};
// Gather locals in statics (because of block expressions).
GatherLocalsVisitor { fcx: &fcx, parent_id: id, }.visit_body(body);
fcx.check_expr_coercable_to_type(&body.value, revealed_ty);
fcx
};
// All type checking constraints were added, try to fallback unsolved variables.
fcx.select_obligations_where_possible(false);
let mut fallback_has_occurred = false;
for ty in &fcx.unsolved_variables() {
fallback_has_occurred |= fcx.fallback_if_possible(ty);
}
fcx.select_obligations_where_possible(fallback_has_occurred);
// Even though coercion casts provide type hints, we check casts after fallback for
// backwards compatibility. This makes fallback a stronger type hint than a cast coercion.
fcx.check_casts();
// Closure and generator analysis may run after fallback
// because they don't constrain other type variables.
fcx.closure_analyze(body);
assert!(fcx.deferred_call_resolutions.borrow().is_empty());
fcx.resolve_generator_interiors(def_id);
for (ty, span, code) in fcx.deferred_sized_obligations.borrow_mut().drain(..) {
let ty = fcx.normalize_ty(span, ty);
fcx.require_type_is_sized(ty, span, code);
}
fcx.select_all_obligations_or_error();
if fn_decl.is_some() {
fcx.regionck_fn(id, body);
} else {
fcx.regionck_expr(body);
}
fcx.resolve_type_vars_in_body(body)
});
// Consistency check our TypeckTables instance can hold all ItemLocalIds
// it will need to hold.
assert_eq!(tables.local_id_root, Some(DefId::local(id.owner)));
tables
}
fn check_abi<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, span: Span, abi: Abi) {
if !tcx.sess.target.target.is_abi_supported(abi) {
struct_span_err!(tcx.sess, span, E0570,
"The ABI `{}` is not supported for the current target", abi).emit()
}
}
struct GatherLocalsVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
parent_id: hir::HirId,
}
impl<'a, 'gcx, 'tcx> GatherLocalsVisitor<'a, 'gcx, 'tcx> {
fn assign(&mut self, span: Span, nid: hir::HirId, ty_opt: Option<LocalTy<'tcx>>) -> Ty<'tcx> {
match ty_opt {
None => {
// infer the variable's type
let var_ty = self.fcx.next_ty_var(TypeVariableOrigin::TypeInference(span));
self.fcx.locals.borrow_mut().insert(nid, LocalTy {
decl_ty: var_ty,
revealed_ty: var_ty
});
var_ty
}
Some(typ) => {
// take type that the user specified
self.fcx.locals.borrow_mut().insert(nid, typ);
typ.revealed_ty
}
}
}
}
impl<'a, 'gcx, 'tcx> Visitor<'gcx> for GatherLocalsVisitor<'a, 'gcx, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'gcx> {
NestedVisitorMap::None
}
// Add explicitly-declared locals.
fn visit_local(&mut self, local: &'gcx hir::Local) {
let local_ty = match local.ty {
Some(ref ty) => {
let o_ty = self.fcx.to_ty(&ty);
let revealed_ty = if self.fcx.tcx.features().impl_trait_in_bindings {
self.fcx.instantiate_opaque_types_from_value(
self.parent_id,
&o_ty
)
} else {
o_ty
};
let c_ty = self.fcx.inh.infcx.canonicalize_user_type_annotation(
&UserType::Ty(revealed_ty)
);
debug!("visit_local: ty.hir_id={:?} o_ty={:?} revealed_ty={:?} c_ty={:?}",
ty.hir_id, o_ty, revealed_ty, c_ty);
self.fcx.tables.borrow_mut().user_provided_types_mut().insert(ty.hir_id, c_ty);
Some(LocalTy { decl_ty: o_ty, revealed_ty })
},
None => None,
};
self.assign(local.span, local.hir_id, local_ty);
debug!("Local variable {:?} is assigned type {}",
local.pat,
self.fcx.ty_to_string(
self.fcx.locals.borrow().get(&local.hir_id).unwrap().clone().decl_ty));
intravisit::walk_local(self, local);
}
// Add pattern bindings.
fn visit_pat(&mut self, p: &'gcx hir::Pat) {
if let PatKind::Binding(_, _, ident, _) = p.node {
let var_ty = self.assign(p.span, p.hir_id, None);
let node_id = self.fcx.tcx.hir().hir_to_node_id(p.hir_id);
if !self.fcx.tcx.features().unsized_locals {
self.fcx.require_type_is_sized(var_ty, p.span,
traits::VariableType(node_id));
}
debug!("Pattern binding {} is assigned to {} with type {:?}",
ident,
self.fcx.ty_to_string(
self.fcx.locals.borrow().get(&p.hir_id).unwrap().clone().decl_ty),
var_ty);
}
intravisit::walk_pat(self, p);
}
// Don't descend into the bodies of nested closures
fn visit_fn(&mut self, _: intravisit::FnKind<'gcx>, _: &'gcx hir::FnDecl,
_: hir::BodyId, _: Span, _: hir::HirId) { }
fn visit_argument_source(&mut self, s: &'gcx hir::ArgSource) {
match s {
// Don't visit the pattern in `ArgSource::AsyncFn`, it contains a pattern which has
// a `NodeId` w/out a type, as it is only used for getting the name of the original
// pattern for diagnostics where only an `hir::Arg` is present.
hir::ArgSource::AsyncFn(..) => {},
_ => intravisit::walk_argument_source(self, s),
}
}
}
/// When `check_fn` is invoked on a generator (i.e., a body that
/// includes yield), it returns back some information about the yield
/// points.
struct GeneratorTypes<'tcx> {
/// Type of value that is yielded.
yield_ty: ty::Ty<'tcx>,
/// Types that are captured (see `GeneratorInterior` for more).
interior: ty::Ty<'tcx>,
/// Indicates if the generator is movable or static (immovable).
movability: hir::GeneratorMovability,
}
/// Helper used for fns and closures. Does the grungy work of checking a function
/// body and returns the function context used for that purpose, since in the case of a fn item
/// there is still a bit more to do.
///
/// * ...
/// * inherited: other fields inherited from the enclosing fn (if any)
fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
fn_sig: ty::FnSig<'tcx>,
decl: &'gcx hir::FnDecl,
fn_id: hir::HirId,
body: &'gcx hir::Body,
can_be_generator: Option<hir::GeneratorMovability>)
-> (FnCtxt<'a, 'gcx, 'tcx>, Option<GeneratorTypes<'tcx>>)
{
let mut fn_sig = fn_sig.clone();
debug!("check_fn(sig={:?}, fn_id={}, param_env={:?})", fn_sig, fn_id, param_env);
// Create the function context. This is either derived from scratch or,
// in the case of closures, based on the outer context.
let mut fcx = FnCtxt::new(inherited, param_env, body.value.hir_id);
*fcx.ps.borrow_mut() = UnsafetyState::function(fn_sig.unsafety, fn_id);
let declared_ret_ty = fn_sig.output();
fcx.require_type_is_sized(declared_ret_ty, decl.output.span(), traits::SizedReturnType);
let revealed_ret_ty = fcx.instantiate_opaque_types_from_value(fn_id, &declared_ret_ty);
fcx.ret_coercion = Some(RefCell::new(CoerceMany::new(revealed_ret_ty)));
fn_sig = fcx.tcx.mk_fn_sig(
fn_sig.inputs().iter().cloned(),
revealed_ret_ty,
fn_sig.c_variadic,
fn_sig.unsafety,
fn_sig.abi
);
let span = body.value.span;
if body.is_generator && can_be_generator.is_some() {
let yield_ty = fcx.next_ty_var(TypeVariableOrigin::TypeInference(span));
fcx.require_type_is_sized(yield_ty, span, traits::SizedYieldType);
fcx.yield_ty = Some(yield_ty);
}
let outer_def_id = fcx.tcx.closure_base_def_id(fcx.tcx.hir().local_def_id_from_hir_id(fn_id));
let outer_hir_id = fcx.tcx.hir().as_local_hir_id(outer_def_id).unwrap();
GatherLocalsVisitor { fcx: &fcx, parent_id: outer_hir_id, }.visit_body(body);
// Add formal parameters.
for (arg_ty, arg) in fn_sig.inputs().iter().zip(&body.arguments) {
// Check the pattern.
fcx.check_pat_walk(
&arg.pat,
arg_ty,
ty::BindingMode::BindByValue(hir::Mutability::MutImmutable),
None,
);
// Check that argument is Sized.
// The check for a non-trivial pattern is a hack to avoid duplicate warnings
// for simple cases like `fn foo(x: Trait)`,
// where we would error once on the parameter as a whole, and once on the binding `x`.
if arg.pat.simple_ident().is_none() && !fcx.tcx.features().unsized_locals {
fcx.require_type_is_sized(arg_ty, decl.output.span(), traits::SizedArgumentType);
}
fcx.write_ty(arg.hir_id, arg_ty);
}
inherited.tables.borrow_mut().liberated_fn_sigs_mut().insert(fn_id, fn_sig);
fcx.check_return_expr(&body.value);
// We insert the deferred_generator_interiors entry after visiting the body.
// This ensures that all nested generators appear before the entry of this generator.
// resolve_generator_interiors relies on this property.
let gen_ty = if can_be_generator.is_some() && body.is_generator {
let interior = fcx.next_ty_var(TypeVariableOrigin::MiscVariable(span));
fcx.deferred_generator_interiors.borrow_mut().push((body.id(), interior));
Some(GeneratorTypes {
yield_ty: fcx.yield_ty.unwrap(),
interior,
movability: can_be_generator.unwrap(),
})
} else {
None
};
// Finalize the return check by taking the LUB of the return types
// we saw and assigning it to the expected return type. This isn't
// really expected to fail, since the coercions would have failed
// earlier when trying to find a LUB.
//
// However, the behavior around `!` is sort of complex. In the
// event that the `actual_return_ty` comes back as `!`, that
// indicates that the fn either does not return or "returns" only
// values of type `!`. In this case, if there is an expected
// return type that is *not* `!`, that should be ok. But if the
// return type is being inferred, we want to "fallback" to `!`:
//
// let x = move || panic!();
//
// To allow for that, I am creating a type variable with diverging
// fallback. This was deemed ever so slightly better than unifying
// the return value with `!` because it allows for the caller to
// make more assumptions about the return type (e.g., they could do
//
// let y: Option<u32> = Some(x());
//
// which would then cause this return type to become `u32`, not
// `!`).
let coercion = fcx.ret_coercion.take().unwrap().into_inner();
let mut actual_return_ty = coercion.complete(&fcx);
if actual_return_ty.is_never() {
actual_return_ty = fcx.next_diverging_ty_var(
TypeVariableOrigin::DivergingFn(span));
}
fcx.demand_suptype(span, revealed_ret_ty, actual_return_ty);
// Check that the main return type implements the termination trait.
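    // E.g. (illustrative) `fn main() -> Result<(), std::io::Error>` is accepted
    // because the standard library implements `Termination` for `Result<(), E>`
    // where `E: Debug`.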
if let Some(term_id) = fcx.tcx.lang_items().termination() {
if let Some((def_id, EntryFnType::Main)) = fcx.tcx.entry_fn(LOCAL_CRATE) {
let main_id = fcx.tcx.hir().as_local_hir_id(def_id).unwrap();
if main_id == fn_id {
let substs = fcx.tcx.mk_substs_trait(declared_ret_ty, &[]);
let trait_ref = ty::TraitRef::new(term_id, substs);
let return_ty_span = decl.output.span();
let cause = traits::ObligationCause::new(
return_ty_span, fn_id, ObligationCauseCode::MainFunctionType);
inherited.register_predicate(
traits::Obligation::new(
cause, param_env, trait_ref.to_predicate()));
}
}
}
// Check that a function marked as `#[panic_handler]` has signature `fn(&PanicInfo) -> !`
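    // For reference, a conforming handler looks like this (illustrative sketch):
    //
    //     #[panic_handler]
    //     fn panic(info: &core::panic::PanicInfo) -> ! { loop {} }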
if let Some(panic_impl_did) = fcx.tcx.lang_items().panic_impl() {
if panic_impl_did == fcx.tcx.hir().local_def_id_from_hir_id(fn_id) {
if let Some(panic_info_did) = fcx.tcx.lang_items().panic_info() {
                // At this point we don't care if there are duplicate handlers or if the
                // handler has the wrong signature, as this value will only be used when
                // writing metadata, and that only happens if compilation succeeds.
fcx.tcx.sess.has_panic_handler.try_set_same(true);
if declared_ret_ty.sty != ty::Never {
fcx.tcx.sess.span_err(
decl.output.span(),
"return type should be `!`",
);
}
let inputs = fn_sig.inputs();
let span = fcx.tcx.hir().span_by_hir_id(fn_id);
if inputs.len() == 1 {
let arg_is_panic_info = match inputs[0].sty {
ty::Ref(region, ty, mutbl) => match ty.sty {
ty::Adt(ref adt, _) => {
adt.did == panic_info_did &&
mutbl == hir::Mutability::MutImmutable &&
*region != RegionKind::ReStatic
},
_ => false,
},
_ => false,
};
if !arg_is_panic_info {
fcx.tcx.sess.span_err(
decl.inputs[0].span,
"argument should be `&PanicInfo`",
);
}
if let Node::Item(item) = fcx.tcx.hir().get_by_hir_id(fn_id) {
if let ItemKind::Fn(_, _, ref generics, _) = item.node {
if !generics.params.is_empty() {
fcx.tcx.sess.span_err(
span,
"should have no type parameters",
);
}
}
}
} else {
let span = fcx.tcx.sess.source_map().def_span(span);
fcx.tcx.sess.span_err(span, "function should have one argument");
}
} else {
fcx.tcx.sess.err("language item required, but not found: `panic_info`");
}
}
}
// Check that a function marked as `#[alloc_error_handler]` has signature `fn(Layout) -> !`
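    // For reference, a conforming handler looks like this (illustrative sketch;
    // the `#[alloc_error_handler]` attribute is unstable):
    //
    //     #[alloc_error_handler]
    //     fn oom(layout: core::alloc::Layout) -> ! { loop {} }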
if let Some(alloc_error_handler_did) = fcx.tcx.lang_items().oom() {
if alloc_error_handler_did == fcx.tcx.hir().local_def_id_from_hir_id(fn_id) {
if let Some(alloc_layout_did) = fcx.tcx.lang_items().alloc_layout() {
if declared_ret_ty.sty != ty::Never {
fcx.tcx.sess.span_err(
decl.output.span(),
"return type should be `!`",
);
}
let inputs = fn_sig.inputs();
let span = fcx.tcx.hir().span_by_hir_id(fn_id);
if inputs.len() == 1 {
let arg_is_alloc_layout = match inputs[0].sty {
ty::Adt(ref adt, _) => {
adt.did == alloc_layout_did
},
_ => false,
};
if !arg_is_alloc_layout {
fcx.tcx.sess.span_err(
decl.inputs[0].span,
"argument should be `Layout`",
);
}
if let Node::Item(item) = fcx.tcx.hir().get_by_hir_id(fn_id) {
if let ItemKind::Fn(_, _, ref generics, _) = item.node {
if !generics.params.is_empty() {
fcx.tcx.sess.span_err(
span,
"`#[alloc_error_handler]` function should have no type \
parameters",
);
}
}
}
} else {
let span = fcx.tcx.sess.source_map().def_span(span);
fcx.tcx.sess.span_err(span, "function should have one argument");
}
} else {
fcx.tcx.sess.err("language item required, but not found: `alloc_layout`");
}
}
}
(fcx, gen_ty)
}
fn check_struct<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
id: hir::HirId,
span: Span) {
let def_id = tcx.hir().local_def_id_from_hir_id(id);
let def = tcx.adt_def(def_id);
def.destructor(tcx); // force the destructor to be evaluated
check_representable(tcx, span, def_id);
if def.repr.simd() {
check_simd(tcx, span, def_id);
}
check_transparent(tcx, span, def_id);
check_packed(tcx, span, def_id);
}
fn check_union<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
id: hir::HirId,
span: Span) {
let def_id = tcx.hir().local_def_id_from_hir_id(id);
let def = tcx.adt_def(def_id);
def.destructor(tcx); // force the destructor to be evaluated
check_representable(tcx, span, def_id);
check_packed(tcx, span, def_id);
}
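/// Checks that an opaque type does not expand to a type that recursively
/// contains itself, which would have infinite size. An illustrative offender:
///
///     fn f() -> impl Sized { (f(),) } // E0720: expands to a recursive type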
fn check_opaque<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
substs: SubstsRef<'tcx>,
span: Span,
) {
if let Err(partially_expanded_type) = tcx.try_expand_impl_trait_type(def_id, substs) {
let mut err = struct_span_err!(
tcx.sess, span, E0720,
"opaque type expands to a recursive type",
);
err.span_label(span, "expands to self-referential type");
if let ty::Opaque(..) = partially_expanded_type.sty {
err.note("type resolves to itself");
} else {
err.note(&format!("expanded type is `{}`", partially_expanded_type));
}
err.emit();
}
}
pub fn check_item_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, it: &'tcx hir::Item) {
debug!(
"check_item_type(it.hir_id={}, it.name={})",
it.hir_id,
tcx.def_path_str(tcx.hir().local_def_id_from_hir_id(it.hir_id))
);
let _indenter = indenter();
match it.node {
// Consts can play a role in type-checking, so they are included here.
hir::ItemKind::Static(..) => {
let def_id = tcx.hir().local_def_id_from_hir_id(it.hir_id);
tcx.typeck_tables_of(def_id);
maybe_check_static_with_link_section(tcx, def_id, it.span);
}
hir::ItemKind::Const(..) => {
tcx.typeck_tables_of(tcx.hir().local_def_id_from_hir_id(it.hir_id));
}
hir::ItemKind::Enum(ref enum_definition, _) => {
check_enum(tcx, it.span, &enum_definition.variants, it.hir_id);
}
hir::ItemKind::Fn(..) => {} // entirely within check_item_body
hir::ItemKind::Impl(.., ref impl_item_refs) => {
debug!("ItemKind::Impl {} with id {}", it.ident, it.hir_id);
let impl_def_id = tcx.hir().local_def_id_from_hir_id(it.hir_id);
if let Some(impl_trait_ref) = tcx.impl_trait_ref(impl_def_id) {
check_impl_items_against_trait(
tcx,
it.span,
impl_def_id,
impl_trait_ref,
impl_item_refs,
);
let trait_def_id = impl_trait_ref.def_id;
check_on_unimplemented(tcx, trait_def_id, it);
}
}
hir::ItemKind::Trait(..) => {
let def_id = tcx.hir().local_def_id_from_hir_id(it.hir_id);
check_on_unimplemented(tcx, def_id, it);
}
hir::ItemKind::Struct(..) => {
check_struct(tcx, it.hir_id, it.span);
}
hir::ItemKind::Union(..) => {
check_union(tcx, it.hir_id, it.span);
}
hir::ItemKind::Existential(..) => {
let def_id = tcx.hir().local_def_id_from_hir_id(it.hir_id);
let substs = InternalSubsts::identity_for_item(tcx, def_id);
check_opaque(tcx, def_id, substs, it.span);
}
hir::ItemKind::Ty(..) => {
let def_id = tcx.hir().local_def_id_from_hir_id(it.hir_id);
let pty_ty = tcx.type_of(def_id);
let generics = tcx.generics_of(def_id);
check_bounds_are_used(tcx, &generics, pty_ty);
}
hir::ItemKind::ForeignMod(ref m) => {
check_abi(tcx, it.span, m.abi);
if m.abi == Abi::RustIntrinsic {
for item in &m.items {
intrinsic::check_intrinsic_type(tcx, item);
}
} else if m.abi == Abi::PlatformIntrinsic {
for item in &m.items {
intrinsic::check_platform_intrinsic_type(tcx, item);
}
} else {
for item in &m.items {
let generics = tcx.generics_of(tcx.hir().local_def_id_from_hir_id(item.hir_id));
if generics.params.len() - generics.own_counts().lifetimes != 0 {
                            let mut err = struct_span_err!(
                                tcx.sess,
                                item.span,
                                E0044,
                                "foreign items may not have type parameters",
                            );
err.span_label(item.span, "can't have type parameters");
// FIXME: once we start storing spans for type arguments, turn this into a
// suggestion.
err.help(
"use specialization instead of type parameters by replacing them \
with concrete types like `u32`",
);
err.emit();
}
if let hir::ForeignItemKind::Fn(ref fn_decl, _, _) = item.node {
require_c_abi_if_c_variadic(tcx, fn_decl, m.abi, item.span);
}
}
}
}
_ => { /* nothing to do */ }
}
}
fn maybe_check_static_with_link_section(tcx: TyCtxt<'_, '_, '_>, id: DefId, span: Span) {
// Only restricted on wasm32 target for now
if !tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
return
}
// If `#[link_section]` is missing, then nothing to verify
let attrs = tcx.codegen_fn_attrs(id);
if attrs.link_section.is_none() {
return
}
    // For the wasm32 target, statics with `#[link_section]` are placed into custom
    // sections of the final output file, but this isn't like custom sections of
    // other executable formats. Namely, we can only embed a list of bytes,
    // nothing with pointers to anything else and no relocations. If any
    // relocations show up, reject them here.
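    // E.g. (illustrative) a `static` whose value contains a reference to another
    // `static` embeds a relocation and is rejected below.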
let instance = ty::Instance::mono(tcx, id);
let cid = GlobalId {
instance,
promoted: None
};
let param_env = ty::ParamEnv::reveal_all();
if let Ok(static_) = tcx.const_eval(param_env.and(cid)) {
let alloc = if let ConstValue::ByRef(_, allocation) = static_.val {
allocation
} else {
bug!("Matching on non-ByRef static")
};
if alloc.relocations.len() != 0 {
let msg = "statics with a custom `#[link_section]` must be a \
simple list of bytes on the wasm target with no \
extra levels of indirection such as references";
tcx.sess.span_err(span, msg);
}
}
}
fn check_on_unimplemented<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
trait_def_id: DefId,
item: &hir::Item) {
let item_def_id = tcx.hir().local_def_id_from_hir_id(item.hir_id);
// an error would be reported if this fails.
let _ = traits::OnUnimplementedDirective::of_item(tcx, trait_def_id, item_def_id);
}
fn report_forbidden_specialization<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
impl_item: &hir::ImplItem,
parent_impl: DefId)
{
let mut err = struct_span_err!(
tcx.sess, impl_item.span, E0520,
"`{}` specializes an item from a parent `impl`, but \
that item is not marked `default`",
impl_item.ident);
err.span_label(impl_item.span, format!("cannot specialize default item `{}`",
impl_item.ident));
match tcx.span_of_impl(parent_impl) {
Ok(span) => {
err.span_label(span, "parent `impl` is here");
err.note(&format!("to specialize, `{}` in the parent `impl` must be marked `default`",
impl_item.ident));
}
Err(cname) => {
err.note(&format!("parent implementation is in crate `{}`", cname));
}
}
err.emit();
}
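/// An impl item may only specialize an item from a parent `impl` when that item
/// is marked `default`. Illustrative sketch (requires the unstable
/// `specialization` feature; `Tr` is a hypothetical trait):
///
///     impl<T> Tr for T { default fn f() {} } // may be specialized
///     impl Tr for u8 { fn f() {} }           // OK, the parent item is `default`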
fn check_specialization_validity<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
trait_def: &ty::TraitDef,
trait_item: &ty::AssociatedItem,
impl_id: DefId,
impl_item: &hir::ImplItem)
{
let ancestors = trait_def.ancestors(tcx, impl_id);
let kind = match impl_item.node {
hir::ImplItemKind::Const(..) => ty::AssociatedKind::Const,
hir::ImplItemKind::Method(..) => ty::AssociatedKind::Method,
hir::ImplItemKind::Existential(..) => ty::AssociatedKind::Existential,
hir::ImplItemKind::Type(_) => ty::AssociatedKind::Type
};
let parent = ancestors.defs(tcx, trait_item.ident, kind, trait_def.def_id).nth(1)
.map(|node_item| node_item.map(|parent| parent.defaultness));
if let Some(parent) = parent {
if tcx.impl_item_is_final(&parent) {
report_forbidden_specialization(tcx, impl_item, parent.node.def_id());
}
}
}
fn check_impl_items_against_trait<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
impl_span: Span,
impl_id: DefId,
impl_trait_ref: ty::TraitRef<'tcx>,
impl_item_refs: &[hir::ImplItemRef]) {
let impl_span = tcx.sess.source_map().def_span(impl_span);
// If the trait reference itself is erroneous (so the compilation is going
// to fail), skip checking the items here -- the `impl_item` table in `tcx`
// isn't populated for such impls.
if impl_trait_ref.references_error() { return; }
// Locate trait definition and items
let trait_def = tcx.trait_def(impl_trait_ref.def_id);
let mut overridden_associated_type = None;
let impl_items = || impl_item_refs.iter().map(|iiref| tcx.hir().impl_item(iiref.id));
// Check existing impl methods to see if they are both present in trait
// and compatible with trait signature
for impl_item in impl_items() {
let ty_impl_item = tcx.associated_item(
tcx.hir().local_def_id_from_hir_id(impl_item.hir_id));
let ty_trait_item = tcx.associated_items(impl_trait_ref.def_id)
.find(|ac| Namespace::from(&impl_item.node) == Namespace::from(ac.kind) &&
tcx.hygienic_eq(ty_impl_item.ident, ac.ident, impl_trait_ref.def_id))
.or_else(|| {
// Not compatible, but needed for the error message
tcx.associated_items(impl_trait_ref.def_id)
.find(|ac| tcx.hygienic_eq(ty_impl_item.ident, ac.ident, impl_trait_ref.def_id))
});
// Check that impl definition matches trait definition
if let Some(ty_trait_item) = ty_trait_item {
match impl_item.node {
hir::ImplItemKind::Const(..) => {
// Find associated const definition.
if ty_trait_item.kind == ty::AssociatedKind::Const {
compare_const_impl(tcx,
&ty_impl_item,
impl_item.span,
&ty_trait_item,
impl_trait_ref);
} else {
let mut err = struct_span_err!(tcx.sess, impl_item.span, E0323,
"item `{}` is an associated const, \
which doesn't match its trait `{}`",
ty_impl_item.ident,
impl_trait_ref);
err.span_label(impl_item.span, "does not match trait");
// We can only get the spans from local trait definition
// Same for E0324 and E0325
if let Some(trait_span) = tcx.hir().span_if_local(ty_trait_item.def_id) {
err.span_label(trait_span, "item in trait");
}
err.emit()
}
}
hir::ImplItemKind::Method(..) => {
let trait_span = tcx.hir().span_if_local(ty_trait_item.def_id);
if ty_trait_item.kind == ty::AssociatedKind::Method {
compare_impl_method(tcx,
&ty_impl_item,
impl_item.span,
&ty_trait_item,
impl_trait_ref,
trait_span);
} else {
let mut err = struct_span_err!(tcx.sess, impl_item.span, E0324,
"item `{}` is an associated method, \
which doesn't match its trait `{}`",
ty_impl_item.ident,
impl_trait_ref);
err.span_label(impl_item.span, "does not match trait");
if let Some(trait_span) = tcx.hir().span_if_local(ty_trait_item.def_id) {
err.span_label(trait_span, "item in trait");
}
err.emit()
}
}
hir::ImplItemKind::Existential(..) |
hir::ImplItemKind::Type(_) => {
if ty_trait_item.kind == ty::AssociatedKind::Type {
if ty_trait_item.defaultness.has_value() {
overridden_associated_type = Some(impl_item);
}
} else {
let mut err = struct_span_err!(tcx.sess, impl_item.span, E0325,
"item `{}` is an associated type, \
which doesn't match its trait `{}`",
ty_impl_item.ident,
impl_trait_ref);
err.span_label(impl_item.span, "does not match trait");
if let Some(trait_span) = tcx.hir().span_if_local(ty_trait_item.def_id) {
err.span_label(trait_span, "item in trait");
}
err.emit()
}
}
}
check_specialization_validity(tcx, trait_def, &ty_trait_item, impl_id, impl_item);
}
}
// Check for missing items from trait
let mut missing_items = Vec::new();
let mut invalidated_items = Vec::new();
let associated_type_overridden = overridden_associated_type.is_some();
for trait_item in tcx.associated_items(impl_trait_ref.def_id) {
let is_implemented = trait_def.ancestors(tcx, impl_id)
.defs(tcx, trait_item.ident, trait_item.kind, impl_trait_ref.def_id)
.next()
.map(|node_item| !node_item.node.is_from_trait())
.unwrap_or(false);
if !is_implemented && !tcx.impl_is_default(impl_id) {
if !trait_item.defaultness.has_value() {
missing_items.push(trait_item);
} else if associated_type_overridden {
invalidated_items.push(trait_item.ident);
}
}
}
if !missing_items.is_empty() {
let mut err = struct_span_err!(tcx.sess, impl_span, E0046,
"not all trait items implemented, missing: `{}`",
missing_items.iter()
.map(|trait_item| trait_item.ident.to_string())
.collect::<Vec<_>>().join("`, `"));
err.span_label(impl_span, format!("missing `{}` in implementation",
missing_items.iter()
.map(|trait_item| trait_item.ident.to_string())
.collect::<Vec<_>>().join("`, `")));
for trait_item in missing_items {
if let Some(span) = tcx.hir().span_if_local(trait_item.def_id) {
err.span_label(span, format!("`{}` from trait", trait_item.ident));
} else {
err.note_trait_signature(trait_item.ident.to_string(),
trait_item.signature(&tcx));
}
}
err.emit();
}
if !invalidated_items.is_empty() {
let invalidator = overridden_associated_type.unwrap();
span_err!(tcx.sess, invalidator.span, E0399,
"the following trait items need to be reimplemented \
as `{}` was overridden: `{}`",
invalidator.ident,
invalidated_items.iter()
.map(|name| name.to_string())
.collect::<Vec<_>>().join("`, `"))
}
}
/// Checks whether a type can be represented in memory. In particular, it
/// identifies types that contain themselves without indirection through a
/// pointer, which would mean their size is unbounded.
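/// E.g. (illustrative) `struct List { next: List }` is rejected, while
/// `struct List { next: Option<Box<List>> }` is representable.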
fn check_representable<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
sp: Span,
item_def_id: DefId)
-> bool {
let rty = tcx.type_of(item_def_id);
// Check that it is possible to represent this type. This call identifies
// (1) types that contain themselves and (2) types that contain a different
// recursive type. It is only necessary to throw an error on those that
// contain themselves. For case 2, there must be an inner type that will be
// caught by case 1.
match rty.is_representable(tcx, sp) {
Representability::SelfRecursive(spans) => {
let mut err = tcx.recursive_type_with_infinite_size_error(item_def_id);
for span in spans {
err.span_label(span, "recursive without indirection");
}
err.emit();
return false
}
Representability::Representable | Representability::ContainsRecursive => (),
}
return true
}
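/// Validates a `#[repr(simd)]` struct: it must have at least one field, all
/// fields must share a single type, and that type must be a machine type or a
/// type parameter. E.g. (illustrative):
///
///     #[repr(simd)]
///     struct f32x4(f32, f32, f32, f32);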
pub fn check_simd<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: DefId) {
let t = tcx.type_of(def_id);
if let ty::Adt(def, substs) = t.sty {
if def.is_struct() {
let fields = &def.non_enum_variant().fields;
if fields.is_empty() {
span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty");
return;
}
let e = fields[0].ty(tcx, substs);
if !fields.iter().all(|f| f.ty(tcx, substs) == e) {
struct_span_err!(tcx.sess, sp, E0076, "SIMD vector should be homogeneous")
.span_label(sp, "SIMD elements must have the same type")
.emit();
return;
}
match e.sty {
ty::Param(_) => { /* struct<T>(T, T, T, T) is ok */ }
_ if e.is_machine() => { /* struct(u8, u8, u8, u8) is ok */ }
_ => {
span_err!(tcx.sess, sp, E0077,
"SIMD vector element type should be machine type");
return;
}
}
}
}
}
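/// Checks `#[repr(packed)]` constraints: conflicting pack hints (E0634),
/// combining `packed` with `align` (E0587, e.g. the illustrative
/// `#[repr(packed, align(8))] struct S(u8);`), and packed types that
/// transitively contain a `#[repr(align)]` type (E0588).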
fn check_packed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: DefId) {
let repr = tcx.adt_def(def_id).repr;
if repr.packed() {
for attr in tcx.get_attrs(def_id).iter() {
for r in attr::find_repr_attrs(&tcx.sess.parse_sess, attr) {
if let attr::ReprPacked(pack) = r {
if pack != repr.pack {
struct_span_err!(tcx.sess, sp, E0634,
"type has conflicting packed representation hints").emit();
}
}
}
}
if repr.align > 0 {
struct_span_err!(tcx.sess, sp, E0587,
"type has conflicting packed and align representation hints").emit();
}
else if check_packed_inner(tcx, def_id, &mut Vec::new()) {
struct_span_err!(tcx.sess, sp, E0588,
"packed type cannot transitively contain a `[repr(align)]` type").emit();
}
}
}
fn check_packed_inner<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
stack: &mut Vec<DefId>) -> bool {
let t = tcx.type_of(def_id);
if stack.contains(&def_id) {
debug!("check_packed_inner: {:?} is recursive", t);
return false;
}
if let ty::Adt(def, substs) = t.sty {
if def.is_struct() || def.is_union() {
if tcx.adt_def(def.did).repr.align > 0 {
return true;
}
// push struct def_id before checking fields
stack.push(def_id);
for field in &def.non_enum_variant().fields {
let f = field.ty(tcx, substs);
if let ty::Adt(def, _) = f.sty {
if check_packed_inner(tcx, def.did, stack) {
return true;
}
}
}
// only need to pop if not early out
stack.pop();
}
}
false
}
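/// Enforces the `#[repr(transparent)]` rules checked below: exactly one
/// non-zero-sized field, and every zero-sized field must have alignment 1.
/// E.g. (illustrative):
///
///     #[repr(transparent)]
///     struct Wrapper(f64, std::marker::PhantomData<u8>); // OK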
fn check_transparent<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: DefId) {
let adt = tcx.adt_def(def_id);
if !adt.repr.transparent() {
return;
}
// For each field, figure out if it's known to be a ZST and align(1)
let field_infos = adt.non_enum_variant().fields.iter().map(|field| {
let ty = field.ty(tcx, InternalSubsts::identity_for_item(tcx, field.did));
let param_env = tcx.param_env(field.did);
let layout = tcx.layout_of(param_env.and(ty));
// We are currently checking the type this field came from, so it must be local
let span = tcx.hir().span_if_local(field.did).unwrap();
let zst = layout.map(|layout| layout.is_zst()).unwrap_or(false);
let align1 = layout.map(|layout| layout.align.abi.bytes() == 1).unwrap_or(false);
(span, zst, align1)
});
let non_zst_fields = field_infos.clone().filter(|(_span, zst, _align1)| !*zst);
let non_zst_count = non_zst_fields.clone().count();
if non_zst_count != 1 {
let field_spans: Vec<_> = non_zst_fields.map(|(span, _zst, _align1)| span).collect();
struct_span_err!(tcx.sess, sp, E0690,
"transparent struct needs exactly one non-zero-sized field, but has {}",
non_zst_count)
.span_note(field_spans, "non-zero-sized field")
.emit();
}
for (span, zst, align1) in field_infos {
if zst && !align1 {
span_err!(tcx.sess, span, E0691,
"zero-sized field in transparent struct has alignment larger than 1");
}
}
}
#[allow(trivial_numeric_casts)]
pub fn check_enum<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
sp: Span,
vs: &'tcx [hir::Variant],
id: hir::HirId) {
let def_id = tcx.hir().local_def_id_from_hir_id(id);
let def = tcx.adt_def(def_id);
def.destructor(tcx); // force the destructor to be evaluated
if vs.is_empty() {
let attributes = tcx.get_attrs(def_id);
if let Some(attr) = attr::find_by_name(&attributes, "repr") {
struct_span_err!(
tcx.sess, attr.span, E0084,
"unsupported representation for zero-variant enum")
.span_label(sp, "zero-variant enum")
.emit();
}
}
let repr_type_ty = def.repr.discr_type().to_ty(tcx);
if repr_type_ty == tcx.types.i128 || repr_type_ty == tcx.types.u128 {
if !tcx.features().repr128 {
emit_feature_err(&tcx.sess.parse_sess,
"repr128",
sp,
GateIssue::Language,
"repr with 128-bit type is unstable");
}
}
for v in vs {
if let Some(ref e) = v.node.disr_expr {
tcx.typeck_tables_of(tcx.hir().local_def_id_from_hir_id(e.hir_id));
}
}
let mut disr_vals: Vec<Discr<'tcx>> = Vec::with_capacity(vs.len());
for ((_, discr), v) in def.discriminants(tcx).zip(vs) {
// Check for duplicate discriminant values
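        // E.g. (illustrative) `enum E { A = 1, B = 1 }` errors with E0081,
        // pointing at both uses of `1`.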
if let Some(i) = disr_vals.iter().position(|&x| x.val == discr.val) {
let variant_did = def.variants[VariantIdx::new(i)].def_id;
let variant_i_hir_id = tcx.hir().as_local_hir_id(variant_did).unwrap();
let variant_i = tcx.hir().expect_variant(variant_i_hir_id);
let i_span = match variant_i.node.disr_expr {
Some(ref expr) => tcx.hir().span_by_hir_id(expr.hir_id),
None => tcx.hir().span_by_hir_id(variant_i_hir_id)
};
let span = match v.node.disr_expr {
Some(ref expr) => tcx.hir().span_by_hir_id(expr.hir_id),
None => v.span
};
struct_span_err!(tcx.sess, span, E0081,
"discriminant value `{}` already exists", disr_vals[i])
.span_label(i_span, format!("first use of `{}`", disr_vals[i]))
.span_label(span , format!("enum already has `{}`", disr_vals[i]))
.emit();
}
disr_vals.push(discr);
}
check_representable(tcx, sp, def_id);
}
fn report_unexpected_variant_def<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
def: &Def,
span: Span,
qpath: &QPath) {
span_err!(tcx.sess, span, E0533,
"expected unit struct/variant or constant, found {} `{}`",
def.kind_name(),
hir::print::to_string(tcx.hir(), |s| s.print_qpath(qpath, false)));
}
impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> {
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx }
fn get_type_parameter_bounds(&self, _: Span, def_id: DefId)
-> Lrc<ty::GenericPredicates<'tcx>>
{
let tcx = self.tcx;
let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap();
let item_id = tcx.hir().ty_param_owner(hir_id);
let item_def_id = tcx.hir().local_def_id_from_hir_id(item_id);
let generics = tcx.generics_of(item_def_id);
let index = generics.param_def_id_to_index[&def_id];
Lrc::new(ty::GenericPredicates {
parent: None,
predicates: self.param_env.caller_bounds.iter().filter_map(|&predicate| {
match predicate {
ty::Predicate::Trait(ref data)
if data.skip_binder().self_ty().is_param(index) => {
// HACK(eddyb) should get the original `Span`.
let span = tcx.def_span(def_id);
Some((predicate, span))
}
_ => None
}
}).collect()
})
}
fn re_infer(&self, span: Span, def: Option<&ty::GenericParamDef>)
-> Option<ty::Region<'tcx>> {
let v = match def {
Some(def) => infer::EarlyBoundRegion(span, def.name),
None => infer::MiscVariable(span)
};
Some(self.next_region_var(v))
}
fn ty_infer(&self, span: Span) -> Ty<'tcx> {
self.next_ty_var(TypeVariableOrigin::TypeInference(span))
}
fn ty_infer_for_def(&self,
ty_param_def: &ty::GenericParamDef,
span: Span) -> Ty<'tcx> {
if let UnpackedKind::Type(ty) = self.var_for_def(span, ty_param_def).unpack() {
return ty;
}
unreachable!()
}
fn projected_ty_from_poly_trait_ref(&self,
span: Span,
item_def_id: DefId,
poly_trait_ref: ty::PolyTraitRef<'tcx>)
-> Ty<'tcx>
{
let (trait_ref, _) = self.replace_bound_vars_with_fresh_vars(
span,
infer::LateBoundRegionConversionTime::AssocTypeProjection(item_def_id),
&poly_trait_ref
);
self.tcx().mk_projection(item_def_id, trait_ref.substs)
}
fn normalize_ty(&self, span: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
if ty.has_escaping_bound_vars() {
ty // FIXME: normalization and escaping regions
} else {
self.normalize_associated_types_in(span, &ty)
}
}
fn set_tainted_by_errors(&self) {
self.infcx.set_tainted_by_errors()
}
fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, _span: Span) {
self.write_ty(hir_id, ty)
}
}
/// Controls whether the arguments are tupled. This is used for the call
/// operator.
///
/// Tupling means that all call-side arguments are packed into a tuple and
/// passed as a single parameter. For example, if tupling is enabled, this
/// function:
///
///     fn f(x: (isize, isize))
///
/// can be called as:
///
///     f(1, 2);
///
/// instead of:
///
///     f((1, 2));
#[derive(Clone, Eq, PartialEq)]
enum TupleArgumentsFlag {
DontTupleArguments,
TupleArguments,
}
impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
pub fn new(inh: &'a Inherited<'a, 'gcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
body_id: hir::HirId)
-> FnCtxt<'a, 'gcx, 'tcx> {
FnCtxt {
body_id,
param_env,
err_count_on_creation: inh.tcx.sess.err_count(),
ret_coercion: None,
ret_coercion_span: RefCell::new(None),
yield_ty: None,
ps: RefCell::new(UnsafetyState::function(hir::Unsafety::Normal,
hir::CRATE_HIR_ID)),
diverges: Cell::new(Diverges::Maybe),
has_errors: Cell::new(false),
enclosing_breakables: RefCell::new(EnclosingBreakables {
stack: Vec::new(),
by_id: Default::default(),
}),
inh,
}
}
pub fn sess(&self) -> &Session {
&self.tcx.sess
}
pub fn err_count_since_creation(&self) -> usize {
self.tcx.sess.err_count() - self.err_count_on_creation
}
    /// Produces a warning on the given node if the current point in the
    /// function is unreachable and another warning hasn't already been emitted.
fn warn_if_unreachable(&self, id: hir::HirId, span: Span, kind: &str) {
if self.diverges.get() == Diverges::Always {
self.diverges.set(Diverges::WarnedAlways);
debug!("warn_if_unreachable: id={:?} span={:?} kind={}", id, span, kind);
self.tcx().lint_hir(
lint::builtin::UNREACHABLE_CODE,
id, span,
&format!("unreachable {}", kind));
}
}
pub fn cause(&self,
span: Span,
code: ObligationCauseCode<'tcx>)
-> ObligationCause<'tcx> {
ObligationCause::new(span, self.body_id, code)
}
pub fn misc(&self, span: Span) -> ObligationCause<'tcx> {
self.cause(span, ObligationCauseCode::MiscObligation)
}
/// Resolves type variables in `ty` if possible. Unlike the infcx
/// version (resolve_type_vars_if_possible), this version will
/// also select obligations if it seems useful, in an effort
/// to get more type information.
fn resolve_type_vars_with_obligations(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
debug!("resolve_type_vars_with_obligations(ty={:?})", ty);
// No Infer()? Nothing needs doing.
if !ty.has_infer_types() {
debug!("resolve_type_vars_with_obligations: ty={:?}", ty);
return ty;
}
// If `ty` is a type variable, see whether we already know what it is.
ty = self.resolve_type_vars_if_possible(&ty);
if !ty.has_infer_types() {
debug!("resolve_type_vars_with_obligations: ty={:?}", ty);
return ty;
}
// If not, try resolving pending obligations as much as
// possible. This can help substantially when there are
// indirect dependencies that don't seem worth tracking
// precisely.
self.select_obligations_where_possible(false);
ty = self.resolve_type_vars_if_possible(&ty);
debug!("resolve_type_vars_with_obligations: ty={:?}", ty);
ty
}
fn record_deferred_call_resolution(&self,
closure_def_id: DefId,
r: DeferredCallResolution<'gcx, 'tcx>) {
let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut();
deferred_call_resolutions.entry(closure_def_id).or_default().push(r);
}
fn remove_deferred_call_resolutions(&self,
closure_def_id: DefId)
-> Vec<DeferredCallResolution<'gcx, 'tcx>>
{
let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut();
deferred_call_resolutions.remove(&closure_def_id).unwrap_or(vec![])
}
pub fn tag(&self) -> String {
let self_ptr: *const FnCtxt<'_, '_, '_> = self;
format!("{:?}", self_ptr)
}
pub fn local_ty(&self, span: Span, nid: hir::HirId) -> LocalTy<'tcx> {
self.locals.borrow().get(&nid).cloned().unwrap_or_else(||
span_bug!(span, "no type for local variable {}",
self.tcx.hir().hir_to_string(nid))
)
}
#[inline]
pub fn write_ty(&self, id: hir::HirId, ty: Ty<'tcx>) {
debug!("write_ty({:?}, {:?}) in fcx {}",
id, self.resolve_type_vars_if_possible(&ty), self.tag());
self.tables.borrow_mut().node_types_mut().insert(id, ty);
if ty.references_error() {
self.has_errors.set(true);
self.set_tainted_by_errors();
}
}
pub fn write_field_index(&self, hir_id: hir::HirId, index: usize) {
self.tables.borrow_mut().field_indices_mut().insert(hir_id, index);
}
pub fn write_method_call(&self,
hir_id: hir::HirId,
method: MethodCallee<'tcx>) {
debug!("write_method_call(hir_id={:?}, method={:?})", hir_id, method);
self.tables
.borrow_mut()
.type_dependent_defs_mut()
.insert(hir_id, Def::Method(method.def_id));
self.write_substs(hir_id, method.substs);
// When the method is confirmed, the `method.substs` includes
// parameters from not just the method, but also the impl of
// the method -- in particular, the `Self` type will be fully
// resolved. However, those are not something that the "user
// specified" -- i.e., those types come from the inferred type
// of the receiver, not something the user wrote. So when we
// create the user-substs, we want to replace those earlier
// types with just the types that the user actually wrote --
// that is, those that appear on the *method itself*.
//
// As an example, if the user wrote something like
// `foo.bar::<u32>(...)` -- the `Self` type here will be the
// type of `foo` (possibly adjusted), but we don't want to
// include that. We want just the `[_, u32]` part.
if !method.substs.is_noop() {
let method_generics = self.tcx.generics_of(method.def_id);
if !method_generics.params.is_empty() {
let user_type_annotation = self.infcx.probe(|_| {
let user_substs = UserSubsts {
substs: InternalSubsts::for_item(self.tcx, method.def_id, |param, _| {
let i = param.index as usize;
if i < method_generics.parent_count {
self.infcx.var_for_def(DUMMY_SP, param)
} else {
method.substs[i]
}
}),
user_self_ty: None, // not relevant here
};
self.infcx.canonicalize_user_type_annotation(&UserType::TypeOf(
method.def_id,
user_substs,
))
});
debug!("write_method_call: user_type_annotation={:?}", user_type_annotation);
self.write_user_type_annotation(hir_id, user_type_annotation);
}
}
}
pub fn write_substs(&self, node_id: hir::HirId, substs: SubstsRef<'tcx>) {
if !substs.is_noop() {
debug!("write_substs({:?}, {:?}) in fcx {}",
node_id,
substs,
self.tag());
self.tables.borrow_mut().node_substs_mut().insert(node_id, substs);
}
}
/// Given the substs that we just converted from the HIR, try to
/// canonicalize them and store them as user-given substitutions
/// (i.e., substitutions that must be respected by the NLL check).
///
/// This should be invoked **before any unifications have
/// occurred**, so that annotations like `Vec<_>` are preserved
/// properly.
pub fn write_user_type_annotation_from_substs(
&self,
hir_id: hir::HirId,
def_id: DefId,
substs: SubstsRef<'tcx>,
user_self_ty: Option<UserSelfTy<'tcx>>,
) {
debug!(
"write_user_type_annotation_from_substs: hir_id={:?} def_id={:?} substs={:?} \
user_self_ty={:?} in fcx {}",
hir_id, def_id, substs, user_self_ty, self.tag(),
);
if Self::can_contain_user_lifetime_bounds((substs, user_self_ty)) {
let canonicalized = self.infcx.canonicalize_user_type_annotation(
&UserType::TypeOf(def_id, UserSubsts {
substs,
user_self_ty,
})
);
debug!("write_user_type_annotation_from_substs: canonicalized={:?}", canonicalized);
self.write_user_type_annotation(hir_id, canonicalized);
}
}
pub fn write_user_type_annotation(
&self,
hir_id: hir::HirId,
canonical_user_type_annotation: CanonicalUserType<'tcx>,
) {
debug!(
"write_user_type_annotation: hir_id={:?} canonical_user_type_annotation={:?} tag={}",
hir_id, canonical_user_type_annotation, self.tag(),
);
if !canonical_user_type_annotation.is_identity() {
self.tables.borrow_mut().user_provided_types_mut().insert(
hir_id, canonical_user_type_annotation
);
} else {
debug!("write_user_type_annotation: skipping identity substs");
}
}
pub fn apply_adjustments(&self, expr: &hir::Expr, adj: Vec<Adjustment<'tcx>>) {
debug!("apply_adjustments(expr={:?}, adj={:?})", expr, adj);
if adj.is_empty() {
return;
}
match self.tables.borrow_mut().adjustments_mut().entry(expr.hir_id) {
Entry::Vacant(entry) => { entry.insert(adj); },
Entry::Occupied(mut entry) => {
debug!(" - composing on top of {:?}", entry.get());
match (&entry.get()[..], &adj[..]) {
// Applying any adjustment on top of a NeverToAny
// is a valid NeverToAny adjustment, because it can't
// be reached.
(&[Adjustment { kind: Adjust::NeverToAny, .. }], _) => return,
(&[
Adjustment { kind: Adjust::Deref(_), .. },
Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(..)), .. },
], &[
Adjustment { kind: Adjust::Deref(_), .. },
.. // Any following adjustments are allowed.
]) => {
// A reborrow has no effect before a dereference.
}
// FIXME: currently we never try to compose autoderefs
// and ReifyFnPointer/UnsafeFnPointer, but we could.
_ =>
bug!("while adjusting {:?}, can't compose {:?} and {:?}",
expr, entry.get(), adj)
};
*entry.get_mut() = adj;
}
}
}
/// Basically whenever we are converting from a type scheme into
/// the fn body space, we always want to normalize associated
/// types as well. This function combines the two.
fn instantiate_type_scheme<T>(&self,
span: Span,
substs: SubstsRef<'tcx>,
value: &T)
-> T
where T : TypeFoldable<'tcx>
{
let value = value.subst(self.tcx, substs);
let result = self.normalize_associated_types_in(span, &value);
debug!("instantiate_type_scheme(value={:?}, substs={:?}) = {:?}",
value,
substs,
result);
result
}
/// As `instantiate_type_scheme`, but for the bounds found in a
/// generic type scheme.
fn instantiate_bounds(&self, span: Span, def_id: DefId, substs: SubstsRef<'tcx>)
-> ty::InstantiatedPredicates<'tcx> {
let bounds = self.tcx.predicates_of(def_id);
let result = bounds.instantiate(self.tcx, substs);
let result = self.normalize_associated_types_in(span, &result);
debug!("instantiate_bounds(bounds={:?}, substs={:?}) = {:?}",
bounds,
substs,
result);
result
}
/// Replaces the opaque types from the given value with type variables,
/// and records the `OpaqueTypeMap` for later use during writeback. See
/// `InferCtxt::instantiate_opaque_types` for more details.
fn instantiate_opaque_types_from_value<T: TypeFoldable<'tcx>>(
&self,
parent_id: hir::HirId,
value: &T,
) -> T {
let parent_def_id = self.tcx.hir().local_def_id_from_hir_id(parent_id);
debug!("instantiate_opaque_types_from_value(parent_def_id={:?}, value={:?})",
parent_def_id,
value);
let (value, opaque_type_map) = self.register_infer_ok_obligations(
self.instantiate_opaque_types(
parent_def_id,
self.body_id,
self.param_env,
value,
)
);
let mut opaque_types = self.opaque_types.borrow_mut();
for (ty, decl) in opaque_type_map {
let old_value = opaque_types.insert(ty, decl);
assert!(old_value.is_none(), "instantiated twice: {:?}/{:?}", ty, decl);
}
value
}
fn normalize_associated_types_in<T>(&self, span: Span, value: &T) -> T
where T : TypeFoldable<'tcx>
{
self.inh.normalize_associated_types_in(span, self.body_id, self.param_env, value)
}
fn normalize_associated_types_in_as_infer_ok<T>(&self, span: Span, value: &T)
-> InferOk<'tcx, T>
where T : TypeFoldable<'tcx>
{
self.inh.partially_normalize_associated_types_in(span,
self.body_id,
self.param_env,
value)
}
pub fn require_type_meets(&self,
ty: Ty<'tcx>,
span: Span,
code: traits::ObligationCauseCode<'tcx>,
def_id: DefId)
{
self.register_bound(
ty,
def_id,
traits::ObligationCause::new(span, self.body_id, code));
}
pub fn require_type_is_sized(&self,
ty: Ty<'tcx>,
span: Span,
code: traits::ObligationCauseCode<'tcx>)
{
let lang_item = self.tcx.require_lang_item(lang_items::SizedTraitLangItem);
self.require_type_meets(ty, span, code, lang_item);
}
pub fn require_type_is_sized_deferred(&self,
ty: Ty<'tcx>,
span: Span,
code: traits::ObligationCauseCode<'tcx>)
{
self.deferred_sized_obligations.borrow_mut().push((ty, span, code));
}
pub fn register_bound(&self,
ty: Ty<'tcx>,
def_id: DefId,
cause: traits::ObligationCause<'tcx>)
{
self.fulfillment_cx.borrow_mut()
.register_bound(self, self.param_env, ty, def_id, cause);
}
pub fn to_ty(&self, ast_t: &hir::Ty) -> Ty<'tcx> {
let t = AstConv::ast_ty_to_ty(self, ast_t);
self.register_wf_obligation(t, ast_t.span, traits::MiscObligation);
t
}
pub fn to_ty_saving_user_provided_ty(&self, ast_ty: &hir::Ty) -> Ty<'tcx> {
let ty = self.to_ty(ast_ty);
debug!("to_ty_saving_user_provided_ty: ty={:?}", ty);
if Self::can_contain_user_lifetime_bounds(ty) {
let c_ty = self.infcx.canonicalize_response(&UserType::Ty(ty));
debug!("to_ty_saving_user_provided_ty: c_ty={:?}", c_ty);
self.tables.borrow_mut().user_provided_types_mut().insert(ast_ty.hir_id, c_ty);
}
ty
}
pub fn to_const(&self, ast_c: &hir::AnonConst, ty: Ty<'tcx>) -> &'tcx ty::Const<'tcx> {
AstConv::ast_const_to_const(self, ast_c, ty)
}
// If the type given by the user has free regions, save it for later, since
// NLL would like to enforce those. Also pass in types that involve
// projections, since those can resolve to `'static` bounds (modulo #54940,
// which hopefully will be fixed by the time you see this comment, dear
// reader, although I have my doubts). Also pass in types with inference
// types, because they may be repeated. Other sorts of things are already
// sufficiently enforced with erased regions. =)
fn can_contain_user_lifetime_bounds<T>(t: T) -> bool
where
T: TypeFoldable<'tcx>
{
t.has_free_regions() || t.has_projections() || t.has_infer_types()
}
pub fn node_ty(&self, id: hir::HirId) -> Ty<'tcx> {
match self.tables.borrow().node_types().get(id) {
Some(&t) => t,
None if self.is_tainted_by_errors() => self.tcx.types.err,
None => {
let node_id = self.tcx.hir().hir_to_node_id(id);
bug!("no type for node {}: {} in fcx {}",
node_id, self.tcx.hir().node_to_string(node_id),
self.tag());
}
}
}
    /// Registers an obligation for checking later, during regionck, that the
    /// type `ty` is well-formed.
pub fn register_wf_obligation(&self,
ty: Ty<'tcx>,
span: Span,
code: traits::ObligationCauseCode<'tcx>)
{
// WF obligations never themselves fail, so no real need to give a detailed cause:
let cause = traits::ObligationCause::new(span, self.body_id, code);
self.register_predicate(traits::Obligation::new(cause,
self.param_env,
ty::Predicate::WellFormed(ty)));
}
/// Registers obligations that all types appearing in `substs` are well-formed.
pub fn add_wf_bounds(&self, substs: SubstsRef<'tcx>, expr: &hir::Expr) {
for ty in substs.types() {
self.register_wf_obligation(ty, expr.span, traits::MiscObligation);
}
}
    /// Given a fully substituted set of bounds (`predicates`), derived from the
    /// values with which each type/region parameter was instantiated, creates
    /// and registers suitable trait/region obligations.
///
/// For example, if there is a function:
///
/// ```
/// fn foo<'a,T:'a>(...)
/// ```
///
/// and a reference:
///
/// ```
/// let f = foo;
/// ```
///
/// Then we will create a fresh region variable `'$0` and a fresh type variable `$1` for `'a`
/// and `T`. This routine will add a region obligation `$1:'$0` and register it locally.
pub fn add_obligations_for_parameters(&self,
cause: traits::ObligationCause<'tcx>,
predicates: &ty::InstantiatedPredicates<'tcx>)
{
assert!(!predicates.has_escaping_bound_vars());
debug!("add_obligations_for_parameters(predicates={:?})",
predicates);
for obligation in traits::predicates_for_generics(cause, self.param_env, predicates) {
self.register_predicate(obligation);
}
}
// FIXME(arielb1): use this instead of field.ty everywhere
    // Only for fields! Returns `<none>` for methods.
// Indifferent to privacy flags
pub fn field_ty(&self,
span: Span,
field: &'tcx ty::FieldDef,
substs: SubstsRef<'tcx>)
-> Ty<'tcx>
{
self.normalize_associated_types_in(span, &field.ty(self.tcx, substs))
}
fn check_casts(&self) {
let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut();
for cast in deferred_cast_checks.drain(..) {
cast.check(self);
}
}
fn resolve_generator_interiors(&self, def_id: DefId) {
let mut generators = self.deferred_generator_interiors.borrow_mut();
for (body_id, interior) in generators.drain(..) {
self.select_obligations_where_possible(false);
generator_interior::resolve_interior(self, def_id, body_id, interior);
}
}
    // Tries to apply a fallback to `ty` if it is an unsolved variable.
    // Non-numerics get replaced with `!` or `()` (depending on whether
    // `feature(never_type)` is enabled), unconstrained ints with `i32`, and
    // unconstrained floats with `f64`.
    // Fallback becomes very dubious if we have encountered type-checking errors.
    // In that case, fall back to `Error`.
    // The return value indicates whether fallback has occurred.
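    // E.g. (illustrative) in `let x = 1;` with nothing else constraining `x`,
    // the integer variable falls back to `i32`, and in `let y = 1.0;` the float
    // variable falls back to `f64`.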
fn fallback_if_possible(&self, ty: Ty<'tcx>) -> bool {
use rustc::ty::error::UnconstrainedNumeric::Neither;
use rustc::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
assert!(ty.is_ty_infer());
let fallback = match self.type_is_unconstrained_numeric(ty) {
_ if self.is_tainted_by_errors() => self.tcx().types.err,
UnconstrainedInt => self.tcx.types.i32,
UnconstrainedFloat => self.tcx.types.f64,
Neither if self.type_var_diverges(ty) => self.tcx.mk_diverging_default(),
Neither => return false,
};
debug!("fallback_if_possible: defaulting `{:?}` to `{:?}`", ty, fallback);
self.demand_eqtype(syntax_pos::DUMMY_SP, ty, fallback);
true
}
fn select_all_obligations_or_error(&self) {
debug!("select_all_obligations_or_error");
if let Err(errors) = self.fulfillment_cx.borrow_mut().select_all_or_error(&self) {
self.report_fulfillment_errors(&errors, self.inh.body_id, false);
}
}
/// Select as many obligations as we can at present.
fn select_obligations_where_possible(&self, fallback_has_occurred: bool) {
if let Err(errors) = self.fulfillment_cx.borrow_mut().select_where_possible(self) {
self.report_fulfillment_errors(&errors, self.inh.body_id, fallback_has_occurred);
}
}
/// For the overloaded place expressions (`*x`, `x[3]`), the trait
/// returns a type of `&T`, but the actual type we assign to the
/// *expression* is `T`. So this function just peels off the return
/// type by one layer to yield `T`.
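    /// For example (illustrative), `Index::index` returns `&Self::Output`, but
    /// the type recorded for the expression `x[3]` is `Self::Output` itself.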
fn make_overloaded_place_return_type(&self,
method: MethodCallee<'tcx>)
-> ty::TypeAndMut<'tcx>
{
// extract method return type, which will be &T;
let ret_ty = method.sig.output();
// method returns &T, but the type as visible to user is T, so deref
ret_ty.builtin_deref(true).unwrap()
}
fn lookup_indexing(&self,
expr: &hir::Expr,
base_expr: &'gcx hir::Expr,
base_ty: Ty<'tcx>,
idx_ty: Ty<'tcx>,
needs: Needs)
-> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)>
{
// FIXME(#18741) -- this is almost but not quite the same as the
// autoderef that normal method probing does. They could likely be
// consolidated.
let mut autoderef = self.autoderef(base_expr.span, base_ty);
let mut result = None;
while result.is_none() && autoderef.next().is_some() {
result = self.try_index_step(expr, base_expr, &autoderef, needs, idx_ty);
}
autoderef.finalize(self);
result
}
/// To type-check `base_expr[index_expr]`, we progressively autoderef
/// (and otherwise adjust) `base_expr`, looking for a type which either
/// supports builtin indexing or overloaded indexing.
/// This loop implements one step in that search; the autoderef loop
/// is implemented by `lookup_indexing`.
fn try_index_step(&self,
expr: &hir::Expr,
base_expr: &hir::Expr,
autoderef: &Autoderef<'a, 'gcx, 'tcx>,
needs: Needs,
index_ty: Ty<'tcx>)
-> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)>
{
let adjusted_ty = autoderef.unambiguous_final_ty(self);
debug!("try_index_step(expr={:?}, base_expr={:?}, adjusted_ty={:?}, \
index_ty={:?})",
expr,
base_expr,
adjusted_ty,
index_ty);
for &unsize in &[false, true] {
let mut self_ty = adjusted_ty;
if unsize {
// We only unsize arrays here.
if let ty::Array(element_ty, _) = adjusted_ty.sty {
self_ty = self.tcx.mk_slice(element_ty);
} else {
continue;
}
}
            // If some lookup succeeds, install the method in the tables and
            // extract the index/element type from the method signature.
let input_ty = self.next_ty_var(TypeVariableOrigin::AutoDeref(base_expr.span));
let method = self.try_overloaded_place_op(
expr.span, self_ty, &[input_ty], needs, PlaceOp::Index);
let result = method.map(|ok| {
debug!("try_index_step: success, using overloaded indexing");
let method = self.register_infer_ok_obligations(ok);
let mut adjustments = autoderef.adjust_steps(self, needs);
if let ty::Ref(region, _, r_mutbl) = method.sig.inputs()[0].sty {
let mutbl = match r_mutbl {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
// Indexing can be desugared to a method call,
// so maybe we could use two-phase here.
// See the documentation of AllowTwoPhase for why that's
// not the case today.
allow_two_phase_borrow: AllowTwoPhase::No,
}
};
adjustments.push(Adjustment {
kind: Adjust::Borrow(AutoBorrow::Ref(region, mutbl)),
target: self.tcx.mk_ref(region, ty::TypeAndMut {
mutbl: r_mutbl,
ty: adjusted_ty
})
});
}
if unsize {
adjustments.push(Adjustment {
kind: Adjust::Pointer(PointerCast::Unsize),
target: method.sig.inputs()[0]
});
}
self.apply_adjustments(base_expr, adjustments);
self.write_method_call(expr.hir_id, method);
(input_ty, self.make_overloaded_place_return_type(method).ty)
});
if result.is_some() {
return result;
}
}
None
}
fn resolve_place_op(&self, op: PlaceOp, is_mut: bool) -> (Option<DefId>, ast::Ident) {
let (tr, name) = match (op, is_mut) {
(PlaceOp::Deref, false) =>
(self.tcx.lang_items().deref_trait(), "deref"),
(PlaceOp::Deref, true) =>
(self.tcx.lang_items().deref_mut_trait(), "deref_mut"),
(PlaceOp::Index, false) =>
(self.tcx.lang_items().index_trait(), "index"),
(PlaceOp::Index, true) =>
(self.tcx.lang_items().index_mut_trait(), "index_mut"),
};
(tr, ast::Ident::from_str(name))
}
fn try_overloaded_place_op(&self,
span: Span,
base_ty: Ty<'tcx>,
arg_tys: &[Ty<'tcx>],
needs: Needs,
op: PlaceOp)
-> Option<InferOk<'tcx, MethodCallee<'tcx>>>
{
debug!("try_overloaded_place_op({:?},{:?},{:?},{:?})",
span,
base_ty,
needs,
op);
// Try Mut first, if needed.
let (mut_tr, mut_op) = self.resolve_place_op(op, true);
let method = match (needs, mut_tr) {
(Needs::MutPlace, Some(trait_did)) => {
self.lookup_method_in_trait(span, mut_op, trait_did, base_ty, Some(arg_tys))
}
_ => None,
};
// Otherwise, fall back to the immutable version.
let (imm_tr, imm_op) = self.resolve_place_op(op, false);
let method = match (method, imm_tr) {
(None, Some(trait_did)) => {
self.lookup_method_in_trait(span, imm_op, trait_did, base_ty, Some(arg_tys))
}
(method, _) => method,
};
method
}
fn check_method_argument_types(&self,
sp: Span,
expr_sp: Span,
method: Result<MethodCallee<'tcx>, ()>,
args_no_rcvr: &'gcx [hir::Expr],
tuple_arguments: TupleArgumentsFlag,
expected: Expectation<'tcx>)
-> Ty<'tcx> {
let has_error = match method {
Ok(method) => {
method.substs.references_error() || method.sig.references_error()
}
Err(_) => true
};
if has_error {
let err_inputs = self.err_args(args_no_rcvr.len());
let err_inputs = match tuple_arguments {
DontTupleArguments => err_inputs,
TupleArguments => vec![self.tcx.intern_tup(&err_inputs[..])],
};
self.check_argument_types(sp, expr_sp, &err_inputs[..], &[], args_no_rcvr,
false, tuple_arguments, None);
return self.tcx.types.err;
}
let method = method.unwrap();
// HACK(eddyb) ignore self in the definition (see above).
let expected_arg_tys = self.expected_inputs_for_expected_output(
sp,
expected,
method.sig.output(),
&method.sig.inputs()[1..]
);
self.check_argument_types(sp, expr_sp, &method.sig.inputs()[1..], &expected_arg_tys[..],
args_no_rcvr, method.sig.c_variadic, tuple_arguments,
self.tcx.hir().span_if_local(method.def_id));
method.sig.output()
}
fn self_type_matches_expected_vid(
&self,
trait_ref: ty::PolyTraitRef<'tcx>,
expected_vid: ty::TyVid,
) -> bool {
let self_ty = self.shallow_resolve(trait_ref.self_ty());
debug!(
"self_type_matches_expected_vid(trait_ref={:?}, self_ty={:?}, expected_vid={:?})",
trait_ref, self_ty, expected_vid
);
match self_ty.sty {
ty::Infer(ty::TyVar(found_vid)) => {
// FIXME: consider using `sub_root_var` here so we
// can see through subtyping.
let found_vid = self.root_var(found_vid);
debug!("self_type_matches_expected_vid - found_vid={:?}", found_vid);
expected_vid == found_vid
}
_ => false
}
}
fn obligations_for_self_ty<'b>(&'b self, self_ty: ty::TyVid)
-> impl Iterator<Item=(ty::PolyTraitRef<'tcx>, traits::PredicateObligation<'tcx>)>
+ Captures<'gcx> + 'b
{
// FIXME: consider using `sub_root_var` here so we
// can see through subtyping.
let ty_var_root = self.root_var(self_ty);
debug!("obligations_for_self_ty: self_ty={:?} ty_var_root={:?} pending_obligations={:?}",
self_ty, ty_var_root,
self.fulfillment_cx.borrow().pending_obligations());
self.fulfillment_cx
.borrow()
.pending_obligations()
.into_iter()
.filter_map(move |obligation| match obligation.predicate {
ty::Predicate::Projection(ref data) =>
Some((data.to_poly_trait_ref(self.tcx), obligation)),
ty::Predicate::Trait(ref data) =>
Some((data.to_poly_trait_ref(), obligation)),
ty::Predicate::Subtype(..) => None,
ty::Predicate::RegionOutlives(..) => None,
ty::Predicate::TypeOutlives(..) => None,
ty::Predicate::WellFormed(..) => None,
ty::Predicate::ObjectSafe(..) => None,
ty::Predicate::ConstEvaluatable(..) => None,
// N.B., this predicate is created by breaking down a
// `ClosureType: FnFoo()` predicate, where
// `ClosureType` represents some `Closure`. It can't
// possibly be referring to the current closure,
// because we haven't produced the `Closure` for
// this closure yet; this is exactly why the other
            // code is looking for a self type of an unresolved
// inference variable.
ty::Predicate::ClosureKind(..) => None,
}).filter(move |(tr, _)| self.self_type_matches_expected_vid(*tr, ty_var_root))
}
fn type_var_is_sized(&self, self_ty: ty::TyVid) -> bool {
self.obligations_for_self_ty(self_ty).any(|(tr, _)| {
Some(tr.def_id()) == self.tcx.lang_items().sized_trait()
})
}
/// Generic function that factors out common logic from function calls,
/// method calls and overloaded operators.
fn check_argument_types(&self,
sp: Span,
expr_sp: Span,
fn_inputs: &[Ty<'tcx>],
mut expected_arg_tys: &[Ty<'tcx>],
args: &'gcx [hir::Expr],
c_variadic: bool,
tuple_arguments: TupleArgumentsFlag,
def_span: Option<Span>) {
let tcx = self.tcx;
// Grab the argument types, supplying fresh type variables
// if the wrong number of arguments were supplied
let supplied_arg_count = if tuple_arguments == DontTupleArguments {
args.len()
} else {
1
};
// All the input types from the fn signature must outlive the call
// so as to validate implied bounds.
for &fn_input_ty in fn_inputs {
self.register_wf_obligation(fn_input_ty, sp, traits::MiscObligation);
}
let expected_arg_count = fn_inputs.len();
let param_count_error = |expected_count: usize,
arg_count: usize,
error_code: &str,
c_variadic: bool,
sugg_unit: bool| {
let mut err = tcx.sess.struct_span_err_with_code(sp,
&format!("this function takes {}{} but {} {} supplied",
if c_variadic { "at least " } else { "" },
potentially_plural_count(expected_count, "parameter"),
potentially_plural_count(arg_count, "parameter"),
if arg_count == 1 {"was"} else {"were"}),
DiagnosticId::Error(error_code.to_owned()));
if let Some(def_s) = def_span.map(|sp| tcx.sess.source_map().def_span(sp)) {
err.span_label(def_s, "defined here");
}
if sugg_unit {
let sugg_span = tcx.sess.source_map().end_point(expr_sp);
            // Position the suggestion just before the closing `)`.
let sugg_span = sugg_span.shrink_to_lo();
err.span_suggestion(
sugg_span,
"expected the unit value `()`; create it with empty parentheses",
String::from("()"),
Applicability::MachineApplicable);
} else {
err.span_label(sp, format!("expected {}{}",
if c_variadic { "at least " } else { "" },
potentially_plural_count(expected_count, "parameter")));
}
err.emit();
};
let formal_tys = if tuple_arguments == TupleArguments {
let tuple_type = self.structurally_resolved_type(sp, fn_inputs[0]);
match tuple_type.sty {
ty::Tuple(arg_types) if arg_types.len() != args.len() => {
param_count_error(arg_types.len(), args.len(), "E0057", false, false);
expected_arg_tys = &[];
self.err_args(args.len())
}
ty::Tuple(arg_types) => {
expected_arg_tys = match expected_arg_tys.get(0) {
Some(&ty) => match ty.sty {
ty::Tuple(ref tys) => &tys,
_ => &[]
},
None => &[]
};
arg_types.to_vec()
}
_ => {
span_err!(tcx.sess, sp, E0059,
"cannot use call notation; the first type parameter \
for the function trait is neither a tuple nor unit");
expected_arg_tys = &[];
self.err_args(args.len())
}
}
} else if expected_arg_count == supplied_arg_count {
fn_inputs.to_vec()
} else if c_variadic {
if supplied_arg_count >= expected_arg_count {
fn_inputs.to_vec()
} else {
param_count_error(expected_arg_count, supplied_arg_count, "E0060", true, false);
expected_arg_tys = &[];
self.err_args(supplied_arg_count)
}
} else {
// is the missing argument of type `()`?
let sugg_unit = if expected_arg_tys.len() == 1 && supplied_arg_count == 0 {
self.resolve_type_vars_if_possible(&expected_arg_tys[0]).is_unit()
} else if fn_inputs.len() == 1 && supplied_arg_count == 0 {
self.resolve_type_vars_if_possible(&fn_inputs[0]).is_unit()
} else {
false
};
param_count_error(expected_arg_count, supplied_arg_count, "E0061", false, sugg_unit);
expected_arg_tys = &[];
self.err_args(supplied_arg_count)
};
// If there is no expectation, expect formal_tys.
let expected_arg_tys = if !expected_arg_tys.is_empty() {
expected_arg_tys
} else {
&formal_tys
};
debug!("check_argument_types: formal_tys={:?}",
formal_tys.iter().map(|t| self.ty_to_string(*t)).collect::<Vec<String>>());
// Check the arguments.
// We do this in a pretty awful way: first we type-check any arguments
// that are not closures, then we type-check the closures. This is so
// that we have more information about the types of arguments when we
// type-check the functions. This isn't really the right way to do this.
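        // E.g. (illustrative) in a call like `apply(vec![1], |v| v.len())` for
        // some hypothetical generic `apply`, checking `vec![1]` first can pin
        // down generic parameters that then determine the closure's expected
        // signature.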
for &check_closures in &[false, true] {
debug!("check_closures={}", check_closures);
// More awful hacks: before we check argument types, try to do
// an "opportunistic" vtable resolution of any trait bounds on
// the call. This helps coercions.
if check_closures {
self.select_obligations_where_possible(false);
}
            // For C-variadic functions, we don't have a declared type for all of
            // the arguments, hence we only do our usual type checking on
            // the arguments whose types we do know.
let t = if c_variadic {
expected_arg_count
} else if tuple_arguments == TupleArguments {
args.len()
} else {
supplied_arg_count
};
for (i, arg) in args.iter().take(t).enumerate() {
// Warn only for the first loop (the "no closures" one).
// Closure arguments themselves can't be diverging, but
// a previous argument can, e.g., `foo(panic!(), || {})`.
if !check_closures {
self.warn_if_unreachable(arg.hir_id, arg.span, "expression");
}
let is_closure = match arg.node {
ExprKind::Closure(..) => true,
_ => false
};
if is_closure != check_closures {
continue;
}
debug!("checking the argument");
let formal_ty = formal_tys[i];
// The special-cased logic below has three functions:
            // 1. Provide as good an expected type as possible.
let expected = Expectation::rvalue_hint(self, expected_arg_tys[i]);
let checked_ty = self.check_expr_with_expectation(&arg, expected);
// 2. Coerce to the most detailed type that could be coerced
// to, which is `expected_ty` if `rvalue_hint` returns an
// `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise.
let coerce_ty = expected.only_has_type(self).unwrap_or(formal_ty);
// We're processing function arguments so we definitely want to use
// two-phase borrows.
self.demand_coerce(&arg, checked_ty, coerce_ty, AllowTwoPhase::Yes);
// 3. Relate the expected type and the formal one,
// if the expected type was used for the coercion.
self.demand_suptype(arg.span, formal_ty, coerce_ty);
}
}
        // We also need to make sure we at least write the types of the other
        // arguments, which we skipped above.
if c_variadic {
fn variadic_error<'tcx>(s: &Session, span: Span, t: Ty<'tcx>, cast_ty: &str) {
use crate::structured_errors::{VariadicError, StructuredDiagnostic};
VariadicError::new(s, span, t, cast_ty).diagnostic().emit();
}
for arg in args.iter().skip(expected_arg_count) {
let arg_ty = self.check_expr(&arg);
// There are a few types which get autopromoted when passed via varargs
// in C but we just error out instead and require explicit casts.
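                // e.g., an `f32` argument would be promoted to `double` by a C
                // compiler, so we require an explicit `as c_double` cast instead.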
let arg_ty = self.structurally_resolved_type(arg.span, arg_ty);
match arg_ty.sty {
ty::Float(ast::FloatTy::F32) => {
variadic_error(tcx.sess, arg.span, arg_ty, "c_double");
}
ty::Int(ast::IntTy::I8) | ty::Int(ast::IntTy::I16) | ty::Bool => {
variadic_error(tcx.sess, arg.span, arg_ty, "c_int");
}
ty::Uint(ast::UintTy::U8) | ty::Uint(ast::UintTy::U16) => {
variadic_error(tcx.sess, arg.span, arg_ty, "c_uint");
}
ty::FnDef(..) => {
let ptr_ty = self.tcx.mk_fn_ptr(arg_ty.fn_sig(self.tcx));
let ptr_ty = self.resolve_type_vars_if_possible(&ptr_ty);
variadic_error(tcx.sess, arg.span, arg_ty, &ptr_ty.to_string());
}
_ => {}
}
}
}
}
fn err_args(&self, len: usize) -> Vec<Ty<'tcx>> {
vec![self.tcx.types.err; len]
}
// AST fragment checking
fn check_lit(&self,
lit: &ast::Lit,
expected: Expectation<'tcx>)
-> Ty<'tcx>
{
let tcx = self.tcx;
match lit.node {
ast::LitKind::Str(..) => tcx.mk_static_str(),
ast::LitKind::ByteStr(ref v) => {
tcx.mk_imm_ref(tcx.types.re_static,
tcx.mk_array(tcx.types.u8, v.len() as u64))
}
ast::LitKind::Byte(_) => tcx.types.u8,
ast::LitKind::Char(_) => tcx.types.char,
ast::LitKind::Int(_, ast::LitIntType::Signed(t)) => tcx.mk_mach_int(t),
ast::LitKind::Int(_, ast::LitIntType::Unsigned(t)) => tcx.mk_mach_uint(t),
ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => {
let opt_ty = expected.to_option(self).and_then(|ty| {
match ty.sty {
ty::Int(_) | ty::Uint(_) => Some(ty),
ty::Char => Some(tcx.types.u8),
ty::RawPtr(..) => Some(tcx.types.usize),
ty::FnDef(..) | ty::FnPtr(_) => Some(tcx.types.usize),
_ => None
}
});
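                // e.g., `let x: u8 = 1;` types `1` as `u8` via the expectation,
                // while with no suitable expectation the literal gets a fresh
                // integer variable (defaulting to `i32` if never constrained).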
opt_ty.unwrap_or_else(|| self.next_int_var())
}
ast::LitKind::Float(_, t) => tcx.mk_mach_float(t),
ast::LitKind::FloatUnsuffixed(_) => {
let opt_ty = expected.to_option(self).and_then(|ty| {
match ty.sty {
ty::Float(_) => Some(ty),
_ => None
}
});
opt_ty.unwrap_or_else(|| self.next_float_var())
}
ast::LitKind::Bool(_) => tcx.types.bool,
ast::LitKind::Err(_) => tcx.types.err,
}
}
fn check_expr_eq_type(&self,
expr: &'gcx hir::Expr,
expected: Ty<'tcx>) {
let ty = self.check_expr_with_hint(expr, expected);
self.demand_eqtype(expr.span, expected, ty);
}
pub fn check_expr_has_type_or_error(&self,
expr: &'gcx hir::Expr,
expected: Ty<'tcx>) -> Ty<'tcx> {
self.check_expr_meets_expectation_or_error(expr, ExpectHasType(expected))
}
fn check_expr_meets_expectation_or_error(&self,
expr: &'gcx hir::Expr,
expected: Expectation<'tcx>) -> Ty<'tcx> {
let expected_ty = expected.to_option(&self).unwrap_or(self.tcx.types.bool);
let mut ty = self.check_expr_with_expectation(expr, expected);
// While we don't allow *arbitrary* coercions here, we *do* allow
// coercions from ! to `expected`.
if ty.is_never() {
assert!(!self.tables.borrow().adjustments().contains_key(expr.hir_id),
"expression with never type wound up being adjusted");
let adj_ty = self.next_diverging_ty_var(
TypeVariableOrigin::AdjustmentType(expr.span));
self.apply_adjustments(expr, vec![Adjustment {
kind: Adjust::NeverToAny,
target: adj_ty
}]);
ty = adj_ty;
}
if let Some(mut err) = self.demand_suptype_diag(expr.span, expected_ty, ty) {
if self.is_assign_to_bool(expr, expected_ty) {
// Error reported in `check_assign` so avoid emitting error again.
// FIXME(centril): Consider removing if/when `if` desugars to `match`.
err.delay_as_bug();
} else {
err.emit();
}
}
ty
}
fn check_expr_coercable_to_type(&self,
expr: &'gcx hir::Expr,
expected: Ty<'tcx>) -> Ty<'tcx> {
let ty = self.check_expr_with_hint(expr, expected);
        // Checks like this don't need two-phase borrows.
self.demand_coerce(expr, ty, expected, AllowTwoPhase::No)
}
fn check_expr_with_hint(&self,
expr: &'gcx hir::Expr,
expected: Ty<'tcx>) -> Ty<'tcx> {
self.check_expr_with_expectation(expr, ExpectHasType(expected))
}
fn check_expr_with_expectation(&self,
expr: &'gcx hir::Expr,
expected: Expectation<'tcx>) -> Ty<'tcx> {
self.check_expr_with_expectation_and_needs(expr, expected, Needs::None)
}
fn check_expr(&self, expr: &'gcx hir::Expr) -> Ty<'tcx> {
self.check_expr_with_expectation(expr, NoExpectation)
}
fn check_expr_with_needs(&self, expr: &'gcx hir::Expr, needs: Needs) -> Ty<'tcx> {
self.check_expr_with_expectation_and_needs(expr, NoExpectation, needs)
}
// Determine the `Self` type, using fresh variables for all variables
// declared on the impl declaration e.g., `impl<A,B> for Vec<(A,B)>`
// would return `($0, $1)` where `$0` and `$1` are freshly instantiated type
// variables.
pub fn impl_self_ty(&self,
span: Span, // (potential) receiver for this impl
did: DefId)
-> TypeAndSubsts<'tcx> {
let ity = self.tcx.type_of(did);
debug!("impl_self_ty: ity={:?}", ity);
let substs = self.fresh_substs_for_item(span, did);
let substd_ty = self.instantiate_type_scheme(span, &substs, &ity);
        TypeAndSubsts { substs, ty: substd_ty }
}
/// Unifies the output type with the expected type early, for more coercions
/// and forward type information on the input expressions.
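    ///
    /// For example (illustrative), given `fn id<T>(x: T) -> T` used where
    /// a `u32` is expected, unifying the return type with `u32` lets the
    /// argument be checked against an expected type of `u32` as well.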
fn expected_inputs_for_expected_output(&self,
call_span: Span,
expected_ret: Expectation<'tcx>,
formal_ret: Ty<'tcx>,
formal_args: &[Ty<'tcx>])
-> Vec<Ty<'tcx>> {
let formal_ret = self.resolve_type_vars_with_obligations(formal_ret);
let ret_ty = match expected_ret.only_has_type(self) {
Some(ret) => ret,
None => return Vec::new()
};
let expect_args = self.fudge_inference_if_ok(|| {
// Attempt to apply a subtyping relationship between the formal
// return type (likely containing type variables if the function
// is polymorphic) and the expected return type.
// No argument expectations are produced if unification fails.
let origin = self.misc(call_span);
let ures = self.at(&origin, self.param_env).sup(ret_ty, &formal_ret);
            // FIXME(#27336) can't use `?` here: `Try::from_error` doesn't
            // default to identity, so the resulting type is not constrained.
match ures {
Ok(ok) => {
// Process any obligations locally as much as
// we can. We don't care if some things turn
// out unconstrained or ambiguous, as we're
// just trying to get hints here.
self.save_and_restore_in_snapshot_flag(|_| {
let mut fulfill = TraitEngine::new(self.tcx);
for obligation in ok.obligations {
fulfill.register_predicate_obligation(self, obligation);
}
fulfill.select_where_possible(self)
}).map_err(|_| ())?;
}
Err(_) => return Err(()),
}
// Record all the argument types, with the substitutions
// produced from the above subtyping unification.
Ok(formal_args.iter().map(|ty| {
self.resolve_type_vars_if_possible(ty)
}).collect())
}).unwrap_or_default();
debug!("expected_inputs_for_expected_output(formal={:?} -> {:?}, expected={:?} -> {:?})",
formal_args, formal_ret,
expect_args, expected_ret);
expect_args
}
// Checks a method call.
fn check_method_call(&self,
expr: &'gcx hir::Expr,
segment: &hir::PathSegment,
span: Span,
args: &'gcx [hir::Expr],
expected: Expectation<'tcx>,
needs: Needs) -> Ty<'tcx> {
let rcvr = &args[0];
let rcvr_t = self.check_expr_with_needs(&rcvr, needs);
// no need to check for bot/err -- callee does that
let rcvr_t = self.structurally_resolved_type(args[0].span, rcvr_t);
let method = match self.lookup_method(rcvr_t,
segment,
span,
expr,
rcvr) {
Ok(method) => {
self.write_method_call(expr.hir_id, method);
Ok(method)
}
Err(error) => {
if segment.ident.name != keywords::Invalid.name() {
self.report_method_error(span,
rcvr_t,
segment.ident,
SelfSource::MethodCall(rcvr),
error,
Some(args));
}
Err(())
}
};
// Call the generic checker.
self.check_method_argument_types(span,
expr.span,
method,
&args[1..],
DontTupleArguments,
expected)
}
fn check_return_expr(&self, return_expr: &'gcx hir::Expr) {
let ret_coercion =
self.ret_coercion
.as_ref()
.unwrap_or_else(|| span_bug!(return_expr.span,
"check_return_expr called outside fn body"));
let ret_ty = ret_coercion.borrow().expected_ty();
let return_expr_ty = self.check_expr_with_hint(return_expr, ret_ty.clone());
ret_coercion.borrow_mut()
.coerce(self,
&self.cause(return_expr.span,
ObligationCauseCode::ReturnType(return_expr.hir_id)),
return_expr,
return_expr_ty);
}
// A generic function for checking the 'then' and 'else' clauses in an 'if'
// or 'if-else' expression.
fn check_then_else(&self,
cond_expr: &'gcx hir::Expr,
then_expr: &'gcx hir::Expr,
opt_else_expr: Option<&'gcx hir::Expr>,
sp: Span,
expected: Expectation<'tcx>) -> Ty<'tcx> {
let cond_ty = self.check_expr_has_type_or_error(cond_expr, self.tcx.types.bool);
let cond_diverges = self.diverges.get();
self.diverges.set(Diverges::Maybe);
let expected = expected.adjust_for_branches(self);
let then_ty = self.check_expr_with_expectation(then_expr, expected);
let then_diverges = self.diverges.get();
self.diverges.set(Diverges::Maybe);
// We've already taken the expected type's preferences
// into account when typing the `then` branch. To figure
// out the initial shot at a LUB, we thus only consider
// `expected` if it represents a *hard* constraint
// (`only_has_type`); otherwise, we just go with a
// fresh type variable.
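        // For example, `let x: Box<dyn Debug> = if c { Box::new(1) }
        // else { Box::new("s") };` only works because the hard expectation
        // `Box<dyn Debug>` is used as the coercion target for both arms.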
let coerce_to_ty = expected.coercion_target_type(self, sp);
let mut coerce: DynamicCoerceMany<'_, '_> = CoerceMany::new(coerce_to_ty);
coerce.coerce(self, &self.misc(sp), then_expr, then_ty);
if let Some(else_expr) = opt_else_expr {
let else_ty = self.check_expr_with_expectation(else_expr, expected);
let else_diverges = self.diverges.get();
let mut outer_sp = if self.tcx.sess.source_map().is_multiline(sp) {
// The `if`/`else` isn't in one line in the output, include some context to make it
// clear it is an if/else expression:
// ```
// LL | let x = if true {
// | _____________-
// LL || 10i32
// || ----- expected because of this
// LL || } else {
// LL || 10u32
// || ^^^^^ expected i32, found u32
// LL || };
// ||_____- if and else have incompatible types
// ```
Some(sp)
} else {
// The entire expression is in one line, only point at the arms
// ```
// LL | let x = if true { 10i32 } else { 10u32 };
// | ----- ^^^^^ expected i32, found u32
// | |
// | expected because of this
// ```
None
};
let mut remove_semicolon = None;
let error_sp = if let ExprKind::Block(block, _) = &else_expr.node {
if let Some(expr) = &block.expr {
expr.span
} else if let Some(stmt) = block.stmts.last() {
// possibly incorrect trailing `;` in the else arm
remove_semicolon = self.could_remove_semicolon(block, then_ty);
stmt.span
} else { // empty block, point at its entirety
// Avoid overlapping spans that aren't as readable:
// ```
// 2 | let x = if true {
// | _____________-
// 3 | | 3
// | | - expected because of this
// 4 | | } else {
// | |____________^
// 5 | ||
// 6 | || };
// | || ^
// | ||_____|
// | |______if and else have incompatible types
// | expected integer, found ()
// ```
// by not pointing at the entire expression:
// ```
// 2 | let x = if true {
// | ------- if and else have incompatible types
// 3 | 3
// | - expected because of this
// 4 | } else {
// | ____________^
// 5 | |
// 6 | | };
// | |_____^ expected integer, found ()
// ```
if outer_sp.is_some() {
outer_sp = Some(self.tcx.sess.source_map().def_span(sp));
}
else_expr.span
}
} else { // shouldn't happen unless the parser has done something weird
else_expr.span
};
let then_sp = if let ExprKind::Block(block, _) = &then_expr.node {
if let Some(expr) = &block.expr {
expr.span
} else if let Some(stmt) = block.stmts.last() {
                // possibly incorrect trailing `;` in the then arm
remove_semicolon = remove_semicolon.or(
self.could_remove_semicolon(block, else_ty));
stmt.span
} else { // empty block, point at its entirety
outer_sp = None; // same as in `error_sp`, cleanup output
then_expr.span
}
} else { // shouldn't happen unless the parser has done something weird
then_expr.span
};
let if_cause = self.cause(error_sp, ObligationCauseCode::IfExpression {
then: then_sp,
outer: outer_sp,
semicolon: remove_semicolon,
});
coerce.coerce(self, &if_cause, else_expr, else_ty);
// We won't diverge unless both branches do (or the condition does).
self.diverges.set(cond_diverges | then_diverges & else_diverges);
} else {
// If this `if` expr is the parent's function return expr, the cause of the type
// coercion is the return type, point at it. (#25228)
let ret_reason = self.maybe_get_coercion_reason(then_expr.hir_id, sp);
let else_cause = self.cause(sp, ObligationCauseCode::IfExpressionWithNoElse);
coerce.coerce_forced_unit(self, &else_cause, &mut |err| {
if let Some((sp, msg)) = &ret_reason {
err.span_label(*sp, msg.as_str());
} else if let ExprKind::Block(block, _) = &then_expr.node {
if let Some(expr) = &block.expr {
err.span_label(expr.span, "found here".to_string());
}
}
err.note("`if` expressions without `else` evaluate to `()`");
err.help("consider adding an `else` block that evaluates to the expected type");
}, ret_reason.is_none());
// If the condition is false we can't diverge.
self.diverges.set(cond_diverges);
}
let result_ty = coerce.complete(self);
if cond_ty.references_error() {
self.tcx.types.err
} else {
result_ty
}
}
fn maybe_get_coercion_reason(&self, hir_id: hir::HirId, sp: Span) -> Option<(Span, String)> {
let node = self.tcx.hir().get_by_hir_id(self.tcx.hir().get_parent_node_by_hir_id(
self.tcx.hir().get_parent_node_by_hir_id(hir_id),
));
if let Node::Block(block) = node {
// check that the body's parent is an fn
let parent = self.tcx.hir().get_by_hir_id(
self.tcx.hir().get_parent_node_by_hir_id(
self.tcx.hir().get_parent_node_by_hir_id(block.hir_id),
),
);
if let (Some(expr), Node::Item(hir::Item {
node: hir::ItemKind::Fn(..), ..
})) = (&block.expr, parent) {
// check that the `if` expr without `else` is the fn body's expr
if expr.span == sp {
return self.get_fn_decl(hir_id).map(|(fn_decl, _)| (
fn_decl.output.span(),
format!("expected `{}` because of this return type", fn_decl.output),
));
}
}
}
if let Node::Local(hir::Local {
ty: Some(_), pat, ..
}) = node {
return Some((pat.span, "expected because of this assignment".to_string()));
}
None
}
// Check field access expressions
fn check_field(&self,
expr: &'gcx hir::Expr,
needs: Needs,
base: &'gcx hir::Expr,
field: ast::Ident) -> Ty<'tcx> {
let expr_t = self.check_expr_with_needs(base, needs);
let expr_t = self.structurally_resolved_type(base.span,
expr_t);
let mut private_candidate = None;
let mut autoderef = self.autoderef(expr.span, expr_t);
while let Some((base_t, _)) = autoderef.next() {
match base_t.sty {
ty::Adt(base_def, substs) if !base_def.is_enum() => {
debug!("struct named {:?}", base_t);
let (ident, def_scope) =
self.tcx.adjust_ident(field, base_def.did, self.body_id);
let fields = &base_def.non_enum_variant().fields;
if let Some(index) = fields.iter().position(|f| f.ident.modern() == ident) {
let field = &fields[index];
let field_ty = self.field_ty(expr.span, field, substs);
// Save the index of all fields regardless of their visibility in case
// of error recovery.
self.write_field_index(expr.hir_id, index);
if field.vis.is_accessible_from(def_scope, self.tcx) {
let adjustments = autoderef.adjust_steps(self, needs);
self.apply_adjustments(base, adjustments);
autoderef.finalize(self);
self.tcx.check_stability(field.did, Some(expr.hir_id), expr.span);
return field_ty;
}
private_candidate = Some((base_def.did, field_ty));
}
}
ty::Tuple(ref tys) => {
let fstr = field.as_str();
if let Ok(index) = fstr.parse::<usize>() {
if fstr == index.to_string() {
if let Some(field_ty) = tys.get(index) {
let adjustments = autoderef.adjust_steps(self, needs);
self.apply_adjustments(base, adjustments);
autoderef.finalize(self);
self.write_field_index(expr.hir_id, index);
return field_ty;
}
}
}
}
_ => {}
}
}
autoderef.unambiguous_final_ty(self);
if let Some((did, field_ty)) = private_candidate {
let struct_path = self.tcx().def_path_str(did);
let mut err = struct_span_err!(self.tcx().sess, expr.span, E0616,
"field `{}` of struct `{}` is private",
field, struct_path);
// Also check if an accessible method exists, which is often what is meant.
if self.method_exists(field, expr_t, expr.hir_id, false)
&& !self.expr_in_place(expr.hir_id)
{
self.suggest_method_call(
&mut err,
&format!("a method `{}` also exists, call it with parentheses", field),
field,
expr_t,
expr.hir_id,
);
}
err.emit();
field_ty
} else if field.name == keywords::Invalid.name() {
self.tcx().types.err
} else if self.method_exists(field, expr_t, expr.hir_id, true) {
let mut err = type_error_struct!(self.tcx().sess, field.span, expr_t, E0615,
"attempted to take value of method `{}` on type `{}`",
field, expr_t);
if !self.expr_in_place(expr.hir_id) {
self.suggest_method_call(
&mut err,
"use parentheses to call the method",
field,
expr_t,
expr.hir_id
);
} else {
err.help("methods are immutable and cannot be assigned to");
}
err.emit();
self.tcx().types.err
} else {
if !expr_t.is_primitive_ty() {
let mut err = self.no_such_field_err(field.span, field, expr_t);
match expr_t.sty {
ty::Adt(def, _) if !def.is_enum() => {
if let Some(suggested_field_name) =
Self::suggest_field_name(def.non_enum_variant(),
&field.as_str(), vec![]) {
err.span_suggestion(
field.span,
"a field with a similar name exists",
suggested_field_name.to_string(),
Applicability::MaybeIncorrect,
);
} else {
err.span_label(field.span, "unknown field");
let struct_variant_def = def.non_enum_variant();
let field_names = self.available_field_names(struct_variant_def);
if !field_names.is_empty() {
err.note(&format!("available fields are: {}",
self.name_series_display(field_names)));
}
};
}
ty::Array(_, len) => {
if let (Some(len), Ok(user_index)) = (
len.assert_usize(self.tcx),
field.as_str().parse::<u64>()
) {
let base = self.tcx.sess.source_map()
.span_to_snippet(base.span)
.unwrap_or_else(|_|
self.tcx.hir().hir_to_pretty_string(base.hir_id));
let help = "instead of using tuple indexing, use array indexing";
let suggestion = format!("{}[{}]", base, field);
let applicability = if len < user_index {
Applicability::MachineApplicable
} else {
Applicability::MaybeIncorrect
};
err.span_suggestion(
expr.span, help, suggestion, applicability
);
}
}
ty::RawPtr(..) => {
let base = self.tcx.sess.source_map()
.span_to_snippet(base.span)
.unwrap_or_else(|_| self.tcx.hir().hir_to_pretty_string(base.hir_id));
let msg = format!("`{}` is a raw pointer; try dereferencing it", base);
let suggestion = format!("(*{}).{}", base, field);
err.span_suggestion(
expr.span,
&msg,
suggestion,
Applicability::MaybeIncorrect,
);
}
_ => {}
}
err
} else {
type_error_struct!(self.tcx().sess, field.span, expr_t, E0610,
"`{}` is a primitive type and therefore doesn't have fields",
expr_t)
}.emit();
self.tcx().types.err
}
}
    // Returns a hint about the closest match in field names.
fn suggest_field_name(variant: &'tcx ty::VariantDef,
field: &str,
skip: Vec<LocalInternedString>)
-> Option<Symbol> {
let names = variant.fields.iter().filter_map(|field| {
// ignore already set fields and private fields from non-local crates
if skip.iter().any(|x| *x == field.ident.as_str()) ||
(!variant.def_id.is_local() && field.vis != Visibility::Public)
{
None
} else {
Some(&field.ident.name)
}
});
find_best_match_for_name(names, field, None)
}
fn available_field_names(&self, variant: &'tcx ty::VariantDef) -> Vec<ast::Name> {
variant.fields.iter().filter(|field| {
let def_scope = self.tcx.adjust_ident(field.ident, variant.def_id, self.body_id).1;
field.vis.is_accessible_from(def_scope, self.tcx)
})
.map(|field| field.ident.name)
.collect()
}
fn name_series_display(&self, names: Vec<ast::Name>) -> String {
// dynamic limit, to never omit just one field
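        // e.g., 6 names are printed in full, while 7 names print as five
        // followed by "... and 2 others".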
let limit = if names.len() == 6 { 6 } else { 5 };
let mut display = names.iter().take(limit)
.map(|n| format!("`{}`", n)).collect::<Vec<_>>().join(", ");
if names.len() > limit {
display = format!("{} ... and {} others", display, names.len() - limit);
}
display
}
fn no_such_field_err<T: Display>(&self, span: Span, field: T, expr_t: &ty::TyS<'_>)
-> DiagnosticBuilder<'_> {
type_error_struct!(self.tcx().sess, span, expr_t, E0609,
"no field `{}` on type `{}`",
field, expr_t)
}
fn report_unknown_field(
&self,
ty: Ty<'tcx>,
variant: &'tcx ty::VariantDef,
field: &hir::Field,
skip_fields: &[hir::Field],
kind_name: &str,
) {
if variant.recovered {
return;
}
let mut err = self.type_error_struct_with_diag(
field.ident.span,
|actual| match ty.sty {
ty::Adt(adt, ..) if adt.is_enum() => {
struct_span_err!(self.tcx.sess, field.ident.span, E0559,
"{} `{}::{}` has no field named `{}`",
kind_name, actual, variant.ident, field.ident)
}
_ => {
struct_span_err!(self.tcx.sess, field.ident.span, E0560,
"{} `{}` has no field named `{}`",
kind_name, actual, field.ident)
}
},
ty);
// prevent all specified fields from being suggested
        let skip_fields = skip_fields.iter().map(|x| x.ident.as_str());
if let Some(field_name) = Self::suggest_field_name(variant,
&field.ident.as_str(),
skip_fields.collect()) {
err.span_suggestion(
field.ident.span,
"a field with a similar name exists",
field_name.to_string(),
Applicability::MaybeIncorrect,
);
} else {
match ty.sty {
ty::Adt(adt, ..) => {
if adt.is_enum() {
err.span_label(field.ident.span,
format!("`{}::{}` does not have this field",
ty, variant.ident));
} else {
err.span_label(field.ident.span,
format!("`{}` does not have this field", ty));
}
let available_field_names = self.available_field_names(variant);
if !available_field_names.is_empty() {
err.note(&format!("available fields are: {}",
self.name_series_display(available_field_names)));
}
}
_ => bug!("non-ADT passed to report_unknown_field")
}
};
err.emit();
}
fn check_expr_struct_fields(&self,
adt_ty: Ty<'tcx>,
expected: Expectation<'tcx>,
expr_id: hir::HirId,
span: Span,
variant: &'tcx ty::VariantDef,
ast_fields: &'gcx [hir::Field],
check_completeness: bool) -> bool {
let tcx = self.tcx;
let adt_ty_hint =
self.expected_inputs_for_expected_output(span, expected, adt_ty, &[adt_ty])
.get(0).cloned().unwrap_or(adt_ty);
        // Re-link the regions that `expected_inputs_for_expected_output` can erase.
self.demand_eqtype(span, adt_ty_hint, adt_ty);
let (substs, adt_kind, kind_name) = match &adt_ty.sty {
&ty::Adt(adt, substs) => {
(substs, adt.adt_kind(), adt.variant_descr())
}
_ => span_bug!(span, "non-ADT passed to check_expr_struct_fields")
};
let mut remaining_fields = variant.fields.iter().enumerate().map(|(i, field)|
(field.ident.modern(), (i, field))
).collect::<FxHashMap<_, _>>();
let mut seen_fields = FxHashMap::default();
let mut error_happened = false;
// Type-check each field.
for field in ast_fields {
let ident = tcx.adjust_ident(field.ident, variant.def_id, self.body_id).0;
let field_type = if let Some((i, v_field)) = remaining_fields.remove(&ident) {
seen_fields.insert(ident, field.span);
self.write_field_index(field.hir_id, i);
// We don't look at stability attributes on
// struct-like enums (yet...), but it's definitely not
// a bug to have constructed one.
if adt_kind != AdtKind::Enum {
tcx.check_stability(v_field.did, Some(expr_id), field.span);
}
self.field_ty(field.span, v_field, substs)
} else {
error_happened = true;
if let Some(prev_span) = seen_fields.get(&ident) {
let mut err = struct_span_err!(self.tcx.sess,
field.ident.span,
E0062,
"field `{}` specified more than once",
ident);
err.span_label(field.ident.span, "used more than once");
err.span_label(*prev_span, format!("first use of `{}`", ident));
err.emit();
} else {
self.report_unknown_field(adt_ty, variant, field, ast_fields, kind_name);
}
tcx.types.err
};
// Make sure to give a type to the field even if there's
// an error, so we can continue type-checking.
self.check_expr_coercable_to_type(&field.expr, field_type);
}
// Make sure the programmer specified correct number of fields.
if kind_name == "union" {
if ast_fields.len() != 1 {
tcx.sess.span_err(span, "union expressions should have exactly one field");
}
} else if check_completeness && !error_happened && !remaining_fields.is_empty() {
let len = remaining_fields.len();
let mut displayable_field_names = remaining_fields
.keys()
.map(|ident| ident.as_str())
.collect::<Vec<_>>();
displayable_field_names.sort();
let truncated_fields_error = if len <= 3 {
String::new()
} else {
format!(" and {} other field{}", (len - 3), if len - 3 == 1 {""} else {"s"})
};
let remaining_fields_names = displayable_field_names.iter().take(3)
.map(|n| format!("`{}`", n))
.collect::<Vec<_>>()
.join(", ");
struct_span_err!(tcx.sess, span, E0063,
"missing field{} {}{} in initializer of `{}`",
if remaining_fields.len() == 1 { "" } else { "s" },
remaining_fields_names,
truncated_fields_error,
adt_ty)
.span_label(span, format!("missing {}{}",
remaining_fields_names,
truncated_fields_error))
.emit();
}
error_happened
}
fn check_struct_fields_on_error(&self,
fields: &'gcx [hir::Field],
base_expr: &'gcx Option<P<hir::Expr>>) {
for field in fields {
self.check_expr(&field.expr);
}
if let Some(ref base) = *base_expr {
self.check_expr(&base);
}
}
pub fn check_struct_path(&self,
qpath: &QPath,
hir_id: hir::HirId)
-> Option<(&'tcx ty::VariantDef, Ty<'tcx>)> {
let path_span = match *qpath {
QPath::Resolved(_, ref path) => path.span,
QPath::TypeRelative(ref qself, _) => qself.span
};
let (def, ty) = self.finish_resolving_struct_path(qpath, path_span, hir_id);
let variant = match def {
Def::Err => {
self.set_tainted_by_errors();
return None;
}
Def::Variant(..) => {
match ty.sty {
ty::Adt(adt, substs) => {
Some((adt.variant_of_def(def), adt.did, substs))
}
_ => bug!("unexpected type: {:?}", ty)
}
}
Def::Struct(..) | Def::Union(..) | Def::TyAlias(..) |
Def::AssociatedTy(..) | Def::SelfTy(..) => {
match ty.sty {
ty::Adt(adt, substs) if !adt.is_enum() => {
Some((adt.non_enum_variant(), adt.did, substs))
}
_ => None,
}
}
_ => bug!("unexpected definition: {:?}", def)
};
if let Some((variant, did, substs)) = variant {
debug!("check_struct_path: did={:?} substs={:?}", did, substs);
self.write_user_type_annotation_from_substs(hir_id, did, substs, None);
// Check bounds on type arguments used in the path.
let bounds = self.instantiate_bounds(path_span, did, substs);
let cause = traits::ObligationCause::new(path_span, self.body_id,
traits::ItemObligation(did));
self.add_obligations_for_parameters(cause, &bounds);
Some((variant, ty))
} else {
struct_span_err!(self.tcx.sess, path_span, E0071,
"expected struct, variant or union type, found {}",
ty.sort_string(self.tcx))
.span_label(path_span, "not a struct")
.emit();
None
}
}
fn check_expr_struct(&self,
expr: &hir::Expr,
expected: Expectation<'tcx>,
qpath: &QPath,
fields: &'gcx [hir::Field],
base_expr: &'gcx Option<P<hir::Expr>>) -> Ty<'tcx>
{
// Find the relevant variant
let (variant, adt_ty) =
if let Some(variant_ty) = self.check_struct_path(qpath, expr.hir_id) {
variant_ty
} else {
self.check_struct_fields_on_error(fields, base_expr);
return self.tcx.types.err;
};
let path_span = match *qpath {
QPath::Resolved(_, ref path) => path.span,
QPath::TypeRelative(ref qself, _) => qself.span
};
// Prohibit struct expressions when non-exhaustive flag is set.
let adt = adt_ty.ty_adt_def().expect("`check_struct_path` returned non-ADT type");
if !adt.did.is_local() && variant.is_field_list_non_exhaustive() {
span_err!(self.tcx.sess, expr.span, E0639,
"cannot create non-exhaustive {} using struct expression",
adt.variant_descr());
}
let error_happened = self.check_expr_struct_fields(adt_ty, expected, expr.hir_id, path_span,
variant, fields, base_expr.is_none());
if let &Some(ref base_expr) = base_expr {
// If check_expr_struct_fields hit an error, do not attempt to populate
// the fields with the base_expr. This could cause us to hit errors later
// when certain fields are assumed to exist that in fact do not.
if !error_happened {
self.check_expr_has_type_or_error(base_expr, adt_ty);
match adt_ty.sty {
ty::Adt(adt, substs) if adt.is_struct() => {
let fru_field_types = adt.non_enum_variant().fields.iter().map(|f| {
self.normalize_associated_types_in(expr.span, &f.ty(self.tcx, substs))
}).collect();
self.tables
.borrow_mut()
.fru_field_types_mut()
.insert(expr.hir_id, fru_field_types);
}
_ => {
span_err!(self.tcx.sess, base_expr.span, E0436,
"functional record update syntax requires a struct");
}
}
}
}
self.require_type_is_sized(adt_ty, expr.span, traits::StructInitializerSized);
adt_ty
}
/// Invariant:
/// If an expression has any sub-expressions that result in a type error,
/// inspecting that expression's type with `ty.references_error()` will return
/// true. Likewise, if an expression is known to diverge, inspecting its
/// type with `ty::type_is_bot` will return true (n.b.: since Rust is
/// strict, _|_ can appear in the type of an expression that does not,
/// itself, diverge: for example, fn() -> _|_.)
/// Note that inspecting a type's structure *directly* may expose the fact
/// that there are actually multiple representations for `Error`, so avoid
/// that when err needs to be handled differently.
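    ///
    /// For example (illustrative), in `(1u8 + "x").to_string()` the
    /// addition is a type error, so the type of the outer method call
    /// reports `references_error()` as well.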
fn check_expr_with_expectation_and_needs(&self,
expr: &'gcx hir::Expr,
expected: Expectation<'tcx>,
needs: Needs) -> Ty<'tcx> {
debug!(">> type-checking: expr={:?} expected={:?}",
expr, expected);
// Warn for expressions after diverging siblings.
self.warn_if_unreachable(expr.hir_id, expr.span, "expression");
// Hide the outer diverging and has_errors flags.
let old_diverges = self.diverges.get();
let old_has_errors = self.has_errors.get();
self.diverges.set(Diverges::Maybe);
self.has_errors.set(false);
let ty = self.check_expr_kind(expr, expected, needs);
// Warn for non-block expressions with diverging children.
match expr.node {
ExprKind::Block(..) |
ExprKind::Loop(..) | ExprKind::While(..) |
ExprKind::If(..) | ExprKind::Match(..) => {}
_ => self.warn_if_unreachable(expr.hir_id, expr.span, "expression")
}
// Any expression that produces a value of type `!` must have diverged
if ty.is_never() {
self.diverges.set(self.diverges.get() | Diverges::Always);
}
        // Record the type, which applies its effects.
// We need to do this after the warning above, so that
// we don't warn for the diverging expression itself.
self.write_ty(expr.hir_id, ty);
// Combine the diverging and has_error flags.
self.diverges.set(self.diverges.get() | old_diverges);
self.has_errors.set(self.has_errors.get() | old_has_errors);
debug!("type of {} is...", self.tcx.hir().hir_to_string(expr.hir_id));
debug!("... {:?}, expected is {:?}", ty, expected);
ty
}
fn check_expr_kind(
&self,
expr: &'gcx hir::Expr,
expected: Expectation<'tcx>,
needs: Needs
) -> Ty<'tcx> {
debug!(
"check_expr_kind(expr={:?}, expected={:?}, needs={:?})",
expr,
expected,
needs,
);
let tcx = self.tcx;
let id = expr.hir_id;
match expr.node {
ExprKind::Box(ref subexpr) => {
let expected_inner = expected.to_option(self).map_or(NoExpectation, |ty| {
match ty.sty {
ty::Adt(def, _) if def.is_box()
=> Expectation::rvalue_hint(self, ty.boxed_ty()),
_ => NoExpectation
}
});
let referent_ty = self.check_expr_with_expectation(subexpr, expected_inner);
tcx.mk_box(referent_ty)
}
ExprKind::Lit(ref lit) => {
self.check_lit(&lit, expected)
}
ExprKind::Binary(op, ref lhs, ref rhs) => {
self.check_binop(expr, op, lhs, rhs)
}
ExprKind::AssignOp(op, ref lhs, ref rhs) => {
self.check_binop_assign(expr, op, lhs, rhs)
}
ExprKind::Unary(unop, ref oprnd) => {
let expected_inner = match unop {
hir::UnNot | hir::UnNeg => {
expected
}
hir::UnDeref => {
NoExpectation
}
};
let needs = match unop {
hir::UnDeref => needs,
_ => Needs::None
};
let mut oprnd_t = self.check_expr_with_expectation_and_needs(&oprnd,
expected_inner,
needs);
if !oprnd_t.references_error() {
oprnd_t = self.structurally_resolved_type(expr.span, oprnd_t);
match unop {
hir::UnDeref => {
if let Some(mt) = oprnd_t.builtin_deref(true) {
oprnd_t = mt.ty;
} else if let Some(ok) = self.try_overloaded_deref(
expr.span, oprnd_t, needs) {
let method = self.register_infer_ok_obligations(ok);
if let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].sty {
let mutbl = match mutbl {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
// (It shouldn't actually matter for unary ops whether
// we enable two-phase borrows or not, since a unary
// op has no additional operands.)
allow_two_phase_borrow: AllowTwoPhase::No,
}
};
self.apply_adjustments(oprnd, vec![Adjustment {
kind: Adjust::Borrow(AutoBorrow::Ref(region, mutbl)),
target: method.sig.inputs()[0]
}]);
}
oprnd_t = self.make_overloaded_place_return_type(method).ty;
self.write_method_call(expr.hir_id, method);
} else {
type_error_struct!(tcx.sess, expr.span, oprnd_t, E0614,
"type `{}` cannot be dereferenced",
oprnd_t).emit();
oprnd_t = tcx.types.err;
}
}
hir::UnNot => {
let result = self.check_user_unop(expr, oprnd_t, unop);
                            // If it's builtin, we can reuse the type; this helps inference.
if !(oprnd_t.is_integral() || oprnd_t.sty == ty::Bool) {
oprnd_t = result;
}
}
hir::UnNeg => {
let result = self.check_user_unop(expr, oprnd_t, unop);
                            // If it's builtin, we can reuse the type; this helps inference.
if !(oprnd_t.is_integral() || oprnd_t.is_fp()) {
oprnd_t = result;
}
}
}
}
oprnd_t
}
ExprKind::AddrOf(mutbl, ref oprnd) => {
let hint = expected.only_has_type(self).map_or(NoExpectation, |ty| {
match ty.sty {
ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
if oprnd.is_place_expr() {
// Places may legitimately have unsized types.
// For example, dereferences of a fat pointer and
// the last field of a struct can be unsized.
ExpectHasType(ty)
} else {
Expectation::rvalue_hint(self, ty)
}
}
_ => NoExpectation
}
});
let needs = Needs::maybe_mut_place(mutbl);
let ty = self.check_expr_with_expectation_and_needs(&oprnd, hint, needs);
                let tm = ty::TypeAndMut { ty, mutbl };
if tm.ty.references_error() {
tcx.types.err
} else {
                    // Note: at this point, we cannot say what the best lifetime
                    // is to use for the resulting pointer. We want to use the
// shortest lifetime possible so as to avoid spurious borrowck
// errors. Moreover, the longest lifetime will depend on the
// precise details of the value whose address is being taken
// (and how long it is valid), which we don't know yet until type
// inference is complete.
//
// Therefore, here we simply generate a region variable. The
// region inferencer will then select the ultimate value.
// Finally, borrowck is charged with guaranteeing that the
// value whose address was taken can actually be made to live
// as long as it needs to live.
let region = self.next_region_var(infer::AddrOfRegion(expr.span));
tcx.mk_ref(region, tm)
}
}
ExprKind::Path(ref qpath) => {
let (def, opt_ty, segs) = self.resolve_ty_and_def_ufcs(qpath, expr.hir_id,
expr.span);
let ty = match def {
Def::Err => {
self.set_tainted_by_errors();
tcx.types.err
}
Def::Ctor(_, _, CtorKind::Fictive) => {
report_unexpected_variant_def(tcx, &def, expr.span, qpath);
tcx.types.err
}
_ => self.instantiate_value_path(segs, opt_ty, def, expr.span, id).0,
};
if let ty::FnDef(..) = ty.sty {
let fn_sig = ty.fn_sig(tcx);
if !tcx.features().unsized_locals {
// We want to remove some Sized bounds from std functions,
// but don't want to expose the removal to stable Rust.
// i.e., we don't want to allow
//
// ```rust
// drop as fn(str);
// ```
//
// to work in stable even if the Sized bound on `drop` is relaxed.
for i in 0..fn_sig.inputs().skip_binder().len() {
// We just want to check sizedness, so instead of introducing
// placeholder lifetimes with probing, we just replace higher lifetimes
// with fresh vars.
let input = self.replace_bound_vars_with_fresh_vars(
expr.span,
infer::LateBoundRegionConversionTime::FnCall,
&fn_sig.input(i)).0;
self.require_type_is_sized_deferred(input, expr.span,
traits::SizedArgumentType);
}
}
                    // Here we want to prevent struct constructors from returning
                    // unsized types. There were two cases where this happened:
                    // `fn` pointer coercion in stable Rust and usual function
                    // calls in the presence of `unsized_locals`.
// Also, as we just want to check sizedness, instead of introducing
// placeholder lifetimes with probing, we just replace higher lifetimes
// with fresh vars.
let output = self.replace_bound_vars_with_fresh_vars(
expr.span,
infer::LateBoundRegionConversionTime::FnCall,
&fn_sig.output()).0;
self.require_type_is_sized_deferred(output, expr.span, traits::SizedReturnType);
}
// We always require that the type provided as the value for
// a type parameter outlives the moment of instantiation.
let substs = self.tables.borrow().node_substs(expr.hir_id);
self.add_wf_bounds(substs, expr);
ty
}
ExprKind::InlineAsm(_, ref outputs, ref inputs) => {
for expr in outputs.iter().chain(inputs.iter()) {
self.check_expr(expr);
}
tcx.mk_unit()
}
ExprKind::Break(destination, ref expr_opt) => {
if let Ok(target_id) = destination.target_id {
let (e_ty, cause);
if let Some(ref e) = *expr_opt {
// If this is a break with a value, we need to type-check
// the expression. Get an expected type from the loop context.
let opt_coerce_to = {
let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
enclosing_breakables.find_breakable(target_id)
.coerce
.as_ref()
.map(|coerce| coerce.expected_ty())
};
// If the loop context is not a `loop { }`, then break with
// a value is illegal, and `opt_coerce_to` will be `None`.
// Just set expectation to error in that case.
let coerce_to = opt_coerce_to.unwrap_or(tcx.types.err);
// Recurse without `enclosing_breakables` borrowed.
e_ty = self.check_expr_with_hint(e, coerce_to);
cause = self.misc(e.span);
} else {
// Otherwise, this is a break *without* a value. That's
// always legal, and is equivalent to `break ()`.
e_ty = tcx.mk_unit();
cause = self.misc(expr.span);
}
// Now that we have type-checked `expr_opt`, borrow
// the `enclosing_loops` field and let's coerce the
// type of `expr_opt` into what is expected.
let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
let ctxt = enclosing_breakables.find_breakable(target_id);
if let Some(ref mut coerce) = ctxt.coerce {
if let Some(ref e) = *expr_opt {
coerce.coerce(self, &cause, e, e_ty);
} else {
assert!(e_ty.is_unit());
coerce.coerce_forced_unit(self, &cause, &mut |_| (), true);
}
} else {
// If `ctxt.coerce` is `None`, we can just ignore
                        // the type of the expression. This is because
// either this was a break *without* a value, in
// which case it is always a legal type (`()`), or
// else an error would have been flagged by the
// `loops` pass for using break with an expression
// where you are not supposed to.
assert!(expr_opt.is_none() || self.tcx.sess.err_count() > 0);
}
ctxt.may_break = true;
// the type of a `break` is always `!`, since it diverges
tcx.types.never
} else {
// Otherwise, we failed to find the enclosing loop;
// this can only happen if the `break` was not
// inside a loop at all, which is caught by the
// loop-checking pass.
if self.tcx.sess.err_count() == 0 {
self.tcx.sess.delay_span_bug(expr.span,
"break was outside loop, but no error was emitted");
}
// We still need to assign a type to the inner expression to
// prevent the ICE in #43162.
if let Some(ref e) = *expr_opt {
self.check_expr_with_hint(e, tcx.types.err);
// ... except when we try to 'break rust;'.
// ICE this expression in particular (see #43162).
if let ExprKind::Path(QPath::Resolved(_, ref path)) = e.node {
if path.segments.len() == 1 && path.segments[0].ident.name == "rust" {
fatally_break_rust(self.tcx.sess);
}
}
}
// There was an error; make type-check fail.
tcx.types.err
}
}
ExprKind::Continue(destination) => {
if destination.target_id.is_ok() {
tcx.types.never
} else {
// There was an error; make type-check fail.
tcx.types.err
}
}
ExprKind::Ret(ref expr_opt) => {
if self.ret_coercion.is_none() {
struct_span_err!(self.tcx.sess, expr.span, E0572,
"return statement outside of function body").emit();
} else if let Some(ref e) = *expr_opt {
if self.ret_coercion_span.borrow().is_none() {
*self.ret_coercion_span.borrow_mut() = Some(e.span);
}
self.check_return_expr(e);
} else {
let mut coercion = self.ret_coercion.as_ref().unwrap().borrow_mut();
if self.ret_coercion_span.borrow().is_none() {
*self.ret_coercion_span.borrow_mut() = Some(expr.span);
}
let cause = self.cause(expr.span, ObligationCauseCode::ReturnNoExpression);
if let Some((fn_decl, _)) = self.get_fn_decl(expr.hir_id) {
coercion.coerce_forced_unit(
self,
&cause,
&mut |db| {
db.span_label(
fn_decl.output.span(),
format!(
"expected `{}` because of this return type",
fn_decl.output,
),
);
},
true,
);
} else {
coercion.coerce_forced_unit(self, &cause, &mut |_| (), true);
}
}
tcx.types.never
}
ExprKind::Assign(ref lhs, ref rhs) => {
self.check_assign(expr, expected, lhs, rhs)
}
ExprKind::If(ref cond, ref then_expr, ref opt_else_expr) => {
self.check_then_else(&cond, then_expr, opt_else_expr.as_ref().map(|e| &**e),
expr.span, expected)
}
ExprKind::While(ref cond, ref body, _) => {
let ctxt = BreakableCtxt {
// cannot use break with a value from a while loop
coerce: None,
may_break: false, // Will get updated if/when we find a `break`.
};
let (ctxt, ()) = self.with_breakable_ctxt(expr.hir_id, ctxt, || {
self.check_expr_has_type_or_error(&cond, tcx.types.bool);
let cond_diverging = self.diverges.get();
self.check_block_no_value(&body);
                    // We may never reach the body, so its divergence tells us nothing.
self.diverges.set(cond_diverging);
});
if ctxt.may_break {
// No way to know whether it's diverging because
// of a `break` or an outer `break` or `return`.
self.diverges.set(Diverges::Maybe);
}
self.tcx.mk_unit()
}
ExprKind::Loop(ref body, _, source) => {
let coerce = match source {
// you can only use break with a value from a normal `loop { }`
hir::LoopSource::Loop => {
let coerce_to = expected.coercion_target_type(self, body.span);
Some(CoerceMany::new(coerce_to))
}
hir::LoopSource::WhileLet |
hir::LoopSource::ForLoop => {
None
}
};
let ctxt = BreakableCtxt {
coerce,
may_break: false, // Will get updated if/when we find a `break`.
};
let (ctxt, ()) = self.with_breakable_ctxt(expr.hir_id, ctxt, || {
self.check_block_no_value(&body);
});
if ctxt.may_break {
// No way to know whether it's diverging because
// of a `break` or an outer `break` or `return`.
self.diverges.set(Diverges::Maybe);
}
                // If we permit break with a value, then the result type is
                // the LUB of the breaks (possibly `!` if none); else, it
                // is `()`. This makes sense because infinite loops
                // (which would have type `!`) are only possible iff we
                // permit break with a value [1].
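                // e.g., `let x = loop { break 5; };` gives `x: i32`, and a
                // `loop` that never breaks has type `!`.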
if ctxt.coerce.is_none() && !ctxt.may_break {
// [1]
self.tcx.sess.delay_span_bug(body.span, "no coercion, but loop may not break");
}
ctxt.coerce.map(|c| c.complete(self)).unwrap_or_else(|| self.tcx.mk_unit())
}
ExprKind::Match(ref discrim, ref arms, match_src) => {
self.check_match(expr, &discrim, arms, expected, match_src)
}
ExprKind::Closure(capture, ref decl, body_id, _, gen) => {
self.check_expr_closure(expr, capture, &decl, body_id, gen, expected)
}
ExprKind::Block(ref body, _) => {
self.check_block_with_expected(&body, expected)
}
ExprKind::Call(ref callee, ref args) => {
self.check_call(expr, &callee, args, expected)
}
ExprKind::MethodCall(ref segment, span, ref args) => {
self.check_method_call(expr, segment, span, args, expected, needs)
}
ExprKind::Cast(ref e, ref t) => {
// Find the type of `e`. Supply hints based on the type we are casting to,
// if appropriate.
let t_cast = self.to_ty_saving_user_provided_ty(t);
let t_cast = self.resolve_type_vars_if_possible(&t_cast);
let t_expr = self.check_expr_with_expectation(e, ExpectCastableToType(t_cast));
let t_cast = self.resolve_type_vars_if_possible(&t_cast);
// Eagerly check for some obvious errors.
if t_expr.references_error() || t_cast.references_error() {
tcx.types.err
} else {
// Defer other checks until we're done type checking.
let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut();
match cast::CastCheck::new(self, e, t_expr, t_cast, t.span, expr.span) {
Ok(cast_check) => {
deferred_cast_checks.push(cast_check);
t_cast
}
Err(ErrorReported) => {
tcx.types.err
}
}
}
}
ExprKind::Type(ref e, ref t) => {
let ty = self.to_ty_saving_user_provided_ty(&t);
self.check_expr_eq_type(&e, ty);
ty
}
ExprKind::Array(ref args) => {
let uty = expected.to_option(self).and_then(|uty| {
match uty.sty {
ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
_ => None
}
});
let element_ty = if !args.is_empty() {
let coerce_to = uty.unwrap_or_else(
|| self.next_ty_var(TypeVariableOrigin::TypeInference(expr.span)));
let mut coerce = CoerceMany::with_coercion_sites(coerce_to, args);
assert_eq!(self.diverges.get(), Diverges::Maybe);
for e in args {
let e_ty = self.check_expr_with_hint(e, coerce_to);
let cause = self.misc(e.span);
coerce.coerce(self, &cause, e, e_ty);
}
coerce.complete(self)
} else {
self.next_ty_var(TypeVariableOrigin::TypeInference(expr.span))
};
tcx.mk_array(element_ty, args.len() as u64)
}
ExprKind::Repeat(ref element, ref count) => {
let count_def_id = tcx.hir().local_def_id_from_hir_id(count.hir_id);
let param_env = ty::ParamEnv::empty();
let substs = InternalSubsts::identity_for_item(tcx.global_tcx(), count_def_id);
let instance = ty::Instance::resolve(
tcx.global_tcx(),
param_env,
count_def_id,
substs,
).unwrap();
let global_id = GlobalId {
instance,
promoted: None
};
let count = tcx.const_eval(param_env.and(global_id));
let uty = match expected {
ExpectHasType(uty) => {
match uty.sty {
ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
_ => None
}
}
_ => None
};
let (element_ty, t) = match uty {
Some(uty) => {
self.check_expr_coercable_to_type(&element, uty);
(uty, uty)
}
None => {
let ty = self.next_ty_var(TypeVariableOrigin::MiscVariable(element.span));
let element_ty = self.check_expr_has_type_or_error(&element, ty);
(element_ty, ty)
}
};
if let Ok(count) = count {
let zero_or_one = count.assert_usize(tcx).map_or(false, |count| count <= 1);
if !zero_or_one {
                        // For `[foo; n]` where `n > 1`, `foo` must have
                        // Copy type:
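                        // (Counts of 0 or 1 don't require `Copy` here, e.g.,
                        // `[String::new(); 1]` is accepted while
                        // `[String::new(); 2]` is not.)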
let lang_item = self.tcx.require_lang_item(lang_items::CopyTraitLangItem);
self.require_type_meets(t, expr.span, traits::RepeatVec, lang_item);
}
}
if element_ty.references_error() {
tcx.types.err
} else if let Ok(count) = count {
tcx.mk_ty(ty::Array(t, tcx.mk_const(count)))
} else {
tcx.types.err
}
}
ExprKind::Tup(ref elts) => {
let flds = expected.only_has_type(self).and_then(|ty| {
let ty = self.resolve_type_vars_with_obligations(ty);
match ty.sty {
ty::Tuple(ref flds) => Some(&flds[..]),
_ => None
}
});
let elt_ts_iter = elts.iter().enumerate().map(|(i, e)| {
let t = match flds {
Some(ref fs) if i < fs.len() => {
let ety = fs[i];
self.check_expr_coercable_to_type(&e, ety);
ety
}
_ => {
self.check_expr_with_expectation(&e, NoExpectation)
}
};
t
});
let tuple = tcx.mk_tup(elt_ts_iter);
if tuple.references_error() {
tcx.types.err
} else {
self.require_type_is_sized(tuple, expr.span, traits::TupleInitializerSized);
tuple
}
}
ExprKind::Struct(ref qpath, ref fields, ref base_expr) => {
self.check_expr_struct(expr, expected, qpath, fields, base_expr)
}
ExprKind::Field(ref base, field) => {
self.check_field(expr, needs, &base, field)
}
ExprKind::Index(ref base, ref idx) => {
let base_t = self.check_expr_with_needs(&base, needs);
let idx_t = self.check_expr(&idx);
if base_t.references_error() {
base_t
} else if idx_t.references_error() {
idx_t
} else {
let base_t = self.structurally_resolved_type(base.span, base_t);
match self.lookup_indexing(expr, base, base_t, idx_t, needs) {
Some((index_ty, element_ty)) => {
// two-phase not needed because index_ty is never mutable
self.demand_coerce(idx, idx_t, index_ty, AllowTwoPhase::No);
element_ty
}
None => {
let mut err =
type_error_struct!(tcx.sess, expr.span, base_t, E0608,
"cannot index into a value of type `{}`",
base_t);
// Try to give some advice about indexing tuples.
if let ty::Tuple(..) = base_t.sty {
let mut needs_note = true;
// If the index is an integer, we can show the actual
// fixed expression:
if let ExprKind::Lit(ref lit) = idx.node {
if let ast::LitKind::Int(i,
ast::LitIntType::Unsuffixed) = lit.node {
let snip = tcx.sess.source_map().span_to_snippet(base.span);
if let Ok(snip) = snip {
err.span_suggestion(
expr.span,
"to access tuple elements, use",
format!("{}.{}", snip, i),
Applicability::MachineApplicable,
);
needs_note = false;
}
}
}
if needs_note {
err.help("to access tuple elements, use tuple indexing \
syntax (e.g., `tuple.0`)");
}
}
err.emit();
self.tcx.types.err
}
}
}
}
ExprKind::Yield(ref value) => {
match self.yield_ty {
Some(ty) => {
self.check_expr_coercable_to_type(&value, ty);
}
None => {
struct_span_err!(self.tcx.sess, expr.span, E0627,
"yield statement outside of generator literal").emit();
}
}
tcx.mk_unit()
}
hir::ExprKind::Err => {
tcx.types.err
}
}
}
/// Type check assignment expression `expr` of form `lhs = rhs`.
    /// The expected type is `()` and is passed to the function for the purposes of diagnostics.
fn check_assign(
&self,
expr: &'gcx hir::Expr,
expected: Expectation<'tcx>,
lhs: &'gcx hir::Expr,
rhs: &'gcx hir::Expr,
) -> Ty<'tcx> {
let lhs_ty = self.check_expr_with_needs(&lhs, Needs::MutPlace);
let rhs_ty = self.check_expr_coercable_to_type(&rhs, lhs_ty);
let expected_ty = expected.coercion_target_type(self, expr.span);
if expected_ty == self.tcx.types.bool {
// The expected type is `bool` but this will result in `()` so we can reasonably
// say that the user intended to write `lhs == rhs` instead of `lhs = rhs`.
// The likely cause of this is `if foo = bar { .. }`.
let actual_ty = self.tcx.mk_unit();
let mut err = self.demand_suptype_diag(expr.span, expected_ty, actual_ty).unwrap();
let msg = "try comparing for equality";
let left = self.tcx.sess.source_map().span_to_snippet(lhs.span);
let right = self.tcx.sess.source_map().span_to_snippet(rhs.span);
if let (Ok(left), Ok(right)) = (left, right) {
let help = format!("{} == {}", left, right);
err.span_suggestion(expr.span, msg, help, Applicability::MaybeIncorrect);
} else {
err.help(msg);
}
err.emit();
} else if !lhs.is_place_expr() {
struct_span_err!(self.tcx.sess, expr.span, E0070,
"invalid left-hand side expression")
                .span_label(expr.span, "left-hand side of expression not valid")
.emit();
}
self.require_type_is_sized(lhs_ty, lhs.span, traits::AssignmentLhsSized);
if lhs_ty.references_error() || rhs_ty.references_error() {
self.tcx.types.err
} else {
self.tcx.mk_unit()
}
}
// Finish resolving a path in a struct expression or pattern `S::A { .. }` if necessary.
// The newly resolved definition is written into `type_dependent_defs`.
fn finish_resolving_struct_path(&self,
qpath: &QPath,
path_span: Span,
hir_id: hir::HirId)
-> (Def, Ty<'tcx>)
{
match *qpath {
QPath::Resolved(ref maybe_qself, ref path) => {
let self_ty = maybe_qself.as_ref().map(|qself| self.to_ty(qself));
let ty = AstConv::def_to_ty(self, self_ty, path, true);
(path.def, ty)
}
QPath::TypeRelative(ref qself, ref segment) => {
let ty = self.to_ty(qself);
let def = if let hir::TyKind::Path(QPath::Resolved(_, ref path)) = qself.node {
path.def
} else {
Def::Err
};
let (ty, def) = AstConv::associated_path_to_ty(self, hir_id, path_span,
ty, def, segment, true);
// Write back the new resolution.
self.tables.borrow_mut().type_dependent_defs_mut().insert(hir_id, def);
(def, ty)
}
}
}
/// Resolves associated value path into a base type and associated constant or method
/// definition. The newly resolved definition is written into `type_dependent_defs`.
pub fn resolve_ty_and_def_ufcs<'b>(&self,
qpath: &'b QPath,
hir_id: hir::HirId,
span: Span)
-> (Def, Option<Ty<'tcx>>, &'b [hir::PathSegment])
{
debug!("resolve_ty_and_def_ufcs: qpath={:?} hir_id={:?} span={:?}", qpath, hir_id, span);
let (ty, qself, item_segment) = match *qpath {
QPath::Resolved(ref opt_qself, ref path) => {
return (path.def,
opt_qself.as_ref().map(|qself| self.to_ty(qself)),
&path.segments[..]);
}
QPath::TypeRelative(ref qself, ref segment) => {
(self.to_ty(qself), qself, segment)
}
};
if let Some(cached_def) = self.tables.borrow().type_dependent_def(hir_id) {
// Return directly on cache hit. This is useful to avoid doubly reporting
// errors with default match binding modes. See #44614.
return (cached_def, Some(ty), slice::from_ref(&**item_segment))
}
let item_name = item_segment.ident;
let def = match self.resolve_ufcs(span, item_name, ty, hir_id) {
Ok(def) => def,
Err(error) => {
let def = match error {
method::MethodError::PrivateMatch(def, _) => def,
_ => Def::Err,
};
if item_name.name != keywords::Invalid.name() {
self.report_method_error(span,
ty,
item_name,
SelfSource::QPath(qself),
error,
None);
}
def
}
};
// Write back the new resolution.
self.tables.borrow_mut().type_dependent_defs_mut().insert(hir_id, def);
(def, Some(ty), slice::from_ref(&**item_segment))
}
pub fn check_decl_initializer(&self,
local: &'gcx hir::Local,
init: &'gcx hir::Expr) -> Ty<'tcx>
{
// FIXME(tschottdorf): `contains_explicit_ref_binding()` must be removed
// for #42640 (default match binding modes).
//
// See #44848.
let ref_bindings = local.pat.contains_explicit_ref_binding();
let local_ty = self.local_ty(init.span, local.hir_id).revealed_ty;
if let Some(m) = ref_bindings {
// Somewhat subtle: if we have a `ref` binding in the pattern,
// we want to avoid introducing coercions for the RHS. This is
// both because it helps preserve sanity and, in the case of
// ref mut, for soundness (issue #23116). In particular, in
// the latter case, we need to be clear that the type of the
// referent for the reference that results is *equal to* the
// type of the place it is referencing, and not some
// supertype thereof.
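            // e.g., `let ref mut x: &mut [u8] = &mut [0u8; 3];` is rejected
            // rather than coercing `&mut [u8; 3]` to `&mut [u8]`, since `*x`
            // must name the original place at exactly its own type.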
let init_ty = self.check_expr_with_needs(init, Needs::maybe_mut_place(m));
self.demand_eqtype(init.span, local_ty, init_ty);
init_ty
} else {
self.check_expr_coercable_to_type(init, local_ty)
}
}
pub fn check_decl_local(&self, local: &'gcx hir::Local) {
let t = self.local_ty(local.span, local.hir_id).decl_ty;
self.write_ty(local.hir_id, t);
if let Some(ref init) = local.init {
let init_ty = self.check_decl_initializer(local, &init);
if init_ty.references_error() {
self.write_ty(local.hir_id, init_ty);
}
}
self.check_pat_walk(
&local.pat,
t,
ty::BindingMode::BindByValue(hir::Mutability::MutImmutable),
None,
);
let pat_ty = self.node_ty(local.pat.hir_id);
if pat_ty.references_error() {
self.write_ty(local.hir_id, pat_ty);
}
}
pub fn check_stmt(&self, stmt: &'gcx hir::Stmt) {
// Don't do all the complex logic below for `DeclItem`.
match stmt.node {
hir::StmtKind::Item(..) => return,
hir::StmtKind::Local(..) | hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => {}
}
self.warn_if_unreachable(stmt.hir_id, stmt.span, "statement");
// Hide the outer diverging and `has_errors` flags.
let old_diverges = self.diverges.get();
let old_has_errors = self.has_errors.get();
self.diverges.set(Diverges::Maybe);
self.has_errors.set(false);
match stmt.node {
hir::StmtKind::Local(ref l) => {
self.check_decl_local(&l);
}
// Ignore for now.
hir::StmtKind::Item(_) => {}
hir::StmtKind::Expr(ref expr) => {
// Check with expected type of `()`.
self.check_expr_has_type_or_error(&expr, self.tcx.mk_unit());
}
hir::StmtKind::Semi(ref expr) => {
self.check_expr(&expr);
}
}
// Combine the diverging and `has_error` flags.
self.diverges.set(self.diverges.get() | old_diverges);
self.has_errors.set(self.has_errors.get() | old_has_errors);
}
pub fn check_block_no_value(&self, blk: &'gcx hir::Block) {
let unit = self.tcx.mk_unit();
let ty = self.check_block_with_expected(blk, ExpectHasType(unit));
// if the block produces a `!` value, that can always be
// (effectively) coerced to unit.
if !ty.is_never() {
self.demand_suptype(blk.span, unit, ty);
}
}
fn check_block_with_expected(&self,
blk: &'gcx hir::Block,
expected: Expectation<'tcx>) -> Ty<'tcx> {
let prev = {
let mut fcx_ps = self.ps.borrow_mut();
let unsafety_state = fcx_ps.recurse(blk);
replace(&mut *fcx_ps, unsafety_state)
};
// In some cases, blocks have just one exit, but other blocks
// can be targeted by multiple breaks. This can happen both
// with labeled blocks as well as when we desugar
// a `try { ... }` expression.
//
// Example 1:
//
// 'a: { if true { break 'a Err(()); } Ok(()) }
//
// Here we would wind up with two coercions, one from
// `Err(())` and the other from the tail expression
// `Ok(())`. If the tail expression is omitted, that's a
// "forced unit" -- unless the block diverges, in which
// case we can ignore the tail expression (e.g., `'a: {
// break 'a 22; }` would not force the type of the block
// to be `()`).
let tail_expr = blk.expr.as_ref();
let coerce_to_ty = expected.coercion_target_type(self, blk.span);
let coerce = if blk.targeted_by_break {
CoerceMany::new(coerce_to_ty)
} else {
let tail_expr: &[P<hir::Expr>] = match tail_expr {
Some(e) => slice::from_ref(e),
None => &[],
};
CoerceMany::with_coercion_sites(coerce_to_ty, tail_expr)
};
let prev_diverges = self.diverges.get();
let ctxt = BreakableCtxt {
coerce: Some(coerce),
may_break: false,
};
let (ctxt, ()) = self.with_breakable_ctxt(blk.hir_id, ctxt, || {
for s in &blk.stmts {
self.check_stmt(s);
}
// check the tail expression **without** holding the
// `enclosing_breakables` lock below.
let tail_expr_ty = tail_expr.map(|t| self.check_expr_with_expectation(t, expected));
let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
let ctxt = enclosing_breakables.find_breakable(blk.hir_id);
let coerce = ctxt.coerce.as_mut().unwrap();
if let Some(tail_expr_ty) = tail_expr_ty {
let tail_expr = tail_expr.unwrap();
let cause = self.cause(tail_expr.span,
ObligationCauseCode::BlockTailExpression(blk.hir_id));
coerce.coerce(self,
&cause,
tail_expr,
tail_expr_ty);
} else {
// Subtle: if there is no explicit tail expression,
// that is typically equivalent to a tail expression
// of `()` -- except if the block diverges. In that
// case, there is no value supplied from the tail
// expression (assuming there are no other breaks,
// this implies that the type of the block will be
// `!`).
//
// #41425 -- label the implicit `()` as being the
// "found type" here, rather than the "expected type".
if !self.diverges.get().always() {
// #50009 -- Do not point at the entire fn block span, point at the return type
// span, as it is the cause of the requirement, and
// `consider_hint_about_removing_semicolon` will point at the last expression
// if it were a relevant part of the error. This improves usability in editors
// that highlight errors inline.
let mut sp = blk.span;
let mut fn_span = None;
if let Some((decl, ident)) = self.get_parent_fn_decl(blk.hir_id) {
let ret_sp = decl.output.span();
if let Some(block_sp) = self.parent_item_span(blk.hir_id) {
                    // HACK: in some cases (`ui/liveness/liveness-issue-2163.rs`) the
                    // output would otherwise be incorrect and even misleading. Make sure
                    // the span we're aiming at corresponds to a `fn` body.
if block_sp == blk.span {
sp = ret_sp;
fn_span = Some(ident.span);
}
}
}
coerce.coerce_forced_unit(self, &self.misc(sp), &mut |err| {
if let Some(expected_ty) = expected.only_has_type(self) {
self.consider_hint_about_removing_semicolon(blk, expected_ty, err);
}
if let Some(fn_span) = fn_span {
err.span_label(fn_span, "this function's body doesn't return");
}
}, false);
}
}
});
if ctxt.may_break {
// If we can break from the block, then the block's exit is always reachable
// (... as long as the entry is reachable) - regardless of the tail of the block.
self.diverges.set(prev_diverges);
}
let mut ty = ctxt.coerce.unwrap().complete(self);
if self.has_errors.get() || ty.references_error() {
ty = self.tcx.types.err
}
self.write_ty(blk.hir_id, ty);
*self.ps.borrow_mut() = prev;
ty
}
fn parent_item_span(&self, id: hir::HirId) -> Option<Span> {
let node = self.tcx.hir().get_by_hir_id(self.tcx.hir().get_parent_item(id));
match node {
Node::Item(&hir::Item {
node: hir::ItemKind::Fn(_, _, _, body_id), ..
}) |
Node::ImplItem(&hir::ImplItem {
node: hir::ImplItemKind::Method(_, body_id), ..
}) => {
let body = self.tcx.hir().body(body_id);
if let ExprKind::Block(block, _) = &body.value.node {
return Some(block.span);
}
}
_ => {}
}
None
}
/// Given a function block's `HirId`, returns its `FnDecl` if it exists, or `None` otherwise.
fn get_parent_fn_decl(&self, blk_id: hir::HirId) -> Option<(hir::FnDecl, ast::Ident)> {
let parent = self.tcx.hir().get_by_hir_id(self.tcx.hir().get_parent_item(blk_id));
self.get_node_fn_decl(parent).map(|(fn_decl, ident, _)| (fn_decl, ident))
}
/// Given a function `Node`, return its `FnDecl` if it exists, or `None` otherwise.
fn get_node_fn_decl(&self, node: Node<'_>) -> Option<(hir::FnDecl, ast::Ident, bool)> {
match node {
Node::Item(&hir::Item {
ident, node: hir::ItemKind::Fn(ref decl, ..), ..
}) => decl.clone().and_then(|decl| {
                // This is less than ideal; it will not suggest a return type span on any
// method called `main`, regardless of whether it is actually the entry point,
// but it will still present it as the reason for the expected type.
Some((decl, ident, ident.name != Symbol::intern("main")))
}),
Node::TraitItem(&hir::TraitItem {
ident, node: hir::TraitItemKind::Method(hir::MethodSig {
ref decl, ..
}, ..), ..
}) => decl.clone().and_then(|decl| Some((decl, ident, true))),
Node::ImplItem(&hir::ImplItem {
ident, node: hir::ImplItemKind::Method(hir::MethodSig {
ref decl, ..
}, ..), ..
}) => decl.clone().and_then(|decl| Some((decl, ident, false))),
_ => None,
}
}
    /// Given a `HirId`, returns the `FnDecl` of the method it is enclosed by and whether a
    /// suggestion can be made, or `None` otherwise.
pub fn get_fn_decl(&self, blk_id: hir::HirId) -> Option<(hir::FnDecl, bool)> {
// Get enclosing Fn, if it is a function or a trait method, unless there's a `loop` or
// `while` before reaching it, as block tail returns are not available in them.
self.tcx.hir().get_return_block(blk_id).and_then(|blk_id| {
let parent = self.tcx.hir().get_by_hir_id(blk_id);
self.get_node_fn_decl(parent).map(|(fn_decl, _, is_main)| (fn_decl, is_main))
})
}
/// On implicit return expressions with mismatched types, provides the following suggestions:
///
/// - Points out the method's return type as the reason for the expected type.
/// - Possible missing semicolon.
/// - Possible missing return type if the return type is the default, and not `fn main()`.
pub fn suggest_mismatched_types_on_tail(
&self,
err: &mut DiagnosticBuilder<'tcx>,
expression: &'gcx hir::Expr,
expected: Ty<'tcx>,
found: Ty<'tcx>,
cause_span: Span,
blk_id: hir::HirId,
) -> bool {
self.suggest_missing_semicolon(err, expression, expected, cause_span);
let mut pointing_at_return_type = false;
if let Some((fn_decl, can_suggest)) = self.get_fn_decl(blk_id) {
pointing_at_return_type = self.suggest_missing_return_type(
err, &fn_decl, expected, found, can_suggest);
}
self.suggest_ref_or_into(err, expression, expected, found);
pointing_at_return_type
}
pub fn suggest_ref_or_into(
&self,
err: &mut DiagnosticBuilder<'tcx>,
expr: &hir::Expr,
expected: Ty<'tcx>,
found: Ty<'tcx>,
) {
if let Some((sp, msg, suggestion)) = self.check_ref(expr, found, expected) {
err.span_suggestion(
sp,
msg,
suggestion,
Applicability::MachineApplicable,
);
} else if !self.check_for_cast(err, expr, found, expected) {
let methods = self.get_conversion_methods(expr.span, expected, found);
if let Ok(expr_text) = self.sess().source_map().span_to_snippet(expr.span) {
let mut suggestions = iter::repeat(&expr_text).zip(methods.iter())
.filter_map(|(receiver, method)| {
let method_call = format!(".{}()", method.ident);
if receiver.ends_with(&method_call) {
None // do not suggest code that is already there (#53348)
} else {
let method_call_list = [".to_vec()", ".to_string()"];
if receiver.ends_with(".clone()")
&& method_call_list.contains(&method_call.as_str()) {
let max_len = receiver.rfind(".").unwrap();
Some(format!("{}{}", &receiver[..max_len], method_call))
}
else {
Some(format!("{}{}", receiver, method_call))
}
}
}).peekable();
if suggestions.peek().is_some() {
err.span_suggestions(
expr.span,
"try using a conversion method",
suggestions,
Applicability::MaybeIncorrect,
);
}
}
}
}
/// A common error is to forget to add a semicolon at the end of a block, e.g.,
///
/// ```
/// fn foo() {
/// bar_that_returns_u32()
/// }
/// ```
///
    /// This routine checks whether the return expression in a block would make sense on its own
    /// as a statement and whether the return type has been left as default or specified as `()`. If so,
/// it suggests adding a semicolon.
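    ///
    /// A sketch of the code after applying the emitted "try adding a
    /// semicolon" suggestion:
    ///
    /// ```
    /// fn foo() {
    ///     bar_that_returns_u32();
    /// }
    /// ```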
fn suggest_missing_semicolon(&self,
err: &mut DiagnosticBuilder<'tcx>,
expression: &'gcx hir::Expr,
expected: Ty<'tcx>,
cause_span: Span) {
if expected.is_unit() {
            // `BlockTailExpression` is only relevant if the tail expr would be
// useful on its own.
match expression.node {
ExprKind::Call(..) |
ExprKind::MethodCall(..) |
ExprKind::If(..) |
ExprKind::While(..) |
ExprKind::Loop(..) |
ExprKind::Match(..) |
ExprKind::Block(..) => {
let sp = self.tcx.sess.source_map().next_point(cause_span);
err.span_suggestion(
sp,
"try adding a semicolon",
";".to_string(),
Applicability::MachineApplicable);
}
_ => (),
}
}
}
/// A possible error is to forget to add a return type that is needed:
///
/// ```
/// fn foo() {
/// bar_that_returns_u32()
/// }
/// ```
///
    /// This routine checks that the return type has been left as default, that the method is
    /// not part of an `impl` block, and that it isn't the `main` method. If so, it suggests setting the return
/// type.
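    ///
    /// A sketch of the code after applying the suggestion (the return type
    /// comes from the resolved `found` type, assumed here to be `u32`):
    ///
    /// ```
    /// fn foo() -> u32 {
    ///     bar_that_returns_u32()
    /// }
    /// ```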
fn suggest_missing_return_type(
&self,
err: &mut DiagnosticBuilder<'tcx>,
fn_decl: &hir::FnDecl,
expected: Ty<'tcx>,
found: Ty<'tcx>,
can_suggest: bool,
) -> bool {
// Only suggest changing the return type for methods that
// haven't set a return type at all (and aren't `fn main()` or an impl).
match (&fn_decl.output, found.is_suggestable(), can_suggest, expected.is_unit()) {
(&hir::FunctionRetTy::DefaultReturn(span), true, true, true) => {
err.span_suggestion(
span,
"try adding a return type",
format!("-> {} ", self.resolve_type_vars_with_obligations(found)),
Applicability::MachineApplicable);
true
}
(&hir::FunctionRetTy::DefaultReturn(span), false, true, true) => {
err.span_label(span, "possibly return type missing here?");
true
}
(&hir::FunctionRetTy::DefaultReturn(span), _, false, true) => {
                // `fn main()` must return `()`; do not suggest changing the return type
err.span_label(span, "expected `()` because of default return type");
true
}
// expectation was caused by something else, not the default return
(&hir::FunctionRetTy::DefaultReturn(_), _, _, false) => false,
(&hir::FunctionRetTy::Return(ref ty), _, _, _) => {
// Only point to return type if the expected type is the return type, as if they
// are not, the expectation must have been caused by something else.
debug!("suggest_missing_return_type: return type {:?} node {:?}", ty, ty.node);
let sp = ty.span;
let ty = AstConv::ast_ty_to_ty(self, ty);
debug!("suggest_missing_return_type: return type {:?}", ty);
debug!("suggest_missing_return_type: expected type {:?}", ty);
if ty.sty == expected.sty {
err.span_label(sp, format!("expected `{}` because of return type",
expected));
return true;
}
false
}
}
}
/// A common error is to add an extra semicolon:
///
/// ```
/// fn foo() -> usize {
/// 22;
/// }
/// ```
///
/// This routine checks if the final statement in a block is an
/// expression with an explicit semicolon whose type is compatible
/// with `expected_ty`. If so, it suggests removing the semicolon.
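    ///
    /// With the semicolon removed, the final expression becomes the block's
    /// tail and supplies the return value (sketch):
    ///
    /// ```
    /// fn foo() -> usize {
    ///     22
    /// }
    /// ```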
fn consider_hint_about_removing_semicolon(
&self,
blk: &'gcx hir::Block,
expected_ty: Ty<'tcx>,
err: &mut DiagnosticBuilder<'_>,
) {
if let Some(span_semi) = self.could_remove_semicolon(blk, expected_ty) {
err.span_suggestion(
span_semi,
"consider removing this semicolon",
String::new(),
Applicability::MachineApplicable,
);
}
}
fn could_remove_semicolon(
&self,
blk: &'gcx hir::Block,
expected_ty: Ty<'tcx>,
) -> Option<Span> {
// Be helpful when the user wrote `{... expr;}` and
// taking the `;` off is enough to fix the error.
let last_stmt = blk.stmts.last()?;
let last_expr = match last_stmt.node {
hir::StmtKind::Semi(ref e) => e,
_ => return None,
};
let last_expr_ty = self.node_ty(last_expr.hir_id);
if self.can_sub(self.param_env, last_expr_ty, expected_ty).is_err() {
return None;
}
let original_span = original_sp(last_stmt.span, blk.span);
Some(original_span.with_lo(original_span.hi() - BytePos(1)))
}
// Rewrite `SelfCtor` to `Ctor`
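    //
    // Illustrative sketch (not from the original): for a tuple struct
    // `struct Foo(i32);` with `impl Foo { fn new() -> Self { Self(0) } }`,
    // the path `Self` in `Self(0)` resolves to `Def::SelfCtor` and is
    // rewritten here to the struct's `Def::Ctor`; braced structs, unions,
    // and enums fall into the error branch below instead.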
pub fn rewrite_self_ctor(&self, def: Def, span: Span) -> (Def, DefId, Ty<'tcx>) {
let tcx = self.tcx;
if let Def::SelfCtor(impl_def_id) = def {
let ty = self.impl_self_ty(span, impl_def_id).ty;
let adt_def = ty.ty_adt_def();
match adt_def {
Some(adt_def) if adt_def.has_ctor() => {
let variant = adt_def.non_enum_variant();
let ctor_def_id = variant.ctor_def_id.unwrap();
let def = Def::Ctor(ctor_def_id, CtorOf::Struct, variant.ctor_kind);
(def, ctor_def_id, tcx.type_of(ctor_def_id))
}
_ => {
let mut err = tcx.sess.struct_span_err(span,
"the `Self` constructor can only be used with tuple or unit structs");
if let Some(adt_def) = adt_def {
match adt_def.adt_kind() {
AdtKind::Enum => {
err.help("did you mean to use one of the enum's variants?");
},
AdtKind::Struct |
AdtKind::Union => {
err.span_suggestion(
span,
"use curly brackets",
String::from("Self { /* fields */ }"),
Applicability::HasPlaceholders,
);
}
}
}
err.emit();
(def, impl_def_id, tcx.types.err)
}
}
} else {
let def_id = def.def_id();
// The things we are substituting into the type should not contain
            // escaping late-bound regions, nor should the base type scheme.
let ty = tcx.type_of(def_id);
(def, def_id, ty)
}
}
// Instantiates the given path, which must refer to an item with the given
// number of type parameters and type.
pub fn instantiate_value_path(&self,
segments: &[hir::PathSegment],
self_ty: Option<Ty<'tcx>>,
def: Def,
span: Span,
hir_id: hir::HirId)
-> (Ty<'tcx>, Def) {
debug!(
"instantiate_value_path(segments={:?}, self_ty={:?}, def={:?}, hir_id={})",
segments,
self_ty,
def,
hir_id,
);
let tcx = self.tcx;
match def {
Def::Local(hid) | Def::Upvar(hid, ..) => {
let ty = self.local_ty(span, hid).decl_ty;
let ty = self.normalize_associated_types_in(span, &ty);
self.write_ty(hir_id, ty);
return (ty, def);
}
_ => {}
}
let (def, def_id, ty) = self.rewrite_self_ctor(def, span);
let path_segs = AstConv::def_ids_for_path_segments(self, segments, self_ty, def);
let mut user_self_ty = None;
let mut is_alias_variant_ctor = false;
match def {
Def::Ctor(_, CtorOf::Variant, _) => {
if let Some(self_ty) = self_ty {
let adt_def = self_ty.ty_adt_def().unwrap();
user_self_ty = Some(UserSelfTy {
impl_def_id: adt_def.did,
self_ty,
});
is_alias_variant_ctor = true;
}
}
Def::Method(def_id) |
Def::AssociatedConst(def_id) => {
let container = tcx.associated_item(def_id).container;
debug!("instantiate_value_path: def={:?} container={:?}", def, container);
match container {
ty::TraitContainer(trait_did) => {
callee::check_legal_trait_for_method_call(tcx, span, trait_did)
}
ty::ImplContainer(impl_def_id) => {
if segments.len() == 1 {
// `<T>::assoc` will end up here, and so
                            // can `T::assoc`. If this came from an
// inherent impl, we need to record the
// `T` for posterity (see `UserSelfTy` for
// details).
let self_ty = self_ty.expect("UFCS sugared assoc missing Self");
user_self_ty = Some(UserSelfTy {
impl_def_id,
self_ty,
});
}
}
}
}
_ => {}
}
// Now that we have categorized what space the parameters for each
// segment belong to, let's sort out the parameters that the user
// provided (if any) into their appropriate spaces. We'll also report
// errors if type parameters are provided in an inappropriate place.
let generic_segs: FxHashSet<_> = path_segs.iter().map(|PathSeg(_, index)| index).collect();
let generics_has_err = AstConv::prohibit_generics(
self, segments.iter().enumerate().filter_map(|(index, seg)| {
if !generic_segs.contains(&index) || is_alias_variant_ctor {
Some(seg)
} else {
None
}
}));
if generics_has_err {
// Don't try to infer type parameters when prohibited generic arguments were given.
user_self_ty = None;
}
// Now we have to compare the types that the user *actually*
// provided against the types that were *expected*. If the user
// did not provide any types, then we want to substitute inference
// variables. If the user provided some types, we may still need
// to add defaults. If the user provided *too many* types, that's
// a problem.
let mut infer_args_for_err = FxHashSet::default();
for &PathSeg(def_id, index) in &path_segs {
let seg = &segments[index];
let generics = tcx.generics_of(def_id);
// Argument-position `impl Trait` is treated as a normal generic
// parameter internally, but we don't allow users to specify the
// parameter's value explicitly, so we have to do some error-
// checking here.
let suppress_errors = AstConv::check_generic_arg_count_for_call(
tcx,
span,
&generics,
&seg,
false, // `is_method_call`
);
if suppress_errors {
infer_args_for_err.insert(index);
self.set_tainted_by_errors(); // See issue #53251.
}
}
let has_self = path_segs.last().map(|PathSeg(def_id, _)| {
tcx.generics_of(*def_id).has_self
}).unwrap_or(false);
let substs = AstConv::create_substs_for_generic_args(
tcx,
def_id,
&[][..],
has_self,
self_ty,
// Provide the generic args, and whether types should be inferred.
|def_id| {
if let Some(&PathSeg(_, index)) = path_segs.iter().find(|&PathSeg(did, _)| {
*did == def_id
}) {
// If we've encountered an `impl Trait`-related error, we're just
// going to infer the arguments for better error messages.
if !infer_args_for_err.contains(&index) {
// Check whether the user has provided generic arguments.
if let Some(ref data) = segments[index].args {
return (Some(data), segments[index].infer_types);
}
}
return (None, segments[index].infer_types);
}
(None, true)
},
// Provide substitutions for parameters for which (valid) arguments have been provided.
|param, arg| {
match (¶m.kind, arg) {
(GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
AstConv::ast_region_to_region(self, lt, Some(param)).into()
}
(GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => {
self.to_ty(ty).into()
}
(GenericParamDefKind::Const, GenericArg::Const(ct)) => {
self.to_const(&ct.value, self.tcx.type_of(param.def_id)).into()
}
_ => unreachable!(),
}
},
// Provide substitutions for parameters for which arguments are inferred.
|substs, param, infer_types| {
match param.kind {
GenericParamDefKind::Lifetime => {
self.re_infer(span, Some(param)).unwrap().into()
}
GenericParamDefKind::Type { has_default, .. } => {
if !infer_types && has_default {
                        // If we have a default, then it doesn't matter that we're not
// inferring the type arguments: we provide the default where any
// is missing.
let default = tcx.type_of(param.def_id);
self.normalize_ty(
span,
default.subst_spanned(tcx, substs.unwrap(), Some(span))
).into()
} else {
// If no type arguments were provided, we have to infer them.
// This case also occurs as a result of some malformed input, e.g.
// a lifetime argument being given instead of a type parameter.
// Using inference instead of `Error` gives better error messages.
self.var_for_def(span, param)
}
}
GenericParamDefKind::Const => {
// FIXME(const_generics:defaults)
// No const parameters were provided, we have to infer them.
self.var_for_def(span, param)
}
}
},
);
assert!(!substs.has_escaping_bound_vars());
assert!(!ty.has_escaping_bound_vars());
// First, store the "user substs" for later.
self.write_user_type_annotation_from_substs(hir_id, def_id, substs, user_self_ty);
        // Add all the obligations that are required, substituting and
        // normalizing appropriately.
let bounds = self.instantiate_bounds(span, def_id, &substs);
self.add_obligations_for_parameters(
traits::ObligationCause::new(span, self.body_id, traits::ItemObligation(def_id)),
&bounds);
// Substitute the values for the type parameters into the type of
// the referenced item.
let ty_substituted = self.instantiate_type_scheme(span, &substs, &ty);
if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty {
// In the case of `Foo<T>::method` and `<Foo<T>>::method`, if `method`
// is inherent, there is no `Self` parameter; instead, the impl needs
// type parameters, which we can infer by unifying the provided `Self`
// with the substituted impl type.
// This also occurs for an enum variant on a type alias.
let ty = tcx.type_of(impl_def_id);
let impl_ty = self.instantiate_type_scheme(span, &substs, &ty);
match self.at(&self.misc(span), self.param_env).sup(impl_ty, self_ty) {
Ok(ok) => self.register_infer_ok_obligations(ok),
Err(_) => {
span_bug!(span,
"instantiate_value_path: (UFCS) {:?} was a subtype of {:?} but now is not?",
self_ty,
impl_ty);
}
}
}
self.check_rustc_args_require_const(def_id, hir_id, span);
debug!("instantiate_value_path: type of {:?} is {:?}",
hir_id,
ty_substituted);
self.write_substs(hir_id, substs);
(ty_substituted, def)
}
fn check_rustc_args_require_const(&self,
def_id: DefId,
hir_id: hir::HirId,
span: Span) {
// We're only interested in functions tagged with
// #[rustc_args_required_const], so ignore anything that's not.
if !self.tcx.has_attr(def_id, "rustc_args_required_const") {
return
}
// If our calling expression is indeed the function itself, we're good!
// If not, generate an error that this can only be called directly.
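        //
        // Illustrative sketch: given `#[rustc_args_required_const(0)] fn foo(x: i32)`,
        // a direct call `foo(7)` passes this check, while first taking a
        // function pointer (`let f = foo; f(7)`) reaches the error below.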
if let Node::Expr(expr) = self.tcx.hir().get_by_hir_id(
self.tcx.hir().get_parent_node_by_hir_id(hir_id))
{
if let ExprKind::Call(ref callee, ..) = expr.node {
if callee.hir_id == hir_id {
return
}
}
}
self.tcx.sess.span_err(span, "this function can only be invoked \
directly, not through a function pointer");
}
    // Resolves `ty` by a single level if `ty` is a type variable.
// If no resolution is possible, then an error is reported.
// Numeric inference variables may be left unresolved.
pub fn structurally_resolved_type(&self, sp: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
let ty = self.resolve_type_vars_with_obligations(ty);
if !ty.is_ty_var() {
ty
} else {
if !self.is_tainted_by_errors() {
self.need_type_info_err((**self).body_id, sp, ty)
.note("type must be known at this point")
.emit();
}
self.demand_suptype(sp, self.tcx.types.err, ty);
self.tcx.types.err
}
}
fn with_breakable_ctxt<F: FnOnce() -> R, R>(&self, id: hir::HirId,
ctxt: BreakableCtxt<'gcx, 'tcx>, f: F)
-> (BreakableCtxt<'gcx, 'tcx>, R) {
let index;
{
let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
index = enclosing_breakables.stack.len();
enclosing_breakables.by_id.insert(id, index);
enclosing_breakables.stack.push(ctxt);
}
let result = f();
let ctxt = {
let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
debug_assert!(enclosing_breakables.stack.len() == index + 1);
enclosing_breakables.by_id.remove(&id).expect("missing breakable context");
enclosing_breakables.stack.pop().expect("missing breakable context")
};
(ctxt, result)
}
/// Instantiate a QueryResponse in a probe context, without a
/// good ObligationCause.
fn probe_instantiate_query_response(
&self,
span: Span,
original_values: &OriginalQueryValues<'tcx>,
query_result: &Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>,
) -> InferResult<'tcx, Ty<'tcx>>
{
self.instantiate_query_response_and_region_obligations(
&traits::ObligationCause::misc(span, self.body_id),
self.param_env,
original_values,
query_result)
}
/// Returns `true` if an expression is contained inside the LHS of an assignment expression.
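    /// E.g. (illustrative): in `a.b = c`, the sub-expression `a.b` is
    /// contained in a place context, while `c` is not.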
fn expr_in_place(&self, mut expr_id: hir::HirId) -> bool {
let mut contained_in_place = false;
while let hir::Node::Expr(parent_expr) =
self.tcx.hir().get_by_hir_id(self.tcx.hir().get_parent_node_by_hir_id(expr_id))
{
match &parent_expr.node {
hir::ExprKind::Assign(lhs, ..) | hir::ExprKind::AssignOp(_, lhs, ..) => {
if lhs.hir_id == expr_id {
contained_in_place = true;
break;
}
}
_ => (),
}
expr_id = parent_expr.hir_id;
}
contained_in_place
}
}
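/// Checks that every type parameter declared in `generics` is actually used
/// in `ty`, emitting E0091 for each unused one. Illustrative trigger (a
/// sketch): `type Foo<T> = u32;` leaves `T` unused.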
pub fn check_bounds_are_used<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
generics: &ty::Generics,
ty: Ty<'tcx>) {
let own_counts = generics.own_counts();
debug!(
"check_bounds_are_used(n_tys={}, n_cts={}, ty={:?})",
own_counts.types,
own_counts.consts,
ty
);
// FIXME(const_generics): we probably want to check the bounds for const parameters too.
if own_counts.types == 0 {
return;
}
// Make a vector of booleans initially false, set to true when used.
let mut types_used = vec![false; own_counts.types];
for leaf_ty in ty.walk() {
if let ty::Param(ty::ParamTy { idx, .. }) = leaf_ty.sty {
debug!("Found use of ty param num {}", idx);
types_used[idx as usize - own_counts.lifetimes] = true;
} else if let ty::Error = leaf_ty.sty {
// If there is already another error, do not emit
            // an error for not using a type parameter.
assert!(tcx.sess.err_count() > 0);
return;
}
}
let types = generics.params.iter().filter(|param| match param.kind {
ty::GenericParamDefKind::Type { .. } => true,
_ => false,
});
for (&used, param) in types_used.iter().zip(types) {
if !used {
let id = tcx.hir().as_local_hir_id(param.def_id).unwrap();
let span = tcx.hir().span_by_hir_id(id);
struct_span_err!(tcx.sess, span, E0091, "type parameter `{}` is unused", param.name)
.span_label(span, "unused type parameter")
.emit();
}
}
}
fn fatally_break_rust(sess: &Session) {
let handler = sess.diagnostic();
handler.span_bug_no_panic(
MultiSpan::new(),
"It looks like you're trying to break rust; would you like some ICE?",
);
handler.note_without_error("the compiler expectedly panicked. this is a feature.");
handler.note_without_error(
"we would appreciate a joke overview: \
https://github.com/rust-lang/rust/issues/43162#issuecomment-320764675"
);
handler.note_without_error(&format!("rustc {} running on {}",
option_env!("CFG_VERSION").unwrap_or("unknown_version"),
crate::session::config::host_triple(),
));
}
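/// E.g. (illustrative): `potentially_plural_count(1, "argument")` yields
/// `"1 argument"`, while `potentially_plural_count(3, "argument")` yields
/// `"3 arguments"`.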
fn potentially_plural_count(count: usize, word: &str) -> String {
format!("{} {}{}", count, word, if count == 1 { "" } else { "s" })
}
|
tcx.sess,
item.span,
E0044,
"foreign items may not have type parameters"
|
get_mailbox_usage_storage_with_period_request_builder.go
|
package getmailboxusagestoragewithperiod
import (
ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9 "github.com/microsoft/kiota/abstractions/go"
)
// GetMailboxUsageStorageWithPeriodRequestBuilder builds and executes requests for operations under \reports\microsoft.graph.getMailboxUsageStorage(period='{period}')
type GetMailboxUsageStorageWithPeriodRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string;
// The request adapter to use to execute the requests.
requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter;
// Url template to use to build the URL for the current request builder
urlTemplate string;
}
// GetMailboxUsageStorageWithPeriodRequestBuilderGetOptions options for Get
type GetMailboxUsageStorageWithPeriodRequestBuilderGetOptions struct {
// Request headers
H map[string]string;
// Request options
O []ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestOption;
// Response handler to use in place of the default response handling provided by the core service
ResponseHandler ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.ResponseHandler;
}
// NewGetMailboxUsageStorageWithPeriodRequestBuilderInternal instantiates a new GetMailboxUsageStorageWithPeriodRequestBuilder and sets the default values.
func NewGetMailboxUsageStorageWithPeriodRequestBuilderInternal(pathParameters map[string]string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter, period *string)(*GetMailboxUsageStorageWithPeriodRequestBuilder) {
m := &GetMailboxUsageStorageWithPeriodRequestBuilder{
}
|
}
if period != nil {
urlTplParams["period"] = *period
}
    m.pathParameters = urlTplParams;
m.requestAdapter = requestAdapter;
return m
}
// NewGetMailboxUsageStorageWithPeriodRequestBuilder instantiates a new GetMailboxUsageStorageWithPeriodRequestBuilder and sets the default values.
func NewGetMailboxUsageStorageWithPeriodRequestBuilder(rawUrl string, requestAdapter ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestAdapter)(*GetMailboxUsageStorageWithPeriodRequestBuilder) {
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewGetMailboxUsageStorageWithPeriodRequestBuilderInternal(urlParams, requestAdapter, nil)
}
// CreateGetRequestInformation builds the request information to invoke the getMailboxUsageStorage function
func (m *GetMailboxUsageStorageWithPeriodRequestBuilder) CreateGetRequestInformation(options *GetMailboxUsageStorageWithPeriodRequestBuilderGetOptions)(*ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.RequestInformation, error) {
requestInfo := ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = ida96af0f171bb75f894a4013a6b3146a4397c58f11adb81a2b7cbea9314783a9.GET
if options != nil && options.H != nil {
requestInfo.Headers = options.H
}
if options != nil && len(options.O) != 0 {
err := requestInfo.AddRequestOptions(options.O...)
if err != nil {
return nil, err
}
}
return requestInfo, nil
}
// Get invokes the getMailboxUsageStorage function and returns the raw response body
func (m *GetMailboxUsageStorageWithPeriodRequestBuilder) Get(options *GetMailboxUsageStorageWithPeriodRequestBuilderGetOptions)([]byte, error) {
requestInfo, err := m.CreateGetRequestInformation(options);
if err != nil {
return nil, err
}
res, err := m.requestAdapter.SendPrimitiveAsync(*requestInfo, "byte", nil, nil)
if err != nil {
return nil, err
}
return res.([]byte), nil
}
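// Example usage (a sketch; `adapter` stands for an already-configured kiota
// RequestAdapter and "D7" is an illustrative reporting period):
//
//   period := "D7"
//   builder := NewGetMailboxUsageStorageWithPeriodRequestBuilderInternal(
//       map[string]string{}, adapter, &period)
//   report, err := builder.Get(nil) // raw report bytes, or an error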
|
m.urlTemplate = "{+baseurl}/reports/microsoft.graph.getMailboxUsageStorage(period='{period}')";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
|
payload.js
|
__NUXT_JSONP__("/88/6", (function(a,b,c,d,e,f,g,h,i,j,k,l){return {data:[{metaTitle:f,metaDesc:g,verseId:6,surahId:88,currentSurah:{number:"88",name:"الغاشية",name_latin:"Al-Gasyiyah",number_of_ayah:"26",text:{"1":"هَلْ اَتٰىكَ حَدِيْثُ الْغَاشِيَةِۗ","2":"وُجُوْهٌ يَّوْمَىِٕذٍ خَاشِعَةٌ ۙ","3":"عَامِلَةٌ نَّاصِبَةٌ ۙ","4":"تَصْلٰى نَارًا حَامِيَةً ۙ","5":"تُسْقٰى مِنْ عَيْنٍ اٰنِيَةٍ ۗ","6":"لَيْسَ لَهُمْ طَعَامٌ اِلَّا مِنْ ضَرِيْعٍۙ","7":"لَّا يُسْمِنُ وَلَا يُغْنِيْ مِنْ جُوْعٍۗ","8":"وُجُوْهٌ يَّوْمَىِٕذٍ نَّاعِمَةٌ ۙ","9":"لِّسَعْيِهَا رَاضِيَةٌ ۙ ","10":"فِيْ جَنَّةٍ عَالِيَةٍۙ","11":"لَّا تَسْمَعُ فِيْهَا لَاغِيَةً ۗ","12":"فِيْهَا عَيْنٌ جَارِيَةٌ ۘ","13":"فِيْهَا سُرُرٌ مَّرْفُوْعَةٌ ۙ","14":"وَّاَكْوَابٌ مَّوْضُوْعَةٌ ۙ","15":"وَّنَمَارِقُ مَصْفُوْفَةٌ ۙ ","16":"وَّزَرَابِيُّ مَبْثُوْثَةٌ ۗ","17":"اَفَلَا يَنْظُرُوْنَ اِلَى الْاِبِلِ كَيْفَ خُلِقَتْۗ","18":"وَاِلَى السَّمَاۤءِ كَيْفَ رُفِعَتْۗ","19":"وَاِلَى الْجِبَالِ كَيْفَ نُصِبَتْۗ","20":"وَاِلَى الْاَرْضِ كَيْفَ سُطِحَتْۗ ","21":"فَذَكِّرْۗ اِنَّمَآ اَنْتَ مُذَكِّرٌۙ","22":"لَّسْتَ عَلَيْهِمْ بِمُصَيْطِرٍۙ","23":"اِلَّا مَنْ تَوَلّٰى وَكَفَرَۙ","24":"فَيُعَذِّبُهُ اللّٰهُ الْعَذَابَ الْاَكْبَرَۗ","25":"اِنَّ اِلَيْنَآ اِيَابَهُمْ","26":"ثُمَّ اِنَّ عَلَيْنَا حِسَابَهُمْ ࣖ"},translations:{id:{name:"Hari Kiamat",text:{"1":"Sudahkah sampai kepadamu berita tentang (hari Kiamat)?","2":"Pada hari itu banyak wajah yang tertunduk terhina,","3":"(karena) bekerja keras lagi kepayahan,","4":"mereka memasuki api yang sangat panas (neraka),","5":"diberi minum dari sumber mata air yang sangat panas.","6":"Tidak ada makanan bagi mereka selain dari pohon yang berduri,","7":"yang tidak menggemukkan dan tidak menghilangkan lapar.","8":"Pada hari itu banyak (pula) wajah yang berseri-seri,","9":"merasa senang karena usahanya (sendiri),","10":"(mereka) dalam surga yang tinggi, ","11":"di sana (kamu) tidak mendengar perkataan yang tidak berguna.","12":"Di sana ada mata air yang mengalir.","13":"Di sana ada dipan-dipan yang ditinggikan,","14":"dan gelas-gelas yang tersedia (di dekatnya),","15":"dan bantal-bantal sandaran yang tersusun,","16":"dan permadani-permadani yang terhampar.","17":"Maka tidakkah mereka memperhatikan unta, bagaimana diciptakan?","18":"dan langit, bagaimana ditinggikan?","19":"Dan gunung-gunung bagaimana ditegakkan?","20":"Dan bumi bagaimana dihamparkan?","21":"Maka berilah peringatan, karena sesungguhnya engkau (Muhammad) hanyalah pemberi peringatan.","22":"Engkau bukanlah orang yang berkuasa atas mereka,","23":"kecuali (jika ada) orang yang berpaling dan kafir,","24":"maka Allah akan mengazabnya dengan azab yang besar.","25":"Sungguh, kepada Kamilah mereka kembali,","26":"kemudian sesungguhnya (kewajiban) Kamilah membuat perhitungan atas mereka."}}},tafsir:{id:{kemenag:{name:"Kemenag",source:"Aplikasi Quran Kementrian Agama Republik Indonesia",text:{"1":"Allah menyindir penduduk neraka dengan mengatakan, \"Sudahkah sampai kepada kamu berita tentang hari Kiamat.\"","2":"Kemudian Allah menjelaskan bahwa manusia ketika itu terbagi dua, yaitu golongan orang kafir dan golongan orang mukmin.\n\nGolongan orang kafir ketika melihat kedahsyatan yang terjadi ketika itu, menjadi tertunduk dan merasa terhina. Allah berfirman:\n\nDan (alangkah ngerinya), jika sekiranya kamu melihat orang-orang yang berdosa itu menundukkan kepalanya di hadapan Tuhannya, (mereka berkata), \"Ya Tuhan kami, kami telah melihat dan mendengar, maka kembalikanlah kami (ke dunia), niscaya kami akan mengerjakan kebajikan. 
Sungguh, kami adalah orang-orang yang yakin.\" (as-Sajdah\u002F32: 12)\n\nDan firman Allah:\n\nDan kamu akan melihat mereka dihadapkan ke neraka dalam keadaan tertunduk karena (merasa) hina, mereka melihat dengan pandangan yang lesu. Dan orang-orang yang beriman berkata, \"Sesungguhnya orang-orang yang rugi ialah orang-orang yang merugikan diri mereka sendiri dan keluarganya pada hari Kiamat.\" Ingatlah, sesungguhnya orang-orang zalim itu berada dalam azab yang kekal. (asy-Syura\u002F42: 45)","3":"Allah menerangkan bahwa orang-orang kafir itu semasa hidup di dunia bekerja dengan rajin dan sungguh-sungguh. Akan tetapi, perbuatan mereka itu tidak diterima karena mereka tidak beriman kepada Allah dan Rasul-Nya, yang merupakan syarat utama untuk diterimanya perbuatan dan mendapat ganjaran-Nya.","4":b,"5":b,"6":b,"7":b,"8":h,"9":h,"10":a,"11":a,"12":a,"13":a,"14":a,"15":a,"16":a,"17":c,"18":c,"19":c,"20":c,"21":"Dalam ayat ini, Allah memerintahkan Nabi Muhammad agar memberi peringatan dan petunjuk serta menyampaikan agama-Nya kepada umat manusia, karena tugasnya tidak lain hanyalah memberi peringatan dengan menyampaikan kabar gembira dan kabar yang menakutkan.","22":d,"23":d,"24":d,"25":i,"26":i}}}}},jsonldBreadcrumb:{"@context":j,"@type":"BreadcrumbList",itemListElement:[{"@type":e,position:1,name:"Home",item:"http:\u002F\u002Fquran.almazayaislamicschool.sch.id\u002F"},{"@type":e,position:2,name:"QS 88",item:"http:\u002F\u002Fquran.almazayaislamicschool.sch.id\u002F88\u002F"},{"@type":e,position:3,name:"QS 88:6",item:k}]},jsonLdArticle:{"@context":j,"@type":"NewsArticle",mainEntityOfPage:{"@type":"WebPage","@id":k},headline:f,image:["http:\u002F\u002Fquran.almazayaislamicschool.sch.id\u002Fmeta-image.png"],datePublished:l,dateModified:l,author:{"@type":"Person",name:"Irfan Maulana"},description:g,publisher:{"@type":"Organization",name:"mazipan",logo:{"@type":"ImageObject",url:"http:\u002F\u002Fquran.almazayaislamicschool.sch.id\u002Ficon.png"}}}}],fetch:[],mutations:[]}}("Dalam ayat-ayat berikut ini, Allah menerangkan keadaan surga:\n\n1.Surga tempatnya bernilai tinggi, lebih tinggi dari nilai tempat-tempat yang lain.\n\n2.Di dalamnya tidak terdengar perkataan yang tidak berguna, sebab tempat itu adalah tempat orang-orang yang dikasihi Allah.\n\n3.Di dalamnya terdapat mata air yang mengalirkan air bersih yang menarik pandangan bagi siapa saja yang melihatnya.\n\n4.Di dalamnya terdapat mahligai yang tinggi.\n\n5.Di dekat mereka tersedia gelas-gelas yang berisi minuman yang sudah siap diminum.\n\n6.Di dalamnya terdapat bantal-bantal tersusun yang dapat dipergunakan menurut selera mereka, duduk di atasnya atau dipakai untuk bersandar dan sebagainya.\n\n7.Di sana terdapat pula permadani yang indah dan terhampar pada setiap tempat.\n\n8.Terdapat segala macam kenikmatan rohani dan jasmani yang jauh dari yang dapat kita bayangkan.","Dalam ayat-ayat ini, Allah menerangkan bahwa orang-orang kafir akan dimasukkan ke dalam neraka. Bila mereka meminta air karena haus, maka mereka diberi air bersumber dari mata air yang sangat panas. Bila mereka meminta makan, maka diberi makanan yang jelek, yang tidak ada artinya. Allah berfirman:\n\nDan tidak ada makanan (baginya) kecuali dari darah dan nanah. (al-haqqah\u002F69: 36)\n\nDan firman Allah:\n\nKemudian sesungguhnya kamu, wahai orang-orang yang sesat lagi mendustakan! pasti akan memakan pohon zaqqum. (al-Waqi'ah\u002F56: 51-52)\n\nDalam ayat lain Allah berfirman:\n\nSungguh pohon zaqqum itu, makanan bagi orang yang banyak dosa. 
(ad-Dukhan\u002F44: 43-44)","Dalam ayat-ayat ini, Allah mempertanyakan apakah mereka tidak memperhatikan bagaimana unta, yang ada di depan mata mereka dan dipergunakan setiap waktu, diciptakan. Bagaimana pula langit yang berada di tempat yang tinggi tanpa tiang; bagaimana gunung-gunung dipancangkan dengan kukuh, tidak bergoyang dan dijadikan petunjuk bagi orang yang dalam perjalanan. Di atasnya terdapat danau dan mata air yang dapat dipergunakan untuk keperluan manusia, mengairi tumbuh-tumbuhan, dan memberi minum binatang ternak. Bagaimana pula bumi dihamparkan sebagai tempat tinggal bagi manusia.\n\nApabila mereka telah memperhatikan semua itu dengan seksama, tentu mereka akan mengakui bahwa penciptanya dapat membangkitkan manusia kembali pada hari Kiamat.","Dalam ayat-ayat ini, Allah menerangkan bahwa Nabi Muhammad tidak berkuasa menjadikan seseorang beriman. Akan tetapi, Allah-lah yang berkuasa menjadikan manusia beriman. Sementara itu, barang siapa yang berpaling dengan mengingkari kebenaran petunjuk Nabi-Nya, niscaya Allah menghukumnya. Allah berfirman:\n\nDan jika Tuhanmu menghendaki, tentulah beriman semua orang di bumi seluruhnya. Tetapi apakah kamu (hendak) memaksa manusia agar mereka menjadi orang-orang yang beriman? (Yunus\u002F10: 99)\n\nDan Allah berfirman:\n\nKami lebih mengetahui tentang apa yang mereka katakan, dan engkau (Muhammad) bukanlah seorang pemaksa terhadap mereka. Maka berilah peringatan dengan Al-Qur'an kepada siapa pun yang takut kepada ancaman-Ku. (Qaf\u002F50: 45)\n\nBerkaitan dengan hal itu, para juru dakwah cukup menyampaikan pesan-pesan Al-Qur'an dan hadis Nabi sambil mengajak setiap manusia untuk beriman dan beramal saleh, serta masuk ke dalam agama Islam secara keseluruhan (kaffah). Penampilan dan metode dakwah perlu dengan cara yang baik dan tidak boleh bersikap memaksa, sebagaimana firman Allah:\n\nTidak ada paksaan dalam (menganut) agama (Islam), sesungguhnya telah jelas (perbedaan) antara jalan yang benar dengan jalan yang sesat. (al-Baqarah\u002F2: 256)\n\nDan Allah berfirman:\n\nSerulah (manusia) kepada jalan Tuhanmu dengan hikmah dan pengajaran yang baik, dan berdebatlah dengan mereka dengan cara yang baik. Sesungguhnya Tuhanmu, Dialah yang lebih mengetahui siapa yang sesat dari jalan-Nya dan Dialah yang lebih mengetahui siapa yang mendapat petunjuk. (an-Nahl\u002F16: 125)","ListItem","Ayat ke 6, Quran Surat Al-Gasyiyah الغاشية (Hari Kiamat) | e-AlQuran","Ayat ke 6, Quran Surat Al-Gasyiyah الغاشية (Hari Kiamat) beserta terjemahan dan tafsir dari Kemenag, ","Allah menerangkan bahwa di dalam surga, muka orang mukmin berseri penuh kegembiraan. 
Mereka merasa senang melihat hasil usaha mereka yang mendapat keridaan Allah yang kemudian mendapat imbalan surga yang diidam-idamkan.\n\n(10-16) Dalam ayat-ayat berikut ini, Allah menerangkan keadaan surga:\n\n1.Surga tempatnya bernilai tinggi, lebih tinggi dari nilai tempat-tempat yang lain.\n\n2.Di dalamnya tidak terdengar perkataan yang tidak berguna, sebab tempat itu adalah tempat orang-orang yang dikasihi Allah.\n\n3.Di dalamnya terdapat mata air yang mengalirkan air bersih yang menarik pandangan bagi siapa saja yang melihatnya.\n\n4.Di dalamnya terdapat mahligai yang tinggi.\n\n5.Di dekat mereka tersedia gelas-gelas yang berisi minuman yang sudah siap diminum.\n\n6.Di dalamnya terdapat bantal-bantal tersusun yang dapat dipergunakan menurut selera mereka, duduk di atasnya atau dipakai untuk bersandar dan sebagainya.\n\n7.Di sana terdapat pula permadani yang indah dan terhampar pada setiap tempat.\n\n8.Terdapat segala macam kenikmatan rohani dan jasmani yang jauh dari yang dapat kita bayangkan.","Dalam ayat-ayat ini, Allah menerangkan bahwa mereka akan kembali kepada-Nya. Tidak ada jalan bagi mereka untuk lari daripada-Nya. Dialah yang akan menghisab mereka atas perbuatan yang telah mereka perbuat di dunia dan kemudian menjatuhkan hukuman-Nya. Ayat-ayat ini adalah penghibur hati bagi Nabi Muhammad dan sebagai obat bagi kesedihan dan kepedihan hatinya atas keingkaran orang-orang kafir itu.","https:\u002F\u002Fschema.org","http:\u002F\u002Fquran.almazayaislamicschool.sch.id\u002F88\u002F6\u002F","2020-08-14T03:42:32.790Z")));
|
||
0014_auto_20160903_0626.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-03 06:26
from __future__ import unicode_literals
from django.db import migrations, models
class
|
(migrations.Migration):
dependencies = [
('web', '0013_auto_20160903_0624'),
]
operations = [
migrations.AlterField(
model_name='competition',
name='image',
field=models.ImageField(null=True, upload_to='static/upload_files/competitions/images'),
),
]
|
Migration
|
operation_deser.rs
|
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
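// Each `parse_<operation>_error` function below follows the same pattern:
// parse the generic JSON error envelope, dispatch on the error code string,
// deserialize the matching typed error from the response body (plus the
// `Retry-After` header where the error carries `retry_after_seconds`), and
// fall back to a generic error for unrecognized codes.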
#[allow(clippy::unnecessary_wraps)]
pub fn parse_add_notification_channel_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::AddNotificationChannelOutput,
crate::error::AddNotificationChannelError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::AddNotificationChannelError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::AddNotificationChannelError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::AddNotificationChannelError {
meta: generic,
kind: crate::error::AddNotificationChannelErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AddNotificationChannelError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ConflictException" => crate::error::AddNotificationChannelError {
meta: generic,
kind: crate::error::AddNotificationChannelErrorKind::ConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::conflict_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_conflict_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AddNotificationChannelError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::AddNotificationChannelError {
meta: generic,
kind: crate::error::AddNotificationChannelErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AddNotificationChannelError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_add_notification_channel_internal_server_exception_retry_after_seconds(response.headers())
                    .map_err(|_|crate::error::AddNotificationChannelError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::AddNotificationChannelError {
meta: generic,
kind: crate::error::AddNotificationChannelErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::AddNotificationChannelError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ServiceQuotaExceededException" => crate::error::AddNotificationChannelError {
meta: generic,
kind: crate::error::AddNotificationChannelErrorKind::ServiceQuotaExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::service_quota_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_service_quota_exceeded_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::AddNotificationChannelError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ThrottlingException" => crate::error::AddNotificationChannelError {
meta: generic,
kind: crate::error::AddNotificationChannelErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AddNotificationChannelError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_add_notification_channel_throttling_exception_retry_after_seconds(response.headers())
                    .map_err(|_|crate::error::AddNotificationChannelError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::AddNotificationChannelError {
meta: generic,
kind: crate::error::AddNotificationChannelErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AddNotificationChannelError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::AddNotificationChannelError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_add_notification_channel_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::AddNotificationChannelOutput,
crate::error::AddNotificationChannelError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::add_notification_channel_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_add_notification_channel(
response.body().as_ref(),
output,
)
.map_err(crate::error::AddNotificationChannelError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_account_health_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeAccountHealthOutput,
crate::error::DescribeAccountHealthError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeAccountHealthError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DescribeAccountHealthError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::DescribeAccountHealthError {
meta: generic,
kind: crate::error::DescribeAccountHealthErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAccountHealthError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::DescribeAccountHealthError {
meta: generic,
kind: crate::error::DescribeAccountHealthErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAccountHealthError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_describe_account_health_internal_server_exception_retry_after_seconds(response.headers())
                    .map_err(|_|crate::error::DescribeAccountHealthError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ThrottlingException" => crate::error::DescribeAccountHealthError {
meta: generic,
kind: crate::error::DescribeAccountHealthErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAccountHealthError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_describe_account_health_throttling_exception_retry_after_seconds(response.headers())
                    .map_err(|_|crate::error::DescribeAccountHealthError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::DescribeAccountHealthError {
meta: generic,
kind: crate::error::DescribeAccountHealthErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAccountHealthError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeAccountHealthError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_account_health_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeAccountHealthOutput,
crate::error::DescribeAccountHealthError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_account_health_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_account_health(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAccountHealthError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_account_overview_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeAccountOverviewOutput,
crate::error::DescribeAccountOverviewError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeAccountOverviewError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::DescribeAccountOverviewError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::DescribeAccountOverviewError {
meta: generic,
kind: crate::error::DescribeAccountOverviewErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAccountOverviewError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::DescribeAccountOverviewError {
meta: generic,
kind: crate::error::DescribeAccountOverviewErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAccountOverviewError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_describe_account_overview_internal_server_exception_retry_after_seconds(response.headers())
                    .map_err(|_|crate::error::DescribeAccountOverviewError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ThrottlingException" => crate::error::DescribeAccountOverviewError {
meta: generic,
kind: crate::error::DescribeAccountOverviewErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAccountOverviewError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_describe_account_overview_throttling_exception_retry_after_seconds(response.headers())
                    .map_err(|_|crate::error::DescribeAccountOverviewError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::DescribeAccountOverviewError {
meta: generic,
kind: crate::error::DescribeAccountOverviewErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAccountOverviewError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeAccountOverviewError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_account_overview_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeAccountOverviewOutput,
crate::error::DescribeAccountOverviewError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_account_overview_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_account_overview(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAccountOverviewError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_anomaly_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DescribeAnomalyOutput, crate::error::DescribeAnomalyError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeAnomalyError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DescribeAnomalyError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::DescribeAnomalyError {
meta: generic,
kind: crate::error::DescribeAnomalyErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAnomalyError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::DescribeAnomalyError {
meta: generic,
kind: crate::error::DescribeAnomalyErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAnomalyError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_describe_anomaly_internal_server_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::DescribeAnomalyError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DescribeAnomalyError {
meta: generic,
kind: crate::error::DescribeAnomalyErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeAnomalyError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::DescribeAnomalyError {
meta: generic,
kind: crate::error::DescribeAnomalyErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAnomalyError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_describe_anomaly_throttling_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::DescribeAnomalyError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::DescribeAnomalyError {
meta: generic,
kind: crate::error::DescribeAnomalyErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeAnomalyError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeAnomalyError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_anomaly_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DescribeAnomalyOutput, crate::error::DescribeAnomalyError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_anomaly_output::Builder::default();
let _ = response;
output =
crate::json_deser::deser_operation_describe_anomaly(response.body().as_ref(), output)
.map_err(crate::error::DescribeAnomalyError::unhandled)?;
output.build()
})
}
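// Error parser for DescribeFeedback; the InternalServerException and
// ThrottlingException arms also capture the Retry-After header.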
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_feedback_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DescribeFeedbackOutput, crate::error::DescribeFeedbackError>
{
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeFeedbackError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DescribeFeedbackError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::DescribeFeedbackError {
meta: generic,
kind: crate::error::DescribeFeedbackErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeFeedbackError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::DescribeFeedbackError {
meta: generic,
kind: crate::error::DescribeFeedbackErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeFeedbackError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_describe_feedback_internal_server_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::DescribeFeedbackError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DescribeFeedbackError {
meta: generic,
kind: crate::error::DescribeFeedbackErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeFeedbackError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::DescribeFeedbackError {
meta: generic,
kind: crate::error::DescribeFeedbackErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeFeedbackError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_describe_feedback_throttling_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::DescribeFeedbackError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::DescribeFeedbackError {
meta: generic,
kind: crate::error::DescribeFeedbackErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeFeedbackError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeFeedbackError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_feedback_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DescribeFeedbackOutput, crate::error::DescribeFeedbackError>
{
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_feedback_output::Builder::default();
let _ = response;
output =
crate::json_deser::deser_operation_describe_feedback(response.body().as_ref(), output)
.map_err(crate::error::DescribeFeedbackError::unhandled)?;
output.build()
})
}
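// Error parser for DescribeInsight, covering the same five modeled exceptions
// as DescribeAnomaly and DescribeFeedback.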
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_insight_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DescribeInsightOutput, crate::error::DescribeInsightError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeInsightError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DescribeInsightError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::DescribeInsightError {
meta: generic,
kind: crate::error::DescribeInsightErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::DescribeInsightError {
meta: generic,
kind: crate::error::DescribeInsightErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeInsightError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_describe_insight_internal_server_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::DescribeInsightError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DescribeInsightError {
meta: generic,
kind: crate::error::DescribeInsightErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::DescribeInsightError {
meta: generic,
kind: crate::error::DescribeInsightErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeInsightError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_describe_insight_throttling_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::DescribeInsightError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::DescribeInsightError {
meta: generic,
kind: crate::error::DescribeInsightErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeInsightError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_insight_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DescribeInsightOutput, crate::error::DescribeInsightError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_insight_output::Builder::default();
let _ = response;
output =
crate::json_deser::deser_operation_describe_insight(response.body().as_ref(), output)
.map_err(crate::error::DescribeInsightError::unhandled)?;
output.build()
})
}
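// Error parser for DescribeResourceCollectionHealth; this operation models no
// ResourceNotFoundException, so only four exception variants are handled.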
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_resource_collection_health_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeResourceCollectionHealthOutput,
crate::error::DescribeResourceCollectionHealthError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeResourceCollectionHealthError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::DescribeResourceCollectionHealthError::unhandled(generic))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::DescribeResourceCollectionHealthError {
meta: generic,
kind: crate::error::DescribeResourceCollectionHealthErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeResourceCollectionHealthError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => {
crate::error::DescribeResourceCollectionHealthError {
meta: generic,
kind:
crate::error::DescribeResourceCollectionHealthErrorKind::InternalServerException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeResourceCollectionHealthError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_describe_resource_collection_health_internal_server_exception_retry_after_seconds(response.headers())
                        .map_err(|_|crate::error::DescribeResourceCollectionHealthError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"ThrottlingException" => crate::error::DescribeResourceCollectionHealthError {
meta: generic,
kind: crate::error::DescribeResourceCollectionHealthErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeResourceCollectionHealthError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_describe_resource_collection_health_throttling_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::DescribeResourceCollectionHealthError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::DescribeResourceCollectionHealthError {
meta: generic,
kind: crate::error::DescribeResourceCollectionHealthErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeResourceCollectionHealthError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeResourceCollectionHealthError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_resource_collection_health_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeResourceCollectionHealthOutput,
crate::error::DescribeResourceCollectionHealthError,
> {
Ok({
#[allow(unused_mut)]
let mut output =
crate::output::describe_resource_collection_health_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_resource_collection_health(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeResourceCollectionHealthError::unhandled)?;
output.build()
})
}
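// Error parser for DescribeServiceIntegration (AccessDenied, InternalServer,
// Throttling, and Validation exceptions).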
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_service_integration_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeServiceIntegrationOutput,
crate::error::DescribeServiceIntegrationError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::DescribeServiceIntegrationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::DescribeServiceIntegrationError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::DescribeServiceIntegrationError {
meta: generic,
kind: crate::error::DescribeServiceIntegrationErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeServiceIntegrationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::DescribeServiceIntegrationError {
meta: generic,
kind: crate::error::DescribeServiceIntegrationErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeServiceIntegrationError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_describe_service_integration_internal_server_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::DescribeServiceIntegrationError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ThrottlingException" => crate::error::DescribeServiceIntegrationError {
meta: generic,
kind: crate::error::DescribeServiceIntegrationErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeServiceIntegrationError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_describe_service_integration_throttling_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::DescribeServiceIntegrationError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::DescribeServiceIntegrationError {
meta: generic,
kind: crate::error::DescribeServiceIntegrationErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeServiceIntegrationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeServiceIntegrationError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_service_integration_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeServiceIntegrationOutput,
crate::error::DescribeServiceIntegrationError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_service_integration_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_service_integration(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeServiceIntegrationError::unhandled)?;
output.build()
})
}
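// Error parser for GetCostEstimation; includes a ResourceNotFoundException
// variant alongside the four common exceptions.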
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_cost_estimation_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetCostEstimationOutput, crate::error::GetCostEstimationError>
{
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetCostEstimationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetCostEstimationError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::GetCostEstimationError {
meta: generic,
kind: crate::error::GetCostEstimationErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetCostEstimationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::GetCostEstimationError {
meta: generic,
kind: crate::error::GetCostEstimationErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetCostEstimationError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_cost_estimation_internal_server_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::GetCostEstimationError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::GetCostEstimationError {
meta: generic,
kind: crate::error::GetCostEstimationErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::GetCostEstimationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::GetCostEstimationError {
meta: generic,
kind: crate::error::GetCostEstimationErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetCostEstimationError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_cost_estimation_throttling_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::GetCostEstimationError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::GetCostEstimationError {
meta: generic,
kind: crate::error::GetCostEstimationErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetCostEstimationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetCostEstimationError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_cost_estimation_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetCostEstimationOutput, crate::error::GetCostEstimationError>
{
Ok({
#[allow(unused_mut)]
let mut output = crate::output::get_cost_estimation_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_get_cost_estimation(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetCostEstimationError::unhandled)?;
output.build()
})
}
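// Error parser for GetResourceCollection; same dispatch shape as
// GetCostEstimation.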
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_resource_collection_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::GetResourceCollectionOutput,
crate::error::GetResourceCollectionError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::GetResourceCollectionError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetResourceCollectionError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::GetResourceCollectionError {
meta: generic,
kind: crate::error::GetResourceCollectionErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetResourceCollectionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::GetResourceCollectionError {
meta: generic,
kind: crate::error::GetResourceCollectionErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetResourceCollectionError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_resource_collection_internal_server_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::GetResourceCollectionError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::GetResourceCollectionError {
meta: generic,
kind: crate::error::GetResourceCollectionErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::GetResourceCollectionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::GetResourceCollectionError {
meta: generic,
kind: crate::error::GetResourceCollectionErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetResourceCollectionError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_get_resource_collection_throttling_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::GetResourceCollectionError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::GetResourceCollectionError {
meta: generic,
kind: crate::error::GetResourceCollectionErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetResourceCollectionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetResourceCollectionError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_resource_collection_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::GetResourceCollectionOutput,
crate::error::GetResourceCollectionError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::get_resource_collection_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_get_resource_collection(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetResourceCollectionError::unhandled)?;
output.build()
})
}
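// Error parser for ListAnomaliesForInsight; matches the five modeled
// exceptions and falls back to a generic error otherwise.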
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_anomalies_for_insight_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListAnomaliesForInsightOutput,
crate::error::ListAnomaliesForInsightError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::ListAnomaliesForInsightError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::ListAnomaliesForInsightError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ListAnomaliesForInsightError {
meta: generic,
kind: crate::error::ListAnomaliesForInsightErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListAnomaliesForInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::ListAnomaliesForInsightError {
meta: generic,
kind: crate::error::ListAnomaliesForInsightErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListAnomaliesForInsightError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_list_anomalies_for_insight_internal_server_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::ListAnomaliesForInsightError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::ListAnomaliesForInsightError {
meta: generic,
kind: crate::error::ListAnomaliesForInsightErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListAnomaliesForInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::ListAnomaliesForInsightError {
meta: generic,
kind: crate::error::ListAnomaliesForInsightErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListAnomaliesForInsightError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_list_anomalies_for_insight_throttling_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::ListAnomaliesForInsightError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::ListAnomaliesForInsightError {
meta: generic,
kind: crate::error::ListAnomaliesForInsightErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListAnomaliesForInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListAnomaliesForInsightError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_anomalies_for_insight_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListAnomaliesForInsightOutput,
crate::error::ListAnomaliesForInsightError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::list_anomalies_for_insight_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_list_anomalies_for_insight(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListAnomaliesForInsightError::unhandled)?;
output.build()
})
}
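// Error parser for ListEvents, with the same five exception variants as
// ListAnomaliesForInsight.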
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_events_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListEventsOutput, crate::error::ListEventsError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::ListEventsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::ListEventsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ListEventsError {
meta: generic,
kind: crate::error::ListEventsErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListEventsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::ListEventsError {
meta: generic,
kind: crate::error::ListEventsErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListEventsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_list_events_internal_server_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::ListEventsError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::ListEventsError {
meta: generic,
kind: crate::error::ListEventsErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListEventsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::ListEventsError {
meta: generic,
kind: crate::error::ListEventsErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListEventsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_list_events_throttling_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::ListEventsError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::ListEventsError {
meta: generic,
kind: crate::error::ListEventsErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListEventsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListEventsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_events_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListEventsOutput, crate::error::ListEventsError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::list_events_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_list_events(response.body().as_ref(), output)
.map_err(crate::error::ListEventsError::unhandled)?;
output.build()
})
}
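// Error parser for ListInsights; no ResourceNotFoundException is modeled for
// this operation.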
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_insights_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListInsightsOutput, crate::error::ListInsightsError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::ListInsightsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::ListInsightsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ListInsightsError {
meta: generic,
kind: crate::error::ListInsightsErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListInsightsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::ListInsightsError {
meta: generic,
kind: crate::error::ListInsightsErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListInsightsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_list_insights_internal_server_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::ListInsightsError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ThrottlingException" => crate::error::ListInsightsError {
meta: generic,
kind: crate::error::ListInsightsErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListInsightsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_list_insights_throttling_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::ListInsightsError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::ListInsightsError {
meta: generic,
kind: crate::error::ListInsightsErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListInsightsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListInsightsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_insights_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListInsightsOutput, crate::error::ListInsightsError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::list_insights_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_list_insights(response.body().as_ref(), output)
.map_err(crate::error::ListInsightsError::unhandled)?;
output.build()
})
}
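// Error parser for ListNotificationChannels (AccessDenied, InternalServer,
// Throttling, and Validation exceptions).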
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_notification_channels_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListNotificationChannelsOutput,
crate::error::ListNotificationChannelsError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::ListNotificationChannelsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::ListNotificationChannelsError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ListNotificationChannelsError {
meta: generic,
kind: crate::error::ListNotificationChannelsErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListNotificationChannelsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::ListNotificationChannelsError {
meta: generic,
kind: crate::error::ListNotificationChannelsErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListNotificationChannelsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_list_notification_channels_internal_server_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::ListNotificationChannelsError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ThrottlingException" => crate::error::ListNotificationChannelsError {
meta: generic,
kind: crate::error::ListNotificationChannelsErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListNotificationChannelsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_list_notification_channels_throttling_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::ListNotificationChannelsError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::ListNotificationChannelsError {
meta: generic,
kind: crate::error::ListNotificationChannelsErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListNotificationChannelsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListNotificationChannelsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_notification_channels_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListNotificationChannelsOutput,
crate::error::ListNotificationChannelsError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::list_notification_channels_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_list_notification_channels(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListNotificationChannelsError::unhandled)?;
output.build()
})
}
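// Error parser for ListRecommendations; includes ResourceNotFoundException in
// addition to the four common variants.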
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_recommendations_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListRecommendationsOutput,
crate::error::ListRecommendationsError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::ListRecommendationsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::ListRecommendationsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::ListRecommendationsError {
meta: generic,
kind: crate::error::ListRecommendationsErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListRecommendationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::ListRecommendationsError {
meta: generic,
kind: crate::error::ListRecommendationsErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListRecommendationsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_list_recommendations_internal_server_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::ListRecommendationsError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::ListRecommendationsError {
meta: generic,
kind: crate::error::ListRecommendationsErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListRecommendationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::ListRecommendationsError {
meta: generic,
kind: crate::error::ListRecommendationsErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListRecommendationsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_list_recommendations_throttling_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::ListRecommendationsError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::ListRecommendationsError {
meta: generic,
kind: crate::error::ListRecommendationsErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListRecommendationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListRecommendationsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_recommendations_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListRecommendationsOutput,
crate::error::ListRecommendationsError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::list_recommendations_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_list_recommendations(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListRecommendationsError::unhandled)?;
output.build()
})
}
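// Error parser for PutFeedback; as a mutating operation it also models
// ConflictException.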
#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_feedback_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutFeedbackOutput, crate::error::PutFeedbackError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::PutFeedbackError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::PutFeedbackError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::PutFeedbackError {
meta: generic,
kind: crate::error::PutFeedbackErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::PutFeedbackError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ConflictException" => crate::error::PutFeedbackError {
meta: generic,
kind: crate::error::PutFeedbackErrorKind::ConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::conflict_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_conflict_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::PutFeedbackError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::PutFeedbackError {
meta: generic,
kind: crate::error::PutFeedbackErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::PutFeedbackError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_put_feedback_internal_server_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::PutFeedbackError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::PutFeedbackError {
meta: generic,
kind: crate::error::PutFeedbackErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::PutFeedbackError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::PutFeedbackError {
meta: generic,
kind: crate::error::PutFeedbackErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::PutFeedbackError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_put_feedback_throttling_exception_retry_after_seconds(response.headers())
                .map_err(|_|crate::error::PutFeedbackError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::PutFeedbackError {
meta: generic,
kind: crate::error::PutFeedbackErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::PutFeedbackError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::PutFeedbackError::generic(generic),
})
}
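// PutFeedback has no modeled response fields, so the body is ignored and an
// empty output is built.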
#[allow(clippy::unnecessary_wraps)]
pub fn parse_put_feedback_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::PutFeedbackOutput, crate::error::PutFeedbackError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::put_feedback_output::Builder::default();
let _ = response;
output.build()
})
}
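// Error parser for RemoveNotificationChannel; models ConflictException and
// ResourceNotFoundException in addition to the common variants.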
#[allow(clippy::unnecessary_wraps)]
pub fn parse_remove_notification_channel_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::RemoveNotificationChannelOutput,
crate::error::RemoveNotificationChannelError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::RemoveNotificationChannelError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::RemoveNotificationChannelError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::RemoveNotificationChannelError {
meta: generic,
kind: crate::error::RemoveNotificationChannelErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::RemoveNotificationChannelError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ConflictException" => crate::error::RemoveNotificationChannelError {
meta: generic,
kind: crate::error::RemoveNotificationChannelErrorKind::ConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::conflict_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_conflict_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::RemoveNotificationChannelError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::RemoveNotificationChannelError {
meta: generic,
kind: crate::error::RemoveNotificationChannelErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::RemoveNotificationChannelError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_remove_notification_channel_internal_server_exception_retry_after_seconds(response.headers())
                    .map_err(|_| crate::error::RemoveNotificationChannelError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::RemoveNotificationChannelError {
meta: generic,
kind: crate::error::RemoveNotificationChannelErrorKind::ResourceNotFoundException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::RemoveNotificationChannelError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"ThrottlingException" => crate::error::RemoveNotificationChannelError {
meta: generic,
kind: crate::error::RemoveNotificationChannelErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::RemoveNotificationChannelError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_remove_notification_channel_throttling_exception_retry_after_seconds(response.headers())
                    .map_err(|_| crate::error::RemoveNotificationChannelError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::RemoveNotificationChannelError {
meta: generic,
kind: crate::error::RemoveNotificationChannelErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::RemoveNotificationChannelError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::RemoveNotificationChannelError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_remove_notification_channel_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::RemoveNotificationChannelOutput,
crate::error::RemoveNotificationChannelError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::remove_notification_channel_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_search_insights_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::SearchInsightsOutput, crate::error::SearchInsightsError> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::SearchInsightsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::SearchInsightsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::SearchInsightsError {
meta: generic,
kind: crate::error::SearchInsightsErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::SearchInsightsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::SearchInsightsError {
meta: generic,
kind: crate::error::SearchInsightsErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::SearchInsightsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_search_insights_internal_server_exception_retry_after_seconds(response.headers())
                    .map_err(|_| crate::error::SearchInsightsError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ThrottlingException" => crate::error::SearchInsightsError {
meta: generic,
kind: crate::error::SearchInsightsErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::SearchInsightsError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_search_insights_throttling_exception_retry_after_seconds(response.headers())
                    .map_err(|_| crate::error::SearchInsightsError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::SearchInsightsError {
meta: generic,
kind: crate::error::SearchInsightsErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::SearchInsightsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::SearchInsightsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_search_insights_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::SearchInsightsOutput, crate::error::SearchInsightsError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::search_insights_output::Builder::default();
let _ = response;
output =
crate::json_deser::deser_operation_search_insights(response.body().as_ref(), output)
.map_err(crate::error::SearchInsightsError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_start_cost_estimation_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::StartCostEstimationOutput,
crate::error::StartCostEstimationError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::StartCostEstimationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::StartCostEstimationError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::StartCostEstimationError {
meta: generic,
kind: crate::error::StartCostEstimationErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::StartCostEstimationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ConflictException" => crate::error::StartCostEstimationError {
meta: generic,
kind: crate::error::StartCostEstimationErrorKind::ConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::conflict_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_conflict_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::StartCostEstimationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::StartCostEstimationError {
meta: generic,
kind: crate::error::StartCostEstimationErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::StartCostEstimationError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_start_cost_estimation_internal_server_exception_retry_after_seconds(response.headers())
                    .map_err(|_| crate::error::StartCostEstimationError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::StartCostEstimationError {
meta: generic,
kind: crate::error::StartCostEstimationErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::StartCostEstimationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
"ThrottlingException" => crate::error::StartCostEstimationError {
meta: generic,
kind: crate::error::StartCostEstimationErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::StartCostEstimationError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_start_cost_estimation_throttling_exception_retry_after_seconds(response.headers())
                    .map_err(|_| crate::error::StartCostEstimationError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::StartCostEstimationError {
meta: generic,
kind: crate::error::StartCostEstimationErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::StartCostEstimationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::StartCostEstimationError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_start_cost_estimation_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::StartCostEstimationOutput,
crate::error::StartCostEstimationError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::start_cost_estimation_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_resource_collection_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::UpdateResourceCollectionOutput,
crate::error::UpdateResourceCollectionError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::UpdateResourceCollectionError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::UpdateResourceCollectionError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::UpdateResourceCollectionError {
meta: generic,
kind: crate::error::UpdateResourceCollectionErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateResourceCollectionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ConflictException" => crate::error::UpdateResourceCollectionError {
meta: generic,
kind: crate::error::UpdateResourceCollectionErrorKind::ConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::conflict_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_conflict_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateResourceCollectionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::UpdateResourceCollectionError {
meta: generic,
kind: crate::error::UpdateResourceCollectionErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateResourceCollectionError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_update_resource_collection_internal_server_exception_retry_after_seconds(response.headers())
                    .map_err(|_| crate::error::UpdateResourceCollectionError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ThrottlingException" => crate::error::UpdateResourceCollectionError {
meta: generic,
kind: crate::error::UpdateResourceCollectionErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateResourceCollectionError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_update_resource_collection_throttling_exception_retry_after_seconds(response.headers())
                    .map_err(|_| crate::error::UpdateResourceCollectionError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::UpdateResourceCollectionError {
meta: generic,
kind: crate::error::UpdateResourceCollectionErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateResourceCollectionError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::UpdateResourceCollectionError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_resource_collection_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::UpdateResourceCollectionOutput,
crate::error::UpdateResourceCollectionError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::update_resource_collection_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_service_integration_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::UpdateServiceIntegrationOutput,
crate::error::UpdateServiceIntegrationError,
> {
let generic = crate::json_deser::parse_http_generic_error(response)
.map_err(crate::error::UpdateServiceIntegrationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::UpdateServiceIntegrationError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::UpdateServiceIntegrationError {
meta: generic,
kind: crate::error::UpdateServiceIntegrationErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateServiceIntegrationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ConflictException" => crate::error::UpdateServiceIntegrationError {
meta: generic,
kind: crate::error::UpdateServiceIntegrationErrorKind::ConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::conflict_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_conflict_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateServiceIntegrationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalServerException" => crate::error::UpdateServiceIntegrationError {
meta: generic,
kind: crate::error::UpdateServiceIntegrationErrorKind::InternalServerException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_server_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_server_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateServiceIntegrationError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_update_service_integration_internal_server_exception_retry_after_seconds(response.headers())
                    .map_err(|_| crate::error::UpdateServiceIntegrationError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ThrottlingException" => crate::error::UpdateServiceIntegrationError {
meta: generic,
kind: crate::error::UpdateServiceIntegrationErrorKind::ThrottlingException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::throttling_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_throttling_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateServiceIntegrationError::unhandled)?;
output = output.set_retry_after_seconds(
crate::http_serde::deser_header_update_service_integration_throttling_exception_retry_after_seconds(response.headers())
                    .map_err(|_| crate::error::UpdateServiceIntegrationError::unhandled("Failed to parse RetryAfterSeconds from header `Retry-After`"))?
);
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ValidationException" => crate::error::UpdateServiceIntegrationError {
meta: generic,
kind: crate::error::UpdateServiceIntegrationErrorKind::ValidationException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::validation_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_validation_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateServiceIntegrationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::UpdateServiceIntegrationError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_service_integration_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::UpdateServiceIntegrationOutput,
crate::error::UpdateServiceIntegrationError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::update_service_integration_output::Builder::default();
let _ = response;
output.build()
})
}
|
parse_put_feedback_error
|
Int64Value.ts
|
// Original file: null
import type { Long } from '@grpc/proto-loader';
export interface Int64Value {
'value'?: (number | string | Long);
}
|
'value': (string);
}
|
export interface Int64Value__Output {
|
0003_add_validators.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import conman.routes.validators
class
|
(migrations.Migration):
dependencies = [
('routes', '0002_remove_slug_parent'),
]
operations = [
migrations.AlterField(
model_name='route',
name='url',
field=models.TextField(db_index=True, help_text='The operative URL for this Route.', validators=[conman.routes.validators.validate_end_in_slash, conman.routes.validators.validate_start_in_slash, conman.routes.validators.validate_no_dotty_subpaths, conman.routes.validators.validate_no_double_slashes, conman.routes.validators.validate_no_hash_symbol, conman.routes.validators.validate_no_questionmark], unique=True, verbose_name='URL'),
),
]
|
Migration
|
lib.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use aptos_id_generator::{IdGenerator, U64IdGenerator};
use aptos_infallible::RwLock;
use aptos_types::{
account_state::AccountState,
contract_event::ContractEvent,
event::EventKey,
move_resource::MoveStorage,
on_chain_config,
on_chain_config::{config_address, ConfigID, OnChainConfigPayload},
transaction::Version,
};
use channel::{aptos_channel, message_queues::QueueStyle};
use futures::{channel::mpsc::SendError, stream::FusedStream, Stream};
use serde::{Deserialize, Serialize};
use std::{
collections::{HashMap, HashSet},
convert::TryFrom,
iter::FromIterator,
ops::Deref,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use storage_interface::DbReaderWriter;
use thiserror::Error;
#[cfg(test)]
mod tests;
// Maximum channel sizes for each notification subscriber. If messages are not
// consumed, they will be dropped (oldest messages first). The remaining messages
// will be retrieved using FIFO ordering.
const EVENT_NOTIFICATION_CHANNEL_SIZE: usize = 100;
const RECONFIG_NOTIFICATION_CHANNEL_SIZE: usize = 1;
#[derive(Clone, Debug, Deserialize, Error, PartialEq, Serialize)]
pub enum Error {
#[error("Cannot subscribe to zero event keys!")]
CannotSubscribeToZeroEventKeys,
#[error("Missing event subscription! Subscription ID: {0}")]
MissingEventSubscription(u64),
#[error("Unable to send event notification! Error: {0}")]
UnableToSendEventNotification(String),
#[error("Unexpected error encountered: {0}")]
UnexpectedErrorEncountered(String),
}
impl From<SendError> for Error {
fn from(error: SendError) -> Self {
Error::UnableToSendEventNotification(error.to_string())
}
}
/// The interface between state sync and the subscription notification service,
/// allowing state sync to notify the subscription service of new events.
pub trait EventNotificationSender: Send {
/// Notify the subscription service of the events at the specified version.
fn notify_events(&mut self, version: Version, events: Vec<ContractEvent>) -> Result<(), Error>;
/// Forces the subscription service to notify subscribers of the current
/// on-chain configurations at the specified version.
/// This is useful for forcing reconfiguration notifications even if no
/// reconfiguration event was processed (e.g., on startup).
fn notify_initial_configs(&mut self, version: Version) -> Result<(), Error>;
}
/// The subscription service offered by state sync, responsible for notifying
/// subscribers of on-chain events.
pub struct EventSubscriptionService {
// Event subscription registry
event_key_subscriptions: HashMap<EventKey, HashSet<SubscriptionId>>,
subscription_id_to_event_subscription: HashMap<SubscriptionId, EventSubscription>,
// Reconfig subscription registry
reconfig_subscriptions: HashMap<SubscriptionId, ReconfigSubscription>,
// Database to fetch on-chain configuration data
storage: Arc<RwLock<DbReaderWriter>>,
// The list of all on-chain configurations used to notify subscribers
config_registry: Vec<ConfigID>,
// Internal subscription ID generator
subscription_id_generator: U64IdGenerator,
}
impl EventSubscriptionService {
pub fn new(config_registry: &[ConfigID], storage: Arc<RwLock<DbReaderWriter>>) -> Self {
Self {
event_key_subscriptions: HashMap::new(),
subscription_id_to_event_subscription: HashMap::new(),
reconfig_subscriptions: HashMap::new(),
config_registry: config_registry.to_vec(),
storage,
subscription_id_generator: U64IdGenerator::new(),
}
}
/// Returns an EventNotificationListener that can be monitored for
/// subscribed events. If an event key is subscribed to, it means the
/// EventNotificationListener will be sent a notification every time an
/// event with the matching key occurs on-chain. Note: if the notification
/// buffer fills up too quickly, older notifications will be dropped. As
/// such, it is the responsibility of the subscriber to ensure notifications
/// are processed in a timely manner.
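    ///
    /// A minimal usage sketch (assumes a service instance and an event key are
    /// already in scope, and that `futures::StreamExt` is imported):
    ///
    /// ```ignore
    /// let mut listener = subscription_service.subscribe_to_events(vec![event_key])?;
    /// if let Some(notification) = listener.next().await {
    ///     // Handle `notification.subscribed_events` at `notification.version`.
    /// }
    /// ```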
pub fn subscribe_to_events(
&mut self,
event_keys: Vec<EventKey>,
) -> Result<EventNotificationListener, Error> {
if event_keys.is_empty() {
return Err(Error::CannotSubscribeToZeroEventKeys);
}
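        // `QueueStyle::KLAST` keeps only the most recent messages per key, so a
        // slow subscriber drops its oldest notifications instead of blocking the
        // notifier.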
let (notification_sender, notification_receiver) =
aptos_channel::new(QueueStyle::KLAST, EVENT_NOTIFICATION_CHANNEL_SIZE, None);
// Create a new event subscription
let subscription_id = self.get_new_subscription_id();
let event_subscription = EventSubscription {
subscription_id,
notification_sender,
event_buffer: vec![],
};
// Store the new subscription
if let Some(old_subscription) = self
.subscription_id_to_event_subscription
.insert(subscription_id, event_subscription)
{
panic!(
"Duplicate event subscription found! This should not occur! ID: {}, subscription: {:?}",
subscription_id, old_subscription
);
}
// Update the event key subscriptions to include the new subscription
for event_key in event_keys {
self.event_key_subscriptions
.entry(event_key)
.and_modify(|subscriptions| {
subscriptions.insert(subscription_id);
})
                .or_insert_with(|| HashSet::from_iter(vec![subscription_id]));
}
Ok(EventNotificationListener {
notification_receiver,
})
}
/// Returns a ReconfigNotificationListener that can be monitored for
/// reconfiguration events. Subscribers will be sent a notification
/// containing all new on-chain configuration values whenever a new epoch
/// begins. Note: if the notification buffer fills up too quickly, older
/// notifications will be dropped. As such, it is the responsibility of the
/// subscriber to ensure notifications are processed in a timely manner.
pub fn subscribe_to_reconfigurations(&mut self) -> Result<ReconfigNotificationListener, Error> {
let (notification_sender, notification_receiver) =
aptos_channel::new(QueueStyle::KLAST, RECONFIG_NOTIFICATION_CHANNEL_SIZE, None);
// Create a new reconfiguration subscription
let subscription_id = self.get_new_subscription_id();
let reconfig_subscription = ReconfigSubscription {
subscription_id,
notification_sender,
};
// Store the new subscription
if let Some(old_subscription) = self
.reconfig_subscriptions
.insert(subscription_id, reconfig_subscription)
{
panic!(
"Duplicate reconfiguration subscription found! This should not occur! ID: {}, subscription: {:?}",
subscription_id, old_subscription
);
}
Ok(ReconfigNotificationListener {
notification_receiver,
})
}
fn get_new_subscription_id(&mut self) -> u64 {
self.subscription_id_generator.next()
}
/// This notifies all the event subscribers of the new events found at the
/// specified version. If a reconfiguration event (i.e., new epoch) is found,
/// this method will return true.
fn notify_event_subscribers(
&mut self,
version: Version,
events: Vec<ContractEvent>,
) -> Result<bool, Error> {
let mut reconfig_event_found = false;
let mut event_subscription_ids_to_notify = HashSet::new();
for event in events.iter() {
let event_key = event.key();
// Process all subscriptions for the current event
if let Some(subscription_ids) = self.event_key_subscriptions.get(event_key) {
// Add the event to the subscription's pending event buffer
                // and store the subscriptions that will need to be notified once all
// events have been processed.
for subscription_id in subscription_ids.iter() {
if let Some(event_subscription) = self
.subscription_id_to_event_subscription
.get_mut(subscription_id)
{
event_subscription.buffer_event(event.clone());
event_subscription_ids_to_notify.insert(*subscription_id);
} else {
return Err(Error::MissingEventSubscription(*subscription_id));
}
}
}
// Take note if a reconfiguration (new epoch) has occurred
if *event_key == on_chain_config::new_epoch_event_key() {
reconfig_event_found = true;
}
}
// Notify event subscribers of the new events
for event_subscription_id in event_subscription_ids_to_notify {
if let Some(event_subscription) = self
.subscription_id_to_event_subscription
.get_mut(&event_subscription_id)
{
event_subscription.notify_subscriber_of_events(version)?;
} else {
return Err(Error::MissingEventSubscription(event_subscription_id));
}
}
Ok(reconfig_event_found)
}
/// This notifies all the reconfiguration subscribers of the on-chain
/// configurations at the specified version.
fn notify_reconfiguration_subscribers(&mut self, version: Version) -> Result<(), Error> {
if self.reconfig_subscriptions.is_empty() {
return Ok(()); // No reconfiguration subscribers!
}
let new_configs = self.read_on_chain_configs(version)?;
for (_, reconfig_subscription) in self.reconfig_subscriptions.iter_mut() {
reconfig_subscription.notify_subscriber_of_configs(version, new_configs.clone())?;
}
Ok(())
}
/// Fetches the configs on-chain at the specified version.
/// Note: We cannot assume that all configs will exist on-chain. As such, we
/// must fetch each resource one at a time. Reconfig subscribers must be able
/// to handle on-chain configs not existing in a reconfiguration notification.
fn read_on_chain_configs(&self, version: Version) -> Result<OnChainConfigPayload, Error> {
// Build a map from config ID to the config value found on-chain
let mut config_id_to_config = HashMap::new();
for config_id in self.config_registry.iter() {
if let Ok(config) = self
.storage
.read()
.reader
.deref()
.fetch_config_by_version(*config_id, version)
{
if let Some(old_entry) = config_id_to_config.insert(*config_id, config.clone()) {
panic!(
"Unexpected config values for duplicate config id found! Key: {}, Value: {:?}!",
config_id, old_entry
);
}
}
}
// Fetch the account state blob
let (account_state_blob, _) = self
.storage
.read()
.reader
.get_account_state_with_proof_by_version(config_address(), version)
.map_err(|error| {
Error::UnexpectedErrorEncountered(format!(
"Failed to fetch account state with proof {:?}",
error
))
})?;
let account_state_blob = account_state_blob.ok_or_else(|| {
Error::UnexpectedErrorEncountered("Missing account state blob!".into())
})?;
// Fetch the new epoch from storage
let epoch = AccountState::try_from(&account_state_blob)
.and_then(|state| {
Ok(state
.get_configuration_resource()?
.ok_or_else(|| {
Error::UnexpectedErrorEncountered(
"Configuration resource does not exist!".into(),
)
})?
.epoch())
})
.map_err(|error| {
Error::UnexpectedErrorEncountered(format!(
"Failed to fetch configuration resource! Error: {:?}",
error
))
})?;
// Return the new on-chain config payload (containing all found configs at this version).
Ok(OnChainConfigPayload::new(
epoch,
Arc::new(config_id_to_config),
))
}
}
impl EventNotificationSender for EventSubscriptionService {
fn notify_events(&mut self, version: Version, events: Vec<ContractEvent>) -> Result<(), Error> {
if events.is_empty() {
return Ok(()); // No events!
}
// Notify event subscribers and check if a reconfiguration event was processed
let reconfig_event_processed = self.notify_event_subscribers(version, events)?;
// If a reconfiguration event was found, also notify the reconfig subscribers
// of the new configuration values.
if reconfig_event_processed {
self.notify_reconfiguration_subscribers(version)
} else {
Ok(())
}
}
fn notify_initial_configs(&mut self, version: Version) -> Result<(), Error> {
self.notify_reconfiguration_subscribers(version)
}
}
/// A unique ID used to identify each subscription.
type SubscriptionId = u64;
/// A single event subscription, holding the subscription identifier, channel to
/// send the corresponding notifications and a buffer to hold pending events.
#[derive(Debug)]
struct EventSubscription {
pub subscription_id: SubscriptionId,
pub event_buffer: Vec<ContractEvent>,
pub notification_sender: channel::aptos_channel::Sender<(), EventNotification>,
}
impl EventSubscription {
fn buffer_event(&mut self, event: ContractEvent) {
self.event_buffer.push(event)
}
fn notify_subscriber_of_events(&mut self, version: Version) -> Result<(), Error> {
let event_notification = EventNotification {
subscribed_events: self.event_buffer.drain(..).collect(),
version,
};
self.notification_sender
.push((), event_notification)
.map_err(|error| Error::UnexpectedErrorEncountered(format!("{:?}", error)))
}
}
/// A single reconfig subscription, holding the channel to send the
/// corresponding notifications.
#[derive(Debug)]
struct ReconfigSubscription {
pub subscription_id: SubscriptionId,
pub notification_sender: channel::aptos_channel::Sender<(), ReconfigNotification>,
}
impl ReconfigSubscription {
fn notify_subscriber_of_configs(
&mut self,
version: Version,
on_chain_configs: OnChainConfigPayload,
) -> Result<(), Error> {
let reconfig_notification = ReconfigNotification {
version,
on_chain_configs,
};
self.notification_sender
.push((), reconfig_notification)
|
}
/// A notification for events.
#[derive(Debug)]
pub struct EventNotification {
pub version: Version,
pub subscribed_events: Vec<ContractEvent>,
}
/// A notification for reconfigurations.
#[derive(Debug)]
pub struct ReconfigNotification {
pub version: Version,
pub on_chain_configs: OnChainConfigPayload,
}
/// A subscription listener for on-chain events.
pub type EventNotificationListener = NotificationListener<EventNotification>;
/// A subscription listener for reconfigurations.
pub type ReconfigNotificationListener = NotificationListener<ReconfigNotification>;
/// The component responsible for listening to subscription notifications.
#[derive(Debug)]
pub struct NotificationListener<T> {
pub notification_receiver: channel::aptos_channel::Receiver<(), T>,
}
impl<T> Stream for NotificationListener<T> {
type Item = T;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
Pin::new(&mut self.get_mut().notification_receiver).poll_next(cx)
}
}
impl<T> FusedStream for NotificationListener<T> {
fn is_terminated(&self) -> bool {
self.notification_receiver.is_terminated()
}
}
|
.map_err(|error| Error::UnexpectedErrorEncountered(format!("{:?}", error)))
}
|
Footer.js
|
import React from 'react';
import AniLink from 'gatsby-plugin-transition-link/AniLink';
|
import socialIcons from '../constants/social-icons';
const Footer = () => {
return (
<footer className={styles.footer}>
<div className={styles.links}>
{links.map((item, i) => {
return (
<AniLink fade key={i} to={item.path}>
{item.text}
</AniLink>
);
})}
</div>
<div className={styles.icons}>
{socialIcons.map((item, i) => {
return (
<a
key={i}
href={item.url}
target="_blank"
rel="noopener noreferrer"
>{item.icon}</a>
);
})}
</div>
<div className={styles.copyright}>
copyright © backroads travel company {new Date().getFullYear()} all rights reserved
</div>
</footer>
);
};
export default Footer;
|
import styles from '../css/footer.module.css';
import links from '../constants/links';
|
add_awssqssources.go
|
/*
Copyright 2018 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
|
import (
"github.com/knative/eventing-sources/pkg/reconciler/awssqssource"
)
func init() {
// AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
AddToManagerFuncs = append(AddToManagerFuncs, awssqssource.Add)
}
|
package controller
|
check-kernel-imports.js
|
#! /usr/bin/env node
const path = require("path");
const fs = require("fs");
const readline = require("readline");
const HELP = `
Usage: check-kernel-imports PACKAGES ...
Where PACKAGES are paths to one or more elm packages. check-kernel-imports
checks that:
1. Use of kernel definitions matches imports in elm files.
2. Use of kernel definitions in elm files matches a definition in a javascript
file.
3. Use of elm definitions in javascript files matches a definition in an elm
file.
4. Use of an external definition matches an import in a javascript file.
5. Javascript files begin with an import block (that has an end).
6. Every line inside an import block is an import.
Note that check 3 is a best-effort attempt. There are some missed cases and some
false positives. Warnings will be issued for:
1. Unused imports in javascript files.
2. Empty import blocks in javascript files (as it probably means there is a comment before the
import block in the file).
3. Imports outside the import block in Javascript files.
Restrictions on kernel imports not checked:
1. You cannot import an elm file from another package unless it is exposed.
Options:
-h, --help display this help and exit
`.trim();
/* Future additions:
*
* * Check we do not use Bool in kernel interop.
*
*/
async function* getFiles(dir) {
const dirents = await fs.promises.readdir(dir, { withFileTypes: true });
for (const dirent of dirents) {
const absPath = path.resolve(dir, dirent.name);
if (dirent.isDirectory()) {
yield* getFiles(absPath);
} else {
yield absPath;
}
}
}
function getSrcFiles(packagePath) {
return getFiles(path.join(packagePath, "src"));
}
async function* asyncFlatMap(source, mapper) {
for await (const item of source) {
for await (const nestedItem of mapper(item)) {
yield nestedItem;
}
}
}
class CallLocation {
constructor(path, line) {
this.path = path;
this.line = line;
Object.freeze(this);
}
}
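// Append `location` to the array registered under `call` in `map`, creating
// the array on first sight of the call.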
function addCall(map, call, location) {
const callArray = (() => {
if (!map.has(call)) {
const a = [];
map.set(call, a);
return a;
}
return map.get(call);
})();
callArray.push(location);
}
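// Wrap a readline interface so that each yielded line carries its 1-based
// line number.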
async function* withLineNumbers(rl) {
let i = 1;
for await (const line of rl) {
yield { line, number: i };
i += 1;
}
}
async function processElmFile(file, elmDefinitions, kernelCalls) {
const lines = withLineNumbers(
readline.createInterface({
input: fs.createReadStream(file),
})
);
let moduleName = null;
const kernelImports = new Map();
const errors = [];
const warnings = [];
function addDef(defName, lineNumber) {
if (moduleName === null) {
errors.push(
`Elm definition before module line (or missing module line) at ${file}:${lineNumber}.`
);
}
elmDefinitions.add(`${moduleName}.${defName}`);
}
for await (const { number, line } of lines) {
const moduleNameMatch = line.match(/module\s*(\S+)\s.*exposing/u);
// Ignore all but the first module line, some comments include example elm
// files which cause multiple matches here. In these cases it is the first
// module line that is the valid one. (We hope!)
if (moduleNameMatch !== null && moduleName === null) {
moduleName = moduleNameMatch[1];
}
const importMatch = line.match(/^import\s+(Elm\.Kernel\.\w+)/u);
if (importMatch !== null) {
kernelImports.set(importMatch[1], { lineNumber: number, used: false });
continue;
}
    const skippedImportMatch = line.match(/^-- skipme import\s+(Elm\.Kernel\.\w+)/u);
    if (skippedImportMatch !== null) {
      kernelImports.set(skippedImportMatch[1], { lineNumber: number, used: false });
      warnings.push(`Kernel import of ${skippedImportMatch[1]} skipped at ${file}:${number}`);
continue;
}
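    // Top-level values, type (alias) declarations, and custom type variants
    // are detected with rough regexes; each match is registered as a
    // definition of the current module.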
const elmVarMatch = line.match(/^(\S*).*?=/u);
if (elmVarMatch !== null) {
addDef(elmVarMatch[1], number);
}
const elmTypeMatch = line.match(/type\s+(?:alias\s+)?(\S+)/u);
if (elmTypeMatch !== null) {
addDef(elmTypeMatch[1], number);
}
const elmCustomTypeMatch = line.match(/ {2}(?: {2})?[=|] (\w*)/u);
if (elmCustomTypeMatch !== null) {
addDef(elmCustomTypeMatch[1], number);
}
const kernelCallMatch = line.match(/(Elm\.Kernel\.\w+).\w+/u);
if (kernelCallMatch !== null) {
const kernelCall = kernelCallMatch[0];
const kernelModule = kernelCallMatch[1];
const importFacts = kernelImports.get(kernelModule);
if (importFacts === undefined) {
errors.push(`Kernel call ${kernelCall} at ${file}:${number} missing import`);
} else {
importFacts.used = true;
}
addCall(kernelCalls, kernelCall, new CallLocation(file, number));
}
}
for (const [kernelModule, { lineNumber, used }] of kernelImports.entries()) {
if (!used) {
warnings.push(`Kernel import of ${kernelModule} is unused in ${file}:${lineNumber}`);
}
}
return { errors, warnings };
}
async function processJsFile(file, importedDefs, kernelDefinitions) {
const lines = withLineNumbers(
readline.createInterface({
input: fs.createReadStream(file),
})
);
const moduleName = path.basename(file, ".js");
const imports = new Map();
const errors = [];
const warnings = [];
  let importBlockFound = false;
let inImport = false;
let lastImportLineNumber = 0;
for await (const { number, line } of lines) {
const importMatch = line.match(
/import\s+((?:[.\w]+\.)?(\w+))\s+(?:as (\w+)\s+)?exposing\s+\((\w+(?:,\s+\w+)*)\)/
);
if (!importBlockFound && line === "/*") {
importBlockFound = true;
inImport = true;
if (number !== 1) {
errors.push(`Kernel files must begin with imports at ${file}:${number}.`);
}
continue;
} else if (inImport) {
if (importMatch !== null) {
const hasAlias = importMatch[3] !== undefined;
// Use alias if it is there, otherwise use last part of import.
const moduleAlias = hasAlias ? importMatch[3] : importMatch[2];
const importedModulePath = importMatch[1];
const isKernel = importedModulePath.startsWith("Elm.Kernel");
if (hasAlias && isKernel) {
errors.push(
`Kernel import ${importedModulePath} cannot have alias at ${file}:${number}.`
);
} else if (!hasAlias && !isKernel && importedModulePath.includes(".")) {
errors.push(
`Qualified import ${importedModulePath} needs an alias at ${file}:${number}.`
);
}
for (const defName of importMatch[4].split(",").map((s) => s.trim())) {
imports.set(`__${moduleAlias}_${defName}`, { lineNumber: number, used: false });
const callFullPath = `${importedModulePath}.${defName}`;
addCall(importedDefs, callFullPath, new CallLocation(file, number));
}
lastImportLineNumber = number;
} else if (line === "*/") {
if (lastImportLineNumber === 0) {
warnings.push(`Empty import block at ${file}:${number}.`);
}
inImport = false;
} else if (line !== "") {
errors.push(`Invalid line in imports block at ${file}:${number}.`);
}
|
}
if (importMatch !== null) {
warnings.push(`Import found outside of imports block at ${file}:${number}.`);
}
let defMatch = line.match(/^(?:var|const|let)\s*(_(\w+?)_(\w+))\s*=/u);
if (defMatch === null) {
defMatch = line.match(/^function\*?\s*(_(\w+?)_(\w+))\s*\(/u);
}
if (defMatch !== null) {
if (defMatch[2] !== moduleName) {
errors.push(
`Kernel definition ${defMatch[1]} at ${file}:${number} should match _${moduleName}_*`
);
}
let defName = defMatch[3];
if (defName.endsWith("__DEBUG")) {
defName = defName.slice(0, defName.length - "__DEBUG".length);
} else if (defName.endsWith("__PROD")) {
defName = defName.slice(0, defName.length - "__PROD".length);
}
// TODO(Harry): check __DEBUG and __PROD match.
kernelDefinitions.add(`Elm.Kernel.${moduleName}.${defName}`);
}
let index = 0;
for (;;) {
const kernelCallMatch = line.slice(index).match(/_?_(\w+?)_\w+/u);
if (kernelCallMatch === null) {
break;
}
const isComment = line.slice(0, index + kernelCallMatch.index).includes("//");
const calledModuleName = kernelCallMatch[1];
const kernelCall = kernelCallMatch[0];
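      // Only treat the match as a kernel call when its module segment starts
      // with an uppercase letter (and not a digit), which filters out plain
      // snake_case identifiers.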
if (
calledModuleName[0] === calledModuleName[0].toUpperCase() &&
!(calledModuleName[0] >= "0" && calledModuleName[0] <= "9")
) {
if (kernelCall.startsWith("__")) {
if (isComment) {
errors.push(`Kernel call like syntax ${kernelCall} in comment at ${file}:${number}.`);
} else {
// External kernel call
const importFacts = imports.get(kernelCall);
if (importFacts === undefined) {
errors.push(`Kernel call ${kernelCall} at ${file}:${number} missing import`);
} else {
importFacts.used = true;
}
}
} else if (calledModuleName !== moduleName && !isComment) {
errors.push(
`Non-local kernel call ${kernelCall} at ${file}:${number} must start with a double underscore`
);
}
}
index += kernelCallMatch.index + kernelCallMatch[0].length;
}
}
if (inImport) {
errors.push(`Imports block is missing end at ${file}:${lastImportLineNumber}`);
}
for (const [kernelModule, { lineNumber, used }] of imports.entries()) {
if (!used) {
warnings.push(`Import of ${kernelModule} is unused in ${file}:${lineNumber}`);
}
}
return { errors, warnings };
}
async function main() {
if (process.argv.length < 3) {
console.error("check-kernel-imports: error! at least one path to source directories required");
process.exit(1);
}
if (process.argv.includes("-h") || process.argv.includes("--help")) {
console.log(HELP);
process.exit(0);
}
const sourceDirs = process.argv.slice(2);
// Keys: kernel definition full elm path
const kernelDefinitions = new Set();
// Keys: elm definition full elm path
const elmDefinitions = new Set();
// Keys: kernel call, values: array of CallLocations
const kernelCalls = new Map();
// Keys: full elm path of call, values: array of CallLocations
const elmCallsFromKernel = new Map();
const allErrors = [];
const allWarnings = [];
for await (const f of asyncFlatMap(sourceDirs, getSrcFiles)) {
const extname = path.extname(f);
if (extname === ".elm") {
const { errors, warnings } = await processElmFile(f, elmDefinitions, kernelCalls);
allErrors.push(...errors);
allWarnings.push(...warnings);
} else if (extname === ".js") {
const { errors, warnings } = await processJsFile(f, elmCallsFromKernel, kernelDefinitions);
allErrors.push(...errors);
allWarnings.push(...warnings);
}
}
for (const [call, locations] of kernelCalls.entries()) {
if (!kernelDefinitions.has(call)) {
for (const location of locations) {
allErrors.push(
`Kernel call ${call} at ${location.path}:${location.line} missing definition`
);
}
}
}
for (const [call, locations] of elmCallsFromKernel.entries()) {
if (!elmDefinitions.has(call) && !kernelDefinitions.has(call)) {
for (const location of locations) {
allErrors.push(`Import of ${call} at ${location.path}:${location.line} missing definition`);
}
}
}
console.error(`${allWarnings.length} warnings`);
console.error(allWarnings.join("\n"));
console.error("");
console.error(`${allErrors.length} errors`);
console.error(allErrors.join("\n"));
process.exitCode = allErrors.length === 0 ? 0 : 1;
}
main();
|
continue;
|
animationCurveEditorComponent.tsx
|
import * as React from "react";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { faTimes } from "@fortawesome/free-solid-svg-icons";
import { Animation } from 'babylonjs/Animations/animation';
import { Vector2 } from 'babylonjs/Maths/math.vector';
import { EasingFunction, BezierCurveEase } from 'babylonjs/Animations/easing';
import { IAnimationKey } from 'babylonjs/Animations/animationKey';
import { IKeyframeSvgPoint } from './keyframeSvgPoint';
import { SvgDraggableArea } from './svgDraggableArea';
import { Timeline } from './timeline';
import { Playhead } from './playhead';
import { Scene } from "babylonjs/scene";
import { ButtonLineComponent } from '../../../lines/buttonLineComponent';
import { IAnimatable } from 'babylonjs/Animations/animatable.interface';
require("./curveEditor.scss");
interface IAnimationCurveEditorComponentProps {
close: (event: any) => void;
playOrPause: () => void;
title: string;
animations: Animation[];
entityName: string;
scene: Scene;
entity: IAnimatable;
}
interface ICanvasAxis {
value: number;
}
export class AnimationCurveEditorComponent extends React.Component<IAnimationCurveEditorComponentProps, { animations: Animation[], animationName: string, animationTargetProperty: string, isOpen: boolean, selected: Animation, currentPathData: string | undefined, svgKeyframes: IKeyframeSvgPoint[] | undefined, currentFrame: number, frameAxisLength: ICanvasAxis[] }> {
readonly _heightScale: number = 100;
readonly _canvasLength: number = 20;
private _playheadOffset: number = 0;
private _newAnimations: Animation[] = [];
private _svgKeyframes: IKeyframeSvgPoint[] = [];
private _frames: Vector2[] = [];
private _isPlaying: boolean = false;
private _graphCanvas: React.RefObject<HTMLDivElement>;
constructor(props: IAnimationCurveEditorComponentProps) {
super(props);
this._graphCanvas = React.createRef();
this.state = { animations: this._newAnimations, selected: this.props.animations[0], isOpen: true, currentPathData: this.getPathData(this.props.animations[0]), svgKeyframes: this._svgKeyframes, animationTargetProperty: 'position.x', animationName: "", currentFrame: 0, frameAxisLength: (new Array(this._canvasLength)).fill(0).map((s, i) => { return { value: i * 10 } }) }
}
componentDidMount() {
if (this._graphCanvas.current) {
this._playheadOffset = (this._graphCanvas.current.children[1].clientWidth) / (this._canvasLength * 10)
}
}
handleNameChange(event: React.ChangeEvent<HTMLInputElement>) {
event.preventDefault();
this.setState({ animationName: event.target.value });
}
handlePropertyChange(event: React.ChangeEvent<HTMLInputElement>) {
event.preventDefault();
this.setState({ animationTargetProperty: event.target.value });
}
addAnimation() {
if (this.state.animationName != "" && this.state.animationTargetProperty != "") {
let animation = new Animation(this.state.animationName, this.state.animationTargetProperty, 30, Animation.ANIMATIONTYPE_FLOAT, Animation.ANIMATIONLOOPMODE_CYCLE);
var keys = [];
keys.push({
frame: 0,
value: 1
});
keys.push({
frame: 100,
value: 1
});
animation.setKeys(keys);
var bezierEase = new BezierCurveEase(10, 0, 10, 0);
bezierEase.setEasingMode(EasingFunction.EASINGMODE_EASEINOUT);
|
(this.props.entity as IAnimatable).animations?.push(animation);
}
}
addKeyFrame(event: React.MouseEvent<SVGSVGElement>) {
event.preventDefault();
if (event.button === 2) {
var svg = event.target as SVGSVGElement;
var pt = svg.createSVGPoint();
pt.x = event.clientX;
pt.y = event.clientY;
var inverse = svg.getScreenCTM()?.inverse();
var cursorpt = pt.matrixTransform(inverse);
var currentAnimation = this.state.selected;
var keys = currentAnimation.getKeys();
var height = 100;
var middle = (height / 2);
var keyValue;
if (cursorpt.y < middle) {
keyValue = 1 + ((100 / cursorpt.y) * .1)
}
if (cursorpt.y > middle) {
keyValue = 1 - ((100 / cursorpt.y) * .1)
}
keys.push({ frame: cursorpt.x, value: keyValue });
currentAnimation.setKeys(keys);
this.selectAnimation(currentAnimation);
}
}
updateKeyframe(keyframe: Vector2, index: number) {
let anim = this.state.selected as Animation;
var keys: IAnimationKey[] = [];
var svgKeyframes = this.state.svgKeyframes?.map((k, i) => {
if (i === index) {
k.keyframePoint.x = keyframe.x;
k.keyframePoint.y = keyframe.y;
}
var height = 100;
var middle = (height / 2);
var keyValue;
if (k.keyframePoint.y < middle) {
keyValue = 1 + ((100 / k.keyframePoint.y) * .1)
}
if (k.keyframePoint.y > middle) {
keyValue = 1 - ((100 / k.keyframePoint.y) * .1)
}
keys.push({ frame: k.keyframePoint.x, value: keyValue })
return k;
});
anim.setKeys(keys);
this.setState({ svgKeyframes: svgKeyframes })
}
getAnimationProperties(animation: Animation) {
let easingType, easingMode;
let easingFunction: EasingFunction = animation.getEasingFunction() as EasingFunction;
if (easingFunction === undefined) {
easingType = undefined
easingMode = undefined;
} else {
easingType = easingFunction.constructor.name;
easingMode = easingFunction.getEasingMode();
}
return { easingType, easingMode }
}
getPathData(animation: Animation) {
const { easingMode, easingType } = this.getAnimationProperties(animation);
const keyframes = animation.getKeys();
if (keyframes === undefined) {
return "";
}
const startKey = keyframes[0];
        // This assumes the start key is always 0... need to change this
let middle = this._heightScale / 2;
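        // SVG y grows downward, so values are mapped through
        // `heightScale - value * middle`, putting larger values nearer the top.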
// START OF LINE/CURVE
let data: string | undefined = `M${startKey.frame}, ${this._heightScale - (startKey.value * middle)}`;
if (easingType === undefined && easingMode === undefined) {
data = this.linearInterpolation(keyframes, data, middle);
} else {
let easingFunction = animation.getEasingFunction();
data = this.curvePath(keyframes, data, middle, easingFunction as EasingFunction)
}
return data;
}
drawAllFrames(initialKey: IAnimationKey, endKey: IAnimationKey, easingFunction: EasingFunction) {
let i = initialKey.frame;
for (i; i < endKey.frame; i++) {
            // (i * 100 / endKey.frame) would give the progress percentage; unused here.
let dy = easingFunction.easeInCore(i);
let value = this._heightScale - (dy * (this._heightScale / 2));
this._frames.push(new Vector2(i, value));
}
}
curvePath(keyframes: IAnimationKey[], data: string, middle: number, easingFunction: EasingFunction) {
        // Sample the eased curve at the 1/4 and 3/4 points to fit the bezier control points below
const u = .25;
const v = .75;
keyframes.forEach((key, i) => {
// Gets previous initial point of curve segment
var pointA = new Vector2(0, 0);
if (i === 0) {
pointA.x = key.frame;
pointA.y = this._heightScale - (key.value * middle);
this.setKeyframePoint([pointA], i, keyframes.length);
} else {
pointA.x = keyframes[i - 1].frame;
pointA.y = this._heightScale - (keyframes[i - 1].value * middle)
// Gets the end point of this curve segment
var pointB = new Vector2(key.frame, this._heightScale - (key.value * middle));
// Get easing value of percentage to get the bezier control points below
let du = easingFunction.easeInCore(u); // What to do here, when user edits the curve? Option 1: Modify the curve with the new control points as BezierEaseCurve(x,y,z,w)
let dv = easingFunction.easeInCore(v); // Option 2: Create a easeInCore function and adapt it with the new control points values... needs more revision.
// Direction of curve up/down
let yInt25 = 0;
if (pointB.y > pointA.y) { // if pointB.y > pointA.y = goes down
yInt25 = ((pointB.y - pointA.y) * du) + pointA.y
} else if (pointB.y < pointA.y) { // if pointB.y < pointA.y = goes up
yInt25 = pointA.y - ((pointA.y - pointB.y) * du);
}
let yInt75 = 0;
if (pointB.y > pointA.y) {
yInt75 = ((pointB.y - pointA.y) * dv) + pointA.y
} else if (pointB.y < pointA.y) {
yInt75 = pointA.y - ((pointA.y - pointB.y) * dv)
}
// Intermediate points in curve
let intermediatePoint25 = new Vector2(((pointB.x - pointA.x) * u) + pointA.x, yInt25);
let intermediatePoint75 = new Vector2(((pointB.x - pointA.x) * v) + pointA.x, yInt75);
// Gets the four control points of bezier curve
let controlPoints = this.interpolateControlPoints(pointA, intermediatePoint25, u, intermediatePoint75, v, pointB);
if (controlPoints === undefined) {
console.log("error getting bezier control points");
} else {
this.setKeyframePoint(controlPoints, i, keyframes.length);
data += ` C${controlPoints[1].x} ${controlPoints[1].y} ${controlPoints[2].x} ${controlPoints[2].y} ${controlPoints[3].x} ${controlPoints[3].y}`
}
}
});
return data;
}
renderPoints(updatedSvgKeyFrame: IKeyframeSvgPoint, index: number) {
let animation = this.state.selected as Animation;
let keys = [...animation.getKeys()];
let newFrame = 0;
if (updatedSvgKeyFrame.keyframePoint.x !== 0) {
if (updatedSvgKeyFrame.keyframePoint.x > 0 && updatedSvgKeyFrame.keyframePoint.x < 1) {
newFrame = 1;
} else {
newFrame = Math.round(updatedSvgKeyFrame.keyframePoint.x);
}
}
keys[index].frame = newFrame; // This value comes as percentage/frame/time
        keys[index].value = ((this._heightScale - updatedSvgKeyFrame.keyframePoint.y) / this._heightScale) * 2; // the SVG y-axis is inverted: y = 0 is the top of the chart, y = _heightScale the bottom
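        // e.g. with _heightScale = 100: y = 0 (top of the SVG) maps to value 2,
        // y = 50 maps to value 1, and y = 100 (bottom) maps to value 0.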
animation.setKeys(keys);
this.selectAnimation(animation);
}
linearInterpolation(keyframes: IAnimationKey[], data: string, middle: number): string {
keyframes.forEach((key, i) => {
var point = new Vector2(0, 0);
point.x = key.frame;
point.y = this._heightScale - (key.value * middle);
this.setKeyframePointLinear(point, i);
if (i !== 0) {
data += ` L${point.x} ${point.y}`
}
});
return data;
}
setKeyframePointLinear(point: Vector2, index: number) {
let svgKeyframe = { keyframePoint: point, rightControlPoint: null, leftControlPoint: null, id: index.toString() }
this._svgKeyframes.push(svgKeyframe);
}
setKeyframePoint(controlPoints: Vector2[], index: number, keyframesCount: number) {
let svgKeyframe;
if (index === 0) {
svgKeyframe = { keyframePoint: controlPoints[0], rightControlPoint: null, leftControlPoint: null, id: index.toString() }
} else {
this._svgKeyframes[index - 1].rightControlPoint = controlPoints[1];
svgKeyframe = { keyframePoint: controlPoints[3], rightControlPoint: null, leftControlPoint: controlPoints[2], id: index.toString() }
}
this._svgKeyframes.push(svgKeyframe);
}
isAnimationPlaying() {
this._isPlaying = this.props.scene.getAllAnimatablesByTarget(this.props.entity).length > 0;
if (this._isPlaying) {
this.props.playOrPause();
} else {
this._isPlaying = false;
}
}
selectAnimation(animation: Animation) {
this.isAnimationPlaying();
this._svgKeyframes = [];
const pathData = this.getPathData(animation);
if (pathData === "") {
console.log("no keyframes in this animation");
}
this.setState({ selected: animation, currentPathData: pathData, svgKeyframes: this._svgKeyframes });
}
interpolateControlPoints(p0: Vector2, p1: Vector2, u: number, p2: Vector2, v: number, p3: Vector2): Vector2[] | undefined {
let a = 0.0;
let b = 0.0;
let c = 0.0;
let d = 0.0;
let det = 0.0;
let q1: Vector2 = new Vector2();
let q2: Vector2 = new Vector2();
let controlA: Vector2 = p0;
let controlB: Vector2 = new Vector2();
let controlC: Vector2 = new Vector2();
let controlD: Vector2 = p3;
if ((u <= 0.0) || (u >= 1.0) || (v <= 0.0) || (v >= 1.0) || (u >= v)) {
return undefined;
}
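        // Illustrative note: for a cubic Bezier
        //   B(t) = (1-t)^3*P0 + 3(1-t)^2*t*C1 + 3(1-t)*t^2*C2 + t^3*P3,
        // requiring B(u) = p1 and B(v) = p2 yields the 2x2 linear system
        //   a*C1 + b*C2 = q1 and c*C1 + d*C2 = q2,
        // with the Bernstein coefficients a, b, c, d computed below. It is
        // solved for the inner control points C1 and C2 via Cramer's rule,
        // hence the determinant check.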
a = 3 * (1 - u) * (1 - u) * u; b = 3 * (1 - u) * u * u;
c = 3 * (1 - v) * (1 - v) * v; d = 3 * (1 - v) * v * v;
det = a * d - b * c;
        if (det === 0) { return undefined; }
q1.x = p1.x - ((1 - u) * (1 - u) * (1 - u) * p0.x + u * u * u * p3.x);
q1.y = p1.y - ((1 - u) * (1 - u) * (1 - u) * p0.y + u * u * u * p3.y);
q2.x = p2.x - ((1 - v) * (1 - v) * (1 - v) * p0.x + v * v * v * p3.x);
q2.y = p2.y - ((1 - v) * (1 - v) * (1 - v) * p0.y + v * v * v * p3.y);
controlB.x = (d * q1.x - b * q2.x) / det;
controlB.y = (d * q1.y - b * q2.y) / det;
controlC.x = ((-c) * q1.x + a * q2.x) / det;
controlC.y = ((-c) * q1.y + a * q2.y) / det;
return [controlA, controlB, controlC, controlD];
}
changeCurrentFrame(frame: number) {
this.setState({ currentFrame: frame });
}
render() {
return (
<div id="animation-curve-editor">
<div className="header">
<div className="title">{this.props.title}</div>
<div className="close" onClick={(event: React.MouseEvent<HTMLDivElement, MouseEvent>) => this.props.close(event)}>
<FontAwesomeIcon icon={faTimes} />
</div>
</div>
<div className="content">
<div className="row">
<div className="animation-list">
<div>
<div className="label-input">
<label>Animation Name</label>
<input type="text" value={this.state.animationName} onChange={(e) => this.handleNameChange(e)}></input>
</div>
<div className="label-input">
<label>Target Property</label>
<input type="text" value={this.state.animationTargetProperty} onChange={(e) => this.handlePropertyChange(e)}></input>
</div>
<ButtonLineComponent label={"Add Animation"} onClick={() => this.addAnimation()} />
</div>
<div className="object-tree">
<h2>{this.props.entityName}</h2>
<ul>
{this.props.animations && this.props.animations.map((animation, i) => {
return <li className={this.state.selected.name === animation.name ? 'active' : ''} key={i} onClick={() => this.selectAnimation(animation)}>{animation.name} <strong>{animation.targetProperty}</strong></li>
})}
</ul>
</div>
</div>
<div ref={this._graphCanvas} className="graph-chart">
<Playhead frame={this.state.currentFrame} offset={this._playheadOffset} />
{this.state.svgKeyframes && <SvgDraggableArea keyframeSvgPoints={this.state.svgKeyframes} updatePosition={(updatedSvgKeyFrame: IKeyframeSvgPoint, index: number) => this.renderPoints(updatedSvgKeyFrame, index)}>
{/* Frame Labels */}
{ /* Vertical Grid */}
{this.state.frameAxisLength.map((f, i) =>
<svg key={i}>
<text x={f.value} y="0" dx="-1em" style={{ font: 'italic 0.2em sans-serif' }}>{f.value}</text>
<line x1={f.value} y1="0" x2={f.value} y2="100"></line>
</svg>
)}
{ /* Value Labels */}
<text x="0" y="10" dx="-1em" style={{ font: 'italic 0.2em sans-serif' }}>1.8</text>
<text x="0" y="20" dx="-1em" style={{ font: 'italic 0.2em sans-serif' }}>1.6</text>
<text x="0" y="30" dx="-1em" style={{ font: 'italic 0.2em sans-serif' }}>1.4</text>
<text x="0" y="40" dx="-1em" style={{ font: 'italic 0.2em sans-serif' }}>1.2</text>
<text x="0" y="50" dx="-1em" style={{ font: 'italic 0.2em sans-serif' }}>1</text>
<text x="0" y="60" dx="-1em" style={{ font: 'italic 0.2em sans-serif' }}>0.8</text>
<text x="0" y="70" dx="-1em" style={{ font: 'italic 0.2em sans-serif' }}>0.6</text>
<text x="0" y="80" dx="-1em" style={{ font: 'italic 0.2em sans-serif' }}>0.4</text>
<text x="0" y="90" dx="-1em" style={{ font: 'italic 0.2em sans-serif' }}>0.2</text>
{ /* Horizontal Grid */}
<line x1="0" y1="10" x2="1000" y2="10"></line>
<line x1="0" y1="20" x2="1000" y2="20"></line>
<line x1="0" y1="30" x2="1000" y2="30"></line>
<line x1="0" y1="40" x2="1000" y2="40"></line>
<line x1="0" y1="50" x2="1000" y2="50"></line>
<line x1="0" y1="60" x2="1000" y2="60"></line>
<line x1="0" y1="70" x2="1000" y2="70"></line>
<line x1="0" y1="80" x2="1000" y2="80"></line>
<line x1="0" y1="90" x2="1000" y2="90"></line>
{ /* Single Curve -Modify this for multiple selection and view */}
<path id="curve" d={this.state.currentPathData} style={{ stroke: 'red', fill: 'none', strokeWidth: '0.5' }}></path>
                                {this._frames && this._frames.map((frame, i) =>
                                    <svg key={i} x={frame.x} y={frame.y} style={{ overflow: 'visible' }}>
<circle cx="0" cy="0" r="2" stroke="black" strokeWidth="1" fill="white" />
</svg>
)}
</SvgDraggableArea>
}
</div>
</div>
<div className="row">
<Timeline currentFrame={this.state.currentFrame} onCurrentFrameChange={(frame: number) => this.changeCurrentFrame(frame)} keyframes={this.state.selected.getKeys()} selected={this.state.selected.getKeys()[0]}></Timeline>
</div>
</div>
</div>
);
}
}
|
animation.setEasingFunction((bezierEase as unknown) as EasingFunction);
// Need to redefine/refactor not to update the prop collection
|
__init__.py
from __future__ import absolute_import, print_function, division
from .scala_kernel import SpylonKernel
from .scala_magic import ScalaMagic
from .init_spark_magic import InitSparkMagic
from .scala_interpreter import get_scala_interpreter
def register_ipython_magics():
"""For usage within ipykernel.
This will instantiate the magics for IPython
"""
from metakernel import IPythonKernel
from IPython.core.magic import register_cell_magic, register_line_cell_magic
kernel = IPythonKernel()
scala_magic = ScalaMagic(kernel)
init_spark_magic = InitSparkMagic(kernel)
@register_line_cell_magic
    def scala(line, cell):
if line:
return scala_magic.line_scala(line)
else:
scala_magic.code = cell
return scala_magic.cell_scala()
@register_cell_magic
def init_spark(line, cell):
init_spark_magic.code = cell
return init_spark_magic.cell_init_spark()
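
# Illustrative usage from an IPython session (assumes register_ipython_magics()
# has already been called):
#
#     %%scala
#     val greeting = "hello from scala"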
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
backend.go
package backend
import (
"crypto/tls"
"fmt"
"net"
"net/http"
"os"
"os/signal"
"syscall"
"text/template"
log "github.com/Sirupsen/logrus"
"github.com/mwitkow/go-grpc-middleware"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/net/context"
"github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/mchudgins/certMgr/pkg/certMgr"
"github.com/mchudgins/certMgr/pkg/healthz"
|
"google.golang.org/grpc/credentials"
)
type server struct {
cfg certMgr.AppConfig
ca *ca
}
func grpcEndpointLog(s string) grpc.UnaryServerInterceptor {
return func(ctx context.Context,
req interface{},
info *grpc.UnaryServerInfo,
handler grpc.UnaryHandler) (interface{}, error) {
log.Debugf("grpcEndpointLog %s+", s)
defer log.Debugf("grpcEndpointLog %s-", s)
return handler(ctx, req)
}
}
// Run the backend command
func Run(cfg *certMgr.AppConfig) {
server := &server{cfg: *cfg}
// set the log level
if cfg.Verbose {
log.SetLevel(log.DebugLevel)
}
hostname, err := os.Hostname()
if err != nil {
log.Fatal(err)
}
// create the Certificate Authority
	server.ca, err = NewCertificateAuthorityFromConfig(cfg)
	if err != nil {
		log.WithError(err).Fatal("unable to create the Certificate Authority")
	}
	// make a channel to listen for events,
	// then launch the servers.
errc := make(chan error)
// interrupt handler
go func() {
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
errc <- fmt.Errorf("%s", <-c)
}()
// gRPC server
go func() {
lis, err := net.Listen("tcp", cfg.GRPCListenAddress)
if err != nil {
errc <- err
return
}
var s *grpc.Server
if cfg.Insecure {
s = grpc.NewServer(
grpc_middleware.WithUnaryServerChain(
grpc_prometheus.UnaryServerInterceptor,
grpcEndpointLog("certMgr")))
} else {
tlsCreds, err := credentials.NewServerTLSFromFile(cfg.CertFilename, cfg.KeyFilename)
if err != nil {
log.WithError(err).Fatal("Failed to generate grpc TLS credentials")
}
s = grpc.NewServer(
grpc.Creds(tlsCreds),
grpc.RPCCompressor(grpc.NewGZIPCompressor()),
grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
grpc_middleware.WithUnaryServerChain(
grpc_prometheus.UnaryServerInterceptor,
grpcEndpointLog("certMgr")))
}
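		// For reference, a client of this endpoint would dial with matching
		// transport credentials (illustrative sketch only; the CA file path
		// is a placeholder):
		//   creds, _ := credentials.NewClientTLSFromFile("ca.pem", "")
		//   conn, _ := grpc.Dial(cfg.GRPCListenAddress, grpc.WithTransportCredentials(creds))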
pb.RegisterCertMgrServer(s, server)
if cfg.Insecure {
log.Warnf("gRPC service listening insecurely on %s", cfg.GRPCListenAddress)
} else {
log.Infof("gRPC service listening on %s", cfg.GRPCListenAddress)
}
errc <- s.Serve(lis)
}()
// http server
go func() {
		hc, err := healthz.NewConfig(cfg)
		if err != nil {
			log.Panic(err)
		}
		healthzHandler, err := healthz.Handler(hc)
		if err != nil {
			log.Panic(err)
		}
http.Handle("/healthz", healthzHandler)
http.Handle("/metrics", prometheus.Handler())
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
type data struct {
Hostname string
}
tmp, err := template.New("/").Parse(html)
if err != nil {
log.WithError(err).WithField("template", "/").Errorf("Unable to parse template")
return
}
err = tmp.Execute(w, data{Hostname: hostname})
if err != nil {
log.WithError(err).Error("Unable to execute template")
}
})
tlsServer := &http.Server{
Addr: cfg.HTTPListenAddress,
TLSConfig: &tls.Config{
MinVersion: tls.VersionTLS12,
},
}
// FIXME: cluster can't health check the self-signed cert endpoint
if true {
log.Warnf("HTTP service listening insecurely on %s", cfg.HTTPListenAddress)
errc <- http.ListenAndServe(cfg.HTTPListenAddress, nil)
} else {
log.Infof("HTTPS service listening on %s", cfg.HTTPListenAddress)
errc <- tlsServer.ListenAndServeTLS(cfg.CertFilename, cfg.KeyFilename)
}
}()
	// wait for somethin'
log.Infof("exit: %s", <-errc)
}
test.js
'use strict';
/**
* cydia-api-node
* @version 0.1.0
* @author 1Conan <[email protected]> (https://1conan.com)
*/
const cydiaapi = require('./index.js');
const freeTweakName = 'cydia';
const freeTweakDisplay = 'Cydia installer';
const paidTweakName = 'com.ziph0n.pickpocket';
const paidTweakDisplay = 'PickPocket';
let price, info;
console.log("Starting tests...");
price = cydiaapi.getPrice(freeTweakName).then(info => {
console.log("\nTesting getPrice() for Free Package (cydia)");
console.log(info)
});
price = cydiaapi.getPrice(paidTweakName).then(info => {
console.log("\nTesting getPrice() for Paid Package (PickPocket)");
console.log(info)
});
info = cydiaapi.getInfo(freeTweakName).then(info => {
console.log("\nTesting getInfo() for Free Package using package name (cydia)");
console.log(info)
});
info = cydiaapi.getInfo(freeTweakDisplay).then(info => {
console.log("\nTesting getInfo() for Free Package using display name (Cydia Installer)");
console.log(info)
});
info = cydiaapi.getInfo(paidTweakName).then(info => {
console.log("\nTesting getInfo() for Paid Package using package name (com.ziph0n.pickpocket)");
console.log(info)
});
info = cydiaapi.getInfo(paidTweakDisplay).then(info => {
console.log("\nTesting getInfo() for Paid Package using display name (PickPocket)");
console.log(info)
});
info = cydiaapi.getAllInfo(freeTweakName).then(info => {
console.log("\nTesting getAllInfo() for Free Package using package name (cydia)");
console.log(info)
});
info = cydiaapi.getAllInfo(freeTweakDisplay).then(info => {
console.log("\nTesting getAllInfo() for Free Package using display name (Cydia Installer)");
console.log(info)
});
info = cydiaapi.getAllInfo(paidTweakName).then(info => {
console.log("\nTesting getAllInfo() for Paid Package using package name (com.ziph0n.pickpocket)");
console.log(info)
});
info = cydiaapi.getAllInfo(paidTweakDisplay).then(info => {
    console.log("\nTesting getAllInfo() for Paid Package using display name (PickPocket)");
    console.log(info)
});
cached.rs
// Copyright (c) 2019 Jason White
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
use std::fmt;
use std::io;
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use bytes::Bytes;
use futures::{
channel::oneshot,
future::{self, FutureExt, TryFutureExt},
stream::{StreamExt, TryStreamExt},
};
use humansize::{file_size_opts as file_size, FileSize};
use tokio::{self, sync::Mutex};
use crate::lru;
use super::{LFSObject, Storage, StorageKey, StorageStream};
type Cache = lru::Cache<StorageKey>;
#[derive(Debug)]
pub enum Error<C, S> {
/// An error that occurred in the cache.
Cache(C),
/// An error that occurred in the storage backend.
Storage(S),
/// An error that occurred in the stream.
Stream(io::Error),
}
impl<C, S> fmt::Display for Error<C, S>
where
C: fmt::Display,
S: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Error::Cache(x) => fmt::Display::fmt(&x, f),
Error::Storage(x) => fmt::Display::fmt(&x, f),
Error::Stream(x) => fmt::Display::fmt(&x, f),
}
}
}
impl<C, S> Error<C, S> {
pub fn from_cache(error: C) -> Self {
Error::Cache(error)
}
pub fn from_storage(error: S) -> Self {
Error::Storage(error)
}
pub fn from_stream(error: io::Error) -> Self {
Error::Stream(error)
}
}
impl<C, S> std::error::Error for Error<C, S>
where
C: fmt::Debug + fmt::Display,
S: fmt::Debug + fmt::Display,
{
}
/// Combines a cache with a permanent storage backend such that if a query to
/// the cache fails, it falls back to permanent storage.
pub struct Backend<C, S> {
lru: Arc<Mutex<Cache>>,
max_size: u64,
cache: Arc<C>,
storage: Arc<S>,
}
impl<C, S> Backend<C, S>
where
C: Storage + Send + Sync,
S: Storage,
{
pub async fn new(
max_size: u64,
cache: C,
storage: S,
) -> Result<Self, C::Error> {
let lru = Cache::from_stream(cache.list()).await?;
log::info!(
"Prepopulated cache with {} entries ({})",
lru.len(),
lru.size()
.file_size(file_size::DECIMAL)
.unwrap_or_else(|e| e)
);
let lru = Arc::new(Mutex::new(lru));
let cache = Arc::new(cache);
// Prune the cache. The maximum size setting may have changed
// between server invocations. Thus, prune it down right away
// instead of waiting for a client to do an upload.
let count = prune_cache(lru.clone(), max_size, cache.clone()).await?;
if count > 0 {
log::info!("Pruned {} entries from the cache", count);
}
Ok(Backend {
lru,
max_size,
cache,
storage: Arc::new(storage),
})
}
}
/// Returns a future that prunes the least recently used entries that cause the
/// storage to exceed the given maximum size.
async fn prune_cache<S>(
lru: Arc<Mutex<Cache>>,
max_size: u64,
storage: Arc<S>,
) -> Result<usize, S::Error>
where
S: Storage + Send + Sync,
{
if max_size == 0 {
// The cache can have unlimited size.
return Ok(0);
}
let mut deleted = 0;
let mut lru = lru.lock().await;
while lru.size() > max_size {
if let Some((key, _)) = lru.pop() {
log::debug!("Pruning '{}' from cache", key);
let _ = storage.delete(&key).await;
deleted += 1;
}
}
Ok(deleted)
}
async fn cache_and_prune<C>(
cache: Arc<C>,
key: StorageKey,
obj: LFSObject,
lru: Arc<Mutex<Cache>>,
max_size: u64,
) -> Result<(), C::Error>
where
C: Storage + Send + Sync,
{
let len = obj.len();
let oid = *key.oid();
log::debug!("Caching {}", oid);
cache.put(key.clone(), obj).await?;
log::debug!("Finished caching {}", oid);
// Add the object info to our LRU cache once the download from
// permanent storage is complete.
{
let mut lru = lru.lock().await;
lru.push(key, len);
}
match prune_cache(lru, max_size, cache).await {
Ok(count) => {
if count > 0 {
log::info!("Pruned {} entries from the cache", count);
}
Ok(())
}
Err(err) => {
log::error!("Error caching {} ({})", oid, err);
Err(err)
}
}
}
#[async_trait]
impl<C, S> Storage for Backend<C, S>
where
S: Storage + Send + Sync + 'static,
S::Error: 'static,
C: Storage + Send + Sync + 'static,
C::Error: 'static,
{
type Error = Error<C::Error, S::Error>;
/// Tries to query the cache first. If that fails, falls back to the
/// permanent storage backend.
async fn get(
&self,
key: &StorageKey,
) -> Result<Option<LFSObject>, Self::Error> {
// TODO: Keep stats on cache hits and misses. We can then display those
// stats on a web page or send them to another service such as
// Prometheus.
if self.lru.lock().await.get_refresh(key).is_some() {
// Cache hit! (Probably)
let obj = self.cache.get(&key).await.map_err(Error::from_cache)?;
            return match obj {
                Some(obj) => Ok(Some(obj)),
                None => {
// If the cache doesn't actually have it, delete the entry
// from our LRU. This can happen if the cache is cleared out
// manually.
let mut lru = self.lru.lock().await;
lru.remove(&key);
// Fall back to permanent storage. Note that this won't
// actually cache the object. This will be done next time
// the same object is requested.
self.storage.get(&key).await.map_err(Error::from_storage)
}
};
}
// Cache miss. Get the object from permanent storage. If successful, we
// need to cache the resulting byte stream.
let lru = self.lru.clone();
let max_size = self.max_size;
let cache = self.cache.clone();
let key = key.clone();
let obj = self.storage.get(&key).await.map_err(Error::from_storage)?;
match obj {
Some(obj) => {
// Cache the returned LFS object.
let (f, a, b) = obj.fanout();
// Cache the object in the background. Whether or not this
// succeeds shouldn't prevent the client from getting the LFS
// object. For example, even if we run out of disk space, the
// server should still continue operating.
let cache =
cache_and_prune(cache, key.clone(), b, lru, max_size)
.map_err(Error::from_cache);
tokio::spawn(
future::try_join(f.map_err(Error::from_stream), cache)
.map_ok(|((), ())| ())
.map_err(move |err: Self::Error| {
log::error!("Error caching {} ({})", key, err);
}),
);
// Send the object from permanent-storage.
Ok(Some(a))
}
None => {
// The permanent storage also doesn't have it.
//
// Note that we cannot cache the non-existence of an object
// because the storage backend can be manipulated independently
// of the cache. There can also be multiple instances of caches
// per storage backend.
Ok(None)
}
}
}
async fn put(
&self,
key: StorageKey,
value: LFSObject,
) -> Result<(), Self::Error> {
let lru = self.lru.clone();
let max_size = self.max_size;
let cache = self.cache.clone();
let (f, a, b) = value.fanout();
// Note: We can only cache an object if it is successfully uploaded to
// the store. Thus, we do something clever with this one shot channel.
//
// When the permanent storage finishes receiving its LFS object, we send
// a signal to be received by an empty chunk at the end of the stream
// going to the cache. Then, the cache only receives its last (empty)
// chunk when the LFS object has been successfully stored.
let (signal_sender, signal_receiver) = oneshot::channel();
let store = self
.storage
.put(key.clone(), a)
.map_ok(move |()| {
// Send a signal to the cache so that it can complete its write.
log::debug!("Received last chunk from server.");
signal_sender.send(()).unwrap_or(())
})
.map_err(Error::from_storage);
let (len, stream) = b.into_parts();
// Add an empty chunk to the end of the stream whose only job is to
// complete when it receives a signal that the upload to permanent
// storage has completed.
let stream = stream.chain(
signal_receiver
.map_ok(|()| Bytes::new())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
.into_stream(),
);
let cache = cache_and_prune(
cache,
key,
LFSObject::new(len, Box::pin(stream)),
lru,
max_size,
)
.map_err(Error::from_cache);
future::try_join3(f.map_err(Error::from_stream), cache, store).await?;
Ok(())
}
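    // The signaling pattern above, reduced to a minimal sketch (illustrative
    // only; `upload_stream` stands in for the cache half of the fanout):
    //
    //     let (tx, rx) = oneshot::channel::<()>();
    //     let tail = rx
    //         .map_ok(|()| Bytes::new())
    //         .map_err(|e| io::Error::new(io::ErrorKind::Other, e))
    //         .into_stream();
    //     let guarded = upload_stream.chain(tail);
    //     // ...and once permanent storage acknowledges the upload:
    //     let _ = tx.send(());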
async fn size(&self, key: &StorageKey) -> Result<Option<u64>, Self::Error> {
// Get just the size of an object without perturbing the LRU ordering.
// Only downloads or uploads need to perturb the LRU ordering.
let lru = self.lru.lock().await;
if let Some(size) = lru.get(key) {
// Cache hit!
Ok(Some(size))
} else {
// Cache miss. Check permanent storage.
self.storage.size(key).await.map_err(Error::from_storage)
}
}
/// Deletes an item from the cache (not from permanent storage).
async fn delete(&self, key: &StorageKey) -> Result<(), Self::Error> {
// Only ever delete items from the cache. This may be called when
// a corrupted object is detected.
log::info!("Deleted {} from the cache", key);
self.cache.delete(key).await.map_err(Error::from_cache)
}
/// Returns a stream of cached items.
fn list(&self) -> StorageStream<(StorageKey, u64), Self::Error> {
// TODO: Use the LRU instead to get this list.
Box::pin(self.cache.list().map_err(Error::from_cache))
}
/// Returns the total size of the LRU cache (not the total size of the
/// permanent storage).
async fn total_size(&self) -> Option<u64> {
Some(self.lru.lock().await.size())
}
/// Returns the maximum size of the LRU cache (not the maximum size of the
/// permanent storage).
async fn max_size(&self) -> Option<u64> {
if self.max_size == 0 {
None
} else {
Some(self.max_size)
}
}
fn public_url(&self, key: &StorageKey) -> Option<String> {
self.storage.public_url(key)
}
async fn upload_url(
&self,
key: &StorageKey,
expires_in: Duration,
) -> Option<String> {
self.storage.upload_url(key, expires_in).await
}
}
main.go
import "fmt"
func countCharacters(words []string, chars string) int {
var l = 0
var m [26]int
for _, c := range chars {
m[c-'a']++
}
for _, word := range words {
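		// Arrays are value types in Go, so `cm := m` below copies the whole
		// histogram; belong() can then decrement its copy without clobbering
		// m for the next word.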
cm := m
if belong(word, cm) {
l += len(word)
}
}
return l
}
func belong(sub string, master [26]int) bool {
for _, c := range sub {
if v := master[c-'a']; v == 0 {
return false
} else {
master[c-'a']--
}
}
return true
}
func main() {
fmt.Println(countCharacters([]string{"hello","world","leetcode"}, "welldonehoneyr"))
}
worker.py
import yaml
from celery import Celery
from pymongo import MongoClient
from models.todo_dao import MongoDAO
from models.todo import TodoSchema
from library.utils import replace_env, make_url
with open("/config/todos/default_config.yml", "r") as f:
config = yaml.load(f, yaml.SafeLoader)
replace_env(config)
url = make_url(config["database"]["mongo"], include_db=False)
client = MongoClient(url)
collection = client.todos.todos_collection
broker_url = make_url(config["celery"]["broker"])
results_backend_url = make_url(config["celery"]["results_backend"])
celery = Celery(__name__, broker=broker_url, backend=results_backend_url)
@celery.task(name="tasks.worker.get_all_todos")
def get_all_todos(dao=MongoDAO(collection, TodoSchema)):
return TodoSchema(many=True).dump(dao.get_all())
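
# Illustrative usage (requires the broker and results backend configured above
# to be reachable):
#     result = get_all_todos.delay()
#     todos = result.get(timeout=10)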
crate-owner-invite.js
import Model, { attr } from '@ember-data/model';
export default Model.extend({
invited_by_username: attr('string'),
crate_name: attr('string'),
crate_id: attr('number'),
created_at: attr('date'),
accepted: attr('boolean', { defaultValue: false }),
});
lib.rs
/// Here is a function. It has a doctest, but the doctest does not even build.
///
/// cargo-mutants tests use this to check that doctests can be skipped.
///
/// ```
/// # use mutants_testdata_already_failing_doctests::takes_one_arg;
/// takes_one_arg(123,123,123);
/// ```
pub fn takes_one_arg(a: usize) -> usize
{
    a + 1
}

mod test {
#[test]
fn takes_one_arg() {
assert_eq!(super::takes_one_arg(1), 2);
}
}
NfsMountInst.py
"""This module contains the general information for NfsMountInst ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class NfsMountInstConsts:
ADMIN_STATE_MOUNT = "mount"
ADMIN_STATE_NONE = "none"
ADMIN_STATE_REMOUNT = "remount"
ADMIN_STATE_UNMOUNT = "unmount"
CLIENT_CONFIG_STATE_CONFIGURED = "configured"
CLIENT_CONFIG_STATE_FAILED = "failed"
CLIENT_CONFIG_STATE_REGISTERED = "registered"
CLIENT_CONFIG_STATE_UNREGISTERED = "unregistered"
FSM_PREV_MOUNT_BEGIN = "MountBegin"
FSM_PREV_MOUNT_FAIL = "MountFail"
FSM_PREV_MOUNT_MOUNT_LOCAL = "MountMountLocal"
FSM_PREV_MOUNT_MOUNT_PEER = "MountMountPeer"
FSM_PREV_MOUNT_REGISTER_CLIENT = "MountRegisterClient"
FSM_PREV_MOUNT_SUCCESS = "MountSuccess"
FSM_PREV_MOUNT_VERIFY_REGISTRATION = "MountVerifyRegistration"
FSM_PREV_UNMOUNT_BEGIN = "UnmountBegin"
FSM_PREV_UNMOUNT_FAIL = "UnmountFail"
FSM_PREV_UNMOUNT_SUCCESS = "UnmountSuccess"
FSM_PREV_UNMOUNT_UNMOUNT_LOCAL = "UnmountUnmountLocal"
FSM_PREV_UNMOUNT_UNMOUNT_PEER = "UnmountUnmountPeer"
FSM_PREV_NOP = "nop"
FSM_RMT_INV_ERR_CODE_ERR_2FA_AUTH_RETRY = "ERR-2fa-auth-retry"
FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_FAILED = "ERR-ACTIVATE-failed"
FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_IN_PROGRESS = "ERR-ACTIVATE-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_ACTIVATE_RETRY = "ERR-ACTIVATE-retry"
FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_BIOS = "ERR-BIOS-TOKENS-OLD-BIOS"
FSM_RMT_INV_ERR_CODE_ERR_BIOS_TOKENS_OLD_CIMC = "ERR-BIOS-TOKENS-OLD-CIMC"
FSM_RMT_INV_ERR_CODE_ERR_BIOS_NETWORK_BOOT_ORDER_NOT_FOUND = "ERR-BIOS-network-boot-order-not-found"
FSM_RMT_INV_ERR_CODE_ERR_BOARDCTRLUPDATE_IGNORE = "ERR-BOARDCTRLUPDATE-ignore"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_USB_UNMOUNTED = "ERR-DNLD-usb-unmounted"
FSM_RMT_INV_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_IN_PROGRESS = "ERR-Diagnostics-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_MEMTEST_IN_PROGRESS = "ERR-Diagnostics-memtest-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_DIAGNOSTICS_NETWORK_IN_PROGRESS = "ERR-Diagnostics-network-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
FSM_RMT_INV_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
FSM_RMT_INV_ERR_CODE_ERR_HOST_FRU_IDENTITY_MISMATCH = "ERR-HOST-fru-identity-mismatch"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_ANALYZE_RESULTS = "ERR-IBMC-analyze-results"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECT_ERROR = "ERR-IBMC-connect-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_CONNECTOR_INFO_RETRIEVAL_ERROR = "ERR-IBMC-connector-info-retrieval-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_FRU_RETRIEVAL_ERROR = "ERR-IBMC-fru-retrieval-error"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_INVALID_END_POINT_CONFIG = "ERR-IBMC-invalid-end-point-config"
FSM_RMT_INV_ERR_CODE_ERR_IBMC_RESULTS_NOT_READY = "ERR-IBMC-results-not-ready"
FSM_RMT_INV_ERR_CODE_ERR_MAX_SUBSCRIPTIONS_ALLOWED_ERROR = "ERR-MAX-subscriptions-allowed-error"
FSM_RMT_INV_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
FSM_RMT_INV_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
FSM_RMT_INV_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
FSM_RMT_INV_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
FSM_RMT_INV_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
FSM_RMT_INV_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
FSM_RMT_INV_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_POWER_CAP_UNSUPPORTED = "ERR-POWER-CAP-UNSUPPORTED"
FSM_RMT_INV_ERR_CODE_ERR_POWER_PROFILE_IN_PROGRESS = "ERR-POWER-PROFILE-IN-PROGRESS"
FSM_RMT_INV_ERR_CODE_ERR_SERVER_MIS_CONNECT = "ERR-SERVER-mis-connect"
FSM_RMT_INV_ERR_CODE_ERR_SWITCH_INVALID_IF_CONFIG = "ERR-SWITCH-invalid-if-config"
FSM_RMT_INV_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
FSM_RMT_INV_ERR_CODE_ERR_UNABLE_TO_FETCH_BIOS_SETTINGS = "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_FAILED = "ERR-UPDATE-failed"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_IN_PROGRESS = "ERR-UPDATE-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_RETRY = "ERR-UPDATE-retry"
FSM_RMT_INV_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_ISSUE = "ERR-auth-issue"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_GET_ERROR = "ERR-auth-realm-get-error"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
FSM_RMT_INV_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
FSM_RMT_INV_ERR_CODE_ERR_CLI_SESSION_LIMIT_REACHED = "ERR-cli-session-limit-reached"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_TP = "ERR-create-tp"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
FSM_RMT_INV_ERR_CODE_ERR_DOWNGRADE_FAIL = "ERR-downgrade-fail"
FSM_RMT_INV_ERR_CODE_ERR_EFI_DIAGNOSTICS_IN_PROGRESS = "ERR-efi-Diagnostics--in-progress"
FSM_RMT_INV_ERR_CODE_ERR_ENABLE_MGMT_CONN = "ERR-enable-mgmt-conn"
FSM_RMT_INV_ERR_CODE_ERR_EP_SET_ERROR = "ERR-ep-set-error"
FSM_RMT_INV_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
FSM_RMT_INV_ERR_CODE_ERR_INSUFFICIENTLY_EQUIPPED = "ERR-insufficiently-equipped"
FSM_RMT_INV_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
FSM_RMT_INV_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
FSM_RMT_INV_ERR_CODE_ERR_MISSING_METHOD = "ERR-missing-method"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
    FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
    FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GET_ERROR = "ERR-radius-get-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
FSM_RMT_INV_ERR_CODE_ERR_REQUEST_TIMEOUT = "ERR-request-timeout"
FSM_RMT_INV_ERR_CODE_ERR_RESET_ADAPTER = "ERR-reset-adapter"
FSM_RMT_INV_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
FSM_RMT_INV_ERR_CODE_ERR_SECONDARY_NODE = "ERR-secondary-node"
FSM_RMT_INV_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
FSM_RMT_INV_ERR_CODE_ERR_SET_KEY_CERT = "ERR-set-key-cert"
FSM_RMT_INV_ERR_CODE_ERR_SET_LOGIN_PROFILE = "ERR-set-login-profile"
FSM_RMT_INV_ERR_CODE_ERR_SET_MIN_PASSPHRASE_LENGTH = "ERR-set-min-passphrase-length"
FSM_RMT_INV_ERR_CODE_ERR_SET_NETWORK = "ERR-set-network"
FSM_RMT_INV_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
FSM_RMT_INV_ERR_CODE_ERR_SET_PORT_CHANNEL = "ERR-set-port-channel"
FSM_RMT_INV_ERR_CODE_ERR_STORE_PRE_LOGIN_BANNER_MSG = "ERR-store-pre-login-banner-msg"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_PLUS_GET_ERROR = "ERR-tacacs-plus-get-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_1 = "ERR-test-error-1"
FSM_RMT_INV_ERR_CODE_ERR_TEST_ERROR_2 = "ERR-test-error-2"
FSM_RMT_INV_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
FSM_RMT_INV_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
FSM_RMT_INV_ERR_CODE_ERR_USER_PASSWD_EXPIRED = "ERR-user-passwd-expired"
FSM_RMT_INV_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
FSM_RMT_INV_ERR_CODE_ERR_XML_PARSE_ERROR = "ERR-xml-parse-error"
FSM_RMT_INV_ERR_CODE_NONE = "none"
FSM_STAMP_NEVER = "never"
FSM_STATUS_MOUNT_BEGIN = "MountBegin"
FSM_STATUS_MOUNT_FAIL = "MountFail"
FSM_STATUS_MOUNT_MOUNT_LOCAL = "MountMountLocal"
FSM_STATUS_MOUNT_MOUNT_PEER = "MountMountPeer"
FSM_STATUS_MOUNT_REGISTER_CLIENT = "MountRegisterClient"
FSM_STATUS_MOUNT_SUCCESS = "MountSuccess"
FSM_STATUS_MOUNT_VERIFY_REGISTRATION = "MountVerifyRegistration"
FSM_STATUS_UNMOUNT_BEGIN = "UnmountBegin"
FSM_STATUS_UNMOUNT_FAIL = "UnmountFail"
FSM_STATUS_UNMOUNT_SUCCESS = "UnmountSuccess"
FSM_STATUS_UNMOUNT_UNMOUNT_LOCAL = "UnmountUnmountLocal"
FSM_STATUS_UNMOUNT_UNMOUNT_PEER = "UnmountUnmountPeer"
FSM_STATUS_NOP = "nop"
OPER_STATE_MOUNTED = "mounted"
OPER_STATE_UNMOUNTED = "unmounted"
PURPOSE_BACKUP = "backup"
PURPOSE_IMAGE = "image"
class NfsMountInst(ManagedObject):
"""This is NfsMountInst class."""
consts = NfsMountInstConsts()
naming_props = set(['name'])
mo_meta = MoMeta("NfsMountInst", "nfsMountInst", "nfs-mount-inst-[name]", VersionMeta.Version211a, "InputOutput", 0x3f, [], ["admin"], ['networkElement'], ['eventInst', 'faultInst', 'nfsMountInstFsm', 'nfsMountInstFsmTask'], ["Get"])
prop_meta = {
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["mount", "none", "remount", "unmount"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"client_config_state": MoPropertyMeta("client_config_state", "clientConfigState", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["configured", "failed", "registered", "unregistered"], []),
"def_dn": MoPropertyMeta("def_dn", "defDn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"fsm_descr": MoPropertyMeta("fsm_descr", "fsmDescr", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_prev": MoPropertyMeta("fsm_prev", "fsmPrev", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, None, ["MountBegin", "MountFail", "MountMountLocal", "MountMountPeer", "MountRegisterClient", "MountSuccess", "MountVerifyRegistration", "UnmountBegin", "UnmountFail", "UnmountSuccess", "UnmountUnmountLocal", "UnmountUnmountPeer", "nop"], []),
"fsm_progr": MoPropertyMeta("fsm_progr", "fsmProgr", "byte", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, None, [], ["0-100"]),
"fsm_rmt_inv_err_code": MoPropertyMeta("fsm_rmt_inv_err_code", "fsmRmtInvErrCode", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, None, ["ERR-2fa-auth-retry", "ERR-ACTIVATE-failed", "ERR-ACTIVATE-in-progress", "ERR-ACTIVATE-retry", "ERR-BIOS-TOKENS-OLD-BIOS", "ERR-BIOS-TOKENS-OLD-CIMC", "ERR-BIOS-network-boot-order-not-found", "ERR-BOARDCTRLUPDATE-ignore", "ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNLD-usb-unmounted", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Diagnostics-in-progress", "ERR-Diagnostics-memtest-in-progress", "ERR-Diagnostics-network-in-progress", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-HOST-fru-identity-mismatch", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-IBMC-analyze-results", "ERR-IBMC-connect-error", "ERR-IBMC-connector-info-retrieval-error", "ERR-IBMC-fru-retrieval-error", "ERR-IBMC-invalid-end-point-config", "ERR-IBMC-results-not-ready", "ERR-MAX-subscriptions-allowed-error", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-POWER-CAP-UNSUPPORTED", "ERR-POWER-PROFILE-IN-PROGRESS", "ERR-SERVER-mis-connect", "ERR-SWITCH-invalid-if-config", "ERR-TOKEN-request-denied", "ERR-UNABLE-TO-FETCH-BIOS-SETTINGS", "ERR-UPDATE-failed", "ERR-UPDATE-in-progress", "ERR-UPDATE-retry", "ERR-aaa-config-modify-error", "ERR-acct-realm-set-error", "ERR-admin-passwd-set", "ERR-auth-issue", "ERR-auth-realm-get-error", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-cli-session-limit-reached", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-tp", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-downgrade-fail", "ERR-efi-Diagnostics--in-progress", "ERR-enable-mgmt-conn", "ERR-ep-set-error", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-insufficiently-equipped", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-missing-method", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-get-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-request-timeout", "ERR-reset-adapter", "ERR-role-set-error", "ERR-secondary-node", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-key-cert", "ERR-set-login-profile", "ERR-set-min-passphrase-length", "ERR-set-network", "ERR-set-password-strength-check", "ERR-set-port-channel", "ERR-store-pre-login-banner-msg", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-plus-get-error", 
"ERR-tacacs-set-error", "ERR-test-error-1", "ERR-test-error-2", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-passwd-expired", "ERR-user-set-error", "ERR-xml-parse-error", "none"], ["0-4294967295"]),
"fsm_rmt_inv_err_descr": MoPropertyMeta("fsm_rmt_inv_err_descr", "fsmRmtInvErrDescr", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, 0, 510, None, [], []),
"fsm_rmt_inv_rslt": MoPropertyMeta("fsm_rmt_inv_rslt", "fsmRmtInvRslt", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
"fsm_stage_descr": MoPropertyMeta("fsm_stage_descr", "fsmStageDescr", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_stamp": MoPropertyMeta("fsm_stamp", "fsmStamp", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["never"], []),
"fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, None, ["MountBegin", "MountFail", "MountMountLocal", "MountMountPeer", "MountRegisterClient", "MountSuccess", "MountVerifyRegistration", "UnmountBegin", "UnmountFail", "UnmountSuccess", "UnmountUnmountLocal", "UnmountUnmountPeer", "nop"], []),
"fsm_try": MoPropertyMeta("fsm_try", "fsmTry", "byte", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"local_dir": MoPropertyMeta("local_dir", "localDir", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version211a, MoPropertyMeta.NAMING, 0x8, 1, 510, None, [], []),
"oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["mounted", "unmounted"], []),
"purpose": MoPropertyMeta("purpose", "purpose", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["backup", "image"], []),
"remote_dir": MoPropertyMeta("remote_dir", "remoteDir", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"server": MoPropertyMeta("server", "server", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"adminState": "admin_state",
"childAction": "child_action",
"clientConfigState": "client_config_state",
"defDn": "def_dn",
"dn": "dn",
"fsmDescr": "fsm_descr",
"fsmPrev": "fsm_prev",
"fsmProgr": "fsm_progr",
"fsmRmtInvErrCode": "fsm_rmt_inv_err_code",
"fsmRmtInvErrDescr": "fsm_rmt_inv_err_descr",
"fsmRmtInvRslt": "fsm_rmt_inv_rslt",
"fsmStageDescr": "fsm_stage_descr",
"fsmStamp": "fsm_stamp",
"fsmStatus": "fsm_status",
"fsmTry": "fsm_try",
"localDir": "local_dir",
"name": "name",
"operState": "oper_state",
"purpose": "purpose",
"remoteDir": "remote_dir",
"rn": "rn",
"sacl": "sacl",
"server": "server",
"status": "status",
}
def __init__(self, parent_mo_or_dn, name, **kwargs):
self._dirty_mask = 0
self.name = name
self.admin_state = None
self.child_action = None
self.client_config_state = None
self.def_dn = None
self.fsm_descr = None
self.fsm_prev = None
self.fsm_progr = None
self.fsm_rmt_inv_err_code = None
self.fsm_rmt_inv_err_descr = None
self.fsm_rmt_inv_rslt = None
self.fsm_stage_descr = None
self.fsm_stamp = None
self.fsm_status = None
self.fsm_try = None
self.local_dir = None
self.oper_state = None
self.purpose = None
self.remote_dir = None
self.sacl = None
self.server = None
self.status = None
ManagedObject.__init__(self, "NfsMountInst", parent_mo_or_dn, **kwargs)
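
# Illustrative usage (the parent DN and name are placeholders; per mo_meta
# above this MO only supports "Get" access):
#     mo = NfsMountInst(parent_mo_or_dn="sys/switch-A", name="nfs1")
#     print(mo.dn, mo.oper_state)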
vimeo.py
"""
$description Global live streaming and video hosting social platform.
$url vimeo.com
$type live, vod
$notes Password-protected streams are not supported
"""
import logging
import re
from html import unescape as html_unescape
from urllib.parse import urlparse
from streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.dash import DASHStream
from streamlink.stream.ffmpegmux import MuxedStream
from streamlink.stream.hls import HLSStream
from streamlink.stream.http import HTTPStream
log = logging.getLogger(__name__)
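
# URLs the matcher below accepts (illustrative examples):
#   https://vimeo.com/293912365
#   https://player.vimeo.com/video/293912365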
@pluginmatcher(re.compile(
r"https?://(player\.vimeo\.com/video/\d+|(www\.)?vimeo\.com/.+)"
))
class Vimeo(Plugin):
_config_url_re = re.compile(r'(?:"config_url"|\bdata-config-url)\s*[:=]\s*(".+?")')
_config_re = re.compile(r"var\s+config\s*=\s*({.+?})\s*;")
_config_url_schema = validate.Schema(
validate.transform(_config_url_re.search),
validate.any(
None,
validate.Schema(
validate.get(1),
validate.parse_json(),
validate.transform(html_unescape),
validate.url(),
),
),
)
_config_schema = validate.Schema(
validate.parse_json(),
{
"request": {
"files": {
validate.optional("dash"): {"cdns": {validate.text: {"url": validate.url()}}},
validate.optional("hls"): {"cdns": {validate.text: {"url": validate.url()}}},
validate.optional("progressive"): validate.all(
[{"url": validate.url(), "quality": validate.text}]
),
},
validate.optional("text_tracks"): validate.all(
[{"url": validate.text, "lang": validate.text}]
),
}
},
)
_player_schema = validate.Schema(
validate.transform(_config_re.search),
validate.any(None, validate.Schema(validate.get(1), _config_schema)),
)
arguments = PluginArguments(
PluginArgument("mux-subtitles", is_global=True)
)
def _get_streams(self):
if "player.vimeo.com" in self.url:
data = self.session.http.get(self.url, schema=self._player_schema)
else:
api_url = self.session.http.get(self.url, schema=self._config_url_schema)
if not api_url:
return
data = self.session.http.get(api_url, schema=self._config_schema)
videos = data["request"]["files"]
streams = []
for stream_type in ("hls", "dash"):
if stream_type not in videos:
continue
for _, video_data in videos[stream_type]["cdns"].items():
log.trace("{0!r}".format(video_data))
url = video_data.get("url")
if stream_type == "hls":
for stream in HLSStream.parse_variant_playlist(self.session, url).items():
streams.append(stream)
elif stream_type == "dash":
p = urlparse(url)
if p.path.endswith("dash.mpd"):
# LIVE
url = self.session.http.get(url).json()["url"]
elif p.path.endswith("master.json"):
# VOD
url = url.replace("master.json", "master.mpd")
else:
log.error("Unsupported DASH path: {0}".format(p.path))
continue
for stream in DASHStream.parse_manifest(self.session, url).items():
streams.append(stream)
for stream in videos.get("progressive", []):
streams.append((stream["quality"], HTTPStream(self.session, stream["url"])))
if self.get_option("mux_subtitles") and data["request"].get("text_tracks"):
substreams = {
s["lang"]: HTTPStream(self.session, "https://vimeo.com" + s["url"])
for s in data["request"]["text_tracks"]
}
for quality, stream in streams:
yield quality, MuxedStream(self.session, stream, subtitles=substreams)
else:
for stream in streams:
yield stream

__plugin__ = Vimeo
encoded_datum.go
// Copyright 2016 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
package sqlbase
import (
"bytes"
"fmt"
"unsafe"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/pkg/errors"
)
// EncodingDirToDatumEncoding returns an equivalent DatumEncoding for the given
// encoding direction.
func EncodingDirToDatumEncoding(dir encoding.Direction) DatumEncoding {
switch dir {
case encoding.Ascending:
return DatumEncoding_ASCENDING_KEY
case encoding.Descending:
return DatumEncoding_DESCENDING_KEY
default:
panic(fmt.Sprintf("invalid encoding direction: %d", dir))
}
}
// EncDatum represents a datum that is "backed" by an encoding and/or by a
// tree.Datum. It allows "passing through" a Datum without decoding and
// reencoding.
type EncDatum struct {
// Encoding type. Valid only if encoded is not nil.
encoding DatumEncoding
// Encoded datum (according to the encoding field).
encoded []byte
// Decoded datum.
Datum tree.Datum
}
func (ed *EncDatum) stringWithAlloc(typ *types.T, a *DatumAlloc) string {
if ed.Datum == nil {
if ed.encoded == nil {
return "<unset>"
}
if a == nil {
a = &DatumAlloc{}
}
err := ed.EnsureDecoded(typ, a)
if err != nil {
return fmt.Sprintf("<error: %v>", err)
}
}
return ed.Datum.String()
}
func (ed *EncDatum) String(typ *types.T) string {
return ed.stringWithAlloc(typ, nil)
}
// BytesEqual is true if the EncDatum's encoded field is equal to the input.
func (ed *EncDatum) BytesEqual(b []byte) bool {
return bytes.Equal(ed.encoded, b)
}
// EncodedString returns an immutable copy of this EncDatum's encoded field.
func (ed *EncDatum) EncodedString() string {
return string(ed.encoded)
}
// EncDatumOverhead is the overhead of EncDatum in bytes.
const EncDatumOverhead = unsafe.Sizeof(EncDatum{})
// Size returns a lower bound on the total size of the receiver in bytes,
// including memory referenced by the receiver.
func (ed EncDatum) Size() uintptr {
size := EncDatumOverhead
if ed.encoded != nil {
size += uintptr(len(ed.encoded))
}
if ed.Datum != nil {
size += ed.Datum.Size()
}
return size
}
// EncDatumFromEncoded initializes an EncDatum with the given encoded
// value. The encoded value is stored as a shallow copy, so the caller must
// make sure the slice is not modified for the lifetime of the EncDatum.
// The Datum field is left unset.
func EncDatumFromEncoded(enc DatumEncoding, encoded []byte) EncDatum {
if len(encoded) == 0 {
panic(fmt.Sprintf("empty encoded value"))
}
return EncDatum{
encoding: enc,
encoded: encoded,
Datum: nil,
}
}
// EncDatumFromBuffer initializes an EncDatum with an encoding that is
// possibly followed by other data. Similar to EncDatumFromEncoded,
// except that this function figures out where the encoding stops and returns a
// slice for the rest of the buffer.
func EncDatumFromBuffer(typ *types.T, enc DatumEncoding, buf []byte) (EncDatum, []byte, error) {
if len(buf) == 0 {
return EncDatum{}, nil, errors.New("empty encoded value")
}
switch enc {
case DatumEncoding_ASCENDING_KEY, DatumEncoding_DESCENDING_KEY:
var encLen int
var err error
encLen, err = encoding.PeekLength(buf)
if err != nil {
return EncDatum{}, nil, err
}
ed := EncDatumFromEncoded(enc, buf[:encLen])
return ed, buf[encLen:], nil
case DatumEncoding_VALUE:
typeOffset, encLen, err := encoding.PeekValueLength(buf)
if err != nil {
return EncDatum{}, nil, err
}
ed := EncDatumFromEncoded(enc, buf[typeOffset:encLen])
return ed, buf[encLen:], nil
default:
panic(fmt.Sprintf("unknown encoding %s", enc))
}
}
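// Illustrative sketch (assumed layout, not part of the original file): consume
// a buffer holding several key-encoded datums laid out back to back, using the
// remainder slice returned by EncDatumFromBuffer to advance.
func exampleDecodeMany(typ *types.T, buf []byte) ([]EncDatum, error) {
	var out []EncDatum
	for len(buf) > 0 {
		ed, rest, err := EncDatumFromBuffer(typ, DatumEncoding_ASCENDING_KEY, buf)
		if err != nil {
			return nil, err
		}
		out = append(out, ed)
		buf = rest
	}
	return out, nil
}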
// EncDatumValueFromBufferWithOffsetsAndType is just like calling
// EncDatumFromBuffer with DatumEncoding_VALUE, except it expects that you pass
// in the result of calling DecodeValueTag on the input buf. Use this if
// you've already called DecodeValueTag on buf, to avoid calling it more than
// necessary.
func EncDatumValueFromBufferWithOffsetsAndType(
buf []byte, typeOffset int, dataOffset int, typ encoding.Type,
) (EncDatum, []byte, error)
|
// DatumToEncDatum initializes an EncDatum with the given Datum.
func DatumToEncDatum(ctyp *types.T, d tree.Datum) EncDatum {
if d == nil {
panic("Cannot convert nil datum to EncDatum")
}
dTyp := d.ResolvedType()
if d != tree.DNull && !ctyp.Equivalent(dTyp) && !dTyp.IsAmbiguous() {
panic(fmt.Sprintf("invalid datum type given: %s, expected %s", dTyp, ctyp))
}
return EncDatum{Datum: d}
}
// UnsetDatum ensures subsequent IsUnset() calls return false.
func (ed *EncDatum) UnsetDatum() {
ed.encoded = nil
ed.Datum = nil
ed.encoding = 0
}
// IsUnset returns true if the EncDatum holds neither an encoded value nor a Datum.
func (ed *EncDatum) IsUnset() bool {
return ed.encoded == nil && ed.Datum == nil
}
// IsNull returns true if the EncDatum value is NULL. Equivalent to checking if
// ed.Datum is DNull after calling EnsureDecoded.
func (ed *EncDatum) IsNull() bool {
if ed.Datum != nil {
return ed.Datum == tree.DNull
}
if ed.encoded == nil {
panic("IsNull on unset EncDatum")
}
switch ed.encoding {
case DatumEncoding_ASCENDING_KEY, DatumEncoding_DESCENDING_KEY:
_, isNull := encoding.DecodeIfNull(ed.encoded)
return isNull
case DatumEncoding_VALUE:
_, _, _, typ, err := encoding.DecodeValueTag(ed.encoded)
if err != nil {
panic(err)
}
return typ == encoding.Null
default:
panic(fmt.Sprintf("unknown encoding %s", ed.encoding))
}
}
// EnsureDecoded ensures that the Datum field is set (decoding if it is not).
func (ed *EncDatum) EnsureDecoded(typ *types.T, a *DatumAlloc) error {
if ed.Datum != nil {
return nil
}
if ed.encoded == nil {
return pgerror.AssertionFailedf("decoding unset EncDatum")
}
var err error
var rem []byte
switch ed.encoding {
case DatumEncoding_ASCENDING_KEY:
ed.Datum, rem, err = DecodeTableKey(a, typ, ed.encoded, encoding.Ascending)
case DatumEncoding_DESCENDING_KEY:
ed.Datum, rem, err = DecodeTableKey(a, typ, ed.encoded, encoding.Descending)
case DatumEncoding_VALUE:
ed.Datum, rem, err = DecodeTableValue(a, typ, ed.encoded)
default:
return pgerror.AssertionFailedf("unknown encoding %d", log.Safe(ed.encoding))
}
if err != nil {
return pgerror.Wrapf(err, pgerror.CodeDataExceptionError,
"error decoding %d bytes", log.Safe(len(ed.encoded)))
}
if len(rem) != 0 {
ed.Datum = nil
return pgerror.AssertionFailedf(
"%d trailing bytes in encoded value: %+v", log.Safe(len(rem)), rem)
}
return nil
}
// Encoding returns the encoding that is already available, with the bool
// return value indicating whether an encoding is present.
func (ed *EncDatum) Encoding() (DatumEncoding, bool) {
if ed.encoded == nil {
return 0, false
}
return ed.encoding, true
}
// Encode appends the encoded datum to the given slice using the requested
// encoding.
// Note: DatumEncoding_VALUE encodings are not unique because they can contain
// a column ID so they should not be used to test for equality.
func (ed *EncDatum) Encode(
typ *types.T, a *DatumAlloc, enc DatumEncoding, appendTo []byte,
) ([]byte, error) {
if ed.encoded != nil && enc == ed.encoding {
// We already have a matching encoding that we can use.
return append(appendTo, ed.encoded...), nil
}
if err := ed.EnsureDecoded(typ, a); err != nil {
return nil, err
}
switch enc {
case DatumEncoding_ASCENDING_KEY:
return EncodeTableKey(appendTo, ed.Datum, encoding.Ascending)
case DatumEncoding_DESCENDING_KEY:
return EncodeTableKey(appendTo, ed.Datum, encoding.Descending)
case DatumEncoding_VALUE:
return EncodeTableValue(appendTo, ColumnID(encoding.NoColumnID), ed.Datum, a.scratch)
default:
panic(fmt.Sprintf("unknown encoding requested %s", enc))
}
}
// Compare returns:
// -1 if the receiver is less than rhs,
// 0 if the receiver is equal to rhs,
// +1 if the receiver is greater than rhs.
func (ed *EncDatum) Compare(
typ *types.T, a *DatumAlloc, evalCtx *tree.EvalContext, rhs *EncDatum,
) (int, error) {
// TODO(radu): if we have both the Datum and a key encoding available, which
// one would be faster to use?
if ed.encoding == rhs.encoding && ed.encoded != nil && rhs.encoded != nil {
switch ed.encoding {
case DatumEncoding_ASCENDING_KEY:
return bytes.Compare(ed.encoded, rhs.encoded), nil
case DatumEncoding_DESCENDING_KEY:
return bytes.Compare(rhs.encoded, ed.encoded), nil
}
}
if err := ed.EnsureDecoded(typ, a); err != nil {
return 0, err
}
if err := rhs.EnsureDecoded(typ, a); err != nil {
return 0, err
}
return ed.Datum.Compare(evalCtx, rhs.Datum), nil
}
// GetInt decodes an EncDatum that is known to be of integer type and returns
// the integer value. It is a more convenient and more efficient alternative to
// calling EnsureDecoded and casting the Datum.
func (ed *EncDatum) GetInt() (int64, error) {
if ed.Datum != nil {
if ed.Datum == tree.DNull {
return 0, errors.Errorf("NULL INT value")
}
return int64(*ed.Datum.(*tree.DInt)), nil
}
switch ed.encoding {
case DatumEncoding_ASCENDING_KEY:
if _, isNull := encoding.DecodeIfNull(ed.encoded); isNull {
return 0, errors.Errorf("NULL INT value")
}
_, val, err := encoding.DecodeVarintAscending(ed.encoded)
return val, err
case DatumEncoding_DESCENDING_KEY:
if _, isNull := encoding.DecodeIfNull(ed.encoded); isNull {
return 0, errors.Errorf("NULL INT value")
}
_, val, err := encoding.DecodeVarintDescending(ed.encoded)
return val, err
case DatumEncoding_VALUE:
_, dataOffset, _, typ, err := encoding.DecodeValueTag(ed.encoded)
if err != nil {
return 0, err
}
// NULL, true, and false are special, because their values are fully encoded by their value tag.
if typ == encoding.Null {
return 0, errors.Errorf("NULL INT value")
}
_, val, err := encoding.DecodeUntaggedIntValue(ed.encoded[dataOffset:])
return val, err
default:
return 0, errors.Errorf("unknown encoding %s", ed.encoding)
}
}
// EncDatumRow is a row of EncDatums.
type EncDatumRow []EncDatum
func (r EncDatumRow) stringToBuf(types []types.T, a *DatumAlloc, b *bytes.Buffer) {
if len(types) != len(r) {
panic(fmt.Sprintf("mismatched types (%v) and row (%v)", types, r))
}
b.WriteString("[")
for i := range r {
if i > 0 {
b.WriteString(" ")
}
b.WriteString(r[i].stringWithAlloc(&types[i], a))
}
b.WriteString("]")
}
// Copy makes a copy of this EncDatumRow. Convenient for tests. Use an
// EncDatumRowAlloc in non-test code.
func (r EncDatumRow) Copy() EncDatumRow {
if r == nil {
return nil
}
rCopy := make(EncDatumRow, len(r))
copy(rCopy, r)
return rCopy
}
func (r EncDatumRow) String(types []types.T) string {
var b bytes.Buffer
r.stringToBuf(types, &DatumAlloc{}, &b)
return b.String()
}
// EncDatumRowOverhead is the overhead of EncDatumRow in bytes.
const EncDatumRowOverhead = unsafe.Sizeof(EncDatumRow{})
// Size returns a lower bound on the total size of all EncDatums in the
// receiver, including memory referenced by them.
func (r EncDatumRow) Size() uintptr {
size := EncDatumRowOverhead
for _, ed := range r {
size += ed.Size()
}
return size
}
// EncDatumRowToDatums converts a given EncDatumRow to a Datums.
func EncDatumRowToDatums(
types []types.T, datums tree.Datums, row EncDatumRow, da *DatumAlloc,
) error {
if len(types) != len(row) {
panic(fmt.Sprintf("mismatched types (%v) and row (%v)", types, row))
}
if len(row) != len(datums) {
return errors.Errorf(
"Length mismatch (%d and %d) between datums and row", len(datums), len(row))
}
for i, encDatum := range row {
if encDatum.IsUnset() {
datums[i] = tree.DNull
continue
}
err := encDatum.EnsureDecoded(&types[i], da)
if err != nil {
return err
}
datums[i] = encDatum.Datum
}
return nil
}
// Compare returns the relative ordering of two EncDatumRows according to a
// ColumnOrdering:
// -1 if the receiver comes before the rhs in the ordering,
// +1 if the receiver comes after the rhs in the ordering,
// 0 if the relative order does not matter (i.e. the two rows have the same
// values for the columns in the ordering).
//
// Note that a return value of 0 does not (in general) imply that the rows are
// equal; for example, rows [1 1 5] and [1 1 6] compare equal under ordering
// {{0, asc}, {1, asc}} (i.e. ordered by the first column and then by the
// second column) even though they differ in the third column.
func (r EncDatumRow) Compare(
types []types.T,
a *DatumAlloc,
ordering ColumnOrdering,
evalCtx *tree.EvalContext,
rhs EncDatumRow,
) (int, error) {
if len(r) != len(types) || len(rhs) != len(types) {
panic(fmt.Sprintf("length mismatch: %d types, %d lhs, %d rhs\n%+v\n%+v\n%+v", len(types), len(r), len(rhs), types, r, rhs))
}
for _, c := range ordering {
cmp, err := r[c.ColIdx].Compare(&types[c.ColIdx], a, evalCtx, &rhs[c.ColIdx])
if err != nil {
return 0, err
}
if cmp != 0 {
if c.Direction == encoding.Descending {
cmp = -cmp
}
return cmp, nil
}
}
return 0, nil
}
// CompareToDatums is a version of Compare which compares against decoded Datums.
func (r EncDatumRow) CompareToDatums(
types []types.T,
a *DatumAlloc,
ordering ColumnOrdering,
evalCtx *tree.EvalContext,
rhs tree.Datums,
) (int, error) {
for _, c := range ordering {
if err := r[c.ColIdx].EnsureDecoded(&types[c.ColIdx], a); err != nil {
return 0, err
}
cmp := r[c.ColIdx].Datum.Compare(evalCtx, rhs[c.ColIdx])
if cmp != 0 {
if c.Direction == encoding.Descending {
cmp = -cmp
}
return cmp, nil
}
}
return 0, nil
}
// EncDatumRows is a slice of EncDatumRows having the same schema.
type EncDatumRows []EncDatumRow
func (r EncDatumRows) String(types []types.T) string {
var a DatumAlloc
var b bytes.Buffer
b.WriteString("[")
for i, r := range r {
if i > 0 {
b.WriteString(" ")
}
r.stringToBuf(types, &a, &b)
}
b.WriteString("]")
return b.String()
}
// EncDatumRowContainer holds rows and can cycle through them.
// Must be Reset upon initialization.
type EncDatumRowContainer struct {
rows EncDatumRows
index int
}
// Peek returns the current element at the top of the container.
func (c *EncDatumRowContainer) Peek() EncDatumRow {
return c.rows[c.index]
}
// Pop returns the next row from the container. Will cycle through the rows
// again if we reach the end.
func (c *EncDatumRowContainer) Pop() EncDatumRow {
if c.index < 0 {
c.index = len(c.rows) - 1
}
row := c.rows[c.index]
c.index--
return row
}
// Push adds a row to the container.
func (c *EncDatumRowContainer) Push(row EncDatumRow) {
c.rows = append(c.rows, row)
c.index = len(c.rows) - 1
}
// Reset clears the container and resets the indexes.
// Must be called upon creating a container.
func (c *EncDatumRowContainer) Reset() {
c.rows = c.rows[:0]
c.index = -1
}
// IsEmpty returns whether the container is "empty", which means that it's about
// to cycle through its rows again on the next Pop.
func (c *EncDatumRowContainer) IsEmpty() bool {
return c.index == -1
}
// EncDatumRowAlloc is a helper that speeds up allocation of EncDatumRows
// (preferably of the same length).
type EncDatumRowAlloc struct {
buf []EncDatum
// Preallocate a small initial batch (helps cases where
// we only allocate a few small rows).
prealloc [16]EncDatum
}
// AllocRow allocates an EncDatumRow with the given number of columns.
func (a *EncDatumRowAlloc) AllocRow(cols int) EncDatumRow {
if a.buf == nil {
// First call.
a.buf = a.prealloc[:]
}
if len(a.buf) < cols {
// If the rows are small, allocate storage for a bunch of rows at once.
bufLen := cols
if cols <= 16 {
bufLen *= 16
} else if cols <= 64 {
bufLen *= 4
}
a.buf = make([]EncDatum, bufLen)
}
// Chop off a row from buf, and limit its capacity to avoid corrupting the
// following row in the unlikely case that the caller appends to the slice.
result := EncDatumRow(a.buf[:cols:cols])
a.buf = a.buf[cols:]
return result
}
// CopyRow allocates an EncDatumRow and copies the given row to it.
func (a *EncDatumRowAlloc) CopyRow(row EncDatumRow) EncDatumRow {
rowCopy := a.AllocRow(len(row))
copy(rowCopy, row)
return rowCopy
}
|
{
encLen, err := encoding.PeekValueLengthWithOffsetsAndType(buf, dataOffset, typ)
if err != nil {
return EncDatum{}, nil, err
}
ed := EncDatumFromEncoded(DatumEncoding_VALUE, buf[typeOffset:encLen])
return ed, buf[encLen:], nil
}
|
move_semantics2.rs
|
// move_semantics2.rs
// Make me compile without changing line 13!
// Execute `rustlings hint move_semantics2` for hints :)
fn
|
() {
let vec0 = Vec::new();
let mut vec1 = fill_vec(&vec0);
// Do not change the following line!
println!("{} has length {} content `{:?}`", "vec0", vec0.len(), vec0);
vec1.push(88);
println!("{} has length {} content `{:?}`", "vec1", vec1.len(), vec1);
}
fn fill_vec(vec_borrowed: &Vec<i32>) -> Vec<i32> {
let mut vec = vec_borrowed.to_vec();
vec.push(22);
vec.push(44);
vec.push(66);
vec
}
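// Why this compiles without touching the protected line: fill_vec takes
// `&Vec<i32>` and clones the contents with `to_vec()`, so `vec0` is only
// borrowed and `main` keeps ownership, letting the later println! read it.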
|
main
|
model.py
|
from flask import Flask
from marshmallow import Schema, fields, pre_load, validate
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
from flask_redis import FlaskRedis
ma = Marshmallow()
db = SQLAlchemy()
redis_cache = FlaskRedis()
class FoodModel(db.Model):
__tablename__ = 'foods'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), nullable=False)
description = db.Column(db.String(250))
creation_date = db.Column(db.TIMESTAMP, server_default=db.func.current_timestamp(), nullable=False)
restaurant_id = db.Column(db.Integer, db.ForeignKey('restaurants.id', ondelete='CASCADE'), nullable=False)
restaurant = db.relationship('RestaurantModel', backref=db.backref('foods', lazy='dynamic' ))
menu_id = db.Column(db.Integer, db.ForeignKey('menus.id', ondelete='CASCADE'), nullable=False)
menu = db.relationship('MenuModel')
def __init__(self, name, description, restaurant_id, menu_id):
self.name = name
self.description = description
self.restaurant_id = restaurant_id
self.menu_id = menu_id
class MenuModel(db.Model):
__tablename__ = 'menus'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), nullable=False)
restaurant_id = db.Column(db.Integer, db.ForeignKey('restaurants.id', ondelete='CASCADE'), nullable=False)
restaurant = db.relationship('RestaurantModel')
def __init__(self, name, restaurant_id):
self.name = name
self.restaurant_id = restaurant_id
class RestaurantModel(db.Model):
__tablename__ = 'restaurants'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(150), unique=True, nullable=False)
def __init__(self, name):
|
class RestaurantSchema(ma.Schema):
id = fields.Integer()
name = fields.String(required=True)
class MenuSchema(ma.Schema):
id = fields.Integer()
restaurant_id = fields.Integer(required=True)
name = fields.String(required=True)
class FoodSchema(ma.Schema):
id = fields.Integer(dump_only=True)
restaurant_id = fields.Integer(required=True)
name = fields.String(required=True, validate=validate.Length(1))
description = fields.String()
creation_date = fields.DateTime()
|
self.name = name
|
graph_features.py
|
import networkx as nx
class FeatureExtractor:
"""
Extracting some hand-crafted x1_features for the x1_graphs
- Number of (effective nodes)
- Average
"""
def __init__(self, g: nx.Graph, node_attr_name='op_name', s='input', t='output'):
"""
g: a valid networkx graph
node_attr_name: the tag of the node attribute. default is 'op_name'
s, t: the tag of the two special input and output nodes. Note that there can be more than one input node (s), but
only one output node (t)
"""
self.g = g
self.input_index = []
self.output_index = None
for n in range(g.number_of_nodes()):
assert node_attr_name in list(dict(g.nodes[n]).keys()), node_attr_name + " is not found in " + str(
g.nodes[n])
if str(g.nodes[n][node_attr_name]) == str(s):
self.input_index.append(n)
elif str(g.nodes[n][node_attr_name]) == str(t):
self.output_index = n
self.node_attr_name = node_attr_name
if len(self.input_index) == 0:
raise ValueError("Unknown input node!")
elif self.output_index is None:
raise ValueError("Unknown output node!")
# Specify the special nodes (i.e. the input and output, source and sink)
if isinstance(self.g, nx.DiGraph):
self.undirected_g = self.g.to_undirected()
else:
self.undirected_g = self.g
def __getattr__(self, item):
"""Identify the feature already implemented in the graph class"""
try:
res = getattr(self.g, item)
except AttributeError:
            raise AttributeError("Item " + str(item) + " is found neither in the feature extractor "
                                                        "nor in the graph instance!")
if callable(res):
return res()
return res
def _paths(self) -> list:
"""Enumerate all paths from input to output. Return a list of lists with each sub-list the node indices from
the input to output
Data shape:
(N_input x2 N_path x2 length of each path)
for SISO graph, the data shape is (1 x2 N_path x2 length of each path)
"""
if not isinstance(self.g, nx.DiGraph):
            raise TypeError("Path enumeration is only applicable to directed graphs!")
result = []
for i in self.input_index:
result.append(list(nx.all_simple_paths(self.g, i, self.output_index)))
return result
@property
def number_of_paths(self):
paths = self._paths()
if len(paths) == 1:
return len(paths[0])
return [len(i) for i in paths]
@property
def longest_path(self):
"""Return the longest path from input to output. the return type is a list in case when there is more than one
input node."""
paths = self._paths()
if len(paths) == 1: # if the list is a singlet (i.e. the S-T style graph), then return a scalar output only
return len(max(paths[0], key=lambda x: len(x)))
return [len(max(i, key=lambda x: len(x))) for i in paths]
@property
def degree_distribution(self, normalize=False):
|
@property
    def laplacian_spectrum(self):
return nx.normalized_laplacian_spectrum(self.undirected_g)
@property
def average_undirected_degree(self):
return sum(dict(self.undirected_g.degree).values()) / (self.undirected_g.number_of_nodes() + 0.0)
@property
def number_of_conv3x3(self):
i = 0
for node, attr in self.g.nodes(data=True):
if attr['op_name'] == 'conv3x3-bn-relu':
i += 1
return i
|
"""
return the degree distribution of the *undirected* counterpart of the graph, if the graph is directed.
return a dictionary in the form of ((D1, N1), (D2, N2)... ) where Di is the degree and Ni is the frequency
"""
from collections import Counter
degree_seq = sorted([d for d, n in dict(self.undirected_g.degree)], reverse=True)
degree_count = Counter(degree_seq)
deg, cnt = zip(*degree_count.items())
if normalize:
n = self.undirected_g.number_of_nodes()
cnt //= n
return deg, cnt
|
001.rs
|
// If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9.
// The sum of these multiples is 23. Find the sum of all the multiples of 3 or 5 below 1000.
fn main()
|
{
    let result = (3..1000).filter(|n| n % 3 == 0 || n % 5 == 0).fold(0, |sum, n| sum + n);
assert_eq!(result, 233168);
println!("{}", result);
}
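// Alternative sketch (illustrative): the same sum in O(1) via
// inclusion-exclusion, S(3) + S(5) - S(15), where S(k) sums the multiples of k
// below n using k * m * (m + 1) / 2 with m = (n - 1) / k.
#[allow(dead_code)]
fn sum_of_multiples(k: u32, n: u32) -> u32 {
    let m = (n - 1) / k;
    k * m * (m + 1) / 2
}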
|
|
filter_instances_responses.go
|
// Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package operations
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
"github.com/blox/blox/cluster-state-service/internal/models"
)
// FilterInstancesReader is a Reader for the FilterInstances structure.
type FilterInstancesReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *FilterInstancesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewFilterInstancesOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 500:
result := NewFilterInstancesInternalServerError()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
return nil, runtime.NewAPIError("unknown error", response, response.Code())
}
}
// NewFilterInstancesOK creates a FilterInstancesOK with default headers values
func
|
() *FilterInstancesOK {
return &FilterInstancesOK{}
}
/*FilterInstancesOK handles this case with default header values.
Filter instances - success
*/
type FilterInstancesOK struct {
Payload *models.ContainerInstances
}
func (o *FilterInstancesOK) Error() string {
return fmt.Sprintf("[GET /instances/filter][%d] filterInstancesOK %+v", 200, o.Payload)
}
func (o *FilterInstancesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ContainerInstances)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewFilterInstancesInternalServerError creates a FilterInstancesInternalServerError with default headers values
func NewFilterInstancesInternalServerError() *FilterInstancesInternalServerError {
return &FilterInstancesInternalServerError{}
}
/*FilterInstancesInternalServerError handles this case with default header values.
Filter instances - unexpected error
*/
type FilterInstancesInternalServerError struct {
Payload string
}
func (o *FilterInstancesInternalServerError) Error() string {
return fmt.Sprintf("[GET /instances/filter][%d] filterInstancesInternalServerError %+v", 500, o.Payload)
}
func (o *FilterInstancesInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
|
NewFilterInstancesOK
|
router.rs
|
use crate::{
advanced, beacon, consensus, error::ApiError, helpers, metrics, network, node, spec, validator,
BoxFut, NetworkChannel,
};
use beacon_chain::{BeaconChain, BeaconChainTypes};
use client_network::Service as NetworkService;
use eth2_config::Eth2Config;
use futures::{Future, IntoFuture};
use hyper::{Body, Error, Method, Request, Response};
use slog::debug;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
fn into_boxfut<F: IntoFuture + 'static>(item: F) -> BoxFut
where
F: IntoFuture<Item = Response<Body>, Error = ApiError>,
F::Future: Send,
{
Box::new(item.into_future())
}
// Allowing more than 7 arguments.
#[allow(clippy::too_many_arguments)]
pub fn route<T: BeaconChainTypes>(
req: Request<Body>,
beacon_chain: Arc<BeaconChain<T>>,
network_service: Arc<NetworkService<T>>,
network_channel: NetworkChannel,
eth2_config: Arc<Eth2Config>,
local_log: slog::Logger,
db_path: PathBuf,
freezer_db_path: PathBuf,
) -> impl Future<Item = Response<Body>, Error = Error> {
metrics::inc_counter(&metrics::REQUEST_COUNT);
let timer = metrics::start_timer(&metrics::REQUEST_RESPONSE_TIME);
let received_instant = Instant::now();
let path = req.uri().path().to_string();
|
let log = local_log.clone();
let request_result: Box<dyn Future<Item = Response<_>, Error = _> + Send> =
match (req.method(), path.as_ref()) {
// Methods for Client
(&Method::GET, "/node/version") => into_boxfut(node::get_version(req)),
(&Method::GET, "/node/syncing") => {
into_boxfut(helpers::implementation_pending_response(req))
}
// Methods for Network
(&Method::GET, "/network/enr") => {
into_boxfut(network::get_enr::<T>(req, network_service))
}
(&Method::GET, "/network/peer_count") => {
into_boxfut(network::get_peer_count::<T>(req, network_service))
}
(&Method::GET, "/network/peer_id") => {
into_boxfut(network::get_peer_id::<T>(req, network_service))
}
(&Method::GET, "/network/peers") => {
into_boxfut(network::get_peer_list::<T>(req, network_service))
}
(&Method::GET, "/network/listen_port") => {
into_boxfut(network::get_listen_port::<T>(req, network_service))
}
(&Method::GET, "/network/listen_addresses") => {
into_boxfut(network::get_listen_addresses::<T>(req, network_service))
}
// Methods for Beacon Node
(&Method::GET, "/beacon/head") => into_boxfut(beacon::get_head::<T>(req, beacon_chain)),
(&Method::GET, "/beacon/heads") => {
into_boxfut(beacon::get_heads::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/block") => {
into_boxfut(beacon::get_block::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/block_root") => {
into_boxfut(beacon::get_block_root::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/fork") => into_boxfut(beacon::get_fork::<T>(req, beacon_chain)),
(&Method::GET, "/beacon/genesis_time") => {
into_boxfut(beacon::get_genesis_time::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/validators") => {
into_boxfut(beacon::get_validators::<T>(req, beacon_chain))
}
(&Method::POST, "/beacon/validators") => {
into_boxfut(beacon::post_validators::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/validators/all") => {
into_boxfut(beacon::get_all_validators::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/validators/active") => {
into_boxfut(beacon::get_active_validators::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/state") => {
into_boxfut(beacon::get_state::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/state_root") => {
into_boxfut(beacon::get_state_root::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/state/genesis") => {
into_boxfut(beacon::get_genesis_state::<T>(req, beacon_chain))
}
(&Method::GET, "/beacon/committees") => {
into_boxfut(beacon::get_committees::<T>(req, beacon_chain))
}
(&Method::POST, "/beacon/proposer_slashing") => {
into_boxfut(beacon::proposer_slashing::<T>(req, beacon_chain))
}
(&Method::POST, "/beacon/attester_slashing") => {
into_boxfut(beacon::attester_slashing::<T>(req, beacon_chain))
}
// Methods for Validator
(&Method::POST, "/validator/duties") => {
let timer =
metrics::start_timer(&metrics::VALIDATOR_GET_DUTIES_REQUEST_RESPONSE_TIME);
let response = validator::post_validator_duties::<T>(req, beacon_chain);
drop(timer);
into_boxfut(response)
}
(&Method::GET, "/validator/duties/all") => {
into_boxfut(validator::get_all_validator_duties::<T>(req, beacon_chain))
}
(&Method::GET, "/validator/duties/active") => into_boxfut(
validator::get_active_validator_duties::<T>(req, beacon_chain),
),
(&Method::GET, "/validator/block") => {
let timer =
metrics::start_timer(&metrics::VALIDATOR_GET_BLOCK_REQUEST_RESPONSE_TIME);
let response = validator::get_new_beacon_block::<T>(req, beacon_chain, log);
drop(timer);
into_boxfut(response)
}
(&Method::POST, "/validator/block") => {
validator::publish_beacon_block::<T>(req, beacon_chain, network_channel, log)
}
(&Method::GET, "/validator/attestation") => {
let timer =
metrics::start_timer(&metrics::VALIDATOR_GET_ATTESTATION_REQUEST_RESPONSE_TIME);
let response = validator::get_new_attestation::<T>(req, beacon_chain);
drop(timer);
into_boxfut(response)
}
(&Method::POST, "/validator/attestation") => {
validator::publish_attestation::<T>(req, beacon_chain, network_channel, log)
}
(&Method::GET, "/consensus/global_votes") => {
into_boxfut(consensus::get_vote_count::<T>(req, beacon_chain))
}
(&Method::POST, "/consensus/individual_votes") => {
consensus::post_individual_votes::<T>(req, beacon_chain)
}
// Methods for bootstrap and checking configuration
(&Method::GET, "/spec") => into_boxfut(spec::get_spec::<T>(req, beacon_chain)),
(&Method::GET, "/spec/slots_per_epoch") => {
into_boxfut(spec::get_slots_per_epoch::<T>(req))
}
(&Method::GET, "/spec/deposit_contract") => {
into_boxfut(helpers::implementation_pending_response(req))
}
(&Method::GET, "/spec/eth2_config") => {
into_boxfut(spec::get_eth2_config::<T>(req, eth2_config))
}
// Methods for advanced parameters
(&Method::GET, "/advanced/fork_choice") => {
into_boxfut(advanced::get_fork_choice::<T>(req, beacon_chain))
}
(&Method::GET, "/advanced/operation_pool") => {
into_boxfut(advanced::get_operation_pool::<T>(req, beacon_chain))
}
(&Method::GET, "/metrics") => into_boxfut(metrics::get_prometheus::<T>(
req,
beacon_chain,
db_path,
freezer_db_path,
)),
_ => Box::new(futures::future::err(ApiError::NotFound(
"Request path and/or method not found.".to_owned(),
))),
};
    // Map the Rust-friendly `Result` into an HTTP-friendly response. In effect, this ensures that
// any `Err` returned from our response handlers becomes a valid http response to the client
// (e.g., a response with a 404 or 500 status).
request_result.then(move |result| {
let duration = Instant::now().duration_since(received_instant);
match result {
Ok(response) => {
debug!(
local_log,
"HTTP API request successful";
"path" => path,
"duration_ms" => duration.as_millis()
);
metrics::inc_counter(&metrics::SUCCESS_COUNT);
metrics::stop_timer(timer);
Ok(response)
}
Err(e) => {
let error_response = e.into();
debug!(
local_log,
"HTTP API request failure";
"path" => path,
"duration_ms" => duration.as_millis()
);
metrics::stop_timer(timer);
Ok(error_response)
}
}
})
}
| |
tests.rs
|
use crate::{Error, mock::*};
use frame_support::{assert_ok, assert_noop};
use super::*;
#[test]
fn create_claim_works(){
new_test_ext().execute_with(|| {
let claim: Vec<u8> = vec![0,1];
assert_ok!(PoeModule::create_claim(Origin::signed(1),claim.clone()));
assert_eq!(
Proofs::<Test>::get(&claim),
((1,frame_system::Pallet::<Test>::block_number()))
);
})
}
#[test]
fn create_claim_failed_when_claim_already_exist(){
new_test_ext().execute_with(|| {
let claim: Vec<u8> = vec![0,1];
let _ = PoeModule::create_claim(Origin::signed(1),claim.clone());
assert_noop!(
PoeModule::create_claim(Origin::signed(1),claim.clone()),
Error::<Test>::ProofAlreadyClaimed
);
})
}
#[test]
fn revoke_claim_works(){
new_test_ext().execute_with(|| {
let claim: Vec<u8> = vec![0,1];
let _ = PoeModule::create_claim(Origin::signed(1),claim.clone());
assert_ok!(PoeModule::revoke_claim(Origin::signed(1),claim.clone()));
        // The return is a tuple; None is the wrong type here and must be converted to (u64, u64)
assert_eq!(
Proofs::<Test>::get(&claim),
(0u64,0u64)
);
})
}
#[test]
fn revoke_claim_failed_when_claim_is_not_exist(){
new_test_ext().execute_with(|| {
let claim: Vec<u8> = vec![0,1];
assert_noop!(
PoeModule::revoke_claim(Origin::signed(1),claim.clone()),
Error::<Test>::NoSuchProof
);
})
}
#[test]
fn revoke_claim_failed_when_is_not_owner() {
new_test_ext().exe
|
aim_works(){
new_test_ext().execute_with(|| {
let claim: Vec<u8> = vec![0,1];
let _ = PoeModule::create_claim(Origin::signed(1),claim.clone());
assert_ok!(PoeModule::transfer_claim(Origin::signed(1),2u64,claim.clone()));
        // Verify the result after the transfer
assert_eq!(
Proofs::<Test>::get(&claim),
(2,frame_system::Pallet::<Test>::block_number())
);
        // After the transfer, the original owner can no longer revoke the claim
assert_noop!(
PoeModule::revoke_claim(Origin::signed(1), claim.clone()),
Error::<Test>::NotProofOwner
);
})
}
#[test]
fn transfer_claim_failed_when_is_not_transfer_owner() {
new_test_ext().execute_with(|| {
let claim = vec![0, 1];
let _ = PoeModule::create_claim(Origin::signed(1), claim.clone());
assert_noop!(
PoeModule::transfer_claim(Origin::signed(2), 3u64,claim.clone()),
Error::<Test>::NotProofOwner
);
})
}
#[test]
fn transfer_claim_failed_when_claim_no_exist() {
new_test_ext().execute_with(|| {
let claim = vec![0, 1];
let _ = PoeModule::create_claim(Origin::signed(1), claim.clone());
let claim_temp = vec![2, 3];
assert_noop!(
PoeModule::transfer_claim(Origin::signed(1),3u64, claim_temp.clone()),
Error::<Test>::NoSuchProof
);
})
}
#[test]
fn create_claim_failed_when_claim_too_long() {
new_test_ext().execute_with(|| {
let claim = vec![0, 1, 2, 3, 4, 5];
assert_noop!(
PoeModule::create_claim(Origin::signed(1), claim.clone()),
Error::<Test>::ProofTooLong
);
});
}
|
cute_with(|| {
let claim = vec![0, 1];
let _ = PoeModule::create_claim(Origin::signed(1), claim.clone());
assert_noop!(
PoeModule::revoke_claim(Origin::signed(2), claim.clone()),
Error::<Test>::NotProofOwner
);
})
}
#[test]
fn transfer_cl
|
array.rs
|
use super::SettingProps;
use serde_json as json;
use settings_schema::SchemaNode;
use yew::html;
use yew_functional::function_component;
#[function_component(Array)]
pub fn array(props: &SettingProps<Vec<SchemaNode>, Vec<json::Value>>) -> Html
|
{
html!("array")
}
|
|
programming-exercise.service.ts
|
import { Injectable } from '@angular/core';
import * as moment from 'moment';
import { HttpClient, HttpParams, HttpResponse } from '@angular/common/http';
import { Observable } from 'rxjs/Observable';
import { map } from 'rxjs/operators';
import { omit as _omit } from 'lodash';
import { SERVER_API_URL } from 'app/app.constants';
import { createRequestOption } from 'app/shared/util/request-util';
import { ExerciseService } from 'app/exercises/shared/exercise/exercise.service';
import { ProgrammingExercise } from 'app/entities/programming-exercise.model';
import { TemplateProgrammingExerciseParticipation } from 'app/entities/participation/template-programming-exercise-participation.model';
import { SolutionProgrammingExerciseParticipation } from 'app/entities/participation/solution-programming-exercise-participation.model';
import { Moment } from 'moment';
export type EntityResponseType = HttpResponse<ProgrammingExercise>;
export type EntityArrayResponseType = HttpResponse<ProgrammingExercise[]>;
export type ProgrammingExerciseTestCaseStateDTO = {
released: boolean;
hasStudentResult: boolean;
testCasesChanged: boolean;
buildAndTestStudentSubmissionsAfterDueDate?: Moment;
};
@Injectable({ providedIn: 'root' })
export class
|
{
public resourceUrl = SERVER_API_URL + 'api/programming-exercises';
constructor(private http: HttpClient, private exerciseService: ExerciseService) {}
/**
     * Sets up a new programming exercise
     * @param programmingExercise the exercise that should be set up
*/
automaticSetup(programmingExercise: ProgrammingExercise): Observable<EntityResponseType> {
const copy = this.convertDataFromClient(programmingExercise);
return this.http
.post<ProgrammingExercise>(this.resourceUrl + '/setup', copy, { observe: 'response' })
.pipe(map((res: EntityResponseType) => this.convertDateFromServer(res)));
}
/**
* Generates the structure oracle
* @param exerciseId of the programming exercise for which the structure oracle should be created
*/
generateStructureOracle(exerciseId: number): Observable<string> {
return this.http.put<string>(this.resourceUrl + '/' + exerciseId + '/generate-tests', { responseType: 'text' });
}
/**
* Check for plagiarism
* @param exerciseId of the programming exercise
*/
checkPlagiarism(exerciseId: number): Observable<HttpResponse<Blob>> {
return this.http.get(`${this.resourceUrl}/${exerciseId}/check-plagiarism`, {
observe: 'response',
responseType: 'blob',
});
}
/**
* Combines all commits of the template repository to one
* @param exerciseId of the particular programming exercise
*/
combineTemplateRepositoryCommits(exerciseId: number) {
return this.http.put(this.resourceUrl + '/' + exerciseId + '/combine-template-commits', { responseType: 'text' });
}
/**
     * Imports a programming exercise by cloning the entity itself plus all base build plans and repositories
* (template, solution, test).
*
* @param adaptedSourceProgrammingExercise The exercise that should be imported, including adapted values for the
* new exercise. E.g. with another title than the original exercise. Old
* values that should get discarded (like the old ID) will be handled by the
* server.
* @param recreateBuildPlans Option determining whether the build plans should be recreated or copied from the imported exercise
* @param updateTemplate Option determining whether the template files in the repositories should be updated
*/
importExercise(adaptedSourceProgrammingExercise: ProgrammingExercise, recreateBuildPlans: boolean, updateTemplate: boolean): Observable<EntityResponseType> {
const options = createRequestOption({ recreateBuildPlans, updateTemplate });
return this.http
.post<ProgrammingExercise>(`${this.resourceUrl}/import/${adaptedSourceProgrammingExercise.id}`, adaptedSourceProgrammingExercise, {
params: options,
observe: 'response',
})
.pipe(map((res: EntityResponseType) => this.exerciseService.convertDateFromServer(res)));
}
/**
* Updates an existing programming exercise
* @param programmingExercise which should be updated
* @param req optional request options
*/
update(programmingExercise: ProgrammingExercise, req?: any): Observable<EntityResponseType> {
const options = createRequestOption(req);
const copy = this.convertDataFromClient(programmingExercise);
return this.http
.put<ProgrammingExercise>(this.resourceUrl, copy, { params: options, observe: 'response' })
.pipe(map((res: EntityResponseType) => this.convertDateFromServer(res)));
}
/**
* Updates the problem statement
* @param programmingExerciseId of the programming exercise for which to change the problem statement
* @param problemStatement the new problem statement
* @param req optional request options
*/
updateProblemStatement(programmingExerciseId: number, problemStatement: string, req?: any) {
const options = createRequestOption(req);
return this.http
.patch<ProgrammingExercise>(`${this.resourceUrl}/${programmingExerciseId}/problem-statement`, problemStatement, { params: options, observe: 'response' })
.pipe(map((res: EntityResponseType) => this.convertDateFromServer(res)));
}
/**
* Finds the programming exercise for the given exerciseId
* @param programmingExerciseId of the programming exercise to retrieve
*/
find(programmingExerciseId: number): Observable<EntityResponseType> {
return this.http
.get<ProgrammingExercise>(`${this.resourceUrl}/${programmingExerciseId}`, { observe: 'response' })
.pipe(map((res: EntityResponseType) => this.convertDateFromServer(res)));
}
/**
     * Finds the programming exercise for the given exerciseId with the corresponding participations
* @param programmingExerciseId of the programming exercise to retrieve
*/
findWithTemplateAndSolutionParticipation(programmingExerciseId: number): Observable<EntityResponseType> {
return this.http
.get<ProgrammingExercise>(`${this.resourceUrl}/${programmingExerciseId}/with-participations`, { observe: 'response' })
.pipe(map((res: EntityResponseType) => this.convertDateFromServer(res)));
}
/**
     * Returns an entity with true in the body if there is a programming exercise with the given id, it is released (release date < now), and there is at least one student result.
*
* @param exerciseId ProgrammingExercise id
*/
getProgrammingExerciseTestCaseState(exerciseId: number): Observable<HttpResponse<ProgrammingExerciseTestCaseStateDTO>> {
return this.http.get<ProgrammingExerciseTestCaseStateDTO>(`${this.resourceUrl}/${exerciseId}/test-case-state`, { observe: 'response' });
}
/**
     * Retrieves all programming exercises matching the given query
* @param req optional request options
*/
query(req?: any): Observable<EntityArrayResponseType> {
const options = createRequestOption(req);
return this.http
.get<ProgrammingExercise[]>(this.resourceUrl, { params: options, observe: 'response' })
.pipe(map((res: EntityArrayResponseType) => this.exerciseService.convertDateArrayFromServer(res)));
}
/**
* Deletes the programming exercise with the corresponding programming exercise Id
* @param programmingExerciseId of the programming exercise to delete
* @param deleteStudentReposBuildPlans indicates if the StudentReposBuildPlans should be also deleted or not
* @param deleteBaseReposBuildPlans indicates if the BaseReposBuildPlans should be also deleted or not
*/
delete(programmingExerciseId: number, deleteStudentReposBuildPlans: boolean, deleteBaseReposBuildPlans: boolean): Observable<HttpResponse<{}>> {
let params = new HttpParams();
params = params.set('deleteStudentReposBuildPlans', deleteStudentReposBuildPlans.toString());
params = params.set('deleteBaseReposBuildPlans', deleteBaseReposBuildPlans.toString());
return this.http.delete(`${this.resourceUrl}/${programmingExerciseId}`, { params, observe: 'response' });
}
/**
* Converts the data from the client
* if template & solution participation exist removes the exercise and results from them
* @param exercise for which the data should be converted
*/
convertDataFromClient(exercise: ProgrammingExercise) {
const copy = {
...this.exerciseService.convertDateFromClient(exercise),
buildAndTestStudentSubmissionsAfterDueDate:
exercise.buildAndTestStudentSubmissionsAfterDueDate && moment(exercise.buildAndTestStudentSubmissionsAfterDueDate).isValid()
? moment(exercise.buildAndTestStudentSubmissionsAfterDueDate).toJSON()
: undefined,
};
// Remove exercise from template & solution participation to avoid circular dependency issues.
// Also remove the results, as they can have circular structures as well and don't have to be saved here.
if (copy.templateParticipation) {
copy.templateParticipation = _omit(copy.templateParticipation, ['exercise', 'results']) as TemplateProgrammingExerciseParticipation;
}
if (copy.solutionParticipation) {
copy.solutionParticipation = _omit(copy.solutionParticipation, ['exercise', 'results']) as SolutionProgrammingExerciseParticipation;
}
return copy;
}
/**
* Convert all date fields of the programming exercise to momentJs date objects.
* Note: This conversion could produce an invalid date if the date is malformatted.
*
* @param entity ProgrammingExercise
*/
convertDateFromServer(entity: EntityResponseType) {
const res = this.exerciseService.convertDateFromServer(entity);
if (!res.body) {
return res;
}
res.body.buildAndTestStudentSubmissionsAfterDueDate = res.body.buildAndTestStudentSubmissionsAfterDueDate
? moment(res.body.buildAndTestStudentSubmissionsAfterDueDate)
: undefined;
return res;
}
}
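// Usage sketch (illustrative; the injection site and variable names are assumed):
//
//   constructor(private programmingExerciseService: ProgrammingExerciseService) {}
//
//   // Import an exercise, recreating build plans but keeping the template files.
//   this.programmingExerciseService
//       .importExercise(adaptedExercise, true, false)
//       .subscribe((res) => console.log(res.body));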
|
ProgrammingExerciseService
|
scraper.py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time
import os
import sys
from glob import glob
import re
import json
import random
import shutil
import codecs
dones = [x for x in open("done.txt",'r').read().split("\n") if len(x)]
correct = json.loads(open("bad-corrected.json",'r').read())
# print(correct)
|
box = sys.argv[1]
print("DOING BOX: ",box)
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("safebrowsing-disable-extension-blacklist")
chrome_options.add_argument("--safebrowsing-disable-download-protection")
chrome_options.add_experimental_option("prefs", {'safebrowsing.enabled': 'false'})
filenames = [x.split(".")[0].split("/") for x in str(open("canonical_filename_order.txt",'r').read()).split("\n") if len(x)]
filenames = [x[1] for x in filenames if x[0] == box]
print("NUM STUFF IN BOX: ",len(filenames))
#filenames = [x.split("/")[1].split(".")[0] for x in str(open("canonical_filename_order.txt",'r').read()).split("\n") if len(x)]
def init_driver(path=os.path.join(os.getcwd(),"chromedriver")):
driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=path)
return driver
def parse_info_html(html):
url = html.split('href="')[1].split('">')[0]
creator = html.split('creator-link\">')[1].split('</a>')[0]
date = html.split('Date:</dt><dd class="search-result__value">')[1].split("</dd>")[0]
desc = html.split('">')[2].split('</a>')[0]
return url,desc,creator,date
def parse_accession_number(html):
return html.split('Accession number:')[1].split('object__attributes-value">')[1].split('</dd>')[0]
driver = init_driver()
time.sleep(3)
for idx,fname in enumerate(filenames):
if fname in dones:
print(fname,"is done, skip")
continue
print("now processing ",fname)
entry = ("no description","no date","no accession number","no object id")
try:
driver.get("https://collection.cmoa.org/?q="+fname)
search_results = []
trials = 0
while len(search_results) == 0:
time.sleep(3)
if (trials > 5):
print("give up")
break
print("trial ",trials)
search_results = driver.find_elements_by_class_name("search-result__info")
trials += 1
cands = []
for x in search_results:
html = x.get_attribute('innerHTML')
iurl,desc,creator,date = parse_info_html(html)
print(iurl,desc,creator,date)
if (fname in correct):
if correct[fname].split("/")[-1] != iurl.split("/")[-1]:
print("SKIPPING BECAUSE OF MANUAL LABEL", fname,iurl)
continue
if True or (u"Teenie" in creator):
driver.get("https://collection.cmoa.org"+iurl);
time.sleep(2)
obj = driver.find_elements_by_class_name("object")[1].get_attribute('innerHTML')
# print(obj)
acc = parse_accession_number(obj)
print(acc)
cands.append((desc,date,acc,iurl.split("/")[-1]))
if (len(cands) > 1):
entry = cands[0]
print("WARNING!!!!!! MULIPLE POSSIBLE RESULTS FOUND!!! MANUAL CHECK!!!", fname)
elif (len(cands) == 0):
print("WARNING!!!!!! NO RELAVENT RESULT FOUND!!! MANUAL CHECK!!!", fname)
else:
entry = cands[0]
print("ENTRY:",fname,entry)
    except Exception:
        print("ERROR: unexpected failure while processing", fname)
        print(sys.exc_info())
codecs.open("out/"+box+".txt",'a+',encoding='utf8').write(fname+"\t"+entry[0]+"\t"+entry[1]+"\t"+entry[2]+"\t"+entry[3]+"\n")
| |
richards.py
|
# based on a Java version:
# Based on original version written in BCPL by Dr Martin Richards
# in 1981 at Cambridge University Computer Laboratory, England
# and a C++ version derived from a Smalltalk version written by
# L Peter Deutsch.
# Java version: Copyright (C) 1995 Sun Microsystems, Inc.
# Translation from C++, Mario Wolczko
# Outer loop added by Alex Jacoby
# Task IDs
I_IDLE = 1
I_WORK = 2
I_HANDLERA = 3
I_HANDLERB = 4
I_DEVA = 5
I_DEVB = 6
# Packet types
K_DEV = 1000
K_WORK = 1001
# Packet
BUFSIZE = 4
BUFSIZE_RANGE = range(BUFSIZE)
class Packet(object):
def __init__(self,l,i,k):
self.link = l
self.ident = i
self.kind = k
self.datum = 0
self.data = [0] * BUFSIZE
def append_to(self,lst):
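        # Append this packet to the tail of the singly linked list `lst`
        # (chained through .link) and return the head of the list.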
self.link = None
if lst is None:
return self
else:
p = lst
next = p.link
while next is not None:
p = next
next = p.link
p.link = self
return lst
# Task Records
class TaskRec(object):
pass
class DeviceTaskRec(TaskRec):
def __init__(self):
self.pending = None
class IdleTaskRec(TaskRec):
def __init__(self):
self.control = 1
self.count = 10000
class HandlerTaskRec(TaskRec):
def __init__(self):
self.work_in = None
self.device_in = None
def workInAdd(self,p):
self.work_in = p.append_to(self.work_in)
return self.work_in
def deviceInAdd(self,p):
self.device_in = p.append_to(self.device_in)
return self.device_in
class WorkerTaskRec(TaskRec):
def __init__(self):
self.destination = I_HANDLERA
self.count = 0
# Task
class TaskState(object):
def __init__(self):
self.packet_pending = True
self.task_waiting = False
self.task_holding = False
def packetPending(self):
self.packet_pending = True
self.task_waiting = False
self.task_holding = False
return self
def waiting(self):
self.packet_pending = False
self.task_waiting = True
self.task_holding = False
return self
def running(self):
self.packet_pending = False
self.task_waiting = False
self.task_holding = False
return self
def waitingWithPacket(self):
self.packet_pending = True
self.task_waiting = True
self.task_holding = False
return self
def isPacketPending(self):
return self.packet_pending
def isTaskWaiting(self):
return self.task_waiting
def isTaskHolding(self):
return self.task_holding
def isTaskHoldingOrWaiting(self):
return self.task_holding or (not self.packet_pending and self.task_waiting)
def isWaitingWithPacket(self):
return self.packet_pending and self.task_waiting and not self.task_holding
tracing = False
layout = 0
def trace(a):
global layout
layout -= 1
if layout <= 0:
print
layout = 50
print a,
TASKTABSIZE = 10
class TaskWorkArea(object):
def __init__(self):
self.taskTab = [None] * TASKTABSIZE
self.taskList = None
self.holdCount = 0
self.qpktCount = 0
taskWorkArea = TaskWorkArea()
class Task(TaskState):
def __init__(self,i,p,w,initialState,r):
self.link = taskWorkArea.taskList
self.ident = i
self.priority = p
self.input = w
self.packet_pending = initialState.isPacketPending()
self.task_waiting = initialState.isTaskWaiting()
self.task_holding = initialState.isTaskHolding()
self.handle = r
taskWorkArea.taskList = self
taskWorkArea.taskTab[i] = self
def fn(self,pkt,r):
raise NotImplementedError
def addPacket(self,p,old):
if self.input is None:
self.input = p
self.packet_pending = True
if self.priority > old.priority:
return self
else:
p.append_to(self.input)
return old
def runTask(self):
if self.isWaitingWithPacket():
msg = self.input
self.input = msg.link
if self.input is None:
self.running()
else:
self.packetPending()
else:
msg = None
return self.fn(msg,self.handle)
def waitTask(self):
self.task_waiting = True
return self
def hold(self):
taskWorkArea.holdCount += 1
self.task_holding = True
return self.link
def release(self,i):
t = self.findtcb(i)
t.task_holding = False
if t.priority > self.priority:
return t
else:
return self
def qpkt(self,pkt):
t = self.findtcb(pkt.ident)
taskWorkArea.qpktCount += 1
pkt.link = None
pkt.ident = self.ident
return t.addPacket(pkt,self)
def
|
(self,id):
t = taskWorkArea.taskTab[id]
if t is None:
raise Exception("Bad task id %d" % id)
return t
# DeviceTask
class DeviceTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,p,w,s,r)
def fn(self,pkt,r):
d = r
assert isinstance(d, DeviceTaskRec)
if pkt is None:
pkt = d.pending
if pkt is None:
return self.waitTask()
else:
d.pending = None
return self.qpkt(pkt)
else:
d.pending = pkt
if tracing: trace(pkt.datum)
return self.hold()
class HandlerTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,p,w,s,r)
def fn(self,pkt,r):
h = r
assert isinstance(h, HandlerTaskRec)
if pkt is not None:
if pkt.kind == K_WORK:
h.workInAdd(pkt)
else:
h.deviceInAdd(pkt)
work = h.work_in
if work is None:
return self.waitTask()
count = work.datum
if count >= BUFSIZE:
h.work_in = work.link
return self.qpkt(work)
dev = h.device_in
if dev is None:
return self.waitTask()
h.device_in = dev.link
dev.datum = work.data[count]
work.datum = count + 1
return self.qpkt(dev)
# IdleTask
class IdleTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,0,None,s,r)
def fn(self,pkt,r):
i = r
assert isinstance(i, IdleTaskRec)
i.count -= 1
if i.count == 0:
return self.hold()
elif i.control & 1 == 0:
i.control /= 2
return self.release(I_DEVA)
else:
i.control = i.control/2 ^ 0xd008
return self.release(I_DEVB)
# WorkTask
A = ord('A')
class WorkTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,p,w,s,r)
def fn(self,pkt,r):
w = r
assert isinstance(w, WorkerTaskRec)
if pkt is None:
return self.waitTask()
if w.destination == I_HANDLERA:
dest = I_HANDLERB
else:
dest = I_HANDLERA
w.destination = dest
pkt.ident = dest
pkt.datum = 0
for i in BUFSIZE_RANGE: # xrange(BUFSIZE)
w.count += 1
if w.count > 26:
w.count = 1
pkt.data[i] = A + w.count - 1
return self.qpkt(pkt)
import time
def schedule():
t = taskWorkArea.taskList
while t is not None:
pkt = None
if tracing:
print "tcb =",t.ident
if t.isTaskHoldingOrWaiting():
t = t.link
else:
if tracing: trace(chr(ord("0")+t.ident))
t = t.runTask()
class Richards(object):
def run(self, iterations):
for i in xrange(iterations):
taskWorkArea.holdCount = 0
taskWorkArea.qpktCount = 0
IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec())
wkq = Packet(None, 0, K_WORK)
wkq = Packet(wkq , 0, K_WORK)
WorkTask(I_WORK, 1000, wkq, TaskState().waitingWithPacket(), WorkerTaskRec())
wkq = Packet(None, I_DEVA, K_DEV)
wkq = Packet(wkq , I_DEVA, K_DEV)
wkq = Packet(wkq , I_DEVA, K_DEV)
HandlerTask(I_HANDLERA, 2000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec())
wkq = Packet(None, I_DEVB, K_DEV)
wkq = Packet(wkq , I_DEVB, K_DEV)
wkq = Packet(wkq , I_DEVB, K_DEV)
HandlerTask(I_HANDLERB, 3000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec())
wkq = None;
DeviceTask(I_DEVA, 4000, wkq, TaskState().waiting(), DeviceTaskRec());
DeviceTask(I_DEVB, 5000, wkq, TaskState().waiting(), DeviceTaskRec());
schedule()
if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246:
pass
else:
return False
return True
def entry_point(iterations):
r = Richards()
startTime = time.time()
result = r.run(iterations)
endTime = time.time()
return result, startTime, endTime
def main(entry_point = entry_point, iterations = 10):
print "Richards benchmark (Python) starting... [%r]" % entry_point
result, startTime, endTime = entry_point(iterations)
if not result:
print "Incorrect results!"
return -1
print "finished."
total_s = endTime - startTime
print "Total time for %d iterations: %.2f secs" %(iterations,total_s)
print "Average time per iteration: %.2f ms" %(total_s*1000/iterations)
return 42
try:
import sys
if '-nojit' in sys.argv:
sys.argv.remove('-nojit')
raise ImportError
import pypyjit
except ImportError:
pass
else:
import types
for item in globals().values():
if isinstance(item, types.FunctionType):
pypyjit.enable(item.func_code)
elif isinstance(item, type):
for it in item.__dict__.values():
if isinstance(it, types.FunctionType):
pypyjit.enable(it.func_code)
if __name__ == '__main__':
import sys
if len(sys.argv) >= 2:
main(iterations = int(sys.argv[1]))
else:
main()
|
findtcb
|
s0162_find_peak_element.rs
|
/**
* [162] Find Peak Element
*
* A peak element is an element that is strictly greater than its neighbors.
* Given an integer array nums, find a peak element, and return its index. If the array contains multiple peaks, return the index to any of the peaks.
* You may imagine that nums[-1] = nums[n] = -∞.
* You must write an algorithm that runs in O(log n) time.
*
* Example 1:
*
* Input: nums = [1,2,3,1]
* Output: 2
* Explanation: 3 is a peak element and your function should return the index number 2.
* Example 2:
*
* Input: nums = [1,2,1,3,5,6,4]
* Output: 5
* Explanation: Your function can return either index number 1 where the peak element is 2, or index number 5 where the peak element is 6.
*
* Constraints:
*
* 1 <= nums.length <= 1000
* -2^31 <= nums[i] <= 2^31 - 1
* nums[i] != nums[i + 1] for all valid i.
*
*/
pub struct Solution {}
// problem: https://leetcode.com/problems/find-peak-element/
// discuss: https://leetcode.com/problems/find-peak-element/discuss/?currentPage=1&orderBy=most_votes&query=
// submission codes start here
impl Solution {
pub fn find_peak_element(nums: Vec<i32>) -> i32
|
}
// submission codes end
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_0162_example_1() {
let nums = vec![1, 2, 3, 1];
let result = 2;
assert_eq!(Solution::find_peak_element(nums), result);
}
#[test]
fn test_0162_example_2() {
let nums = vec![1, 2, 1, 3, 5, 6, 4];
let result = 5;
assert_eq!(Solution::find_peak_element(nums), result);
}
}
|
{
let mut low = 0;
let mut high = nums.len() - 1;
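        // Binary search on the slope: if nums[mid] < nums[mid + 1], a peak
        // must lie strictly to the right of mid; otherwise a peak lies at or
        // to the left of mid. The window shrinks until low == high, which is
        // a peak index.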
while low < high {
let mid = (low + (high - low) / 2) as usize;
if nums[mid] < nums[mid + 1] {
low = mid + 1;
} else {
high = mid;
}
}
low as i32
}
|
test_main.py
|
from datetime import datetime
from filetime import from_datetime, to_datetime, utc
def test_from_datetime():
|
def test_to_datetime():
assert to_datetime(116444736000000000) == datetime(1970, 1, 1, 0, 0)
assert to_datetime(128930364000000000) == datetime(2009, 7, 25, 23, 0)
assert to_datetime(128930364000001000) == datetime(2009, 7, 25, 23, 0, 0, 100)
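    # FILETIME counts 100-nanosecond ticks since 1601-01-01T00:00Z; the offset
    # to the Unix epoch is 11_644_473_600 s * 10**7 = 116_444_736_000_000_000
    # ticks, which is the constant asserted above.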
|
assert from_datetime(datetime(2009, 7, 25, 23, 0)) == 128930364000000000
assert from_datetime(datetime(1970, 1, 1, 0, 0, tzinfo=utc)) == 116444736000000000
assert from_datetime(datetime(1970, 1, 1, 0, 0)) == 116444736000000000
assert from_datetime(datetime(2009, 7, 25, 23, 0, 0, 100)) == 128930364000001000
|
StringAttr.py
|
from .BasicTypeAttr import BasicTypeAttr
|
def __init__(self, attr):
BasicTypeAttr.__init__(self, attr)
if self.get('Max') is not None:
self['Max'] = int(self['Max'])
if self.get('Min') is not None:
self['Min'] = int(self['Min'])
def printWarnings(self, out):
if self.get('Max') in (None, '') and not self.get('SQLType'):
out.write('warning: model %s: class %s: attr %s: max string length unspecified\n' % (
self.model().name(), self.klass().name(), self.name()))
|
class StringAttr(BasicTypeAttr):
|
day06.rs
|
use anyhow::Result;
use hashbrown::HashSet;
use libaoc::{aoc, AocResult, Timer};
use regex::Regex;
#[aoc("7283", "3520")]
pub fn
|
(timer: &mut Timer, input: &str) -> Result<AocResult> {
let line = Regex::new(r"(\r?\n){2}")?;
let chars1 = line
.split(&input)
.map(|x| x.chars().filter(|&x| x != '\n' && x != '\r'));
timer.lap("Parse part 1");
let chars2 = line.split(&input).map(|x| {
x.lines()
.map(|x| x.chars().collect())
.collect::<Vec<Vec<_>>>()
});
timer.lap("Parse part 2");
let part1 = chars1.fold(0, |acc, group| group.collect::<HashSet<char>>().len() + acc);
timer.lap("Part 1");
let part2 = chars2.fold(0, |acc, group| {
group
.iter()
.map(|x| x.iter().copied().collect())
.fold(
group[0].iter().copied().collect(),
|a, person: HashSet<char>| &a & &person,
)
.len()
+ acc
});
timer.lap("Part 2");
Ok(AocResult::new(part1, part2))
}
|
solve
|
models.go
|
// +build go1.9
// Copyright 2018 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This code was auto-generated by:
// github.com/Azure/azure-sdk-for-go/tools/profileBuilder
package links
import (
"context"
original "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-09-01/links"
)
const (
DefaultBaseURI = original.DefaultBaseURI
)
type BaseClient = original.BaseClient
type Filter = original.Filter
const (
AtScope Filter = original.AtScope
)
type Operation = original.Operation
type OperationDisplay = original.OperationDisplay
type OperationListResult = original.OperationListResult
type OperationListResultIterator = original.OperationListResultIterator
type OperationListResultPage = original.OperationListResultPage
type ResourceLink = original.ResourceLink
type ResourceLinkFilter = original.ResourceLinkFilter
type ResourceLinkProperties = original.ResourceLinkProperties
type ResourceLinkResult = original.ResourceLinkResult
type ResourceLinkResultIterator = original.ResourceLinkResultIterator
type ResourceLinkResultPage = original.ResourceLinkResultPage
type OperationsClient = original.OperationsClient
type ResourceLinksClient = original.ResourceLinksClient
func New(subscriptionID string) BaseClient {
return original.New(subscriptionID)
}
func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
return original.NewWithBaseURI(baseURI, subscriptionID)
}
func PossibleFilterValues() []Filter {
return original.PossibleFilterValues()
}
func NewOperationListResultIterator(page OperationListResultPage) OperationListResultIterator {
return original.NewOperationListResultIterator(page)
}
func NewOperationListResultPage(getNextPage func(context.Context, OperationListResult) (OperationListResult, error)) OperationListResultPage {
return original.NewOperationListResultPage(getNextPage)
}
func NewResourceLinkResultIterator(page ResourceLinkResultPage) ResourceLinkResultIterator {
return original.NewResourceLinkResultIterator(page)
}
func NewResourceLinkResultPage(getNextPage func(context.Context, ResourceLinkResult) (ResourceLinkResult, error)) ResourceLinkResultPage {
return original.NewResourceLinkResultPage(getNextPage)
}
func NewOperationsClient(subscriptionID string) OperationsClient {
return original.NewOperationsClient(subscriptionID)
}
func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
return original.NewOperationsClientWithBaseURI(baseURI, subscriptionID)
}
func NewResourceLinksClient(subscriptionID string) ResourceLinksClient {
|
}
func UserAgent() string {
return original.UserAgent() + " profiles/preview"
}
func Version() string {
return original.Version()
}
|
return original.NewResourceLinksClient(subscriptionID)
}
func NewResourceLinksClientWithBaseURI(baseURI string, subscriptionID string) ResourceLinksClient {
return original.NewResourceLinksClientWithBaseURI(baseURI, subscriptionID)
|
describe_test.go
|
// Copyright © 2019 The Tekton Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pipelinerun
import (
"fmt"
"strings"
"testing"
"time"
"github.com/jonboulle/clockwork"
"github.com/tektoncd/cli/pkg/test"
cb "github.com/tektoncd/cli/pkg/test/builder"
testDynamic "github.com/tektoncd/cli/pkg/test/dynamic"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources"
pipelinev1beta1test "github.com/tektoncd/pipeline/test"
tb "github.com/tektoncd/pipeline/test/builder"
pipelinetest "github.com/tektoncd/pipeline/test/v1alpha1"
"gotest.tools/v3/golden"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1"
)
func TestPipelineRunDescribe_invalid_namespace(t *testing.T) {
ns := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
cs, _ := test.SeedTestData(t, pipelinetest.Data{Namespaces: ns})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube}
pipelinerun := Command(p)
_, err := test.ExecuteCommand(pipelinerun, "desc", "bar", "-n", "invalid")
if err == nil {
t.Errorf("Expected error for invalid namespace")
}
expected := "namespaces \"invalid\" not found"
test.AssertOutput(t, expected, err.Error())
}
func TestPipelineRunDescribe_not_found(t *testing.T) {
ns := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
cs, _ := test.SeedTestData(t, pipelinetest.Data{Namespaces: ns})
cs.Pipeline.Resources = cb.APIResourceList("v1alpha1", []string{"pipelinerun"})
tdc := testDynamic.Options{}
dynamic, err := tdc.Client()
if err != nil {
fmt.Println(err)
}
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic}
pipelinerun := Command(p)
_, err = test.ExecuteCommand(pipelinerun, "desc", "bar", "-n", "ns")
if err == nil {
t.Errorf("Expected error, did not get any")
}
expected := "failed to find pipelinerun \"bar\""
test.AssertOutput(t, expected, err.Error())
}
func TestPipelineRunDescribe_only_taskrun(t *testing.T) {
clock := clockwork.NewFakeClock()
trs := []*v1alpha1.TaskRun{
tb.TaskRun("tr-1", "ns",
tb.TaskRunStatus(
tb.TaskRunStartTime(clock.Now().Add(2*time.Minute)),
cb.TaskRunCompletionTime(clock.Now().Add(5*time.Minute)),
tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
}),
),
),
}
pipelineRuns := []*v1alpha1.PipelineRun{
tb.PipelineRun("pipeline-run", "ns",
cb.PipelineRunCreationTimestamp(clock.Now()),
tb.PipelineRunLabel("tekton.dev/pipeline", "pipeline"),
tb.PipelineRunSpec("pipeline"),
tb.PipelineRunStatus(
tb.PipelineRunTaskRunsStatus("tr-1", &v1alpha1.PipelineRunTaskRunStatus{
PipelineTaskName: "t-1",
Status: &trs[0].Status,
}),
tb.PipelineRunStatusCondition(apis.Condition{
Status: corev1.ConditionTrue,
Reason: resources.ReasonSucceeded,
}),
tb.PipelineRunStartTime(clock.Now()),
cb.PipelineRunCompletionTime(clock.Now().Add(5*time.Minute)),
),
),
}
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
version := "v1alpha1"
tdc := testDynamic.Options{}
dynamic, err := tdc.Client(
cb.UnstructuredPR(pipelineRuns[0], version),
cb.UnstructuredTR(trs[0], version),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
cs, _ := test.SeedTestData(t, pipelinetest.Data{Namespaces: namespaces, PipelineRuns: pipelineRuns,
TaskRuns: trs,
})
cs.Pipeline.Resources = cb.APIResourceList(version, []string{"pipelinerun", "taskrun"})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic, Clock: clock}
pipelinerun := Command(p)
clock.Advance(10 * time.Minute)
actual, err := test.ExecuteCommand(pipelinerun, "desc", "pipeline-run", "-n", "ns")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
golden.Assert(t, actual, fmt.Sprintf("%s.golden", t.Name()))
}
func TestPipelineRunDescribe_multiple_taskrun_ordering(t *testing.T) {
clock := clockwork.NewFakeClock()
trs := []*v1alpha1.TaskRun{
tb.TaskRun("tr-1", "ns",
tb.TaskRunStatus(
tb.TaskRunStartTime(clock.Now().Add(2*time.Minute)),
cb.TaskRunCompletionTime(clock.Now().Add(5*time.Minute)),
tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
}),
),
),
tb.TaskRun("tr-2", "ns",
tb.TaskRunStatus(
tb.TaskRunStartTime(clock.Now().Add(5*time.Minute)),
cb.TaskRunCompletionTime(clock.Now().Add(9*time.Minute)),
tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
}),
),
),
}
pipelineRuns := []*v1alpha1.PipelineRun{
tb.PipelineRun("pipeline-run", "ns",
cb.PipelineRunCreationTimestamp(clock.Now()),
tb.PipelineRunLabel("tekton.dev/pipeline", "pipeline"),
tb.PipelineRunSpec("pipeline"),
tb.PipelineRunStatus(
tb.PipelineRunTaskRunsStatus("tr-1", &v1alpha1.PipelineRunTaskRunStatus{
PipelineTaskName: "t-1",
Status: &trs[0].Status,
}),
tb.PipelineRunTaskRunsStatus("tr-2", &v1alpha1.PipelineRunTaskRunStatus{
PipelineTaskName: "t-2",
Status: &trs[1].Status,
}),
tb.PipelineRunStatusCondition(apis.Condition{
Status: corev1.ConditionTrue,
Reason: resources.ReasonSucceeded,
}),
tb.PipelineRunStartTime(clock.Now()),
cb.PipelineRunCompletionTime(clock.Now().Add(15*time.Minute)),
),
),
}
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
version := "v1alpha1"
tdc := testDynamic.Options{}
dynamic, err := tdc.Client(
cb.UnstructuredPR(pipelineRuns[0], version),
cb.UnstructuredTR(trs[0], version),
cb.UnstructuredTR(trs[1], version),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
cs, _ := test.SeedTestData(t, pipelinetest.Data{Namespaces: namespaces, PipelineRuns: pipelineRuns,
TaskRuns: trs,
})
cs.Pipeline.Resources = cb.APIResourceList(version, []string{"pipelinerun", "taskrun"})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic, Clock: clock}
pipelinerun := Command(p)
clock.Advance(10 * time.Minute)
actual, err := test.ExecuteCommand(pipelinerun, "desc", "pipeline-run", "-n", "ns")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
golden.Assert(t, actual, fmt.Sprintf("%s.golden", t.Name()))
}
func TestPipelineRunDescribe_failed(t *testing.T) {
clock := clockwork.NewFakeClock()
trs := []*v1alpha1.TaskRun{
tb.TaskRun("tr-1", "ns",
tb.TaskRunStatus(
tb.TaskRunStartTime(clock.Now().Add(2*time.Minute)),
cb.TaskRunCompletionTime(clock.Now().Add(5*time.Minute)),
tb.StatusCondition(apis.Condition{
Status: corev1.ConditionFalse,
Reason: resources.ReasonFailed,
Message: "Testing tr failed",
}),
),
),
}
pipelineRuns := []*v1alpha1.PipelineRun{
tb.PipelineRun("pipeline-run", "ns",
cb.PipelineRunCreationTimestamp(clock.Now()),
tb.PipelineRunLabel("tekton.dev/pipeline", "pipeline"),
tb.PipelineRunSpec("pipeline",
tb.PipelineRunServiceAccountName("test-sa"),
),
tb.PipelineRunStatus(
tb.PipelineRunTaskRunsStatus("tr-1", &v1alpha1.PipelineRunTaskRunStatus{
PipelineTaskName: "t-1",
Status: &trs[0].Status,
}),
tb.PipelineRunStatusCondition(apis.Condition{
Status: corev1.ConditionFalse,
Reason: "Resource not found",
Message: "Resource test-resource not found in the pipelinerun",
}),
tb.PipelineRunStartTime(clock.Now()),
cb.PipelineRunCompletionTime(clock.Now().Add(5*time.Minute)),
),
),
}
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
version := "v1alpha1"
tdc := testDynamic.Options{}
dynamic, err := tdc.Client(
cb.UnstructuredPR(pipelineRuns[0], version),
cb.UnstructuredTR(trs[0], version),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
cs, _ := test.SeedTestData(t, pipelinetest.Data{Namespaces: namespaces, PipelineRuns: pipelineRuns,
TaskRuns: trs,
})
cs.Pipeline.Resources = cb.APIResourceList(version, []string{"pipelinerun", "taskrun"})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic, Clock: clock}
pipelinerun := Command(p)
clock.Advance(10 * time.Minute)
actual, err := test.ExecuteCommand(pipelinerun, "desc", "pipeline-run", "-n", "ns")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
golden.Assert(t, actual, fmt.Sprintf("%s.golden", t.Name()))
}
func TestPipelineRunDescribe_failed_withoutTRCondition(t *testing.T) {
clock := clockwork.NewFakeClock()
trs := []*v1alpha1.TaskRun{
tb.TaskRun("tr-1", "ns",
tb.TaskRunStatus(
tb.TaskRunStartTime(clock.Now().Add(2*time.Minute)),
cb.TaskRunCompletionTime(clock.Now().Add(5*time.Minute)),
),
),
}
pipelineRuns := []*v1alpha1.PipelineRun{
tb.PipelineRun("pipeline-run", "ns",
cb.PipelineRunCreationTimestamp(clock.Now()),
tb.PipelineRunLabel("tekton.dev/pipeline", "pipeline"),
tb.PipelineRunSpec("pipeline",
tb.PipelineRunServiceAccountName("test-sa"),
),
tb.PipelineRunStatus(
tb.PipelineRunTaskRunsStatus("tr-1", &v1alpha1.PipelineRunTaskRunStatus{
PipelineTaskName: "t-1",
Status: &trs[0].Status,
}),
tb.PipelineRunStatusCondition(apis.Condition{
Status: corev1.ConditionFalse,
Reason: "Resource not found",
Message: "Resource test-resource not found in the pipelinerun",
}),
tb.PipelineRunStartTime(clock.Now()),
cb.PipelineRunCompletionTime(clock.Now().Add(5*time.Minute)),
),
),
}
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
version := "v1alpha1"
tdc := testDynamic.Options{}
dynamic, err := tdc.Client(
cb.UnstructuredPR(pipelineRuns[0], version),
cb.UnstructuredTR(trs[0], version),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
cs, _ := test.SeedTestData(t, pipelinetest.Data{Namespaces: namespaces, PipelineRuns: pipelineRuns,
TaskRuns: trs,
})
cs.Pipeline.Resources = cb.APIResourceList(version, []string{"pipelinerun", "taskrun"})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic, Clock: clock}
pipelinerun := Command(p)
clock.Advance(10 * time.Minute)
actual, err := test.ExecuteCommand(pipelinerun, "desc", "pipeline-run", "-n", "ns")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
golden.Assert(t, actual, fmt.Sprintf("%s.golden", t.Name()))
}
func TestPipelineRunDescribe_failed_withoutPRCondition(t *testing.T) {
clock := clockwork.NewFakeClock()
trs := []*v1alpha1.TaskRun{
tb.TaskRun("tr-1", "ns",
tb.TaskRunStatus(
tb.TaskRunStartTime(clock.Now().Add(2*time.Minute)),
cb.TaskRunCompletionTime(clock.Now().Add(5*time.Minute)),
),
),
}
pipelineRuns := []*v1alpha1.PipelineRun{
tb.PipelineRun("pipeline-run", "ns",
cb.PipelineRunCreationTimestamp(clock.Now()),
tb.PipelineRunLabel("tekton.dev/pipeline", "pipeline"),
tb.PipelineRunSpec("pipeline",
tb.PipelineRunServiceAccountName("test-sa"),
),
tb.PipelineRunStatus(
tb.PipelineRunTaskRunsStatus("tr-1", &v1alpha1.PipelineRunTaskRunStatus{
PipelineTaskName: "t-1",
Status: &trs[0].Status,
}),
tb.PipelineRunStartTime(clock.Now()),
cb.PipelineRunCompletionTime(clock.Now().Add(5*time.Minute)),
),
),
}
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
version := "v1alpha1"
tdc := testDynamic.Options{}
dynamic, err := tdc.Client(
cb.UnstructuredPR(pipelineRuns[0], version),
cb.UnstructuredTR(trs[0], version),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
cs, _ := test.SeedTestData(t, pipelinetest.Data{Namespaces: namespaces, PipelineRuns: pipelineRuns,
TaskRuns: trs,
})
cs.Pipeline.Resources = cb.APIResourceList(version, []string{"pipelinerun", "taskrun"})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic, Clock: clock}
pipelinerun := Command(p)
clock.Advance(10 * time.Minute)
actual, err := test.ExecuteCommand(pipelinerun, "desc", "pipeline-run", "-n", "ns")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
golden.Assert(t, actual, fmt.Sprintf("%s.golden", t.Name()))
}
func TestPipelineRunDescribe_with_resources_taskrun(t *testing.T) {
clock := clockwork.NewFakeClock()
trs := []*v1alpha1.TaskRun{
tb.TaskRun("tr-1", "ns",
tb.TaskRunStatus(
tb.TaskRunStartTime(clock.Now().Add(2*time.Minute)),
cb.TaskRunCompletionTime(clock.Now().Add(5*time.Minute)),
tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
}),
),
),
}
pipelineRuns := []*v1alpha1.PipelineRun{
tb.PipelineRun("pipeline-run", "ns",
cb.PipelineRunCreationTimestamp(clock.Now()),
tb.PipelineRunLabel("tekton.dev/pipeline", "pipeline"),
tb.PipelineRunSpec("pipeline",
tb.PipelineRunServiceAccountName("test-sa"),
tb.PipelineRunParam("test-param", "param-value"),
tb.PipelineRunResourceBinding("test-resource",
tb.PipelineResourceBindingRef("test-resource-ref"),
),
),
tb.PipelineRunStatus(
tb.PipelineRunTaskRunsStatus("tr-1", &v1alpha1.PipelineRunTaskRunStatus{
PipelineTaskName: "t-1",
Status: &trs[0].Status,
}),
tb.PipelineRunStatusCondition(apis.Condition{
Status: corev1.ConditionTrue,
Reason: resources.ReasonSucceeded,
}),
tb.PipelineRunStartTime(clock.Now()),
cb.PipelineRunCompletionTime(clock.Now().Add(5*time.Minute)),
),
),
}
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
version := "v1alpha1"
tdc := testDynamic.Options{}
dynamic, err := tdc.Client(
cb.UnstructuredPR(pipelineRuns[0], version),
cb.UnstructuredTR(trs[0], version),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
cs, _ := test.SeedTestData(t, pipelinetest.Data{Namespaces: namespaces, PipelineRuns: pipelineRuns,
TaskRuns: trs,
})
cs.Pipeline.Resources = cb.APIResourceList(version, []string{"pipelinerun", "taskrun"})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic, Clock: clock}
pipelinerun := Command(p)
clock.Advance(10 * time.Minute)
actual, err := test.ExecuteCommand(pipelinerun, "desc", "pipeline-run", "-n", "ns")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
golden.Assert(t, actual, fmt.Sprintf("%s.golden", t.Name()))
}
func TestPipelineRunDescribe_without_start_time(t *testing.T) {
clock := clockwork.NewFakeClock()
pipelineRuns := []*v1alpha1.PipelineRun{
tb.PipelineRun("pipeline-run", "ns",
cb.PipelineRunCreationTimestamp(clock.Now()),
tb.PipelineRunLabel("tekton.dev/pipeline", "pipeline"),
tb.PipelineRunSpec("pipeline"),
tb.PipelineRunStatus(),
),
}
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
version := "v1alpha1"
tdc := testDynamic.Options{}
dynamic, err := tdc.Client(
cb.UnstructuredPR(pipelineRuns[0], version),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
cs, _ := test.SeedTestData(t, pipelinetest.Data{Namespaces: namespaces, PipelineRuns: pipelineRuns})
cs.Pipeline.Resources = cb.APIResourceList(version, []string{"pipelinerun", "taskrun"})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic, Clock: clock}
pipelinerun := Command(p)
clock.Advance(10 * time.Minute)
actual, err := test.ExecuteCommand(pipelinerun, "desc", "pipeline-run", "-n", "ns")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
golden.Assert(t, actual, fmt.Sprintf("%s.golden", t.Name()))
}
func TestPipelineRunDescribe_without_pipelineref(t *testing.T) {
clock := clockwork.NewFakeClock()
pipelineRuns := []*v1alpha1.PipelineRun{
tb.PipelineRun("pipeline-run", "ns",
cb.PipelineRunCreationTimestamp(clock.Now()),
tb.PipelineRunLabel("tekton.dev/pipeline", "pipeline"),
tb.PipelineRunStatus(),
),
}
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
version := "v1alpha1"
tdc := testDynamic.Options{}
dynamic, err := tdc.Client(
cb.UnstructuredPR(pipelineRuns[0], version),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
cs, _ := test.SeedTestData(t, pipelinetest.Data{Namespaces: namespaces, PipelineRuns: pipelineRuns})
cs.Pipeline.Resources = cb.APIResourceList(version, []string{"pipelinerun", "taskrun"})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic, Clock: clock}
pipelinerun := Command(p)
clock.Advance(10 * time.Minute)
actual, err := test.ExecuteCommand(pipelinerun, "desc", "pipeline-run", "-n", "ns")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
golden.Assert(t, actual, fmt.Sprintf("%s.golden", t.Name()))
}
func TestPipelineRunDescribe_no_resourceref(t *testing.T) {
clock := clockwork.NewFakeClock()
trs := []*v1alpha1.TaskRun{
tb.TaskRun("tr-1", "ns",
tb.TaskRunStatus(
tb.TaskRunStartTime(clock.Now().Add(2*time.Minute)),
cb.TaskRunCompletionTime(clock.Now().Add(5*time.Minute)),
tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
}),
),
),
}
pipelineRuns := []*v1alpha1.PipelineRun{
tb.PipelineRun("pipeline-run", "ns",
cb.PipelineRunCreationTimestamp(clock.Now()),
tb.PipelineRunLabel("tekton.dev/pipeline", "pipeline"),
tb.PipelineRunSpec("pipeline",
tb.PipelineRunServiceAccountName("test-sa"),
tb.PipelineRunParam("test-param", "param-value"),
tb.PipelineRunResourceBinding("test-resource"),
),
tb.PipelineRunStatus(
tb.PipelineRunTaskRunsStatus("tr-1", &v1alpha1.PipelineRunTaskRunStatus{
PipelineTaskName: "t-1",
Status: &trs[0].Status,
}),
tb.PipelineRunStatusCondition(apis.Condition{
Status: corev1.ConditionTrue,
Reason: resources.ReasonSucceeded,
}),
tb.PipelineRunStartTime(clock.Now()),
cb.PipelineRunCompletionTime(clock.Now().Add(5*time.Minute)),
),
),
}
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
version := "v1alpha1"
tdc := testDynamic.Options{}
dynamic, err := tdc.Client(
cb.UnstructuredPR(pipelineRuns[0], version),
cb.UnstructuredTR(trs[0], version),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
cs, _ := test.SeedTestData(t, pipelinetest.Data{Namespaces: namespaces, PipelineRuns: pipelineRuns,
TaskRuns: trs,
})
cs.Pipeline.Resources = cb.APIResourceList(version, []string{"pipelinerun", "taskrun"})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic, Clock: clock}
pipelinerun := Command(p)
clock.Advance(10 * time.Minute)
actual, err := test.ExecuteCommand(pipelinerun, "desc", "pipeline-run", "-n", "ns")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
golden.Assert(t, actual, fmt.Sprintf("%s.golden", t.Name()))
}
func TestPipelineRunDescribe_cancelled_pipelinerun(t *testing.T) {
clock := clockwork.NewFakeClock()
trs := []*v1alpha1.TaskRun{
tb.TaskRun("tr-1", "ns",
tb.TaskRunStatus(
tb.TaskRunStartTime(clock.Now().Add(2*time.Minute)),
cb.TaskRunCompletionTime(clock.Now().Add(5*time.Minute)),
tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
}),
),
),
}
pipelineRuns := []*v1alpha1.PipelineRun{
tb.PipelineRun("pipeline-run", "ns",
cb.PipelineRunCreationTimestamp(clock.Now()),
tb.PipelineRunLabel("tekton.dev/pipeline", "pipeline"),
tb.PipelineRunSpec("pipeline"),
tb.PipelineRunStatus(
tb.PipelineRunTaskRunsStatus("tr-1", &v1alpha1.PipelineRunTaskRunStatus{
PipelineTaskName: "t-1",
Status: &trs[0].Status,
}),
tb.PipelineRunStatusCondition(apis.Condition{
Status: corev1.ConditionFalse,
Reason: "PipelineRunCancelled",
Message: "PipelineRun \"pipeline-run\" was cancelled",
}),
tb.PipelineRunStartTime(clock.Now()),
cb.PipelineRunCompletionTime(clock.Now().Add(5*time.Minute)),
),
),
}
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
version := "v1alpha1"
tdc := testDynamic.Options{}
dynamic, err := tdc.Client(
cb.UnstructuredPR(pipelineRuns[0], version),
cb.UnstructuredTR(trs[0], version),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
cs, _ := test.SeedTestData(t, pipelinetest.Data{Namespaces: namespaces, PipelineRuns: pipelineRuns,
TaskRuns: trs,
})
cs.Pipeline.Resources = cb.APIResourceList(version, []string{"pipelinerun", "taskrun"})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic, Clock: clock}
pipelinerun := Command(p)
clock.Advance(10 * time.Minute)
actual, err := test.ExecuteCommand(pipelinerun, "desc", "pipeline-run", "-n", "ns")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
golden.Assert(t, actual, fmt.Sprintf("%s.golden", t.Name()))
}
func TestPipelineRunDescribe_without_tr_start_time(t *testing.T) {
clock := clockwork.NewFakeClock()
trs := []*v1alpha1.TaskRun{
tb.TaskRun("tr-1", "ns",
tb.TaskRunStatus(
tb.StatusCondition(apis.Condition{
Type: apis.ConditionReady,
Status: corev1.ConditionUnknown,
}),
),
),
}
pipelineRuns := []*v1alpha1.PipelineRun{
tb.PipelineRun("pipeline-run", "ns",
cb.PipelineRunCreationTimestamp(clock.Now()),
tb.PipelineRunLabel("tekton.dev/pipeline", "pipeline"),
tb.PipelineRunSpec("pipeline"),
tb.PipelineRunStatus(
tb.PipelineRunStatusCondition(apis.Condition{
Status: corev1.ConditionUnknown,
Reason: resources.ReasonRunning,
}),
tb.PipelineRunTaskRunsStatus("tr-1", &v1alpha1.PipelineRunTaskRunStatus{
PipelineTaskName: "t-1",
Status: &trs[0].Status,
}),
tb.PipelineRunStartTime(clock.Now()),
),
),
}
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
version := "v1alpha1"
tdc := testDynamic.Options{}
dynamic, err := tdc.Client(
cb.UnstructuredPR(pipelineRuns[0], version),
cb.UnstructuredTR(trs[0], version),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
cs, _ := test.SeedTestData(t, pipelinetest.Data{Namespaces: namespaces, PipelineRuns: pipelineRuns,
TaskRuns: trs,
})
cs.Pipeline.Resources = cb.APIResourceList(version, []string{"pipelinerun", "taskrun"})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic, Clock: clock}
pipelinerun := Command(p)
clock.Advance(10 * time.Minute)
actual, err := test.ExecuteCommand(pipelinerun, "desc", "pipeline-run", "-n", "ns")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
golden.Assert(t, actual, fmt.Sprintf("%s.golden", t.Name()))
}
func TestPipelineRunDescribe_custom_timeout(t *testing.T) {
prun := []*v1alpha1.PipelineRun{
tb.PipelineRun("pr-custom-timeout", "ns",
tb.PipelineRunSpec("pr-custom-timeout", tb.PipelineRunTimeout(time.Minute)),
),
}
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
version := "v1alpha1"
tdc := testDynamic.Options{}
dynamic, err := tdc.Client(
cb.UnstructuredPR(prun[0], version),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
cs, _ := test.SeedTestData(t, pipelinetest.Data{Namespaces: namespaces, PipelineRuns: prun})
cs.Pipeline.Resources = cb.APIResourceList(version, []string{"pipelinerun", "taskrun"})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic}
pipelinerun := Command(p)
actual, err := test.ExecuteCommand(pipelinerun, "desc", "pr-custom-timeout", "-n", "ns")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
golden.Assert(t, actual, fmt.Sprintf("%s.golden", t.Name()))
}
func TestPipelineRunsDescribe_custom_output(t *testing.T) {
pipelinerunname := "pipeline-run"
expected := "pipelinerun.tekton.dev/" + pipelinerunname
prun := []*v1alpha1.PipelineRun{
tb.PipelineRun(pipelinerunname, "ns"),
}
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
version := "v1alpha1"
tdc := testDynamic.Options{}
dynamic, err := tdc.Client(
cb.UnstructuredPR(prun[0], version),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
cs, _ := test.SeedTestData(t, pipelinetest.Data{Namespaces: namespaces, PipelineRuns: prun})
cs.Pipeline.Resources = cb.APIResourceList(version, []string{"pipelinerun", "taskrun"})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic}
pipelinerun := Command(p)
got, err := test.ExecuteCommand(pipelinerun, "desc", "-o", "name", "-n", "ns", pipelinerunname)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
got = strings.TrimSpace(got)
if got != expected {
t.Errorf("Result should be '%s' != '%s'", got, expected)
}
}
func TestPipelineRunDescribeV1beta1_custom_output(t *testing.T) {
pipelinerunname := "pipeline-run"
expected := "pipelinerun.tekton.dev/" + pipelinerunname
prun := []*v1beta1.PipelineRun{
{
ObjectMeta: metav1.ObjectMeta{
Name: pipelinerunname,
Namespace: "ns",
},
},
}
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
version := "v1beta1"
tdc := testDynamic.Options{}
dynamic, err := tdc.Client(
cb.UnstructuredV1beta1PR(prun[0], version),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
cs, _ := test.SeedV1beta1TestData(t, pipelinev1beta1test.Data{Namespaces: namespaces, PipelineRuns: prun})
cs.Pipeline.Resources = cb.APIResourceList(version, []string{"pipelinerun", "taskrun"})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic}
pipelinerun := Command(p)
got, err := test.ExecuteCommand(pipelinerun, "desc", "-o", "name", "-n", "ns", pipelinerunname)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
got = strings.TrimSpace(got)
if got != expected {
t.Errorf("Result should be '%s' != '%s'", got, expected)
}
}
func TestPipelineRunDescribeV1beta1(t *testing.T) {
|
clock := clockwork.NewFakeClock()
pipelinerunname := "pipeline-run"
taskRuns := []*v1beta1.TaskRun{
{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns",
Name: "tr-1",
Labels: map[string]string{"tekton.dev/task": "task-1"},
},
Spec: v1beta1.TaskRunSpec{
TaskRef: &v1beta1.TaskRef{
Name: "task-1",
},
},
Status: v1beta1.TaskRunStatus{
Status: duckv1beta1.Status{
Conditions: duckv1beta1.Conditions{
{
Status: corev1.ConditionFalse,
Reason: resources.ReasonFailed,
},
},
},
TaskRunStatusFields: v1beta1.TaskRunStatusFields{
StartTime: &metav1.Time{Time: clock.Now()},
CompletionTime: &metav1.Time{Time: clock.Now().Add(5 * time.Minute)},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Namespace: "ns",
Name: "tr-2",
Labels: map[string]string{"tekton.dev/task": "task-1"},
},
Spec: v1beta1.TaskRunSpec{
TaskRef: &v1beta1.TaskRef{
Name: "task-1",
},
},
Status: v1beta1.TaskRunStatus{
Status: duckv1beta1.Status{
Conditions: duckv1beta1.Conditions{
{
Status: corev1.ConditionTrue,
Reason: resources.ReasonSucceeded,
},
},
},
TaskRunStatusFields: v1beta1.TaskRunStatusFields{
StartTime: &metav1.Time{Time: clock.Now().Add(10 * time.Minute)},
CompletionTime: &metav1.Time{Time: clock.Now().Add(17 * time.Minute)},
},
},
},
}
prun := []*v1beta1.PipelineRun{
{
ObjectMeta: metav1.ObjectMeta{
Name: pipelinerunname,
Namespace: "ns",
},
Spec: v1beta1.PipelineRunSpec{
PipelineRef: &v1beta1.PipelineRef{
Name: "pipeline",
},
Resources: []v1beta1.PipelineResourceBinding{
{
Name: "res-1",
ResourceRef: &v1beta1.PipelineResourceRef{
Name: "test-res",
},
},
{
Name: "res-2",
ResourceRef: &v1beta1.PipelineResourceRef{
Name: "test-res2",
},
},
},
Params: []v1beta1.Param{
{
Name: "p-1",
Value: v1beta1.ArrayOrString{
Type: v1beta1.ParamTypeString,
StringVal: "somethingdifferent",
},
},
{
Name: "p-2",
Value: v1beta1.ArrayOrString{
Type: v1beta1.ParamTypeArray,
ArrayVal: []string{"booms", "booms", "booms"},
},
},
},
},
Status: v1beta1.PipelineRunStatus{
Status: duckv1beta1.Status{
Conditions: duckv1beta1.Conditions{
{
Status: corev1.ConditionTrue,
Reason: resources.ReasonSucceeded,
Message: "Completed",
},
},
},
PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{
StartTime: &metav1.Time{Time: clock.Now()},
CompletionTime: &metav1.Time{Time: clock.Now().Add(20 * time.Minute)},
TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{
"tr-1": {
PipelineTaskName: "t-1",
Status: &taskRuns[0].Status,
},
"tr-2": {
PipelineTaskName: "t-2",
Status: &taskRuns[1].Status,
},
},
},
},
},
}
namespaces := []*corev1.Namespace{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ns",
},
},
}
version := "v1beta1"
tdc := testDynamic.Options{}
dynamic, err := tdc.Client(
cb.UnstructuredV1beta1PR(prun[0], version),
cb.UnstructuredV1beta1TR(taskRuns[0], version),
cb.UnstructuredV1beta1TR(taskRuns[1], version),
)
if err != nil {
t.Errorf("unable to create dynamic client: %v", err)
}
cs, _ := test.SeedV1beta1TestData(t, pipelinev1beta1test.Data{Namespaces: namespaces, PipelineRuns: prun, TaskRuns: taskRuns})
cs.Pipeline.Resources = cb.APIResourceList(version, []string{"pipelinerun", "taskrun"})
p := &test.Params{Tekton: cs.Pipeline, Kube: cs.Kube, Dynamic: dynamic}
pipelinerun := Command(p)
got, err := test.ExecuteCommand(pipelinerun, "desc", "-n", "ns", pipelinerunname)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
golden.Assert(t, got, fmt.Sprintf("%s.golden", t.Name()))
}
|
|
builtin_traits.rs
|
use super::builder::ClauseBuilder;
use crate::{Interner, RustIrDatabase, TraitRef, WellKnownTrait};
use chalk_ir::TyData;
mod clone;
mod copy;
mod sized;
|
db: &dyn RustIrDatabase<I>,
builder: &mut ClauseBuilder<'_, I>,
well_known: WellKnownTrait,
trait_ref: &TraitRef<I>,
ty: &TyData<I>,
) {
if let Some(force_impl) = db.force_impl_for(well_known, ty) {
if force_impl {
builder.push_fact(trait_ref.clone());
}
return;
}
match well_known {
WellKnownTrait::SizedTrait => sized::add_sized_program_clauses(db, builder, trait_ref, ty),
WellKnownTrait::CopyTrait => copy::add_copy_program_clauses(db, builder, trait_ref, ty),
WellKnownTrait::CloneTrait => clone::add_clone_program_clauses(db, builder, trait_ref, ty),
// Drop impls are provided explicitly
WellKnownTrait::DropTrait => (),
}
}
|
/// For well known traits we have special hard-coded impls, either as an
/// optimization or to enforce special rules for correctness.
pub fn add_builtin_program_clauses<I: Interner>(
|
test__record.py
|
import pytest
from indy_catalyst_agent.storage import StorageRecord
class
|
:
def test_create(self):
record_type = "TYPE"
record_value = "VALUE"
record = StorageRecord(record_type, record_value)
assert record.type == record_type
assert record.value == record_value
assert record.id and type(record.id) is str
assert record.tags == {}
|
TestStorageRecord
|
lol_regalia_chat_presence_lol_data.rs
|
/*
*
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* The version of the OpenAPI document: 1.0.0
*
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LolRegaliaChatPresenceLolData {
#[serde(rename = "level", skip_serializing_if = "Option::is_none")]
pub level: Option<i32>,
#[serde(rename = "rankedLeagueDivision", skip_serializing_if = "Option::is_none")]
pub ranked_league_division: Option<crate::models::LolRegaliaLeagueDivision>,
#[serde(rename = "rankedLeagueQueue", skip_serializing_if = "Option::is_none")]
pub ranked_league_queue: Option<crate::models::LolRegaliaLeagueQueueType>,
#[serde(rename = "rankedLeagueTier", skip_serializing_if = "Option::is_none")]
pub ranked_league_tier: Option<crate::models::LolRegaliaLeagueTier>,
#[serde(rename = "rankedPrevSeasonDivision", skip_serializing_if = "Option::is_none")]
pub ranked_prev_season_division: Option<crate::models::LolRegaliaLeagueDivision>,
#[serde(rename = "rankedPrevSeasonTier", skip_serializing_if = "Option::is_none")]
pub ranked_prev_season_tier: Option<crate::models::LolRegaliaLeagueTier>,
#[serde(rename = "rankedSplitRewardLevel", skip_serializing_if = "Option::is_none")]
pub ranked_split_reward_level: Option<i32>,
#[serde(rename = "regalia", skip_serializing_if = "Option::is_none")]
pub regalia: Option<crate::models::LolRegaliaRegaliaSettings>,
}
impl LolRegaliaChatPresenceLolData {
pub fn new() -> LolRegaliaChatPresenceLolData {
LolRegaliaChatPresenceLolData {
level: None,
ranked_league_division: None,
ranked_league_queue: None,
ranked_league_tier: None,
ranked_prev_season_division: None,
ranked_prev_season_tier: None,
ranked_split_reward_level: None,
regalia: None,
}
|
}
|
}
|
findNbands.py
|
#!/usr/bin/env python
# @Copyright 2007 Kristjan Haule
from scipy import *
def
|
(Emin,Emax,enefiles,strfile):
Ry2eV = 13.6056923
# Find 'nat' in the structure file
fs = open(strfile,'r')
fs.next()
line = fs.next()
lattic = line[:4]
nat = int(line[4+23:4+23+3])
fs.close()
print 'Number of all atoms found in struct file', nat
nemin=10000
nemax=0
for enefile in enefiles:
# Find nemin,nemax in energy file
fi = open(enefile,'r')
for i in range(nat):
fi.next() # linearization Energy
fi.next() # linearization Energy
try:
for k in range(1,1000000):
line = fi.next()
S,T,Z = float(line[:19]),float(line[19:2*19]),float(line[2*19:3*19])
KNAME = line[3*19:3*19+10]
N, NEn = int(line[67:67+6]), int(line[67+6:67+6*2])
nemin_=1
nemax_=0
for ii in range(NEn):
line = fi.next().split()
num, e1 = int(line[0]), float(line[1])
e1 *= Ry2eV
if (e1<Emin): nemin_ += 1
if (e1<Emax): nemax_ += 1
nemin = min(nemin,nemin_)
nemax = max(nemax,nemax_)
except StopIteration:
fi.close()
print 'file:', enefile, 'nemin=', nemin, 'nemax=', nemax
print 'Finally set nemin=', nemin, 'nemax=', nemax
return (nemin,nemax)
if __name__ == '__main__':
import os
import sys
import glob
import re
import utils
Ry2eV = 13.6056923
if len(sys.argv)<3:
exmin=-10
exmax= 10
else:
exmin=float(sys.argv[1])
exmax=float(sys.argv[2])
print 'Energy window:', exmin, exmax
w2k = utils.W2kEnvironment()
# looking for EF
if os.path.isfile('EF.dat'):
EF = float(open('EF.dat').read())
else:
fname = w2k.case+".scf2"
if os.path.isfile(fname) or os.path.isfile(fname+'up'):
if os.path.isfile(fname):
fscf = open(fname, 'r')
else:
fscf = open(fname+'up', 'r')
lines = fscf.readlines()
for line in lines:
if re.match(r':FER', line) is not None:
EF = float(line[38:])*Ry2eV
print 'EF from scf file : ', EF
break
else:
EF =float(open(w2k.case+'.indmf1').readlines()[1].split()[1])
print 'EF from indmf1 file : ', EF
print 'EF=', EF
#Emin,Emax = -1.331295, 18.668705
Emin, Emax = EF+exmin, EF+exmax
print 'Emin, Emax=', Emin, Emax
strfile = w2k.case+'.struct'
enefiles = glob.glob(w2k.case+'.energy'+'*')
enefiles = filter(lambda fil: os.path.getsize(fil)>0, enefiles) # Remove empty files
for fil in enefiles:
if re.match(w2k.case+'.energyso', fil): # Spin-orbit on, remove non-spin-orbit files
            enefiles = filter(lambda fil: re.match(w2k.case+'.energyso', fil) is not None, enefiles) # Keep only the spin-orbit files
break
print 'enefiles=', enefiles
(nemin,nemax) = findNbands(Emin,Emax,enefiles,strfile)
print 'nemin,nemax=', nemin, nemax
print 'Replace second line of '+w2k.case+'.indmfl with'
print nemin,nemax,1,4,'# hybridization nmin, nmax, renormalize for interstitials, projection type'
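    # Interpretation of the two numbers printed above: across every k-point in
    # every energy file, nemin is the first band index that can fall inside
    # [Emin, Emax] and nemax the last, so a single band window covers all k-points.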
|
findNbands
|
synchronizer.rs
|
use std::collections::{hash_map::Entry, HashMap, HashSet};
use super::protocol_state::{ProtocolState, VertexTrait};
use crate::components::consensus::traits::NodeIdT;
/// Note that we might request download of a duplicate element
/// (one that had been requested earlier), but from a different node.
/// The assumption is that the downloading layer will collect different node IDs as alternative
/// sources and use a different address in the case of download failures.
#[derive(Debug)]
pub(crate) enum SynchronizerEffect<I, V: VertexTrait> {
/// Effect for the reactor to download missing vertex.
RequestVertex(I, V::Id),
/// Effect for the reactor to download missing consensus values (a deploy for example).
RequestConsensusValue(I, V::Value),
/// Effect for the reactor to requeue a vertex once its dependencies are downloaded.
RequeueVertex(I, V),
/// Means that the vertex has all dependencies (vertices and consensus values)
/// successfully added to the state.
Ready(V),
}
/// Structure that tracks which vertices wait for what consensus value dependencies.
#[derive(Debug)]
pub(crate) struct ConsensusValueDependencies<V: VertexTrait> {
// Multiple vertices can be dependent on the same consensus value.
cv_to_set: HashMap<V::Value, Vec<V::Id>>,
// Each vertex can be depending on multiple consensus values.
id_to_group: HashMap<V::Id, HashSet<V::Value>>,
}
impl<V: VertexTrait> ConsensusValueDependencies<V> {
fn new() -> Self {
ConsensusValueDependencies {
cv_to_set: HashMap::new(),
id_to_group: HashMap::new(),
}
}
/// Adds a consensus value dependency.
fn add(&mut self, c: V::Value, id: V::Id) {
self.cv_to_set
.entry(c.clone())
.or_default()
.push(id.clone());
self.id_to_group.entry(id).or_default().insert(c);
}
/// Remove a consensus value from dependencies.
/// Call when it's downloaded/synchronized.
/// Returns vertices that were waiting on it.
fn remove(&mut self, c: &V::Value) -> Vec<V::Id> {
// Get list of vertices that are dependent for the consensus value.
match self.cv_to_set.remove(c) {
None => Vec::new(),
Some(dependent_vertices) => {
// Remove the consensus value from the set of values each vertex is waiting for.
dependent_vertices
.into_iter()
.filter(|vertex| {
if let Entry::Occupied(mut consensus_values) =
self.id_to_group.entry(vertex.clone())
{
consensus_values.get_mut().remove(c);
if consensus_values.get().is_empty() {
consensus_values.remove();
true
} else {
false
}
} else {
false
}
})
.collect()
}
}
}
}
#[derive(Debug)]
pub(crate) struct DagSynchronizerState<I, P>
where
P: ProtocolState,
{
consensus_value_deps: ConsensusValueDependencies<P::Vertex>,
    // Tracks which vertices are still waiting for their vertex dependencies to be downloaded.
    // Since a vertex can have multiple vertices depending on it, downloading a single vertex
    // can "release" more than one new vertex to be requeued to the reactor.
//TODO: Wrap the following with a struct that will keep the details hidden.
vertex_dependants: HashMap<P::VId, Vec<P::VId>>,
vertex_by_vid: HashMap<P::VId, (I, P::Vertex)>,
}
impl<I, P> DagSynchronizerState<I, P>
where
I: NodeIdT,
P: ProtocolState,
{
pub(crate) fn new() -> Self {
DagSynchronizerState {
consensus_value_deps: ConsensusValueDependencies::new(),
vertex_dependants: HashMap::new(),
vertex_by_vid: HashMap::new(),
}
}
/// Synchronizes vertex `v`.
///
    /// If the protocol state is missing vertex dependencies of `v`, returns a vertex request effect.
    /// If the vertices are sync'd but a consensus value is missing, returns a request to synchronize it.
    /// If all dependencies are satisfied, returns `Ready(v)`, signaling that `v`
    /// can be safely added to the protocol state, plus `RequeueVertex` effects for vertices
    /// that were depending on `v`.
pub(crate) fn synchronize_vertex(
&mut self,
sender: I,
v: P::Vertex,
protocol_state: &P,
) -> Vec<SynchronizerEffect<I, P::Vertex>> {
if let Some(missing_vid) = protocol_state.missing_dependency(&v) {
// If there are missing vertex dependencies, sync those first.
vec![self.sync_vertex(sender, missing_vid, v.clone())]
} else {
// Once all vertex dependencies are satisfied, we need to make sure that
// we also have all the block's deploys.
match self.sync_consensus_values(sender, &v) {
Some(eff) => vec![eff],
None => {
// Downloading vertex `v` may resolve the dependency requests for other
// vertices. If it's a vote that has no consensus values it
// can be added to the state.
self.on_vertex_fully_synced(v)
}
}
}
}
// Vertex `v` depends on the vertex with ID `v_id`
fn add_vertex_dependency(&mut self, v_id: P::VId, sender: I, v: P::Vertex) {
let dependant_id = v.id();
self.vertex_by_vid
.entry(dependant_id.clone())
.or_insert((sender, v));
self.vertex_dependants
.entry(v_id)
.or_insert_with(Vec::new)
.push(dependant_id);
}
fn add_consensus_value_dependency(&mut self, c: P::Value, sender: I, v: &P::Vertex) {
let dependant_id = v.id();
self.vertex_by_vid
.entry(dependant_id.clone())
.or_insert_with(|| (sender, v.clone()));
self.consensus_value_deps.add(c, dependant_id)
}
/// Complete a vertex dependency (called when that vertex is downloaded from another node and
    /// persisted). Returns the list of vertices that were waiting on that vertex dependency.
    /// Vertices returned have all of their dependencies completed - i.e. are not waiting for
    /// anything else.
fn complete_vertex_dependency(&mut self, v_id: P::VId) -> Vec<(I, P::Vertex)> {
match self.vertex_dependants.remove(&v_id) {
None => Vec::new(),
Some(dependants) => self.get_vertices_by_id(dependants),
}
}
|
    /// Returns the list of vertices that were waiting on the completion of that consensus value.
    /// Vertices returned have all of their dependencies completed - i.e. are not waiting for
    /// anything else.
fn remove_consensus_value_dependency(&mut self, c: &P::Value) -> Vec<(I, P::Vertex)> {
let dependants = self.consensus_value_deps.remove(c);
if dependants.is_empty() {
Vec::new()
} else {
self.get_vertices_by_id(dependants)
}
}
    /// Helper method for returning a list of vertices by their IDs.
fn get_vertices_by_id(&mut self, vertex_ids: Vec<P::VId>) -> Vec<(I, P::Vertex)> {
vertex_ids
.into_iter()
.filter_map(|vertex_id| self.vertex_by_vid.remove(&vertex_id))
.collect()
}
/// Synchronizes the consensus value the vertex is introducing to the protocol state.
/// It may be a single deploy, list of deploys, an integer value etc.
/// Implementations will know which values are missing
/// (ex. deploys in the local deploy buffer vs new deploys introduced by the block).
/// Node passed in is the one that proposed the original vertex. It should also have the missing
/// dependency.
fn sync_consensus_values(
&mut self,
sender: I,
v: &P::Vertex,
) -> Option<SynchronizerEffect<I, P::Vertex>> {
v.value().map(|cv| {
self.add_consensus_value_dependency(cv.clone(), sender.clone(), v);
SynchronizerEffect::RequestConsensusValue(sender, cv.clone())
})
}
/// Synchronizes the dependency (single) of a newly received vertex.
/// In practice, this method will produce an effect that will be passed on to the reactor for
/// handling. Node passed in is the one that proposed the original vertex. It should also
/// have the missing dependency.
fn sync_vertex(
&mut self,
node: I,
missing_dependency: P::VId,
new_vertex: P::Vertex,
) -> SynchronizerEffect<I, P::Vertex> {
self.add_vertex_dependency(missing_dependency.clone(), node.clone(), new_vertex);
SynchronizerEffect::RequestVertex(node, missing_dependency)
}
/// Vertex `v` has been fully sync'd – both protocol state dependencies
/// and consensus values have been downloaded.
/// Returns vector of synchronizer effects to be handled.
pub(crate) fn on_vertex_fully_synced(
&mut self,
v: P::Vertex,
) -> Vec<SynchronizerEffect<I, P::Vertex>> {
let v_id = v.id();
let mut effects = vec![SynchronizerEffect::Ready(v)];
let completed_dependencies = self.complete_vertex_dependency(v_id);
effects.extend(
completed_dependencies
.into_iter()
.map(|(s, v)| SynchronizerEffect::RequeueVertex(s, v)),
);
effects
}
/// Marks `c` consensus value as downloaded.
    /// Returns vertices that were dependent on it.
/// NOTE: Must be called only after all vertex dependencies are downloaded.
pub(crate) fn on_consensus_value_synced(
&mut self,
c: &P::Value,
) -> Vec<SynchronizerEffect<I, P::Vertex>> {
let completed_dependencies = self.remove_consensus_value_dependency(c);
completed_dependencies
.into_iter()
// Because we sync consensus value dependencies only when we have sync'd
// all vertex dependencies, we can now consider `v` to have all dependencies resolved.
.flat_map(|(_, v)| self.on_vertex_fully_synced(v))
.collect()
}
/// Drops all vertices depending directly or indirectly on `vid` and returns all senders.
pub(crate) fn on_vertex_invalid(&mut self, vid: P::VId) -> HashSet<I> {
let mut faulty_senders = HashSet::new();
let mut invalid_ids = vec![vid];
// All vertices waiting for invalid vertices are invalid as well.
while let Some(vid) = invalid_ids.pop() {
faulty_senders.extend(self.vertex_by_vid.remove(&vid).map(|(s, _)| s));
invalid_ids.extend(self.vertex_dependants.remove(&vid).into_iter().flatten());
for dependants in self.vertex_dependants.values_mut() {
dependants.retain(|dvid| *dvid != vid);
}
}
self.vertex_dependants.retain(|_, dep| !dep.is_empty());
faulty_senders
}
/// Drops all vertices depending directly or indirectly on value `c` and returns all senders.
pub(crate) fn on_consensus_value_invalid(&mut self, c: &P::Value) -> HashSet<I> {
// All vertices waiting for this dependency are invalid.
let (faulty_senders, invalid_ids): (HashSet<I>, HashSet<P::VId>) = self
.remove_consensus_value_dependency(c)
.into_iter()
.map(|(sender, v)| (sender, v.id()))
.unzip();
// And all vertices waiting for invalid vertices are invalid as well.
invalid_ids
.into_iter()
.flat_map(|vid| self.on_vertex_invalid(vid))
.chain(faulty_senders)
.collect()
}
}
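// A sketch of how a host reactor might drain these effects (hypothetical driver,
// not part of this module): RequestVertex and RequestConsensusValue trigger
// downloads, RequeueVertex feeds a vertex back into `synchronize_vertex` once a
// dependency arrives, and Ready(v) signals that `v` can be added to the
// protocol state.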
|
/// Removes a consensus value dependency (called when that value is downloaded from another node).
|
git_pillar.py
|
"""
Runner module to directly manage the git external pillar
"""
import logging
import salt.pillar.git_pillar
import salt.utils.gitfs
from salt.exceptions import SaltRunnerError
log = logging.getLogger(__name__)
def update(branch=None, repo=None):
"""
.. versionadded:: 2014.1.0
.. versionchanged:: 2015.8.4
This runner function now supports the :ref:`git_pillar
configuration schema <git-pillar-configuration>` introduced in
2015.8.0. Additionally, the branch and repo can now be omitted to
update all git_pillar remotes. The return data has also changed to
a dictionary. The values will be ``True`` only if new commits were
fetched, and ``False`` if there were errors or no new commits were
fetched.
.. versionchanged:: 2018.3.0
The return for a given git_pillar remote will now be ``None`` when no
changes were fetched. ``False`` now is reserved only for instances in
which there were errors.
.. versionchanged:: 3001
The repo parameter also matches against the repo name.
Fetch one or all configured git_pillar remotes.
.. note::
This will *not* fast-forward the git_pillar cachedir on the master. All
it does is perform a ``git fetch``. If this runner is executed with
``-l debug``, you may see a log message that says that the repo is
up-to-date. Keep in mind that Salt automatically fetches git_pillar
repos roughly every 60 seconds (or whatever
:conf_master:`loop_interval` is set to). So, it is possible that the
repo was fetched automatically in the time between when changes were
pushed to the repo, and when this runner was executed. When in doubt,
simply refresh pillar data using :py:func:`saltutil.refresh_pillar
<salt.modules.saltutil.refresh_pillar>` and then use
:py:func:`pillar.item <salt.modules.pillar.item>` to check if the
pillar data has changed as expected.
CLI Example:
.. code-block:: bash
# Update specific branch and repo
salt-run git_pillar.update branch='branch' repo='https://foo.com/bar.git'
# Update specific repo, by name
salt-run git_pillar.update repo=myrepo
# Update all repos
salt-run git_pillar.update
# Run with debug logging
salt-run git_pillar.update -l debug
"""
ret = {}
for ext_pillar in __opts__.get("ext_pillar", []):
pillar_type = next(iter(ext_pillar))
if pillar_type != "git":
continue
pillar_conf = ext_pillar[pillar_type]
pillar = salt.utils.gitfs.GitPillar(
__opts__,
|
global_only=salt.pillar.git_pillar.GLOBAL_ONLY,
)
for remote in pillar.remotes:
# Skip this remote if it doesn't match the search criteria
if branch is not None:
if branch != remote.branch:
continue
if repo is not None:
if repo != remote.url and repo != getattr(remote, "name", None):
continue
try:
result = remote.fetch()
except Exception as exc: # pylint: disable=broad-except
log.error(
"Exception '%s' caught while fetching git_pillar " "remote '%s'",
exc,
remote.id,
exc_info_on_loglevel=logging.DEBUG,
)
result = False
finally:
remote.clear_lock()
ret[remote.id] = result
if not ret:
if branch is not None or repo is not None:
raise SaltRunnerError(
"Specified git branch/repo not found in ext_pillar config"
)
else:
raise SaltRunnerError("No git_pillar remotes are configured")
return ret
|
pillar_conf,
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
|
resource.go
|
package resource
import (
"strings"
yaml "gopkg.in/yaml.v2"
"github.com/weaveworks/flux"
fluxerr "github.com/weaveworks/flux/errors"
"github.com/weaveworks/flux/policy"
"github.com/weaveworks/flux/resource"
)
const (
PolicyPrefix = "flux.weave.works/"
)
// -- unmarshaling code for specific object and field types
// struct to embed in objects, to provide default implementation
type baseObject struct {
source string
bytes []byte
Kind string `yaml:"kind"`
Meta struct {
Namespace string `yaml:"namespace"`
Name string `yaml:"name"`
Annotations map[string]string `yaml:"annotations,omitempty"`
} `yaml:"metadata"`
}
func (o baseObject) ResourceID() flux.ResourceID {
ns := o.Meta.Namespace
if ns == "" {
ns = "default"
}
return flux.MakeResourceID(ns, o.Kind, o.Meta.Name)
}
// It's useful for comparisons in tests to be able to remove the
// record of bytes
func (o *baseObject) debyte() {
o.bytes = nil
}
func (o baseObject) Policy() policy.Set {
set := policy.Set{}
for k, v := range o.Meta.Annotations {
if strings.HasPrefix(k, PolicyPrefix) {
p := strings.TrimPrefix(k, PolicyPrefix)
if v == "true" {
set = set.Add(policy.Policy(p))
} else {
set = set.Set(policy.Policy(p), v)
}
}
}
return set
}
func (o baseObject) Source() string {
return o.source
}
func (o baseObject) Bytes() []byte {
return o.bytes
}
func unmarshalObject(source string, bytes []byte) (resource.Resource, error) {
var base = baseObject{source: source, bytes: bytes}
if err := yaml.Unmarshal(bytes, &base); err != nil {
return nil, err
}
r, err := unmarshalKind(base, bytes)
if err != nil {
return nil, makeUnmarshalObjectErr(source, err)
}
return r, nil
}
func unmarshalKind(base baseObject, bytes []byte) (resource.Resource, error) {
switch base.Kind {
case "CronJob":
var cj = CronJob{baseObject: base}
if err := yaml.Unmarshal(bytes, &cj); err != nil {
return nil, err
}
return &cj, nil
case "DaemonSet":
var ds = DaemonSet{baseObject: base}
if err := yaml.Unmarshal(bytes, &ds); err != nil {
return nil, err
}
return &ds, nil
case "Deployment":
var dep = Deployment{baseObject: base}
if err := yaml.Unmarshal(bytes, &dep); err != nil {
return nil, err
}
return &dep, nil
case "Namespace":
var ns = Namespace{baseObject: base}
if err := yaml.Unmarshal(bytes, &ns); err != nil {
return nil, err
}
return &ns, nil
case "StatefulSet":
var ss = StatefulSet{baseObject: base}
if err := yaml.Unmarshal(bytes, &ss); err != nil {
return nil, err
}
return &ss, nil
case "List":
var raw rawList
if err := yaml.Unmarshal(bytes, &raw); err != nil {
return nil, err
}
var list List
		if err := unmarshalList(base, &raw, &list); err != nil {
			return nil, err
		}
return &list, nil
case "FluxHelmRelease", "HelmRelease":
var fhr = FluxHelmRelease{baseObject: base}
if err := yaml.Unmarshal(bytes, &fhr); err != nil {
return nil, err
}
return &fhr, nil
case "":
// If there is an empty resource (due to eg an introduced comment),
// we are returning nil for the resource and nil for an error
// (as not really an error). We are not, at least at the moment,
// reporting an error for invalid non-resource yamls on the
// assumption it is unlikely to happen.
return nil, nil
// The remainder are things we have to care about, but not
// treat specially
default:
return &base, nil
}
}
type rawList struct {
Items []map[string]interface{}
}
func unmarshalList(base baseObject, raw *rawList, list *List) error {
list.baseObject = base
	list.Items = make([]resource.Resource, len(raw.Items))
for i, item := range raw.Items {
bytes, err := yaml.Marshal(item)
if err != nil {
return err
}
res, err := unmarshalObject(base.source, bytes)
if err != nil {
return err
}
list.Items[i] = res
}
return nil
}
|
func makeUnmarshalObjectErr(source string, err error) *fluxerr.Error {
return &fluxerr.Error{
Type: fluxerr.User,
Err: err,
Help: `Could not parse "` + source + `".
This likely means it is malformed YAML.
`,
}
}
// For reference, the Kubernetes v1 types are in:
// https://github.com/kubernetes/client-go/blob/master/pkg/api/v1/types.go
| |
index.js
|
// This file is the factory for the GUI part.
// The important thing here is the createElement async method.
// This file must be an es module in order to be loaded with the SDK (with dynamic imports)
import QuadrafuzzHTMLElement from './Gui.js';
export { QuadrafuzzHTMLElement };
/**
* A mandatory method if you want a gui for your plugin
* @param {WebAudioModule} plugin - the plugin instance
* @returns {Promise<Element>} - the plugin root node that is inserted in the DOM of the host
*/
export async function
|
(plugin, ...args) {
// here we return the WebComponent GUI but it could be
// any DOM node
return new QuadrafuzzHTMLElement(plugin, ...args);
}
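// Typical host usage (a sketch; `pluginInstance` is a hypothetical WebAudioModule):
//   const node = await createElement(pluginInstance);
//   container.appendChild(node);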
|
createElement
|
access_token.go
|
package context
import (
"encoding/json"
"fmt"
"sync"
"time"
"github.com/TianHanYueDeng/wechat/util"
)
const (
	//AccessTokenURL is the endpoint for fetching the access_token
AccessTokenURL = "https://api.weixin.qq.com/cgi-bin/token"
)
//ResAccessToken struct
type ResAccessToken struct {
util.CommonError
AccessToken string `json:"access_token"`
ExpiresIn int64 `json:"expires_in"`
}
//GetAccessTokenFunc is the signature of a function that fetches the access token
type GetAccessTokenFunc func(ctx *Context) (accessToken string, err error)
//SetAccessTokenLock sets the read-write lock (one lock per appID)
func (ctx *Context) SetAccessTokenLock(l *sync.RWMutex) {
ctx.accessTokenLock = l
}
//SetGetAccessTokenFunc sets a custom accessToken fetcher; the caller must implement its own caching
func (ctx *Context) SetGetAccessTokenFunc(f GetAccessTokenFunc) {
ctx.accessTokenFunc = f
}
//GetAccessToken fetches the access_token
func (ctx *Context) GetAccessToken() (accessToken string, err error) {
ctx.accessTokenLock.Lock()
defer ctx.accessTokenLock.Unlock()
if ctx.accessTokenFunc != nil {
return ctx.accessTokenFunc(ctx)
}
accessTokenCacheKey := fmt.Sprintf("access_token_%s", ctx.AppID)
val := ctx.Cache.Get(accessTokenCacheKey)
if val != nil {
accessToken = val.(string)
return
}
//fetch from the WeChat server
var resAccessToken ResAccessToken
resAccessToken, err = ctx.GetAccessTokenFromServer()
if err != nil {
return
}
accessToken = resAccessToken.AccessToken
return
}
//GetAccessTokenFromServer forcibly fetches the access tok
|
*Context) GetAccessTokenFromServer() (resAccessToken ResAccessToken, err error) {
url := fmt.Sprintf("%s?grant_type=client_credential&appid=%s&secret=%s", AccessTokenURL, ctx.AppID, ctx.AppSecret)
var body []byte
body, err = util.HTTPGet(url)
if err != nil {
return
}
err = json.Unmarshal(body, &resAccessToken)
if err != nil {
return
}
if resAccessToken.ErrMsg != "" {
err = fmt.Errorf("get access_token error : errcode=%v , errormsg=%v", resAccessToken.ErrCode, resAccessToken.ErrMsg)
return
}
accessTokenCacheKey := fmt.Sprintf("access_token_%s", ctx.AppID)
expires := resAccessToken.ExpiresIn - 1500
err = ctx.Cache.Set(accessTokenCacheKey, resAccessToken.AccessToken, time.Duration(expires)*time.Second)
return
}
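// Illustrative sketch: plugging in a custom fetcher that bypasses the
// built-in cache (myStore below is hypothetical):
//
//	ctx.SetGetAccessTokenFunc(func(ctx *Context) (string, error) {
//		// look the token up in your own shared store and refresh it as needed
//		return myStore.Token(ctx.AppID)
//	})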
|
en
func (ctx
|
AppMenu.js
|
const AppMenu = () => {
return (
<div>
<h2>{'AppMenu'}</h2>
<p>{'Find me in ./web/src/components/AppMenu/AppMenu.js'}</p>
</div>
)
}
export default AppMenu
| ||
dao.cache_test.go
|
package like
import (
"context"
"testing"
"fmt"
"go-common/app/interface/main/activity/model/like"
"github.com/smartystreets/goconvey/convey"
)
func TestLikeLike(t *testing.T) {
convey.Convey("Like", t, func(ctx convey.C) {
var (
c = context.Background()
id = int64(77)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
res, err := d.Like(c, id)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}
func TestLikeActSubject(t *testing.T) {
convey.Convey("ActSubject", t, func(ctx convey.C) {
var (
c = context.Background()
id = int64(10256)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
res, err := d.ActSubject(c, id)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}
func TestLikeLikeMissionBuff(t *testing.T) {
convey.Convey("LikeMissionBuff", t, func(ctx convey.C) {
var (
c = context.Background()
id = int64(10256)
mid = int64(77)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
res, err := d.LikeMissionBuff(c, id, mid)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}
func TestLikeMissionGroupItems(t *testing.T) {
convey.Convey("MissionGroupItems", t, func(ctx convey.C) {
var (
c = context.Background()
keys = []int64{1, 2}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
res, err := d.MissionGroupItems(c, keys)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}
func TestLikeActMission(t *testing.T) {
convey.Convey("ActMission", t, func(ctx convey.C) {
var (
c = context.Background()
id = int64(10256)
lid = int64(7)
mid = int64(77)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
res, err := d.ActMission(c, id, lid, mid)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}
func TestLikeActLikeAchieves(t *testing.T) {
convey.Convey("ActLikeAchieves", t, func(ctx convey.C) {
var (
c = context.Background()
id = int64(1)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
res, err := d.ActLikeAchieves(c, id)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}
func TestLikeActMissionFriends(t *testing.T) {
convey.Convey("ActMissionFriends", t, func(ctx convey.C) {
var (
c = context.Background()
id = int64(10256)
lid = int64(1)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
res, err := d.ActMissionFriends(c, id, lid)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}
func TestLikeActUserAchieve(t *testing.T) {
convey.Convey("ActUserAchieve", t, func(ctx convey.C) {
var (
c = context.Background()
id = int64(1)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
res, err := d.ActUserAchieve(c, id)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}
func TestLikeMatchSubjects(t *testing.T) {
convey.Convey("MatchSubjects", t, func(ctx convey.C) {
var (
c = context.Background()
keys = []int64{10256}
|
res, err := d.MatchSubjects(c, keys)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}
func TestLikeLikeContent(t *testing.T) {
convey.Convey("LikeContent", t, func(ctx convey.C) {
var (
c = context.Background()
keys = []int64{1, 2}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
res, err := d.LikeContent(c, keys)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}
func TestActSubjectProtocol(t *testing.T) {
convey.Convey("LikeContent", t, func(ctx convey.C) {
var (
c = context.Background()
sid = int64(10298)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
res, err := d.ActSubjectProtocol(c, sid)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
fmt.Printf("%+v", res)
})
})
})
}
func TestCacheActSubjectProtocol(t *testing.T) {
convey.Convey("LikeContent", t, func(ctx convey.C) {
var (
c = context.Background()
sid = int64(10298)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
res, err := d.CacheActSubjectProtocol(c, sid)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
fmt.Printf("%+v", res)
})
})
})
}
func TestAddCacheActSubjectProtocol(t *testing.T) {
convey.Convey("LikeContent", t, func(ctx convey.C) {
var (
c = context.Background()
sid = int64(10256)
protocol = &like.ActSubjectProtocol{ID: 1, Sid: 10256}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
err := d.AddCacheActSubjectProtocol(c, sid, protocol)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
})
})
})
}
|
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
|
stat.rs
|
//! Provides some statistical information on the "8051ness" of regions of a file
use crate::instr::{InsType, Instructeam};
use lazy_static::lazy_static;
use std::vec::Vec;
pub fn count_instructions(buf: &[u8], blocksize: usize) -> Vec<[usize; 256]> {
let total_len = buf.len() / blocksize + (buf.len() % blocksize != 0) as usize;
let mut countvec: Vec<[usize; 256]> = vec![[0; 256]; total_len];
let instr_iter = crate::instr::Instructeam::new(buf);
for instr in instr_iter {
countvec[instr.pos / blocksize][instr.bytes[0] as usize] += 1;
}
countvec
}
/// Chi-squared test of a block, used for the statistical analysis of the blocks
/// of a file for 8051ness
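/// Computed (with the block-size factor deliberately dropped) as
/// χ² = Σᵢ (pᵢ − qᵢ)² / qᵢ, where pᵢ are the block's relative opcode-group
/// frequencies and qᵢ the reference population frequencies.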
pub fn square_chi(freq: &[usize], block_n: usize, pop: &[f64]) -> f64
|
/// Kullback-Leibler divergence of a block, typically ranging from 0 to 1
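/// Computed as D(p ∥ q) = Σᵢ pᵢ · log₂₅₆(pᵢ / qᵢ); the base-256 logarithm
/// keeps the value small by normalizing over the 256-symbol opcode alphabet.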
pub fn kullback_leibler(freq: &[usize], block_n: usize, pop: &[f64]) -> f64 {
let mut kld = 0.0f64;
for (p, q) in freq.iter().zip(pop.iter()) {
// if freq[i] == 0, its term contributes nothing: p · log(p/q) → 0 as p → 0
if *p > 0 {
let rel_p = (*p as f64) / (block_n as f64);
kld += rel_p * (rel_p / q).log(256.0);
}
}
kld
}
/// Runs a statistical goodness-of-fit test blockwise on the opcodes (not operands!).
/// Typically, <300 means that it is 8051 code.
/// Note that the individual opcodes are grouped into groups of similar probability so that
/// smaller blocks work better.
pub fn stat_blocks(
buf: &[u8],
blocksize: usize,
blockfun: fn(&[usize], usize, &[f64]) -> f64,
freqinfo: Option<&FreqInfo>,
) -> Vec<(f64, usize)> {
// the maximum instruction size is 3, make sure we have at least one instruction in each block
// (not that blocksizes this small would make sense anyway)
if blocksize < 3 {
panic!("Blocksize needs to be at least 3");
}
let actual_freqinfo = freqinfo.unwrap_or_default();
let mut ret = Vec::new();
let mut freq = vec![0; actual_freqinfo.relative_freqency.len()];
let mut prev_block = 0;
let mut block_n = 0;
// only care about opcodes
for instr in crate::instr::Instructeam::new(buf) {
// if new block begins
if instr.pos / blocksize > prev_block {
ret.push((
blockfun(&freq, block_n, &actual_freqinfo.relative_freqency),
block_n,
));
prev_block = instr.pos / blocksize;
block_n = 0;
for x in &mut freq {
*x = 0;
}
}
block_n += 1;
freq[actual_freqinfo.group_map[instr.bytes[0] as usize] as usize] += 1;
}
ret.push((
blockfun(&freq, block_n, &actual_freqinfo.relative_freqency),
block_n,
));
ret
}
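// Illustrative usage sketch (`firmware` is a hypothetical byte buffer):
//
//     let scores = stat_blocks(&firmware, 1024, kullback_leibler, None);
//     for (score, n) in scores {
//         println!("divergence {:.3} over {} opcodes", score, n);
//     }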
/// Counts the percentage of instructions whose jump address does not align with the instruction
/// stream
/// buf: firmware
/// blocksize: size of blocks where percentage is calculated
/// abs: whether to include absolute jumps and block jumps (ajmp/acall)
/// count_outside: whether to include jumps outside of the buffer
pub fn instr_align_count(
buf: &[u8],
blocksize: usize,
abs: bool,
count_outside: bool,
) -> Vec<(f64, usize)> {
if blocksize < 3 {
panic!("Blocksize needs to be at least 3");
}
let mut is_instr_start = Vec::new();
// record which bytes are the start of an instruction,
// assuming a continuous instruction stream
for instr in Instructeam::new(buf) {
for i in 0..instr.bytes.len() {
is_instr_start.push(i == 0 && !matches!(*instr.itype, InsType::Resrv))
}
}
// an instruction starting near the end of the file may extend past it,
// so pad the vector out to the full buffer length
is_instr_start.resize(buf.len(), false);
let mut ret = Vec::new();
let mut prev_block = 0;
let mut block_jumps = 0usize;
let mut block_aligns = 0usize;
for instr in Instructeam::new(buf) {
if instr.pos / blocksize > prev_block {
// begin new block
ret.push((1.0 - block_aligns as f64 / block_jumps as f64, block_jumps));
block_aligns = 0;
block_jumps = 0;
prev_block = instr.pos / blocksize;
}
if let Some(target) = instr.get_jump_target() {
let is_abs = matches!(
instr.itype,
InsType::Ljmp | InsType::Lcall | InsType::Ajmp | InsType::Acall
);
// count all valid jumps
if (abs || !is_abs) && (count_outside || target < is_instr_start.len()) {
block_jumps += 1;
}
// count the valid jumps that are aligned
if let Some(true) = is_instr_start.get(target) {
if abs || !is_abs {
block_aligns += 1;
}
}
}
}
// push last remaining block
ret.push((1.0 - block_aligns as f64 / block_jumps as f64, block_jumps));
ret
}
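// Illustrative sketch: fraction of misaligned jumps per 1 KiB block,
// excluding absolute jumps and out-of-buffer targets (`firmware` is a
// hypothetical byte buffer):
//
//     let misaligned = instr_align_count(&firmware, 1024, false, false);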
/// Contains information about the distribution of opcodes, collected into buckets of similar
/// frequency
pub struct FreqInfo {
group_map: [u8; 256],
relative_freqency: Vec<f64>,
}
impl FreqInfo {
/// Derives frequency information from a buffer with given bucket sizes
/// # Arguments
/// * `buckets`: sizes of the buckets where opcodes of similar frequencies get put, in
/// ascending order of frequency
/// * `buf`: corpus containing pure 8051 machine code
pub fn new(buckets: &[usize], buf: &[u8]) -> Result<FreqInfo, &'static str> {
let bucketsum: usize = buckets.iter().sum();
if bucketsum != 256 {
return Err("Bucket sizes must add up to exactly 256");
};
let count = count_instructions(buf, buf.len());
// number of all opcodes
let total_count: usize = count[0].iter().sum();
// pairs of position and frequency
let mut freq_map: Vec<(usize, &usize)> = count[0].iter().enumerate().collect();
// sort by frequency
freq_map.sort_by_key(|x| x.1);
let mut buck_asc = Vec::new();
for (i, x) in buckets.iter().enumerate() {
for _ in 0..*x {
buck_asc.push(i as u8);
}
}
let mut group_map = [0u8; 256];
for (i, gm) in group_map.iter_mut().enumerate() {
match freq_map.iter().position(|x| x.0 == i) {
Some(pos) => *gm = buck_asc[pos],
None => panic!("Oops"),
}
}
let mut sums = vec![0.0f64; buckets.len()];
for (i, m) in freq_map {
sums[usize::from(group_map[i])] += (*m as f64) / (total_count as f64);
}
Ok(FreqInfo {
group_map,
relative_freqency: sums,
})
}
}
impl Default for &FreqInfo {
fn default() -> &'static FreqInfo {
lazy_static! {
static ref FREQ: FreqInfo = FreqInfo {
group_map: [
13, 6, 14, 6, 7, 14, 2, 1, 6, 4, 4, 1, 2, 2, 1, 3, 2, 2, 14, 5, 12, 7, 0, 1, 4,
2, 1, 2, 2, 2, 0, 2, 8, 3, 13, 5, 14, 11, 0, 1, 3, 2, 3, 1, 2, 4, 5, 6, 8, 2,
3, 9, 13, 9, 0, 0, 3, 5, 3, 5, 5, 3, 7, 4, 10, 2, 1, 12, 4, 4, 0, 0, 3, 3, 1,
3, 2, 3, 3, 3, 8, 1, 2, 12, 11, 3, 0, 0, 1, 1, 1, 1, 2, 0, 2, 0, 12, 3, 6, 4,
8, 5, 3, 3, 6, 5, 2, 0, 3, 2, 7, 3, 13, 0, 3, 4, 14, 15, 2, 0, 10, 7, 7, 7, 8,
9, 7, 9, 13, 1, 3, 2, 3, 13, 4, 2, 4, 5, 8, 6, 10, 9, 11, 9, 15, 0, 5, 12, 11,
7, 0, 0, 3, 1, 1, 1, 3, 3, 5, 6, 0, 1, 6, 15, 12, 0, 2, 2, 5, 5, 7, 6, 8, 7,
11, 9, 1, 0, 1, 2, 10, 6, 0, 1, 3, 2, 2, 2, 3, 2, 3, 3, 14, 4, 11, 12, 9, 6, 2,
0, 7, 1, 4, 1, 4, 3, 10, 4, 14, 0, 11, 6, 0, 1, 0, 0, 5, 3, 1, 1, 0, 0, 1, 1,
15, 0, 1, 0, 15, 15, 10, 1, 6, 6, 5, 6, 7, 10, 11, 13, 15, 0, 2, 1, 4, 15, 5,
0, 8, 7, 7, 7, 10, 12, 13, 14,
],
relative_freqency: vec![
0.001_133_263_378_803_777_4,
0.005_624_344_176_285_417,
0.012_507_869_884_575_034,
0.021_112_277_019_937_044,
0.016_243_441_762_854_145,
0.022_581_322_140_608_61,
0.033_452_256_033_578_175,
0.048_898_216_159_496_33,
0.031_143_756_558_237_15,
0.034_291_710_388_247_63,
0.040_839_454_354_669_465,
0.054_270_724_029_380_9,
0.069_464_847_848_898_21,
0.107_366_211_962_224_57,
0.145_015_739_769_150_05,
0.356_054_564_533_053_56,
]
};
}
&FREQ
}
}
|
{
let mut square_x = 0.0f64;
for (p, q) in freq.iter().zip(pop.iter()) {
let rel_p = (*p as f64) / (block_n as f64);
// The leading (block_n) factor is deliberately dropped: the statistic is
// more comparable across different blocksizes without it, and we don't
// care about formal statistical significance anyway
square_x += /*(block_n as f64) **/ (rel_p - q).powi(2) / q;
}
square_x
}
|
input_objects_have_public_fields.rs
|
#![allow(dead_code, unused_variables, unused_must_use, unused_imports)]
include!("setup.rs");
mod schema {
use super::*;
juniper_from_schema::graphql_schema! {
type Query {
usersAtLocation(coordinate: Coordinate!): Boolean!
}
input Coordinate {
lat: Int!
long: Int!
}
schema { query: Query }
}
}
pub struct
|
;
impl schema::QueryFields for Query {
fn field_users_at_location(
&self,
executor: &Executor<Context>,
coordinate: schema::Coordinate,
) -> FieldResult<&bool> {
coordinate.lat;
coordinate.long;
unimplemented!()
}
}
|
Query
|
task.py
|
from singletask_sql.tests import DBTestCase
from sqlalchemy import orm
from sqlalchemy import func
from singletask_sql.tables import TasksTable
from singletask_sql.tables.utils import query as query_utils
# https://docs.sqlalchemy.org/en/14/orm/query.html
def
|
(description):
task = TasksTable()
task.description = description
task.domains = f"{description} domains"
task.tags = f"{description} tasks"
return task
class TaskTestCase(DBTestCase):
def test(self):
with orm.Session(self.sql_engine) as session:
query = session.query(func.count(TasksTable.id))
result = session.execute(query).all()
print(result)
query = session.query(func.count(TasksTable.id))
query = query_utils.include_deleted(query)
result = session.execute(query).all()
print(result)
|
create_task
|
pull.go
|
package main
import (
"fmt"
"io"
"os"
"strings"
dockerarchive "github.com/containers/image/docker/archive"
"github.com/containers/image/transports/alltransports"
"github.com/containers/image/types"
"github.com/containers/libpod/libpod/adapter"
image2 "github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var (
pullFlags = []cli.Flag{
cli.StringFlag{
Name: "authfile",
Usage: "Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. Use REGISTRY_AUTH_FILE environment variable to override. ",
},
cli.StringFlag{
Name: "cert-dir",
Usage: "`pathname` of a directory containing TLS certificates and keys",
},
cli.StringFlag{
Name: "creds",
Usage: "`credentials` (USERNAME:PASSWORD) to use for authenticating to a registry",
},
|
},
cli.StringFlag{
Name: "signature-policy",
Usage: "`pathname` of signature policy file (not usually used)",
},
cli.BoolTFlag{
Name: "tls-verify",
Usage: "require HTTPS and verify certificates when contacting registries (default: true)",
},
}
pullDescription = `
Pulls an image from a registry and stores it locally.
An image can be pulled using its tag or digest. If a tag is not
specified, the image with the 'latest' tag (if it exists) is pulled.
`
pullCommand = cli.Command{
Name: "pull",
Usage: "Pull an image from a registry",
Description: pullDescription,
Flags: sortFlags(pullFlags),
Action: pullCmd,
ArgsUsage: "",
OnUsageError: usageErrorHandler,
}
)
// pullCmd gets the data from the command line and calls pullImage
// to copy an image from a registry to a local machine
func pullCmd(c *cli.Context) error {
runtime, err := adapter.GetRuntime(c)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
args := c.Args()
if len(args) == 0 {
logrus.Errorf("an image name must be specified")
return nil
}
if len(args) > 1 {
logrus.Errorf("too many arguments. Requires exactly 1")
return nil
}
if err := validateFlags(c, pullFlags); err != nil {
return err
}
image := args[0]
var registryCreds *types.DockerAuthConfig
if c.IsSet("creds") {
creds, err := util.ParseRegistryCreds(c.String("creds"))
if err != nil {
return err
}
registryCreds = creds
}
var (
writer io.Writer
imgID string
)
if !c.Bool("quiet") {
writer = os.Stderr
}
dockerRegistryOptions := image2.DockerRegistryOptions{
DockerRegistryCreds: registryCreds,
DockerCertPath: c.String("cert-dir"),
}
if c.IsSet("tls-verify") {
dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.BoolT("tls-verify"))
}
// Possible for docker-archive to have multiple tags, so use LoadFromArchiveReference instead
if strings.HasPrefix(image, dockerarchive.Transport.Name()+":") {
srcRef, err := alltransports.ParseImageName(image)
if err != nil {
return errors.Wrapf(err, "error parsing %q", image)
}
newImage, err := runtime.LoadFromArchiveReference(getContext(), srcRef, c.String("signature-policy"), writer)
if err != nil {
return errors.Wrapf(err, "error pulling image from %q", image)
}
imgID = newImage[0].ID()
} else {
authfile := getAuthFile(c.String("authfile"))
newImage, err := runtime.New(getContext(), image, c.String("signature-policy"), authfile, writer, &dockerRegistryOptions, image2.SigningOptions{}, true, nil)
if err != nil {
return errors.Wrapf(err, "error pulling image %q", image)
}
imgID = newImage.ID()
}
// Intentionally ignore any error here: printing the image ID is a
// nice-to-have and not integral to the pull
fmt.Println(imgID)
return nil
}
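// Illustrative invocations of the pull command:
//
//	podman pull alpine:latest
//	podman pull --creds user:pass registry.example.com/myimage:tag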
|
cli.BoolFlag{
Name: "quiet, q",
Usage: "Suppress output information when pulling images",
|
utils.py
|
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume-related Utilities and helpers."""
import ast
import functools
import math
import operator
import re
import time
import uuid
from Crypto.Random import random
import eventlet
from eventlet import tpool
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from six.moves import range
from cinder.brick.local_dev import lvm as brick_lvm
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LI, _LW, _LE
from cinder import objects
from cinder import rpc
from cinder import utils
from cinder.volume import group_types
from cinder.volume import throttling
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def null_safe_str(s):
return str(s) if s else ''
def _usage_from_volume(context, volume_ref, **kw):
now = timeutils.utcnow()
launched_at = volume_ref['launched_at'] or now
created_at = volume_ref['created_at'] or now
volume_status = volume_ref['status']
if volume_status == 'error_managing_deleting':
volume_status = 'deleting'
usage_info = dict(
tenant_id=volume_ref['project_id'],
host=volume_ref['host'],
user_id=volume_ref['user_id'],
availability_zone=volume_ref['availability_zone'],
volume_id=volume_ref['id'],
volume_type=volume_ref['volume_type_id'],
display_name=volume_ref['display_name'],
launched_at=launched_at.isoformat(),
created_at=created_at.isoformat(),
status=volume_status,
snapshot_id=volume_ref['snapshot_id'],
size=volume_ref['size'],
replication_status=volume_ref['replication_status'],
replication_extended_status=volume_ref['replication_extended_status'],
replication_driver_data=volume_ref['replication_driver_data'],
metadata=volume_ref.get('volume_metadata'),)
usage_info.update(kw)
try:
attachments = db.volume_attachment_get_all_by_volume_id(
context, volume_ref['id'])
usage_info['volume_attachment'] = attachments
glance_meta = db.volume_glance_metadata_get(context, volume_ref['id'])
if glance_meta:
usage_info['glance_metadata'] = glance_meta
except exception.GlanceMetadataNotFound:
pass
except exception.VolumeNotFound:
LOG.debug("Can not find volume %s at notify usage", volume_ref['id'])
return usage_info
def _usage_from_backup(backup, **kw):
num_dependent_backups = backup.num_dependent_backups
usage_info = dict(tenant_id=backup.project_id,
user_id=backup.user_id,
availability_zone=backup.availability_zone,
backup_id=backup.id,
host=backup.host,
display_name=backup.display_name,
created_at=str(backup.created_at),
status=backup.status,
volume_id=backup.volume_id,
size=backup.size,
service_metadata=backup.service_metadata,
service=backup.service,
fail_reason=backup.fail_reason,
parent_id=backup.parent_id,
num_dependent_backups=num_dependent_backups,
snapshot_id=backup.snapshot_id,
)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_volume_usage(context, volume, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_volume(context, volume, **extra_usage_info)
rpc.get_notifier("volume", host).info(context, 'volume.%s' % event_suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_backup_usage(context, backup, event_suffix,
extra_usage_info=None,
host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_backup(backup, **extra_usage_info)
rpc.get_notifier("backup", host).info(context, 'backup.%s' % event_suffix,
usage_info)
def _usage_from_snapshot(snapshot, context, **extra_usage_info):
# (niedbalski) A snapshot might be related to a deleted volume; in that
# case the volume information is still required to fill usage_info, so
# we force reading the volume data even if the volume has been deleted.
context.read_deleted = "yes"
volume = db.volume_get(context, snapshot.volume_id)
usage_info = {
'tenant_id': snapshot.project_id,
'user_id': snapshot.user_id,
'availability_zone': volume['availability_zone'],
'volume_id': snapshot.volume_id,
'volume_size': snapshot.volume_size,
'snapshot_id': snapshot.id,
'display_name': snapshot.display_name,
'created_at': str(snapshot.created_at),
'status': snapshot.status,
'deleted': null_safe_str(snapshot.deleted),
'metadata': null_safe_str(snapshot.metadata),
}
usage_info.update(extra_usage_info)
return usage_info
@utils.if_notifications_enabled
def notify_about_snapshot_usage(context, snapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_snapshot(snapshot, context, **extra_usage_info)
rpc.get_notifier('snapshot', host).info(context,
'snapshot.%s' % event_suffix,
usage_info)
def _usage_from_capacity(capacity, **extra_usage_info):
capacity_info = {
'name_to_id': capacity['name_to_id'],
'total': capacity['total'],
'free': capacity['free'],
'allocated': capacity['allocated'],
'provisioned': capacity['provisioned'],
'virtual_free': capacity['virtual_free'],
'reported_at': capacity['reported_at']
}
capacity_info.update(extra_usage_info)
return capacity_info
@utils.if_notifications_enabled
def notify_about_capacity_usage(context, capacity, suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_capacity(capacity, **extra_usage_info)
rpc.get_notifier('capacity', host).info(context,
'capacity.%s' % suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_replication_usage(context, volume, suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_volume(context, volume,
**extra_usage_info)
rpc.get_notifier('replication', host).info(context,
'replication.%s' % suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_replication_error(context, volume, suffix,
extra_error_info=None, host=None):
if not host:
host = CONF.host
if not extra_error_info:
extra_error_info = {}
usage_info = _usage_from_volume(context, volume,
**extra_error_info)
rpc.get_notifier('replication', host).error(context,
'replication.%s' % suffix,
usage_info)
def _usage_from_consistencygroup(group_ref, **kw):
usage_info = dict(tenant_id=group_ref.project_id,
user_id=group_ref.user_id,
availability_zone=group_ref.availability_zone,
consistencygroup_id=group_ref.id,
name=group_ref.name,
created_at=group_ref.created_at.isoformat(),
status=group_ref.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_consistencygroup_usage(context, group, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_consistencygroup(group,
**extra_usage_info)
rpc.get_notifier("consistencygroup", host).info(
context,
'consistencygroup.%s' % event_suffix,
usage_info)
def _usage_from_group(group_ref, **kw):
usage_info = dict(tenant_id=group_ref.project_id,
user_id=group_ref.user_id,
availability_zone=group_ref.availability_zone,
group_id=group_ref.id,
group_type=group_ref.group_type_id,
name=group_ref.name,
created_at=group_ref.created_at.isoformat(),
status=group_ref.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_group_usage(context, group, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_group(group,
**extra_usage_info)
rpc.get_notifier("group", host).info(
context,
'group.%s' % event_suffix,
usage_info)
def _usage_from_cgsnapshot(cgsnapshot, **kw):
usage_info = dict(
tenant_id=cgsnapshot.project_id,
user_id=cgsnapshot.user_id,
cgsnapshot_id=cgsnapshot.id,
name=cgsnapshot.name,
consistencygroup_id=cgsnapshot.consistencygroup_id,
created_at=cgsnapshot.created_at.isoformat(),
status=cgsnapshot.status)
usage_info.update(kw)
return usage_info
def _usage_from_group_snapshot(group_snapshot, **kw):
usage_info = dict(
tenant_id=group_snapshot.project_id,
user_id=group_snapshot.user_id,
group_snapshot_id=group_snapshot.id,
name=group_snapshot.name,
group_id=group_snapshot.group_id,
group_type=group_snapshot.group_type_id,
created_at=group_snapshot.created_at.isoformat(),
status=group_snapshot.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_cgsnapshot_usage(context, cgsnapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_cgsnapshot(cgsnapshot,
**extra_usage_info)
rpc.get_notifier("cgsnapshot", host).info(
context,
'cgsnapshot.%s' % event_suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_group_snapshot_usage(context, group_snapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_group_snapshot(group_snapshot,
**extra_usage_info)
rpc.get_notifier("group_snapshot", host).info(
context,
'group_snapshot.%s' % event_suffix,
usage_info)
def _check_blocksize(blocksize):
# Check if volume_dd_blocksize is valid
try:
# Rule out zero-sized/negative/float dd blocksize which
# cannot be caught by strutils
if blocksize.startswith(('-', '0')) or '.' in blocksize:
raise ValueError
strutils.string_to_bytes('%sB' % blocksize)
except ValueError:
LOG.warning(_LW("Incorrect value error: %(blocksize)s, "
"it may indicate that \'volume_dd_blocksize\' "
"was configured incorrectly. Fall back to default."),
{'blocksize': blocksize})
# Fall back to default blocksize
CONF.clear_override('volume_dd_blocksize')
blocksize = CONF.volume_dd_blocksize
return blocksize
def check_for_odirect_support(src, dest, flag='oflag=direct'):
# Check whether O_DIRECT is supported
try:
# iflag=direct and if=/dev/zero combination does not work
# error: dd: failed to open '/dev/zero': Invalid argument
if (src == '/dev/zero' and flag == 'iflag=direct'):
return False
else:
utils.execute('dd', 'count=0', 'if=%s' % src,
'of=%s' % dest,
flag, run_as_root=True)
return True
except processutils.ProcessExecutionError:
return False
def _copy_volume_with_path(prefix, srcstr, deststr, size_in_m, blocksize,
sync=False, execute=utils.execute, ionice=None,
sparse=False):
cmd = prefix[:]
if ionice:
cmd.extend(('ionice', ionice))
blocksize = _check_blocksize(blocksize)
size_in_bytes = size_in_m * units.Mi
cmd.extend(('dd', 'if=%s' % srcstr, 'of=%s' % deststr,
'count=%d' % size_in_bytes, 'bs=%s' % blocksize))
# Use O_DIRECT to avoid thrashing the system buffer cache
odirect = check_for_odirect_support(srcstr, deststr, 'iflag=direct')
cmd.append('iflag=count_bytes,direct' if odirect else 'iflag=count_bytes')
if check_for_odirect_support(srcstr, deststr, 'oflag=direct'):
cmd.append('oflag=direct')
odirect = True
# If the volume is being unprovisioned, request that the data be
# persisted before returning, so that it's not discarded from the
# cache.
conv = []
if sync and not odirect:
conv.append('fdatasync')
if sparse:
conv.append('sparse')
if conv:
conv_options = 'conv=' + ",".join(conv)
cmd.append(conv_options)
# Perform the copy
start_time = timeutils.utcnow()
execute(*cmd, run_as_root=True)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
# NOTE(jdg): use a minimum of 1, mostly for unit tests, but also so we
# don't barf in the unlikely event this is 0 (cirros image?)
if duration < 1:
duration = 1
mbps = (size_in_m / duration)
LOG.debug("Volume copy details: src %(src)s, dest %(dest)s, "
"size %(sz).2f MB, duration %(duration).2f sec",
{"src": srcstr,
"dest": deststr,
"sz": size_in_m,
"duration": duration})
LOG.info(_LI("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s"),
{'size_in_m': size_in_m, 'mbps': mbps})
def _open_volume_with_path(path, mode):
try:
with utils.temporary_chown(path):
handle = open(path, mode)
return handle
except Exception:
LOG.error(_LE("Failed to open volume from %(path)s."), {'path': path})
def _transfer_data(src, dest, length, chunk_size):
"""Transfer data between files (Python IO objects)."""
chunks = int(math.ceil(length / chunk_size))
remaining_length = length
LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred.",
{'chunks': chunks, 'bytes': chunk_size})
for chunk in range(0, chunks):
before = time.time()
data = tpool.execute(src.read, min(chunk_size, remaining_length))
# If we have reached end of source, discard any extraneous bytes from
# destination volume if trim is enabled and stop writing.
if data == b'':
break
tpool.execute(dest.write, data)
remaining_length -= len(data)
delta = (time.time() - before)
rate = (chunk_size / delta) / units.Ki
LOG.debug("Transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s).",
{'chunk': chunk + 1, 'chunks': chunks, 'rate': rate})
# yield to any other pending operations
eventlet.sleep(0)
tpool.execute(dest.flush)
def _copy_volume_with_file(src, dest, size_in_m):
src_handle = src
if isinstance(src, six.string_types):
src_handle = _open_volume_with_path(src, 'rb')
dest_handle = dest
if isinstance(dest, six.string_types):
dest_handle = _open_volume_with_path(dest, 'wb')
if not src_handle:
raise exception.DeviceUnavailable(
_("Failed to copy volume, source device unavailable."))
if not dest_handle:
raise exception.DeviceUnavailable(
_("Failed to copy volume, destination device unavailable."))
start_time = timeutils.utcnow()
_transfer_data(src_handle, dest_handle, size_in_m * units.Mi, units.Mi * 4)
duration = max(1, timeutils.delta_seconds(start_time, timeutils.utcnow()))
if isinstance(src, six.string_types):
src_handle.close()
if isinstance(dest, six.string_types):
dest_handle.close()
mbps = (size_in_m / duration)
LOG.info(_LI("Volume copy completed (%(size_in_m).2f MB at "
"%(mbps).2f MB/s)."),
{'size_in_m': size_in_m, 'mbps': mbps})
def copy_volume(src, dest, size_in_m, blocksize, sync=False,
execute=utils.execute, ionice=None, throttle=None,
sparse=False):
"""Copy data from the source volume to the destination volume.
The parameters 'src' and 'dest' are both typically of type str, which
represents the path to each volume on the filesystem. Connectors can
optionally return a volume handle of type RawIOBase for volumes that are
not available on the local filesystem for open/close operations.
If either 'src' or 'dest' are not of type str, then they are assumed to be
of type RawIOBase or any derivative that supports file operations such as
read and write. In this case, the handles are treated as file handles
instead of file paths and, at present, throttling is unavailable.
"""
if (isinstance(src, six.string_types) and
isinstance(dest, six.string_types)):
if not throttle:
throttle = throttling.Throttle.get_default()
with throttle.subcommand(src, dest) as throttle_cmd:
_copy_volume_with_path(throttle_cmd['prefix'], src, dest,
size_in_m, blocksize, sync=sync,
execute=execute, ionice=ionice,
sparse=sparse)
else:
_copy_volume_with_file(src, dest, size_in_m)
def clear_volume(volume_size, volume_path, volume_clear=None,
volume_clear_size=None, volume_clear_ionice=None,
throttle=None):
"""Unprovision old volumes to prevent data leaking between users."""
if volume_clear is None:
volume_clear = CONF.volume_clear
if volume_clear_size is None:
volume_clear_size = CONF.volume_clear_size
if volume_clear_size == 0:
volume_clear_size = volume_size
if volume_clear_ionice is None:
volume_clear_ionice = CONF.volume_clear_ionice
LOG.info(_LI("Performing secure delete on volume: %s"), volume_path)
# We pass sparse=False explicitly here so that zero blocks are not
# skipped in order to clear the volume.
if volume_clear == 'zero':
return copy_volume('/dev/zero', volume_path, volume_clear_size,
CONF.volume_dd_blocksize,
sync=True, execute=utils.execute,
ionice=volume_clear_ionice,
throttle=throttle, sparse=False)
else:
raise exception.InvalidConfigurationValue(
option='volume_clear',
value=volume_clear)
def supports_thin_provisioning():
return brick_lvm.LVM.supports_thin_provisioning(
utils.get_root_helper())
def get_all_physical_volumes(vg_name=None):
return brick_lvm.LVM.get_all_physical_volumes(
utils.get_root_helper(),
vg_name)
def get_all_volume_groups(vg_name=None):
return brick_lvm.LVM.get_all_volume_groups(
utils.get_root_helper(),
vg_name)
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1
'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O
'abcdefghijkmnopqrstuvwxyz') # Removed: l
def generate_password(length=16, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbol groups.
At least one symbol from each group will be included. Unpredictable
results if length is less than the number of symbol groups.
Believed to be reasonably secure (with a reasonable password length!)
"""
# NOTE(jerdfelt): Some password policies require at least one character
# from each group of symbols, so start off with one random character
# from each symbol group
password = [random.choice(s) for s in symbolgroups]
# If length < len(symbolgroups), the leading characters will only
# be from the first length groups. Try our best to not be predictable
# by shuffling and then truncating.
random.shuffle(password)
password = password[:length]
length -= len(password)
# then fill with random characters from all symbol groups
symbols = ''.join(symbolgroups)
password.extend([random.choice(symbols) for _i in range(length)])
# finally shuffle to ensure first x characters aren't from a
# predictable group
random.shuffle(password)
return ''.join(password)
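# Illustrative: generate a 12-character password containing at least one
# symbol from each group, e.g.
#
#   pw = generate_password(length=12)
#   assert len(pw) == 12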
def generate_username(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
# Use the same implementation as the password generation.
return generate_password(length, symbolgroups)
DEFAULT_POOL_NAME = '_pool0'
def extract_host(host, level='backend', default_pool_name=False):
"""Extract Host, Backend or Pool information from host string.
:param host: String for host, which could include host@backend#pool info
:param level: Indicate which level of information should be extracted
from host string. Level can be 'host', 'backend' or 'pool',
default value is 'backend'
:param default_pool_name: this flag specify what to do if level == 'pool'
and there is no 'pool' info encoded in host
string. default_pool_name=True will return
DEFAULT_POOL_NAME, otherwise we return None.
Default value of this parameter is False.
:return: expected information, string or None
:raises: exception.InvalidVolume
For example:
host = 'HostA@BackendB#PoolC'
ret = extract_host(host, 'host')
# ret is 'HostA'
ret = extract_host(host, 'backend')
# ret is 'HostA@BackendB'
ret = extract_host(host, 'pool')
# ret is 'PoolC'
host = 'HostX@BackendY'
ret = extract_host(host, 'pool')
# ret is None
ret = extract_host(host, 'pool', True)
# ret is '_pool0'
"""
if host is None:
msg = _("volume is not assigned to a host")
raise exception.InvalidVolume(reason=msg)
if level == 'host':
# make sure pool is not included
hst = host.split('#')[0]
return hst.split('@')[0]
elif level == 'backend':
return host.split('#')[0]
elif level == 'pool':
lst = host.split('#')
if len(lst) == 2:
return lst[1]
elif default_pool_name is True:
return DEFAULT_POOL_NAME
else:
return None
def append_host(host, pool):
"""Encode pool into host info."""
if not host or not pool:
return host
new_host = "#".join([host, pool])
return new_host
def matching_backend_name(src_volume_type, volume_type):
if src_volume_type.get('volume_backend_name') and \
volume_type.get('volume_backend_name'):
return src_volume_type.get('volume_backend_name') == \
volume_type.get('volume_backend_name')
else:
return False
def hosts_are_equivalent(host_1, host_2):
# In case host_1 or host_2 are None
if not (host_1 and host_2):
return host_1 == host_2
return extract_host(host_1) == extract_host(host_2)
def read_proc_mounts():
|
def extract_id_from_volume_name(vol_name):
regex = re.compile(
CONF.volume_name_template.replace('%s', '(?P<uuid>.+)'))
match = regex.match(vol_name)
return match.group('uuid') if match else None
def check_already_managed_volume(vol_id):
"""Check cinder db for already managed volume.
:param vol_id: volume id parameter
:returns: bool -- return True, if db entry with specified
volume id exists, otherwise return False
"""
try:
return (vol_id and isinstance(vol_id, six.string_types) and
uuid.UUID(vol_id, version=4) and
objects.Volume.exists(context.get_admin_context(), vol_id))
except ValueError:
return False
def extract_id_from_snapshot_name(snap_name):
"""Return a snapshot's ID from its name on the backend."""
regex = re.compile(
CONF.snapshot_name_template.replace('%s', '(?P<uuid>.+)'))
match = regex.match(snap_name)
return match.group('uuid') if match else None
def paginate_entries_list(entries, marker, limit, offset, sort_keys,
sort_dirs):
"""Paginate a list of entries.
:param entries: list of dictionaries
:param marker: The last element previously returned
:param limit: The maximum number of items to return
:param offset: The number of items to skip from the marker or from the
    first element.
:param sort_keys: A list of keys in the dictionaries to sort by
:param sort_dirs: A list of sort directions, where each is either 'asc'
    or 'desc'
"""
comparers = [(operator.itemgetter(key.strip()), direction)
             for (key, direction) in zip(sort_keys, sort_dirs)]
def comparer(left, right):
for fn, d in comparers:
left_val = fn(left)
right_val = fn(right)
if isinstance(left_val, dict):
left_val = sorted(left_val.values())[0]
if isinstance(right_val, dict):
right_val = sorted(right_val.values())[0]
if left_val == right_val:
continue
if d == 'asc':
return -1 if left_val < right_val else 1
else:
return -1 if left_val > right_val else 1
else:
return 0
sorted_entries = sorted(entries, key=functools.cmp_to_key(comparer))
start_index = 0
if offset is None:
offset = 0
if marker:
start_index = -1
for i, entry in enumerate(sorted_entries):
if entry['reference'] == marker:
start_index = i + 1
break
if start_index < 0:
msg = _('marker not found: %s') % marker
raise exception.InvalidInput(reason=msg)
range_end = start_index + limit
return sorted_entries[start_index + offset:range_end + offset]
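# Illustrative usage sketch (hypothetical entries, each a dict with
# 'reference' and 'size' keys):
#
#   page = paginate_entries_list(entries, marker=None, limit=2, offset=0,
#                                sort_keys=['size'], sort_dirs=['asc'])
#   # -> the two entries with the smallest 'size'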
def convert_config_string_to_dict(config_string):
"""Convert config file replication string to a dict.
The only supported form is as follows:
"{'key-1'='val-1' 'key-2'='val-2'...}"
:param config_string: Properly formatted string to convert to dict.
:response: dict of string values
"""
resultant_dict = {}
try:
st = config_string.replace("=", ":")
st = st.replace(" ", ", ")
resultant_dict = ast.literal_eval(st)
except Exception:
LOG.warning(_LW("Error encountered translating config_string: "
"%(config_string)s to dict"),
{'config_string': config_string})
return resultant_dict
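# Illustrative example of the supported form:
#
#   convert_config_string_to_dict("{'key-1'='val-1' 'key-2'='val-2'}")
#   # -> {'key-1': 'val-1', 'key-2': 'val-2'}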
def create_encryption_key(context, key_manager, volume_type_id):
encryption_key_id = None
if volume_types.is_encrypted(context, volume_type_id):
volume_type_encryption = (
volume_types.get_volume_type_encryption(context,
volume_type_id))
cipher = volume_type_encryption.cipher
length = volume_type_encryption.key_size
algorithm = cipher.split('-')[0] if cipher else None
encryption_key_id = key_manager.create_key(
context,
algorithm=algorithm,
length=length)
return encryption_key_id
def is_replicated_str(str):
spec = (str or '').split()
return (len(spec) == 2 and
spec[0] == '<is>' and strutils.bool_from_string(spec[1]))
def is_replicated_spec(extra_specs):
return (extra_specs and
is_replicated_str(extra_specs.get('replication_enabled')))
def group_get_by_id(group_id):
ctxt = context.get_admin_context()
group = db.group_get(ctxt, group_id)
return group
def is_group_a_cg_snapshot_type(group_or_snap):
LOG.debug("Checking if %s is a consistent snapshot group",
group_or_snap)
if group_or_snap["group_type_id"] is not None:
spec = group_types.get_group_type_specs(
group_or_snap["group_type_id"],
key="consistent_group_snapshot_enabled"
)
return spec == "<is> True"
return False
|
"""Read the /proc/mounts file.
It's a trivial wrapper, but it eases the writing of unit tests, since
mocking __builtin__.open() for one specific file only is not trivial.
"""
with open('/proc/mounts') as mounts:
return mounts.readlines()
|
doc.go
|
// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:generate mockgen -destination=mocks.go -package=logr github.com/go-logr/logr Logger
|
package logr
|
|
photo-gallery.component.ts
|
import { CanActivate, Component } from "../core/component-decorators";
import { PhotoGalleryActionCreator } from "./photo-gallery.actions";
@Component({
templateUrl: "wwwroot/photo-gallery/photo-gallery.component.html",
selector: "photo-gallery",
providers: ["photoGalleryActionCreator"]
|
export class PhotoGalleryComponent {
constructor(private photoGalleryActionCreator: PhotoGalleryActionCreator) { }
}
|
})
|
dqn.py
|
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.common import logger
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
from stable_baselines3.dqn.policies import DQNPolicy
class DQN(OffPolicyAlgorithm):
"""
Deep Q-Network (DQN)
Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236
Default hyperparameters are taken from the nature paper,
except for the optimizer and learning rate that were taken from Stable Baselines defaults.
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate; it can be a function
of the current progress (from 1 to 0)
:param buffer_size: size of the replay buffer
:param learning_starts: how many steps of the model to collect transitions for before learning starts
:param batch_size: Minibatch size for each gradient update
:param tau: the soft update coefficient ("Polyak update", between 0 and 1); default 1 for a hard update
:param gamma: the discount factor
:param train_freq: Update the model every ``train_freq`` steps. Set to `-1` to disable.
:param gradient_steps: How many gradient steps to do after each rollout
(see ``train_freq`` and ``n_episodes_rollout``)
Setting it to ``-1`` means doing as many gradient steps as steps taken in the environment
during the rollout.
:param n_episodes_rollout: Update the model every ``n_episodes_rollout`` episodes.
Note that this cannot be used at the same time as ``train_freq``. Set to `-1` to disable.
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param target_update_interval: update the target network every ``target_update_interval``
environment steps.
:param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
:param exploration_initial_eps: initial value of random action probability
:param exploration_final_eps: final value of random action probability
:param max_grad_norm: The maximum value for the gradient clipping
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
policy: Union[str, Type[DQNPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 1e-4,
buffer_size: int = 1000000,
learning_starts: int = 50000,
batch_size: Optional[int] = 32,
tau: float = 1.0,
gamma: float = 0.99,
train_freq: int = 4,
gradient_steps: int = 1,
n_episodes_rollout: int = -1,
optimize_memory_usage: bool = False,
target_update_interval: int = 10000,
exploration_fraction: float = 0.1,
exploration_initial_eps: float = 1.0,
exploration_final_eps: float = 0.05,
max_grad_norm: float = 10,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(DQN, self).__init__(
policy,
env,
DQNPolicy,
learning_rate,
buffer_size,
learning_starts,
batch_size,
tau,
gamma,
train_freq,
gradient_steps,
n_episodes_rollout,
action_noise=None, # No action noise
policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
sde_support=False,
optimize_memory_usage=optimize_memory_usage,
)
self.exploration_initial_eps = exploration_initial_eps
self.exploration_final_eps = exploration_final_eps
self.exploration_fraction = exploration_fraction
self.target_update_interval = target_update_interval
self.max_grad_norm = max_grad_norm
# "epsilon" for the epsilon-greedy exploration
self.exploration_rate = 0.0
# Linear schedule will be defined in `_setup_model()`
self.exploration_schedule = None
self.q_net, self.q_net_target = None, None
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(DQN, self)._setup_model()
self._create_aliases()
self.exploration_schedule = get_linear_fn(
self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
)
def _create_aliases(self) -> None:
self.q_net = self.policy.q_net
self.q_net_target = self.policy.q_net_target
def _on_step(self) -> None:
"""
Update the exploration rate and target network if needed.
This method is called in ``collect_rollouts()`` after each step in the environment.
"""
if self.num_timesteps % self.target_update_interval == 0:
polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
logger.record("rollout/exploration rate", self.exploration_rate)
def train(self, gradient_steps: int, batch_size: int = 100) -> None:
# Update learning rate according to schedule
self._update_learning_rate(self.policy.optimizer)
losses = []
for gradient_step in range(gradient_steps):
# Sample replay buffer
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
with th.no_grad():
# Compute the target Q values
target_q = self.q_net_target(replay_data.next_observations)
# Follow greedy policy: use the one with the highest value
target_q, _ = target_q.max(dim=1)
# Avoid potential broadcast issue
target_q = target_q.reshape(-1, 1)
# 1-step TD target
target_q = replay_data.rewards + (1 - replay_data.dones) * self.gamma * target_q
# Get current Q estimates
current_q = self.q_net(replay_data.observations)
# Retrieve the q-values for the actions from the replay buffer
current_q = th.gather(current_q, dim=1, index=replay_data.actions.long())
# Compute Huber loss (less sensitive to outliers)
loss = F.smooth_l1_loss(current_q, target_q)
losses.append(loss.item())
# Optimize the policy
self.policy.optimizer.zero_grad()
loss.backward()
# Clip gradient norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
# Increase update counter
self._n_updates += gradient_steps
logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
logger.record("train/loss", np.mean(losses))
def predict(
self,
observation: np.ndarray,
state: Optional[np.ndarray] = None,
mask: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""
Overrides the base_class predict function to include epsilon-greedy exploration.
:param observation: the input observation
:param state: The last states (can be None, used in recurrent policies)
:param mask: The last masks (can be None, used in recurrent policies)
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next state
(used in recurrent policies)
"""
if not deterministic and np.random.rand() < self.exploration_rate:
|
else:
action, state = self.policy.predict(observation, state, mask, deterministic)
return action, state
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "DQN",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> OffPolicyAlgorithm:
return super(DQN, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
def _excluded_save_params(self) -> List[str]:
return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "policy.optimizer"]
return state_dicts, []
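# Minimal usage sketch (illustrative; assumes a registered Gym env id):
#
#   model = DQN("MlpPolicy", "CartPole-v1", verbose=1)
#   model.learn(total_timesteps=10_000)
#   action, _ = model.predict(obs, deterministic=True)  # obs from env.reset()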
|
if is_vectorized_observation(observation, self.observation_space):
n_batch = observation.shape[0]
action = np.array([self.action_space.sample() for _ in range(n_batch)])
else:
action = np.array(self.action_space.sample())
|
debug.go
|
// Copyright 2016 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License. See the AUTHORS file
// for names of contributors.
package cli
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"os"
"sort"
"strconv"
"strings"
"time"
"github.com/cockroachdb/cockroach/pkg/cli/synctest"
"github.com/cockroachdb/cockroach/pkg/config"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/engine"
"github.com/cockroachdb/cockroach/pkg/storage/engine/enginepb"
"github.com/cockroachdb/cockroach/pkg/storage/rditer"
"github.com/cockroachdb/cockroach/pkg/storage/stateloader"
"github.com/cockroachdb/cockroach/pkg/storage/storagebase"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/humanizeutil"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/coreos/etcd/raft/raftpb"
"github.com/gogo/protobuf/jsonpb"
"github.com/kr/pretty"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var debugKeysCmd = &cobra.Command{
Use: "keys <directory>",
Short: "dump all the keys in a store",
Long: `
Pretty-prints all keys in a store.
`,
Args: cobra.ExactArgs(1),
RunE: MaybeDecorateGRPCError(runDebugKeys),
}
func parseRangeID(arg string) (roachpb.RangeID, error) {
rangeIDInt, err := strconv.ParseInt(arg, 10, 64)
if err != nil {
return 0, err
}
if rangeIDInt < 1 {
return 0, fmt.Errorf("illegal RangeID: %d", rangeIDInt)
}
return roachpb.RangeID(rangeIDInt), nil
}
func openExistingStore(dir string, stopper *stop.Stopper, readOnly bool) (*engine.RocksDB, error) {
cache := engine.NewRocksDBCache(server.DefaultCacheSize)
defer cache.Release()
maxOpenFiles, err := server.SetOpenFileLimitForOneStore()
if err != nil {
return nil, err
}
db, err := engine.NewRocksDB(
engine.RocksDBConfig{
Settings: serverCfg.Settings,
Dir: dir,
MaxOpenFiles: maxOpenFiles,
MustExist: true,
ReadOnly: readOnly,
},
cache,
)
if err != nil {
return nil, err
}
stopper.AddCloser(db)
return db, nil
}
func printKey(kv engine.MVCCKeyValue) (bool, error) {
fmt.Printf("%s %s: ", kv.Key.Timestamp, kv.Key.Key)
if debugCtx.sizes {
fmt.Printf(" %d %d", len(kv.Key.Key), len(kv.Value))
}
fmt.Printf("\n")
return false, nil
}
func printKeyValue(kv engine.MVCCKeyValue) (bool, error) {
fmt.Printf("%s %s: ", kv.Key.Timestamp, kv.Key.Key)
if debugCtx.sizes {
fmt.Printf("%d %d: ", len(kv.Key.Key), len(kv.Value))
}
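	// Try each pretty-printer in turn; the first decoder that recognizes
	// the key/value pair produces the output.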
decoders := []func(kv engine.MVCCKeyValue) (string, error){
tryRaftLogEntry,
tryRangeDescriptor,
tryMeta,
tryTxn,
tryRangeIDKey,
tryIntent,
}
for _, decoder := range decoders {
out, err := decoder(kv)
if err != nil {
continue
}
fmt.Println(out)
return false, nil
}
// No better idea, just print raw bytes and hope that folks use `less -S`.
fmt.Printf("%q\n\n", kv.Value)
return false, nil
}
func runDebugKeys(cmd *cobra.Command, args []string) error {
stopper := stop.NewStopper()
defer stopper.Stop(context.Background())
db, err := openExistingStore(args[0], stopper, true /* readOnly */)
if err != nil {
return err
}
printer := printKey
if debugCtx.values {
printer = printKeyValue
}
return db.Iterate(debugCtx.startKey, debugCtx.endKey, printer)
}
var debugRangeDataCmd = &cobra.Command{
Use: "range-data <directory> <range id>",
Short: "dump all the data in a range",
Long: `
Pretty-prints all keys and values in a range. By default, includes unreplicated
state like the raft HardState. With --replicated, only includes data covered by
the consistency checker.
`,
Args: cobra.ExactArgs(2),
RunE: MaybeDecorateGRPCError(runDebugRangeData),
}
func runDebugRangeData(cmd *cobra.Command, args []string) error {
stopper := stop.NewStopper()
defer stopper.Stop(context.Background())
db, err := openExistingStore(args[0], stopper, true /* readOnly */)
if err != nil {
return err
}
rangeID, err := parseRangeID(args[1])
if err != nil {
return err
}
desc, err := loadRangeDescriptor(db, rangeID)
if err != nil {
return err
}
iter := rditer.NewReplicaDataIterator(&desc, db, debugCtx.replicated)
defer iter.Close()
for ; ; iter.Next() {
if ok, err := iter.Valid(); err != nil {
return err
} else if !ok {
break
}
if _, err := printKeyValue(engine.MVCCKeyValue{
Key: iter.Key(),
Value: iter.Value(),
}); err != nil {
return err
}
}
return nil
}
var debugRangeDescriptorsCmd = &cobra.Command{
Use: "range-descriptors <directory>",
Short: "print all range descriptors in a store",
Long: `
Prints all range descriptors in a store with a history of changes.
`,
Args: cobra.ExactArgs(1),
RunE: MaybeDecorateGRPCError(runDebugRangeDescriptors),
}
func descStr(desc roachpb.RangeDescriptor) string {
return fmt.Sprintf("[%s, %s)\n\tRaw:%s\n",
desc.StartKey, desc.EndKey, &desc)
}
func tryMeta(kv engine.MVCCKeyValue) (string, error) {
if !bytes.HasPrefix(kv.Key.Key, keys.Meta1Prefix) && !bytes.HasPrefix(kv.Key.Key, keys.Meta2Prefix) {
return "", errors.New("not a meta key")
}
value := roachpb.Value{
Timestamp: kv.Key.Timestamp,
RawBytes: kv.Value,
}
var desc roachpb.RangeDescriptor
if err := value.GetProto(&desc); err != nil {
return "", err
}
return descStr(desc), nil
}
func maybeUnmarshalInline(v []byte, dest protoutil.Message) error {
var meta enginepb.MVCCMetadata
if err := protoutil.Unmarshal(v, &meta); err != nil {
return err
}
value := roachpb.Value{
RawBytes: meta.RawBytes,
}
return value.GetProto(dest)
}
func tryTxn(kv engine.MVCCKeyValue) (string, error) {
var txn roachpb.Transaction
if err := maybeUnmarshalInline(kv.Value, &txn); err != nil {
return "", err
}
return txn.String() + "\n", nil
}
func tryRangeIDKey(kv engine.MVCCKeyValue) (string, error) {
if kv.Key.Timestamp != (hlc.Timestamp{}) {
return "", fmt.Errorf("range ID keys shouldn't have timestamps: %s", kv.Key)
}
_, _, suffix, _, err := keys.DecodeRangeIDKey(kv.Key.Key)
if err != nil {
return "", err
}
// All range ID keys are stored inline on the metadata.
var meta enginepb.MVCCMetadata
if err := protoutil.Unmarshal(kv.Value, &meta); err != nil {
return "", err
}
value := roachpb.Value{RawBytes: meta.RawBytes}
// Values encoded as protobufs set msg and continue outside the
// switch. Other types are handled inside the switch and return.
var msg protoutil.Message
switch {
case bytes.Equal(suffix, keys.LocalLeaseAppliedIndexSuffix):
fallthrough
case bytes.Equal(suffix, keys.LocalRaftAppliedIndexSuffix):
i, err := value.GetInt()
if err != nil {
return "", err
}
return strconv.FormatInt(i, 10), nil
case bytes.Equal(suffix, keys.LocalRangeFrozenStatusSuffix):
b, err := value.GetBool()
if err != nil {
return "", err
}
return strconv.FormatBool(b), nil
case bytes.Equal(suffix, keys.LocalAbortSpanSuffix):
msg = &roachpb.AbortSpanEntry{}
case bytes.Equal(suffix, keys.LocalRangeLastGCSuffix):
msg = &hlc.Timestamp{}
case bytes.Equal(suffix, keys.LocalRaftTombstoneSuffix):
msg = &roachpb.RaftTombstone{}
case bytes.Equal(suffix, keys.LocalRaftTruncatedStateSuffix):
msg = &roachpb.RaftTruncatedState{}
case bytes.Equal(suffix, keys.LocalRangeLeaseSuffix):
msg = &roachpb.Lease{}
case bytes.Equal(suffix, keys.LocalRangeStatsSuffix):
msg = &enginepb.MVCCStats{}
case bytes.Equal(suffix, keys.LocalRaftHardStateSuffix):
msg = &raftpb.HardState{}
case bytes.Equal(suffix, keys.LocalRaftLastIndexSuffix):
i, err := value.GetInt()
if err != nil {
return "", err
}
return strconv.FormatInt(i, 10), nil
case bytes.Equal(suffix, keys.LocalRangeLastVerificationTimestampSuffixDeprecated):
msg = &hlc.Timestamp{}
case bytes.Equal(suffix, keys.LocalRangeLastReplicaGCTimestampSuffix):
msg = &hlc.Timestamp{}
default:
return "", fmt.Errorf("unknown raft id key %s", suffix)
}
if err := value.GetProto(msg); err != nil {
return "", err
}
return msg.String(), nil
}
func checkRangeDescriptorKey(key engine.MVCCKey) error {
_, suffix, _, err := keys.DecodeRangeKey(key.Key)
if err != nil {
return err
}
if !bytes.Equal(suffix, keys.LocalRangeDescriptorSuffix) {
return fmt.Errorf("wrong suffix: %s", suffix)
}
return nil
}
func tryRangeDescriptor(kv engine.MVCCKeyValue) (string, error) {
if err := checkRangeDescriptorKey(kv.Key); err != nil {
return "", err
}
var desc roachpb.RangeDescriptor
if err := getProtoValue(kv.Value, &desc); err != nil {
return "", err
}
return descStr(desc), nil
}
func printRangeDescriptor(kv engine.MVCCKeyValue) (bool, error) {
if out, err := tryRangeDescriptor(kv); err == nil {
fmt.Printf("%s %q: %s\n", kv.Key.Timestamp, kv.Key.Key, out)
}
return false, nil
}
func tryIntent(kv engine.MVCCKeyValue) (string, error) {
if len(kv.Value) == 0 {
return "", errors.New("empty")
}
var meta enginepb.MVCCMetadata
if err := protoutil.Unmarshal(kv.Value, &meta); err != nil {
return "", err
}
s := fmt.Sprintf("%+v", meta)
if meta.Txn != nil {
s = meta.Txn.Timestamp.String() + " " + s
}
return s, nil
}
func getProtoValue(data []byte, msg protoutil.Message) error {
value := roachpb.Value{
RawBytes: data,
}
return value.GetProto(msg)
}
func loadRangeDescriptor(
db engine.Engine, rangeID roachpb.RangeID,
) (roachpb.RangeDescriptor, error) {
var desc roachpb.RangeDescriptor
handleKV := func(kv engine.MVCCKeyValue) (bool, error) {
if kv.Key.Timestamp == (hlc.Timestamp{}) {
// We only want values, not MVCCMetadata.
return false, nil
}
if err := checkRangeDescriptorKey(kv.Key); err != nil {
// Range descriptor keys are interleaved with others, so if it
// doesn't parse as a range descriptor just skip it.
return false, nil
}
if err := getProtoValue(kv.Value, &desc); err != nil {
return false, err
}
return desc.RangeID == rangeID, nil
}
// Range descriptors are stored by key, so we have to scan over the
// range-local data to find the one for this RangeID.
start := engine.MakeMVCCMetadataKey(keys.LocalRangePrefix)
end := engine.MakeMVCCMetadataKey(keys.LocalRangeMax)
if err := db.Iterate(start, end, handleKV); err != nil {
return roachpb.RangeDescriptor{}, err
}
if desc.RangeID == rangeID {
return desc, nil
}
return roachpb.RangeDescriptor{}, fmt.Errorf("range descriptor %d not found", rangeID)
}
func runDebugRangeDescriptors(cmd *cobra.Command, args []string) error {
stopper := stop.NewStopper()
defer stopper.Stop(context.Background())
db, err := openExistingStore(args[0], stopper, true /* readOnly */)
if err != nil {
return err
}
start := engine.MakeMVCCMetadataKey(keys.LocalRangePrefix)
end := engine.MakeMVCCMetadataKey(keys.LocalRangeMax)
return db.Iterate(start, end, printRangeDescriptor)
}
var debugDecodeKeyCmd = &cobra.Command{
Use: "decode-key",
Short: "decode <key>",
Long: `
Decode a hexadecimal-encoded key and pretty-print it. For example:
$ decode-key BB89F902ADB43000151C2D1ED07DE6C009
/Table/51/1/44938288/1521140384.514565824,0
`,
Args: cobra.ArbitraryArgs,
RunE: func(cmd *cobra.Command, args []string) error {
for _, arg := range args {
b, err := hex.DecodeString(arg)
if err != nil {
return err
}
k, err := engine.DecodeKey(b)
if err != nil {
return err
}
fmt.Println(k)
}
return nil
},
}
var debugRaftLogCmd = &cobra.Command{
Use: "raft-log <directory> <range id>",
Short: "print the raft log for a range",
Long: `
Prints all log entries in a store for the given range.
`,
Args: cobra.ExactArgs(2),
RunE: MaybeDecorateGRPCError(runDebugRaftLog),
}
func tryRaftLogEntry(kv engine.MVCCKeyValue) (string, error) {
var ent raftpb.Entry
if err := maybeUnmarshalInline(kv.Value, &ent); err != nil {
return "", err
}
if ent.Type == raftpb.EntryNormal {
if len(ent.Data) > 0 {
_, cmdData := storage.DecodeRaftCommand(ent.Data)
var cmd storagebase.RaftCommand
if err := protoutil.Unmarshal(cmdData, &cmd); err != nil {
return "", err
}
ent.Data = nil
var leaseStr string
if l := cmd.DeprecatedProposerLease; l != nil {
// Use the full lease, if available.
leaseStr = l.String()
} else {
leaseStr = fmt.Sprintf("lease #%d", cmd.ProposerLeaseSequence)
}
return fmt.Sprintf("%s by %s\n%s\n", &ent, leaseStr, &cmd), nil
}
return fmt.Sprintf("%s: EMPTY\n", &ent), nil
} else if ent.Type == raftpb.EntryConfChange {
var cc raftpb.ConfChange
if err := protoutil.Unmarshal(ent.Data, &cc); err != nil {
return "", err
}
var ctx storage.ConfChangeContext
if err := protoutil.Unmarshal(cc.Context, &ctx); err != nil {
return "", err
}
var cmd storagebase.ReplicatedEvalResult
if err := protoutil.Unmarshal(ctx.Payload, &cmd); err != nil {
return "", err
}
ent.Data = nil
return fmt.Sprintf("%s\n%s\n", &ent, &cmd), nil
}
return "", fmt.Errorf("unknown log entry type: %s", &ent)
}
func printRaftLogEntry(kv engine.MVCCKeyValue) (bool, error) {
if out, err := tryRaftLogEntry(kv); err != nil {
fmt.Printf("%q: %v\n\n", kv.Key.Key, err)
} else {
fmt.Printf("%q: %s\n", kv.Key.Key, out)
}
return false, nil
}
func runDebugRaftLog(cmd *cobra.Command, args []string) error {
stopper := stop.NewStopper()
defer stopper.Stop(context.Background())
db, err := openExistingStore(args[0], stopper, true /* readOnly */)
if err != nil {
return err
}
rangeID, err := parseRangeID(args[1])
if err != nil {
return err
}
start := engine.MakeMVCCMetadataKey(keys.RaftLogPrefix(rangeID))
end := engine.MakeMVCCMetadataKey(keys.RaftLogPrefix(rangeID).PrefixEnd())
return db.Iterate(start, end, printRaftLogEntry)
}
var debugGCCmd = &cobra.Command{
Use: "estimate-gc <directory> [range id]",
Short: "find out what a GC run would do",
Long: `
Sets up (but does not run) a GC collection cycle, giving insight into how much
work would be done (assuming all intent resolution and pushes succeed).
Without a RangeID specified on the command line, runs the analysis for all
ranges individually.
Uses a hard-coded GC policy with a 24 hour TTL for old versions.
`,
Args: cobra.RangeArgs(1, 2),
RunE: MaybeDecorateGRPCError(runDebugGCCmd),
}
func runDebugGCCmd(cmd *cobra.Command, args []string) error {
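	// Scan the range-local descriptor keys to collect the descriptors of all
	// matching ranges, then simulate a GC run against a snapshot of each one.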
stopper := stop.NewStopper()
defer stopper.Stop(context.Background())
var rangeID roachpb.RangeID
if len(args) == 2 {
var err error
if rangeID, err = parseRangeID(args[1]); err != nil {
return err
}
}
db, err := openExistingStore(args[0], stopper, true /* readOnly */)
if err != nil {
return err
}
start := keys.RangeDescriptorKey(roachpb.RKeyMin)
end := keys.RangeDescriptorKey(roachpb.RKeyMax)
var descs []roachpb.RangeDescriptor
if _, err := engine.MVCCIterate(context.Background(), db, start, end, hlc.MaxTimestamp,
false /* consistent */, false /* tombstones */, nil, /* txn */
false /* reverse */, func(kv roachpb.KeyValue) (bool, error) {
var desc roachpb.RangeDescriptor
_, suffix, _, err := keys.DecodeRangeKey(kv.Key)
if err != nil {
return false, err
}
if !bytes.Equal(suffix, keys.LocalRangeDescriptorSuffix) {
return false, nil
}
if err := kv.Value.GetProto(&desc); err != nil {
return false, err
}
if desc.RangeID == rangeID || rangeID == 0 {
descs = append(descs, desc)
}
return desc.RangeID == rangeID, nil
|
if len(descs) == 0 {
return fmt.Errorf("no range matching the criteria found")
}
for _, desc := range descs {
snap := db.NewSnapshot()
defer snap.Close()
info, err := storage.RunGC(
context.Background(),
&desc,
snap,
hlc.Timestamp{WallTime: timeutil.Now().UnixNano()},
config.GCPolicy{TTLSeconds: 24 * 60 * 60 /* 1 day */},
func(_ context.Context, _ [][]roachpb.GCRequest_GCKey, _ *storage.GCInfo) error { return nil },
func(_ context.Context, _ []roachpb.Intent) error { return nil },
func(_ context.Context, _ *roachpb.Transaction, _ []roachpb.Intent) error { return nil },
)
if err != nil {
return err
}
fmt.Printf("RangeID: %d [%s, %s):\n", desc.RangeID, desc.StartKey, desc.EndKey)
_, _ = pretty.Println(info)
}
return nil
}
var debugCheckStoreCmd = &cobra.Command{
Use: "check-store <directory>",
Short: "consistency check for a single store",
Long: `
Perform local consistency checks of a single store.
Capable of detecting the following errors:
* Raft logs that are inconsistent with their metadata
* MVCC stats that are inconsistent with the data within the range
`,
Args: cobra.ExactArgs(1),
RunE: MaybeDecorateGRPCError(runDebugCheckStoreCmd),
}
type replicaCheckInfo struct {
truncatedIndex uint64
appliedIndex uint64
firstIndex uint64
lastIndex uint64
}
func runDebugCheckStoreCmd(cmd *cobra.Command, args []string) error {
stopper := stop.NewStopper()
defer stopper.Stop(context.Background())
ctx := context.Background()
db, err := openExistingStore(args[0], stopper, true /* readOnly */)
if err != nil {
return err
}
if err := runDebugCheckStoreRaft(ctx, db); err != nil {
return err
}
return runDebugCheckStoreDescriptors(ctx, db)
}
func runDebugCheckStoreDescriptors(ctx context.Context, db *engine.RocksDB) error {
fmt.Println("checking MVCC stats")
defer fmt.Println()
var failed bool
if err := storage.IterateRangeDescriptors(ctx, db,
func(desc roachpb.RangeDescriptor) (bool, error) {
claimedMS, err := stateloader.Make(serverCfg.Settings, desc.RangeID).LoadMVCCStats(ctx, db)
if err != nil {
return false, err
}
ms, err := rditer.ComputeStatsForRange(&desc, db, claimedMS.LastUpdateNanos)
if err != nil {
return false, err
}
if !ms.Equal(claimedMS) {
var prefix string
if !claimedMS.ContainsEstimates {
failed = true
} else {
prefix = "(ignored) "
}
fmt.Printf("\n%s%+v: diff(actual, claimed): %s\n", prefix, desc, strings.Join(pretty.Diff(ms, claimedMS), "\n"))
} else {
fmt.Print(".")
}
return false, nil
}); err != nil {
return err
}
if failed {
return errors.New("check failed")
}
return nil
}
func runDebugCheckStoreRaft(ctx context.Context, db *engine.RocksDB) error {
// Iterate over the entire range-id-local space.
start := roachpb.Key(keys.LocalRangeIDPrefix)
end := start.PrefixEnd()
replicaInfo := map[roachpb.RangeID]*replicaCheckInfo{}
getReplicaInfo := func(rangeID roachpb.RangeID) *replicaCheckInfo {
if info, ok := replicaInfo[rangeID]; ok {
return info
}
replicaInfo[rangeID] = &replicaCheckInfo{}
return replicaInfo[rangeID]
}
if _, err := engine.MVCCIterate(ctx, db, start, end, hlc.MaxTimestamp,
false /* consistent */, false /* tombstones */, nil, /* txn */
false /* reverse */, func(kv roachpb.KeyValue) (bool, error) {
rangeID, _, suffix, detail, err := keys.DecodeRangeIDKey(kv.Key)
if err != nil {
return false, err
}
switch {
case bytes.Equal(suffix, keys.LocalRaftTruncatedStateSuffix):
var trunc roachpb.RaftTruncatedState
if err := kv.Value.GetProto(&trunc); err != nil {
return false, err
}
getReplicaInfo(rangeID).truncatedIndex = trunc.Index
case bytes.Equal(suffix, keys.LocalRaftAppliedIndexSuffix):
idx, err := kv.Value.GetInt()
if err != nil {
return false, err
}
getReplicaInfo(rangeID).appliedIndex = uint64(idx)
case bytes.Equal(suffix, keys.LocalRaftLogSuffix):
_, index, err := encoding.DecodeUint64Ascending(detail)
if err != nil {
return false, err
}
ri := getReplicaInfo(rangeID)
if ri.firstIndex == 0 {
ri.firstIndex = index
ri.lastIndex = index
} else {
if index != ri.lastIndex+1 {
fmt.Printf("range %s: log index anomaly: %v followed by %v\n",
rangeID, ri.lastIndex, index)
}
ri.lastIndex = index
}
}
return false, nil
}); err != nil {
return err
}
for rangeID, info := range replicaInfo {
if info.truncatedIndex != info.firstIndex-1 {
fmt.Printf("range %s: truncated index %v should equal first index %v - 1\n",
rangeID, info.truncatedIndex, info.firstIndex)
}
if info.appliedIndex < info.firstIndex || info.appliedIndex > info.lastIndex {
fmt.Printf("range %s: applied index %v should be between first index %v and last index %v\n",
rangeID, info.appliedIndex, info.firstIndex, info.lastIndex)
}
}
return nil
}
var debugRocksDBCmd = &cobra.Command{
Use: "rocksdb",
Short: "run the RocksDB 'ldb' tool",
Long: `
Runs the RocksDB 'ldb' tool, which provides various subcommands for examining
raw store data. 'cockroach debug rocksdb' accepts the same arguments and flags
as 'ldb'.
https://github.com/facebook/rocksdb/wiki/Administration-and-Data-Access-Tool#ldb-tool
`,
// LDB does its own flag parsing.
DisableFlagParsing: true,
Run: func(cmd *cobra.Command, args []string) {
engine.RunLDB(args)
},
}
var debugEnvCmd = &cobra.Command{
Use: "env",
Short: "output environment settings",
Long: `
Output environment variables that influence configuration.
`,
Args: cobra.NoArgs,
Run: func(cmd *cobra.Command, args []string) {
env := envutil.GetEnvReport()
fmt.Print(env)
},
}
var debugCompactCmd = &cobra.Command{
Use: "compact <directory>",
Short: "compact the sstables in a store",
Long: `
Compact the sstables in a store.
`,
Args: cobra.ExactArgs(1),
RunE: MaybeDecorateGRPCError(runDebugCompact),
}
func runDebugCompact(cmd *cobra.Command, args []string) error {
stopper := stop.NewStopper()
defer stopper.Stop(context.Background())
db, err := openExistingStore(args[0], stopper, false /* readOnly */)
if err != nil {
return err
}
{
approxBytesBefore, err := db.ApproximateDiskBytes(roachpb.KeyMin, roachpb.KeyMax)
if err != nil {
return errors.Wrap(err, "while computing approximate size before compaction")
}
fmt.Printf("approximate reported database size before compaction: %s\n", humanizeutil.IBytes(int64(approxBytesBefore)))
}
if err := db.Compact(); err != nil {
return errors.Wrap(err, "while compacting")
}
{
approxBytesAfter, err := db.ApproximateDiskBytes(roachpb.KeyMin, roachpb.KeyMax)
if err != nil {
return errors.Wrap(err, "while computing approximate size after compaction")
}
fmt.Printf("approximate reported database size after compaction: %s\n", humanizeutil.IBytes(int64(approxBytesAfter)))
}
return nil
}
var debugSSTablesCmd = &cobra.Command{
Use: "sstables <directory>",
Short: "list the sstables in a store",
Long: `
List the sstables in a store. The output format is 1 or more lines of:
level [ total size #files ]: file sizes
Only non-empty levels are shown. For levels greater than 0, the files span
non-overlapping ranges of the key space. Level-0 is special in that sstables
are created there by flushing the mem-table, thus every level-0 sstable must be
consulted to see if it contains a particular key. Within a level, the file
sizes are displayed in decreasing order and bucketed by the number of files of
that size. The following example shows 3-level output. In Level-3, there are 19
total files and 14 files that are 129 MiB in size.
1 [ 8M 3 ]: 7M 1M 63K
2 [ 110M 7 ]: 31M 30M 13M[2] 10M 8M 5M
3 [ 2G 19 ]: 129M[14] 122M 93M 24M 18M 9M
The suffixes K, M, G and T are used for terseness to represent KiB, MiB, GiB
and TiB.
`,
Args: cobra.ExactArgs(1),
RunE: MaybeDecorateGRPCError(runDebugSSTables),
}
func runDebugSSTables(cmd *cobra.Command, args []string) error {
stopper := stop.NewStopper()
defer stopper.Stop(context.Background())
db, err := openExistingStore(args[0], stopper, true /* readOnly */)
if err != nil {
return err
}
fmt.Printf("%s", db.GetSSTables())
return nil
}
var debugGossipValuesCmd = &cobra.Command{
Use: "gossip-values <directory>",
Short: "dump all the values in a node's gossip instance",
Long: `
Pretty-prints the values in a node's gossip instance.
Can connect to a running server to get the values or can be provided with
a JSON file captured from a node's /_status/gossip/ debug endpoint.
`,
Args: cobra.ExactArgs(1),
RunE: MaybeDecorateGRPCError(runDebugGossipValues),
}
func runDebugGossipValues(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// If a file is provided, use it. Otherwise, try talking to the running node.
var gossipInfo *gossip.InfoStatus
if debugCtx.inputFile != "" {
file, err := os.Open(debugCtx.inputFile)
if err != nil {
return err
}
defer file.Close()
gossipInfo = new(gossip.InfoStatus)
if err := jsonpb.Unmarshal(file, gossipInfo); err != nil {
return errors.Wrap(err, "failed to parse provided file as gossip.InfoStatus")
}
} else {
conn, _, finish, err := getClientGRPCConn(ctx)
if err != nil {
return err
}
defer finish()
status := serverpb.NewStatusClient(conn)
gossipInfo, err = status.Gossip(ctx, &serverpb.GossipRequest{})
if err != nil {
return errors.Wrap(err, "failed to retrieve gossip from server")
}
}
output, err := parseGossipValues(gossipInfo)
if err != nil {
return err
}
fmt.Println(output)
return nil
}
func parseGossipValues(gossipInfo *gossip.InfoStatus) (string, error) {
var output []string
for key, info := range gossipInfo.Infos {
bytes, err := info.Value.GetBytes()
if err != nil {
return "", errors.Wrapf(err, "failed to extract bytes for key %q", key)
}
if key == gossip.KeyClusterID || key == gossip.KeySentinel {
clusterID, err := uuid.FromBytes(bytes)
if err != nil {
return "", errors.Wrapf(err, "failed to parse value for key %q", key)
}
output = append(output, fmt.Sprintf("%q: %v", key, clusterID))
} else if key == gossip.KeySystemConfig {
if debugCtx.printSystemConfig {
var config config.SystemConfig
if err := protoutil.Unmarshal(bytes, &config); err != nil {
return "", errors.Wrapf(err, "failed to parse value for key %q", key)
}
output = append(output, fmt.Sprintf("%q: %+v", key, config))
} else {
output = append(output, fmt.Sprintf("%q: omitted", key))
}
} else if key == gossip.KeyFirstRangeDescriptor {
var desc roachpb.RangeDescriptor
if err := protoutil.Unmarshal(bytes, &desc); err != nil {
return "", errors.Wrapf(err, "failed to parse value for key %q", key)
}
output = append(output, fmt.Sprintf("%q: %v", key, desc))
} else if gossip.IsNodeIDKey(key) {
var desc roachpb.NodeDescriptor
if err := protoutil.Unmarshal(bytes, &desc); err != nil {
return "", errors.Wrapf(err, "failed to parse value for key %q", key)
}
output = append(output, fmt.Sprintf("%q: %+v", key, desc))
} else if strings.HasPrefix(key, gossip.KeyStorePrefix) {
var desc roachpb.StoreDescriptor
if err := protoutil.Unmarshal(bytes, &desc); err != nil {
return "", errors.Wrapf(err, "failed to parse value for key %q", key)
}
output = append(output, fmt.Sprintf("%q: %+v", key, desc))
} else if strings.HasPrefix(key, gossip.KeyNodeLivenessPrefix) {
var liveness storage.Liveness
if err := protoutil.Unmarshal(bytes, &liveness); err != nil {
return "", errors.Wrapf(err, "failed to parse value for key %q", key)
}
output = append(output, fmt.Sprintf("%q: %+v", key, liveness))
} else if strings.HasPrefix(key, gossip.KeyDeadReplicasPrefix) {
var deadReplicas roachpb.StoreDeadReplicas
if err := protoutil.Unmarshal(bytes, &deadReplicas); err != nil {
return "", errors.Wrapf(err, "failed to parse value for key %q", key)
}
output = append(output, fmt.Sprintf("%q: %+v", key, deadReplicas))
}
}
sort.Strings(output)
return strings.Join(output, "\n"), nil
}
var debugSyncTestCmd = &cobra.Command{
Use: "synctest [directory]",
Short: "Run a performance test for WAL sync speed",
Long: `
`,
Args: cobra.MaximumNArgs(1),
Hidden: true,
RunE: MaybeDecorateGRPCError(runDebugSyncTest),
}
var syncTestOpts = synctest.Options{
Concurrency: 1,
Duration: 10 * time.Second,
LogOnly: true,
}
func runDebugSyncTest(cmd *cobra.Command, args []string) error {
syncTestOpts.Dir = "./testdb"
if len(args) == 1 {
syncTestOpts.Dir = args[0]
}
return synctest.Run(syncTestOpts)
}
func init() {
debugCmd.AddCommand(debugCmds...)
f := debugSyncTestCmd.Flags()
f.IntVarP(&syncTestOpts.Concurrency, "concurrency", "c", syncTestOpts.Concurrency,
"number of concurrent writers")
f.DurationVarP(&syncTestOpts.Duration, "duration", "d", syncTestOpts.Duration,
"duration to run the test for")
f.BoolVarP(&syncTestOpts.LogOnly, "log-only", "l", syncTestOpts.LogOnly,
"only write to the WAL, not to sstables")
}
var debugCmds = []*cobra.Command{
debugKeysCmd,
debugRangeDataCmd,
debugRangeDescriptorsCmd,
debugDecodeKeyCmd,
debugRaftLogCmd,
debugGCCmd,
debugCheckStoreCmd,
debugRocksDBCmd,
debugCompactCmd,
debugSSTablesCmd,
debugGossipValuesCmd,
debugSyncTestCmd,
debugEnvCmd,
debugZipCmd,
}
var debugCmd = &cobra.Command{
Use: "debug [command]",
Short: "debugging commands",
Long: `Various commands for debugging.
These commands are useful for extracting data from the data files of a
process that has failed and cannot restart.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Usage()
},
}
|
}); err != nil {
return err
}
|
utils.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
def get_swagger_json_data():
|
    # Load the base swagger definition, then merge the "paths" and
    # "definitions" sections of each per-resource swagger file into it.
    def load_swagger(name):
        json_file = os.path.join(os.path.dirname(__file__), name)
        with open(json_file) as f:
            return json.load(f)
    json_data = load_swagger('multivim.flavor.swagger.json')
    for name in ('multivim.image.swagger.json',
                 'multivim.network.swagger.json',
                 'multivim.subnet.swagger.json',
                 'multivim.server.swagger.json',
                 'multivim.volume.swagger.json',
                 'multivim.vport.swagger.json',
                 'multivim.tenant.swagger.json',
                 'multivim.host.swagger.json',
                 'multivim.limit.swagger.json'):
        json_data_temp = load_swagger(name)
        json_data["paths"].update(json_data_temp["paths"])
        json_data["definitions"].update(json_data_temp["definitions"])
    json_data["basePath"] = "/api/multicloud-vio/v0/"
    json_data["info"]["title"] = ("MultiVIM driver "
                                  "of OpenStack VIO Service NBI")
    return json_data
|
|
phone_example_test.go
|
package phone
import "fmt"
func ExampleMask() {
phoneMasked := Mask("+55 123 1234567", nil)
fmt.Println(phoneMasked)
// Output:
// ...4567
}
func
|
() {
phoneMasked := Mask("+55 123 1234567", &Option{
NumberOfVisibleCharsOnSufix: 3,
})
fmt.Println(phoneMasked)
// Output:
// ...567
}
func ExampleMask_with_more_used_chars() {
phoneMasked := Mask("+55 (123) 123-456-789", &Option{
NumberOfVisibleCharsOnSufix: 6,
UseAsVisibleChars: DefaultUseAsVisibleChars + "-",
})
fmt.Println(phoneMasked)
// Output:
// ...56-789
}
|
ExampleMask_with_options
|
mask.go
|
package cron
import (
"fmt"
"strings"
"time"
"github.com/toolkits/pkg/logger"
"github.com/didi/nightingale/src/model"
"github.com/didi/nightingale/src/modules/monapi/mcache"
"github.com/didi/nightingale/src/toolkits/stats"
)
// SyncMaskconfLoop periodically syncs the mask (silence) policies, once every
// 9 seconds; collection intervals are generally no shorter than 10 seconds.
func SyncMaskconfLoop() {
duration := time.Second * time.Duration(9)
for {
time.Sleep(duration)
logger.Debug("sync maskconf begin")
err := SyncMaskconf()
if err != nil {
stats.Counter.Set("maskconf.sync.err", 1)
logger.Error("sync maskconf fail: ", err)
} else {
logger.Debug("sync maskconf succ")
}
}
}
func SyncMaskconf() error {
err := model.CleanExpireMask(time.Now().Unix())
if err != nil {
return fmt.Errorf("clean expire mask fail: %v", err)
}
mcs, err := model.MaskconfGetAll()
if err != nil {
|
// key: metric#endpoint
// value: tags
maskMap := make(map[string][]string)
for i := 0; i < len(mcs); i++ {
err := mcs[i].FillEndpoints()
if err != nil {
return fmt.Errorf("%v fill endpoints fail: %v", mcs[i], err)
}
for j := 0; j < len(mcs[i].Endpoints); j++ {
key := mcs[i].Metric + "#" + mcs[i].Endpoints[j]
maskMap[key] = append(maskMap[key], mcs[i].Tags)
}
}
mcache.MaskCache.SetAll(maskMap)
return nil
}
func IsMaskEvent(event *model.Event) bool {
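	// An event is masked if, for any metric in its detail, there is a mask
	// entry keyed by metric#endpoint whose tag list is empty (mask everything)
	// or whose tags are all present in the event's tags.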
detail, err := event.GetEventDetail()
if err != nil {
logger.Errorf("get event detail failed, err: %v", err)
return false
}
for i := 0; i < len(detail); i++ {
eventMetric := detail[i].Metric
var eventTagsList []string
for k, v := range detail[i].Tags {
eventTagsList = append(eventTagsList, fmt.Sprintf("%s=%s", strings.TrimSpace(k), strings.TrimSpace(v)))
}
key := eventMetric + "#" + event.Endpoint
maskTagsList, exists := mcache.MaskCache.GetByKey(key)
if !exists {
continue
}
		for j := 0; j < len(maskTagsList); j++ {
			tagsList := strings.Split(maskTagsList[j], ",")
if inList("", tagsList) {
return true
}
if listContains(tagsList, eventTagsList) {
return true
}
}
}
return false
}
// listContains reports whether blist contains every element of slist.
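// Example: listContains([]string{"a=1"}, []string{"a=1", "b=2"}) is true,
// while listContains([]string{"a=1", "c=3"}, []string{"a=1", "b=2"}) is false.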
func listContains(slist, blist []string) bool {
for i := 0; i < len(slist); i++ {
if !inList(slist[i], blist) {
return false
}
}
return true
}
func inList(v string, lst []string) bool {
for i := 0; i < len(lst); i++ {
if lst[i] == v {
return true
}
}
return false
}
|
return fmt.Errorf("get maskconf fail: %v", err)
}
|
writer.rs
|
use super::multivalued::MultiValuedFastFieldWriter;
use super::serializer::FastFieldStats;
use super::FastFieldDataAccess;
use crate::fastfield::{BytesFastFieldWriter, CompositeFastFieldSerializer};
use crate::indexer::doc_id_mapping::DocIdMapping;
use crate::postings::UnorderedTermId;
use crate::schema::{Cardinality, Document, Field, FieldEntry, FieldType, Schema};
use crate::termdict::TermOrdinal;
use common;
use fnv::FnvHashMap;
use std::collections::HashMap;
use std::io;
use tantivy_bitpacker::BlockedBitpacker;
/// The `FastFieldsWriter` groups all of the fast field writers.
pub struct FastFieldsWriter {
single_value_writers: Vec<IntFastFieldWriter>,
multi_values_writers: Vec<MultiValuedFastFieldWriter>,
bytes_value_writers: Vec<BytesFastFieldWriter>,
}
fn fast_field_default_value(field_entry: &FieldEntry) -> u64 {
match *field_entry.field_type() {
FieldType::I64(_) | FieldType::Date(_) => common::i64_to_u64(0i64),
FieldType::F64(_) => common::f64_to_u64(0.0f64),
_ => 0u64,
}
}
impl FastFieldsWriter {
/// Create all `FastFieldWriter` required by the schema.
pub fn from_schema(schema: &Schema) -> FastFieldsWriter {
let mut single_value_writers = Vec::new();
let mut multi_values_writers = Vec::new();
let mut bytes_value_writers = Vec::new();
for (field, field_entry) in schema.fields() {
match field_entry.field_type() {
FieldType::I64(ref int_options)
| FieldType::U64(ref int_options)
| FieldType::F64(ref int_options)
| FieldType::Date(ref int_options) => {
match int_options.get_fastfield_cardinality() {
Some(Cardinality::SingleValue) => {
let mut fast_field_writer = IntFastFieldWriter::new(field);
let default_value = fast_field_default_value(field_entry);
fast_field_writer.set_val_if_missing(default_value);
single_value_writers.push(fast_field_writer);
}
Some(Cardinality::MultiValues) => {
let fast_field_writer = MultiValuedFastFieldWriter::new(field, false);
multi_values_writers.push(fast_field_writer);
}
None => {}
}
}
FieldType::HierarchicalFacet(_) => {
let fast_field_writer = MultiValuedFastFieldWriter::new(field, true);
multi_values_writers.push(fast_field_writer);
}
FieldType::Bytes(bytes_option) => {
if bytes_option.is_fast() {
let fast_field_writer = BytesFastFieldWriter::new(field);
bytes_value_writers.push(fast_field_writer);
}
}
_ => {}
}
}
FastFieldsWriter {
single_value_writers,
multi_values_writers,
bytes_value_writers,
}
}
    /// The memory used (including child writers).
pub fn mem_usage(&self) -> usize {
self.single_value_writers
.iter()
.map(|w| w.mem_usage())
.sum::<usize>()
+ self
.multi_values_writers
.iter()
.map(|w| w.mem_usage())
.sum::<usize>()
+ self
.bytes_value_writers
.iter()
.map(|w| w.mem_usage())
.sum::<usize>()
}
/// Get the `FastFieldWriter` associated to a field.
pub fn get_field_writer(&self, field: Field) -> Option<&IntFastFieldWriter> {
// TODO optimize
self.single_value_writers
.iter()
.find(|field_writer| field_writer.field() == field)
}
/// Get the `FastFieldWriter` associated to a field.
pub fn get_field_writer_mut(&mut self, field: Field) -> Option<&mut IntFastFieldWriter> {
// TODO optimize
self.single_value_writers
.iter_mut()
.find(|field_writer| field_writer.field() == field)
}
/// Returns the fast field multi-value writer for the given field.
///
/// Returns None if the field does not exist, or is not
/// configured as a multivalued fastfield in the schema.
pub fn get_multivalue_writer_mut(
&mut self,
field: Field,
) -> Option<&mut MultiValuedFastFieldWriter> {
// TODO optimize
self.multi_values_writers
.iter_mut()
.find(|multivalue_writer| multivalue_writer.field() == field)
}
/// Returns the bytes fast field writer for the given field.
///
/// Returns None if the field does not exist, or is not
/// configured as a bytes fastfield in the schema.
pub fn get_bytes_writer_mut(&mut self, field: Field) -> Option<&mut BytesFastFieldWriter> {
// TODO optimize
self.bytes_value_writers
.iter_mut()
.find(|field_writer| field_writer.field() == field)
}
/// Indexes all of the fastfields of a new document.
pub fn add_document(&mut self, doc: &Document) {
for field_writer in &mut self.single_value_writers {
field_writer.add_document(doc);
}
for field_writer in &mut self.multi_values_writers {
field_writer.add_document(doc);
}
for field_writer in &mut self.bytes_value_writers {
field_writer.add_document(doc);
}
}
/// Serializes all of the `FastFieldWriter`s by pushing them in
/// order to the fast field serializer.
pub fn serialize(
&self,
serializer: &mut CompositeFastFieldSerializer,
mapping: &HashMap<Field, FnvHashMap<UnorderedTermId, TermOrdinal>>,
doc_id_map: Option<&DocIdMapping>,
) -> io::Result<()>
|
}
/// Fast field writer for ints.
/// The fast field writer just keeps the values in memory.
///
/// Only when the segment writer can be closed and
/// persisted on disc, the fast field writer is
/// sent to a `FastFieldSerializer` via the `.serialize(...)`
/// method.
///
/// We cannot serialize earlier, as the values are
/// bitpacked and the number of bits required for bitpacking
/// can only be known once we have seen all of the values.
///
/// u64, i64 and f64 all use the same writer:
/// i64 and f64 are simply remapped to the `0..2^64 - 1` range
/// using `common::i64_to_u64` and `common::f64_to_u64`.
pub struct IntFastFieldWriter {
field: Field,
vals: BlockedBitpacker,
val_count: usize,
val_if_missing: u64,
val_min: u64,
val_max: u64,
}
impl IntFastFieldWriter {
/// Creates a new `IntFastFieldWriter`
pub fn new(field: Field) -> IntFastFieldWriter {
IntFastFieldWriter {
field,
vals: BlockedBitpacker::new(),
val_count: 0,
val_if_missing: 0u64,
val_min: u64::max_value(),
val_max: 0,
}
}
    /// The memory used (including child writers).
pub fn mem_usage(&self) -> usize {
self.vals.mem_usage()
}
    /// Returns the field that this writer is targeting.
pub fn field(&self) -> Field {
self.field
}
/// Sets the default value.
///
/// This default value is recorded for documents if
/// a document does not have any value.
fn set_val_if_missing(&mut self, val_if_missing: u64) {
self.val_if_missing = val_if_missing;
}
/// Records a new value.
///
    /// The n-th value being recorded is implicitly
    /// associated with the document with `DocId` n.
    /// (Well, `n-1` actually, because of 0-indexing.)
pub fn add_val(&mut self, val: u64) {
self.vals.add(val);
if val > self.val_max {
self.val_max = val;
}
if val < self.val_min {
self.val_min = val;
}
self.val_count += 1;
}
/// Extract the value associated to the fast field for
/// this document.
///
/// i64 and f64 are remapped to u64 using the logic
/// in `common::i64_to_u64` and `common::f64_to_u64`.
///
/// If the value is missing, then the default value is used
/// instead.
/// If the document has more than one value for the given field,
    /// only the first one is taken into account.
fn extract_val(&self, doc: &Document) -> u64 {
match doc.get_first(self.field) {
Some(v) => super::value_to_u64(v),
None => self.val_if_missing,
}
}
/// Extract the fast field value from the document
/// (or use the default value) and records it.
pub fn add_document(&mut self, doc: &Document) {
let val = self.extract_val(doc);
self.add_val(val);
}
/// get iterator over the data
pub(crate) fn iter(&self) -> impl Iterator<Item = u64> + '_ {
self.vals.iter()
}
/// Push the fast fields value to the `FastFieldWriter`.
pub fn serialize(
&self,
serializer: &mut CompositeFastFieldSerializer,
doc_id_map: Option<&DocIdMapping>,
) -> io::Result<()> {
let (min, max) = if self.val_min > self.val_max {
(0, 0)
} else {
(self.val_min, self.val_max)
};
let fastfield_accessor = WriterFastFieldAccessProvider {
doc_id_map,
vals: &self.vals,
};
let stats = FastFieldStats {
min_value: min,
max_value: max,
num_vals: self.val_count as u64,
};
if let Some(doc_id_map) = doc_id_map {
let iter = doc_id_map
.iter_old_doc_ids()
.map(|doc_id| self.vals.get(doc_id as usize));
serializer.create_auto_detect_u64_fast_field(
self.field,
stats,
fastfield_accessor,
iter.clone(),
iter,
)?;
} else {
serializer.create_auto_detect_u64_fast_field(
self.field,
stats,
fastfield_accessor,
self.vals.iter(),
self.vals.iter(),
)?;
};
Ok(())
}
}
#[derive(Clone)]
struct WriterFastFieldAccessProvider<'map, 'bitp> {
doc_id_map: Option<&'map DocIdMapping>,
vals: &'bitp BlockedBitpacker,
}
impl<'map, 'bitp> FastFieldDataAccess for WriterFastFieldAccessProvider<'map, 'bitp> {
/// Return the value associated to the given doc.
///
/// Whenever possible use the Iterator passed to the fastfield creation instead, for performance reasons.
///
/// # Panics
///
/// May panic if `doc` is greater than the index.
fn get_val(&self, doc: u64) -> u64 {
if let Some(doc_id_map) = self.doc_id_map {
self.vals
.get(doc_id_map.get_old_doc_id(doc as u32) as usize) // consider extra FastFieldReader wrapper for non doc_id_map
} else {
self.vals.get(doc as usize)
}
}
}
|
{
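        // Serialize the writers grouped by kind: single-valued ints first,
        // then multi-valued fields (remapping unordered term ids via
        // `mapping`), and finally bytes fields.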
for field_writer in &self.single_value_writers {
field_writer.serialize(serializer, doc_id_map)?;
}
for field_writer in &self.multi_values_writers {
let field = field_writer.field();
field_writer.serialize(serializer, mapping.get(&field), doc_id_map)?;
}
for field_writer in &self.bytes_value_writers {
field_writer.serialize(serializer, doc_id_map)?;
}
Ok(())
}
|
attention.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 5 22:53:44 2020
@author: Emmett
"""
import tensorflow as tf
import os
from tensorflow.python.keras.layers import Layer
from tensorflow.python.keras import backend as K
class AttentionLayer(Layer):
"""
This class implements Bahdanau attention (https://arxiv.org/pdf/1409.0473.pdf).
    There are three sets of weights introduced: W_a, U_a, and V_a.
"""
def __init__(self, **kwargs):
super(AttentionLayer, self).__init__(**kwargs)
def build(self, input_shape):
assert isinstance(input_shape, list)
# Create a trainable weight variable for this layer.
self.W_a = self.add_weight(name='W_a',
shape=tf.TensorShape((input_shape[0][2], input_shape[0][2])),
initializer='uniform',
trainable=True)
self.U_a = self.add_weight(name='U_a',
shape=tf.TensorShape((input_shape[1][2], input_shape[0][2])),
initializer='uniform',
trainable=True)
self.V_a = self.add_weight(name='V_a',
shape=tf.TensorShape((input_shape[0][2], 1)),
initializer='uniform',
trainable=True)
super(AttentionLayer, self).build(input_shape) # Be sure to call this at the end
def call(self, inputs, verbose=False):
"""
inputs: [encoder_output_sequence, decoder_output_sequence]
"""
        assert isinstance(inputs, list)
encoder_out_seq, decoder_out_seq = inputs
if verbose:
print('encoder_out_seq>', encoder_out_seq.shape)
print('decoder_out_seq>', decoder_out_seq.shape)
def energy_step(inputs, states):
""" Step function for computing energy for a single decoder state
inputs: (batchsize * 1 * de_in_dim)
states: (batchsize * 1 * de_latent_dim)
"""
assert_msg = "States must be an iterable. Got {} of type {}".format(states, type(states))
assert isinstance(states, list) or isinstance(states, tuple), assert_msg
""" Some parameters required for shaping tensors"""
en_seq_len, en_hidden = encoder_out_seq.shape[1], encoder_out_seq.shape[2]
de_hidden = inputs.shape[-1]
""" Computing S.Wa where S=[s0, s1, ..., si]"""
# <= batch size * en_seq_len * latent_dim
W_a_dot_s = K.dot(encoder_out_seq, self.W_a)
""" Computing hj.Ua """
U_a_dot_h = K.expand_dims(K.dot(inputs, self.U_a), 1) # <= batch_size, 1, latent_dim
if verbose:
print('Ua.h>', U_a_dot_h.shape)
""" tanh(S.Wa + hj.Ua) """
# <= batch_size*en_seq_len, latent_dim
Ws_plus_Uh = K.tanh(W_a_dot_s + U_a_dot_h)
if verbose:
print('Ws+Uh>', Ws_plus_Uh.shape)
""" softmax(va.tanh(S.Wa + hj.Ua)) """
# <= batch_size, en_seq_len
e_i = K.squeeze(K.dot(Ws_plus_Uh, self.V_a), axis=-1)
# <= batch_size, en_seq_len
e_i = K.softmax(e_i)
if verbose:
print('ei>', e_i.shape)
return e_i, [e_i]
def context_step(inputs, states):
""" Step function for computing ci using ei """
assert_msg = "States must be an iterable. Got {} of type {}".format(states, type(states))
assert isinstance(states, list) or isinstance(states, tuple), assert_msg
# <= batch_size, hidden_size
c_i = K.sum(encoder_out_seq * K.expand_dims(inputs, -1), axis=1)
if verbose:
print('ci>', c_i.shape)
return c_i, [c_i]
fake_state_c = K.sum(encoder_out_seq, axis=1)
        fake_state_e = K.sum(encoder_out_seq, axis=2)  # <= (batch_size, en_seq_len)
""" Computing energy outputs """
# e_outputs => (batch_size, de_seq_len, en_seq_len)
last_out, e_outputs, _ = K.rnn(
energy_step, decoder_out_seq, [fake_state_e],
)
""" Computing context vectors """
|
context_step, e_outputs, [fake_state_c],
)
return c_outputs, e_outputs
def compute_output_shape(self, input_shape):
""" Outputs produced by the layer """
return [
tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),
tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[0][1]))
]
|
last_out, c_outputs, _ = K.rnn(
|
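A minimal wiring sketch for the AttentionLayer above (illustrative assumptions, not part of the original file: toy shapes, LSTM sizes, and variable names):
from tensorflow.python.keras.layers import Input, LSTM
from tensorflow.python.keras.models import Model
# AttentionLayer.call expects [encoder_out_seq, decoder_out_seq].
enc_inputs = Input(shape=(20, 8))   # (en_seq_len, feature_dim)
dec_inputs = Input(shape=(15, 8))   # (de_seq_len, feature_dim)
enc_seq = LSTM(32, return_sequences=True)(enc_inputs)
dec_seq = LSTM(32, return_sequences=True)(dec_inputs)
# context vectors: (batch, de_seq_len, en_hidden); weights: (batch, de_seq_len, en_seq_len)
context, attn_weights = AttentionLayer()([enc_seq, dec_seq])
model = Model([enc_inputs, dec_inputs], [context, attn_weights])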
config.go
|
package config
import (
"github.com/spf13/viper"
)
const Version = "0.0.1"
// Transfer queue information
type TransferQueueInfo struct {
BufferSize int `mapstructure:"buffer_size" json:"buffer_size"`
}
// Node information
type NodeInfo struct {
Name string `mapstructure:"name" json:"name"`
IP string `mapstructure:"ip" json:"ip"`
TransferQueue TransferQueueInfo `mapstructure:"transfer_queue" json:"transfer_queue"`
}
// Input plugin information
type InputPluginInfo struct {
Name string `mapstructure:"plugin_name" json:"plugin_name"`
Path string `mapstructure:"plugin_path" json:"plugin_path"`
Duration int `mapstructure:"duration" json:"duration"`
Active bool `mapstructure:"active" json:"active"`
PluginConfig map[string]string `mapstructure:"config" json:"config"`
}
// Output plugin information
type OutputPluginInfo struct {
Name string `mapstructure:"plugin_name" json:"plugin_name"`
Path string `mapstructure:"plugin_path" json:"plugin_path"`
Active bool `mapstructure:"active" json:"active"`
Inputs map[string]bool `mapstructure:"inputs" json:"inputs"`
PluginConfig map[string]string `mapstructure:"config" json:"config"`
}
// Config structure
type Config struct {
Node NodeInfo `mapstructure:"node" json:"node"`
Inputs []InputPluginInfo `mapstructure:"input_plugin" json:"input_plugin"`
Outputs []OutputPluginInfo `mapstructure:"output_plugin" json:"output_plugin"`
}
// Global config
var globalConfig *Config
// New Config
func NewConfig() *Config {
	return &Config{
		Node:    NodeInfo{Name: "unknown", TransferQueue: TransferQueueInfo{BufferSize: 1000}},
		Inputs:  []InputPluginInfo{},
		Outputs: []OutputPluginInfo{},
}
|
func GetConfig() *Config {
if globalConfig == nil {
globalConfig = NewConfig()
}
return globalConfig
}
// Init config from json file
func (config *Config) Init(path string) error {
	// Set viper settings
viper.SetConfigType("json")
viper.SetConfigFile(path)
viper.AddConfigPath("../conf/")
	// Read in config
err := viper.ReadInConfig()
if err != nil {
return err
}
	// Unmarshal config
err = viper.Unmarshal(config)
if err != nil {
return err
}
return nil
}
|
}
// Get singleton config
|
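A minimal config.json consistent with the mapstructure tags above (all values illustrative):
{
  "node": {
    "name": "node-1",
    "ip": "127.0.0.1",
    "transfer_queue": {"buffer_size": 1000}
  },
  "input_plugin": [
    {"plugin_name": "cpu", "plugin_path": "./plugins/cpu.so", "duration": 10, "active": true, "config": {}}
  ],
  "output_plugin": [
    {"plugin_name": "stdout", "plugin_path": "./plugins/stdout.so", "active": true, "inputs": {"cpu": true}, "config": {}}
  ]
}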