privacy_utils.py
import torch
import torch.nn as nn
import math
import numpy as np
def weights_init(m):
if type(m) == nn.Linear:
nn.init.xavier_uniform_(m.weight)
def pate(data, teachers, lap_scale, device="cpu"):
"""PATE implementation for GANs.
"""
num_teachers = len(teachers)
labels = torch.Tensor(num_teachers, data.shape[0]).type(torch.int64).to(device)
for i in range(num_teachers):
output = teachers[i](data)
pred = (output > 0.5).type(torch.Tensor).squeeze().to(device)
# print(pred.shape)
# print(labels[i].shape)
labels[i] = pred
votes = torch.sum(labels, dim=0).unsqueeze(1).type(torch.DoubleTensor).to(device)
noise = torch.from_numpy(np.random.laplace(loc=0, scale=1 / lap_scale, size=votes.size())).to(
device
)
noisy_votes = votes + noise
noisy_labels = (noisy_votes > num_teachers / 2).type(torch.DoubleTensor).to(device)
return noisy_labels, votes
def moments_acc(num_teachers, votes, lap_scale, l_list, device="cpu"):
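    """Moments accountant for PATE: one privacy-loss term (alpha) per
    moment order in `l_list`, derived from the teacher vote counts."""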
q = (2 + lap_scale * torch.abs(2 * votes - num_teachers)) / (
4 * torch.exp(lap_scale * torch.abs(2 * votes - num_teachers))
).to(device)
alpha = []
for l_val in l_list:
a = 2 * lap_scale ** 2 * l_val * (l_val + 1)
t_one = (1 - q) * torch.pow((1 - q) / (1 - math.exp(2 * lap_scale) * q), l_val)
t_two = q * torch.exp(2 * lap_scale * l_val)
t = t_one + t_two
alpha.append(torch.clamp(t, max=a).sum())
return torch.DoubleTensor(alpha).to(device)
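# A minimal usage sketch for the two helpers above, assuming a handful of toy
# teacher discriminators. The models, batch shape, lap_scale, and moment
# orders below are illustrative assumptions, not part of the original module.
if __name__ == "__main__":
    num_teachers, lap_scale = 5, 0.1
    teachers = [nn.Sequential(nn.Linear(4, 1), nn.Sigmoid()) for _ in range(num_teachers)]
    fake_batch = torch.randn(8, 4)  # stand-in for a generator's output
    noisy_labels, votes = pate(fake_batch, teachers, lap_scale)
    # Moment orders as plain floats so torch.clamp(..., max=a) gets a scalar.
    l_list = [float(l) for l in range(1, 9)]
    alpha = moments_acc(num_teachers, votes, lap_scale, l_list)
    print(noisy_labels.shape, alpha)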

registry.go
package event
var evtreg EventRegistry
var ctxreg ContextRegistry
func init() {
    ctxreg = make(map[ContextType]ContextConstructor)
evtreg = make(map[Type]bool)
}
type EventConstructor func(...Option) *Event
type EventRegistry map[Type]bool
func RegisterType(typ Type) {
evtreg[typ] = true
}
type ContextConstructor func() Context
type ContextRegistry map[ContextType]ContextConstructor
func RegisterContext(typ ContextType, ctor ContextConstructor) {
ctxreg[typ] = ctor
}
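// RegisterNewContext registers a constructor that always returns a context
// of the given type wrapping the supplied value.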
func RegisterNewContext(typ string, v interface{}) {
ctyp := ContextType(typ)
ctor := func() Context {
return NewContext(ctyp, v)
}
RegisterContext(ctyp, ctor)
}

lowercase_key.rs
use crate::common::*;
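// Each test writes a .env fixture to a temp dir and asserts the LowercaseKey
// check's output.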
#[test]
fn correct_files() {
let contents = vec![
"A=B\nF=BAR\nFOO=BAR\n",
"A=B\r\nF=BAR\r\nFOO=BAR\r\n",
"# comment\nABC=DEF\n",
];
for content in contents {
let testdir = TestDir::new();
let testfile = testdir.create_testfile(".env", content);
let args = &[testfile.as_str()];
let expected_output = check_output(&[(".env", &[])]);
testdir.test_command_success_with_args(args, expected_output);
}
}
#[test]
fn incorrect_files() {
let contents = vec!["FOO=TEST\nbar=hello\n", "BAR_FoO=hello\nFOO=TEST\n"];
let expected = vec![(2, "bar"), (1, "BAR_FoO")];
for (i, content) in contents.iter().enumerate() {
let testdir = TestDir::new();
let testfile = testdir.create_testfile(".env", content);
let args = &[testfile.as_str()];
let expected_output = check_output(&[(
".env",
&[format!(
".env:{} LowercaseKey: The {} key should be in uppercase",
expected[i].0, expected[i].1,
)
.as_str()],
)]);
testdir.test_command_fail_with_args(args, expected_output);
}
}
#[test]
fn many_incorrect_variables() {
let content = "FOO=TEST\nFoo_BAZ=BAR\nbar=TEST\n";
let testdir = TestDir::new();
let testfile = testdir.create_testfile(".env", content);
let args = &[testfile.as_str()];
let expected_output = check_output(&[(
".env",
&[
".env:2 LowercaseKey: The Foo_BAZ key should be in uppercase",
".env:3 LowercaseKey: The bar key should be in uppercase",
],
)]);
testdir.test_command_fail_with_args(args, expected_output);
}

generate_completions.rs
use std::io;
use clap::{Arg, Command};
use clap_complete::{generate, shells::*};
#[test]
fn generate_completions() {
let mut cmd = Command::new("test_app")
.arg(Arg::new("config").short('c').global(true))
.arg(Arg::new("v").short('v').conflicts_with("config"))
.subcommand(
Command::new("test")
.about("Subcommand")
.arg(Arg::new("debug").short('d')),
);
generate(Bash, &mut cmd, "test_app", &mut io::sink());
generate(Fish, &mut cmd, "test_app", &mut io::sink());
generate(PowerShell, &mut cmd, "test_app", &mut io::sink());
generate(Elvish, &mut cmd, "test_app", &mut io::sink());
generate(Zsh, &mut cmd, "test_app", &mut io::sink());
}

JetsonYolo.py
import cv2
import numpy as np
from elements.yolo import OBJ_DETECTION
Object_classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
'hair drier', 'toothbrush' ]
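# One random BGR color per COCO class, indexed in step with Object_classes.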
Object_colors = list(np.random.rand(80,3)*255)
Object_detector = OBJ_DETECTION('weights/yolov5s.pt', Object_classes)
def gstreamer_pipeline(
capture_width=1280,
capture_height=720,
display_width=1280,
display_height=720,
framerate=60,
flip_method=0,
):
return (
"nvarguscamerasrc ! "
"video/x-raw(memory:NVMM), "
"width=(int)%d, height=(int)%d, "
"format=(string)NV12, framerate=(fraction)%d/1 ! "
"nvvidconv flip-method=%d ! "
"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
"videoconvert ! "
"video/x-raw, format=(string)BGR ! appsink"
% (
capture_width,
capture_height,
framerate,
flip_method,
display_width,
display_height,
)
)
# To flip the image, modify the flip_method parameter (0 and 2 are the most common)
print(gstreamer_pipeline(flip_method=0))
# cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
cap = cv2.VideoCapture("1627775013.mp4")
if cap.isOpened():
window_handle = cv2.namedWindow("CSI Camera", cv2.WINDOW_AUTOSIZE)
# Window
while cv2.getWindowProperty("CSI Camera", 0) >= 0:
ret, frame = cap.read()
        if ret and frame is not None:
# detection process
objs = Object_detector.detect(frame)
# plotting
for obj in objs:
# print(obj)
label = obj['label']
score = obj['score']
[(xmin,ymin),(xmax,ymax)] = obj['bbox']
color = Object_colors[Object_classes.index(label)]
frame = cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), color, 2)
frame = cv2.putText(frame, f'{label} ({str(score)})', (xmin,ymin), cv2.FONT_HERSHEY_SIMPLEX , 0.75, color, 1, cv2.LINE_AA)
else:
break
cv2.imshow("CSI Camera", frame)
keyCode = cv2.waitKey(30)
if keyCode == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
else:
| print("Unable to open camera") |
|
main.rs | fn main() {
let mut vec = Vec::new();
vec.push(1);
vec.push(2);
    // Print before handing the vector off: `take` consumes it, so `vec`
    // cannot be used afterwards.
    println!("The value is {}", vec[1]);
    take(vec);
}
fn take(_vec: Vec<i32>) {
// Just take ownership
}

uninit_assumed_init.rs
use crate::utils::{match_def_path, match_qpath, paths, span_lint};
use if_chain::if_chain;
use rustc_hir as hir;
use rustc_lint::LateContext;
use rustc_middle::ty::{self, Ty};
use super::UNINIT_ASSUMED_INIT;
/// lint for `MaybeUninit::uninit().assume_init()` (we already have the latter)
pub(super) fn check(cx: &LateContext<'_>, expr: &hir::Expr<'_>, outer: &hir::Expr<'_>) {
    if_chain! {
        if let hir::ExprKind::Call(ref callee, ref args) = expr.kind;
        if args.is_empty();
        if let hir::ExprKind::Path(ref path) = callee.kind;
        if match_qpath(path, &paths::MEM_MAYBEUNINIT_UNINIT);
        if !is_maybe_uninit_ty_valid(cx, cx.typeck_results().expr_ty_adjusted(outer));
        then {
            span_lint(
                cx,
                UNINIT_ASSUMED_INIT,
                outer.span,
                "this call for this type may be undefined behavior"
            );
        }
    }
}
fn is_maybe_uninit_ty_valid(cx: &LateContext<'_>, ty: Ty<'_>) -> bool {
    match ty.kind() {
        ty::Array(ref component, _) => is_maybe_uninit_ty_valid(cx, component),
        ty::Tuple(ref types) => types.types().all(|ty| is_maybe_uninit_ty_valid(cx, ty)),
        ty::Adt(ref adt, _) => match_def_path(cx, adt.did, &paths::MEM_MAYBEUNINIT),
        _ => false,
    }
}

pwd.rs
#[doc = "Register `PWD` reader"]
pub struct R(crate::R<PWD_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<PWD_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::convert::From<crate::R<PWD_SPEC>> for R {
fn from(reader: crate::R<PWD_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `PWD` writer"]
pub struct W(crate::W<PWD_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<PWD_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl core::convert::From<crate::W<PWD_SPEC>> for W {
fn from(writer: crate::W<PWD_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TXPWDFS_A {
#[doc = "0: Normal operation."]
VALUE0 = 0,
#[doc = "1: Power-down the USB full-speed drivers. This turns off the current starvation sources and puts the"]
VALUE1 = 1,
}
impl From<TXPWDFS_A> for bool {
#[inline(always)]
fn from(variant: TXPWDFS_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `TXPWDFS` reader - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
pub struct TXPWDFS_R(crate::FieldReader<bool, TXPWDFS_A>);
impl TXPWDFS_R {
pub(crate) fn new(bits: bool) -> Self {
TXPWDFS_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> TXPWDFS_A {
match self.bits {
false => TXPWDFS_A::VALUE0,
true => TXPWDFS_A::VALUE1,
}
}
#[doc = "Checks if the value of the field is `VALUE0`"]
#[inline(always)]
pub fn is_value0(&self) -> bool {
**self == TXPWDFS_A::VALUE0
}
#[doc = "Checks if the value of the field is `VALUE1`"]
#[inline(always)]
pub fn is_value1(&self) -> bool {
**self == TXPWDFS_A::VALUE1
}
}
impl core::ops::Deref for TXPWDFS_R {
type Target = crate::FieldReader<bool, TXPWDFS_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `TXPWDFS` writer - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
pub struct TXPWDFS_W<'a> {
w: &'a mut W,
}
impl<'a> TXPWDFS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: TXPWDFS_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Normal operation."]
#[inline(always)]
pub fn value0(self) -> &'a mut W {
self.variant(TXPWDFS_A::VALUE0)
}
#[doc = "Power-down the USB full-speed drivers. This turns off the current starvation sources and puts the"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(TXPWDFS_A::VALUE1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 10)) | ((value as u32 & 0x01) << 10);
self.w
}
}
#[doc = "Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TXPWDIBIAS_A {
#[doc = "0: Normal operation."]
VALUE0 = 0,
#[doc = "1: Power-down the USB PHY current bias block for the transmitter. This bit should be set only when the"]
VALUE1 = 1,
}
impl From<TXPWDIBIAS_A> for bool {
#[inline(always)]
fn from(variant: TXPWDIBIAS_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `TXPWDIBIAS` reader - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
pub struct TXPWDIBIAS_R(crate::FieldReader<bool, TXPWDIBIAS_A>);
impl TXPWDIBIAS_R {
pub(crate) fn new(bits: bool) -> Self {
TXPWDIBIAS_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> TXPWDIBIAS_A {
match self.bits {
false => TXPWDIBIAS_A::VALUE0,
true => TXPWDIBIAS_A::VALUE1,
}
}
#[doc = "Checks if the value of the field is `VALUE0`"]
#[inline(always)]
pub fn is_value0(&self) -> bool {
**self == TXPWDIBIAS_A::VALUE0
}
#[doc = "Checks if the value of the field is `VALUE1`"]
#[inline(always)]
pub fn is_value1(&self) -> bool {
**self == TXPWDIBIAS_A::VALUE1
}
}
impl core::ops::Deref for TXPWDIBIAS_R {
type Target = crate::FieldReader<bool, TXPWDIBIAS_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `TXPWDIBIAS` writer - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
pub struct TXPWDIBIAS_W<'a> {
w: &'a mut W,
}
impl<'a> TXPWDIBIAS_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: TXPWDIBIAS_A) -> &'a mut W {
        self.bit(variant.into())
    }
    #[doc = "Normal operation."]
    #[inline(always)]
    pub fn value0(self) -> &'a mut W {
self.variant(TXPWDIBIAS_A::VALUE0)
}
#[doc = "Power-down the USB PHY current bias block for the transmitter. This bit should be set only when the"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(TXPWDIBIAS_A::VALUE1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 11)) | ((value as u32 & 0x01) << 11);
self.w
}
}
#[doc = "Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TXPWDV2I_A {
#[doc = "0: Normal operation."]
VALUE0 = 0,
#[doc = "1: Power-down the USB PHY transmit V-to-I converter and the current mirror"]
VALUE1 = 1,
}
impl From<TXPWDV2I_A> for bool {
#[inline(always)]
fn from(variant: TXPWDV2I_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `TXPWDV2I` reader - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
pub struct TXPWDV2I_R(crate::FieldReader<bool, TXPWDV2I_A>);
impl TXPWDV2I_R {
pub(crate) fn new(bits: bool) -> Self {
TXPWDV2I_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> TXPWDV2I_A {
match self.bits {
false => TXPWDV2I_A::VALUE0,
true => TXPWDV2I_A::VALUE1,
}
}
#[doc = "Checks if the value of the field is `VALUE0`"]
#[inline(always)]
pub fn is_value0(&self) -> bool {
**self == TXPWDV2I_A::VALUE0
}
#[doc = "Checks if the value of the field is `VALUE1`"]
#[inline(always)]
pub fn is_value1(&self) -> bool {
**self == TXPWDV2I_A::VALUE1
}
}
impl core::ops::Deref for TXPWDV2I_R {
type Target = crate::FieldReader<bool, TXPWDV2I_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `TXPWDV2I` writer - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
pub struct TXPWDV2I_W<'a> {
w: &'a mut W,
}
impl<'a> TXPWDV2I_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: TXPWDV2I_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Normal operation."]
#[inline(always)]
pub fn value0(self) -> &'a mut W {
self.variant(TXPWDV2I_A::VALUE0)
}
#[doc = "Power-down the USB PHY transmit V-to-I converter and the current mirror"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(TXPWDV2I_A::VALUE1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 12)) | ((value as u32 & 0x01) << 12);
self.w
}
}
#[doc = "Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RXPWDENV_A {
#[doc = "0: Normal operation."]
VALUE0 = 0,
#[doc = "1: Power-down the USB high-speed receiver envelope detector (squelch signal)"]
VALUE1 = 1,
}
impl From<RXPWDENV_A> for bool {
#[inline(always)]
fn from(variant: RXPWDENV_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `RXPWDENV` reader - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
pub struct RXPWDENV_R(crate::FieldReader<bool, RXPWDENV_A>);
impl RXPWDENV_R {
pub(crate) fn new(bits: bool) -> Self {
RXPWDENV_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> RXPWDENV_A {
match self.bits {
false => RXPWDENV_A::VALUE0,
true => RXPWDENV_A::VALUE1,
}
}
#[doc = "Checks if the value of the field is `VALUE0`"]
#[inline(always)]
pub fn is_value0(&self) -> bool {
**self == RXPWDENV_A::VALUE0
}
#[doc = "Checks if the value of the field is `VALUE1`"]
#[inline(always)]
pub fn is_value1(&self) -> bool {
**self == RXPWDENV_A::VALUE1
}
}
impl core::ops::Deref for RXPWDENV_R {
type Target = crate::FieldReader<bool, RXPWDENV_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RXPWDENV` writer - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
pub struct RXPWDENV_W<'a> {
w: &'a mut W,
}
impl<'a> RXPWDENV_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: RXPWDENV_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Normal operation."]
#[inline(always)]
pub fn value0(self) -> &'a mut W {
self.variant(RXPWDENV_A::VALUE0)
}
#[doc = "Power-down the USB high-speed receiver envelope detector (squelch signal)"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(RXPWDENV_A::VALUE1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 17)) | ((value as u32 & 0x01) << 17);
self.w
}
}
#[doc = "Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RXPWD1PT1_A {
#[doc = "0: Normal operation."]
VALUE0 = 0,
#[doc = "1: Power-down the USB full-speed differential receiver."]
VALUE1 = 1,
}
impl From<RXPWD1PT1_A> for bool {
#[inline(always)]
fn from(variant: RXPWD1PT1_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `RXPWD1PT1` reader - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
pub struct RXPWD1PT1_R(crate::FieldReader<bool, RXPWD1PT1_A>);
impl RXPWD1PT1_R {
pub(crate) fn new(bits: bool) -> Self {
RXPWD1PT1_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> RXPWD1PT1_A {
match self.bits {
false => RXPWD1PT1_A::VALUE0,
true => RXPWD1PT1_A::VALUE1,
}
}
#[doc = "Checks if the value of the field is `VALUE0`"]
#[inline(always)]
pub fn is_value0(&self) -> bool {
**self == RXPWD1PT1_A::VALUE0
}
#[doc = "Checks if the value of the field is `VALUE1`"]
#[inline(always)]
pub fn is_value1(&self) -> bool {
**self == RXPWD1PT1_A::VALUE1
}
}
impl core::ops::Deref for RXPWD1PT1_R {
type Target = crate::FieldReader<bool, RXPWD1PT1_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RXPWD1PT1` writer - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
pub struct RXPWD1PT1_W<'a> {
w: &'a mut W,
}
impl<'a> RXPWD1PT1_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: RXPWD1PT1_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Normal operation."]
#[inline(always)]
pub fn value0(self) -> &'a mut W {
self.variant(RXPWD1PT1_A::VALUE0)
}
#[doc = "Power-down the USB full-speed differential receiver."]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(RXPWD1PT1_A::VALUE1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 18)) | ((value as u32 & 0x01) << 18);
self.w
}
}
#[doc = "Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RXPWDDIFF_A {
#[doc = "0: Normal operation."]
VALUE0 = 0,
#[doc = "1: Power-down the USB high-speed differential receive"]
VALUE1 = 1,
}
impl From<RXPWDDIFF_A> for bool {
#[inline(always)]
fn from(variant: RXPWDDIFF_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `RXPWDDIFF` reader - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
pub struct RXPWDDIFF_R(crate::FieldReader<bool, RXPWDDIFF_A>);
impl RXPWDDIFF_R {
pub(crate) fn new(bits: bool) -> Self {
RXPWDDIFF_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> RXPWDDIFF_A {
match self.bits {
false => RXPWDDIFF_A::VALUE0,
true => RXPWDDIFF_A::VALUE1,
}
}
#[doc = "Checks if the value of the field is `VALUE0`"]
#[inline(always)]
pub fn is_value0(&self) -> bool {
**self == RXPWDDIFF_A::VALUE0
}
#[doc = "Checks if the value of the field is `VALUE1`"]
#[inline(always)]
pub fn is_value1(&self) -> bool {
**self == RXPWDDIFF_A::VALUE1
}
}
impl core::ops::Deref for RXPWDDIFF_R {
type Target = crate::FieldReader<bool, RXPWDDIFF_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RXPWDDIFF` writer - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
pub struct RXPWDDIFF_W<'a> {
w: &'a mut W,
}
impl<'a> RXPWDDIFF_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: RXPWDDIFF_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Normal operation."]
#[inline(always)]
pub fn value0(self) -> &'a mut W {
self.variant(RXPWDDIFF_A::VALUE0)
}
#[doc = "Power-down the USB high-speed differential receive"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(RXPWDDIFF_A::VALUE1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 19)) | ((value as u32 & 0x01) << 19);
self.w
}
}
#[doc = "This bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RXPWDRX_A {
#[doc = "0: Normal operation."]
VALUE0 = 0,
#[doc = "1: Power-down the entire USB PHY receiver block except for the full-speed differential receiver"]
VALUE1 = 1,
}
impl From<RXPWDRX_A> for bool {
#[inline(always)]
fn from(variant: RXPWDRX_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `RXPWDRX` reader - This bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
pub struct RXPWDRX_R(crate::FieldReader<bool, RXPWDRX_A>);
impl RXPWDRX_R {
pub(crate) fn new(bits: bool) -> Self {
RXPWDRX_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> RXPWDRX_A {
match self.bits {
false => RXPWDRX_A::VALUE0,
true => RXPWDRX_A::VALUE1,
}
}
#[doc = "Checks if the value of the field is `VALUE0`"]
#[inline(always)]
pub fn is_value0(&self) -> bool {
**self == RXPWDRX_A::VALUE0
}
#[doc = "Checks if the value of the field is `VALUE1`"]
#[inline(always)]
pub fn is_value1(&self) -> bool {
**self == RXPWDRX_A::VALUE1
}
}
impl core::ops::Deref for RXPWDRX_R {
type Target = crate::FieldReader<bool, RXPWDRX_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RXPWDRX` writer - This bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
pub struct RXPWDRX_W<'a> {
w: &'a mut W,
}
impl<'a> RXPWDRX_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: RXPWDRX_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Normal operation."]
#[inline(always)]
pub fn value0(self) -> &'a mut W {
self.variant(RXPWDRX_A::VALUE0)
}
#[doc = "Power-down the entire USB PHY receiver block except for the full-speed differential receiver"]
#[inline(always)]
pub fn value1(self) -> &'a mut W {
self.variant(RXPWDRX_A::VALUE1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 20)) | ((value as u32 & 0x01) << 20);
self.w
}
}
impl R {
#[doc = "Bit 10 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
#[inline(always)]
pub fn txpwdfs(&self) -> TXPWDFS_R {
TXPWDFS_R::new(((self.bits >> 10) & 0x01) != 0)
}
#[doc = "Bit 11 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
#[inline(always)]
pub fn txpwdibias(&self) -> TXPWDIBIAS_R {
TXPWDIBIAS_R::new(((self.bits >> 11) & 0x01) != 0)
}
#[doc = "Bit 12 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
#[inline(always)]
pub fn txpwdv2i(&self) -> TXPWDV2I_R {
TXPWDV2I_R::new(((self.bits >> 12) & 0x01) != 0)
}
#[doc = "Bit 17 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
#[inline(always)]
pub fn rxpwdenv(&self) -> RXPWDENV_R {
RXPWDENV_R::new(((self.bits >> 17) & 0x01) != 0)
}
#[doc = "Bit 18 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
#[inline(always)]
pub fn rxpwd1pt1(&self) -> RXPWD1PT1_R {
RXPWD1PT1_R::new(((self.bits >> 18) & 0x01) != 0)
}
#[doc = "Bit 19 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
#[inline(always)]
pub fn rxpwddiff(&self) -> RXPWDDIFF_R {
RXPWDDIFF_R::new(((self.bits >> 19) & 0x01) != 0)
}
#[doc = "Bit 20 - This bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
#[inline(always)]
pub fn rxpwdrx(&self) -> RXPWDRX_R {
RXPWDRX_R::new(((self.bits >> 20) & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 10 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
#[inline(always)]
pub fn txpwdfs(&mut self) -> TXPWDFS_W {
TXPWDFS_W { w: self }
}
#[doc = "Bit 11 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
#[inline(always)]
pub fn txpwdibias(&mut self) -> TXPWDIBIAS_W {
TXPWDIBIAS_W { w: self }
}
#[doc = "Bit 12 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
#[inline(always)]
pub fn txpwdv2i(&mut self) -> TXPWDV2I_W {
TXPWDV2I_W { w: self }
}
#[doc = "Bit 17 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
#[inline(always)]
pub fn rxpwdenv(&mut self) -> RXPWDENV_W {
RXPWDENV_W { w: self }
}
#[doc = "Bit 18 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
#[inline(always)]
pub fn rxpwd1pt1(&mut self) -> RXPWD1PT1_W {
RXPWD1PT1_W { w: self }
}
#[doc = "Bit 19 - Note that this bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
#[inline(always)]
pub fn rxpwddiff(&mut self) -> RXPWDDIFF_W {
RXPWDDIFF_W { w: self }
}
#[doc = "Bit 20 - This bit will be auto cleared if there is USB wakeup event while ENAUTOCLR_PHY_PWD bit of CTRL is enabled"]
#[inline(always)]
pub fn rxpwdrx(&mut self) -> RXPWDRX_W {
RXPWDRX_W { w: self }
}
#[doc = "Writes raw bits to the register."]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "USB PHY Power-Down Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pwd](index.html) module"]
pub struct PWD_SPEC;
impl crate::RegisterSpec for PWD_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [pwd::R](R) reader structure"]
impl crate::Readable for PWD_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [pwd::W](W) writer structure"]
impl crate::Writable for PWD_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets PWD to value 0x001e_1c00"]
impl crate::Resettable for PWD_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0x001e_1c00
}
}

kendo.culture.be.min.js
/*
* Kendo UI Web v2014.1.318 (http://kendoui.com)
* Copyright 2014 Telerik AD. All rights reserved.
*
* Kendo UI Web commercial licenses may be obtained at
* http://www.telerik.com/purchase/license-agreement/kendo-ui-web
* If you do not own a commercial license, this file shall be governed by the
* GNU General Public License (GPL) version 3.
* For GPL requirements, please review: http://www.gnu.org/copyleft/gpl.html
*/
!function(e,define){define([],e)}(function(){return function(e){var t=e.kendo||(e.kendo={cultures:{}});t.cultures.be={name:"be",numberFormat:{pattern:["-n"],decimals:2,",":" ",".":",",groupSize:[3],percent:{pattern:["-n %","n %"],decimals:2,",":" ",".":",",groupSize:[3],symbol:"%"},currency:{pattern:["-n $","n $"],decimals:2,",":" ",".":",",groupSize:[3],symbol:"р."}},calendars:{standard:{days:{names:["нядзеля","панядзелак","аўторак","серада","чацвер","пятніца","субота"],namesAbbr:["нд","пн","аў","ср","чц","пт","сб"],namesShort:["нд","пн","аў","ср","чц","пт","сб"]},months:{names:["Студзень","Люты","Сакавік","Красавік","Май","Чэрвень","Ліпень","Жнівень","Верасень","Кастрычнік","Лістапад","Снежань",""],namesAbbr:["Сту","Лют","Сак","Кра","Май","Чэр","Ліп","Жні","Вер","Кас","Ліс","Сне",""]},AM:[""],PM:[""],patterns:{d:"dd.MM.yyyy",D:"d MMMM yyyy",F:"d MMMM yyyy H:mm:ss",g:"dd.MM.yyyy H:mm",G:"dd.MM.yyyy H:mm:ss",m:"d MMMM",M:"d MMMM",s:"yyyy'-'MM'-'dd'T'HH':'mm':'ss",t:"H:mm",T:"H:mm:ss",u:"yyyy'-'MM'-'dd HH':'mm':'ss'Z'",y:"MMMM yyyy",Y:"MMMM yyyy"},"/":".",":":":",firstDay:1}}}}(this),window.kendo},"function"==typeof define&&define.amd?define:function(e,t){t()});

nvicip57.rs
#[doc = "Register `NVICIP57` reader"]
pub struct R(crate::R<NVICIP57_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<NVICIP57_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<NVICIP57_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<NVICIP57_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `NVICIP57` writer"]
pub struct W(crate::W<NVICIP57_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<NVICIP57_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<NVICIP57_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<NVICIP57_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `PRI57` reader - Priority of interrupt 57"]
pub struct PRI57_R(crate::FieldReader<u8, u8>);
impl PRI57_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
PRI57_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for PRI57_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `PRI57` writer - Priority of interrupt 57"]
pub struct PRI57_W<'a> {
w: &'a mut W,
}
impl<'a> PRI57_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = value;
self.w
}
}
impl R {
#[doc = "Bits 0:7 - Priority of interrupt 57"]
#[inline(always)]
pub fn pri57(&self) -> PRI57_R {
PRI57_R::new(self.bits)
}
}
impl W {
#[doc = "Bits 0:7 - Priority of interrupt 57"]
#[inline(always)]
pub fn pri57(&mut self) -> PRI57_W {
PRI57_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u8) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Interrupt Priority Register n\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [nvicip57](index.html) module"]
pub struct NVICIP57_SPEC;
impl crate::RegisterSpec for NVICIP57_SPEC {
type Ux = u8;
}
#[doc = "`read()` method returns [nvicip57::R](R) reader structure"]
impl crate::Readable for NVICIP57_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [nvicip57::W](W) writer structure"]
impl crate::Writable for NVICIP57_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets NVICIP57 to value 0"]
impl crate::Resettable for NVICIP57_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        0
    }
}

process_create_token_governance.rs
//! Program state processor
use crate::{
state::{
enums::GovernanceAccountType,
governance::{
assert_valid_create_governance_args, get_token_governance_address_seeds, Governance,
GovernanceConfig,
},
realm::get_realm_data,
token_owner_record::get_token_owner_record_data_for_realm,
},
tools::spl_token::{assert_spl_token_owner_is_signer, set_spl_token_owner},
};
use solana_program::{
account_info::{next_account_info, AccountInfo},
entrypoint::ProgramResult,
pubkey::Pubkey,
rent::Rent,
sysvar::Sysvar,
};
use spl_governance_tools::account::create_and_serialize_account_signed;
/// Processes CreateTokenGovernance instruction
pub fn process_create_token_governance(
program_id: &Pubkey,
accounts: &[AccountInfo],
config: GovernanceConfig,
transfer_token_owner: bool,
) -> ProgramResult {
let account_info_iter = &mut accounts.iter();
let realm_info = next_account_info(account_info_iter)?; // 0
let token_governance_info = next_account_info(account_info_iter)?; // 1
let governed_token_info = next_account_info(account_info_iter)?; // 2
let governed_token_owner_info = next_account_info(account_info_iter)?; // 3
let token_owner_record_info = next_account_info(account_info_iter)?; // 4
let payer_info = next_account_info(account_info_iter)?; // 5
let spl_token_info = next_account_info(account_info_iter)?; // 6
let system_info = next_account_info(account_info_iter)?; // 7
let rent_sysvar_info = next_account_info(account_info_iter)?; // 8
let rent = &Rent::from_account_info(rent_sysvar_info)?;
let governance_authority_info = next_account_info(account_info_iter)?; // 9
assert_valid_create_governance_args(program_id, &config, realm_info)?;
let realm_data = get_realm_data(program_id, realm_info)?;
    let token_owner_record_data =
        get_token_owner_record_data_for_realm(program_id, token_owner_record_info, realm_info.key)?;
    token_owner_record_data.assert_token_owner_or_delegate_is_signer(governance_authority_info)?;
let voter_weight = token_owner_record_data.resolve_voter_weight(
program_id,
account_info_iter,
realm_info.key,
&realm_data,
)?;
token_owner_record_data.assert_can_create_governance(&realm_data, voter_weight)?;
let token_governance_data = Governance {
account_type: GovernanceAccountType::TokenGovernance,
realm: *realm_info.key,
governed_account: *governed_token_info.key,
config,
proposals_count: 0,
reserved: [0; 8],
};
create_and_serialize_account_signed::<Governance>(
payer_info,
token_governance_info,
&token_governance_data,
&get_token_governance_address_seeds(realm_info.key, governed_token_info.key),
program_id,
system_info,
rent,
)?;
if transfer_token_owner {
set_spl_token_owner(
governed_token_info,
governed_token_owner_info,
token_governance_info.key,
spl_token_info,
)?;
} else {
assert_spl_token_owner_is_signer(governed_token_info, governed_token_owner_info)?;
}
Ok(())
}

profile.rs
use anyhow::Result;
use async_trait::async_trait;
use crate::models::{
profile::{Profile, ProfileId, ProfilePosition},
servant::ServantId,
};
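/// Payload for registering a new profile entry via `ProfileRepository::register`.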
pub struct NewProfile {
pub servant_id: ServantId,
pub position: ProfilePosition,
pub text: String,
}
#[async_trait]
pub trait ProfileRepository {
async fn find(&self, id: &ProfileId) -> Result<Option<Profile>>;
async fn list_for_servant(&self, servant_id: &ServantId) -> Result<Vec<Profile>>;
async fn list_for_servants(&self, ids: &[ServantId]) -> Result<Vec<Profile>>;
async fn register(&self, profile: NewProfile) -> Result<Profile>;
}

build.rs
// Copyright © 2015, Peter Atashian
// Licensed under the MIT License <LICENSE.md>
fn main() {
println!("cargo:rustc-flags=-l iepmapi");
}

starter.go
package operator
import (
"context"
"fmt"
"time"
configv1 "github.com/openshift/api/config/v1"
operatorv1 "github.com/openshift/api/operator/v1"
configv1client "github.com/openshift/client-go/config/clientset/versioned"
configinformers "github.com/openshift/client-go/config/informers/externalversions"
"github.com/openshift/library-go/pkg/controller/controllercmd"
"github.com/openshift/library-go/pkg/operator/genericoperatorclient"
"github.com/openshift/library-go/pkg/operator/loglevel"
"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
"github.com/openshift/library-go/pkg/operator/staleconditions"
"github.com/openshift/library-go/pkg/operator/staticresourcecontroller"
"github.com/openshift/library-go/pkg/operator/status"
"github.com/openshift/library-go/pkg/operator/v1helpers"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"github.com/openshift/cluster-kube-storage-version-migrator-operator/bindata"
"github.com/openshift/cluster-kube-storage-version-migrator-operator/pkg"
"github.com/openshift/cluster-kube-storage-version-migrator-operator/pkg/operator/deploymentcontroller"
"github.com/openshift/cluster-kube-storage-version-migrator-operator/pkg/operator/staticconditionscontroller"
)
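// RunOperator builds the operator's clients and controllers, starts them,
// and blocks until the context is cancelled.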
func RunOperator(ctx context.Context, cc *controllercmd.ControllerContext) error {
kubeClient, err := kubernetes.NewForConfig(cc.ProtoKubeConfig)
if err != nil {
return err
}
configClient, err := configv1client.NewForConfig(cc.KubeConfig)
if err != nil {
return err
}
operatorClient, dynamicInformers, err := genericoperatorclient.NewClusterScopedOperatorClient(cc.KubeConfig, operatorv1.GroupVersion.WithResource("kubestorageversionmigrators"))
if err != nil {
return err
}
clusterOperator, err := configClient.ConfigV1().ClusterOperators().Get(ctx, "kube-storage-version-migrator", metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return err
}
versionRecorder := status.NewVersionGetter()
for _, version := range clusterOperator.Status.Versions {
versionRecorder.SetVersion(version.Name, version.Version)
}
versionRecorder.SetVersion("operator", status.VersionForOperatorFromEnv())
kubeInformersForNamespaces := v1helpers.NewKubeInformersForNamespaces(kubeClient, pkg.TargetNamespace)
staticResourceController := staticresourcecontroller.NewStaticResourceController(
"KubeStorageVersionMigratorStaticResources",
bindata.Asset,
[]string{
"kube-storage-version-migrator/namespace.yaml",
"kube-storage-version-migrator/serviceaccount.yaml",
"kube-storage-version-migrator/roles.yaml",
},
(&resourceapply.ClientHolder{}).WithKubernetes(kubeClient),
operatorClient,
cc.EventRecorder,
)
migratorDeploymentController := deploymentcontroller.NewMigratorDeploymentController(
kubeClient,
operatorClient,
kubeInformersForNamespaces,
cc.EventRecorder,
)
configInformers := configinformers.NewSharedInformerFactory(configClient, 10*time.Minute)
statusController := status.NewClusterOperatorStatusController(
"kube-storage-version-migrator",
[]configv1.ObjectReference{
{Group: "operator.openshift.io", Resource: "kubestorageversionmigrators", Name: "cluster"},
{Group: "migration.k8s.io", Resource: "storageversionmigrations"},
{Resource: "namespaces", Name: pkg.TargetNamespace},
{Resource: "namespaces", Name: pkg.OperatorNamespace},
},
configClient.ConfigV1(),
configInformers.Config().V1().ClusterOperators(),
operatorClient,
versionRecorder,
cc.EventRecorder,
)
staticConditionsController := staticconditionscontroller.NewStaticConditionsController(
operatorClient, cc.EventRecorder,
operatorv1.OperatorCondition{Type: "Default" + operatorv1.OperatorStatusTypeUpgradeable, Status: operatorv1.ConditionTrue, Reason: "Default"},
)
staleConditionsController := staleconditions.NewRemoveStaleConditionsController(
[]string{"Available", "Progressing", "TargetDegraded", "DefaultUpgradable"},
operatorClient,
cc.EventRecorder,
)
loggingController := loglevel.NewClusterOperatorLoggingController(operatorClient, cc.EventRecorder)
configInformers.Start(ctx.Done())
dynamicInformers.Start(ctx.Done())
kubeInformersForNamespaces.Start(ctx.Done())
go statusController.Run(ctx, 1)
go staticResourceController.Run(ctx, 1)
go migratorDeploymentController.Run(ctx, 1)
go staticConditionsController.Run(ctx, 1)
go staleConditionsController.Run(ctx, 1)
go loggingController.Run(ctx, 1)
<-ctx.Done()
return fmt.Errorf("stopped")
}

example.middleware.ts
import { Injectable, NestMiddleware } from '@nestjs/common';
import { Request, Response } from 'express';
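// Logs the route params of every request it is applied to, then passes control on.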
@Injectable()
export class ExampleMiddleware implements NestMiddleware {
use(req: Request, res: Response, next: Function) {
console.log(req.params);
next();
}
}

test_cognitoidp.py
from __future__ import unicode_literals
import boto3
import json
import os
import uuid
from jose import jws
from moto import mock_cognitoidp
import sure # noqa
@mock_cognitoidp
def test_create_user_pool():
conn = boto3.client("cognito-idp", "us-west-2")
name = str(uuid.uuid4())
value = str(uuid.uuid4())
result = conn.create_user_pool(
PoolName=name,
LambdaConfig={
"PreSignUp": value
}
)
result["UserPool"]["Id"].should_not.be.none
result["UserPool"]["Id"].should.match(r'[\w-]+_[0-9a-zA-Z]+')
result["UserPool"]["Name"].should.equal(name)
result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value)
@mock_cognitoidp
def test_list_user_pools():
conn = boto3.client("cognito-idp", "us-west-2")
name = str(uuid.uuid4())
conn.create_user_pool(PoolName=name)
result = conn.list_user_pools(MaxResults=10)
result["UserPools"].should.have.length_of(1)
result["UserPools"][0]["Name"].should.equal(name)
@mock_cognitoidp
def test_describe_user_pool():
conn = boto3.client("cognito-idp", "us-west-2")
name = str(uuid.uuid4())
value = str(uuid.uuid4())
user_pool_details = conn.create_user_pool(
PoolName=name,
LambdaConfig={
"PreSignUp": value
}
)
result = conn.describe_user_pool(UserPoolId=user_pool_details["UserPool"]["Id"])
result["UserPool"]["Name"].should.equal(name)
result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value)
@mock_cognitoidp
def test_delete_user_pool():
conn = boto3.client("cognito-idp", "us-west-2")
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(1)
conn.delete_user_pool(UserPoolId=user_pool_id)
conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(0)
@mock_cognitoidp
def test_create_user_pool_domain():
conn = boto3.client("cognito-idp", "us-west-2")
domain = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
result = conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain)
result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
@mock_cognitoidp
def test_describe_user_pool_domain():
conn = boto3.client("cognito-idp", "us-west-2")
domain = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain)
result = conn.describe_user_pool_domain(Domain=domain)
result["DomainDescription"]["Domain"].should.equal(domain)
result["DomainDescription"]["UserPoolId"].should.equal(user_pool_id)
result["DomainDescription"]["AWSAccountId"].should_not.be.none
@mock_cognitoidp
def test_delete_user_pool_domain():
conn = boto3.client("cognito-idp", "us-west-2")
domain = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain)
result = conn.delete_user_pool_domain(UserPoolId=user_pool_id, Domain=domain)
result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
result = conn.describe_user_pool_domain(Domain=domain)
# This is a surprising behavior of the real service: describing a missing domain comes
# back with status 200 and a DomainDescription of {}
result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
result["DomainDescription"].keys().should.have.length_of(0)
@mock_cognitoidp
def test_create_user_pool_client():
conn = boto3.client("cognito-idp", "us-west-2")
client_name = str(uuid.uuid4())
value = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
result = conn.create_user_pool_client(
UserPoolId=user_pool_id,
ClientName=client_name,
CallbackURLs=[value],
)
result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id)
result["UserPoolClient"]["ClientId"].should_not.be.none
result["UserPoolClient"]["ClientName"].should.equal(client_name)
result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1)
result["UserPoolClient"]["CallbackURLs"][0].should.equal(value)
@mock_cognitoidp
def test_list_user_pool_clients():
conn = boto3.client("cognito-idp", "us-west-2")
client_name = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
conn.create_user_pool_client(UserPoolId=user_pool_id, ClientName=client_name)
result = conn.list_user_pool_clients(UserPoolId=user_pool_id, MaxResults=10)
result["UserPoolClients"].should.have.length_of(1)
result["UserPoolClients"][0]["ClientName"].should.equal(client_name)
@mock_cognitoidp
def test_describe_user_pool_client():
conn = boto3.client("cognito-idp", "us-west-2")
client_name = str(uuid.uuid4())
value = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
client_details = conn.create_user_pool_client(
UserPoolId=user_pool_id,
ClientName=client_name,
CallbackURLs=[value],
)
result = conn.describe_user_pool_client(
UserPoolId=user_pool_id,
ClientId=client_details["UserPoolClient"]["ClientId"],
)
result["UserPoolClient"]["ClientName"].should.equal(client_name)
result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1)
result["UserPoolClient"]["CallbackURLs"][0].should.equal(value)
@mock_cognitoidp
def test_update_user_pool_client():
conn = boto3.client("cognito-idp", "us-west-2")
old_client_name = str(uuid.uuid4())
new_client_name = str(uuid.uuid4())
old_value = str(uuid.uuid4())
new_value = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
client_details = conn.create_user_pool_client(
UserPoolId=user_pool_id,
ClientName=old_client_name,
CallbackURLs=[old_value],
)
result = conn.update_user_pool_client(
UserPoolId=user_pool_id,
ClientId=client_details["UserPoolClient"]["ClientId"],
ClientName=new_client_name,
CallbackURLs=[new_value],
)
result["UserPoolClient"]["ClientName"].should.equal(new_client_name)
result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1)
result["UserPoolClient"]["CallbackURLs"][0].should.equal(new_value)
@mock_cognitoidp
def test_delete_user_pool_client():
conn = boto3.client("cognito-idp", "us-west-2")
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
client_details = conn.create_user_pool_client(
UserPoolId=user_pool_id,
ClientName=str(uuid.uuid4()),
)
conn.delete_user_pool_client(
UserPoolId=user_pool_id,
ClientId=client_details["UserPoolClient"]["ClientId"],
)
caught = False
try:
conn.describe_user_pool_client(
UserPoolId=user_pool_id,
ClientId=client_details["UserPoolClient"]["ClientId"],
)
except conn.exceptions.ResourceNotFoundException:
caught = True
caught.should.be.true
@mock_cognitoidp
def test_create_identity_provider():
conn = boto3.client("cognito-idp", "us-west-2")
provider_name = str(uuid.uuid4())
provider_type = "Facebook"
value = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
result = conn.create_identity_provider(
UserPoolId=user_pool_id,
ProviderName=provider_name,
ProviderType=provider_type,
ProviderDetails={
"thing": value
},
)
result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id)
result["IdentityProvider"]["ProviderName"].should.equal(provider_name)
result["IdentityProvider"]["ProviderType"].should.equal(provider_type)
result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value)
@mock_cognitoidp
def test_list_identity_providers():
conn = boto3.client("cognito-idp", "us-west-2")
provider_name = str(uuid.uuid4())
provider_type = "Facebook"
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
conn.create_identity_provider(
UserPoolId=user_pool_id,
ProviderName=provider_name,
ProviderType=provider_type,
ProviderDetails={},
)
result = conn.list_identity_providers(
UserPoolId=user_pool_id,
MaxResults=10,
)
result["Providers"].should.have.length_of(1)
result["Providers"][0]["ProviderName"].should.equal(provider_name)
result["Providers"][0]["ProviderType"].should.equal(provider_type)
@mock_cognitoidp
def test_describe_identity_providers():
conn = boto3.client("cognito-idp", "us-west-2")
provider_name = str(uuid.uuid4())
provider_type = "Facebook"
value = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
conn.create_identity_provider(
UserPoolId=user_pool_id,
ProviderName=provider_name,
ProviderType=provider_type,
ProviderDetails={
"thing": value
},
)
result = conn.describe_identity_provider(
UserPoolId=user_pool_id,
ProviderName=provider_name,
)
result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id)
result["IdentityProvider"]["ProviderName"].should.equal(provider_name)
result["IdentityProvider"]["ProviderType"].should.equal(provider_type)
result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value)
@mock_cognitoidp
def test_delete_identity_providers():
conn = boto3.client("cognito-idp", "us-west-2")
provider_name = str(uuid.uuid4())
provider_type = "Facebook"
value = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
conn.create_identity_provider(
UserPoolId=user_pool_id,
ProviderName=provider_name,
ProviderType=provider_type,
ProviderDetails={
"thing": value
},
)
conn.delete_identity_provider(UserPoolId=user_pool_id, ProviderName=provider_name)
caught = False
try:
conn.describe_identity_provider(
UserPoolId=user_pool_id,
ProviderName=provider_name,
)
except conn.exceptions.ResourceNotFoundException:
caught = True
caught.should.be.true
@mock_cognitoidp
def test_admin_create_user():
conn = boto3.client("cognito-idp", "us-west-2")
username = str(uuid.uuid4())
value = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
result = conn.admin_create_user(
UserPoolId=user_pool_id,
Username=username,
UserAttributes=[
{"Name": "thing", "Value": value}
],
)
result["User"]["Username"].should.equal(username)
result["User"]["UserStatus"].should.equal("FORCE_CHANGE_PASSWORD")
result["User"]["Attributes"].should.have.length_of(1)
result["User"]["Attributes"][0]["Name"].should.equal("thing")
result["User"]["Attributes"][0]["Value"].should.equal(value)
result["User"]["Enabled"].should.equal(True)
@mock_cognitoidp
def test_admin_get_user():
conn = boto3.client("cognito-idp", "us-west-2")
username = str(uuid.uuid4())
value = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
conn.admin_create_user(
UserPoolId=user_pool_id,
Username=username,
UserAttributes=[
{"Name": "thing", "Value": value}
],
)
result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username)
result["Username"].should.equal(username)
result["UserAttributes"].should.have.length_of(1)
result["UserAttributes"][0]["Name"].should.equal("thing")
result["UserAttributes"][0]["Value"].should.equal(value)
@mock_cognitoidp
def test_admin_get_missing_user():
conn = boto3.client("cognito-idp", "us-west-2")
username = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
caught = False
try:
conn.admin_get_user(UserPoolId=user_pool_id, Username=username)
except conn.exceptions.UserNotFoundException:
caught = True
caught.should.be.true
@mock_cognitoidp
def test_list_users():
conn = boto3.client("cognito-idp", "us-west-2")
username = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
conn.admin_create_user(UserPoolId=user_pool_id, Username=username)
result = conn.list_users(UserPoolId=user_pool_id)
result["Users"].should.have.length_of(1)
result["Users"][0]["Username"].should.equal(username)
@mock_cognitoidp
def test_admin_disable_user():
conn = boto3.client("cognito-idp", "us-west-2")
username = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
conn.admin_create_user(UserPoolId=user_pool_id, Username=username)
result = conn.admin_disable_user(UserPoolId=user_pool_id, Username=username)
list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected
conn.admin_get_user(UserPoolId=user_pool_id, Username=username) \
["Enabled"].should.equal(False)
@mock_cognitoidp
def test_admin_enable_user():
conn = boto3.client("cognito-idp", "us-west-2")
username = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
conn.admin_create_user(UserPoolId=user_pool_id, Username=username)
conn.admin_disable_user(UserPoolId=user_pool_id, Username=username)
result = conn.admin_enable_user(UserPoolId=user_pool_id, Username=username)
list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected
conn.admin_get_user(UserPoolId=user_pool_id, Username=username) \
["Enabled"].should.equal(True)
@mock_cognitoidp
def test_admin_delete_user():
conn = boto3.client("cognito-idp", "us-west-2")
username = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
conn.admin_create_user(UserPoolId=user_pool_id, Username=username)
conn.admin_delete_user(UserPoolId=user_pool_id, Username=username)
caught = False
try:
conn.admin_get_user(UserPoolId=user_pool_id, Username=username)
except conn.exceptions.UserNotFoundException:
caught = True
caught.should.be.true
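# End-to-end helper: creates a pool, client, and user, then completes the
# NEW_PASSWORD_REQUIRED challenge so the calling tests get real tokens.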
def authentication_flow(conn):
username = str(uuid.uuid4())
temporary_password = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
user_attribute_name = str(uuid.uuid4())
user_attribute_value = str(uuid.uuid4())
client_id = conn.create_user_pool_client(
UserPoolId=user_pool_id,
ClientName=str(uuid.uuid4()),
ReadAttributes=[user_attribute_name]
)["UserPoolClient"]["ClientId"]
conn.admin_create_user(
UserPoolId=user_pool_id,
Username=username,
TemporaryPassword=temporary_password,
UserAttributes=[{
'Name': user_attribute_name,
'Value': user_attribute_value
}]
)
result = conn.admin_initiate_auth(
UserPoolId=user_pool_id,
ClientId=client_id,
AuthFlow="ADMIN_NO_SRP_AUTH",
AuthParameters={
"USERNAME": username,
"PASSWORD": temporary_password
},
)
# A newly created user is forced to set a new password
result["ChallengeName"].should.equal("NEW_PASSWORD_REQUIRED")
result["Session"].should_not.be.none
# This sets a new password and logs the user in (creates tokens)
new_password = str(uuid.uuid4())
result = conn.respond_to_auth_challenge(
Session=result["Session"],
ClientId=client_id,
ChallengeName="NEW_PASSWORD_REQUIRED",
ChallengeResponses={
"USERNAME": username,
"NEW_PASSWORD": new_password
}
)
result["AuthenticationResult"]["IdToken"].should_not.be.none
result["AuthenticationResult"]["AccessToken"].should_not.be.none
return {
"user_pool_id": user_pool_id,
"client_id": client_id,
"id_token": result["AuthenticationResult"]["IdToken"],
"access_token": result["AuthenticationResult"]["AccessToken"],
"username": username,
"password": new_password,
"additional_fields": {
user_attribute_name: user_attribute_value
}
}
@mock_cognitoidp
def test_authentication_flow():
conn = boto3.client("cognito-idp", "us-west-2")
authentication_flow(conn)
@mock_cognitoidp
def test_token_legitimacy():
conn = boto3.client("cognito-idp", "us-west-2")
path = "../../moto/cognitoidp/resources/jwks-public.json"
with open(os.path.join(os.path.dirname(__file__), path)) as f:
json_web_key = json.loads(f.read())["keys"][0]
outputs = authentication_flow(conn)
id_token = outputs["id_token"]
access_token = outputs["access_token"]
client_id = outputs["client_id"]
issuer = "https://cognito-idp.us-west-2.amazonaws.com/{}".format(outputs["user_pool_id"])
id_claims = json.loads(jws.verify(id_token, json_web_key, "RS256"))
id_claims["iss"].should.equal(issuer)
id_claims["aud"].should.equal(client_id)
access_claims = json.loads(jws.verify(access_token, json_web_key, "RS256"))
access_claims["iss"].should.equal(issuer)
access_claims["aud"].should.equal(client_id)
for k, v in outputs["additional_fields"].items():
access_claims[k].should.equal(v)
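# For debugging, claims can also be inspected without signature verification,
# assuming python-jose's jwt helper is available (sketch):
#
#   from jose import jwt
#   unverified = jwt.get_unverified_claims(id_token)
#   unverified["aud"].should.equal(client_id)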
@mock_cognitoidp
def test_change_password():
conn = boto3.client("cognito-idp", "us-west-2")
outputs = authentication_flow(conn)
# Take this opportunity to test change_password, which requires an access token.
newer_password = str(uuid.uuid4())
conn.change_password(
AccessToken=outputs["access_token"],
PreviousPassword=outputs["password"],
ProposedPassword=newer_password,
)
# Log in again, which should succeed without a challenge because the user is no
# longer in the force-new-password state.
result = conn.admin_initiate_auth(
UserPoolId=outputs["user_pool_id"],
ClientId=outputs["client_id"],
AuthFlow="ADMIN_NO_SRP_AUTH",
AuthParameters={
"USERNAME": outputs["username"],
"PASSWORD": newer_password,
},
)
result["AuthenticationResult"].should_not.be.none
@mock_cognitoidp
def test_forgot_password():
conn = boto3.client("cognito-idp", "us-west-2")
result = conn.forgot_password(ClientId=str(uuid.uuid4()), Username=str(uuid.uuid4()))
result["CodeDeliveryDetails"].should_not.be.none
@mock_cognitoidp
def test_confirm_forgot_password():
| conn = boto3.client("cognito-idp", "us-west-2")
username = str(uuid.uuid4())
user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"]
client_id = conn.create_user_pool_client(
UserPoolId=user_pool_id,
ClientName=str(uuid.uuid4()),
)["UserPoolClient"]["ClientId"]
conn.admin_create_user(
UserPoolId=user_pool_id,
Username=username,
TemporaryPassword=str(uuid.uuid4()),
)
conn.confirm_forgot_password(
ClientId=client_id,
Username=username,
ConfirmationCode=str(uuid.uuid4()),
Password=str(uuid.uuid4()),
) |
|
object.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-import
"""Runtime Object API"""
import ctypes
from tvm._ffi.base import _FFI_MODE, _RUNTIME_ONLY, check_call, _LIB, c_str
from . import _ffi_api, _ffi_node_api
try:
# pylint: disable=wrong-import-position,unused-import
if _FFI_MODE == "ctypes":
raise ImportError()
from tvm._ffi._cy3.core import _set_class_object, _set_class_object_generic
from tvm._ffi._cy3.core import ObjectBase
except (RuntimeError, ImportError):
# pylint: disable=wrong-import-position,unused-import
from tvm._ffi._ctypes.packed_func import _set_class_object, _set_class_object_generic
from tvm._ffi._ctypes.object import ObjectBase
def _new_object(cls):
"""Helper function for pickle"""
return cls.__new__(cls)
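# How this class's pickle hooks compose (sketch; `make_node` is hypothetical):
#
#   import pickle
#   node = make_node()            # any FFI-registered Object subclass instance
#   blob = pickle.dumps(node)     # __reduce__ -> __getstate__ -> SaveJSON
#   clone = pickle.loads(blob)    # _new_object -> __setstate__ -> LoadJSON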
class Object(ObjectBase):
"""Base class for all tvm's runtime objects."""
def __repr__(self):
return _ffi_node_api.AsRepr(self)
def __dir__(self):
fnames = _ffi_node_api.NodeListAttrNames(self)
size = fnames(-1)
return [fnames(i) for i in range(size)]
def __getattr__(self, name):
try:
return _ffi_node_api.NodeGetAttr(self, name)
except AttributeError:
raise AttributeError(
"%s has no attribute %s" % (str(type(self)), name))
def __hash__(self):
return _ffi_api.ObjectHash(self)
def __eq__(self, other):
return self.same_as(other)
def __ne__(self, other):
return not self.__eq__(other) |
def __getstate__(self):
handle = self.handle
if handle is not None:
return {'handle': _ffi_node_api.SaveJSON(self)}
return {'handle': None}
def __setstate__(self, state):
# pylint: disable=assigning-non-slot, assignment-from-no-return
handle = state['handle']
if handle is not None:
json_str = handle
other = _ffi_node_api.LoadJSON(json_str)
self.handle = other.handle
other.handle = None
else:
self.handle = None
_set_class_object(Object) |
def __reduce__(self):
cls = type(self)
return (_new_object, (cls, ), self.__getstate__()) |
subsets_78.rs | /*
* @lc app=leetcode.cn id=78 lang=rust
*
* [78] Subsets (子集)
*/
// @lc code=start
#[allow(clippy::needless_range_loop)]
impl Solution {
pub fn subs | s: Vec<i32>) -> Vec<Vec<i32>> {
let all_lens = usize::pow(2usize, nums.len() as u32);
let mut res = vec![vec![];all_lens];
for i in 0..nums.len() {
let mod_lens = usize::pow(2usize,(nums.len() - 1 - i) as u32);
let num = nums[i];
for j in 0..all_lens {
if (j/mod_lens)%2==0 {
res[j].push(num);
}
}
}
res
}
}
// @lc code=end
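// Enumeration sketch: subset j (of the 2^n subsets) keeps nums[i] iff bit
// (n-1-i) of j is 0; mod_lens = 2^(n-1-i), so (j / mod_lens) % 2 reads exactly
// that bit. For nums = [1,2,3], j = 0b000 yields [1,2,3] and j = 0b111 yields [].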
struct Solution;
#[cfg(test)]
mod test {
use super::Solution;
use crate::utils::test_tools::assert_nested_equivalent;
#[test]
fn returns_expected() {
let expected = vec![
vec![3],
vec![1],
vec![2],
vec![1,2,3],
vec![1,3],
vec![2,3],
vec![1,2],
vec![]
];
assert_nested_equivalent(&Solution::subsets(vec![1,2,3]), &expected);
}
}
| ets(num |
uax.go | package uax
import "unicode/utf8"
// PositionOfFirstLegalRune returns a legal Unicode code point
// start position and cut-off prefix, if any.
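// For example (a sketch of the intended behavior): for input "\x80abc" the
// leading continuation byte can never start a rune, so the function reports
// start position 1 and cut-off prefix [0x80].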
func PositionOfFirstLegalRune(s string) (int, []byte) | {
i, l, start := 0, len(s), -1
for i < l {
if utf8.RuneStart(s[i]) {
r, _ := utf8.DecodeRuneInString(s[i:])
if r != utf8.RuneError {
start = i
}
break
}
i++ // advance past continuation bytes that cannot start a rune; without this the loop never terminates
}
//CT().Debugf("start index = %d", start)
return start, []byte(s[:i])
} |
|
ruben_opt.rs | use clap::arg_enum;
use config::config::NodeConfig;
use failure::prelude::*;
use logger::prelude::*;
use regex::Regex;
use std::{fs, net::IpAddr, path::PathBuf, str::FromStr};
use structopt::StructOpt;
arg_enum! {
#[derive(Debug)]
pub enum Executable {
TestLiveness,
MeasureThroughput,
}
}
/// CLI options for RuBen.
#[derive(Debug, StructOpt)]
#[structopt(
name = "RuBen",
author = "Libra",
about = "RuBen (Ru)ns The Libra (Ben)chmarker For You."
)]
pub struct Opt {
/// Validator address list separated by whitespace: `ip_address:port ip_address:port ...`.
/// It is required unless (and hence conflicts with) swarm_config_dir is present.
#[structopt(
short = "a",
long = "validator_addresses",
conflicts_with = "swarm_config_dir",
requires = "debug_address",
required_unless = "swarm_config_dir"
)]
pub validator_addresses: Vec<String>,
/// Debug interface address in the form of ip_address:port.
/// It is required unless (and hence conflicts with) swarm_config_dir is present.
#[structopt(
short = "d",
long = "debug_address",
conflicts_with = "swarm_config_dir",
requires = "validator_addresses",
required_unless = "swarm_config_dir"
)]
pub debug_address: Option<String>,
/// libra_swarm's config file directory, which holds libra_node's config .toml file(s).
/// It is required unless (and hence conflicts with)
/// validator_addresses and debug_address are both present.
#[structopt(
short = "s",
long = "swarm_config_dir",
raw(conflicts_with_all = r#"&["validator_addresses", "debug_address"]"#),
raw(required_unless_all = r#"&["validator_addresses", "debug_address"]"#)
)]
pub swarm_config_dir: Option<String>,
/// Metrics server process's address.
/// If this argument is not present, RuBen will not spawn metrics server.
#[structopt(short = "m", long = "metrics_server_address")]
pub metrics_server_address: Option<String>,
/// Valid faucet key file path.
#[structopt(short = "f", long = "faucet_key_file_path", required = true)]
pub faucet_key_file_path: String,
/// Number of accounts to create in Libra.
#[structopt(short = "n", long = "num_accounts", default_value = "32")]
pub num_accounts: u64,
/// Free lunch amount to accounts.
#[structopt(short = "l", long = "free_lunch", default_value = "1000000")]
pub free_lunch: u64,
/// Number of AC clients.
/// If not specified or equals 0, it will be set to validator_addresses.len().
#[structopt(short = "c", long = "num_clients", default_value = "0")]
pub num_clients: usize,
/// Number of repetitions to attempt in one epoch, to increase the overall number of sent TXNs.
#[structopt(short = "r", long = "num_rounds", default_value = "1")]
pub num_rounds: u64,
/// Number of epochs over which to measure TXN throughput, each time with a newly created Benchmarker.
#[structopt(short = "e", long = "num_epochs", default_value = "10")]
pub num_epochs: u64,
/// Supported application of Benchmarker: `TestLiveness` or `MeasureThroughput`.
#[structopt(
short = "x",
long = "executable",
raw(possible_values = "&Executable::variants()"),
case_insensitive = true,
default_value = "MeasureThroughput"
)]
pub executable: Executable,
}
/// Helper that checks if address is valid, and converts unspecified address to localhost.
/// If parsing as numeric network address fails, treat as valid domain name.
fn parse_socket_address(address: &str, port: u16) -> String {
match IpAddr::from_str(address) {
Ok(ip_address) => {
if ip_address.is_unspecified() {
format!("localhost:{}", port)
} else {
format!("{}:{}", address, port)
}
}
Err(_) => format!("{}:{}", address, port),
}
}
/// Scan *.node.config.toml files under config_dir_name, parse them as node config
/// and return libra_swarm's configuration info as a tuple:
/// (addresses for all nodes, debug_address)
fn parse_swarm_config_from_dir(config_dir_name: &str) -> Result<(Vec<String>, String)> {
let mut validator_addresses: Vec<String> = Vec::new();
let mut debug_address = None;
let re = Regex::new(r"[[:alnum:]]{64}\.node\.config\.toml").expect("failed to build regex");
let config_dir = PathBuf::from(config_dir_name);
if config_dir.is_dir() {
for entry in fs::read_dir(config_dir).expect("invalid config directory") {
let path = entry.expect("invalid path under config directory").path();
if path.is_file() {
let filename = path
.file_name()
.expect("failed to convert filename to string")
.to_str()
.expect("failed to convert filename to string");
if re.is_match(filename) {
debug!("Parsing node config file {:?}.", filename);
let config_string = fs::read_to_string(&path)
.unwrap_or_else(|_| panic!("failed to load config file {:?}", filename));
let config = NodeConfig::parse(&config_string).unwrap_or_else(|_| {
panic!("failed to parse NodeConfig from {:?}", filename)
});
debug_address.get_or_insert(parse_socket_address(
&config.debug_interface.address,
config.debug_interface.admission_control_node_debug_port,
));
let address = parse_socket_address(
&config.admission_control.address,
config.admission_control.admission_control_service_port,
);
validator_addresses.push(address);
}
}
}
}
if validator_addresses.is_empty() {
bail!(
"unable to parse validator_addresses from {}",
config_dir_name
)
}
Ok((
validator_addresses,
debug_address
.ok_or_else(|| format_err!("unable to parse debug_address from {}", config_dir_name))?,
))
}
impl Opt {
pub fn new_from_args() -> Self {
let mut args = Opt::from_args();
args.try_parse_validator_addresses();
if args.num_clients == 0 {
args.num_clients = args.validator_addresses.len();
}
args
}
/// Override validator_addresses and debug_address if swarm_config_dir is provided.
pub fn try_parse_validator_addresses(&mut self) {
if let Some(swarm_config_dir) = &self.swarm_config_dir {
let (validator_addresses, debug_address) =
parse_swarm_config_from_dir(swarm_config_dir).expect("invalid arguments");
self.validator_addresses = validator_addresses;
self.debug_address = Some(debug_address);
}
}
}
#[cfg(test)]
mod tests {
use crate::ruben_opt::{parse_socket_address, parse_swarm_config_from_dir};
#[test]
fn test_parse_socket_address() |
#[test]
fn test_parse_swarm_config_from_invalid_dir() {
// Directory doesn't exist at all.
let non_exist_dir_name = String::from("NonExistDir");
let mut result = parse_swarm_config_from_dir(&non_exist_dir_name);
assert_eq!(result.is_err(), true);
// Directory exists but config file does not.
let path = std::env::current_dir().expect("unable to get current dir");
if let Some(dir_without_config) = path.to_str() {
result = parse_swarm_config_from_dir(&dir_without_config);
assert_eq!(result.is_err(), true);
}
}
}
| {
assert_eq!(
parse_socket_address("216.10.234.56", 12345),
"216.10.234.56:12345"
);
assert_eq!(parse_socket_address("0.0.0.0", 12345), "localhost:12345");
assert_eq!(parse_socket_address("::", 12345), "localhost:12345");
assert_eq!(
parse_socket_address("face:booc::0", 12345),
"face:booc::0:12345"
);
assert_eq!(
parse_socket_address("2401:dbff:121f:a2f1:face:d:6c:0", 12345),
"2401:dbff:121f:a2f1:face:d:6c:0:12345"
);
assert_eq!(parse_socket_address("localhost", 12345), "localhost:12345");
assert_eq!(
parse_socket_address("www.facebook.com", 12345),
"www.facebook.com:12345"
);
} |
utils.py | from __future__ import print_function
import errno
import os
from PIL import Image
import torch
import torch.nn as nn
import re
import json
import pickle as cPickle
import numpy as np
import utils
import h5py
import operator
import functools
from torch._six import string_classes
import torch.nn.functional as F
import collections
#from pycocotools.coco import COCO
# from scipy.sparse import coo_matrix
# from sklearn.metrics.pairwise import cosine_similarity
from torch.utils.data.dataloader import default_collate, numpy_type_map  # numpy_type_map is used by trim_collate; it ships with older torch releases
EPS = 1e-7
def assert_eq(real, expected):
assert real == expected, '%s (true) vs %s (expected)' % (real, expected)
def assert_array_eq(real, expected):
assert (np.abs(real-expected) < EPS).all(), \
'%s (true) vs %s (expected)' % (real, expected)
def load_folder(folder, suffix):
imgs = []
for f in sorted(os.listdir(folder)):
if f.endswith(suffix):
imgs.append(os.path.join(folder, f))
return imgs
def load_imageid(folder):
images = load_folder(folder, 'jpg')
img_ids = set()
for img in images:
img_id = int(img.split('/')[-1].split('.')[0].split('_')[-1])
img_ids.add(img_id)
return img_ids
def pil_loader(path):
with open(path, 'rb') as f:
with Image.open(f) as img:
return img.convert('RGB')
def weights_init(m):
"""custom weights initialization."""
cname = m.__class__
if cname == nn.Linear or cname == nn.Conv2d or cname == nn.ConvTranspose2d:
m.weight.data.normal_(0.0, 0.02)
elif cname == nn.BatchNorm2d:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
else:
print('%s is not initialized.' % cname)
def init_net(net, net_file):
if net_file:
net.load_state_dict(torch.load(net_file))
else:
net.apply(weights_init)
def create_dir(path):
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
class Logger(object):
def __init__(self, output_name):
dirname = os.path.dirname(output_name)
if not os.path.exists(dirname):
os.mkdir(dirname)
self.log_file = open(output_name, 'w')
self.infos = {}
def append(self, key, val):
vals = self.infos.setdefault(key, [])
vals.append(val)
def log(self, extra_msg=''):
msgs = [extra_msg]
for key, vals in self.infos.items():
msgs.append('%s %.6f' % (key, np.mean(vals)))
msg = '\n'.join(msgs)
self.log_file.write(msg + '\n')
self.log_file.flush()
self.infos = {}
return msg
def write(self, msg):
self.log_file.write(msg + '\n')
self.log_file.flush()
print(msg)
def print_model(model, logger):
print(model)
nParams = 0
for w in model.parameters():
nParams += functools.reduce(operator.mul, w.size(), 1)
if logger:
logger.write('nParams=\t'+str(nParams))
def save_model(path, model, epoch, optimizer=None):
model_dict = {
'epoch': epoch,
'model_state': model.state_dict()
}
if optimizer is not None:
model_dict['optimizer_state'] = optimizer.state_dict()
torch.save(model_dict, path)
def rho_select(pad, lengths):
# Index of the last output for each sequence.
idx_ = (lengths-1).view(-1,1).expand(pad.size(0), pad.size(2)).unsqueeze(1)
extracted = pad.gather(1, idx_).squeeze(1)
return extracted
def trim_collate(batch):
"Puts each data field into a tensor with outer dimension batch size"
_use_shared_memory = True
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if torch.is_tensor(batch[0]):
out = None
if 1 < batch[0].dim(): # image features
max_num_boxes = max([x.size(0) for x in batch])
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = len(batch) * max_num_boxes * batch[0].size(-1)
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
# warning: F.pad returns Variable!
return torch.stack([F.pad(x, (0,0,0,max_num_boxes-x.size(0))).data for x in batch], 0, out=out)
else:
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if re.search('[SaUO]', elem.dtype.str) is not None:
raise TypeError(error_msg.format(elem.dtype))
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], collections.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [trim_collate(samples) for samples in transposed]
raise TypeError((error_msg.format(type(batch[0]))))
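# Minimal usage sketch (dataset and batch-size values are hypothetical): pass
# trim_collate as the DataLoader's collate_fn so variable-length
# [num_boxes, feat_dim] tensors are zero-padded to the batch's max num_boxes
# before stacking.
#
#   loader = torch.utils.data.DataLoader(vqa_dataset, batch_size=32,
#                                        collate_fn=trim_collate)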
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
def mask_softmax(x, lengths):  # softmax along dim=1
mask = torch.zeros_like(x).to(device=x.device, non_blocking=True)
t_lengths = lengths[:, :, None].expand_as(mask)
arange_id = torch.arange(mask.size(1)).to(device=x.device, non_blocking=True)
arange_id = arange_id[None, :, None].expand_as(mask)
mask[arange_id < t_lengths] = 1
# https://stackoverflow.com/questions/42599498/numercially-stable-softmax
# https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python
# exp(x - max(x)) instead of exp(x) is a trick
# to improve the numerical stability while giving
# the same outputs
x2 = torch.exp(x - torch.max(x))
x3 = x2 * mask
epsilon = 1e-5
x3_sum = torch.sum(x3, dim=1, keepdim=True) + epsilon
x4 = x3 / x3_sum.expand_as(x3)
return x4
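# Net effect: positions at or beyond each sample's length receive ~0 weight,
# and the remaining positions form a softmax along dim 1 computed with the
# exp(x - max(x)) shift described above.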
class GradReverseMask(torch.autograd.Function):
"""
This layer is used to create an adversarial loss.
"""
@staticmethod
def forward(ctx, x, mask, weight):
"""
The mask should be composed of 0s and 1s.
Entries marked '1' get their gradient reversed.
"""
ctx.save_for_backward(mask)
ctx.weight = weight
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
mask, = ctx.saved_tensors
mask_c = mask.clone().detach().float()
mask_c[mask == 0] = 1.0
mask_c[mask == 1] = - float(ctx.weight)
return grad_output * mask_c[:, None].float(), None, None
def grad_reverse_mask(x, mask, weight=1):
|
class GradReverse(torch.autograd.Function):
"""
This layer is used to create an adversarial loss.
"""
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg()
def grad_reverse(x):
return GradReverse.apply(x)
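# Usage sketch: y = grad_reverse(x) acts as the identity in the forward pass,
# while gradients flowing back through y are negated, the standard trick for
# adversarial / domain-confusion objectives.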
class GradMulConst(torch.autograd.Function):
"""
This layer is used to create an adversarial loss.
"""
@staticmethod
def forward(ctx, x, const):
ctx.const = const
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output * ctx.const, None
def grad_mul_const(x, const):
return GradMulConst.apply(x, const)
| return GradReverseMask.apply(x, mask, weight) |
CopyPixelOperation.py | class CopyPixelOperation(Enum,IComparable,IFormattable,IConvertible):
"""
Determines how the source color in a copy pixel operation is combined with the destination color to result in a final color.
enum CopyPixelOperation,values: Blackness (66),CaptureBlt (1073741824),DestinationInvert (5570569),MergeCopy (12583114),MergePaint (12255782),NoMirrorBitmap (-2147483648),NotSourceCopy (3342344),NotSourceErase (1114278),PatCopy (15728673),PatInvert (5898313),PatPaint (16452105),SourceAnd (8913094),SourceCopy (13369376),SourceErase (4457256),SourceInvert (6684742),SourcePaint (15597702),Whiteness (16711778)
"""
def Instance(self):
""" This function has been arbitrarily put into the stubs"""
return CopyPixelOperation()
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
|
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Blackness=None
CaptureBlt=None
DestinationInvert=None
MergeCopy=None
MergePaint=None
NoMirrorBitmap=None
NotSourceCopy=None
NotSourceErase=None
PatCopy=None
PatInvert=None
PatPaint=None
SourceAnd=None
SourceCopy=None
SourceErase=None
SourceInvert=None
SourcePaint=None
value__=None
Whiteness=None
| pass |
Main.py |
from ArduinoHandler import ArduinoHandler
import logging
import time
from datetime import datetime
from Queue import Queue
import os
import sys
import multiprocessing
from PCHandler import PCHandler
from BTHandler import BTHandler
from CameraHandler import CameraHandler
from PacketsHandler import *
jobList = []
m = multiprocessing.Manager()
queueJob = m.Queue()
logsDirectory = "logs"
if not os.path.exists(logsDirectory):
os.makedirs(logsDirectory)
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s %(message)s",
filename=os.path.join(logsDirectory, datetime.now().strftime("%Y%m%d-%H%M%S") + ".log"),
filemode="w",
)
currentRunNumber = None
algoVer = 1
debugGoThru = False
waitRpi = True
sendCamera = False
fpReceived = True
fpNow = False
ph = PacketHandler()
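# Routing sketch (packet layout inferred from the strings built below): packets
# of the form "<src>:<dst>:<payload...>" are pushed onto queueJob by the
# handlers, and the main loop hands each one to PacketHandler.handle, which
# dispatches it to the registered target handler.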
def removeFilesInFolder(folderpath):
|
logging.info("rpi start")
pc = PCHandler("192.168.9.9", 8081, queueJob, "P")
ph.registerHandler(pc)
jobList.append(pc)
logging.info("bluetooth start")
bt = BTHandler(4, queueJob, "B", fpReceived, fpNow)
ph.registerHandler(bt)
jobList.append(bt)
logging.info("arduino start")
arduino = ArduinoHandler(
"/dev/ttyACM0",
115200,
0,
queueJob,
"A",
sendCamera,
fpReceived,
fpNow,
)
ph.registerHandler(arduino)
jobList.append(arduino)
logging.info("camera start")
c = CameraHandler(queueJob, "R", sendCamera, currentRunNumber, algoVer)
ph.registerHandler(c)
jobList.append(c)
if algoVer == 1:
resultsFolder = "/home/pi/checklist-results"
imageFolder = "/home/pi/checklist-images"
statusFolder = "/home/pi/checklist-status"
removeFilesInFolder(resultsFolder)
removeFilesInFolder(imageFolder)
removeFilesInFolder(statusFolder)
while True:
if queueJob.qsize() != 0:
if debugGoThru:
if sys.version_info[0] == 3:
x = input("enter to cont")
else:
x = raw_input("enter to cont")
if algoVer == 1 and not fpNow:
logging.info("[FP] algo=1, fp=no")
for resultFile in os.listdir(resultsFolder):
if (
resultFile.endswith(".result")
and resultFile not in bt.proResults
):
finalFileName = resultFile.split(".")[0]
imgRecPacket = (
"R:B:map:absolute:"
+ finalFileName.split("-")[0]
+ ":"
+ finalFileName.split("-")[1]
+ ":"
+ finalFileName.split("-")[2]
)
queueJob.put(imgRecPacket)
bt.proResults.append(resultFile)
logging.info(
"[raspberry] image reg packet is in queue - %s"
% imgRecPacket
)
elif algoVer == 2 and not fpNow:
proPacket = "R:D:read_initial_processed"
queueJob.put(proPacket)
ph.handle(queueJob.get())
queueJob.task_done()
for t in jobList:
t.join()
| if os.path.exists(folderpath):
for filepath in os.listdir(folderpath):
os.remove(os.path.join(folderpath, filepath)) |
about-organisation.js | import React from "react"
import { getCurrentLanguageString, createProperty } from "../../utility/helper"
import { connect } from "react-redux"
import PropTypes from "prop-types"
import { size } from "../../index.styles";
import { TextBlock, PageTitle } from "../../templates/page.styles";
import styled from 'styled-components';
const AboutTextBlock = styled(TextBlock)`
padding: 0 0 1em !important;
@media (max-width: ${size.mobileM}) {
padding-bottom: 0.3em !important;
}
> p {
padding-left: 1em;
@media (max-width: ${size.mobileM}) {
line-height:1.4 !important;
}
@media (min-width: ${size.mobileSL}) {
padding-left: 1.2em !important;
}
@media (min-width: ${size.tablet}) {
padding-left: 1.05em !important;
}
@media (min-width: ${size.laptop}) {
padding-left: 1em !important;
}
}
`
const TitleTextBlock = styled(TextBlock)`
padding-left: 1em !important;
padding-top: 1em;
padding-bottom: 0 !important;
margin-bottom: 0.2em;
:first-of-type {
padding-left: 0em !important;
padding-bottom: 1em !important;
@media (max-width: ${size.mobileM}) {
padding-top: 0em;
padding-bottom: 0em !important;
}
}
@media (min-width: ${size.mobileSL}) {
padding-top: 0em;
}
@media (min-width: ${size.tablet}) {
padding-top: 1em;
}
@media (min-width: ${size.laptop}) {
padding-top: 0em;
}
`
const AboutOrganisation = props => {
const language = getCurrentLanguageString(props.languages)
const teamBlock = props.team_block;
const generateSection = (teamBlockItem, index) => {
let renderComponent;
switch (teamBlockItem.team_block_type) {
case "section":
renderComponent = (
<TitleTextBlock key={index}>
<p> {teamBlockItem[createProperty("section_title", language)]}</p>
</TitleTextBlock>
)
break
case "section_content":
renderComponent = (
<AboutTextBlock key={index}>
{teamBlockItem.block_names.map((nameItem, nameIndex) => (
<p key={nameIndex}> {nameItem.full_name} {nameItem[createProperty("additional_info", language)]}</p>
))}
</AboutTextBlock>
)
break
case "position":
renderComponent = (
<TitleTextBlock key={index}>
<p key={index}> {teamBlockItem[createProperty("position_title", language)]}</p>
</TitleTextBlock>
)
break
default:
renderComponent = <p key={index}></p>
break
}
return renderComponent
}
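// Expected shape of props.team_block (sketch; the localized field names are
// produced by createProperty and are assumptions here):
// [
//   { team_block_type: "section", /* section_title_* */ },
//   { team_block_type: "section_content", block_names: [{ full_name, /* additional_info_* */ }] },
//   { team_block_type: "position", /* position_title_* */ },
// ]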
return (
<>
<PageTitle> {content[language].title}</PageTitle>
<div>{teamBlock.map((item, index) => generateSection(item, index))}</div>
</>
)
}
const mapStateToProps = state => {
return {
languages: state.languages,
}
}
let content = {
EN: {
title: "organization"
},
DE: {
title: "verein"
},
}
AboutOrganisation.propTypes = {
team_block: PropTypes.array, |
export default connect(
mapStateToProps,
null
)(AboutOrganisation) | } |
api.rs | // All functions here are extern function. There is no point for marking them as unsafe.
#![allow(clippy::not_unsafe_ptr_arg_deref)]
use libc::c_char;
use std::ffi::CStr;
use mmtk::memory_manager;
use mmtk::AllocationSemantics;
use mmtk::util::{ObjectReference, OpaquePointer, Address};
use mmtk::scheduler::GCWorker;
use mmtk::Mutator;
use mmtk::MMTK;
use DummyVM;
use SINGLETON;
#[no_mangle]
pub extern "C" fn gc_init(heap_size: usize) {
// # Safety
// Casting `SINGLETON` as mutable is safe because `gc_init` will only be executed once by a single thread during startup.
#[allow(clippy::cast_ref_to_mut)]
let singleton_mut = unsafe { &mut *(&*SINGLETON as *const MMTK<DummyVM> as *mut MMTK<DummyVM>) };
memory_manager::gc_init(singleton_mut, heap_size)
}
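// Binding lifecycle these externs assume (sketch): call gc_init once at
// startup, bind_mutator per mutator thread, alloc/post_alloc through that
// mutator, and destroy_mutator when the thread exits.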
#[no_mangle]
pub extern "C" fn start_control_collector(tls: OpaquePointer) {
memory_manager::start_control_collector(&SINGLETON, tls);
}
#[no_mangle]
pub extern "C" fn | (tls: OpaquePointer) -> *mut Mutator<DummyVM> {
Box::into_raw(memory_manager::bind_mutator(&SINGLETON, tls))
}
#[no_mangle]
pub extern "C" fn destroy_mutator(mutator: *mut Mutator<DummyVM>) {
memory_manager::destroy_mutator(unsafe { Box::from_raw(mutator) })
}
#[no_mangle]
pub extern "C" fn alloc(mutator: *mut Mutator<DummyVM>, size: usize,
align: usize, offset: isize, semantics: AllocationSemantics) -> Address {
memory_manager::alloc::<DummyVM>(unsafe { &mut *mutator }, size, align, offset, semantics)
}
#[no_mangle]
pub extern "C" fn post_alloc(mutator: *mut Mutator<DummyVM>, refer: ObjectReference,
bytes: usize, semantics: AllocationSemantics) {
memory_manager::post_alloc::<DummyVM>(unsafe { &mut *mutator }, refer, bytes, semantics)
}
#[no_mangle]
pub extern "C" fn will_never_move(object: ObjectReference) -> bool {
!object.is_movable()
}
#[no_mangle]
pub extern "C" fn start_worker(tls: OpaquePointer, worker: &'static mut GCWorker<DummyVM>, mmtk: &'static MMTK<DummyVM>) {
memory_manager::start_worker::<DummyVM>(tls, worker, mmtk)
}
#[no_mangle]
pub extern "C" fn enable_collection(tls: OpaquePointer) {
memory_manager::enable_collection(&SINGLETON, tls)
}
#[no_mangle]
pub extern "C" fn used_bytes() -> usize {
memory_manager::used_bytes(&SINGLETON)
}
#[no_mangle]
pub extern "C" fn free_bytes() -> usize {
memory_manager::free_bytes(&SINGLETON)
}
#[no_mangle]
pub extern "C" fn total_bytes() -> usize {
memory_manager::total_bytes(&SINGLETON)
}
#[no_mangle]
pub extern "C" fn is_live_object(object: ObjectReference) -> bool{
object.is_live()
}
#[no_mangle]
pub extern "C" fn is_mapped_object(object: ObjectReference) -> bool {
object.is_mapped()
}
#[no_mangle]
pub extern "C" fn is_mapped_address(address: Address) -> bool {
address.is_mapped()
}
#[no_mangle]
pub extern "C" fn modify_check(object: ObjectReference) {
memory_manager::modify_check(&SINGLETON, object)
}
#[no_mangle]
pub extern "C" fn handle_user_collection_request(tls: OpaquePointer) {
memory_manager::handle_user_collection_request::<DummyVM>(&SINGLETON, tls);
}
#[no_mangle]
pub extern "C" fn add_weak_candidate(reff: ObjectReference, referent: ObjectReference) {
memory_manager::add_weak_candidate(&SINGLETON, reff, referent)
}
#[no_mangle]
pub extern "C" fn add_soft_candidate(reff: ObjectReference, referent: ObjectReference) {
memory_manager::add_soft_candidate(&SINGLETON, reff, referent)
}
#[no_mangle]
pub extern "C" fn add_phantom_candidate(reff: ObjectReference, referent: ObjectReference) {
memory_manager::add_phantom_candidate(&SINGLETON, reff, referent)
}
#[no_mangle]
pub extern "C" fn harness_begin(tls: OpaquePointer) {
memory_manager::harness_begin(&SINGLETON, tls)
}
#[no_mangle]
pub extern "C" fn harness_end(_tls: OpaquePointer) {
memory_manager::harness_end(&SINGLETON)
}
#[no_mangle]
pub extern "C" fn process(name: *const c_char, value: *const c_char) -> bool {
let name_str: &CStr = unsafe { CStr::from_ptr(name) };
let value_str: &CStr = unsafe { CStr::from_ptr(value) };
memory_manager::process(&SINGLETON, name_str.to_str().unwrap(), value_str.to_str().unwrap())
}
#[no_mangle]
pub extern "C" fn starting_heap_address() -> Address {
memory_manager::starting_heap_address()
}
#[no_mangle]
pub extern "C" fn last_heap_address() -> Address {
memory_manager::last_heap_address()
}
| bind_mutator |
greet.py | async def | (ctx):
greetings = [
"Ahn nyong ha se yo",
"Ahn-nyong-ha-se-yo",
"Ahoj",
"An-nyŏng-ha-se-yo",
"As-salamu alaykum",
"Assalamo aleikum",
"Assalamualaikum",
"Avuxeni",
"Bonġu",
"Bonjour",
"Bună ziua",
"Ciao",
"Cześć",
"Dia dhuit",
"Dobar dan",
"Dobra većer",
"Dobro jutro",
"God dag",
"Góðan dag",
"Grüß gott",
"Guten tag",
"Hafa adai",
"Hallå",
"Hallo",
"Hello",
"Hoi",
"Hola",
"How ya doing",
"How you doing",
"Howdy",
"Hujambo",
"Hyvää päivää",
"Ia orna",
"Jo napot",
"Konnichiwa",
"Marhaba",
"Merhaba",
"Moïen",
"Namaskar",
"Namaste",
"Namastē",
"Nde-ewo",
"Nǐ hǎo",
"Niltze",
"Now then",
"Olá",
"Salam",
"Salve",
"Sawasdee",
"Sawubona",
"Selamat siang",
"Shalom",
"Shwmae",
"Sveiki",
"Wassup",
"What's up",
"Xin chào",
"Yasou",
"Zdraveite",
"Zdravo",
"Zdravstvuyte",
"안녕하세요",
"こんにちは",
"你好",
]
message = ctx.content.lower()
# if no one is tagged in the message
if "@" not in message:
message_greetings = []
# check if any of the greetings are in the message
for greeting in greetings:
if greeting.lower() in message:
message_greetings.append(greeting)
# if any are, format them into a greeting back to the user
if len(message_greetings) > 0:
greetings_string = message_greetings[0]
if len(message_greetings) > 1:
first_greeting = message_greetings[0]
other_greetings = []
for greeting in message_greetings[1 : len(message_greetings)]:
other_greetings.append(greeting.lower())
all_greetings = [first_greeting] + other_greetings
if len(message_greetings) > 2:
greetings_string = (
f"{', '.join(all_greetings[0:-1])} and {all_greetings[-1]}"
)
else:
greetings_string = " and ".join(all_greetings)
# respond to user
await ctx.channel.send(f"{greetings_string}, @{ctx.author.name}!")
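# Example (sketch): "hello hola" yields "Hello and hola, @user!", and three
# matches such as "hello hola konnichiwa" join as
# "Hello, hola and konnichiwa, @user!".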
| greet |
stream.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package kinesis
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// Provides a Kinesis Stream resource. Amazon Kinesis is a managed service that
// scales elastically for real-time processing of streaming big data.
//
// For more details, see the [Amazon Kinesis Documentation](https://aws.amazon.com/documentation/kinesis/).
//
// ## Example Usage
//
// ```go
// package main
//
// import (
// "github.com/pulumi/pulumi-aws/sdk/v3/go/aws/kinesis"
// "github.com/pulumi/pulumi/sdk/v2/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// _, err := kinesis.NewStream(ctx, "testStream", &kinesis.StreamArgs{
// RetentionPeriod: pulumi.Int(48),
// ShardCount: pulumi.Int(1),
// ShardLevelMetrics: pulumi.StringArray{
// pulumi.String("IncomingBytes"),
// pulumi.String("OutgoingBytes"),
// },
// Tags: pulumi.StringMap{
// "Environment": pulumi.String("test"),
// },
// })
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
//
// ## Import
//
// Kinesis Streams can be imported using the `name`, e.g.
//
// ```sh
// $ pulumi import aws:kinesis/stream:Stream test_stream kinesis-test
// ```
//
// [1]https://aws.amazon.com/documentation/kinesis/ [2]https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html [3]https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html
type Stream struct {
pulumi.CustomResourceState
// The Amazon Resource Name (ARN) specifying the Stream (same as `id`)
Arn pulumi.StringOutput `pulumi:"arn"`
// The encryption type to use. The only acceptable values are `NONE` or `KMS`. The default value is `NONE`.
EncryptionType pulumi.StringPtrOutput `pulumi:"encryptionType"`
// A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is `false`.
EnforceConsumerDeletion pulumi.BoolPtrOutput `pulumi:"enforceConsumerDeletion"`
// The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias `alias/aws/kinesis`.
KmsKeyId pulumi.StringPtrOutput `pulumi:"kmsKeyId"`
// A name to identify the stream. This is unique to the AWS account and region the Stream is created in.
Name pulumi.StringOutput `pulumi:"name"`
// Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 8760 hours. Minimum value is 24. Default is 24.
RetentionPeriod pulumi.IntPtrOutput `pulumi:"retentionPeriod"`
// The number of shards that the stream will use.
// Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See [Amazon Kinesis Streams](https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html) for more.
ShardCount pulumi.IntOutput `pulumi:"shardCount"`
// A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html) for more. Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable.
ShardLevelMetrics pulumi.StringArrayOutput `pulumi:"shardLevelMetrics"`
// A map of tags to assign to the resource.
Tags pulumi.StringMapOutput `pulumi:"tags"`
}
// NewStream registers a new resource with the given unique name, arguments, and options.
func NewStream(ctx *pulumi.Context,
name string, args *StreamArgs, opts ...pulumi.ResourceOption) (*Stream, error) {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.ShardCount == nil |
var resource Stream
err := ctx.RegisterResource("aws:kinesis/stream:Stream", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetStream gets an existing Stream resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetStream(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *StreamState, opts ...pulumi.ResourceOption) (*Stream, error) {
var resource Stream
err := ctx.ReadResource("aws:kinesis/stream:Stream", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering Stream resources.
type streamState struct {
// The Amazon Resource Name (ARN) specifying the Stream (same as `id`)
Arn *string `pulumi:"arn"`
// The encryption type to use. The only acceptable values are `NONE` or `KMS`. The default value is `NONE`.
EncryptionType *string `pulumi:"encryptionType"`
// A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is `false`.
EnforceConsumerDeletion *bool `pulumi:"enforceConsumerDeletion"`
// The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias `alias/aws/kinesis`.
KmsKeyId *string `pulumi:"kmsKeyId"`
// A name to identify the stream. This is unique to the AWS account and region the Stream is created in.
Name *string `pulumi:"name"`
// Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 8760 hours. Minimum value is 24. Default is 24.
RetentionPeriod *int `pulumi:"retentionPeriod"`
// The number of shards that the stream will use.
// Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See [Amazon Kinesis Streams](https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html) for more.
ShardCount *int `pulumi:"shardCount"`
// A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html) for more. Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable.
ShardLevelMetrics []string `pulumi:"shardLevelMetrics"`
// A map of tags to assign to the resource.
Tags map[string]string `pulumi:"tags"`
}
type StreamState struct {
// The Amazon Resource Name (ARN) specifying the Stream (same as `id`)
Arn pulumi.StringPtrInput
// The encryption type to use. The only acceptable values are `NONE` or `KMS`. The default value is `NONE`.
EncryptionType pulumi.StringPtrInput
// A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is `false`.
EnforceConsumerDeletion pulumi.BoolPtrInput
// The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias `alias/aws/kinesis`.
KmsKeyId pulumi.StringPtrInput
// A name to identify the stream. This is unique to the AWS account and region the Stream is created in.
Name pulumi.StringPtrInput
// Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 8760 hours. Minimum value is 24. Default is 24.
RetentionPeriod pulumi.IntPtrInput
// The number of shards that the stream will use.
// Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See [Amazon Kinesis Streams](https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html) for more.
ShardCount pulumi.IntPtrInput
// A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html) for more. Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable.
ShardLevelMetrics pulumi.StringArrayInput
// A map of tags to assign to the resource.
Tags pulumi.StringMapInput
}
func (StreamState) ElementType() reflect.Type {
return reflect.TypeOf((*streamState)(nil)).Elem()
}
type streamArgs struct {
// The Amazon Resource Name (ARN) specifying the Stream (same as `id`)
Arn *string `pulumi:"arn"`
// The encryption type to use. The only acceptable values are `NONE` or `KMS`. The default value is `NONE`.
EncryptionType *string `pulumi:"encryptionType"`
// A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is `false`.
EnforceConsumerDeletion *bool `pulumi:"enforceConsumerDeletion"`
// The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias `alias/aws/kinesis`.
KmsKeyId *string `pulumi:"kmsKeyId"`
// A name to identify the stream. This is unique to the AWS account and region the Stream is created in.
Name *string `pulumi:"name"`
// Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 8760 hours. Minimum value is 24. Default is 24.
RetentionPeriod *int `pulumi:"retentionPeriod"`
// The number of shards that the stream will use.
// Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See [Amazon Kinesis Streams](https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html) for more.
ShardCount int `pulumi:"shardCount"`
// A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html) for more. Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable.
ShardLevelMetrics []string `pulumi:"shardLevelMetrics"`
// A map of tags to assign to the resource.
Tags map[string]string `pulumi:"tags"`
}
// The set of arguments for constructing a Stream resource.
type StreamArgs struct {
// The Amazon Resource Name (ARN) specifying the Stream (same as `id`)
Arn pulumi.StringPtrInput
// The encryption type to use. The only acceptable values are `NONE` or `KMS`. The default value is `NONE`.
EncryptionType pulumi.StringPtrInput
// A boolean that indicates all registered consumers should be deregistered from the stream so that the stream can be destroyed without error. The default value is `false`.
EnforceConsumerDeletion pulumi.BoolPtrInput
// The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias `alias/aws/kinesis`.
KmsKeyId pulumi.StringPtrInput
// A name to identify the stream. This is unique to the AWS account and region the Stream is created in.
Name pulumi.StringPtrInput
// Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 8760 hours. Minimum value is 24. Default is 24.
RetentionPeriod pulumi.IntPtrInput
// The number of shards that the stream will use.
// Amazon has guidelines for specifying the Stream size that should be referenced when creating a Kinesis stream. See [Amazon Kinesis Streams](https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html) for more.
ShardCount pulumi.IntInput
// A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html) for more. Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable.
ShardLevelMetrics pulumi.StringArrayInput
// A map of tags to assign to the resource.
Tags pulumi.StringMapInput
}
func (StreamArgs) ElementType() reflect.Type {
return reflect.TypeOf((*streamArgs)(nil)).Elem()
}
type StreamInput interface {
pulumi.Input
ToStreamOutput() StreamOutput
ToStreamOutputWithContext(ctx context.Context) StreamOutput
}
func (*Stream) ElementType() reflect.Type {
return reflect.TypeOf((*Stream)(nil))
}
func (i *Stream) ToStreamOutput() StreamOutput {
return i.ToStreamOutputWithContext(context.Background())
}
func (i *Stream) ToStreamOutputWithContext(ctx context.Context) StreamOutput {
return pulumi.ToOutputWithContext(ctx, i).(StreamOutput)
}
func (i *Stream) ToStreamPtrOutput() StreamPtrOutput {
return i.ToStreamPtrOutputWithContext(context.Background())
}
func (i *Stream) ToStreamPtrOutputWithContext(ctx context.Context) StreamPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(StreamPtrOutput)
}
type StreamPtrInput interface {
pulumi.Input
ToStreamPtrOutput() StreamPtrOutput
ToStreamPtrOutputWithContext(ctx context.Context) StreamPtrOutput
}
type streamPtrType StreamArgs
func (*streamPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**Stream)(nil))
}
func (i *streamPtrType) ToStreamPtrOutput() StreamPtrOutput {
return i.ToStreamPtrOutputWithContext(context.Background())
}
func (i *streamPtrType) ToStreamPtrOutputWithContext(ctx context.Context) StreamPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(StreamPtrOutput)
}
// StreamArrayInput is an input type that accepts StreamArray and StreamArrayOutput values.
// You can construct a concrete instance of `StreamArrayInput` via:
//
// StreamArray{ StreamArgs{...} }
type StreamArrayInput interface {
pulumi.Input
ToStreamArrayOutput() StreamArrayOutput
ToStreamArrayOutputWithContext(context.Context) StreamArrayOutput
}
type StreamArray []StreamInput
func (StreamArray) ElementType() reflect.Type {
return reflect.TypeOf(([]*Stream)(nil))
}
func (i StreamArray) ToStreamArrayOutput() StreamArrayOutput {
return i.ToStreamArrayOutputWithContext(context.Background())
}
func (i StreamArray) ToStreamArrayOutputWithContext(ctx context.Context) StreamArrayOutput {
return pulumi.ToOutputWithContext(ctx, i).(StreamArrayOutput)
}
// StreamMapInput is an input type that accepts StreamMap and StreamMapOutput values.
// You can construct a concrete instance of `StreamMapInput` via:
//
// StreamMap{ "key": StreamArgs{...} }
type StreamMapInput interface {
pulumi.Input
ToStreamMapOutput() StreamMapOutput
ToStreamMapOutputWithContext(context.Context) StreamMapOutput
}
type StreamMap map[string]StreamInput
func (StreamMap) ElementType() reflect.Type {
return reflect.TypeOf((map[string]*Stream)(nil))
}
func (i StreamMap) ToStreamMapOutput() StreamMapOutput {
return i.ToStreamMapOutputWithContext(context.Background())
}
func (i StreamMap) ToStreamMapOutputWithContext(ctx context.Context) StreamMapOutput {
return pulumi.ToOutputWithContext(ctx, i).(StreamMapOutput)
}
type StreamOutput struct {
*pulumi.OutputState
}
func (StreamOutput) ElementType() reflect.Type {
return reflect.TypeOf((*Stream)(nil))
}
func (o StreamOutput) ToStreamOutput() StreamOutput {
return o
}
func (o StreamOutput) ToStreamOutputWithContext(ctx context.Context) StreamOutput {
return o
}
func (o StreamOutput) ToStreamPtrOutput() StreamPtrOutput {
return o.ToStreamPtrOutputWithContext(context.Background())
}
func (o StreamOutput) ToStreamPtrOutputWithContext(ctx context.Context) StreamPtrOutput {
return o.ApplyT(func(v Stream) *Stream {
return &v
}).(StreamPtrOutput)
}
type StreamPtrOutput struct {
*pulumi.OutputState
}
func (StreamPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**Stream)(nil))
}
func (o StreamPtrOutput) ToStreamPtrOutput() StreamPtrOutput {
return o
}
func (o StreamPtrOutput) ToStreamPtrOutputWithContext(ctx context.Context) StreamPtrOutput {
return o
}
type StreamArrayOutput struct{ *pulumi.OutputState }
func (StreamArrayOutput) ElementType() reflect.Type {
return reflect.TypeOf((*[]Stream)(nil))
}
func (o StreamArrayOutput) ToStreamArrayOutput() StreamArrayOutput {
return o
}
func (o StreamArrayOutput) ToStreamArrayOutputWithContext(ctx context.Context) StreamArrayOutput {
return o
}
func (o StreamArrayOutput) Index(i pulumi.IntInput) StreamOutput {
return pulumi.All(o, i).ApplyT(func(vs []interface{}) Stream {
return vs[0].([]Stream)[vs[1].(int)]
}).(StreamOutput)
}
type StreamMapOutput struct{ *pulumi.OutputState }
func (StreamMapOutput) ElementType() reflect.Type {
return reflect.TypeOf((*map[string]Stream)(nil))
}
func (o StreamMapOutput) ToStreamMapOutput() StreamMapOutput {
return o
}
func (o StreamMapOutput) ToStreamMapOutputWithContext(ctx context.Context) StreamMapOutput {
return o
}
func (o StreamMapOutput) MapIndex(k pulumi.StringInput) StreamOutput {
return pulumi.All(o, k).ApplyT(func(vs []interface{}) Stream {
return vs[0].(map[string]Stream)[vs[1].(string)]
}).(StreamOutput)
}
func init() {
pulumi.RegisterOutputType(StreamOutput{})
pulumi.RegisterOutputType(StreamPtrOutput{})
pulumi.RegisterOutputType(StreamArrayOutput{})
pulumi.RegisterOutputType(StreamMapOutput{})
}
| {
return nil, errors.New("invalid value for required argument 'ShardCount'")
} |
synchronizer.test.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import * as assert from 'assert';
import { IUserDataSyncStoreService, SyncResource, SyncStatus, IUserDataSyncResourceEnablementService, IRemoteUserData, ISyncData, Change, USER_DATA_SYNC_SCHEME, IUserDataManifest } from 'vs/platform/userDataSync/common/userDataSync';
import { UserDataSyncClient, UserDataSyncTestServer } from 'vs/platform/userDataSync/test/common/userDataSyncClient';
import { DisposableStore, toDisposable } from 'vs/base/common/lifecycle';
import { AbstractSynchroniser, ISyncResourcePreview, IResourcePreview } from 'vs/platform/userDataSync/common/abstractSynchronizer';
import { Barrier } from 'vs/base/common/async';
import { Emitter, Event } from 'vs/base/common/event';
import { CancellationToken } from 'vs/base/common/cancellation';
import { URI } from 'vs/base/common/uri';
interface ITestResourcePreview extends IResourcePreview {
ref?: string;
}
const resource = URI.from({ scheme: USER_DATA_SYNC_SCHEME, authority: 'testResource', path: `/current.json` });
class TestSynchroniser extends AbstractSynchroniser {
syncBarrier: Barrier = new Barrier();
syncResult: { hasConflicts: boolean, hasError: boolean } = { hasConflicts: false, hasError: false };
onDoSyncCall: Emitter<void> = this._register(new Emitter<void>());
failWhenGettingLatestRemoteUserData: boolean = false;
readonly resource: SyncResource = SyncResource.Settings;
protected readonly version: number = 1;
private cancelled: boolean = false;
protected getLatestRemoteUserData(manifest: IUserDataManifest | null, lastSyncUserData: IRemoteUserData | null): Promise<IRemoteUserData> {
if (this.failWhenGettingLatestRemoteUserData) {
throw new Error();
}
return super.getLatestRemoteUserData(manifest, lastSyncUserData);
}
protected async doSync(remoteUserData: IRemoteUserData, lastSyncUserData: IRemoteUserData | null, apply: boolean): Promise<SyncStatus> {
this.cancelled = false;
this.onDoSyncCall.fire();
await this.syncBarrier.wait();
if (this.cancelled) {
return SyncStatus.Idle;
}
return super.doSync(remoteUserData, lastSyncUserData, apply);
}
protected async generatePullPreview(remoteUserData: IRemoteUserData, lastSyncUserData: IRemoteUserData | null, token: CancellationToken): Promise<ITestResourcePreview[]> {
return [{ localContent: null, localResource: resource, remoteContent: null, remoteResource: resource, previewContent: null, previewResource: resource, localChange: Change.None, remoteChange: Change.None, hasConflicts: this.syncResult.hasConflicts }];
}
protected async generatePushPreview(remoteUserData: IRemoteUserData, lastSyncUserData: IRemoteUserData | null, token: CancellationToken): Promise<ITestResourcePreview[]> {
return [{ localContent: null, localResource: resource, remoteContent: null, remoteResource: resource, previewContent: null, previewResource: resource, localChange: Change.None, remoteChange: Change.None, hasConflicts: this.syncResult.hasConflicts }];
}
protected async generateReplacePreview(syncData: ISyncData, remoteUserData: IRemoteUserData, lastSyncUserData: IRemoteUserData | null): Promise<ITestResourcePreview[]> {
return [{ localContent: null, localResource: resource, remoteContent: null, remoteResource: resource, previewContent: null, previewResource: resource, localChange: Change.None, remoteChange: Change.None, hasConflicts: this.syncResult.hasConflicts }];
}
protected async generateSyncPreview(remoteUserData: IRemoteUserData, lastSyncUserData: IRemoteUserData | null, token: CancellationToken): Promise<ITestResourcePreview[]> {
if (this.syncResult.hasError) {
throw new Error('failed');
}
return [{ localContent: null, localResource: resource, remoteContent: null, remoteResource: resource, previewContent: null, previewResource: resource, localChange: Change.None, remoteChange: Change.None, hasConflicts: this.syncResult.hasConflicts, ref: remoteUserData.ref }];
}
protected async updatePreviewWithConflict(preview: ISyncResourcePreview, conflictResource: URI, conflictContent: string): Promise<ISyncResourcePreview> {
return preview;
}
protected async applyPreview(remoteUserData: IRemoteUserData, lastSyncUserData: IRemoteUserData | null, preview: ITestResourcePreview[], forcePush: boolean): Promise<void> {
if (preview[0]?.ref) {
await this.applyRef(preview[0].ref);
}
}
async applyRef(ref: string): Promise<void> {
const remoteUserData = await this.updateRemoteUserData('', ref);
await this.updateLastSyncUserData(remoteUserData);
}
async stop(): Promise<void> {
this.cancelled = true;
this.syncBarrier.open();
super.stop();
}
async triggerLocalChange(): Promise<void> {
super.triggerLocalChange();
}
onDidTriggerLocalChangeCall: Emitter<void> = this._register(new Emitter<void>());
protected async doTriggerLocalChange(): Promise<void> {
await super.doTriggerLocalChange();
this.onDidTriggerLocalChangeCall.fire();
}
}
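// Illustrative use of the helper above (an assumed flow, mirroring the tests
// below): opening the barrier lets doSync() run to completion, while leaving
// it closed parks the synchroniser in the Syncing state.
//
//   const synchroniser = instantiationService.createInstance(TestSynchroniser, SyncResource.Settings);
//   synchroniser.syncBarrier.open();
//   await synchroniser.sync(await client.manifest());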
suite('TestSynchronizer', () => {
const disposableStore = new DisposableStore();
const server = new UserDataSyncTestServer();
let client: UserDataSyncClient;
let userDataSyncStoreService: IUserDataSyncStoreService;
setup(async () => {
client = disposableStore.add(new UserDataSyncClient(server));
await client.setUp();
userDataSyncStoreService = client.instantiationService.get(IUserDataSyncStoreService);
disposableStore.add(toDisposable(() => userDataSyncStoreService.clear()));
});
teardown(() => disposableStore.clear());
test('status is syncing', async () => {
const testObject: TestSynchroniser = client.instantiationService.createInstance(TestSynchroniser, SyncResource.Settings);
const actual: SyncStatus[] = [];
disposableStore.add(testObject.onDidChangeStatus(status => actual.push(status)));
const promise = Event.toPromise(testObject.onDoSyncCall.event);
testObject.sync(await client.manifest());
await promise;
assert.deepEqual(actual, [SyncStatus.Syncing]);
assert.deepEqual(testObject.status, SyncStatus.Syncing);
testObject.stop();
});
test('status is set correctly when sync is finished', async () => {
const testObject: TestSynchroniser = client.instantiationService.createInstance(TestSynchroniser, SyncResource.Settings);
testObject.syncBarrier.open();
const actual: SyncStatus[] = [];
disposableStore.add(testObject.onDidChangeStatus(status => actual.push(status)));
await testObject.sync(await client.manifest());
assert.deepEqual(actual, [SyncStatus.Syncing, SyncStatus.Idle]); | const testObject: TestSynchroniser = client.instantiationService.createInstance(TestSynchroniser, SyncResource.Settings);
testObject.syncResult = { hasConflicts: true, hasError: false };
testObject.syncBarrier.open();
const actual: SyncStatus[] = [];
disposableStore.add(testObject.onDidChangeStatus(status => actual.push(status)));
await testObject.sync(await client.manifest());
assert.deepEqual(actual, [SyncStatus.Syncing, SyncStatus.HasConflicts]);
assert.deepEqual(testObject.status, SyncStatus.HasConflicts);
});
test('status is set correctly when sync has errors', async () => {
const testObject: TestSynchroniser = client.instantiationService.createInstance(TestSynchroniser, SyncResource.Settings);
testObject.syncResult = { hasError: true, hasConflicts: false };
testObject.syncBarrier.open();
const actual: SyncStatus[] = [];
disposableStore.add(testObject.onDidChangeStatus(status => actual.push(status)));
try {
await testObject.sync(await client.manifest());
assert.fail('Should fail');
} catch (e) {
assert.deepEqual(actual, [SyncStatus.Syncing, SyncStatus.Idle]);
assert.deepEqual(testObject.status, SyncStatus.Idle);
}
});
test('sync should not run if already syncing', async () => {
const testObject: TestSynchroniser = client.instantiationService.createInstance(TestSynchroniser, SyncResource.Settings);
const promise = Event.toPromise(testObject.onDoSyncCall.event);
testObject.sync(await client.manifest());
await promise;
const actual: SyncStatus[] = [];
disposableStore.add(testObject.onDidChangeStatus(status => actual.push(status)));
await testObject.sync(await client.manifest());
assert.deepEqual(actual, []);
assert.deepEqual(testObject.status, SyncStatus.Syncing);
await testObject.stop();
});
test('sync should not run if disabled', async () => {
const testObject: TestSynchroniser = client.instantiationService.createInstance(TestSynchroniser, SyncResource.Settings);
client.instantiationService.get(IUserDataSyncResourceEnablementService).setResourceEnablement(testObject.resource, false);
const actual: SyncStatus[] = [];
disposableStore.add(testObject.onDidChangeStatus(status => actual.push(status)));
await testObject.sync(await client.manifest());
assert.deepEqual(actual, []);
assert.deepEqual(testObject.status, SyncStatus.Idle);
});
test('sync should not run if there are conflicts', async () => {
const testObject: TestSynchroniser = client.instantiationService.createInstance(TestSynchroniser, SyncResource.Settings);
testObject.syncResult = { hasConflicts: true, hasError: false };
testObject.syncBarrier.open();
await testObject.sync(await client.manifest());
const actual: SyncStatus[] = [];
disposableStore.add(testObject.onDidChangeStatus(status => actual.push(status)));
await testObject.sync(await client.manifest());
assert.deepEqual(actual, []);
assert.deepEqual(testObject.status, SyncStatus.HasConflicts);
});
test('request latest data on precondition failure', async () => {
const testObject: TestSynchroniser = client.instantiationService.createInstance(TestSynchroniser, SyncResource.Settings);
// Sync once
testObject.syncBarrier.open();
await testObject.sync(await client.manifest());
testObject.syncBarrier = new Barrier();
// update remote data before syncing so that 412 is thrown by server
const disposable = testObject.onDoSyncCall.event(async () => {
disposable.dispose();
await testObject.applyRef(ref);
server.reset();
testObject.syncBarrier.open();
});
// Start syncing
const manifest = await client.manifest();
const ref = manifest!.latest![testObject.resource];
await testObject.sync(await client.manifest());
assert.deepEqual(server.requests, [
{ type: 'POST', url: `${server.url}/v1/resource/${testObject.resource}`, headers: { 'If-Match': ref } },
{ type: 'GET', url: `${server.url}/v1/resource/${testObject.resource}/latest`, headers: {} },
{ type: 'POST', url: `${server.url}/v1/resource/${testObject.resource}`, headers: { 'If-Match': `${parseInt(ref) + 1}` } },
]);
});
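// The request list asserted above documents the retry flow: the first POST is
// rejected with 412 (precondition failed) because the remote ref moved, the
// synchroniser re-fetches /latest, then retries the POST with the fresh ref.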
test('no requests are made to server when local change is triggered', async () => {
const testObject: TestSynchroniser = client.instantiationService.createInstance(TestSynchroniser, SyncResource.Settings);
testObject.syncBarrier.open();
await testObject.sync(await client.manifest());
server.reset();
const promise = Event.toPromise(testObject.onDidTriggerLocalChangeCall.event);
await testObject.triggerLocalChange();
await promise;
assert.deepEqual(server.requests, []);
});
test('status is reset when getting latest remote data fails', async () => {
const testObject: TestSynchroniser = client.instantiationService.createInstance(TestSynchroniser, SyncResource.Settings);
testObject.failWhenGettingLatestRemoteUserData = true;
try {
await testObject.sync(await client.manifest());
assert.fail('Should throw an error');
} catch (error) {
}
assert.equal(testObject.status, SyncStatus.Idle);
});
}); | assert.deepEqual(testObject.status, SyncStatus.Idle);
});
test('status is set correctly when sync has conflicts', async () => { |
pulsepoint.go | package pulsepoint
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"github.com/prebid/prebid-server/adapters"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/errortypes"
"github.com/prebid/prebid-server/openrtb_ext"
"github.com/mxmCherry/openrtb/v15/openrtb2"
)
type PulsePointAdapter struct {
URI string
}
// Builds an instance of PulsePointAdapter
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter) (adapters.Bidder, error) {
bidder := &PulsePointAdapter{
URI: config.Endpoint,
}
return bidder, nil
}
func (a *PulsePointAdapter) MakeRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) {
errs := make([]error, 0, len(request.Imp))
var err error
pubID := ""
imps := make([]openrtb2.Imp, 0, len(request.Imp))
for i := 0; i < len(request.Imp); i++ {
imp := request.Imp[i]
var bidderExt adapters.ExtImpBidder
if err = json.Unmarshal(imp.Ext, &bidderExt); err != nil {
errs = append(errs, &errortypes.BadInput{
Message: err.Error(),
})
continue
}
var pulsepointExt openrtb_ext.ExtImpPulsePoint
if err = json.Unmarshal(bidderExt.Bidder, &pulsepointExt); err != nil {
errs = append(errs, &errortypes.BadInput{
Message: err.Error(),
})
continue
}
// parse pubid and keep it for reference
if pubID == "" && pulsepointExt.PubID > 0 {
pubID = strconv.Itoa(pulsepointExt.PubID)
}
// tag id to be sent
imp.TagID = strconv.Itoa(pulsepointExt.TagID)
imps = append(imps, imp)
}
// verify there are valid impressions
if len(imps) == 0 {
return nil, errs
}
// add the publisher id from ext to the site.pub.id or app.pub.id
if request.Site != nil {
site := *request.Site
if site.Publisher != nil {
publisher := *site.Publisher
publisher.ID = pubID
site.Publisher = &publisher
} else {
site.Publisher = &openrtb2.Publisher{ID: pubID}
}
request.Site = &site
} else if request.App != nil |
request.Imp = imps
reqJSON, err := json.Marshal(request)
if err != nil {
errs = append(errs, err)
return nil, errs
}
headers := http.Header{}
headers.Add("Content-Type", "application/json;charset=utf-8")
headers.Add("Accept", "application/json")
return []*adapters.RequestData{{
Method: "POST",
Uri: a.URI,
Body: reqJSON,
Headers: headers,
}}, errs
}
func (a *PulsePointAdapter) MakeBids(internalRequest *openrtb2.BidRequest, externalRequest *adapters.RequestData, response *adapters.ResponseData) (*adapters.BidderResponse, []error) {
// passback
if response.StatusCode == http.StatusNoContent {
return nil, nil
}
// bad requests
if response.StatusCode == http.StatusBadRequest {
return nil, []error{&errortypes.BadInput{
Message: fmt.Sprintf("Bad user input: HTTP status %d", response.StatusCode),
}}
}
// error
if response.StatusCode != http.StatusOK {
return nil, []error{&errortypes.BadServerResponse{
Message: fmt.Sprintf("Bad server response: HTTP status %d", response.StatusCode),
}}
}
// parse response
var bidResp openrtb2.BidResponse
if err := json.Unmarshal(response.Body, &bidResp); err != nil {
return nil, []error{err}
}
bidResponse := adapters.NewBidderResponseWithBidsCapacity(5)
// map imps by id
impsByID := make(map[string]openrtb2.Imp)
for i := 0; i < len(internalRequest.Imp); i++ {
impsByID[internalRequest.Imp[i].ID] = internalRequest.Imp[i]
}
var errs []error
for _, sb := range bidResp.SeatBid {
for i := 0; i < len(sb.Bid); i++ {
bid := sb.Bid[i]
imp, found := impsByID[bid.ImpID]
bidType := getBidType(imp)
// the address of a local variable is never nil, so check map membership instead
if found && bidType != "" {
bidResponse.Bids = append(bidResponse.Bids, &adapters.TypedBid{
Bid: &bid,
BidType: bidType,
})
}
}
}
return bidResponse, errs
}
func getBidType(imp openrtb2.Imp) openrtb_ext.BidType {
// derive the bidtype purely from the impression itself
if imp.Banner != nil {
return openrtb_ext.BidTypeBanner
} else if imp.Video != nil {
return openrtb_ext.BidTypeVideo
} else if imp.Audio != nil {
return openrtb_ext.BidTypeAudio
} else if imp.Native != nil {
return openrtb_ext.BidTypeNative
}
return ""
}
| {
app := *request.App
if app.Publisher != nil {
publisher := *app.Publisher
publisher.ID = pubID
app.Publisher = &publisher
} else {
app.Publisher = &openrtb2.Publisher{ID: pubID}
}
request.App = &app
} |
token.go | package filetoken
import (
"encoding/csv"
"errors"
"io"
"os"
"k8s.io/apiserver/pkg/authentication/user"
)
type TokenAuthenticator struct {
path string
tokens map[string]*user.DefaultInfo
}
func | (path string) (*TokenAuthenticator, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
tokens := make(map[string]*user.DefaultInfo)
reader := csv.NewReader(file)
for {
record, err := reader.Read()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
if len(record) < 2 {
continue
}
obj := &user.DefaultInfo{
Name: record[1],
}
if len(record) > 2 {
obj.UID = record[2]
}
tokens[record[0]] = obj
}
return &TokenAuthenticator{
path: file.Name(),
tokens: tokens,
}, nil
}
func (a *TokenAuthenticator) AuthenticateToken(value string) (user.Info, bool, error) {
// rename the local so it does not shadow the imported "user" package
info, ok := a.tokens[value]
if !ok {
return nil, false, errors.New("invalid token")
}
return info, true, nil
}
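// Expected token file shape (illustrative values): one CSV record per line,
// "token,username[,uid]"; records with fewer than two fields are skipped.
//
//	s3cret-token,alice,uid-1
//	other-token,bob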
| NewTokenAuthenticator |
extHostLogService.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { join } from 'vs/base/common/paths';
import { ILogService, DelegatedLogService, LogLevel } from 'vs/platform/log/common/log';
import { createSpdLogService } from 'vs/platform/log/node/spdlogService';
import { ExtHostLogServiceShape } from 'vs/workbench/api/node/extHost.protocol';
import { ExtensionHostLogFileName } from 'vs/workbench/services/extensions/common/extensions';
import { URI } from 'vs/base/common/uri';
import { CanonicalExtensionIdentifier } from 'vs/platform/extensions/common/extensions';
export class | extends DelegatedLogService implements ILogService, ExtHostLogServiceShape {
private _logsPath: string;
readonly logFile: URI;
constructor(
logLevel: LogLevel,
logsPath: string,
) {
super(createSpdLogService(ExtensionHostLogFileName, logLevel, logsPath));
this._logsPath = logsPath;
this.logFile = URI.file(join(logsPath, `${ExtensionHostLogFileName}.log`));
}
$setLevel(level: LogLevel): void {
this.setLevel(level);
}
getLogDirectory(extensionID: CanonicalExtensionIdentifier): string {
return join(this._logsPath, extensionID.value);
}
}
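// Minimal usage sketch (the literal values below are assumptions, not
// VS Code defaults):
//
//   const logService = new ExtHostLogService(LogLevel.Info, '/tmp/exthost-logs');
//   logService.info('extension host started');   // inherited from DelegatedLogService
//   logService.getLogDirectory(extensionId);     // -> <logsPath>/<extension id>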
| ExtHostLogService |
zlibCompressor.py | import gzip, zlib, base64
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__copyright__ = """\
(c). Copyright 2008-2020, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
def decompress_zlib(s):
return zlib.decompress(base64.decodestring(s), 15)
def | (s):
return base64.encodestring(zlib.compress(s, 9))
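# Illustrative round trip using the two helpers above:
#   packed = zlib_compress('hello world')
#   assert decompress_zlib(packed) == 'hello world'
# Note: base64.encodestring/decodestring are the Python 2 spellings; Python 3
# renamed them encodebytes/decodebytes.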
| zlib_compress |
sagas.ts | import { all, fork, call, put, select, takeEvery } from 'redux-saga/effects'
import { waitForBackendSetup } from 'store/backend/sagas'
import { waitForValue } from 'utils/sagaHelpers'
import { watchServiceSelectionErrors } from './errorSagas'
import AudiusBackend from 'services/AudiusBackend'
import {
fetchServices,
fetchServicesSucceeded,
fetchServicesFailed,
setSelected,
setSelectedSucceeded,
setSelectedFailed,
setSyncing as setSyncingAction,
Service
} from './slice'
import * as cacheActions from 'store/cache/actions'
import { getAccountUser } from 'store/account/selectors'
import { getSecondaries, getSelectedServices } from './selectors'
import { Kind } from 'store/types'
export function* watchFetchServices() {
yield takeEvery(fetchServices.type, function* () {
yield call(waitForBackendSetup)
const currentUser = yield call(waitForValue, getAccountUser)
try {
let primary: string,
secondaries: string[],
services: { [name: string]: Service }
if (currentUser.creator_node_endpoint) {
services = yield call(AudiusBackend.getSelectableCreatorNodes)
const userEndpoints = currentUser.creator_node_endpoint.split(',')
primary = userEndpoints[0]
secondaries = userEndpoints.slice(1)
// Filter out a secondary that is unhealthy.
secondaries = secondaries.filter(Boolean).filter(s => services[s])
} else {
const autoselect = yield call(AudiusBackend.autoSelectCreatorNodes)
primary = autoselect.primary
secondaries = autoselect.secondaries
services = autoselect.services
yield call(
AudiusBackend.creatorNodeSelectionCallback,
primary,
secondaries,
'autoselect'
)
}
if (!primary || !secondaries || secondaries.length < 1) {
throw new Error(
`Too few services found. Primary: ${primary}, Secondaries: ${secondaries}`
)
}
yield put(fetchServicesSucceeded({ services, primary, secondaries }))
// Check if secondaries are syncing
yield all(
secondaries.map(s => {
return fork(updateSyncing, s)
})
)
} catch (e) {
console.error(e)
yield put(fetchServicesFailed())
}
})
}
const checkIsSyncing = async (service: string) => {
return new Promise(resolve => {
const interval = setInterval(async () => {
const isSyncing = await AudiusBackend.isCreatorNodeSyncing(service)
if (!isSyncing) {
clearInterval(interval)
resolve()
}
}, 1000)
})
}
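// checkIsSyncing polls the creator node once a second and resolves only when
// it stops reporting a sync in progress; updateSyncing below flips the
// per-service `isSyncing` flag on and off around that wait.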
function* updateSyncing(service: string) {
yield put(setSyncingAction({ service, isSyncing: true }))
yield call(checkIsSyncing, service)
yield put(setSyncingAction({ service, isSyncing: false }))
}
function* setSyncing(service: string) {
yield put(setSyncingAction({ service, isSyncing: true }))
}
function* watchSetSelected() {
yield takeEvery(setSelected.type, function* (
action: ReturnType<typeof setSelected>
) {
const user = yield call(waitForValue, getAccountUser)
const currentSecondaries = yield select(getSecondaries)
const { primary, secondaries } = action.payload
yield call(
AudiusBackend.creatorNodeSelectionCallback,
primary,
secondaries,
'manual'
)
const newEndpoint = `${primary},${secondaries.join(',')}`
const [oldPrimary, ...oldSecondaries] = yield select(getSelectedServices)
// Update the endpoint for the user
yield put(
setSelectedSucceeded({
primary,
secondaries
})
)
yield put(
cacheActions.update(Kind.USERS, [
{ id: user.user_id, metadata: { creator_node_endpoint: newEndpoint } }
])
)
yield all(
secondaries.map(s => {
if (currentSecondaries.includes(s)) return null
return fork(setSyncing, s)
})
)
yield call(AudiusBackend.setCreatorNodeEndpoint, primary)
if (user.is_creator) {
user.creator_node_endpoint = newEndpoint
const success = yield call(
AudiusBackend.updateCreator,
user,
user.user_id
)
if (!success) {
yield put(
setSelectedFailed({
primary: oldPrimary,
secondaries: oldSecondaries
})
)
}
}
// Any new secondaries need to check if they are syncing
yield all(
secondaries.map(s => {
if (currentSecondaries.includes(s)) return null
return fork(updateSyncing, s)
})
)
})
}
const sagas = () => { |
export default sagas | return [watchFetchServices, watchSetSelected, watchServiceSelectionErrors]
} |
unchecked_update_many.rs | use indoc::indoc;
use query_engine_tests::*;
#[test_suite]
mod unchecked_update_many {
fn schema_1() -> String {
let schema = indoc! {
r#"model ModelA {
#id(id, Int, @id)
b_id_1 String
b_id_2 String
c_id_1 String?
c_id_2 String?
b ModelB @relation(fields: [b_id_1, b_id_2], references: [uniq_1, uniq_2])
c ModelC? @relation(fields: [c_id_1, c_id_2], references: [uniq_1, uniq_2])
}
model ModelB {
uniq_1 String
uniq_2 String
a ModelA[]
@@unique([uniq_1, uniq_2])
}
model ModelC {
uniq_1 String
uniq_2 String
a ModelA[]
@@unique([uniq_1, uniq_2])
}"#
};
schema.to_owned()
}
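// The schema above inlines the ModelB/ModelC relations on ModelA through the
// compound uniques (uniq_1, uniq_2), so b_id_* and c_id_* are the raw
// relation scalars that the unchecked mutations below write to directly.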
// "Unchecked update many" should "allow writing inlined relation scalars"
#[connector_test(schema(schema_1))]
async fn allow_write_non_prent_inline_rel_sclrs(runner: Runner) -> TestResult<()> |
fn schema_2() -> String {
let schema = indoc! {
r#"model ModelA {
#id(id, Int, @id)
int Int @default(autoincrement())
@@index([int])
}"#
};
schema.to_owned()
}
// "Unchecked updates" should "allow to write to autoincrement IDs directly"
#[connector_test(schema(schema_2), exclude(SqlServer, Sqlite))]
async fn allow_write_autoinc_id(runner: Runner) -> TestResult<()> {
run_query!(&runner, r#"mutation { createOneModelA(data: { id: 1 }) { id } }"#);
run_query!(&runner, r#"mutation { createOneModelA(data: { id: 2 }) { id } }"#);
insta::assert_snapshot!(
run_query!(&runner, r#"mutation {
updateManyModelA(where: { id: { not: 0 }}, data: { int: 111 }) {
count
}
}"#),
@r###"{"data":{"updateManyModelA":{"count":2}}}"###
);
Ok(())
}
}
| {
run_query!(
&runner,
r#"mutation {
createOneModelA(data: {
id: 1
b: { create: { uniq_1: "b1_1", uniq_2: "b1_2" }}
c: { create: { uniq_1: "c1_1", uniq_2: "c1_2" }}
}) {
id
}
}"#
);
run_query!(
&runner,
r#"mutation {
createOneModelA(data: {
id: 2
b: { create: { uniq_1: "b2_1", uniq_2: "b2_2" }}
c: { create: { uniq_1: "c2_1", uniq_2: "c2_2" }}
}) {
id
}
}"#
);
// Connect all As to b2 and c2
insta::assert_snapshot!(
run_query!(&runner, r#"mutation {
updateManyModelA(where: { id: { not: 0 } }, data: {
b_id_1: "b2_1"
b_id_2: "b2_2"
c_id_1: "c2_1"
c_id_2: "c2_2"
}) {
count
}
}"#),
@r###"{"data":{"updateManyModelA":{"count":2}}}"###
);
insta::assert_snapshot!(
run_query!(&runner, r#"mutation {
updateManyModelA(where: { id: { not: 0 }}, data: {
c_id_1: null
}) {
count
}
}"#),
@r###"{"data":{"updateManyModelA":{"count":2}}}"###
);
Ok(())
} |
stats.rs | use super::Floats2d;
use ndarray::{parallel::prelude::*, Axis};
use ndarray_stats::QuantileExt;
use wasm_bindgen::prelude::*;
use crate::one_dimensional::floats::Floats1d;
#[wasm_bindgen]
impl Floats2d {
/// Get the maximum element in the array
pub fn max(&self) -> f64 |
/// Get the maximum element of each column in the matrix
#[wasm_bindgen(js_name = maxC)]
pub fn max_c(&self) -> Floats1d {
let mut vec = Vec::new();
self.data
.axis_iter(Axis(1)) // Axis(1) yields one lane per column
.into_par_iter()
.map(|x| *x.max().unwrap())
.collect_into_vec(&mut vec);
Floats1d {
data: ndarray::Array1::from_vec(vec),
}
}
/// Get the maximum element of each row in the matrix
#[wasm_bindgen(js_name = maxR)]
pub fn max_r(&self) -> Floats1d {
let mut vec = Vec::new();
self.data
.axis_iter(Axis(0)) // Axis(0) yields one lane per row
.into_par_iter()
.map(|x| *x.max().unwrap())
.collect_into_vec(&mut vec);
Floats1d {
data: ndarray::Array1::from_vec(vec),
}
}
/// Get the minimum element in the array
pub fn min(&self) -> f64 {
*self
.data
.into_par_iter()
.min_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap()
}
/// Get the minimum element of each column in the matrix
#[wasm_bindgen(js_name = minC)]
pub fn min_c(&self) -> Floats1d {
let mut vec = Vec::new();
self.data
.axis_iter(Axis(1)) // Axis(1) yields one lane per column
.into_par_iter()
.map(|x| *x.min().unwrap())
.collect_into_vec(&mut vec);
Floats1d {
data: ndarray::Array1::from_vec(vec),
}
}
/// Get the minimum element of each row in the matrix
#[wasm_bindgen(js_name = minR)]
pub fn min_r(&self) -> Floats1d {
let mut vec = Vec::new();
self.data
.axis_iter(Axis(0)) // Axis(0) yields one lane per row
.into_par_iter()
.map(|x| *x.min().unwrap())
.collect_into_vec(&mut vec);
Floats1d {
data: ndarray::Array1::from_vec(vec),
}
}
/// Get the variance of all the elements in the array
#[wasm_bindgen]
pub fn var(&self, dof: f64) -> f64 {
self.data.var(dof)
}
/// Get the variance of each column in the matrix
#[wasm_bindgen(js_name = varC)]
pub fn var_c(&self, dof: f64) -> Floats1d {
Floats1d {
data: self.data.var_axis(Axis(0), dof),
}
}
/// Get the variance of each row in the matrix
#[wasm_bindgen(js_name = varR)]
pub fn var_r(&self, dof: f64) -> Floats1d {
Floats1d {
data: self.data.var_axis(Axis(1), dof),
}
}
/// Get the standard deviation of all the elements in the array
#[wasm_bindgen(js_name = std)]
pub fn std(&self, dof: f64) -> f64 {
self.data.std(dof)
}
/// Get the standard deviation of each column in the matrix
#[wasm_bindgen(js_name = stdC)]
pub fn std_c(&self, dof: f64) -> Floats1d {
Floats1d {
data: self.data.std_axis(Axis(0), dof),
}
}
/// Get the standard deviation of each row in the matrix
#[wasm_bindgen(js_name = stdR)]
pub fn std_r(&self, dof: f64) -> Floats1d {
Floats1d {
data: self.data.std_axis(Axis(1), dof),
}
}
}
| {
*self
.data
.into_par_iter()
.max_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap()
} |
__f.py | import bpy
import sys
sys.path.append("/home/vpoblete/yetzabethg/New Folder/") # directory containing the control modules
import handctrl as hc
def d | f,m,r): # move specific fingers
y1='thumb.01.R'
y2='f_index.01.R'
y3='f_middle.01.R'
y4='f_ring.01.R'
y5='f_pinky.01.R'
if f==0:
m=0
r=0
elif f==1:
d=bpy.context.object.pose.bones[y1]
elif f==2:
d=bpy.context.object.pose.bones[y2]
elif f==3:
d=bpy.context.object.pose.bones[y3]
elif f==4:
d=bpy.context.object.pose.bones[y4]
elif f==5:
d=bpy.context.object.pose.bones[y5]
else:
print ('error')
## HAND EXTENSION
if m==0:
mov=0
elif m==1: # move the finger away
mov= -0.2
elif m==-1: # bring the finger closer
mov= 0.2
if f==1 or f==5:
mov=0.2
elif f==4:
mov=0.1
else:
print('error')
if f>0 and f<=2:
d.rotation_quaternion[3]=mov
elif f>2 and f<=5:
d.rotation_quaternion[3]=-mov
else:
print ('error')
## HAND ANGLE
if r==1:
if f==1:
d.rotation_quaternion[1]=0.2
else:
d.rotation_quaternion[1]=0.5
elif r==0:
d.rotation_quaternion[1]=0
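# Example (hypothetical values): detRF(2, 1, 0) moves the index finger away
# from the palm (f=2, m=1) and resets its angle (r=0).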
##### HAND CONTROL #####
# Right hand
hc.hRH(3) # HEIGHT (0=REST, 1=STOMACH, 2=CHEST, 3=NECK, 4=FACE, 5=HEAD)
hc.dRH(0) # DISTANCE FROM BODY (0=CENTER, 1=AWAY, 2=OPPOSITE SIDE)
hc.rhF(2,0,0,0,0) # FINGER CONTROL (1=THUMB, 2=INDEX, 3=MIDDLE, 4=RING, 5=PINKY), VALUES FROM 0 (OPEN) TO 6 (CLOSED)
detRF(1,-1,0)
detRF(2,0,1)
detRF(4,-1,0)
| etRF( |
test_image.py | import pytest
import mal_tier_list_bbcode_gen.exceptions as exceptions
from mal_tier_list_bbcode_gen.image import Image
def test_source_direct_url():
|
def test_source_google_drive_file_id():
expected_url = ('https://drive.google.com/uc'
'?id=1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z')
image_url = '1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z'
image = Image('Google Drive', image_url)
assert image.image_url == expected_url
def test_source_google_drive_share_link():
expected_url = ('https://drive.google.com/uc'
'?id=1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z')
image_url = ('https://drive.google.com/file/d/'
'1olKc6TUJ1kPJa7cKWVp7dNZFwHb_0k8Z/view?usp=sharing')
image = Image('Google Drive', image_url)
assert image.image_url == expected_url
def test_source_google_no_file_id():
image_url = ('https://drive.google.com/file/d/view?usp=sharing')
with pytest.raises(exceptions.GoogleDriveSourceError):
Image('Google Drive', image_url)
def test_source_not_valid():
with pytest.raises(exceptions.InvalidImageSourceError,
match=r".*is not a valid image source.*"):
Image('not valid', 'example.com/test.png')
def test_get_bbcode():
image_url = 'example.com/test.png'
expected_bbcode = f'[img]{image_url}[/img]'
image = Image('direct URL', image_url)
assert image.get_bbcode() == expected_bbcode
| image_url = 'example.com/test.png'
image = Image('direct URL', image_url)
assert image.image_url == image_url |
db.py | import sqlite3
import click
from flask import current_app, g
from flask.cli import with_appcontext
def get_db():
if 'db' not in g:
g.db = sqlite3.connect(
'database.db',
detect_types=sqlite3.PARSE_DECLTYPES
)
g.db.row_factory = sqlite3.Row
return g.db
def close_db(e=None):
|
def init_db():
db = get_db()
with current_app.open_resource('schema.sql') as f:
db.executescript(f.read().decode('utf8'))
@click.command('init-db')
@with_appcontext
def init_db_command():
"""Clear the existing data and create new tables."""
init_db()
click.echo('Initialized the database.')
def init_app(app):
app.teardown_appcontext(close_db)
app.cli.add_command(init_db_command)
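# Typical wiring (illustrative): call init_app from the application factory,
# then run `flask init-db` once to create the schema.
#   app = Flask(__name__)
#   init_app(app)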
| db = g.pop('db', None)
if db is not None:
db.close() |
Offer.ts | import {Product} from "./Product"
import {SpecialOfferType} from "./SpecialOfferType"
export class Offer {
public constructor(public readonly offerType: SpecialOfferType,
public readonly product: Product,
public readonly argument: number) {
} | }
} |
getProduct(): Product {
return this.product; |
test_evaluate_subscript.py | import pytest
from hypothesis import given, settings
from hypothesis import strategies as st
from vyper import ast as vy_ast
@pytest.mark.fuzzing
@settings(max_examples=50, deadline=1000)
@given(
idx=st.integers(min_value=0, max_value=9),
array=st.lists(st.integers(), min_size=10, max_size=10),
)
def test_subscript(get_contract, array, idx):
source = """
@public
def foo(array: int128[10], idx: uint256) -> int128:
return array[idx]
""" | new_node = old_node.evaluate()
assert contract.foo(array, idx) == new_node.value | contract = get_contract(source)
vyper_ast = vy_ast.parse_to_ast(f"{array}[{idx}]")
old_node = vyper_ast.body[0].value |
retweet.controller.ts | import { ReTweet } from "../models/retweet.model";
import { Tweet } from "../models/tweet.model";
export default class RetweetController {
static getAll = async (): Promise<ReTweet[]> => {
return await ReTweet.find({ relations: ["post"] });
};
static addRetweet = async (
username: string,
tweet: Tweet
): Promise<ReTweet> => {
const retweet = ReTweet.create({ username, post: tweet });
return await retweet.save();
}; | } |
|
base_widget.js | var noop = require("../../core/utils/common").noop,
windowUtils = require("../../core/utils/window"),
domAdapter = require("../../core/dom_adapter"),
typeUtils = require("../../core/utils/type"),
each = require("../../core/utils/iterator").each,
version = require("../../core/version"),
_windowResizeCallbacks = require("../../core/utils/resize_callbacks"),
_stringFormat = require("../../core/utils/string").format,
_isObject = require("../../core/utils/type").isObject,
extend = require("../../core/utils/extend").extend,
_floor = Math.floor,
DOMComponent = require("../../core/dom_component"),
helpers = require("./helpers"),
_parseScalar = require("./utils").parseScalar,
errors = require("./errors_warnings"),
_log = errors.log,
rendererModule = require("./renderers/renderer"),
_Layout = require("./layout"),
OPTION_RTL_ENABLED = "rtlEnabled",
SIZED_ELEMENT_CLASS = "dx-sized-element",
_option = DOMComponent.prototype.option;
function | () {
return true;
}
function getFalse() {
return false;
}
function areCanvasesDifferent(canvas1, canvas2) {
return !(canvas1.width === canvas2.width && canvas1.height === canvas2.height &&
canvas1.left === canvas2.left && canvas1.top === canvas2.top && canvas1.right === canvas2.right && canvas1.bottom === canvas2.bottom);
}
function createResizeHandler(callback) {
var timeout,
handler = function() {
clearTimeout(timeout);
timeout = setTimeout(callback, 100);
};
handler.dispose = function() {
clearTimeout(timeout);
return this;
};
return handler;
}
function defaultOnIncidentOccurred(e) {
if(!e.component.hasEvent("incidentOccurred")) {
_log.apply(null, [e.target.id].concat(e.target.args || []));
}
}
var createIncidentOccurred = function(widgetName, eventTrigger) {
return function incidentOccurred(id, args) {
eventTrigger("incidentOccurred", {
target: {
id: id,
type: id[0] === "E" ? "error" : "warning",
args: args,
text: _stringFormat.apply(null, [errors.ERROR_MESSAGES[id]].concat(args || [])),
widget: widgetName,
version: version
}
});
}
};
function pickPositiveValue(values) {
return values.reduce(function(result, value) {
return (value > 0 && !result) ? value : result;
}, 0);
}
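// pickPositiveValue scans left to right and keeps the first positive entry,
// falling back to 0 - e.g. pickPositiveValue([0, 300, 150]) === 300.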
// TODO - Changes handling
// * Provide more validation - something like
// _changes: [{
// code: "THEME",
// options: ["theme"],
// type: "option",
// handler: function () {
// this._setThemeAndRtl();
// }
// }, {
// code: "CONTAINER_SIZE",
// options: ["size", "option"],
// type: "layout",
// handler: function () {
// this._updateSize();
// }
// }]
var getEmptyComponent = function() {
var emptyComponentConfig = {};
emptyComponentConfig.ctor = function(element, options) {
this.callBase(element, options);
var sizedElement = domAdapter.createElement("div");
var width = options && typeUtils.isNumeric(options.width) ? options.width + "px" : "100%";
var height = options && typeUtils.isNumeric(options.height) ? options.height + "px" : this._getDefaultSize().height + "px";
domAdapter.setStyle(sizedElement, "width", width);
domAdapter.setStyle(sizedElement, "height", height);
domAdapter.setClass(sizedElement, SIZED_ELEMENT_CLASS);
domAdapter.insertElement(element, sizedElement);
};
var EmptyComponent = DOMComponent.inherit(emptyComponentConfig);
var originalInherit = EmptyComponent.inherit;
EmptyComponent.inherit = function(config) {
for(var field in config) {
if(typeUtils.isFunction(config[field]) && field.substr(0, 1) !== "_" || field === "_dispose" || field === "_optionChanged") {
config[field] = noop;
}
}
return originalInherit.call(this, config);
};
return EmptyComponent;
};
var isServerSide = !windowUtils.hasWindow();
module.exports = isServerSide ? getEmptyComponent() : DOMComponent.inherit({
_eventsMap: {
"onIncidentOccurred": { name: "incidentOccurred" },
"onDrawn": { name: "drawn" }
},
_getDefaultOptions: function() {
return extend(this.callBase(), {
onIncidentOccurred: defaultOnIncidentOccurred
});
},
_useLinks: true,
_init: function() {
var that = this,
linkTarget;
that._$element.children("." + SIZED_ELEMENT_CLASS).remove();
that.callBase.apply(that, arguments);
that._changesLocker = 0;
that._changes = helpers.changes();
that._suspendChanges();
that._themeManager = that._createThemeManager();
that._themeManager.setCallback(function() {
that._requestChange(that._themeDependentChanges);
});
that._renderElementAttributes();
that._initRenderer();
// Shouldn't "_useLinks" be passed to the renderer instead of doing 3 checks here?
linkTarget = that._useLinks && that._renderer.root;
// There is an implicit relation between `_useLinks` and `loading indicator` - it uses links
// Though this relation is not ensured in code we will immediately know when it is broken - `loading indicator` will break on construction
linkTarget && linkTarget.enableLinks().virtualLink("core").virtualLink("peripheral");
that._renderVisibilityChange();
that._attachVisibilityChangeHandlers();
that._initEventTrigger();
that._incidentOccurred = createIncidentOccurred(that.NAME, that._eventTrigger);
that._layout = new _Layout();
// Such solution is used only to avoid writing lots of "after" for all core elements in all widgets
// May be later a proper solution would be found
linkTarget && linkTarget.linkAfter("core");
that._initPlugins();
that._initCore();
linkTarget && linkTarget.linkAfter();
that._change(that._initialChanges);
},
_initialChanges: ["LAYOUT", "RESIZE_HANDLER", "THEME"],
_initPlugins: function() {
var that = this;
each(that._plugins, function(_, plugin) {
plugin.init.call(that);
});
},
_disposePlugins: function() {
var that = this;
each(that._plugins.slice().reverse(), function(_, plugin) {
plugin.dispose.call(that);
});
},
_change: function(codes) {
this._changes.add(codes);
},
_suspendChanges: function() {
++this._changesLocker;
},
_resumeChanges: function() {
var that = this;
if(--that._changesLocker === 0 && that._changes.count() > 0 && !that._applyingChanges) {
that._renderer.lock();
that._applyingChanges = true;
that._applyChanges();
that._changes.reset();
that._applyingChanges = false;
that._renderer.unlock();
if(that._optionsQueue) {
that._applyQueuedOptions();
}
}
},
_applyQueuedOptions: function() {
var that = this,
queue = that._optionsQueue;
that._optionsQueue = null;
that.beginUpdate();
each(queue, function(_, action) {
action();
});
that.endUpdate();
},
_requestChange: function(codes) {
this._suspendChanges();
this._change(codes);
this._resumeChanges();
},
_applyChanges: function() {
var that = this,
changes = that._changes,
order = that._totalChangesOrder,
i,
ii = order.length;
for(i = 0; i < ii; ++i) {
if(changes.has(order[i])) {
that["_change_" + order[i]]();
}
}
},
_optionChangesOrder: ["EVENTS", "THEME", "RENDERER", "RESIZE_HANDLER"],
_layoutChangesOrder: ["ELEMENT_ATTR", "CONTAINER_SIZE", "LAYOUT"],
_customChangesOrder: [],
_change_EVENTS: function() {
this._eventTrigger.applyChanges();
},
_change_THEME: function() {
this._setThemeAndRtl();
},
_change_RENDERER: function() {
this._setRendererOptions();
},
_change_RESIZE_HANDLER: function() {
this._setupResizeHandler();
},
_change_ELEMENT_ATTR: function() {
this._renderElementAttributes();
this._change(["CONTAINER_SIZE"]);
},
_change_CONTAINER_SIZE: function() {
this._updateSize();
},
_change_LAYOUT: function() {
this._setContentSize();
},
_themeDependentChanges: ["RENDERER"],
_initRenderer: function() {
var that = this;
// Canvas is calculated before the renderer is created in order to capture actual size of the container
that._canvas = that._calculateCanvas();
that._renderer = new rendererModule.Renderer({ cssClass: that._rootClassPrefix + " " + that._rootClass, pathModified: that.option("pathModified"), container: that._$element[0] });
that._renderer.resize(that._canvas.width, that._canvas.height);
},
_disposeRenderer: function() {
///#DEBUG
// NOTE: This is temporary - until links mechanism is stabilized
this._useLinks && this._renderer.root.checkLinks();
///#ENDDEBUG
this._renderer.dispose();
},
_getAnimationOptions: noop,
render: function() {
this._requestChange(["CONTAINER_SIZE"]);
this._onRender();
},
// This is actually added only to make tooltip pluggable. This is bad but much better than entire tooltip in BaseWidget.
_onRender: noop,
_dispose: function() {
var that = this;
that.callBase.apply(that, arguments);
that._removeResizeHandler();
that._layout.dispose();
that._eventTrigger.dispose();
that._disposeCore();
that._disposePlugins();
that._disposeRenderer();
that._themeManager.dispose();
that._themeManager = that._renderer = that._eventTrigger = null;
},
_initEventTrigger: function() {
var that = this;
that._eventTrigger = createEventTrigger(that._eventsMap, function(name) { return that._createActionByOption(name); });
},
_calculateCanvas: function() {
var that = this,
size = that.option("size") || {},
margin = that.option("margin") || {},
defaultCanvas = that._getDefaultSize() || {},
elementWidth = windowUtils.hasWindow() ? that._$element.width() : 0,
elementHeight = windowUtils.hasWindow() ? that._$element.height() : 0,
canvas = {
width: size.width <= 0 ? 0 : _floor(pickPositiveValue([size.width, elementWidth, defaultCanvas.width])),
height: size.height <= 0 ? 0 : _floor(pickPositiveValue([size.height, elementHeight, defaultCanvas.height])),
left: pickPositiveValue([margin.left, defaultCanvas.left]),
top: pickPositiveValue([margin.top, defaultCanvas.top]),
right: pickPositiveValue([margin.right, defaultCanvas.right]),
bottom: pickPositiveValue([margin.bottom, defaultCanvas.bottom])
};
// This is for backward compatibility - the widget was not rendered when the canvas was empty.
// Now it will be rendered but because of "width" and "height" of the root both set to 0 it will not be visible.
if(canvas.width - canvas.left - canvas.right <= 0 || canvas.height - canvas.top - canvas.bottom <= 0) {
canvas = { width: 0, height: 0 };
}
return canvas;
},
_updateSize: function() {
var that = this,
canvas = that._calculateCanvas();
that._renderer.fixPlacement();
if(areCanvasesDifferent(that._canvas, canvas) || that.__forceRender /* for charts */) {
that._canvas = canvas;
that._recreateSizeDependentObjects(true);
that._renderer.resize(canvas.width, canvas.height);
that._change(["LAYOUT"]);
}
},
_recreateSizeDependentObjects: noop,
_getMinSize: function() {
return [0, 0];
},
_getAlignmentRect: noop,
_setContentSize: function() {
var canvas = this._canvas,
layout = this._layout,
rect = canvas.width > 0 && canvas.height > 0 ? [canvas.left, canvas.top, canvas.width - canvas.right, canvas.height - canvas.bottom] : [0, 0, 0, 0],
nextRect;
rect = layout.forward(rect, this._getMinSize());
nextRect = this._applySize(rect) || rect;
layout.backward(nextRect, this._getAlignmentRect() || nextRect);
},
///#DEBUG
DEBUG_getCanvas: function() {
return this._canvas;
},
DEBUG_getEventTrigger: function() {
return this._eventTrigger;
},
///#ENDDEBUG
_getOption: function(name, isScalar) {
var theme = this._themeManager.theme(name),
option = this.option(name);
return isScalar ? (option !== undefined ? option : theme) : extend(true, {}, theme, option);
},
_setupResizeHandler: function() {
var that = this,
redrawOnResize = _parseScalar(this._getOption("redrawOnResize", true), true);
if(that._resizeHandler) {
that._removeResizeHandler();
}
that._resizeHandler = createResizeHandler(function() {
if(redrawOnResize) {
that._requestChange(["CONTAINER_SIZE"]);
} else {
that._renderer.fixPlacement();
}
});
_windowResizeCallbacks.add(that._resizeHandler);
},
_removeResizeHandler: function() {
if(this._resizeHandler) {
_windowResizeCallbacks.remove(this._resizeHandler);
this._resizeHandler.dispose();
this._resizeHandler = null;
}
},
// This is actually added only to make loading indicator pluggable. This is bad but much better than entire loading indicator in BaseWidget.
_onBeginUpdate: noop,
beginUpdate: function() {
var that = this;
// The "_initialized" flag is checked because first time "beginUpdate" is called in the constructor.
if(that._initialized && that._updateLockCount === 0) {
that._onBeginUpdate();
that._suspendChanges();
}
that.callBase.apply(that, arguments);
return that;
},
endUpdate: function() {
var that = this;
that.callBase.apply(that, arguments);
if(that._updateLockCount === 0) {
that._resumeChanges();
}
return that;
},
option: function(name) {
var that = this;
// NOTE: `undefined` has to be returned because base option setter returns `undefined`.
// `argument.length` and `isObject` checks are copypaste from Component.
if(that._initialized && that._applyingChanges && (arguments.length > 1 || _isObject(name))) {
that._optionsQueue = that._optionsQueue || [];
that._optionsQueue.push(that._getActionForUpdating(arguments));
} else {
return _option.apply(that, arguments);
}
},
_getActionForUpdating: function(args) {
var that = this;
return that._deprecatedOptionsSuppressed ? function() { // T479911
that._suppressDeprecatedWarnings();
_option.apply(that, args);
that._resumeDeprecatedWarnings();
} : function() {
_option.apply(that, args);
};
},
// For quite a long time the following methods were abstract (from the Component perspective).
// Now they are not but that basic functionality is not required here.
_clean: noop,
_render: noop,
_optionChanged: function(arg) {
var that = this;
if(that._eventTrigger.change(arg.name)) {
that._change(["EVENTS"]);
} else if(that._optionChangesMap[arg.name]) {
that._change([that._optionChangesMap[arg.name]]);
} else {
that.callBase.apply(that, arguments);
}
},
_optionChangesMap: {
size: "CONTAINER_SIZE",
margin: "CONTAINER_SIZE",
redrawOnResize: "RESIZE_HANDLER",
theme: "THEME",
rtlEnabled: "THEME",
encodeHtml: "THEME",
elementAttr: "ELEMENT_ATTR"
},
_visibilityChanged: function() {
this.render();
},
_setThemeAndRtl: function() {
this._themeManager.setTheme(this.option("theme"), this.option(OPTION_RTL_ENABLED));
},
_getRendererOptions: function() {
return {
rtl: this.option(OPTION_RTL_ENABLED),
encodeHtml: this.option("encodeHtml"),
animation: this._getAnimationOptions()
};
},
_setRendererOptions: function() {
this._renderer.setOptions(this._getRendererOptions());
},
svg: function() {
return this._renderer.svg();
},
getSize: function() {
var canvas = this._canvas || {};
return { width: canvas.width, height: canvas.height };
},
isReady: getFalse,
_dataIsReady: getTrue,
_resetIsReady: function() {
this.isReady = getFalse;
},
_drawn: function() {
var that = this;
that.isReady = getFalse;
if(that._dataIsReady()) {
that._renderer.onEndAnimation(function() {
that.isReady = getTrue;
});
}
that._eventTrigger("drawn", {});
}
});
helpers.replaceInherit(module.exports);
function createEventTrigger(eventsMap, callbackGetter) {
var triggers = {};
each(eventsMap, function(name, info) {
if(info.name) {
createEvent(name);
}
});
var changes;
triggerEvent.change = function(name) {
var eventInfo = eventsMap[name];
if(eventInfo) {
(changes = changes || {})[name] = eventInfo;
}
return !!eventInfo;
};
triggerEvent.applyChanges = function() {
if(changes) {
each(changes, function(name, eventInfo) {
createEvent(eventInfo.newName || name);
});
changes = null;
}
};
triggerEvent.dispose = function() {
eventsMap = callbackGetter = triggers = null;
};
return triggerEvent;
function createEvent(name) {
var eventInfo = eventsMap[name];
triggers[eventInfo.name] = callbackGetter(name);
}
function triggerEvent(name, arg, complete) {
triggers[name](arg);
complete && complete();
}
}
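// createEventTrigger returns a function that doubles as the trigger and as a
// small API: "change" only records that a handler must be rebuilt, and
// "applyChanges" later rebuilds all of the queued handlers in one pass.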
///#DEBUG
module.exports.DEBUG_createEventTrigger = createEventTrigger;
module.exports.DEBUG_createIncidentOccurred = createIncidentOccurred;
module.exports.DEBUG_stub_createIncidentOccurred = function(stub) {
createIncidentOccurred = stub;
};
module.exports.DEBUG_restore_createIncidentOccurred = function() {
createIncidentOccurred = module.exports.DEBUG_createIncidentOccurred;
};
module.exports.DEBUG_createResizeHandler = createResizeHandler;
///#ENDDEBUG
| getTrue |
main.rs | mod configuration;
mod datamodel;
mod errors;
mod filesystem;
mod geolocator;
mod logging;
mod process;
mod processor;
mod storage;
mod to_bson;
#[macro_use]
extern crate lazy_static;
use std::sync::{atomic::AtomicBool, Arc};
use anyhow::Result;
#[cfg(unix)]
use anyhow::Context;
fn main() {
if let Err(err) = try_main() {
logging::print(&err);
if err.is::<errors::TerminatedError>() {
return;
}
std::process::exit(1);
}
}
fn | () -> Result<()> {
let args = configuration::parse_args(&std::env::args().collect::<Vec<_>>())?;
let args = if let Some(args) = args {
args
} else {
return Ok(());
};
let term_token = Arc::new(AtomicBool::new(false));
register_signal(&term_token)?;
process::process(args, &term_token)?;
Ok(())
}
#[cfg(unix)]
fn register_signal(token: &Arc<AtomicBool>) -> Result<()> {
signal_hook::flag::register(signal_hook::consts::SIGTERM, Arc::clone(token))
.map(|_| ())
.context("Failed to register signal")
}
#[cfg(not(unix))]
fn register_signal(_: &Arc<AtomicBool>) -> Result<()> {
Ok(())
}
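// On Unix, SIGTERM only flips the shared AtomicBool registered above;
// process::process receives the token and is presumably expected to poll it
// and wind down cooperatively (surfacing errors::TerminatedError).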
| try_main |
basicplotter.py | # -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Basic plotting methods using matplotlib.
These include methods to plot Bloch vectors, histograms, and quantum spheres.
Author: Andrew Cross, Jay Gambetta
"""
from mpl_toolkits.mplot3d import proj3d
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch
import numpy as np
from collections import Counter
from functools import reduce
def plot_histogram(data, number_to_keep=None):
"""Plot a histogram of data.
data is a dictionary of {'000': 5, '010': 113, ...}
number_to_keep is the number of terms to plot; the remainder is grouped
into a single bar labeled "rest"
"""
if number_to_keep is not None:
data_temp = dict(Counter(data).most_common(number_to_keep))
data_temp["rest"] = sum(data.values()) - sum(data_temp.values())
data = data_temp
labels = sorted(data)
values = np.array([data[key] for key in labels], dtype=float)
pvalues = values / sum(values)
numelem = len(values)
ind = np.arange(numelem) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects = ax.bar(ind, pvalues, width, color='seagreen')
# add some text for labels, title, and axes ticks
ax.set_ylabel('Probabilities', fontsize=12)
ax.set_xticks(ind)
ax.set_xticklabels(labels, fontsize=12)
ax.set_ylim([0., min([1.2, max([1.2 * val for val in pvalues])])])
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
'%f' % float(height),
ha='center', va='bottom')
plt.show()
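# Illustrative call, using the counts format from the docstring:
#   plot_histogram({'000': 5, '010': 113, '111': 282}, number_to_keep=2)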
# Functions used for plotting on the qsphere.
#
# See:
# lex_index:
# https://msdn.microsoft.com/en-us/library/aa289166%28v=vs.71%29.aspx
# n_choose_k: http://stackoverflow.com/questions/2096573/counting-combinations-and-permutations-efficiently
class Arrow3D(FancyArrowPatch):
"""Standard 3D arrow."""
def __init__(self, xs, ys, zs, *args, **kwargs):
"""Create arrow."""
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
"""Draw the arrow."""
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
def compliment(value):
"""Swap 1 and 0 in a vector."""
return ''.join(COMPLEMENT[x] for x in value)
|
def n_choose_k(n, k):
"""Return the number of combinations."""
if n == 0:
return 0.0
else:
return reduce(lambda x, y: x * y[0] / y[1],
zip(range(n - k + 1, n + 1),
range(1, k + 1)), 1)
def lex_index(n, k, lst):
"""Return the index of a combination."""
assert len(lst) == k, "list should have length k"
comb = list(map(lambda x: n - 1 - x, lst))
dualm = sum([n_choose_k(comb[k - 1 - i], i + 1) for i in range(k)])
m = dualm
return int(m)
def bit_string_index(s):
"""Return the index of a string of 0s and 1s."""
n = len(s)
k = s.count("1")
assert s.count("0") == n - k, "s must be a string of 0 and 1"
ones = [pos for pos, char in enumerate(s) if char == "1"]
return lex_index(n, k, ones)
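# Worked example (derived from the functions above): for "10" the single one
# sits at position 0, giving bit_string_index("10") == 1, while
# bit_string_index("01") == 0.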
def plot_qsphere(data, number_to_keep, number_of_qubits):
"""Plot the qsphere of data."""
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
ax.axes.set_xlim3d(-1.0, 1.0)
ax.axes.set_ylim3d(-1.0, 1.0)
ax.axes.set_zlim3d(-1.0, 1.0)
ax.set_aspect("equal")
ax.axes.grid(False)
# Plot semi-transparent sphere
u = np.linspace(0, 2 * np.pi, 25)
v = np.linspace(0, np.pi, 25)
x = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
z = np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, rstride=1, cstride=1, color='k', alpha=0.05,
linewidth=0)
# wireframe
# Get rid of the panes
# ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# Get rid of the spines
# ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
# ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
# ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
# Get rid of the ticks
# ax.set_xticks([])
# ax.set_yticks([])
# ax.set_zticks([])
d = number_of_qubits
total_values = sum(data.values())
for key in data:
weight = key.count("1")
zvalue = -2 * weight / d + 1
number_of_divisions = n_choose_k(d, weight)
weight_order = bit_string_index(key)
if weight_order >= number_of_divisions / 2:
com_key = compliment(key)
weight_order_temp = bit_string_index(com_key)
weight_order = np.floor(
number_of_divisions / 2) + weight_order_temp + 1
print(key + " " + str(weight_order))
angle = (weight_order) * 2 * np.pi / number_of_divisions
xvalue = np.sqrt(1 - zvalue**2) * np.cos(angle)
yvalue = np.sqrt(1 - zvalue**2) * np.sin(angle)
linewidth = 5 * data.get(key) / total_values
print([xvalue, yvalue, zvalue])
a = Arrow3D([0, xvalue], [0, yvalue], [0, zvalue], mutation_scale=20,
lw=linewidth, arrowstyle="->", color="k")
ax.add_artist(a)
for weight in range(d + 1):
theta = np.linspace(-2 * np.pi, 2 * np.pi, 100)
z = -2 * weight / d + 1
if weight == 0:
z = z - 0.001
if weight == d:
z = z + 0.001
r = np.sqrt(1 - z**2)
x = r * np.cos(theta)
y = r * np.sin(theta)
ax.plot(x, y, z, 'k')
plt.show()
# Functions used for plotting tomography.
def plot_bloch_vector(bloch, title=""):
"""Plot a Bloch vector.
Plot a sphere, axes, the Bloch vector, and its projections onto each axis.
bloch is a 3-tuple (x, y, z)
title is a string, the plot title
"""
# Set arrow lengths
arlen = 1.3
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_aspect("equal")
# Plot semi-transparent sphere
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
z = np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, color="b", alpha=0.1)
# Plot arrows (axes, Bloch vector, its projections)
xa = Arrow3D([0, arlen], [0, 0], [0, 0], mutation_scale=20, lw=1,
arrowstyle="-|>", color="k")
ya = Arrow3D([0, 0], [0, arlen], [0, 0], mutation_scale=20, lw=1,
arrowstyle="-|>", color="k")
za = Arrow3D([0, 0], [0, 0], [0, arlen], mutation_scale=20, lw=1,
arrowstyle="-|>", color="k")
a = Arrow3D([0, bloch[0]], [0, bloch[1]], [0, bloch[2]], mutation_scale=20,
lw=2, arrowstyle="simple", color="k")
bax = Arrow3D([0, bloch[0]], [0, 0], [0, 0], mutation_scale=20, lw=2,
arrowstyle="-", color="r")
bay = Arrow3D([0, 0], [0, bloch[1]], [0, 0], mutation_scale=20, lw=2,
arrowstyle="-", color="g")
baz = Arrow3D([0, 0], [0, 0], [0, bloch[2]], mutation_scale=20, lw=2,
arrowstyle="-", color="b")
arrowlist = [xa, ya, za, a, bax, bay, baz]
for arr in arrowlist:
ax.add_artist(arr)
# Rotate the view
ax.view_init(30, 30)
# Annotate the axes, shifts are ad-hoc for this (30, 30) view
xp, yp, _ = proj3d.proj_transform(arlen, 0, 0, ax.get_proj())
plt.annotate("x", xy=(xp, yp), xytext=(-3, -8),
textcoords='offset points', ha='right', va='bottom')
xp, yp, _ = proj3d.proj_transform(0, arlen, 0, ax.get_proj())
plt.annotate("y", xy=(xp, yp), xytext=(6, -5),
textcoords='offset points', ha='right', va='bottom')
xp, yp, _ = proj3d.proj_transform(0, 0, arlen, ax.get_proj())
plt.annotate("z", xy=(xp, yp), xytext=(2, 0),
textcoords='offset points', ha='right', va='bottom')
plt.title(title)
plt.show()
# Functions used by randomized benchmarking.
def plot_rb_data(xdata, ydatas, yavg, fit, survival_prob):
"""Plot randomized benchmarking data.
xdata = list of subsequence lengths
ydatas = list of lists of survival probabilities for each sequence
yavg = mean of the survival probabilities at each sequence length
fit = list of fitting parameters [a, b, alpha]
survival_prob = function that computes survival probability
"""
# Plot the result for each sequence
for ydata in ydatas:
plt.plot(xdata, ydata, 'rx')
# Plot the mean
plt.plot(xdata, yavg, 'bo')
# Plot the fit
plt.plot(xdata, survival_prob(xdata, *fit), 'b-')
plt.show() | COMPLEMENT = {'1': '0', '0': '1'} |
autoserialize_annotation_spec.js | var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
///<reference path="./typings/jasmine.d.ts"/>
import { __TypeMap, autoserialize, Serialize, Deserialize, DeserializeInto, autoserializeAs } from '../src/serialize';
class T {
}
__decorate([
autoserialize
], T.prototype, "x", void 0);
class Vector2 {
}
__decorate([
autoserialize
], Vector2.prototype, "x", void 0);
__decorate([
autoserialize
], Vector2.prototype, "y", void 0);
class AsTest {
}
__decorate([
autoserializeAs(Vector2)
], AsTest.prototype, "v", void 0);
class | {
}
__decorate([
autoserializeAs(Vector2, "VECTOR")
], AsTest2.prototype, "v", void 0);
class AsTest3 {
}
__decorate([
autoserializeAs("z")
], AsTest3.prototype, "y", void 0);
class Test3 {
}
__decorate([
autoserialize
], Test3.prototype, "primitiveArray", void 0);
describe('autoserialize', function () {
it('should create meta data for serialize and deserialize', function () {
expect(__TypeMap.get(T)).toBeDefined();
expect(__TypeMap.get(T).length).toBe(1);
expect(__TypeMap.get(T)[0].serializedKey).toBe('x');
expect(__TypeMap.get(T)[0].serializedType).toBe(null);
expect(__TypeMap.get(T)[0].deserializedType).toBe(null);
expect(__TypeMap.get(T)[0].deserializedKey).toBe('x');
});
});
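// __TypeMap is keyed by the constructor itself; every decorated property adds
// one metadata record, which is why T above contributes exactly one entry.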
describe('autoserializeAs', function () {
it('should create meta data', function () {
expect(__TypeMap.get(AsTest)).toBeDefined();
expect(__TypeMap.get(AsTest).length).toBe(1);
expect(__TypeMap.get(AsTest)[0].serializedKey).toBe('v');
expect(__TypeMap.get(AsTest)[0].serializedType).toBe(Vector2);
expect(__TypeMap.get(AsTest)[0].deserializedKey).toBe('v');
expect(__TypeMap.get(AsTest)[0].deserializedType).toBe(Vector2);
});
it('should create meta data with a different key', function () {
expect(__TypeMap.get(AsTest3)).toBeDefined();
expect(__TypeMap.get(AsTest3).length).toBe(1);
expect(__TypeMap.get(AsTest3)[0].serializedKey).toBe('z');
expect(__TypeMap.get(AsTest3)[0].serializedType).toBe(null);
expect(__TypeMap.get(AsTest3)[0].deserializedKey).toBe('z');
expect(__TypeMap.get(AsTest3)[0].deserializedType).toBe(null);
});
it('should create meta data with a different key and type', function () {
expect(__TypeMap.get(AsTest2)).toBeDefined();
expect(__TypeMap.get(AsTest2).length).toBe(1);
expect(__TypeMap.get(AsTest2)[0].serializedKey).toBe('VECTOR');
expect(__TypeMap.get(AsTest2)[0].serializedType).toBe(Vector2);
expect(__TypeMap.get(AsTest2)[0].deserializedKey).toBe('VECTOR');
expect(__TypeMap.get(AsTest2)[0].deserializedType).toBe(Vector2);
});
it("handles strings", function () {
});
});
var Utility;
(function (Utility) {
function unpackSet(_set) {
const result = [];
_set.forEach(v => result.push(v));
return result;
}
Utility.unpackSet = unpackSet;
})(Utility || (Utility = {}));
describe('autoserializeAs using Serializer', () => {
describe('to wrapped data', () => {
const JSON = {
children: {
wrap: [11, 22, 33]
}
};
const Serializer = {
Serialize(_set) {
return { wrap: Utility.unpackSet(_set) };
},
Deserialize(json, instance) {
return new Set(json.wrap);
}
};
class TestClass {
constructor() {
this.children = new Set();
}
}
__decorate([
autoserializeAs(Serializer)
], TestClass.prototype, "children", void 0);
it("will be serialized", () => {
const instance = new TestClass();
JSON.children.wrap.forEach(v => instance.children.add(v));
const json = Serialize(instance);
expect(json).toEqual(JSON);
});
it("will be deserialized", () => {
const result = Deserialize(JSON, TestClass);
expect(result instanceof TestClass).toBeTruthy();
expect(result.children instanceof Set).toBeTruthy();
expect(Utility.unpackSet(result.children)).toEqual(JSON.children.wrap);
});
it("will be deserializedInto", () => {
const result = DeserializeInto(JSON, TestClass, new TestClass());
expect(result instanceof TestClass).toBeTruthy();
expect(result.children instanceof Set).toBeTruthy();
expect(Utility.unpackSet(result.children)).toEqual(JSON.children.wrap);
});
});
describe('should handle primitive arrays', function () {
it('should handle serializing a primitive array', function () {
var t = new Test3();
t.primitiveArray = [1, 2, 3];
var result = Serialize(t);
expect(result.primitiveArray.length).toBe(3);
expect(result.primitiveArray[0]).toBe(1);
expect(result.primitiveArray[1]).toBe(2);
expect(result.primitiveArray[2]).toBe(3);
});
it('should handle deserializing a primitive array', function () {
var t = new Test3();
t.primitiveArray = [1, 2, 3];
var result = Deserialize({ primitiveArray: [1, 2, 3] }, Test3);
expect(Array.isArray(result.primitiveArray)).toBe(true);
});
});
describe('to plain array data', () => {
const JSON = {
children: [11, 22, 33]
};
const Serializer = {
Serialize(_set) {
return Utility.unpackSet(_set);
},
Deserialize(json, instance) {
return new Set(json);
}
};
class TestClass {
constructor() {
this.children = new Set();
}
}
__decorate([
autoserializeAs(Serializer)
], TestClass.prototype, "children", void 0);
it("will be serialized", () => {
const instance = new TestClass();
JSON.children.forEach(v => instance.children.add(v));
const json = Serialize(instance);
expect(json).toEqual(JSON);
});
it("will be deserialized", () => {
const result = Deserialize(JSON, TestClass);
expect(result instanceof TestClass).toBeTruthy();
expect(result.children instanceof Set).toBeTruthy();
expect(Utility.unpackSet(result.children)).toEqual(JSON.children);
});
it("will be deserializedInto", () => {
const result = DeserializeInto(JSON, TestClass, new TestClass());
expect(result instanceof TestClass).toBeTruthy();
expect(result.children instanceof Set).toBeTruthy();
expect(Utility.unpackSet(result.children)).toEqual(JSON.children);
});
});
});
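// Minimal round-trip sketch (illustrative only, not executed by this spec):
// because AsTest2 maps `v` to the key "VECTOR", serialization renames the field.
//   const t = new AsTest2();
//   t.v = new Vector2();
//   const json = Serialize(t);             // -> { VECTOR: { x: ..., y: ... } }
//   const back = Deserialize(json, AsTest2);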
array.rs | use gc::{Gc, GcCell};
use super::*;
pub type ArrayValue = Gc<GcCell<Vec<Expression>>>;
#[derive(Clone)]
pub struct ArrayExpr {
pub value: ArrayValue,
pub spread: bool
}
impl ArrayExpr {
pub fn new(items: Vec<Expression>, spread: bool) -> Expression {
Box::new(Self { value: Gc::new(GcCell::new(items)), spread })
}
pub fn uid(&self) -> usize {
&*self.value.borrow() as *const Vec<Expression> as usize
}
}
impl Expr for ArrayExpr {
fn as_any(&self) -> &dyn std::any::Any {
return self
}
fn evaluate(&self, _ctx: &mut Ctx) -> Expression {
return Box::new(Self { value: self.value.clone(), spread: self.spread });
}
fn stringify(&self) -> String {
format!("[{}]", self.value.borrow().iter().map(|x| x.visualize()).collect::<Vec<String>>().join(", "))
}
fn visualize(&self) -> String {
format!("[{}]", self.value.borrow().iter().map(|x| x.visualize()).collect::<Vec<String>>().join(", "))
}
fn plus(&self, _other: &Expression) -> Expression {
VoidExpr::new()
}
fn minus(&self, _other: &Expression) -> Expression {
VoidExpr::new()
}
fn multiply(&self, _other: &Expression) -> Expression {
VoidExpr::new()
}
fn divide(&self, _other: &Expression) -> Expression {
VoidExpr::new()
}
}
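// Illustrative construction (e1/e2 stand for any boxed `Expression` values from
// this crate): `ArrayExpr::new(vec![e1, e2], false)` wraps the items in a shared,
// garbage-collected Vec, and `uid()` exposes the allocation address so two
// clones of the same array compare as identical.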
events_resultdone.rs | #[doc = "Register `EVENTS_RESULTDONE` reader"]
pub struct R(crate::R<EVENTS_RESULTDONE_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<EVENTS_RESULTDONE_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<EVENTS_RESULTDONE_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<EVENTS_RESULTDONE_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `EVENTS_RESULTDONE` writer"]
pub struct W(crate::W<EVENTS_RESULTDONE_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<EVENTS_RESULTDONE_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<EVENTS_RESULTDONE_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<EVENTS_RESULTDONE_SPEC>) -> Self {
W(writer)
}
}
#[doc = "A result is ready to get transferred to RAM\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EVENTS_RESULTDONE_A {
#[doc = "0: Event not generated"]
NOTGENERATED = 0,
#[doc = "1: Event generated"]
GENERATED = 1,
}
impl From<EVENTS_RESULTDONE_A> for bool {
#[inline(always)]
fn from(variant: EVENTS_RESULTDONE_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `EVENTS_RESULTDONE` reader - A result is ready to get transferred to RAM"]
pub struct EVENTS_RESULTDONE_R(crate::FieldReader<bool, EVENTS_RESULTDONE_A>);
impl EVENTS_RESULTDONE_R {
pub(crate) fn new(bits: bool) -> Self {
EVENTS_RESULTDONE_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> EVENTS_RESULTDONE_A {
match self.bits {
false => EVENTS_RESULTDONE_A::NOTGENERATED,
true => EVENTS_RESULTDONE_A::GENERATED,
}
}
#[doc = "Checks if the value of the field is `NOTGENERATED`"]
#[inline(always)]
    pub fn is_not_generated(&self) -> bool {
        **self == EVENTS_RESULTDONE_A::NOTGENERATED
    }
    #[doc = "Checks if the value of the field is `GENERATED`"]
#[inline(always)]
pub fn is_generated(&self) -> bool {
**self == EVENTS_RESULTDONE_A::GENERATED
}
}
impl core::ops::Deref for EVENTS_RESULTDONE_R {
type Target = crate::FieldReader<bool, EVENTS_RESULTDONE_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `EVENTS_RESULTDONE` writer - A result is ready to get transferred to RAM"]
pub struct EVENTS_RESULTDONE_W<'a> {
w: &'a mut W,
}
impl<'a> EVENTS_RESULTDONE_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: EVENTS_RESULTDONE_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Event not generated"]
#[inline(always)]
pub fn not_generated(self) -> &'a mut W {
self.variant(EVENTS_RESULTDONE_A::NOTGENERATED)
}
#[doc = "Event generated"]
#[inline(always)]
pub fn generated(self) -> &'a mut W {
self.variant(EVENTS_RESULTDONE_A::GENERATED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
self.w
}
}
impl R {
#[doc = "Bit 0 - A result is ready to get transferred to RAM"]
#[inline(always)]
pub fn events_resultdone(&self) -> EVENTS_RESULTDONE_R {
EVENTS_RESULTDONE_R::new((self.bits & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - A result is ready to get transferred to RAM"]
#[inline(always)]
pub fn events_resultdone(&mut self) -> EVENTS_RESULTDONE_W {
EVENTS_RESULTDONE_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "A result is ready to get transferred to RAM\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [events_resultdone](index.html) module"]
pub struct EVENTS_RESULTDONE_SPEC;
impl crate::RegisterSpec for EVENTS_RESULTDONE_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [events_resultdone::R](R) reader structure"]
impl crate::Readable for EVENTS_RESULTDONE_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [events_resultdone::W](W) writer structure"]
impl crate::Writable for EVENTS_RESULTDONE_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets EVENTS_RESULTDONE to value 0"]
impl crate::Resettable for EVENTS_RESULTDONE_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
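// Illustrative read through the generated API (the peripheral handle name is
// hypothetical):
//   if periph.events_resultdone.read().events_resultdone().is_generated() {
//       // a conversion result is ready to be transferred to RAM
//   }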
reducer.ts | import {
LOGOUT_INIT,
} from 'store/security/types';
import {
TOGGLE_LAYER,
MapViewerActions,
MapViewerState
} from 'store/map/types';
const initialState: MapViewerState = {
selected: [],
};
export function mapViewerReducer(
state = initialState,
action: MapViewerActions
): MapViewerState {
switch (action.type) {
case LOGOUT_INIT:
return {
...initialState
};
case TOGGLE_LAYER:
return {
...state,
selected: state.selected.includes(action.index) ? state.selected.filter(i => i !== action.index) : [...state.selected, action.index],
};
default:
return state;
}
}
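// Illustrative reduction (hypothetical layer index 2): toggling the same index
// twice selects and then deselects the layer.
//   const s1 = mapViewerReducer(undefined, { type: TOGGLE_LAYER, index: 2 } as MapViewerActions);
//   const s2 = mapViewerReducer(s1, { type: TOGGLE_LAYER, index: 2 } as MapViewerActions); // selected: []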
color-picker.component.ts | import { Component, EventEmitter, Input, Output } from '@angular/core';
import { MARKER_COLORS, PositionFormService } from '../position-form.service';
@Component({
selector: 'app-color-picker',
templateUrl: './color-picker.component.html',
styleUrls: ['./color-picker.component.scss']
})
export class ColorPickerComponent {
private _selectedIndex = 0;
@Input()
    set selectedIndex(value: number) {
this._selectedIndex = value;
this.selectedIndexChange.emit(value);
}
get selectedIndex(): number {
return this._selectedIndex;
}
@Input('Active') Active;
@Output('togglePickedEmitter') togglePickedEmitter = new EventEmitter();
@Output('selectedIndexChange') selectedIndexChange = new EventEmitter();
@Input('disabledAction') disabledAction: boolean;
@Input('iconsPerRow') public iconsPerRow: number;
@Input('backdrop') public backdrop: boolean;
@Output() labelChange = new EventEmitter();
@Input() label = '';
constructor(public positionFormService: PositionFormService) {
}
submitLabel(popDirective, label) {
this.label = label;
this.labelChange.emit(label);
popDirective.hide();
}
get selectedColor(): string {
return MARKER_COLORS[this.selectedIndex].icon;
}
changeMarkerColor(selectedColorIndex) {
this.selectedIndex = selectedColorIndex;
if (!this.Active) {
this.togglePickedEmitter.emit(true);
}
}
markerColors() {
return MARKER_COLORS;
}
getMarkerUrlByColor(color: string): string {
return this.positionFormService.getMarkerUrlByColor(color);
}
calcIndex(parentIndex: number, childIndex: number, iconsPerRow = 1) {
return (iconsPerRow * parentIndex) + childIndex;
}
}
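// Illustrative template usage (bindings as declared above; values are hypothetical):
//   <app-color-picker [(selectedIndex)]="markerColorIndex"
//                     [iconsPerRow]="4"
//                     [disabledAction]="false"
//                     (togglePickedEmitter)="onPicked($event)">
//   </app-color-picker>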
folding.test.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import 'mocha';
import * as assert from 'assert';
import { TextDocument } from 'vscode-html-languageservice';
import { getFoldingRanges } from '../modes/htmlFolding';
import { getLanguageModes } from '../modes/languageModes';
import { ClientCapabilities } from 'vscode-css-languageservice';
interface ExpectedIndentRange {
startLine: number;
endLine: number;
kind?: string;
}
function assertRanges(lines: string[], expected: ExpectedIndentRange[], message?: string, nRanges?: number): void {
let document = TextDocument.create('test://foo/bar.json', 'json', 1, lines.join('\n'));
let workspace = {
settings: {},
folders: [{ name: 'foo', uri: 'test://foo' }]
};
let languageModes = getLanguageModes({ css: true, javascript: true }, workspace, ClientCapabilities.LATEST);
let actual = getFoldingRanges(languageModes, document, nRanges, null);
let actualRanges = [];
for (let i = 0; i < actual.length; i++) {
actualRanges[i] = r(actual[i].startLine, actual[i].endLine, actual[i].kind);
}
actualRanges = actualRanges.sort((r1, r2) => r1.startLine - r2.startLine);
assert.deepEqual(actualRanges, expected, message);
}
function r(startLine: number, endLine: number, kind?: string): ExpectedIndentRange {
return { startLine, endLine, kind };
}
suite('HTML Folding', () => {
test('Embedded JavaScript', () => {
let input = [
/*0*/'<html>',
/*1*/'<head>',
/*2*/'<script>',
/*3*/'function f() {',
/*4*/'}',
/*5*/'</script>',
/*6*/'</head>',
/*7*/'</html>',
];
assertRanges(input, [r(0, 6), r(1, 5), r(2, 4), r(3, 4)]);
});
test('Embedded JavaScript - multiple areas', () => {
let input = [
/* 0*/'<html>',
/* 1*/'<head>',
/* 2*/'<script>',
/* 3*/' var x = {',
/* 4*/' foo: true,',
/* 5*/' bar: {}',
/* 6*/' };',
/* 7*/'</script>',
/* 8*/'<script>',
/* 9*/' test(() => { // hello',
/*10*/' f();',
/*11*/' });',
/*12*/'</script>',
/*13*/'</head>',
/*14*/'</html>',
];
assertRanges(input, [r(0, 13), r(1, 12), r(2, 6), r(3, 6), r(8, 11), r(9, 11)]);
});
test('Embedded JavaScript - incomplete', () => {
let input = [
/* 0*/'<html>',
/* 1*/'<head>',
/* 2*/'<script>',
/* 3*/' var x = {',
/* 4*/'</script>',
/* 5*/'<script>',
/* 6*/' });',
/* 7*/'</script>',
/* 8*/'</head>',
/* 9*/'</html>',
];
assertRanges(input, [r(0, 8), r(1, 7), r(2, 3), r(5, 6)]);
});
test('Embedded JavaScript - regions', () => {
let input = [
/* 0*/'<html>',
/* 1*/'<head>',
/* 2*/'<script>',
/* 3*/' // #region Lalala',
/* 4*/' // #region',
/* 5*/' x = 9;',
/* 6*/' // #endregion',
/* 7*/' // #endregion Lalala',
/* 8*/'</script>',
/* 9*/'</head>',
/*10*/'</html>',
];
assertRanges(input, [r(0, 9), r(1, 8), r(2, 7), r(3, 7, 'region'), r(4, 6, 'region')]);
});
test('Embedded CSS', () => {
let input = [
/* 0*/'<html>',
/* 1*/'<head>',
/* 2*/'<style>',
/* 3*/' foo {',
/* 4*/' display: block;',
/* 5*/' color: black;',
/* 6*/' }',
/* 7*/'</style>',
/* 8*/'</head>',
/* 9*/'</html>',
];
assertRanges(input, [r(0, 8), r(1, 7), r(2, 6), r(3, 5)]);
});
test('Embedded CSS - multiple areas', () => {
let input = [
/* 0*/'<html>',
/* 1*/'<head style="color:red">',
/* 2*/'<style>',
/* 3*/' /*',
/* 4*/' foo: true,',
/* 5*/' bar: {}',
/* 6*/' */',
/* 7*/'</style>',
/* 8*/'<style>',
/* 9*/' @keyframes mymove {',
/*10*/' from {top: 0px;}',
/*11*/' }',
/*12*/'</style>',
/*13*/'</head>',
/*14*/'</html>',
];
assertRanges(input, [r(0, 13), r(1, 12), r(2, 6), r(3, 6, 'comment'), r(8, 11), r(9, 10)]);
});
test('Embedded CSS - regions', () => {
let input = [
/* 0*/'<html>',
/* 1*/'<head>',
/* 2*/'<style>',
/* 3*/' /* #region Lalala */',
/* 4*/' /* #region*/',
/* 5*/' x = 9;',
/* 6*/' /* #endregion*/',
/* 7*/' /* #endregion Lalala*/',
/* 8*/'</style>',
/* 9*/'</head>',
/*10*/'</html>',
];
assertRanges(input, [r(0, 9), r(1, 8), r(2, 7), r(3, 7, 'region'), r(4, 6, 'region')]);
});
// test('Embedded JavaScript - multi line comment', () => {
// let input = [
// /* 0*/'<html>',
// /* 1*/'<head>',
// /* 2*/'<script>',
// /* 3*/' /*',
// /* 4*/' * Hello',
// /* 5*/' */',
// /* 6*/'</script>',
// /* 7*/'</head>',
// /* 8*/'</html>',
// ];
// assertRanges(input, [r(0, 7), r(1, 6), r(2, 5), r(3, 5, 'comment')]);
// });
test('Test limit', () => {
let input = [
/* 0*/'<div>',
/* 1*/' <span>',
/* 2*/' <b>',
/* 3*/' ',
/* 4*/' </b>,',
/* 5*/' <b>',
/* 6*/' <pre>',
/* 7*/' ',
/* 8*/' </pre>,',
/* 9*/' <pre>',
/*10*/' ',
/*11*/' </pre>,',
/*12*/' </b>,',
/*13*/' <b>',
/*14*/' ',
/*15*/' </b>,',
/*16*/' <b>',
/*17*/' ',
/*18*/' </b>',
/*19*/' </span>',
/*20*/'</div>',
];
assertRanges(input, [r(0, 19), r(1, 18), r(2, 3), r(5, 11), r(6, 7), r(9, 10), r(13, 14), r(16, 17)], 'no limit', undefined);
assertRanges(input, [r(0, 19), r(1, 18), r(2, 3), r(5, 11), r(6, 7), r(9, 10), r(13, 14), r(16, 17)], 'limit 8', 8);
assertRanges(input, [r(0, 19), r(1, 18), r(2, 3), r(5, 11), r(6, 7), r(13, 14), r(16, 17)], 'limit 7', 7);
assertRanges(input, [r(0, 19), r(1, 18), r(2, 3), r(5, 11), r(13, 14), r(16, 17)], 'limit 6', 6);
assertRanges(input, [r(0, 19), r(1, 18), r(2, 3), r(5, 11), r(13, 14)], 'limit 5', 5);
assertRanges(input, [r(0, 19), r(1, 18), r(2, 3), r(5, 11)], 'limit 4', 4);
assertRanges(input, [r(0, 19), r(1, 18), r(2, 3)], 'limit 3', 3);
assertRanges(input, [r(0, 19), r(1, 18)], 'limit 2', 2);
assertRanges(input, [r(0, 19)], 'limit 1', 1);
});
});
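// Reading the helpers above: r(startLine, endLine, kind?) builds an
// ExpectedIndentRange, so r(3, 7, 'region') asserts that lines 3-7 fold as an
// explicit #region block, while untagged ranges come from HTML/JS/CSS structure.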
zz_generated.conversion.go | // +build !ignore_autogenerated
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
ignite "github.com/weaveworks/ignite/pkg/apis/ignite"
metav1alpha1 "github.com/weaveworks/ignite/pkg/apis/meta/v1alpha1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*FileMapping)(nil), (*ignite.FileMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_FileMapping_To_ignite_FileMapping(a.(*FileMapping), b.(*ignite.FileMapping), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.FileMapping)(nil), (*FileMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_FileMapping_To_v1alpha1_FileMapping(a.(*ignite.FileMapping), b.(*FileMapping), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Image)(nil), (*ignite.Image)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_Image_To_ignite_Image(a.(*Image), b.(*ignite.Image), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.Image)(nil), (*Image)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_Image_To_v1alpha1_Image(a.(*ignite.Image), b.(*Image), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ImageSpec)(nil), (*ignite.ImageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ImageSpec_To_ignite_ImageSpec(a.(*ImageSpec), b.(*ignite.ImageSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.ImageSpec)(nil), (*ImageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_ImageSpec_To_v1alpha1_ImageSpec(a.(*ignite.ImageSpec), b.(*ImageSpec), scope)
}); err != nil {
return err
}
	if err := s.AddGeneratedConversionFunc((*ImageStatus)(nil), (*ignite.ImageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ImageStatus_To_ignite_ImageStatus(a.(*ImageStatus), b.(*ignite.ImageStatus), scope)
	}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.ImageStatus)(nil), (*ImageStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_ImageStatus_To_v1alpha1_ImageStatus(a.(*ignite.ImageStatus), b.(*ImageStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Kernel)(nil), (*ignite.Kernel)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_Kernel_To_ignite_Kernel(a.(*Kernel), b.(*ignite.Kernel), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.Kernel)(nil), (*Kernel)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_Kernel_To_v1alpha1_Kernel(a.(*ignite.Kernel), b.(*Kernel), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*KernelSpec)(nil), (*ignite.KernelSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_KernelSpec_To_ignite_KernelSpec(a.(*KernelSpec), b.(*ignite.KernelSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.KernelSpec)(nil), (*KernelSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_KernelSpec_To_v1alpha1_KernelSpec(a.(*ignite.KernelSpec), b.(*KernelSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*KernelStatus)(nil), (*ignite.KernelStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_KernelStatus_To_ignite_KernelStatus(a.(*KernelStatus), b.(*ignite.KernelStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.KernelStatus)(nil), (*KernelStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_KernelStatus_To_v1alpha1_KernelStatus(a.(*ignite.KernelStatus), b.(*KernelStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*OCIImageSource)(nil), (*ignite.OCIImageSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_OCIImageSource_To_ignite_OCIImageSource(a.(*OCIImageSource), b.(*ignite.OCIImageSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.OCIImageSource)(nil), (*OCIImageSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_OCIImageSource_To_v1alpha1_OCIImageSource(a.(*ignite.OCIImageSource), b.(*OCIImageSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Pool)(nil), (*ignite.Pool)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_Pool_To_ignite_Pool(a.(*Pool), b.(*ignite.Pool), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.Pool)(nil), (*Pool)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_Pool_To_v1alpha1_Pool(a.(*ignite.Pool), b.(*Pool), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*PoolDevice)(nil), (*ignite.PoolDevice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PoolDevice_To_ignite_PoolDevice(a.(*PoolDevice), b.(*ignite.PoolDevice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.PoolDevice)(nil), (*PoolDevice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_PoolDevice_To_v1alpha1_PoolDevice(a.(*ignite.PoolDevice), b.(*PoolDevice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*PoolSpec)(nil), (*ignite.PoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PoolSpec_To_ignite_PoolSpec(a.(*PoolSpec), b.(*ignite.PoolSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.PoolSpec)(nil), (*PoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_PoolSpec_To_v1alpha1_PoolSpec(a.(*ignite.PoolSpec), b.(*PoolSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*PoolStatus)(nil), (*ignite.PoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PoolStatus_To_ignite_PoolStatus(a.(*PoolStatus), b.(*ignite.PoolStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.PoolStatus)(nil), (*PoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_PoolStatus_To_v1alpha1_PoolStatus(a.(*ignite.PoolStatus), b.(*PoolStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*SSH)(nil), (*ignite.SSH)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_SSH_To_ignite_SSH(a.(*SSH), b.(*ignite.SSH), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.SSH)(nil), (*SSH)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_SSH_To_v1alpha1_SSH(a.(*ignite.SSH), b.(*SSH), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*VM)(nil), (*ignite.VM)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VM_To_ignite_VM(a.(*VM), b.(*ignite.VM), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.VM)(nil), (*VM)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_VM_To_v1alpha1_VM(a.(*ignite.VM), b.(*VM), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*VMImageSpec)(nil), (*ignite.VMImageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VMImageSpec_To_ignite_VMImageSpec(a.(*VMImageSpec), b.(*ignite.VMImageSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.VMImageSpec)(nil), (*VMImageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_VMImageSpec_To_v1alpha1_VMImageSpec(a.(*ignite.VMImageSpec), b.(*VMImageSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*VMKernelSpec)(nil), (*ignite.VMKernelSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VMKernelSpec_To_ignite_VMKernelSpec(a.(*VMKernelSpec), b.(*ignite.VMKernelSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.VMKernelSpec)(nil), (*VMKernelSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_VMKernelSpec_To_v1alpha1_VMKernelSpec(a.(*ignite.VMKernelSpec), b.(*VMKernelSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*VMNetworkSpec)(nil), (*ignite.VMNetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VMNetworkSpec_To_ignite_VMNetworkSpec(a.(*VMNetworkSpec), b.(*ignite.VMNetworkSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.VMNetworkSpec)(nil), (*VMNetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_VMNetworkSpec_To_v1alpha1_VMNetworkSpec(a.(*ignite.VMNetworkSpec), b.(*VMNetworkSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*VMSpec)(nil), (*ignite.VMSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VMSpec_To_ignite_VMSpec(a.(*VMSpec), b.(*ignite.VMSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.VMSpec)(nil), (*VMSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_VMSpec_To_v1alpha1_VMSpec(a.(*ignite.VMSpec), b.(*VMSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*VMStatus)(nil), (*ignite.VMStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VMStatus_To_ignite_VMStatus(a.(*VMStatus), b.(*ignite.VMStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ignite.VMStatus)(nil), (*VMStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_VMStatus_To_v1alpha1_VMStatus(a.(*ignite.VMStatus), b.(*VMStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*ignite.ImageSpec)(nil), (*ImageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_ImageSpec_To_v1alpha1_ImageSpec(a.(*ignite.ImageSpec), b.(*ImageSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*ignite.KernelSpec)(nil), (*KernelSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_KernelSpec_To_v1alpha1_KernelSpec(a.(*ignite.KernelSpec), b.(*KernelSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*ignite.OCIImageSource)(nil), (*OCIImageSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_OCIImageSource_To_v1alpha1_OCIImageSource(a.(*ignite.OCIImageSource), b.(*OCIImageSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*ignite.VMImageSpec)(nil), (*VMImageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_VMImageSpec_To_v1alpha1_VMImageSpec(a.(*ignite.VMImageSpec), b.(*VMImageSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*ignite.VMKernelSpec)(nil), (*VMKernelSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_VMKernelSpec_To_v1alpha1_VMKernelSpec(a.(*ignite.VMKernelSpec), b.(*VMKernelSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*ignite.VMSpec)(nil), (*VMSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_VMSpec_To_v1alpha1_VMSpec(a.(*ignite.VMSpec), b.(*VMSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*ignite.VMStatus)(nil), (*VMStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_ignite_VMStatus_To_v1alpha1_VMStatus(a.(*ignite.VMStatus), b.(*VMStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*ImageSpec)(nil), (*ignite.ImageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ImageSpec_To_ignite_ImageSpec(a.(*ImageSpec), b.(*ignite.ImageSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*KernelSpec)(nil), (*ignite.KernelSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_KernelSpec_To_ignite_KernelSpec(a.(*KernelSpec), b.(*ignite.KernelSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*OCIImageSource)(nil), (*ignite.OCIImageSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_OCIImageSource_To_ignite_OCIImageSource(a.(*OCIImageSource), b.(*ignite.OCIImageSource), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*VMImageSpec)(nil), (*ignite.VMImageSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VMImageSpec_To_ignite_VMImageSpec(a.(*VMImageSpec), b.(*ignite.VMImageSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*VMKernelSpec)(nil), (*ignite.VMKernelSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VMKernelSpec_To_ignite_VMKernelSpec(a.(*VMKernelSpec), b.(*ignite.VMKernelSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*VMNetworkSpec)(nil), (*ignite.VMNetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VMNetworkSpec_To_ignite_VMNetworkSpec(a.(*VMNetworkSpec), b.(*ignite.VMNetworkSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*VMSpec)(nil), (*ignite.VMSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VMSpec_To_ignite_VMSpec(a.(*VMSpec), b.(*ignite.VMSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*VMStatus)(nil), (*ignite.VMStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_VMStatus_To_ignite_VMStatus(a.(*VMStatus), b.(*ignite.VMStatus), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_FileMapping_To_ignite_FileMapping(in *FileMapping, out *ignite.FileMapping, s conversion.Scope) error {
out.HostPath = in.HostPath
out.VMPath = in.VMPath
return nil
}
// Convert_v1alpha1_FileMapping_To_ignite_FileMapping is an autogenerated conversion function.
func Convert_v1alpha1_FileMapping_To_ignite_FileMapping(in *FileMapping, out *ignite.FileMapping, s conversion.Scope) error {
return autoConvert_v1alpha1_FileMapping_To_ignite_FileMapping(in, out, s)
}
func autoConvert_ignite_FileMapping_To_v1alpha1_FileMapping(in *ignite.FileMapping, out *FileMapping, s conversion.Scope) error {
out.HostPath = in.HostPath
out.VMPath = in.VMPath
return nil
}
// Convert_ignite_FileMapping_To_v1alpha1_FileMapping is an autogenerated conversion function.
func Convert_ignite_FileMapping_To_v1alpha1_FileMapping(in *ignite.FileMapping, out *FileMapping, s conversion.Scope) error {
return autoConvert_ignite_FileMapping_To_v1alpha1_FileMapping(in, out, s)
}
func autoConvert_v1alpha1_Image_To_ignite_Image(in *Image, out *ignite.Image, s conversion.Scope) error {
out.TypeMeta = in.TypeMeta
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_ImageSpec_To_ignite_ImageSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1alpha1_ImageStatus_To_ignite_ImageStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_Image_To_ignite_Image is an autogenerated conversion function.
func Convert_v1alpha1_Image_To_ignite_Image(in *Image, out *ignite.Image, s conversion.Scope) error {
return autoConvert_v1alpha1_Image_To_ignite_Image(in, out, s)
}
func autoConvert_ignite_Image_To_v1alpha1_Image(in *ignite.Image, out *Image, s conversion.Scope) error {
out.TypeMeta = in.TypeMeta
out.ObjectMeta = in.ObjectMeta
if err := Convert_ignite_ImageSpec_To_v1alpha1_ImageSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_ignite_ImageStatus_To_v1alpha1_ImageStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_ignite_Image_To_v1alpha1_Image is an autogenerated conversion function.
func Convert_ignite_Image_To_v1alpha1_Image(in *ignite.Image, out *Image, s conversion.Scope) error {
return autoConvert_ignite_Image_To_v1alpha1_Image(in, out, s)
}
func autoConvert_v1alpha1_ImageSpec_To_ignite_ImageSpec(in *ImageSpec, out *ignite.ImageSpec, s conversion.Scope) error {
// WARNING: in.OCIClaim requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_ignite_ImageSpec_To_v1alpha1_ImageSpec(in *ignite.ImageSpec, out *ImageSpec, s conversion.Scope) error {
// WARNING: in.OCI requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1alpha1_ImageStatus_To_ignite_ImageStatus(in *ImageStatus, out *ignite.ImageStatus, s conversion.Scope) error {
if err := Convert_v1alpha1_OCIImageSource_To_ignite_OCIImageSource(&in.OCISource, &out.OCISource, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_ImageStatus_To_ignite_ImageStatus is an autogenerated conversion function.
func Convert_v1alpha1_ImageStatus_To_ignite_ImageStatus(in *ImageStatus, out *ignite.ImageStatus, s conversion.Scope) error {
return autoConvert_v1alpha1_ImageStatus_To_ignite_ImageStatus(in, out, s)
}
func autoConvert_ignite_ImageStatus_To_v1alpha1_ImageStatus(in *ignite.ImageStatus, out *ImageStatus, s conversion.Scope) error {
if err := Convert_ignite_OCIImageSource_To_v1alpha1_OCIImageSource(&in.OCISource, &out.OCISource, s); err != nil {
return err
}
return nil
}
// Convert_ignite_ImageStatus_To_v1alpha1_ImageStatus is an autogenerated conversion function.
func Convert_ignite_ImageStatus_To_v1alpha1_ImageStatus(in *ignite.ImageStatus, out *ImageStatus, s conversion.Scope) error {
return autoConvert_ignite_ImageStatus_To_v1alpha1_ImageStatus(in, out, s)
}
func autoConvert_v1alpha1_Kernel_To_ignite_Kernel(in *Kernel, out *ignite.Kernel, s conversion.Scope) error {
out.TypeMeta = in.TypeMeta
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_KernelSpec_To_ignite_KernelSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1alpha1_KernelStatus_To_ignite_KernelStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_Kernel_To_ignite_Kernel is an autogenerated conversion function.
func Convert_v1alpha1_Kernel_To_ignite_Kernel(in *Kernel, out *ignite.Kernel, s conversion.Scope) error {
return autoConvert_v1alpha1_Kernel_To_ignite_Kernel(in, out, s)
}
func autoConvert_ignite_Kernel_To_v1alpha1_Kernel(in *ignite.Kernel, out *Kernel, s conversion.Scope) error {
out.TypeMeta = in.TypeMeta
out.ObjectMeta = in.ObjectMeta
if err := Convert_ignite_KernelSpec_To_v1alpha1_KernelSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_ignite_KernelStatus_To_v1alpha1_KernelStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_ignite_Kernel_To_v1alpha1_Kernel is an autogenerated conversion function.
func Convert_ignite_Kernel_To_v1alpha1_Kernel(in *ignite.Kernel, out *Kernel, s conversion.Scope) error {
return autoConvert_ignite_Kernel_To_v1alpha1_Kernel(in, out, s)
}
func autoConvert_v1alpha1_KernelSpec_To_ignite_KernelSpec(in *KernelSpec, out *ignite.KernelSpec, s conversion.Scope) error {
// WARNING: in.OCIClaim requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_ignite_KernelSpec_To_v1alpha1_KernelSpec(in *ignite.KernelSpec, out *KernelSpec, s conversion.Scope) error {
// WARNING: in.OCI requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1alpha1_KernelStatus_To_ignite_KernelStatus(in *KernelStatus, out *ignite.KernelStatus, s conversion.Scope) error {
out.Version = in.Version
if err := Convert_v1alpha1_OCIImageSource_To_ignite_OCIImageSource(&in.OCISource, &out.OCISource, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_KernelStatus_To_ignite_KernelStatus is an autogenerated conversion function.
func Convert_v1alpha1_KernelStatus_To_ignite_KernelStatus(in *KernelStatus, out *ignite.KernelStatus, s conversion.Scope) error {
return autoConvert_v1alpha1_KernelStatus_To_ignite_KernelStatus(in, out, s)
}
func autoConvert_ignite_KernelStatus_To_v1alpha1_KernelStatus(in *ignite.KernelStatus, out *KernelStatus, s conversion.Scope) error {
out.Version = in.Version
if err := Convert_ignite_OCIImageSource_To_v1alpha1_OCIImageSource(&in.OCISource, &out.OCISource, s); err != nil {
return err
}
return nil
}
// Convert_ignite_KernelStatus_To_v1alpha1_KernelStatus is an autogenerated conversion function.
func Convert_ignite_KernelStatus_To_v1alpha1_KernelStatus(in *ignite.KernelStatus, out *KernelStatus, s conversion.Scope) error {
return autoConvert_ignite_KernelStatus_To_v1alpha1_KernelStatus(in, out, s)
}
func autoConvert_v1alpha1_OCIImageSource_To_ignite_OCIImageSource(in *OCIImageSource, out *ignite.OCIImageSource, s conversion.Scope) error {
// WARNING: in.ID requires manual conversion: inconvertible types (string vs *github.com/weaveworks/ignite/pkg/apis/meta/v1alpha1.OCIContentID)
out.Size = in.Size
// WARNING: in.RepoDigests requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_ignite_OCIImageSource_To_v1alpha1_OCIImageSource(in *ignite.OCIImageSource, out *OCIImageSource, s conversion.Scope) error {
// WARNING: in.ID requires manual conversion: inconvertible types (*github.com/weaveworks/ignite/pkg/apis/meta/v1alpha1.OCIContentID vs string)
out.Size = in.Size
return nil
}
func autoConvert_v1alpha1_Pool_To_ignite_Pool(in *Pool, out *ignite.Pool, s conversion.Scope) error {
out.TypeMeta = in.TypeMeta
if err := Convert_v1alpha1_PoolSpec_To_ignite_PoolSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1alpha1_PoolStatus_To_ignite_PoolStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_Pool_To_ignite_Pool is an autogenerated conversion function.
func Convert_v1alpha1_Pool_To_ignite_Pool(in *Pool, out *ignite.Pool, s conversion.Scope) error {
return autoConvert_v1alpha1_Pool_To_ignite_Pool(in, out, s)
}
func autoConvert_ignite_Pool_To_v1alpha1_Pool(in *ignite.Pool, out *Pool, s conversion.Scope) error {
out.TypeMeta = in.TypeMeta
if err := Convert_ignite_PoolSpec_To_v1alpha1_PoolSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_ignite_PoolStatus_To_v1alpha1_PoolStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_ignite_Pool_To_v1alpha1_Pool is an autogenerated conversion function.
func Convert_ignite_Pool_To_v1alpha1_Pool(in *ignite.Pool, out *Pool, s conversion.Scope) error {
return autoConvert_ignite_Pool_To_v1alpha1_Pool(in, out, s)
}
func autoConvert_v1alpha1_PoolDevice_To_ignite_PoolDevice(in *PoolDevice, out *ignite.PoolDevice, s conversion.Scope) error {
out.Size = in.Size
out.Parent = in.Parent
out.Type = ignite.PoolDeviceType(in.Type)
out.MetadataPath = in.MetadataPath
return nil
}
// Convert_v1alpha1_PoolDevice_To_ignite_PoolDevice is an autogenerated conversion function.
func Convert_v1alpha1_PoolDevice_To_ignite_PoolDevice(in *PoolDevice, out *ignite.PoolDevice, s conversion.Scope) error {
return autoConvert_v1alpha1_PoolDevice_To_ignite_PoolDevice(in, out, s)
}
func autoConvert_ignite_PoolDevice_To_v1alpha1_PoolDevice(in *ignite.PoolDevice, out *PoolDevice, s conversion.Scope) error {
out.Size = in.Size
out.Parent = in.Parent
out.Type = PoolDeviceType(in.Type)
out.MetadataPath = in.MetadataPath
return nil
}
// Convert_ignite_PoolDevice_To_v1alpha1_PoolDevice is an autogenerated conversion function.
func Convert_ignite_PoolDevice_To_v1alpha1_PoolDevice(in *ignite.PoolDevice, out *PoolDevice, s conversion.Scope) error {
return autoConvert_ignite_PoolDevice_To_v1alpha1_PoolDevice(in, out, s)
}
func autoConvert_v1alpha1_PoolSpec_To_ignite_PoolSpec(in *PoolSpec, out *ignite.PoolSpec, s conversion.Scope) error {
out.MetadataSize = in.MetadataSize
out.DataSize = in.DataSize
out.AllocationSize = in.AllocationSize
out.MetadataPath = in.MetadataPath
out.DataPath = in.DataPath
return nil
}
// Convert_v1alpha1_PoolSpec_To_ignite_PoolSpec is an autogenerated conversion function.
func Convert_v1alpha1_PoolSpec_To_ignite_PoolSpec(in *PoolSpec, out *ignite.PoolSpec, s conversion.Scope) error {
return autoConvert_v1alpha1_PoolSpec_To_ignite_PoolSpec(in, out, s)
}
func autoConvert_ignite_PoolSpec_To_v1alpha1_PoolSpec(in *ignite.PoolSpec, out *PoolSpec, s conversion.Scope) error {
out.MetadataSize = in.MetadataSize
out.DataSize = in.DataSize
out.AllocationSize = in.AllocationSize
out.MetadataPath = in.MetadataPath
out.DataPath = in.DataPath
return nil
}
// Convert_ignite_PoolSpec_To_v1alpha1_PoolSpec is an autogenerated conversion function.
func Convert_ignite_PoolSpec_To_v1alpha1_PoolSpec(in *ignite.PoolSpec, out *PoolSpec, s conversion.Scope) error {
return autoConvert_ignite_PoolSpec_To_v1alpha1_PoolSpec(in, out, s)
}
func autoConvert_v1alpha1_PoolStatus_To_ignite_PoolStatus(in *PoolStatus, out *ignite.PoolStatus, s conversion.Scope) error {
out.Devices = *(*[]*ignite.PoolDevice)(unsafe.Pointer(&in.Devices))
return nil
}
// Convert_v1alpha1_PoolStatus_To_ignite_PoolStatus is an autogenerated conversion function.
func Convert_v1alpha1_PoolStatus_To_ignite_PoolStatus(in *PoolStatus, out *ignite.PoolStatus, s conversion.Scope) error {
return autoConvert_v1alpha1_PoolStatus_To_ignite_PoolStatus(in, out, s)
}
func autoConvert_ignite_PoolStatus_To_v1alpha1_PoolStatus(in *ignite.PoolStatus, out *PoolStatus, s conversion.Scope) error {
out.Devices = *(*[]*PoolDevice)(unsafe.Pointer(&in.Devices))
return nil
}
// Convert_ignite_PoolStatus_To_v1alpha1_PoolStatus is an autogenerated conversion function.
func Convert_ignite_PoolStatus_To_v1alpha1_PoolStatus(in *ignite.PoolStatus, out *PoolStatus, s conversion.Scope) error {
return autoConvert_ignite_PoolStatus_To_v1alpha1_PoolStatus(in, out, s)
}
func autoConvert_v1alpha1_SSH_To_ignite_SSH(in *SSH, out *ignite.SSH, s conversion.Scope) error {
out.Generate = in.Generate
out.PublicKey = in.PublicKey
return nil
}
// Convert_v1alpha1_SSH_To_ignite_SSH is an autogenerated conversion function.
func Convert_v1alpha1_SSH_To_ignite_SSH(in *SSH, out *ignite.SSH, s conversion.Scope) error {
return autoConvert_v1alpha1_SSH_To_ignite_SSH(in, out, s)
}
func autoConvert_ignite_SSH_To_v1alpha1_SSH(in *ignite.SSH, out *SSH, s conversion.Scope) error {
out.Generate = in.Generate
out.PublicKey = in.PublicKey
return nil
}
// Convert_ignite_SSH_To_v1alpha1_SSH is an autogenerated conversion function.
func Convert_ignite_SSH_To_v1alpha1_SSH(in *ignite.SSH, out *SSH, s conversion.Scope) error {
return autoConvert_ignite_SSH_To_v1alpha1_SSH(in, out, s)
}
func autoConvert_v1alpha1_VM_To_ignite_VM(in *VM, out *ignite.VM, s conversion.Scope) error {
out.TypeMeta = in.TypeMeta
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_VMSpec_To_ignite_VMSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1alpha1_VMStatus_To_ignite_VMStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_VM_To_ignite_VM is an autogenerated conversion function.
func Convert_v1alpha1_VM_To_ignite_VM(in *VM, out *ignite.VM, s conversion.Scope) error {
return autoConvert_v1alpha1_VM_To_ignite_VM(in, out, s)
}
func autoConvert_ignite_VM_To_v1alpha1_VM(in *ignite.VM, out *VM, s conversion.Scope) error {
out.TypeMeta = in.TypeMeta
out.ObjectMeta = in.ObjectMeta
if err := Convert_ignite_VMSpec_To_v1alpha1_VMSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_ignite_VMStatus_To_v1alpha1_VMStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_ignite_VM_To_v1alpha1_VM is an autogenerated conversion function.
func Convert_ignite_VM_To_v1alpha1_VM(in *ignite.VM, out *VM, s conversion.Scope) error {
return autoConvert_ignite_VM_To_v1alpha1_VM(in, out, s)
}
func autoConvert_v1alpha1_VMImageSpec_To_ignite_VMImageSpec(in *VMImageSpec, out *ignite.VMImageSpec, s conversion.Scope) error {
// WARNING: in.OCIClaim requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_ignite_VMImageSpec_To_v1alpha1_VMImageSpec(in *ignite.VMImageSpec, out *VMImageSpec, s conversion.Scope) error {
// WARNING: in.OCI requires manual conversion: does not exist in peer-type
return nil
}
func autoConvert_v1alpha1_VMKernelSpec_To_ignite_VMKernelSpec(in *VMKernelSpec, out *ignite.VMKernelSpec, s conversion.Scope) error {
// WARNING: in.OCIClaim requires manual conversion: does not exist in peer-type
out.CmdLine = in.CmdLine
return nil
}
func autoConvert_ignite_VMKernelSpec_To_v1alpha1_VMKernelSpec(in *ignite.VMKernelSpec, out *VMKernelSpec, s conversion.Scope) error {
// WARNING: in.OCI requires manual conversion: does not exist in peer-type
out.CmdLine = in.CmdLine
return nil
}
func autoConvert_v1alpha1_VMNetworkSpec_To_ignite_VMNetworkSpec(in *VMNetworkSpec, out *ignite.VMNetworkSpec, s conversion.Scope) error {
// WARNING: in.Mode requires manual conversion: does not exist in peer-type
out.Ports = *(*metav1alpha1.PortMappings)(unsafe.Pointer(&in.Ports))
return nil
}
func autoConvert_ignite_VMNetworkSpec_To_v1alpha1_VMNetworkSpec(in *ignite.VMNetworkSpec, out *VMNetworkSpec, s conversion.Scope) error {
out.Ports = *(*metav1alpha1.PortMappings)(unsafe.Pointer(&in.Ports))
return nil
}
// Convert_ignite_VMNetworkSpec_To_v1alpha1_VMNetworkSpec is an autogenerated conversion function.
func Convert_ignite_VMNetworkSpec_To_v1alpha1_VMNetworkSpec(in *ignite.VMNetworkSpec, out *VMNetworkSpec, s conversion.Scope) error {
return autoConvert_ignite_VMNetworkSpec_To_v1alpha1_VMNetworkSpec(in, out, s)
}
func autoConvert_v1alpha1_VMSpec_To_ignite_VMSpec(in *VMSpec, out *ignite.VMSpec, s conversion.Scope) error {
if err := Convert_v1alpha1_VMImageSpec_To_ignite_VMImageSpec(&in.Image, &out.Image, s); err != nil {
return err
}
if err := Convert_v1alpha1_VMKernelSpec_To_ignite_VMKernelSpec(&in.Kernel, &out.Kernel, s); err != nil {
return err
}
out.CPUs = in.CPUs
out.Memory = in.Memory
out.DiskSize = in.DiskSize
if err := Convert_v1alpha1_VMNetworkSpec_To_ignite_VMNetworkSpec(&in.Network, &out.Network, s); err != nil {
return err
}
out.CopyFiles = *(*[]ignite.FileMapping)(unsafe.Pointer(&in.CopyFiles))
out.SSH = (*ignite.SSH)(unsafe.Pointer(in.SSH))
return nil
}
func autoConvert_ignite_VMSpec_To_v1alpha1_VMSpec(in *ignite.VMSpec, out *VMSpec, s conversion.Scope) error {
if err := Convert_ignite_VMImageSpec_To_v1alpha1_VMImageSpec(&in.Image, &out.Image, s); err != nil {
return err
}
if err := Convert_ignite_VMKernelSpec_To_v1alpha1_VMKernelSpec(&in.Kernel, &out.Kernel, s); err != nil {
return err
}
out.CPUs = in.CPUs
out.Memory = in.Memory
out.DiskSize = in.DiskSize
if err := Convert_ignite_VMNetworkSpec_To_v1alpha1_VMNetworkSpec(&in.Network, &out.Network, s); err != nil {
return err
}
// WARNING: in.Storage requires manual conversion: does not exist in peer-type
out.CopyFiles = *(*[]FileMapping)(unsafe.Pointer(&in.CopyFiles))
out.SSH = (*SSH)(unsafe.Pointer(in.SSH))
return nil
}
func autoConvert_v1alpha1_VMStatus_To_ignite_VMStatus(in *VMStatus, out *ignite.VMStatus, s conversion.Scope) error {
// WARNING: in.State requires manual conversion: does not exist in peer-type
out.IPAddresses = *(*metav1alpha1.IPAddresses)(unsafe.Pointer(&in.IPAddresses))
if err := Convert_v1alpha1_OCIImageSource_To_ignite_OCIImageSource(&in.Image, &out.Image, s); err != nil {
return err
}
if err := Convert_v1alpha1_OCIImageSource_To_ignite_OCIImageSource(&in.Kernel, &out.Kernel, s); err != nil {
return err
}
return nil
}
func autoConvert_ignite_VMStatus_To_v1alpha1_VMStatus(in *ignite.VMStatus, out *VMStatus, s conversion.Scope) error {
// WARNING: in.Running requires manual conversion: does not exist in peer-type
// WARNING: in.Runtime requires manual conversion: does not exist in peer-type
// WARNING: in.StartTime requires manual conversion: does not exist in peer-type
out.IPAddresses = *(*metav1alpha1.IPAddresses)(unsafe.Pointer(&in.IPAddresses))
if err := Convert_ignite_OCIImageSource_To_v1alpha1_OCIImageSource(&in.Image, &out.Image, s); err != nil {
return err
}
if err := Convert_ignite_OCIImageSource_To_v1alpha1_OCIImageSource(&in.Kernel, &out.Kernel, s); err != nil {
return err
}
return nil
}
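// Illustrative use of a generated converter (inputs are hypothetical; a real
// caller would pass a conversion.Scope obtained from the scheme):
//   var out ignite.Image
//   err := Convert_v1alpha1_Image_To_ignite_Image(&v1alpha1Image, &out, scope)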
Intra_MLP.py | import torch
import numpy
# codes of this function are borrowed from https://github.com/yanx27/Pointnet_Pointnet2_pytorch/blob/master/models/pointnet2_utils.py
def index_points(device, points, idx):
"""
Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S]
Return:
        new_points: indexed points data, [B, S, C]
"""
B = points.shape[0]
view_shape = list(idx.shape)
view_shape[1:] = [1] * (len(view_shape) - 1)
repeat_shape = list(idx.shape)
repeat_shape[0] = 1
# batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)
batch_indices = torch.arange(B, dtype=torch.long).cuda().view(view_shape).repeat(repeat_shape)
new_points = points[batch_indices, idx, :]
return new_points
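# Shape sketch (hypothetical tensors): points of shape (B, N, C) indexed with
# idx of shape (B, S) yields new_points of shape (B, S, C).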
def knn_l2(device, net, k, u):
'''
Input:
k: int32, number of k in k-nn search
net: (batch_size, npoint, c) float32 array, points
u: int32, block size
Output:
idx: (batch_size, npoint, k) int32 array, indices to input points
'''
INF = 1e8
batch_size = net.size(0)
npoint = net.size(1)
n_channel = net.size(2)
square = torch.pow(torch.norm(net, dim=2,keepdim=True),2)
def u_block(batch_size, npoint, u):
block = numpy.zeros([batch_size, npoint, npoint])
n = npoint // u
for i in range(n):
block[:, (i*u):(i*u+u), (i*u):(i*u+u)] = numpy.ones([batch_size, u, u]) * (-INF)
return block
# minus_distance = 2 * torch.matmul(net, net.transpose(2,1)) - square - square.transpose(2,1) + torch.Tensor(u_block(batch_size, npoint, u)).to(device)
minus_distance = 2 * torch.matmul(net, net.transpose(2,1)) - square - square.transpose(2,1) + torch.Tensor(u_block(batch_size, npoint, u)).cuda()
_, indices = torch.topk(minus_distance, k, largest=True, sorted=False)
return indices
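# Note: minus_distance equals -||x_i - x_j||^2 up to the added mask, so taking
# the k largest entries selects the k nearest neighbors; the -INF diagonal
# blocks of size u exclude candidates from a point's own block.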
get_grok_repos.py | import logging
from github import Github
from typing import Dict, Tuple, List
import os
import argparse
import traceback
from collections import Counter
from tenacity import retry, stop_after_attempt, wait_exponential
from time import sleep
import pandas as pd
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
# https://docs.github.com/en/github/searching-for-information-on-github/searching-on-github/searching-for-repositories#search-by-when-a-repository-was-created-or-last-updated
def get_query_string_to_exclude()->str:
"""
    Generates the language-exclusion part of the search query instead of hard-coding it, and appends it to the query string
:return:
"""
logger.info("Inside function to generate query to hit API")
languages_to_exclude = ['Jinja', 'Shell', 'YAML', 'INI', 'Perl', 'Haskell']
exclude_languages = " ".join(["NOT language:{}".format(language) for language in languages_to_exclude])
return " " + exclude_languages
def get_matching_code(args: Dict)->None:
    """
    Gets the top matching code files where the "grok" pattern is used, excluding YAML and similar languages
    """
    logger.info("Inside to get top repositories function")
    master_data = []
    observed_licences = []
    try:
        g_obj = Github(args['token'], timeout=3000)  # Overriding timeout of 3000 seconds
        pattern_file_extension = '"grok" in:file extension:j2'
        lang_to_exclude = get_query_string_to_exclude()
        _query_str = f"{pattern_file_extension}{lang_to_exclude}"
        logger.info(f"Processing query {_query_str}")
        sleep(10)
        results = g_obj.search_code(_query_str)
        for repo in results:
            master_data.append(vars(repo))
            observed_licences.append(repo.license)
            file_name = str(repo).split("ContentFile(path=")[1].replace('"',"")[:-1].replace("/", "_")
            path_to_dump = os.path.join(os.getcwd(), "data", file_name)
            logger.info("Dumping file {}".format(file_name))
            with open(path_to_dump, "wb") as f:
                f.write(repo.decoded_content)
        logger.info(Counter(observed_licences))
    except Exception as e:
        logger.error(e)
        logger.error(traceback.format_exc())
    pd.DataFrame(master_data).to_csv("RepoData.csv", index=False)
def get_inputs()->Dict:
"""Gets the username and password from the console """
parser = argparse.ArgumentParser()
parser.add_argument("--token", dest="token", help="Enter the oAuth token", required=True)
args = vars(parser.parse_args())
return args
def main():
logger.info("Inside Main")
args = get_inputs()
get_matching_code(args=args)
if __name__ == '__main__':
main()
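# Example invocation (hypothetical token value):
#   python get_grok_repos.py --token ghp_xxx
# Matching files are dumped under ./data and repository metadata to RepoData.csv.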
container_create_unix_test.go | // +build !windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"context"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/contrib/apparmor"
"github.com/containerd/containerd/contrib/seccomp"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/oci"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runc/libcontainer/devices"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"github.com/containerd/cri/pkg/annotations"
"github.com/containerd/cri/pkg/config"
"github.com/containerd/cri/pkg/containerd/opts"
ctrdutil "github.com/containerd/cri/pkg/containerd/util"
ostesting "github.com/containerd/cri/pkg/os/testing"
"github.com/containerd/cri/pkg/util"
)
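// getCreateContainerTestData returns the shared container config, sandbox
// config, image config and a spec-checking callback used by the container
// creation tests below.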
func getCreateContainerTestData() (*runtime.ContainerConfig, *runtime.PodSandboxConfig,
*imagespec.ImageConfig, func(*testing.T, string, string, uint32, *runtimespec.Spec)) {
config := &runtime.ContainerConfig{
Metadata: &runtime.ContainerMetadata{
Name: "test-name",
Attempt: 1,
},
Image: &runtime.ImageSpec{
Image: "sha256:c75bebcdd211f41b3a460c7bf82970ed6c75acaab9cd4c9a4e125b03ca113799",
},
Command: []string{"test", "command"},
Args: []string{"test", "args"},
WorkingDir: "test-cwd",
Envs: []*runtime.KeyValue{
{Key: "k1", Value: "v1"},
{Key: "k2", Value: "v2"},
{Key: "k3", Value: "v3=v3bis"},
{Key: "k4", Value: "v4=v4bis=foop"},
},
Mounts: []*runtime.Mount{
// everything default
{
ContainerPath: "container-path-1",
HostPath: "host-path-1",
},
// readOnly
{
ContainerPath: "container-path-2",
HostPath: "host-path-2",
Readonly: true,
},
},
Labels: map[string]string{"a": "b"},
Annotations: map[string]string{"ca-c": "ca-d"},
Linux: &runtime.LinuxContainerConfig{
Resources: &runtime.LinuxContainerResources{
CpuPeriod: 100,
CpuQuota: 200,
CpuShares: 300,
MemoryLimitInBytes: 400,
OomScoreAdj: 500,
CpusetCpus: "0-1",
CpusetMems: "2-3",
},
SecurityContext: &runtime.LinuxContainerSecurityContext{
SupplementalGroups: []int64{1111, 2222},
NoNewPrivs: true,
},
},
}
sandboxConfig := &runtime.PodSandboxConfig{
Metadata: &runtime.PodSandboxMetadata{
Name: "test-sandbox-name",
Uid: "test-sandbox-uid",
Namespace: "test-sandbox-ns",
Attempt: 2,
},
Annotations: map[string]string{"c": "d"},
Linux: &runtime.LinuxPodSandboxConfig{
CgroupParent: "/test/cgroup/parent",
SecurityContext: &runtime.LinuxSandboxSecurityContext{},
},
}
imageConfig := &imagespec.ImageConfig{
Env: []string{"ik1=iv1", "ik2=iv2", "ik3=iv3=iv3bis", "ik4=iv4=iv4bis=boop"},
Entrypoint: []string{"/entrypoint"},
Cmd: []string{"cmd"},
WorkingDir: "/workspace",
}
specCheck := func(t *testing.T, id string, sandboxID string, sandboxPid uint32, spec *runtimespec.Spec) {
assert.Equal(t, relativeRootfsPath, spec.Root.Path)
assert.Equal(t, []string{"test", "command", "test", "args"}, spec.Process.Args)
assert.Equal(t, "test-cwd", spec.Process.Cwd)
assert.Contains(t, spec.Process.Env, "k1=v1", "k2=v2", "k3=v3=v3bis", "ik4=iv4=iv4bis=boop")
assert.Contains(t, spec.Process.Env, "ik1=iv1", "ik2=iv2", "ik3=iv3=iv3bis", "k4=v4=v4bis=foop")
t.Logf("Check cgroups bind mount")
checkMount(t, spec.Mounts, "cgroup", "/sys/fs/cgroup", "cgroup", []string{"ro"}, nil)
t.Logf("Check bind mount")
checkMount(t, spec.Mounts, "host-path-1", "container-path-1", "bind", []string{"rbind", "rprivate", "rw"}, nil)
checkMount(t, spec.Mounts, "host-path-2", "container-path-2", "bind", []string{"rbind", "rprivate", "ro"}, nil)
t.Logf("Check resource limits")
assert.EqualValues(t, *spec.Linux.Resources.CPU.Period, 100)
assert.EqualValues(t, *spec.Linux.Resources.CPU.Quota, 200)
assert.EqualValues(t, *spec.Linux.Resources.CPU.Shares, 300)
assert.EqualValues(t, spec.Linux.Resources.CPU.Cpus, "0-1")
assert.EqualValues(t, spec.Linux.Resources.CPU.Mems, "2-3")
assert.EqualValues(t, *spec.Linux.Resources.Memory.Limit, 400)
assert.EqualValues(t, *spec.Process.OOMScoreAdj, 500)
t.Logf("Check supplemental groups")
assert.Contains(t, spec.Process.User.AdditionalGids, uint32(1111))
assert.Contains(t, spec.Process.User.AdditionalGids, uint32(2222))
t.Logf("Check no_new_privs")
assert.Equal(t, spec.Process.NoNewPrivileges, true)
t.Logf("Check cgroup path")
assert.Equal(t, getCgroupsPath("/test/cgroup/parent", id), spec.Linux.CgroupsPath)
t.Logf("Check namespaces")
assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
Type: runtimespec.NetworkNamespace,
Path: opts.GetNetworkNamespace(sandboxPid),
})
assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
Type: runtimespec.IPCNamespace,
Path: opts.GetIPCNamespace(sandboxPid),
})
assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
Type: runtimespec.UTSNamespace,
Path: opts.GetUTSNamespace(sandboxPid),
})
assert.Contains(t, spec.Linux.Namespaces, runtimespec.LinuxNamespace{
Type: runtimespec.PIDNamespace,
Path: opts.GetPIDNamespace(sandboxPid),
})
t.Logf("Check PodSandbox annotations")
assert.Contains(t, spec.Annotations, annotations.SandboxID)
assert.EqualValues(t, spec.Annotations[annotations.SandboxID], sandboxID)
assert.Contains(t, spec.Annotations, annotations.ContainerType)
assert.EqualValues(t, spec.Annotations[annotations.ContainerType], annotations.ContainerTypeContainer)
}
return config, sandboxConfig, imageConfig, specCheck
}
func TestContainerCapabilities(t *testing.T) {
testID := "test-id"
testSandboxID := "sandbox-id"
testPid := uint32(1234)
for desc, test := range map[string]struct {
capability *runtime.Capability
includes []string
excludes []string
}{
"should be able to add/drop capabilities": {
capability: &runtime.Capability{
AddCapabilities: []string{"SYS_ADMIN"},
DropCapabilities: []string{"CHOWN"},
},
includes: []string{"CAP_SYS_ADMIN"},
excludes: []string{"CAP_CHOWN"},
},
"should be able to add all capabilities": {
capability: &runtime.Capability{
AddCapabilities: []string{"ALL"},
},
includes: oci.GetAllCapabilities(),
},
"should be able to drop all capabilities": {
capability: &runtime.Capability{
DropCapabilities: []string{"ALL"},
},
excludes: oci.GetAllCapabilities(),
},
"should be able to drop capabilities with add all": {
capability: &runtime.Capability{
AddCapabilities: []string{"ALL"},
DropCapabilities: []string{"CHOWN"},
},
includes: util.SubtractStringSlice(oci.GetAllCapabilities(), "CAP_CHOWN"),
excludes: []string{"CAP_CHOWN"},
},
"should be able to add capabilities with drop all": {
capability: &runtime.Capability{
AddCapabilities: []string{"SYS_ADMIN"},
DropCapabilities: []string{"ALL"},
},
includes: []string{"CAP_SYS_ADMIN"},
excludes: util.SubtractStringSlice(oci.GetAllCapabilities(), "CAP_SYS_ADMIN"),
},
} {
t.Logf("TestCase %q", desc)
containerConfig, sandboxConfig, imageConfig, specCheck := getCreateContainerTestData()
ociRuntime := config.Runtime{}
c := newTestCRIService()
containerConfig.Linux.SecurityContext.Capabilities = test.capability
spec, err := c.containerSpec(testID, testSandboxID, testPid, "", containerConfig, sandboxConfig, imageConfig, nil, ociRuntime)
require.NoError(t, err)
specCheck(t, testID, testSandboxID, testPid, spec)
for _, include := range test.includes {
assert.Contains(t, spec.Process.Capabilities.Bounding, include)
assert.Contains(t, spec.Process.Capabilities.Effective, include)
assert.Contains(t, spec.Process.Capabilities.Inheritable, include)
assert.Contains(t, spec.Process.Capabilities.Permitted, include)
}
for _, exclude := range test.excludes {
assert.NotContains(t, spec.Process.Capabilities.Bounding, exclude)
assert.NotContains(t, spec.Process.Capabilities.Effective, exclude)
assert.NotContains(t, spec.Process.Capabilities.Inheritable, exclude)
assert.NotContains(t, spec.Process.Capabilities.Permitted, exclude)
}
assert.Empty(t, spec.Process.Capabilities.Ambient)
}
}
func TestContainerSpecTty(t *testing.T) {
testID := "test-id"
testSandboxID := "sandbox-id"
testPid := uint32(1234)
containerConfig, sandboxConfig, imageConfig, specCheck := getCreateContainerTestData()
ociRuntime := config.Runtime{}
c := newTestCRIService()
for _, tty := range []bool{true, false} {
containerConfig.Tty = tty
spec, err := c.containerSpec(testID, testSandboxID, testPid, "", containerConfig, sandboxConfig, imageConfig, nil, ociRuntime)
require.NoError(t, err)
specCheck(t, testID, testSandboxID, testPid, spec)
assert.Equal(t, tty, spec.Process.Terminal)
if tty {
assert.Contains(t, spec.Process.Env, "TERM=xterm")
} else {
assert.NotContains(t, spec.Process.Env, "TERM=xterm")
}
}
}
func TestContainerSpecDefaultPath(t *testing.T) {
testID := "test-id"
testSandboxID := "sandbox-id"
testPid := uint32(1234)
expectedDefault := "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
containerConfig, sandboxConfig, imageConfig, specCheck := getCreateContainerTestData()
ociRuntime := config.Runtime{}
c := newTestCRIService()
for _, pathenv := range []string{"", "PATH=/usr/local/bin/games"} {
expected := expectedDefault
if pathenv != "" {
imageConfig.Env = append(imageConfig.Env, pathenv)
expected = pathenv
}
spec, err := c.containerSpec(testID, testSandboxID, testPid, "", containerConfig, sandboxConfig, imageConfig, nil, ociRuntime)
require.NoError(t, err)
specCheck(t, testID, testSandboxID, testPid, spec)
assert.Contains(t, spec.Process.Env, expected)
}
}
func TestContainerSpecReadonlyRootfs(t *testing.T) {
testID := "test-id"
testSandboxID := "sandbox-id"
testPid := uint32(1234)
containerConfig, sandboxConfig, imageConfig, specCheck := getCreateContainerTestData()
ociRuntime := config.Runtime{}
c := newTestCRIService()
for _, readonly := range []bool{true, false} {
containerConfig.Linux.SecurityContext.ReadonlyRootfs = readonly
spec, err := c.containerSpec(testID, testSandboxID, testPid, "", containerConfig, sandboxConfig, imageConfig, nil, ociRuntime)
require.NoError(t, err)
specCheck(t, testID, testSandboxID, testPid, spec)
assert.Equal(t, readonly, spec.Root.Readonly)
}
}
func TestContainerSpecWithExtraMounts(t *testing.T) {
testID := "test-id"
testSandboxID := "sandbox-id"
testPid := uint32(1234)
containerConfig, sandboxConfig, imageConfig, specCheck := getCreateContainerTestData()
ociRuntime := config.Runtime{}
c := newTestCRIService()
mountInConfig := &runtime.Mount{
// Test cleanpath
ContainerPath: "test-container-path/",
HostPath: "test-host-path",
Readonly: false,
}
containerConfig.Mounts = append(containerConfig.Mounts, mountInConfig)
extraMounts := []*runtime.Mount{
{
ContainerPath: "test-container-path",
HostPath: "test-host-path-extra",
Readonly: true,
},
{
ContainerPath: "/sys",
HostPath: "test-sys-extra",
Readonly: false,
},
{
ContainerPath: "/dev",
HostPath: "test-dev-extra",
Readonly: false,
},
}
spec, err := c.containerSpec(testID, testSandboxID, testPid, "", containerConfig, sandboxConfig, imageConfig, extraMounts, ociRuntime)
require.NoError(t, err)
specCheck(t, testID, testSandboxID, testPid, spec)
var mounts, sysMounts, devMounts []runtimespec.Mount
for _, m := range spec.Mounts {
if strings.HasPrefix(m.Destination, "test-container-path") {
mounts = append(mounts, m)
} else if m.Destination == "/sys" {
sysMounts = append(sysMounts, m)
} else if strings.HasPrefix(m.Destination, "/dev") {
devMounts = append(devMounts, m)
}
}
t.Logf("CRI mount should override extra mount")
require.Len(t, mounts, 1)
assert.Equal(t, "test-host-path", mounts[0].Source)
assert.Contains(t, mounts[0].Options, "rw")
t.Logf("Extra mount should override default mount")
require.Len(t, sysMounts, 1)
assert.Equal(t, "test-sys-extra", sysMounts[0].Source)
assert.Contains(t, sysMounts[0].Options, "rw")
t.Logf("Dev mount should override all default dev mounts")
require.Len(t, devMounts, 1)
assert.Equal(t, "test-dev-extra", devMounts[0].Source)
assert.Contains(t, devMounts[0].Options, "rw")
}
func TestContainerAndSandboxPrivileged(t *testing.T) {
testID := "test-id"
testSandboxID := "sandbox-id"
testPid := uint32(1234)
containerConfig, sandboxConfig, imageConfig, _ := getCreateContainerTestData()
ociRuntime := config.Runtime{}
c := newTestCRIService()
for desc, test := range map[string]struct {
containerPrivileged bool
sandboxPrivileged bool
expectError bool
}{
"privileged container in non-privileged sandbox should fail": {
containerPrivileged: true,
sandboxPrivileged: false,
expectError: true,
},
"privileged container in privileged sandbox should be fine": {
containerPrivileged: true,
sandboxPrivileged: true,
expectError: false,
},
"non-privileged container in privileged sandbox should be fine": {
containerPrivileged: false,
sandboxPrivileged: true,
expectError: false,
},
"non-privileged container in non-privileged sandbox should be fine": {
containerPrivileged: false,
sandboxPrivileged: false,
expectError: false,
},
} {
t.Logf("TestCase %q", desc)
containerConfig.Linux.SecurityContext.Privileged = test.containerPrivileged
sandboxConfig.Linux.SecurityContext = &runtime.LinuxSandboxSecurityContext{
Privileged: test.sandboxPrivileged,
}
_, err := c.containerSpec(testID, testSandboxID, testPid, "", containerConfig, sandboxConfig, imageConfig, nil, ociRuntime)
if test.expectError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
}
}
func TestContainerMounts(t *testing.T) {
const testSandboxID = "test-id"
for desc, test := range map[string]struct {
statFn func(string) (os.FileInfo, error)
criMounts []*runtime.Mount
securityContext *runtime.LinuxContainerSecurityContext
expectedMounts []*runtime.Mount
}{
"should setup ro mount when rootfs is read-only": {
securityContext: &runtime.LinuxContainerSecurityContext{
ReadonlyRootfs: true,
},
expectedMounts: []*runtime.Mount{
{
ContainerPath: "/etc/hostname",
HostPath: filepath.Join(testRootDir, sandboxesDir, testSandboxID, "hostname"),
Readonly: true,
},
{
ContainerPath: "/etc/hosts",
HostPath: filepath.Join(testRootDir, sandboxesDir, testSandboxID, "hosts"),
Readonly: true,
},
{
ContainerPath: resolvConfPath,
HostPath: filepath.Join(testRootDir, sandboxesDir, testSandboxID, "resolv.conf"),
Readonly: true,
},
{
ContainerPath: "/dev/shm",
HostPath: filepath.Join(testStateDir, sandboxesDir, testSandboxID, "shm"),
Readonly: false,
},
},
},
"should setup rw mount when rootfs is read-write": {
securityContext: &runtime.LinuxContainerSecurityContext{},
expectedMounts: []*runtime.Mount{
{
ContainerPath: "/etc/hostname",
HostPath: filepath.Join(testRootDir, sandboxesDir, testSandboxID, "hostname"),
Readonly: false,
},
{
ContainerPath: "/etc/hosts",
HostPath: filepath.Join(testRootDir, sandboxesDir, testSandboxID, "hosts"),
Readonly: false,
},
{
ContainerPath: resolvConfPath,
HostPath: filepath.Join(testRootDir, sandboxesDir, testSandboxID, "resolv.conf"),
Readonly: false,
},
{
ContainerPath: "/dev/shm",
HostPath: filepath.Join(testStateDir, sandboxesDir, testSandboxID, "shm"),
Readonly: false,
},
},
},
"should use host /dev/shm when host ipc is set": {
securityContext: &runtime.LinuxContainerSecurityContext{
NamespaceOptions: &runtime.NamespaceOption{Ipc: runtime.NamespaceMode_NODE},
},
expectedMounts: []*runtime.Mount{
{
ContainerPath: "/etc/hostname",
HostPath: filepath.Join(testRootDir, sandboxesDir, testSandboxID, "hostname"),
Readonly: false,
},
{
ContainerPath: "/etc/hosts",
HostPath: filepath.Join(testRootDir, sandboxesDir, testSandboxID, "hosts"),
Readonly: false,
},
{
ContainerPath: resolvConfPath,
HostPath: filepath.Join(testRootDir, sandboxesDir, testSandboxID, "resolv.conf"),
Readonly: false,
},
{
ContainerPath: "/dev/shm",
HostPath: "/dev/shm",
Readonly: false,
},
},
},
"should skip container mounts if already mounted by CRI": {
criMounts: []*runtime.Mount{
{
ContainerPath: "/etc/hostname",
HostPath: "/test-etc-hostname",
},
{
ContainerPath: "/etc/hosts",
HostPath: "/test-etc-host",
},
{
ContainerPath: resolvConfPath,
HostPath: "test-resolv-conf",
},
{
ContainerPath: "/dev/shm",
HostPath: "test-dev-shm",
},
},
securityContext: &runtime.LinuxContainerSecurityContext{},
expectedMounts: nil,
},
"should skip hostname mount if the old sandbox doesn't have hostname file": {
statFn: func(path string) (os.FileInfo, error) {
assert.Equal(t, filepath.Join(testRootDir, sandboxesDir, testSandboxID, "hostname"), path)
return nil, errors.New("random error")
},
securityContext: &runtime.LinuxContainerSecurityContext{},
expectedMounts: []*runtime.Mount{
{
ContainerPath: "/etc/hosts",
HostPath: filepath.Join(testRootDir, sandboxesDir, testSandboxID, "hosts"),
Readonly: false,
},
{
ContainerPath: resolvConfPath,
HostPath: filepath.Join(testRootDir, sandboxesDir, testSandboxID, "resolv.conf"),
Readonly: false,
},
{
ContainerPath: "/dev/shm",
HostPath: filepath.Join(testStateDir, sandboxesDir, testSandboxID, "shm"),
Readonly: false,
},
},
},
} {
config := &runtime.ContainerConfig{
Metadata: &runtime.ContainerMetadata{
Name: "test-name",
Attempt: 1,
},
Mounts: test.criMounts,
Linux: &runtime.LinuxContainerConfig{
SecurityContext: test.securityContext,
},
}
c := newTestCRIService()
c.os.(*ostesting.FakeOS).StatFn = test.statFn
mounts := c.containerMounts(testSandboxID, config)
assert.Equal(t, test.expectedMounts, mounts, desc)
}
}
func TestPrivilegedBindMount(t *testing.T) {
testPid := uint32(1234)
c := newTestCRIService()
testSandboxID := "sandbox-id"
containerConfig, sandboxConfig, imageConfig, _ := getCreateContainerTestData()
ociRuntime := config.Runtime{}
for desc, test := range map[string]struct {
privileged bool
expectedSysFSRO bool
expectedCgroupFSRO bool
}{
"sysfs and cgroupfs should mount as 'ro' by default": {
expectedSysFSRO: true,
expectedCgroupFSRO: true,
},
"sysfs and cgroupfs should not mount as 'ro' if privileged": {
privileged: true,
expectedSysFSRO: false,
expectedCgroupFSRO: false,
},
} {
t.Logf("TestCase %q", desc)
containerConfig.Linux.SecurityContext.Privileged = test.privileged
sandboxConfig.Linux.SecurityContext.Privileged = test.privileged
spec, err := c.containerSpec(t.Name(), testSandboxID, testPid, "", containerConfig, sandboxConfig, imageConfig, nil, ociRuntime)
assert.NoError(t, err)
if test.expectedSysFSRO {
checkMount(t, spec.Mounts, "sysfs", "/sys", "sysfs", []string{"ro"}, []string{"rw"})
} else {
checkMount(t, spec.Mounts, "sysfs", "/sys", "sysfs", []string{"rw"}, []string{"ro"})
}
if test.expectedCgroupFSRO {
checkMount(t, spec.Mounts, "cgroup", "/sys/fs/cgroup", "cgroup", []string{"ro"}, []string{"rw"})
} else {
checkMount(t, spec.Mounts, "cgroup", "/sys/fs/cgroup", "cgroup", []string{"rw"}, []string{"ro"})
}
}
}
func TestMountPropagation(t *testing.T) {
sharedLookupMountFn := func(string) (mount.Info, error) {
return mount.Info{
Mountpoint: "host-path",
Optional: "shared:",
}, nil
}
slaveLookupMountFn := func(string) (mount.Info, error) {
return mount.Info{
Mountpoint: "host-path",
Optional: "master:",
}, nil
}
othersLookupMountFn := func(string) (mount.Info, error) {
return mount.Info{
Mountpoint: "host-path",
Optional: "others",
}, nil
}
for desc, test := range map[string]struct {
criMount *runtime.Mount
fakeLookupMountFn func(string) (mount.Info, error)
optionsCheck []string
expectErr bool
}{
"HostPath should mount as 'rprivate' if propagation is MountPropagation_PROPAGATION_PRIVATE": {
criMount: &runtime.Mount{
ContainerPath: "container-path",
HostPath: "host-path",
Propagation: runtime.MountPropagation_PROPAGATION_PRIVATE,
},
fakeLookupMountFn: nil,
optionsCheck: []string{"rbind", "rprivate"},
expectErr: false,
},
"HostPath should mount as 'rslave' if propagation is MountPropagation_PROPAGATION_HOST_TO_CONTAINER": {
criMount: &runtime.Mount{
ContainerPath: "container-path",
HostPath: "host-path",
Propagation: runtime.MountPropagation_PROPAGATION_HOST_TO_CONTAINER,
},
fakeLookupMountFn: slaveLookupMountFn,
optionsCheck: []string{"rbind", "rslave"},
expectErr: false,
},
"HostPath should mount as 'rshared' if propagation is MountPropagation_PROPAGATION_BIDIRECTIONAL": {
criMount: &runtime.Mount{
ContainerPath: "container-path",
HostPath: "host-path",
Propagation: runtime.MountPropagation_PROPAGATION_BIDIRECTIONAL,
},
fakeLookupMountFn: sharedLookupMountFn,
optionsCheck: []string{"rbind", "rshared"},
expectErr: false,
},
"HostPath should mount as 'rprivate' if propagation is illegal": {
criMount: &runtime.Mount{
ContainerPath: "container-path",
HostPath: "host-path",
Propagation: runtime.MountPropagation(42),
},
fakeLookupMountFn: nil,
optionsCheck: []string{"rbind", "rprivate"},
expectErr: false,
},
"Expect an error if HostPath isn't shared and mount propagation is MountPropagation_PROPAGATION_BIDIRECTIONAL": {
criMount: &runtime.Mount{
ContainerPath: "container-path",
HostPath: "host-path",
Propagation: runtime.MountPropagation_PROPAGATION_BIDIRECTIONAL,
},
fakeLookupMountFn: slaveLookupMountFn,
expectErr: true,
},
"Expect an error if HostPath isn't slave or shared and mount propagation is MountPropagation_PROPAGATION_HOST_TO_CONTAINER": {
criMount: &runtime.Mount{
ContainerPath: "container-path",
HostPath: "host-path",
Propagation: runtime.MountPropagation_PROPAGATION_HOST_TO_CONTAINER,
},
fakeLookupMountFn: othersLookupMountFn,
expectErr: true,
},
} {
t.Logf("TestCase %q", desc)
c := newTestCRIService()
c.os.(*ostesting.FakeOS).LookupMountFn = test.fakeLookupMountFn
config, _, _, _ := getCreateContainerTestData()
var spec runtimespec.Spec
spec.Linux = &runtimespec.Linux{}
err := opts.WithMounts(c.os, config, []*runtime.Mount{test.criMount}, "")(context.Background(), nil, nil, &spec)
if test.expectErr {
require.Error(t, err)
} else {
require.NoError(t, err)
checkMount(t, spec.Mounts, test.criMount.HostPath, test.criMount.ContainerPath, "bind", test.optionsCheck, nil)
}
}
}
func TestPidNamespace(t *testing.T) {
testID := "test-id"
testPid := uint32(1234)
testSandboxID := "sandbox-id"
containerConfig, sandboxConfig, imageConfig, _ := getCreateContainerTestData()
ociRuntime := config.Runtime{}
c := newTestCRIService()
for desc, test := range map[string]struct {
pidNS runtime.NamespaceMode
expected runtimespec.LinuxNamespace
}{
"node namespace mode": {
pidNS: runtime.NamespaceMode_NODE,
expected: runtimespec.LinuxNamespace{
Type: runtimespec.PIDNamespace,
Path: opts.GetPIDNamespace(testPid),
},
},
"container namespace mode": {
pidNS: runtime.NamespaceMode_CONTAINER,
expected: runtimespec.LinuxNamespace{
Type: runtimespec.PIDNamespace,
},
},
"pod namespace mode": {
pidNS: runtime.NamespaceMode_POD,
expected: runtimespec.LinuxNamespace{
Type: runtimespec.PIDNamespace,
Path: opts.GetPIDNamespace(testPid),
},
},
} {
t.Logf("TestCase %q", desc)
containerConfig.Linux.SecurityContext.NamespaceOptions = &runtime.NamespaceOption{Pid: test.pidNS}
spec, err := c.containerSpec(testID, testSandboxID, testPid, "", containerConfig, sandboxConfig, imageConfig, nil, ociRuntime)
require.NoError(t, err)
assert.Contains(t, spec.Linux.Namespaces, test.expected)
}
}
func TestNoDefaultRunMount(t *testing.T) {
testID := "test-id"
testPid := uint32(1234)
testSandboxID := "sandbox-id"
containerConfig, sandboxConfig, imageConfig, _ := getCreateContainerTestData()
ociRuntime := config.Runtime{}
c := newTestCRIService()
spec, err := c.containerSpec(testID, testSandboxID, testPid, "", containerConfig, sandboxConfig, imageConfig, nil, ociRuntime)
assert.NoError(t, err)
for _, mount := range spec.Mounts {
assert.NotEqual(t, "/run", mount.Destination)
}
}
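// Editor's note: the tests in this file share Go's table-driven style, with
// cases keyed by a human-readable description. A minimal standalone sketch of
// the pattern (names and values are illustrative):
//
//	for desc, tc := range map[string]struct {
//		in, want int
//	}{
//		"doubles two":  {in: 2, want: 4},
//		"doubles zero": {in: 0, want: 0},
//	} {
//		t.Logf("TestCase %q", desc)
//		assert.Equal(t, tc.want, tc.in*2)
//	}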
func TestGenerateSeccompSpecOpts(t *testing.T) |
func TestGenerateApparmorSpecOpts(t *testing.T) {
for desc, test := range map[string]struct {
profile string
privileged bool
disable bool
specOpts oci.SpecOpts
expectErr bool
}{
"should return error if apparmor is specified when apparmor is not supported": {
profile: runtimeDefault,
disable: true,
expectErr: true,
},
"should not return error if apparmor is not specified when apparmor is not supported": {
profile: "",
disable: true,
},
"should set default apparmor when apparmor is not specified": {
profile: "",
specOpts: apparmor.WithDefaultProfile(appArmorDefaultProfileName),
},
"should not set apparmor when apparmor is not specified and privileged is true": {
profile: "",
privileged: true,
},
"should not return error if apparmor is unconfined when apparmor is not supported": {
profile: unconfinedProfile,
disable: true,
},
"should not set apparmor when apparmor is unconfined": {
profile: unconfinedProfile,
},
"should not set apparmor when apparmor is unconfined and privileged is true": {
profile: unconfinedProfile,
privileged: true,
},
"should set default apparmor when apparmor is runtime/default": {
profile: runtimeDefault,
specOpts: apparmor.WithDefaultProfile(appArmorDefaultProfileName),
},
"should not set apparmor when apparmor is default and privileged is true": {
profile: runtimeDefault,
privileged: true,
},
"should set specified profile when local profile is specified": {
profile: profileNamePrefix + "test-profile",
specOpts: apparmor.WithProfile("test-profile"),
},
"should set apparmor when local profile is specified and privileged is true": {
profile: profileNamePrefix + "test-profile",
privileged: true,
specOpts: apparmor.WithProfile("test-profile"),
},
"should return error if specified profile is invalid": {
profile: "test-profile",
expectErr: true,
},
} {
t.Logf("TestCase %q", desc)
specOpts, err := generateApparmorSpecOpts(test.profile, test.privileged, !test.disable)
assert.Equal(t,
reflect.ValueOf(test.specOpts).Pointer(),
reflect.ValueOf(specOpts).Pointer())
if test.expectErr {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
}
}
func TestMaskedAndReadonlyPaths(t *testing.T) {
testID := "test-id"
testSandboxID := "sandbox-id"
testPid := uint32(1234)
containerConfig, sandboxConfig, imageConfig, specCheck := getCreateContainerTestData()
ociRuntime := config.Runtime{}
c := newTestCRIService()
defaultSpec, err := oci.GenerateSpec(ctrdutil.NamespacedContext(), nil, &containers.Container{ID: testID})
require.NoError(t, err)
for desc, test := range map[string]struct {
disableProcMount bool
masked []string
readonly []string
expectedMasked []string
expectedReadonly []string
privileged bool
}{
"should apply default if not specified when disable_proc_mount = true": {
disableProcMount: true,
masked: nil,
readonly: nil,
expectedMasked: defaultSpec.Linux.MaskedPaths,
expectedReadonly: defaultSpec.Linux.ReadonlyPaths,
privileged: false,
},
"should always apply CRI specified paths when disable_proc_mount = false": {
disableProcMount: false,
masked: nil,
readonly: nil,
expectedMasked: nil,
expectedReadonly: nil,
privileged: false,
},
"should be able to specify empty paths": {
masked: []string{},
readonly: []string{},
expectedMasked: []string{},
expectedReadonly: []string{},
privileged: false,
},
"should apply CRI specified paths": {
masked: []string{"/proc"},
readonly: []string{"/sys"},
expectedMasked: []string{"/proc"},
expectedReadonly: []string{"/sys"},
privileged: false,
},
"default should be nil for privileged": {
expectedMasked: nil,
expectedReadonly: nil,
privileged: true,
},
"should be able to specify empty paths, esp. if privileged": {
masked: []string{},
readonly: []string{},
expectedMasked: nil,
expectedReadonly: nil,
privileged: true,
},
"should not apply CRI specified paths if privileged": {
masked: []string{"/proc"},
readonly: []string{"/sys"},
expectedMasked: nil,
expectedReadonly: nil,
privileged: true,
},
} {
t.Logf("TestCase %q", desc)
c.config.DisableProcMount = test.disableProcMount
containerConfig.Linux.SecurityContext.MaskedPaths = test.masked
containerConfig.Linux.SecurityContext.ReadonlyPaths = test.readonly
containerConfig.Linux.SecurityContext.Privileged = test.privileged
sandboxConfig.Linux.SecurityContext = &runtime.LinuxSandboxSecurityContext{
Privileged: test.privileged,
}
spec, err := c.containerSpec(testID, testSandboxID, testPid, "", containerConfig, sandboxConfig, imageConfig, nil, ociRuntime)
require.NoError(t, err)
if !test.privileged { // specCheck presumes an unprivileged container
specCheck(t, testID, testSandboxID, testPid, spec)
}
assert.Equal(t, test.expectedMasked, spec.Linux.MaskedPaths)
assert.Equal(t, test.expectedReadonly, spec.Linux.ReadonlyPaths)
}
}
func TestHostname(t *testing.T) {
testID := "test-id"
testSandboxID := "sandbox-id"
testPid := uint32(1234)
containerConfig, sandboxConfig, imageConfig, specCheck := getCreateContainerTestData()
ociRuntime := config.Runtime{}
c := newTestCRIService()
c.os.(*ostesting.FakeOS).HostnameFn = func() (string, error) {
return "real-hostname", nil
}
for desc, test := range map[string]struct {
hostname string
networkNs runtime.NamespaceMode
expectedEnv string
}{
"should add HOSTNAME=sandbox.Hostname for pod network namespace": {
hostname: "test-hostname",
networkNs: runtime.NamespaceMode_POD,
expectedEnv: "HOSTNAME=test-hostname",
},
"should add HOSTNAME=sandbox.Hostname for host network namespace": {
hostname: "test-hostname",
networkNs: runtime.NamespaceMode_NODE,
expectedEnv: "HOSTNAME=test-hostname",
},
"should add HOSTNAME=os.Hostname for host network namespace if sandbox.Hostname is not set": {
hostname: "",
networkNs: runtime.NamespaceMode_NODE,
expectedEnv: "HOSTNAME=real-hostname",
},
} {
t.Logf("TestCase %q", desc)
sandboxConfig.Hostname = test.hostname
sandboxConfig.Linux.SecurityContext = &runtime.LinuxSandboxSecurityContext{
NamespaceOptions: &runtime.NamespaceOption{Network: test.networkNs},
}
spec, err := c.containerSpec(testID, testSandboxID, testPid, "", containerConfig, sandboxConfig, imageConfig, nil, ociRuntime)
require.NoError(t, err)
specCheck(t, testID, testSandboxID, testPid, spec)
assert.Contains(t, spec.Process.Env, test.expectedEnv)
}
}
func TestDisableCgroup(t *testing.T) {
containerConfig, sandboxConfig, imageConfig, _ := getCreateContainerTestData()
ociRuntime := config.Runtime{}
c := newTestCRIService()
c.config.DisableCgroup = true
spec, err := c.containerSpec("test-id", "sandbox-id", 1234, "", containerConfig, sandboxConfig, imageConfig, nil, ociRuntime)
require.NoError(t, err)
t.Log("resource limit should not be set")
assert.Nil(t, spec.Linux.Resources.Memory)
assert.Nil(t, spec.Linux.Resources.CPU)
t.Log("cgroup path should be empty")
assert.Empty(t, spec.Linux.CgroupsPath)
}
func TestGenerateUserString(t *testing.T) {
type testcase struct {
// the name of the test case
name string
u string
uid, gid *runtime.Int64Value
result string
expectedError bool
}
testcases := []testcase{
{
name: "Empty",
result: "",
},
{
name: "Username Only",
u: "testuser",
result: "testuser",
},
{
name: "Username, UID",
u: "testuser",
uid: &runtime.Int64Value{Value: 1},
result: "testuser",
},
{
name: "Username, UID, GID",
u: "testuser",
uid: &runtime.Int64Value{Value: 1},
gid: &runtime.Int64Value{Value: 10},
result: "testuser:10",
},
{
name: "Username, GID",
u: "testuser",
gid: &runtime.Int64Value{Value: 10},
result: "testuser:10",
},
{
name: "UID only",
uid: &runtime.Int64Value{Value: 1},
result: "1",
},
{
name: "UID, GID",
uid: &runtime.Int64Value{Value: 1},
gid: &runtime.Int64Value{Value: 10},
result: "1:10",
},
{
name: "GID only",
gid: &runtime.Int64Value{Value: 10},
result: "",
expectedError: true,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
r, err := generateUserString(tc.u, tc.uid, tc.gid)
if tc.expectedError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
assert.Equal(t, tc.result, r)
})
}
}
func TestPrivilegedDevices(t *testing.T) {
testPid := uint32(1234)
c := newTestCRIService()
testSandboxID := "sandbox-id"
containerConfig, sandboxConfig, imageConfig, _ := getCreateContainerTestData()
for desc, test := range map[string]struct {
privileged bool
privilegedWithoutHostDevices bool
expectHostDevices bool
}{
"expect no host devices when privileged is false": {
privileged: false,
privilegedWithoutHostDevices: false,
expectHostDevices: false,
},
"expect no host devices when privileged is false and privilegedWithoutHostDevices is true": {
privileged: false,
privilegedWithoutHostDevices: true,
expectHostDevices: false,
},
"expect host devices when privileged is true": {
privileged: true,
privilegedWithoutHostDevices: false,
expectHostDevices: true,
},
"expect no host devices when privileged is true and privilegedWithoutHostDevices is true": {
privileged: true,
privilegedWithoutHostDevices: true,
expectHostDevices: false,
},
} {
t.Logf("TestCase %q", desc)
containerConfig.Linux.SecurityContext.Privileged = test.privileged
sandboxConfig.Linux.SecurityContext.Privileged = test.privileged
ociRuntime := config.Runtime{
PrivilegedWithoutHostDevices: test.privilegedWithoutHostDevices,
}
spec, err := c.containerSpec(t.Name(), testSandboxID, testPid, "", containerConfig, sandboxConfig, imageConfig, nil, ociRuntime)
assert.NoError(t, err)
hostDevices, err := devices.HostDevices()
assert.NoError(t, err)
if test.expectHostDevices {
assert.Len(t, spec.Linux.Devices, len(hostDevices))
} else {
assert.Empty(t, spec.Linux.Devices)
}
}
}
| {
for desc, test := range map[string]struct {
profile string
privileged bool
disable bool
specOpts oci.SpecOpts
expectErr bool
}{
"should return error if seccomp is specified when seccomp is not supported": {
profile: runtimeDefault,
disable: true,
expectErr: true,
},
"should not return error if seccomp is not specified when seccomp is not supported": {
profile: "",
disable: true,
},
"should not return error if seccomp is unconfined when seccomp is not supported": {
profile: unconfinedProfile,
disable: true,
},
"should not set seccomp when privileged is true": {
profile: seccompDefaultProfile,
privileged: true,
},
"should not set seccomp when seccomp is unconfined": {
profile: unconfinedProfile,
},
"should not set seccomp when seccomp is not specified": {
profile: "",
},
"should set default seccomp when seccomp is runtime/default": {
profile: runtimeDefault,
specOpts: seccomp.WithDefaultProfile(),
},
"should set default seccomp when seccomp is docker/default": {
profile: dockerDefault,
specOpts: seccomp.WithDefaultProfile(),
},
"should set specified profile when local profile is specified": {
profile: profileNamePrefix + "test-profile",
specOpts: seccomp.WithProfile("test-profile"),
},
"should return error if specified profile is invalid": {
profile: "test-profile",
expectErr: true,
},
} {
t.Logf("TestCase %q", desc)
specOpts, err := generateSeccompSpecOpts(test.profile, test.privileged, !test.disable)
assert.Equal(t,
reflect.ValueOf(test.specOpts).Pointer(),
reflect.ValueOf(specOpts).Pointer())
if test.expectErr {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
}
} |
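// Editor's note: the assertions above compare oci.SpecOpts values through
// reflect.ValueOf(...).Pointer() because Go function values are not comparable
// with ==; equal code pointers mean both values refer to the same function.
// A minimal sketch of the trick (sameFunc is an illustrative name):
//
//	func sameFunc(a, b interface{}) bool {
//		return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer()
//	}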
android_test.go | // Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package geth
import (
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
"time"
"github.com/tenderly/solidity-hmr/ethereum/internal/build"
)
// androidTestClass is a Java class to do some lightweight tests against the Android
// bindings. The goal is not to test each individual functionality, rather just to
// catch breaking API and/or implementation changes.
const androidTestClass = `
package go;
import android.test.InstrumentationTestCase;
import android.test.MoreAsserts;
import java.math.BigInteger;
import java.util.Arrays;
import org.ethereum.geth.*;
public class AndroidTest extends InstrumentationTestCase {
public AndroidTest() {}
public void testAccountManagement() {
// Create an encrypted keystore with light crypto parameters.
KeyStore ks = new KeyStore(getInstrumentation().getContext().getFilesDir() + "/keystore", Geth.LightScryptN, Geth.LightScryptP);
try {
// Create a new account with the specified encryption passphrase.
Account newAcc = ks.newAccount("Creation password");
// Export the newly created account with a different passphrase. The returned
// data from this method invocation is a JSON encoded, encrypted key-file.
byte[] jsonAcc = ks.exportKey(newAcc, "Creation password", "Export password");
// Update the passphrase on the account created above inside the local keystore.
ks.updateAccount(newAcc, "Creation password", "Update password");
// Delete the account updated above from the local keystore.
ks.deleteAccount(newAcc, "Update password");
// Import back the account we've exported (and then deleted) above with yet
// again a fresh passphrase.
Account impAcc = ks.importKey(jsonAcc, "Export password", "Import password");
// Create a new account to sign transactions with
Account signer = ks.newAccount("Signer password");
Transaction tx = new Transaction(
1, new Address("0x0000000000000000000000000000000000000000"),
new BigInt(0), 0, new BigInt(1), null); // Random empty transaction
BigInt chain = new BigInt(1); // Chain identifier of the main net
// Sign a transaction with a single authorization
Transaction signed = ks.signTxPassphrase(signer, "Signer password", tx, chain);
// Sign a transaction with multiple manually cancelled authorizations
ks.unlock(signer, "Signer password");
signed = ks.signTx(signer, tx, chain);
ks.lock(signer.getAddress());
// Sign a transaction with multiple automatically cancelled authorizations
ks.timedUnlock(signer, "Signer password", 1000000000);
signed = ks.signTx(signer, tx, chain);
} catch (Exception e) {
fail(e.toString());
}
}
public void testInprocNode() {
Context ctx = new Context();
try {
// Start up a new inprocess node
Node node = new Node(getInstrumentation().getContext().getFilesDir() + "/.ethereum", new NodeConfig());
node.start();
// Retrieve some data via function calls (we don't really care about the results)
NodeInfo info = node.getNodeInfo();
info.getName();
info.getListenerAddress();
info.getProtocols();
// Retrieve some data via the APIs (we don't really care about the results)
EthereumClient ec = node.getEthereumClient();
ec.getBlockByNumber(ctx, -1).getNumber();
NewHeadHandler handler = new NewHeadHandler() {
@Override public void onError(String error) {}
@Override public void onNewHead(final Header header) {}
};
ec.subscribeNewHead(ctx, handler, 16);
} catch (Exception e) {
fail(e.toString());
}
}
// Tests that recovering transaction signers works for both Homestead and EIP155
// signatures too. Regression test for go-ethereum issue #14599.
public void testIssue14599() {
try {
byte[] preEIP155RLP = new BigInteger("f901fc8032830138808080b901ae60056013565b6101918061001d6000396000f35b3360008190555056006001600060e060020a6000350480630a874df61461003a57806341c0e1b514610058578063a02b161e14610066578063dbbdf0831461007757005b610045600435610149565b80600160a060020a031660005260206000f35b610060610161565b60006000f35b6100716004356100d4565b60006000f35b61008560043560243561008b565b60006000f35b600054600160a060020a031632600160a060020a031614156100ac576100b1565b6100d0565b8060018360005260205260406000208190555081600060005260206000a15b5050565b600054600160a060020a031633600160a060020a031614158015610118575033600160a060020a0316600182600052602052604060002054600160a060020a031614155b61012157610126565b610146565b600060018260005260205260406000208190555080600060005260206000a15b50565b60006001826000526020526040600020549050919050565b600054600160a060020a031633600160a060020a0316146101815761018f565b600054600160a060020a0316ff5b561ca0c5689ed1ad124753d54576dfb4b571465a41900a1dff4058d8adf16f752013d0a01221cbd70ec28c94a3b55ec771bcbc70778d6ee0b51ca7ea9514594c861b1884", 16).toByteArray();
preEIP155RLP = Arrays.copyOfRange(preEIP155RLP, 1, preEIP155RLP.length);
byte[] postEIP155RLP = new BigInteger("f86b80847735940082520894ef5bbb9bba2e1ca69ef81b23a8727d889f3ef0a1880de0b6b3a7640000802ba06fef16c44726a102e6d55a651740636ef8aec6df3ebf009e7b0c1f29e4ac114aa057e7fbc69760b522a78bb568cfc37a58bfdcf6ea86cb8f9b550263f58074b9cc", 16).toByteArray();
postEIP155RLP = Arrays.copyOfRange(postEIP155RLP, 1, postEIP155RLP.length);
Transaction preEIP155 = new Transaction(preEIP155RLP);
Transaction postEIP155 = new Transaction(postEIP155RLP);
preEIP155.getFrom(null); // Homestead should accept homestead
preEIP155.getFrom(new BigInt(4)); // EIP155 should accept homestead (missing chain ID)
postEIP155.getFrom(new BigInt(4)); // EIP155 should accept EIP 155
try {
postEIP155.getFrom(null);
fail("EIP155 transaction accepted by Homestead");
} catch (Exception e) {}
} catch (Exception e) {
fail(e.toString());
}
}
}
`
// TestAndroid runs the Android java test class specified above.
//
// This requires the gradle command in PATH and the Android SDK whose path is available
// through ANDROID_HOME environment variable. To successfully run the tests, an Android
// device must also be available with debugging enabled.
//
// This method has been adapted from golang.org/x/mobile/bind/java/seq_test.go/runTest
func | (t *testing.T) {
// Skip tests on Windows altogether
if runtime.GOOS == "windows" {
t.Skip("cannot test Android bindings on Windows, skipping")
}
// Make sure all the Android tools are installed
if _, err := exec.Command("which", "gradle").CombinedOutput(); err != nil {
t.Skip("command gradle not found, skipping")
}
if sdk := os.Getenv("ANDROID_HOME"); sdk == "" {
// Android SDK not explicitly given, try to auto-resolve
autopath := filepath.Join(os.Getenv("HOME"), "Android", "Sdk")
if _, err := os.Stat(autopath); err != nil {
t.Skip("ANDROID_HOME environment var not set, skipping")
}
os.Setenv("ANDROID_HOME", autopath)
}
if _, err := exec.Command("which", "gomobile").CombinedOutput(); err != nil {
t.Log("gomobile missing, installing it...")
if out, err := exec.Command("go", "get", "golang.org/x/mobile/cmd/gomobile").CombinedOutput(); err != nil {
t.Fatalf("install failed: %v\n%s", err, string(out))
}
t.Log("initializing gomobile...")
start := time.Now()
if _, err := exec.Command("gomobile", "init").CombinedOutput(); err != nil {
t.Fatalf("initialization failed: %v", err)
}
t.Logf("initialization took %v", time.Since(start))
}
// Create and switch to a temporary workspace
workspace, err := ioutil.TempDir("", "geth-android-")
if err != nil {
t.Fatalf("failed to create temporary workspace: %v", err)
}
defer os.RemoveAll(workspace)
pwd, err := os.Getwd()
if err != nil {
t.Fatalf("failed to get current working directory: %v", err)
}
if err := os.Chdir(workspace); err != nil {
t.Fatalf("failed to switch to temporary workspace: %v", err)
}
defer os.Chdir(pwd)
// Create the skeleton of the Android project
for _, dir := range []string{"src/main", "src/androidTest/java/org/ethereum/gethtest", "libs"} {
err = os.MkdirAll(dir, os.ModePerm)
if err != nil {
t.Fatal(err)
}
}
// Generate the mobile bindings for Geth and add the tester class
gobind := exec.Command("gomobile", "bind", "-javapkg", "org.ethereum", "github.com/tenderly/solidity-hmr/ethereum/mobile")
if output, err := gobind.CombinedOutput(); err != nil {
t.Logf("%s", output)
t.Fatalf("failed to run gomobile bind: %v", err)
}
build.CopyFile(filepath.Join("libs", "geth.aar"), "geth.aar", os.ModePerm)
if err = ioutil.WriteFile(filepath.Join("src", "androidTest", "java", "org", "ethereum", "gethtest", "AndroidTest.java"), []byte(androidTestClass), os.ModePerm); err != nil {
t.Fatalf("failed to write Android test class: %v", err)
}
// Finish creating the project and run the tests via gradle
if err = ioutil.WriteFile(filepath.Join("src", "main", "AndroidManifest.xml"), []byte(androidManifest), os.ModePerm); err != nil {
t.Fatalf("failed to write Android manifest: %v", err)
}
if err = ioutil.WriteFile("build.gradle", []byte(gradleConfig), os.ModePerm); err != nil {
t.Fatalf("failed to write gradle build file: %v", err)
}
if output, err := exec.Command("gradle", "connectedAndroidTest").CombinedOutput(); err != nil {
t.Logf("%s", output)
t.Errorf("failed to run gradle test: %v", err)
}
}
const androidManifest = `<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="org.ethereum.gethtest"
android:versionCode="1"
android:versionName="1.0">
<uses-permission android:name="android.permission.INTERNET" />
</manifest>`
const gradleConfig = `buildscript {
repositories {
jcenter()
}
dependencies {
classpath 'com.android.tools.build:gradle:2.2.3'
}
}
allprojects {
repositories { jcenter() }
}
apply plugin: 'com.android.library'
android {
compileSdkVersion 'android-19'
buildToolsVersion '21.1.2'
defaultConfig { minSdkVersion 15 }
}
repositories {
flatDir { dirs 'libs' }
}
dependencies {
compile 'com.android.support:appcompat-v7:19.0.0'
compile(name: "geth", ext: "aar")
}
`
| TestAndroid |
index.js | const Cat = require("./endpoints/Catalog"); | const Sch = require("./schemas/index");
module.exports = { ApiCatalog: Cat, DatabaseSchemas: Sch }; |
|
demo3.py | """
2019/12/08 15:16
142.【Python多任务编程】使用类的方式创建子进程(进程)
"""
"""
使用类的方式创建子进程:
有些时候,你想以类的形式定义子进程的代码。那么你可以自定义一个类,让他继承自`Process`,
然后在这个类中实现run方法,以后这个子进程在执行的时候就会调用run方法中的代码。
"""
from multiprocessing import Process
import os
class zhiliao(Process): | print('子进程ID: %s' % os.getpid())
print('父进程ID: %s' % os.getppid())
for x in range(0, 5):
print('子进程: %s' % x)
if __name__ == '__main__':
p = zhiliao()
p.start()
print('主进程ID: %s' % os.getpid())
p.join()
print('所有子进程代码执行完毕...') | def run(self): |
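# Editor's note: for contrast with the subclassing approach above, the same
# child process can also be created by passing a plain target function; a
# minimal sketch (worker is an illustrative name):
from multiprocessing import Process
import os

def worker():
    print('child process ID: %s' % os.getpid())

if __name__ == '__main__':
    p = Process(target=worker)
    p.start()
    p.join()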
update-user.dto.ts | import { PartialType } from '@nestjs/mapped-types';
import { ApiProperty } from '@nestjs/swagger';
import { CreateUserDto } from './create-user.dto';
export class | extends PartialType(CreateUserDto) {
@ApiProperty({
description: "User name of new user",
default: 'usernameexample',
type: String
})
username: string
@ApiProperty({
description: "Password of new user",
default: 'Example@2021',
minimum: 8,
type: String
})
password: string
@ApiProperty({
description: "Date of signup",
type: Date
})
joinedAt: Date
@ApiProperty({
description: "Date of profile update",
type: Date
})
updatedAt: Date
@ApiProperty({
description: "Is user verified",
type: Boolean
})
verified: boolean
}
| UpdateUserDto |
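// Editor's note: PartialType(CreateUserDto) makes every property inherited from
// the base DTO optional for update requests, while the redeclarations above
// attach fresh Swagger metadata. A minimal usage sketch (values illustrative):
//
//   const patch = { username: 'newname' } as UpdateUserDto; // other fields omitted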
error.go | package config
import (
"fmt"
"net/http"
"github.com/dynastymasra/cookbook"
)
type ServiceError struct {
code int
key string
message string
}
func NewError(code int, key, message string) *ServiceError {
return &ServiceError{
code: code,
key: key,
message: message,
}
}
func (s *ServiceError) Code() int {
return s.code
}
func (s *ServiceError) Key() string {
return s.key
}
func (s *ServiceError) Error() string {
return s.message
}
func | (err *ServiceError, w http.ResponseWriter, requestID string) {
if err.Code() >= 500 {
w.WriteHeader(err.Code())
fmt.Fprint(w, cookbook.ErrorResponse(err.message, requestID).Stringify())
}
if err.Code() >= 400 && err.Code() < 500 {
w.WriteHeader(err.Code())
fmt.Fprint(w, cookbook.FailResponse(&cookbook.JSON{
err.Key(): err.Error(),
}, requestID).Stringify())
}
}
| ParseToJSON |
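// Editor's note: a minimal usage sketch for the NewError/ParseToJSON helpers
// above; the handler shape and request-ID header are illustrative, not part of
// the original file:
//
//	func handler(w http.ResponseWriter, r *http.Request) {
//		requestID := r.Header.Get("X-Request-ID")
//		err := NewError(http.StatusNotFound, "user", "user not found")
//		ParseToJSON(err, w, requestID)
//	}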
errors.go | package drive
import (
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
"time"
)
const MaxErrorRetries = 5
func isBackendOrRateLimitError(err error) bool {
return isBackendError(err) || isRateLimitError(err)
}
func isBackendError(err error) bool {
if err == nil {
return false
}
ae, ok := err.(*googleapi.Error)
return ok && ae.Code >= 500 && ae.Code <= 599
}
func isRateLimitError(err error) bool {
if err == nil |
ae, ok := err.(*googleapi.Error)
return ok && ae.Code == 403
}
func isTimeoutError(err error) bool {
return err == context.Canceled
}
func exponentialBackoffSleep(try int) {
seconds := pow(2, try)
time.Sleep(time.Duration(seconds) * time.Second)
}
| {
return false
} |
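// Editor's note: pow is assumed to be a package-level helper defined elsewhere
// in this package. A minimal sketch of the retry loop these helpers are built
// for (doRequest is illustrative):
//
//	for try := 0; try <= MaxErrorRetries; try++ {
//		err := doRequest()
//		if err == nil || !isBackendOrRateLimitError(err) {
//			return err
//		}
//		exponentialBackoffSleep(try)
//	}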
mod.rs | use core::mem; | pub mod tiled_bg;
pub mod sprites;
pub use self::sprites::*;
pub fn vsync_busy() {
unsafe {
while reg::REG_VCOUNT.volatile_load() >= 160 {}
while reg::REG_VCOUNT.volatile_load() < 160 {}
}
}
pub fn vsync_int() {
interrupt!(0x05);
}
#[derive(Copy, Clone)]
#[repr(u16)]
pub enum ColorMode {
_4bpp = 0x0_u16,
_8bpp = 0x2000_u16,
}
#[derive(Clone, Copy)]
#[repr(u8)]
pub enum VideoMode {
Mode0 = 0,
Mode1 = 1,
Mode2 = 2,
Mode3 = 3,
Mode4 = 4,
Mode5 = 5,
}
impl VideoMode {
const MASK: u8 = 0b0000_0111_u8;
pub fn set(self, val: u32) -> u32 {
let p = self as u32;
(val & !(Self::MASK as u32)) | p
}
}
#[derive(Copy, Clone)]
#[repr(u8)]
pub enum FrameBufferStart {
/// The FrameBuffer should start at the address 0x06000000
Base = 0b0000_0000,
/// The FrameBuffer should start at the address 0x0600A000
Offset = 0b0001_0000,
}
impl FrameBufferStart {
const MASK: u8 = 0b0001_0000_u8;
pub fn set(self, val: u32) -> u32 {
let p = self as u32;
(val & !(Self::MASK as u32)) | p
}
}
#[derive(Copy, Clone)]
#[repr(u8)]
pub enum SpriteStorageMode {
_2D = 0b0000_0000,
_1D = 0b0100_0000
}
impl SpriteStorageMode {
const MASK: u8 = 0b0100_0000_u8;
pub fn set(self, val: u32) -> u32 {
let p = self as u32;
(val & !(Self::MASK as u32)) | p
}
}
#[derive(Copy, Clone)]
#[repr(u8)]
pub enum HBlankProcessing {
None = 0b0000_0000_u8,
Force = 0b0010_0000_u8,
}
impl HBlankProcessing {
const MASK: u8 = 0b0010_0000_u8;
pub fn set(self, val: u32) -> u32 {
let p = self as u32;
(val & !(Self::MASK as u32)) | p
}
}
#[derive(Copy, Clone)]
#[repr(u8)]
pub enum DisplayState {
Blank = 0b1000_0000_u8,
On = 0b0000_0000_u8,
}
impl DisplayState {
const MASK: u8 = 0b1000_0000_u8;
pub fn set(self, val: u32) -> u32 {
let p = self as u32;
(val & !(Self::MASK as u32)) | p
}
}
pub struct GraphicsMode {
pub vm: VideoMode,
pub frame_buffer_start: FrameBufferStart,
pub hblank_policy: HBlankProcessing,
pub sprite_storage_mode: SpriteStorageMode,
pub display_state: DisplayState,
pub bg0_enabled: bool,
pub bg1_enabled: bool,
pub bg2_enabled: bool,
pub bg3_enabled: bool,
pub sprites_enabled: bool,
pub window0_enabled: bool,
pub window1_enabled: bool,
pub sprite_windows_enabled: bool,
}
impl GraphicsMode {
const BG0_MASK: u16 = 0x0100;
const BG1_MASK: u16 = 0x0200;
const BG2_MASK: u16 = 0x0400;
const BG3_MASK: u16 = 0x0800;
const SPRITES_MASK: u16 = 0x1000;
const WINDOW0_MASK: u16 = 0x2000; // bit 13 of DISPCNT
const WINDOW1_MASK: u16 = 0x4000; // bit 14 of DISPCNT
const SPRITE_WINDOWS_MASK: u16 = 0x8000; // bit 15 of DISPCNT
pub fn current() -> GraphicsMode {
unsafe { GraphicsMode::from_u16(mem::transmute(*reg::REG_GRAPHICS_MODE)) }
}
pub fn from_u16(n: u16) -> GraphicsMode {
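// Editor's note: only the enable bits (8..=15) of `n` are decoded below; the
// mode and state fields always fall back to their defaults.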
GraphicsMode {
vm: VideoMode::Mode0,
frame_buffer_start: FrameBufferStart::Base,
hblank_policy: HBlankProcessing::None,
sprite_storage_mode: SpriteStorageMode::_2D,
display_state: DisplayState::On,
bg0_enabled: (n & GraphicsMode::BG0_MASK) != 0,
bg1_enabled: (n & GraphicsMode::BG1_MASK) != 0,
bg2_enabled: (n & GraphicsMode::BG2_MASK) != 0,
bg3_enabled: (n & GraphicsMode::BG3_MASK) != 0,
sprites_enabled: (n & GraphicsMode::SPRITES_MASK) != 0,
window0_enabled: (n & GraphicsMode::WINDOW0_MASK) != 0,
window1_enabled: (n & GraphicsMode::WINDOW1_MASK) != 0,
sprite_windows_enabled: (n & GraphicsMode::SPRITE_WINDOWS_MASK) != 0,
}
}
pub fn set(&self) {
let mut reg = 0u16;
reg |= self.vm as u16;
reg |= self.frame_buffer_start as u16;
reg |= self.hblank_policy as u16;
reg |= self.display_state as u16;
reg |= self.sprite_storage_mode as u16;
let bg0 = if self.bg0_enabled { GraphicsMode::BG0_MASK } else { 0 };
let bg1 = if self.bg1_enabled { GraphicsMode::BG1_MASK } else { 0 };
let bg2 = if self.bg2_enabled { GraphicsMode::BG2_MASK } else { 0 };
let bg3 = if self.bg3_enabled { GraphicsMode::BG3_MASK } else { 0 };
let sprites = if self.sprites_enabled { GraphicsMode::SPRITES_MASK } else { 0 };
let window0 = if self.window0_enabled { GraphicsMode::WINDOW0_MASK } else { 0 };
let window1 = if self.window1_enabled { GraphicsMode::WINDOW1_MASK } else { 0 };
let sprite_windows = if self.sprite_windows_enabled { GraphicsMode::SPRITE_WINDOWS_MASK} else { 0 };
reg |= bg0 | bg1 | bg2 | bg3 | sprites | window0 | window1 | sprite_windows;
// The `* mut _` cast is to prevent a weird warning that may be a bug in rustc:
// when using reg::REG_GRAPHICS_MODE.ptr_mut, a warning is thrown that says:
//
// --> src\graphics\mod.rs:164:33
// |
// 164 | unsafe { volatile_store(reg::REG_GRAPHICS_MODE.ptr_mut, reg) }
// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
// |
// = note: #[warn(const_err)] on by default
unsafe { volatile_store(reg::REG_GRAPHICS_MODE.num as * mut _, reg) }
}
}
impl Default for GraphicsMode {
fn default() -> Self {
GraphicsMode::from_u16(0)
}
} | use core::default::Default;
use core::intrinsics::volatile_store;
use reg;
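// Editor's note: every `set` method in this module follows the same
// read-modify-write bit-field pattern; a minimal standalone sketch
// (set_field is an illustrative name):
//
//     fn set_field(reg: u32, mask: u32, value: u32) -> u32 {
//         (reg & !mask) | (value & mask) // clear the field, then OR in the value
//     }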
|
app.rs | use std::collections::HashSet;
// use indexmap::map::Entry;
use proc_macro2::TokenStream as TokenStream2;
use syn::{
parse::{self, ParseStream, Parser},
spanned::Spanned,
Expr, ExprArray, ExprParen, Fields, ForeignItem, Ident, Item, LitBool, Path, Token, Visibility,
};
use super::Input;
use crate::{
ast::{
App, AppArgs, ExternInterrupt, ExternInterrupts, HardwareTask, Idle, IdleArgs, Init,
InitArgs, LateResource, Resource, SoftwareTask,
},
parse::util,
Either, Map, Set, Settings,
};
impl AppArgs {
pub(crate) fn | (tokens: TokenStream2) -> parse::Result<Self> {
(|input: ParseStream<'_>| -> parse::Result<Self> {
let mut custom = Set::new();
let mut device = None;
let mut monotonic = None;
let mut peripherals = true;
let mut extern_interrupts = ExternInterrupts::new();
loop {
if input.is_empty() {
break;
}
// #ident = ..
let ident: Ident = input.parse()?;
let _eq_token: Token![=] = input.parse()?;
if custom.contains(&ident) {
return Err(parse::Error::new(
ident.span(),
"argument appears more than once",
));
}
custom.insert(ident.clone());
let ks = ident.to_string();
match &*ks {
"device" => {
if let Ok(p) = input.parse::<Path>() {
device = Some(p);
} else {
return Err(parse::Error::new(
ident.span(),
"unexpected argument value; this should be a path",
));
}
}
"monotonic" => {
if let Ok(p) = input.parse::<Path>() {
monotonic = Some(p);
} else {
return Err(parse::Error::new(
ident.span(),
"unexpected argument value; this should be a path",
));
}
}
"peripherals" => {
if let Ok(p) = input.parse::<LitBool>() {
peripherals = p.value;
} else {
return Err(parse::Error::new(
ident.span(),
"unexpected argument value; this should be a boolean",
));
}
}
"dispatchers" => {
if let Ok(p) = input.parse::<ExprArray>() {
for e in p.elems {
match e {
Expr::Path(ep) => {
let path = ep.path;
let ident = if path.leading_colon.is_some()
|| path.segments.len() != 1
{
return Err(parse::Error::new(
path.span(),
"interrupt must be an identifier, not a path",
));
} else {
path.segments[0].ident.clone()
};
let span = ident.span();
if extern_interrupts.contains_key(&ident) {
return Err(parse::Error::new(
span,
"this extern interrupt is listed more than once",
));
} else {
extern_interrupts.insert(
ident,
ExternInterrupt {
attrs: ep.attrs,
_extensible: (),
},
);
}
}
_ => {
return Err(parse::Error::new(
e.span(),
"interrupt must be an identifier",
));
}
}
}
} else {
return Err(parse::Error::new(
ident.span(),
// increasing the length of the error message will break rustfmt
"unexpected argument value; expected an array",
));
}
}
_ => {
return Err(parse::Error::new(ident.span(), "unexpected argument"));
}
}
if input.is_empty() {
break;
}
// ,
let _: Token![,] = input.parse()?;
}
Ok(AppArgs {
device,
monotonic,
peripherals,
extern_interrupts,
})
})
.parse2(tokens)
}
}
impl App {
pub(crate) fn parse(args: AppArgs, input: Input, settings: &Settings) -> parse::Result<Self> {
let mut inits = Vec::new();
let mut idles = Vec::new();
let mut late_resources = Map::new();
let mut resources = Map::new();
let mut resource_struct = Map::new();
let mut hardware_tasks = Map::new();
let mut software_tasks = Map::new();
let mut user_imports = vec![];
let mut user_code = vec![];
let mut seen_idents = HashSet::<Ident>::new();
let mut bindings = HashSet::<Ident>::new();
let mut check_binding = |ident: &Ident| {
if bindings.contains(ident) {
return Err(parse::Error::new(
ident.span(),
"a task has already been bound to this interrupt",
));
} else {
bindings.insert(ident.clone());
}
Ok(())
};
let mut check_ident = |ident: &Ident| {
if seen_idents.contains(ident) {
return Err(parse::Error::new(
ident.span(),
"this identifier has already been used",
));
} else {
seen_idents.insert(ident.clone());
}
Ok(())
};
for mut item in input.items {
match item {
Item::Fn(mut item) => {
let span = item.sig.ident.span();
if let Some(pos) = item
.attrs
.iter()
.position(|attr| util::attr_eq(attr, "init"))
{
let args = InitArgs::parse(item.attrs.remove(pos).tokens)?;
// If an init function already exists, error
if !inits.is_empty() {
return Err(parse::Error::new(
span,
"`#[init]` function must appear at most once",
));
}
check_ident(&item.sig.ident)?;
inits.push(Init::parse(args, item)?);
} else if let Some(pos) = item
.attrs
.iter()
.position(|attr| util::attr_eq(attr, "idle"))
{
let args = IdleArgs::parse(item.attrs.remove(pos).tokens)?;
// If an idle function already exists, error
if !idles.is_empty() {
return Err(parse::Error::new(
span,
"`#[idle]` function must appear at most once",
));
}
check_ident(&item.sig.ident)?;
idles.push(Idle::parse(args, item)?);
} else if let Some(pos) = item
.attrs
.iter()
.position(|attr| util::attr_eq(attr, "task"))
{
if hardware_tasks.contains_key(&item.sig.ident)
|| software_tasks.contains_key(&item.sig.ident)
{
return Err(parse::Error::new(
span,
"this task is defined multiple times",
));
}
match crate::parse::task_args(item.attrs.remove(pos).tokens, settings)? {
Either::Left(args) => {
check_binding(&args.binds)?;
check_ident(&item.sig.ident)?;
hardware_tasks.insert(
item.sig.ident.clone(),
HardwareTask::parse(args, item)?,
);
}
Either::Right(args) => {
check_ident(&item.sig.ident)?;
software_tasks.insert(
item.sig.ident.clone(),
SoftwareTask::parse(args, item)?,
);
}
}
} else {
return Err(parse::Error::new(
span,
"this item must live outside the `#[app]` module",
));
}
}
Item::Struct(ref mut struct_item) => {
// Match structures with the attribute #[resources], name of structure is not
// important
if let Some(_pos) = struct_item
.attrs
.iter()
.position(|attr| util::attr_eq(attr, "resources"))
{
let span = struct_item.ident.span();
if resource_struct.contains_key(&struct_item.ident) {
return Err(parse::Error::new(
span,
"`#[resources]` struct must appear at most once",
));
}
if struct_item.vis != Visibility::Inherited {
return Err(parse::Error::new(
struct_item.span(),
"this item must have inherited / private visibility",
));
}
if let Fields::Named(fields) = &mut struct_item.fields {
for field in &mut fields.named {
let ident = field.ident.as_ref().expect("UNREACHABLE");
if late_resources.contains_key(ident)
|| resources.contains_key(ident)
{
return Err(parse::Error::new(
ident.span(),
"this resource is listed more than once",
));
}
if let Some(pos) = field
.attrs
.iter()
.position(|attr| util::attr_eq(attr, "init"))
{
let attr = field.attrs.remove(pos);
let late = LateResource::parse(field, ident.span())?;
resources.insert(
ident.clone(),
Resource {
late,
expr: syn::parse2::<ExprParen>(attr.tokens)?.expr,
},
);
} else {
let late = LateResource::parse(field, ident.span())?;
late_resources.insert(ident.clone(), late);
}
}
} else {
return Err(parse::Error::new(
struct_item.span(),
"this `struct` must have named fields",
));
}
// resource_struct will be non-empty if #[resources] was encountered before
resource_struct.insert(struct_item.ident.clone(), struct_item.clone());
} else {
// Structure without the #[resources] attribute should just be passed along
user_code.push(item.clone());
}
}
Item::ForeignMod(mod_) => {
if !util::abi_is_rust(&mod_.abi) {
return Err(parse::Error::new(
mod_.abi.extern_token.span(),
"this `extern` block must use the \"Rust\" ABI",
));
}
for item in mod_.items {
if let ForeignItem::Fn(mut item) = item {
let span = item.sig.ident.span();
if let Some(pos) = item
.attrs
.iter()
.position(|attr| util::attr_eq(attr, "task"))
{
if hardware_tasks.contains_key(&item.sig.ident)
|| software_tasks.contains_key(&item.sig.ident)
{
return Err(parse::Error::new(
span,
"this task is defined multiple times",
));
}
if item.attrs.len() != 1 {
return Err(parse::Error::new(
span,
"`extern` task required `#[task(..)]` attribute",
));
}
match crate::parse::task_args(
item.attrs.remove(pos).tokens,
settings,
)? {
Either::Left(args) => {
check_binding(&args.binds)?;
check_ident(&item.sig.ident)?;
hardware_tasks.insert(
item.sig.ident.clone(),
HardwareTask::parse_foreign(args, item)?,
);
}
Either::Right(args) => {
check_ident(&item.sig.ident)?;
software_tasks.insert(
item.sig.ident.clone(),
SoftwareTask::parse_foreign(args, item)?,
);
}
}
} else {
return Err(parse::Error::new(
span,
"`extern` task required `#[task(..)]` attribute",
));
}
} else {
return Err(parse::Error::new(
item.span(),
"this item must live outside the `#[app]` module",
));
}
}
}
Item::Use(itemuse_) => {
// Store the user provided use-statements
user_imports.push(itemuse_.clone());
}
_ => {
// Anything else within the module should not make any difference
user_code.push(item.clone());
}
}
}
Ok(App {
args,
name: input.ident,
inits,
idles,
late_resources,
resources,
user_imports,
user_code,
hardware_tasks,
software_tasks,
_extensible: (),
})
}
}
#[cfg(test)]
mod tests {
use crate::ast::AppArgs;
#[test]
fn parse_app_args_true() {
let s = "peripherals = true";
let stream: proc_macro2::TokenStream = s.parse().unwrap();
let result = AppArgs::parse(stream).unwrap();
assert!(result.peripherals);
}
#[test]
fn parse_app_args_false() {
let s = "peripherals = false";
let stream: proc_macro2::TokenStream = s.parse().unwrap();
let result = AppArgs::parse(stream).unwrap();
assert!(!result.peripherals);
}
#[test]
fn parse_app_args_default() {
let s = "";
let stream: proc_macro2::TokenStream = s.parse().unwrap();
let result = AppArgs::parse(stream).unwrap();
assert!(result.peripherals);
}
}
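// Sketch of the kind of `#[app]` module this parser accepts (hypothetical,
// abridged user code; attribute names follow the util::attr_eq checks above):
//
// #[app(device = some_pac, peripherals = true)]
// mod app {
//     #[resources]
//     struct Resources {
//         #[init(0)]
//         counter: u32,
//         late_value: u32, // no #[init] expression => parsed as a late resource
//     }
//     #[init]
//     fn init(cx: init::Context) { /* .. */ }
//     #[task(binds = EXTI0)]
//     fn on_exti(cx: on_exti::Context) { /* .. */ }
// }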
rule_functions.py | #rule_functions.py
#Copyright (c) 2020 Rachel Lea Ballantyne Draelos
#MIT License
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE
def delete_mainword(sentence, mainword, **kwargs): #Done with testing
if mainword not in sentence:
return False, sentence
return True, sentence.replace(mainword,'')
def delete_part(sentence, delete_part, mainword, **kwargs): #Done with testing
"""Delete all words in the sentence coming either <delete_part>='before'
or <delete_part>='after'"""
if mainword not in sentence:
return False, sentence
senthalved = sentence.split(mainword)
if delete_part == 'after':
return True, senthalved[0]
if delete_part == 'before':
#if the word occurs more than once then we want to make sure we delete
#everything before the LAST occurrence
return True, senthalved[-1]
def delete_part_until(sentence, delete_part, mainword, until_hit, **kwargs): #Done with testing
"""Delete all words in the sentence coming either <delete_part>='before'
or <delete_part>='after' the <mainword> until you hit any words in the
list <until_hit>"""
if mainword not in sentence:
return False, sentence
senthalved = sentence.split(mainword)
if delete_part == 'after':
keep = senthalved[0] #definitely keep before
dregs = senthalved[1] #you may keep some of 'after'
idx = len(dregs)
for u in until_hit:
d = dregs.find(u)
if d < idx and d!=-1:
idx = d
keep2 = dregs[idx:]
return True, keep+' '+keep2
if delete_part == 'before':
keep = senthalved[1]
dregs = senthalved[0]
idx = 0
for u in until_hit:
d = dregs.find(u)
if d != -1 and d + len(u) > idx: #offset by len(u) because we don't want to delete u
idx = d + len(u)
keep2 = dregs[0:idx]
return True, keep2+keep #don't need a space because one will already be included
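# Illustrative call (hypothetical sentence), sketching the 'after' branch above:
# delete_part_until(' mild MAIN scarring status post surgery ', 'after', ' MAIN', ['status'])
# -> (True, ' mild status post surgery '): words between the mainword and 'status'
# are dropped; 'status' itself and everything after it are kept.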
def delete_entire_unless_immediate(sentence, mainword, position, wrange, unless_in, **kwargs): #Done with testing
"""Delete entire sentence if <mainword> is present, unless any of the words
in the list <unless_in> are present within <wrange> of position=='before' or
position=='after' the mainword in which case keep the entire sentence."""
if mainword not in sentence:
return False, sentence
if position == 'after':
if sentence.split()[-1]==mainword.strip(): #mainword is the last word so sentence can't be saved (no words after)
return True, ''
possible_save_words = ' '.join(sentence.split(mainword)[1].split()[0:wrange])
elif position == 'before':
if sentence.split()[0]==mainword.strip(): #mainword is the first word so sentence can't be saved (no words before)
return True, ''
possible_save_words = ' '.join(sentence.split(mainword)[0].split()[-1*wrange:])
#Check if any word in unless_in is a root of possible_save_word
saved = False
for u in unless_in:
if u in possible_save_words:
saved = True
if saved:
return False, sentence
else:
return True, ''
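# Sketch (hypothetical rule): mainword=' opacity', position='before', wrange=2, unless_in=['no']
# ' there is no focal opacity ' -> 'no' is within 2 words before -> kept: (False, sentence)
# ' there is a focal opacity ' -> no save word in range -> deleted: (True, '')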
def delete(sentence, mainword, **kwargs): #Done with testing
"""Delete entire sentence if <mainword> is present"""
if mainword not in sentence:
return False, sentence
else:
return True, ''
def delete_if_first_word(sentence, mainword, **kwargs): #Done with testing
"""Delete entire sentence if exactly <mainword> is the first word"""
if mainword not in sentence: #e.g. if sentence=='' due to prior processing
return False, sentence
if mainword == sentence.split()[0]:
return True, ''
else:
return False, sentence
def delete_one_before_mainword(sentence, mainword, **kwargs):
"""Delete every word starting from (and including) one word before
<mainword>. Used in ambiguity detection e.g. 'there is scarring vs
atelectasis' -->mainword 'vs' --> 'there is' (delete both scarring and
atelectasis)"""
if mainword in sentence:
s = sentence.split(mainword)[0].split()
return True, (' ').join(s[0:-1])
else:
return False, sentence
def non_handling(sentence, mainword, **kwargs): #Done with testing
"""Delete any word that starts with 'non' or delete any word that comes
immediately after the standalone word 'non'. Prevents the term search
from making mistakes on words like noncalcified, nontuberculous,
noninfectious, etc."""
if 'non' not in sentence:
return False, sentence
else:
sentlist = sentence.split()
if ' non ' in sentence: #i.e., standalone word ' non '
idx = sentlist.index('non')
return True, ' '+' '.join(sentlist[0:idx]+sentlist[idx+2:])+' '
else: #non is prefixing another word
for word in sentlist:
if 'non' in word:
sentlist.remove(word)
return True, ' '+' '.join(sentlist)+' '
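# Examples of the two paths above (hypothetical sentences):
# non_handling(' noncalcified granuloma ', ' non') -> (True, ' granuloma ')
# non_handling(' non calcified granuloma ', ' non') -> (True, ' granuloma ')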
def patent_handling(sentence, mainword, **kwargs): #Done with testing
"""Function for handling the word 'patent' """
assert mainword==' patent'
if 'patent' not in sentence:
return False, sentence
sentlist = sentence.split()
if sentlist[0]=='patent':
return delete_part_until(sentence, delete_part = 'after',mainword = 'patent', until_hit = ['status','with'])
else: #patent is at the middle or the end of the sentence
return delete_part(sentence, delete_part = 'before',mainword = 'patent')
def clear_handling(sentence, mainword, **kwargs): #Done with testing
"""Function for handling the word 'clear' """
assert mainword==' clear'
if ' clear' not in sentence:
return False, sentence
changed1, sentence = delete_part(sentence, delete_part='before',mainword=mainword)
sentence = ' clear '+sentence #must keep word 'clear' at the beginning of the fragment so that the next step can work
changed2, sentence = delete_part_until(sentence, delete_part='after',mainword=mainword,until_hit=['status'])
return (changed1 or changed2), sentence
def subcentimeter_handling(sentence, mainword, **kwargs): #Done with testing
"""Example:
'a few scattered subcentimeter lymph nodes are visualized not
significantly changed from prior' --> 'a few scattered are visualized not
significantly changed from prior'
"""
assert mainword==' subcentimeter'
if mainword not in sentence:
return False, sentence
if 'node' in ' '.join(sentence.split(mainword)[1:]):
pre_idx = sentence.rfind(' subcentimeter')
pre = sentence[0:pre_idx]
post_idx = sentence.rfind('node')+len('node')
post = sentence[post_idx:]
sentence = pre+post
return True, sentence
else:
return False, sentence
AlexaContact.ts | import { ApiError } from './ApiError';
import _camelCase = require('lodash.camelcase');
import { AlexaAPI, ApiCallOptions } from './AlexaAPI';
export class AlexaContact {
static NAME = 'name';
static GIVEN_NAME = 'given_name';
static EMAIL = 'email';
static MOBILE_NUMBER = 'mobile_number';
static async contactAPI(property: string, apiEndpoint: string, permissionToken: string) {
const validProperties = [
AlexaContact.NAME,
AlexaContact.GIVEN_NAME,
AlexaContact.EMAIL,
AlexaContact.MOBILE_NUMBER,
];
if (!permissionToken) {
return Promise.reject(new ApiError('No permissions from user.', ApiError.NO_USER_PERMISSION));
}
if (!validProperties.includes(property)) {
return Promise.reject(new Error(`${property} is not a valid property`));
}
const options: ApiCallOptions = {
endpoint: apiEndpoint,
path: `/v2/accounts/~current/settings/Profile.${_camelCase(property)}`,
permissionToken,
};
try {
const response: any = await AlexaAPI.apiCall(options); // tslint:disable-line
if (response.httpStatus === 403) {
const apiError = new ApiError(response.data.message, response.data.code);
if (response.data.message === 'Access to this resource has not yet been requested.') {
apiError.code = ApiError.NO_USER_PERMISSION; // user needs to grant access in app
}
if (response.data.message === 'Access to this resource cannot be requested.') {
apiError.code = ApiError.NO_SKILL_PERMISSION; // dev needs to set correct permissions in ASK console
}
return Promise.reject(apiError);
}
return Promise.resolve(response.data);
} catch (e) {
return Promise.reject(
new ApiError(e.message || 'Something went wrong.', e.code || ApiError.ERROR),
);
}
}
}
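// Usage sketch (hypothetical values; in a real skill the endpoint and token come
// from the incoming request's context.System fields):
//
// const email = await AlexaContact.contactAPI(
//   AlexaContact.EMAIL,
//   'https://api.amazonalexa.com',
//   apiAccessToken,
// );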
load.go | package read
import (
"errors"
"fmt"
"golang.org/x/tools/go/packages"
)
func LoadPackage(path string) (*packages.Package, error) {
c := packages.Config{
Mode: packages.NeedTypes | packages.NeedTypesInfo,
Tests: false,
}
pkgs, err := packages.Load(&c, path)
if pkgsLen := len(pkgs); pkgsLen != 1 {
err := fmt.Errorf("path must include only one package\ncurrent amount: %d\n", pkgsLen)
return nil, err
}
pkg := pkgs[0]
if len(pkg.Errors) > 0 {
err := errors.New(pkg.Errors[0].Msg)
return nil, err
}
return pkg, err
}
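// Usage sketch (hypothetical package path):
//
// pkg, err := read.LoadPackage("./internal/models")
// if err != nil {
//     log.Fatal(err)
// }
// _ = pkg.Types // type data is populated per the NeedTypes mode set above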
statistics.py | ## Module statistics.py
##
## Copyright (c) 2014 Antonio Valente <[email protected]>
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""
Statistics module.
The basic functions are stolen from python 3.4 stdlib
"""
from __future__ import division
import collections
import math
import operator
import functools
from fractions import Fraction
from decimal import Decimal
from .exceptions import StatisticsError
from .py3comp import xrange, iteritems
def isfinite(n):
"""Return True if x is neither an infinity nor a NaN, and False otherwise.
(Note that 0.0 is considered finite.)
Backported from python 3
"""
return not (math.isinf(n) or math.isnan(n))
def sum(data, start=0):
"""sum(data [, start]) -> value
Return a high-precision sum of the given numeric data. If optional
argument ``start`` is given, it is added to the total. If ``data`` is
empty, ``start`` (defaulting to 0) is returned.
"""
n, d = exact_ratio(start)
T = type(start)
partials = {d: n} # map {denominator: sum of numerators}
# Micro-optimizations.
coerce_types_ = coerce_types
exact_ratio_ = exact_ratio
partials_get = partials.get
# Add numerators for each denominator, and track the "current" type.
for x in data:
T = coerce_types_(T, type(x))
n, d = exact_ratio_(x)
partials[d] = partials_get(d, 0) + n
if None in partials:
assert issubclass(T, (float, Decimal))
assert not isfinite(partials[None])
return T(partials[None])
total = Fraction()
for d, n in sorted(partials.items()):
total += Fraction(n, d)
if issubclass(T, int):
assert total.denominator == 1
return T(total.numerator)
if issubclass(T, Decimal):
return T(total.numerator) / total.denominator
return T(total)
def exact_ratio(x):
"""Convert Real number x exactly to (numerator, denominator) pair.
x is expected to be an int, Fraction, Decimal or float.
"""
try:
try:
# int, Fraction
return x.numerator, x.denominator
except AttributeError:
# float
try:
return x.as_integer_ratio()
except AttributeError:
# Decimal
try:
return decimal_to_ratio(x)
except AttributeError:
msg = "can't convert type '{}' to numerator/denominator"
raise TypeError(msg.format(type(x).__name__))
except (OverflowError, ValueError):
# INF or NAN
return (x, None)
# FIXME This is faster than Fraction.from_decimal, but still too slow.
def decimal_to_ratio(d):
"""Convert Decimal d to exact integer ratio (numerator, denominator).
"""
sign, digits, exp = d.as_tuple()
if exp in ('F', 'n', 'N'): # INF, NAN, sNAN
assert not d.is_finite()
raise ValueError
num = 0
for digit in digits:
num = num * 10 + digit
if sign:
num = -num
den = 10 ** -exp
return (num, den)
def coerce_types(T1, T2):
"""Coerce types T1 and T2 to a common type.
Coercion is performed according to this table, where "N/A" means
that a TypeError exception is raised.
+----------+-----------+-----------+-----------+----------+
| | int | Fraction | Decimal | float |
+----------+-----------+-----------+-----------+----------+
| int | int | Fraction | Decimal | float |
| Fraction | Fraction | Fraction | N/A | float |
| Decimal | Decimal | N/A | Decimal | float |
| float | float | float | float | float |
+----------+-----------+-----------+-----------+----------+
Subclasses trump their parent class; two subclasses of the same
base class will be coerced to the second of the two.
"""
# Get the common/fast cases out of the way first.
if T1 is T2: return T1
if T1 is int: return T2
if T2 is int: return T1
# Subclasses trump their parent class.
if issubclass(T2, T1): return T2
if issubclass(T1, T2): return T1
# Floats trump everything else.
if issubclass(T2, float): return T2
if issubclass(T1, float): return T1
# Subclasses of the same base class give priority to the second.
if T1.__base__ is T2.__base__: return T2
# Otherwise, just give up.
raise TypeError('cannot coerce types %r and %r' % (T1, T2))
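# A few instances of the table above (a sketch, types only):
# coerce_types(int, Fraction) -> Fraction
# coerce_types(Fraction, float) -> float
# coerce_types(Fraction, Decimal) -> raises TypeError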
def counts(data):
"""
Generate a table of sorted (value, frequency) pairs.
"""
if data is None:
raise TypeError('None is not iterable')
table = collections.Counter(data).most_common()
if not table:
return table
# Extract the values with the highest frequency.
maxfreq = table[0][1]
for i in range(1, len(table)):
if table[i][1] != maxfreq:
table = table[:i]
break
return table
# === Measures of central tendency (averages) ===
def mean(data):
"""Return the sample arithmetic mean of data.
If ``data`` is empty, StatisticsError will be raised.
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 1:
raise StatisticsError('mean requires at least one data point')
return sum(data) / n
# FIXME: investigate ways to calculate medians without sorting? Quickselect?
def median(data):
"""Return the median (middle value) of numeric data.
When the number of data points is odd, return the middle data point.
When the number of data points is even, the median is interpolated by
taking the average of the two middle values:
"""
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
if n % 2 == 1:
return data[n // 2]
else:
i = n // 2
return (data[i - 1] + data[i]) / 2
def median_low(data):
"""Return the low median of numeric data.
When the number of data points is odd, the middle value is returned.
When it is even, the smaller of the two middle values is returned.
"""
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
if n % 2 == 1:
return data[n // 2]
else:
return data[n // 2 - 1]
def median_high(data):
"""Return the high median of data.
When the number of data points is odd, the middle value is returned.
When it is even, the larger of the two middle values is returned.
"""
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
return data[n // 2]
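# For an even-sized sample the three medians differ; e.g. for [1, 3, 5, 7]:
# median -> 4.0 (interpolated), median_low -> 3, median_high -> 5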
def mode(data):
"""Return the most common data point from discrete or nominal data.
``mode`` assumes discrete data, and returns a single value. This is the
standard treatment of the mode as commonly taught in schools:
If there is not exactly one most common value, ``mode`` will raise
StatisticsError.
"""
# Generate a table of sorted (value, frequency) pairs.
table = counts(data)
if len(table) == 1:
return table[0][0]
elif table:
raise StatisticsError(
'no unique mode; found %d equally common values' % len(table)
)
else:
raise StatisticsError('no mode for empty data')
# === Measures of spread ===
# See http://mathworld.wolfram.com/Variance.html
# http://mathworld.wolfram.com/SampleVariance.html
# http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
#
# Under no circumstances use the so-called "computational formula for
# variance", as that is only suitable for hand calculations with a small
# amount of low-precision data. It has terrible numeric properties.
#
# See a comparison of three computational methods here:
# http://www.johndcook.com/blog/2008/09/26/comparing-three-methods-of-computing-standard-deviation/
def _ss(data, c=None):
"""Return sum of square deviations of sequence data.
If ``c`` is None, the mean is calculated in one pass, and the deviations
from the mean are calculated in a second pass. Otherwise, deviations are
calculated from ``c`` as given. Use the second case with care, as it can
lead to garbage results.
"""
if c is None:
c = mean(data)
ss = sum((x - c) ** 2 for x in data)
# The following sum should mathematically equal zero, but due to rounding
# error may not.
ss -= sum((x - c) for x in data) ** 2 / len(data)
assert not ss < 0, 'negative sum of square deviations: %f' % ss
return ss
def variance(data, xbar=None):
"""Return the sample variance of data.
data should be an iterable of Real-valued numbers, with at least two
values. The optional argument xbar, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function when your data is a sample from a population. To
calculate the variance from the entire population, see ``pvariance``.
If you have already calculated the mean of your data, you can pass it as
the optional second argument ``xbar`` to avoid recalculating it:
This function does not check that ``xbar`` is actually the mean of
``data``. Giving arbitrary values for ``xbar`` may lead to invalid or
impossible results.
Decimals and Fractions are supported
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 2:
raise StatisticsError('variance requires at least two data points')
ss = _ss(data, xbar)
return ss / (n - 1)
def pvariance(data, mu=None):
"""Return the population variance of ``data``.
data should be an iterable of Real-valued numbers, with at least one
value. The optional argument mu, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function to calculate the variance from the entire population.
To estimate the variance from a sample, the ``variance`` function is
usually a better choice.
If you have already calculated the mean of the data, you can pass it as
the optional second argument to avoid recalculating it:
This function does not check that ``mu`` is actually the mean of ``data``.
Giving arbitrary values for ``mu`` may lead to invalid or impossible
results.
Decimals and Fractions are supported:
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 1:
raise StatisticsError('pvariance requires at least one data point')
ss = _ss(data, mu)
return ss / n
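# Worked instance: for data = [1.0, 2.0, 3.0] the mean is 2.0 and the squared
# deviations sum to 2.0, so variance(data) == 2.0 / (3 - 1) == 1.0 (sample)
# while pvariance(data) == 2.0 / 3 (population).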
def stdev(data, xbar=None):
"""Return the square root of the sample variance.
See ``variance`` for arguments and other details.
"""
var = variance(data, xbar)
try:
return var.sqrt()
except AttributeError:
return math.sqrt(var)
def pstdev(data, mu=None):
"""Return the square root of the population variance.
See ``pvariance`` for arguments and other details.
"""
var = pvariance(data, mu)
try:
return var.sqrt()
except AttributeError:
return math.sqrt(var)
def geometric_mean(data):
"""Return the geometric mean of data
"""
if not data:
raise StatisticsError('geometric_mean requires at least one data point')
# in order to support negative or null values
data = [x if x > 0 else math.e if x == 0 else 1.0 for x in data]
return math.pow(math.fabs(functools.reduce(operator.mul, data)), 1.0 / len(data))
def harmonic_mean(data):
"""Return the harmonic mean of data
"""
if not data:
raise StatisticsError('harmonic_mean requires at least one data point')
divisor = sum(map(lambda x: 1.0 / x if x else 0.0, data))
return len(data) / divisor if divisor else 0.0
def skewness(data):
"""Return the skewness of the data's distribution
"""
if not data:
raise StatisticsError('skewness requires at least one data point')
size = len(data)
sd = stdev(data) ** 3
if not sd:
return 0.0
mn = mean(data)
return sum(map(lambda x: ((x - mn) ** 3 / sd), data)) / size
def kurtosis(data):
"""Return the kurtosis of the data's distribution
"""
if not data:
raise StatisticsError('kurtosis requires at least one data point')
size = len(data)
sd = stdev(data) ** 4
if not sd:
return 0.0
mn = mean(data)
return sum(map(lambda x: ((x - mn) ** 4 / sd), data)) / size - 3
def percentile(data, n):
"""Return the n-th percentile of the given data
Assume that the data are already sorted
"""
size = len(data)
idx = (n / 100.0) * size - 0.5
if idx < 0 or idx > size:
raise StatisticsError("Too few data points ({}) for {}th percentile".format(size, n))
return data[int(idx)]
def get_histogram(data):
"""Return the histogram relative to the given data
Assume that the data are already sorted
"""
count = len(data)
if count < 2:
raise StatisticsError('Too few data points ({}) for get_histogram'.format(count))
min_ = data[0]
max_ = data[-1]
std = stdev(data)
bins = get_histogram_bins(min_, max_, std, count)
res = {x: 0 for x in bins}
for value in data:
for bin_ in bins:
if value <= bin_:
res[bin_] += 1
break
return sorted(iteritems(res))
def get_histogram_bins(min_, max_, std, count):
"""
Return optimal bins given the input parameters
"""
width = _get_bin_width(std, count)
count = int(round((max_ - min_) / width) + 1)
if count:
bins = [i * width + min_ for i in xrange(1, count + 1)]
else:
bins = [min_]
return bins
def _get_bin_width(stdev, count):
"""Return the histogram's optimal bin width based on Sturges
http://www.jstor.org/pss/2965501
"""
w = int(round((3.5 * stdev) / (count ** (1.0 / 3))))
if w:
return w
else:
return 1
AuthUserType.ts | import { Field, ID, ObjectType } from 'type-graphql'
@ObjectType()
class AuthUserType {
@Field(type => ID)
id: string
@Field()
name: string
@Field()
email: string
}
export default AuthUserType
p255.py | def problem255():
pass
|
api_client.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package transcribe
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/defaults"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/retry"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
smithy "github.com/aws/smithy-go"
smithydocument "github.com/aws/smithy-go/document"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"net"
"net/http"
"time"
)
const ServiceID = "Transcribe"
const ServiceAPIVersion = "2017-10-26"
// Client provides the API client to make operations call for Amazon Transcribe
// Service.
type Client struct {
options Options
}
// New returns an initialized Client based on the functional options. Provide
// additional functional options to further configure the behavior of the client,
// such as changing the client's endpoint or adding custom middleware behavior.
func New(options Options, optFns ...func(*Options)) *Client {
options = options.Copy()
resolveDefaultLogger(&options)
resolveRetryer(&options)
resolveHTTPClient(&options)
resolveHTTPSignerV4(&options)
setResolvedDefaultsMode(&options)
resolveDefaultEndpointConfiguration(&options)
for _, fn := range optFns {
fn(&options)
}
client := &Client{
options: options,
}
return client
}
type Options struct {
// Set of options to modify how an operation is invoked. These apply to all
// operations invoked for this client. Use functional options on operation call to
// modify this list for per operation behavior.
APIOptions []func(*middleware.Stack) error
// Configures the events that will be sent to the configured logger.
ClientLogMode aws.ClientLogMode
// The credentials object to use when signing requests.
Credentials aws.CredentialsProvider
// The configuration DefaultsMode that the SDK should use when constructing the
// clients initial default settings.
DefaultsMode aws.DefaultsMode
// The endpoint options to be used when attempting to resolve an endpoint.
EndpointOptions EndpointResolverOptions
// The service endpoint resolver.
EndpointResolver EndpointResolver
// Signature Version 4 (SigV4) Signer
HTTPSignerV4 HTTPSignerV4
// The logger writer interface to write logging messages to.
Logger logging.Logger
// The region to send requests to. (Required)
Region string
// Retryer guides how HTTP requests should be retried in case of recoverable
// failures. When nil the API client will use a default retryer.
Retryer aws.Retryer
// The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
// to AutoDefaultsMode and is initialized using config.LoadDefaultConfig. You
// should not populate this structure programmatically, or rely on the values here
// within your applications.
RuntimeEnvironment aws.RuntimeEnvironment
// The initial DefaultsMode used when the client options were constructed. If the
// DefaultsMode was set to aws.AutoDefaultsMode this will store what the resolved
// value was at that point in time.
resolvedDefaultsMode aws.DefaultsMode
// The HTTP client to invoke API calls with. Defaults to client's default HTTP
// implementation if nil.
HTTPClient HTTPClient
clientInitializedOptions map[struct{}]interface{}
}
// WithAPIOptions returns a functional option for setting the Client's APIOptions
// option.
func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
return func(o *Options) {
o.APIOptions = append(o.APIOptions, optFns...)
}
}
// WithEndpointResolver returns a functional option for setting the Client's
// EndpointResolver option.
func WithEndpointResolver(v EndpointResolver) func(*Options) {
return func(o *Options) {
o.EndpointResolver = v
}
}
type HTTPClient interface {
Do(*http.Request) (*http.Response, error)
}
// Copy creates a clone where the APIOptions list is deep copied.
func (o Options) Copy() Options {
to := o
to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
copy(to.APIOptions, o.APIOptions)
to.clientInitializedOptions = make(map[struct{}]interface{}, len(o.clientInitializedOptions))
for k, v := range o.clientInitializedOptions {
to.clientInitializedOptions[k] = v
}
return to
}
func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) {
ctx = middleware.ClearStackValues(ctx)
stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
options := c.options.Copy()
for _, fn := range optFns {
fn(&options)
}
finalizeClientEndpointResolverOptions(&options)
for _, fn := range stackFns {
if err := fn(stack, options); err != nil {
return nil, metadata, err
}
}
for _, fn := range options.APIOptions {
if err := fn(stack); err != nil {
return nil, metadata, err
}
}
handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
result, metadata, err = handler.Handle(ctx, params)
if err != nil {
err = &smithy.OperationError{
ServiceID: ServiceID,
OperationName: opID,
Err: err,
}
}
return result, metadata, err
}
type noSmithyDocumentSerde = smithydocument.NoSerde
func resolveDefaultLogger(o *Options) {
if o.Logger != nil {
return
}
o.Logger = logging.Nop{}
}
func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
return middleware.AddSetLoggerMiddleware(stack, o.Logger)
}
// NewFromConfig returns a new client from the provided config.
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
opts := Options{
Region: cfg.Region,
DefaultsMode: cfg.DefaultsMode,
RuntimeEnvironment: cfg.RuntimeEnvironment,
HTTPClient: cfg.HTTPClient,
Credentials: cfg.Credentials,
APIOptions: cfg.APIOptions,
Logger: cfg.Logger,
ClientLogMode: cfg.ClientLogMode,
}
resolveAWSRetryerProvider(cfg, &opts)
resolveAWSEndpointResolver(cfg, &opts)
resolveUseDualStackEndpoint(cfg, &opts)
resolveUseFIPSEndpoint(cfg, &opts)
return New(opts, optFns...)
}
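// Construction sketch (assumes the separate aws-sdk-go-v2 config module):
//
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
//     panic(err)
// }
// client := transcribe.NewFromConfig(cfg)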
func resolveHTTPClient(o *Options) {
var buildable *awshttp.BuildableClient
if o.HTTPClient != nil {
var ok bool
buildable, ok = o.HTTPClient.(*awshttp.BuildableClient)
if !ok {
return
}
} else {
buildable = awshttp.NewBuildableClient()
}
var mode aws.DefaultsMode
if ok := mode.SetFromString(string(o.DefaultsMode)); !ok {
panic(fmt.Errorf("unsupported defaults mode constant %v", mode))
}
if mode == aws.DefaultsModeAuto {
mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment)
}
if mode != aws.DefaultsModeLegacy {
modeConfig, _ := defaults.GetModeConfiguration(mode)
buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) {
if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok {
dialer.Timeout = dialerTimeout
}
})
buildable = buildable.WithTransportOptions(func(transport *http.Transport) {
if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok {
transport.TLSHandshakeTimeout = tlsHandshakeTimeout
}
})
}
o.HTTPClient = buildable
}
func resolveRetryer(o *Options) {
if o.Retryer != nil {
return
}
o.Retryer = retry.NewStandard()
}
func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
if cfg.Retryer == nil {
return
}
o.Retryer = cfg.Retryer()
}
func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil {
return
}
o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver())
}
func addClientUserAgent(stack *middleware.Stack) error {
return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "transcribe", goModuleVersion)(stack)
}
func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error {
mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{
CredentialsProvider: o.Credentials,
Signer: o.HTTPSignerV4,
LogSigning: o.ClientLogMode.IsSigning(),
})
return stack.Finalize.Add(mw, middleware.After)
}
type HTTPSignerV4 interface {
SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
}
func resolveHTTPSignerV4(o *Options) {
if o.HTTPSignerV4 != nil {
return
}
o.HTTPSignerV4 = newDefaultV4Signer(*o)
}
func newDefaultV4Signer(o Options) *v4.Signer {
return v4.NewSigner(func(so *v4.SignerOptions) {
so.Logger = o.Logger
so.LogSigning = o.ClientLogMode.IsSigning()
})
}
func setResolvedDefaultsMode(o *Options) {
if len(o.resolvedDefaultsMode) > 0 {
return
}
var mode aws.DefaultsMode
mode.SetFromString(string(o.DefaultsMode))
if mode == aws.DefaultsModeAuto {
mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment)
}
o.resolvedDefaultsMode = mode
}
func addRetryMiddlewares(stack *middleware.Stack, o Options) error {
mo := retry.AddRetryMiddlewaresOptions{
Retryer: o.Retryer,
LogRetryAttempts: o.ClientLogMode.IsRetries(),
}
return retry.AddRetryMiddlewares(stack, mo)
}
// resolves dual-stack endpoint configuration
func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error {
if len(cfg.ConfigSources) == 0 {
return nil
}
value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources)
if err != nil {
return err
}
if found {
o.EndpointOptions.UseDualStackEndpoint = value
}
return nil
}
// resolves FIPS endpoint configuration
func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
if len(cfg.ConfigSources) == 0 {
return nil
}
value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources)
if err != nil {
return err
}
if found {
o.EndpointOptions.UseFIPSEndpoint = value
}
return nil
}
func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
return awsmiddleware.AddRequestIDRetrieverMiddleware(stack)
}
func addResponseErrorMiddleware(stack *middleware.Stack) error {
return awshttp.AddResponseErrorMiddleware(stack)
}
func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
LogRequest: o.ClientLogMode.IsRequest(),
LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(),
LogResponse: o.ClientLogMode.IsResponse(),
LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
}, middleware.After)
} |
if o.HTTPClient != nil { |
frequency_cap_event_type.pb.go | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc v3.17.3
// source: google/ads/googleads/v8/enums/frequency_cap_event_type.proto
package enums
import (
reflect "reflect"
sync "sync"
_ "google.golang.org/genproto/googleapis/api/annotations"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// The type of event that the cap applies to (e.g. impression).
type FrequencyCapEventTypeEnum_FrequencyCapEventType int32
const (
// Not specified.
FrequencyCapEventTypeEnum_UNSPECIFIED FrequencyCapEventTypeEnum_FrequencyCapEventType = 0
// Used for return value only. Represents value unknown in this version.
FrequencyCapEventTypeEnum_UNKNOWN FrequencyCapEventTypeEnum_FrequencyCapEventType = 1
// The cap applies on ad impressions.
FrequencyCapEventTypeEnum_IMPRESSION FrequencyCapEventTypeEnum_FrequencyCapEventType = 2
// The cap applies on video ad views.
FrequencyCapEventTypeEnum_VIDEO_VIEW FrequencyCapEventTypeEnum_FrequencyCapEventType = 3
)
// Enum value maps for FrequencyCapEventTypeEnum_FrequencyCapEventType.
var (
FrequencyCapEventTypeEnum_FrequencyCapEventType_name = map[int32]string{
0: "UNSPECIFIED",
1: "UNKNOWN",
2: "IMPRESSION",
3: "VIDEO_VIEW",
}
FrequencyCapEventTypeEnum_FrequencyCapEventType_value = map[string]int32{
"UNSPECIFIED": 0,
"UNKNOWN": 1,
"IMPRESSION": 2,
"VIDEO_VIEW": 3,
}
)
func (x FrequencyCapEventTypeEnum_FrequencyCapEventType) Enum() *FrequencyCapEventTypeEnum_FrequencyCapEventType {
p := new(FrequencyCapEventTypeEnum_FrequencyCapEventType)
*p = x
return p
}
func (x FrequencyCapEventTypeEnum_FrequencyCapEventType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (FrequencyCapEventTypeEnum_FrequencyCapEventType) Descriptor() protoreflect.EnumDescriptor {
return file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_enumTypes[0].Descriptor()
}
func (FrequencyCapEventTypeEnum_FrequencyCapEventType) Type() protoreflect.EnumType {
return &file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_enumTypes[0]
}
func (x FrequencyCapEventTypeEnum_FrequencyCapEventType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use FrequencyCapEventTypeEnum_FrequencyCapEventType.Descriptor instead.
func (FrequencyCapEventTypeEnum_FrequencyCapEventType) EnumDescriptor() ([]byte, []int) {
return file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_rawDescGZIP(), []int{0, 0}
}
// Container for enum describing the type of event that the cap applies to.
type FrequencyCapEventTypeEnum struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *FrequencyCapEventTypeEnum) Reset() {
*x = FrequencyCapEventTypeEnum{}
if protoimpl.UnsafeEnabled {
mi := &file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *FrequencyCapEventTypeEnum) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*FrequencyCapEventTypeEnum) ProtoMessage() {}
func (x *FrequencyCapEventTypeEnum) ProtoReflect() protoreflect.Message {
mi := &file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use FrequencyCapEventTypeEnum.ProtoReflect.Descriptor instead.
func (*FrequencyCapEventTypeEnum) Descriptor() ([]byte, []int) {
return file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_rawDescGZIP(), []int{0}
}
var File_google_ads_googleads_v8_enums_frequency_cap_event_type_proto protoreflect.FileDescriptor
var file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_rawDesc = []byte{
0x0a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x38, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f,
0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x63, 0x61, 0x70, 0x5f, 0x65, 0x76,
0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x38, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x1a, 0x1c, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x72, 0x0a, 0x19, 0x46,
0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x61, 0x70, 0x45, 0x76, 0x65, 0x6e, 0x74,
0x54, 0x79, 0x70, 0x65, 0x45, 0x6e, 0x75, 0x6d, 0x22, 0x55, 0x0a, 0x15, 0x46, 0x72, 0x65, 0x71,
0x75, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x61, 0x70, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70,
0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x12,
0x0e, 0x0a, 0x0a, 0x49, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x12,
0x0e, 0x0a, 0x0a, 0x56, 0x49, 0x44, 0x45, 0x4f, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x03, 0x42,
0xef, 0x01, 0x0a, 0x21, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
0x64, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2e, 0x76, 0x38, 0x2e,
0x65, 0x6e, 0x75, 0x6d, 0x73, 0x42, 0x1a, 0x46, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79,
0x43, 0x61, 0x70, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x50, 0x72, 0x6f, 0x74,
0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x64, 0x73, 0x2f, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x64, 0x73, 0x2f, 0x76, 0x38, 0x2f, 0x65, 0x6e, 0x75, 0x6d,
0x73, 0x3b, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0xa2, 0x02, 0x03, 0x47, 0x41, 0x41, 0xaa, 0x02, 0x1d,
0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x41, 0x64, 0x73, 0x2e, 0x56, 0x38, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0xca, 0x02, 0x1d,
0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x73, 0x5c, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x41, 0x64, 0x73, 0x5c, 0x56, 0x38, 0x5c, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0xea, 0x02, 0x21,
0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x73, 0x3a, 0x3a, 0x47, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x41, 0x64, 0x73, 0x3a, 0x3a, 0x56, 0x38, 0x3a, 0x3a, 0x45, 0x6e, 0x75, 0x6d,
0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_rawDescOnce sync.Once
file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_rawDescData = file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_rawDesc
)
func file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_rawDescGZIP() []byte {
file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_rawDescOnce.Do(func() {
file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_rawDescData)
})
return file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_rawDescData
}
var file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_goTypes = []interface{}{
(FrequencyCapEventTypeEnum_FrequencyCapEventType)(0), // 0: google.ads.googleads.v8.enums.FrequencyCapEventTypeEnum.FrequencyCapEventType
(*FrequencyCapEventTypeEnum)(nil), // 1: google.ads.googleads.v8.enums.FrequencyCapEventTypeEnum
}
var file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_init() }
func file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_init() {
if File_google_ads_googleads_v8_enums_frequency_cap_event_type_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FrequencyCapEventTypeEnum); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_rawDesc,
NumEnums: 1,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_goTypes,
DependencyIndexes: file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_depIdxs,
EnumInfos: file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_enumTypes,
MessageInfos: file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_msgTypes,
}.Build()
File_google_ads_googleads_v8_enums_frequency_cap_event_type_proto = out.File
file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_rawDesc = nil
file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_goTypes = nil
file_google_ads_googleads_v8_enums_frequency_cap_event_type_proto_depIdxs = nil
}
server_session.go | package session
import (
"time"
"crypto/rand"
"github.com/mongodb/mongo-go-driver/bson"
"github.com/mongodb/mongo-go-driver/core/uuid"
)
var rander = rand.Reader
// Server is an open session with the server.
type Server struct {
SessionID *bson.Document
TxnNumber int64
LastUsed time.Time
}
// returns whether or not a session has expired given a timeout in minutes
// a session is considered expired if it has less than 1 minute left before becoming stale
func (ss *Server) expired(timeoutMinutes uint32) bool {
if timeoutMinutes <= 0 {
return true
}
timeUnused := time.Since(ss.LastUsed).Minutes()
return timeUnused > float64(timeoutMinutes-1)
}
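// E.g. with a 30 minute server-side timeout, a session last used 29.5 minutes
// ago is reported expired (29.5 > 30 - 1) while one used 10 minutes ago is not.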
// update the last used time for this session.
// must be called whenever this server session is used to send a command to the server.
func (ss *Server) updateUseTime() {
ss.LastUsed = time.Now()
}
func newServerSession() (*Server, error) {
id, err := uuid.New()
if err != nil {
return nil, err
}
idDoc := bson.NewDocument(
bson.EC.BinaryWithSubtype("id", id[:], UUIDSubtype),
)
return &Server{
SessionID: idDoc,
LastUsed: time.Now(),
}, nil
}
// IncrementTxnNumber increments the transaction number.
func (ss *Server) IncrementTxnNumber() {
ss.TxnNumber++
}
// UUIDSubtype is the BSON binary subtype that a UUID should be encoded as
const UUIDSubtype byte = 4
map.rs | //! Forest of maps.
use super::{Comparator, Forest, Node, NodeData, NodePool, Path, INNER_SIZE};
use crate::packed_option::PackedOption;
#[cfg(test)]
use alloc::string::String;
#[cfg(test)]
use core::fmt;
use core::marker::PhantomData;
/// Tag type defining forest types for a map.
struct MapTypes<K, V>(PhantomData<(K, V)>);
impl<K, V> Forest for MapTypes<K, V>
where
K: Copy,
V: Copy,
{
type Key = K;
type Value = V;
type LeafKeys = [K; INNER_SIZE - 1];
type LeafValues = [V; INNER_SIZE - 1];
fn splat_key(key: Self::Key) -> Self::LeafKeys {
[key; INNER_SIZE - 1]
}
fn splat_value(value: Self::Value) -> Self::LeafValues {
[value; INNER_SIZE - 1]
}
}
/// Memory pool for a forest of `Map` instances.
pub struct MapForest<K, V>
where
K: Copy,
V: Copy,
{
nodes: NodePool<MapTypes<K, V>>,
}
impl<K, V> MapForest<K, V>
where
K: Copy,
V: Copy,
{
/// Create a new empty forest.
pub fn new() -> Self {
Self {
nodes: NodePool::new(),
}
}
/// Clear all maps in the forest.
///
/// All `Map` instances belong to this forest are invalidated and should no longer be used.
pub fn clear(&mut self) {
self.nodes.clear();
}
}
/// B-tree mapping from `K` to `V`.
///
/// This is not a general-purpose replacement for `BTreeMap`. See the [module
/// documentation](index.html) for more information about design tradeoffs.
///
/// Maps can be cloned, but that operation should only be used as part of cloning the whole forest
/// they belong to. *Cloning a map does not allocate new memory for the clone*. It creates an alias
/// of the same memory.
#[derive(Clone)]
pub struct Map<K, V>
where
K: Copy,
V: Copy,
{
root: PackedOption<Node>,
unused: PhantomData<(K, V)>,
}
impl<K, V> Map<K, V>
where
K: Copy,
V: Copy,
{
/// Make an empty map.
pub fn new() -> Self {
Self {
root: None.into(),
unused: PhantomData,
}
}
/// Is this an empty map?
pub fn is_empty(&self) -> bool {
self.root.is_none()
}
/// Get the value stored for `key`.
pub fn get<C: Comparator<K>>(&self, key: K, forest: &MapForest<K, V>, comp: &C) -> Option<V> {
self.root
.expand()
.and_then(|root| Path::default().find(key, root, &forest.nodes, comp))
}
/// Look up the value stored for `key`.
///
/// If it exists, return the stored key-value pair.
///
/// Otherwise, return the last key-value pair with a key that is less than or equal to `key`.
///
/// If no stored keys are less than or equal to `key`, return `None`.
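/// For example, with keys 10 and 20 stored: `get_or_less(20, ..)` returns the
/// pair for 20, `get_or_less(15, ..)` falls back to the pair for 10, and
/// `get_or_less(5, ..)` returns `None` (a sketch; `..` stands for the forest and
/// comparator arguments).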
pub fn get_or_less<C: Comparator<K>>(
&self,
key: K,
forest: &MapForest<K, V>,
comp: &C,
) -> Option<(K, V)> {
self.root.expand().and_then(|root| {
let mut path = Path::default();
match path.find(key, root, &forest.nodes, comp) {
Some(v) => Some((key, v)),
None => path.prev(root, &forest.nodes),
}
})
}
/// Insert `key, value` into the map and return the old value stored for `key`, if any.
pub fn insert<C: Comparator<K>>(
&mut self,
key: K,
value: V,
forest: &mut MapForest<K, V>,
comp: &C,
) -> Option<V> {
self.cursor(forest, comp).insert(key, value)
}
/// Remove `key` from the map and return the removed value for `key`, if any.
pub fn remove<C: Comparator<K>>(
&mut self,
key: K,
forest: &mut MapForest<K, V>,
comp: &C,
) -> Option<V> {
let mut c = self.cursor(forest, comp);
if c.goto(key).is_some() {
c.remove()
} else {
None
}
}
/// Remove all entries.
pub fn clear(&mut self, forest: &mut MapForest<K, V>) {
if let Some(root) = self.root.take() {
forest.nodes.free_tree(root);
}
}
/// Retains only the elements specified by the predicate.
///
/// Remove all key-value pairs where the predicate returns false.
///
/// The predicate is allowed to update the values stored in the map.
pub fn retain<F>(&mut self, forest: &mut MapForest<K, V>, mut predicate: F)
where
F: FnMut(K, &mut V) -> bool,
{
let mut path = Path::default();
if let Some(root) = self.root.expand() {
path.first(root, &forest.nodes);
}
while let Some((node, entry)) = path.leaf_pos() {
let keep = {
let (ks, vs) = forest.nodes[node].unwrap_leaf_mut();
predicate(ks[entry], &mut vs[entry])
};
if keep {
path.next(&forest.nodes);
} else {
self.root = path.remove(&mut forest.nodes).into();
}
}
}
/// Create a cursor for navigating this map. The cursor is initially positioned off the end of
/// the map.
pub fn cursor<'a, C: Comparator<K>>(
&'a mut self,
forest: &'a mut MapForest<K, V>,
comp: &'a C,
) -> MapCursor<'a, K, V, C> {
MapCursor::new(self, forest, comp)
}
/// Create an iterator traversing this map. The iterator type is `(K, V)`.
pub fn iter<'a>(&'a self, forest: &'a MapForest<K, V>) -> MapIter<'a, K, V> {
MapIter {
root: self.root,
pool: &forest.nodes,
path: Path::default(),
}
}
}
impl<K, V> Default for Map<K, V>
where
K: Copy,
V: Copy,
{
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
impl<K, V> Map<K, V>
where
K: Copy + fmt::Display,
V: Copy,
{
/// Verify consistency.
fn verify<C: Comparator<K>>(&self, forest: &MapForest<K, V>, comp: &C)
where
NodeData<MapTypes<K, V>>: fmt::Display,
{
if let Some(root) = self.root.expand() {
forest.nodes.verify_tree(root, comp);
}
}
/// Get a text version of the path to `key`.
fn tpath<C: Comparator<K>>(&self, key: K, forest: &MapForest<K, V>, comp: &C) -> String {
use alloc::string::ToString;
match self.root.expand() {
None => "map(empty)".to_string(),
Some(root) => {
let mut path = Path::default();
path.find(key, root, &forest.nodes, comp);
path.to_string()
}
}
}
}
/// A position in a `Map` used to navigate and modify the ordered map.
///
/// A cursor always points at a key-value pair in the map, or "off the end" which is a position
/// after the last entry in the map.
pub struct MapCursor<'a, K, V, C>
where
K: 'a + Copy,
V: 'a + Copy,
C: 'a + Comparator<K>,
{
root: &'a mut PackedOption<Node>,
pool: &'a mut NodePool<MapTypes<K, V>>,
comp: &'a C,
path: Path<MapTypes<K, V>>,
}
impl<'a, K, V, C> MapCursor<'a, K, V, C>
where
K: Copy,
V: Copy,
C: Comparator<K>,
{
/// Create a cursor with a default (off-the-end) location.
fn new(container: &'a mut Map<K, V>, forest: &'a mut MapForest<K, V>, comp: &'a C) -> Self {
Self {
root: &mut container.root,
pool: &mut forest.nodes,
comp,
path: Path::default(),
}
}
/// Is this cursor pointing to an empty map?
pub fn is_empty(&self) -> bool {
self.root.is_none()
}
/// Move cursor to the next key-value pair and return it.
///
/// If the cursor reaches the end, return `None` and leave the cursor at the off-the-end
/// position.
#[cfg_attr(feature = "cargo-clippy", allow(clippy::should_implement_trait))]
pub fn next(&mut self) -> Option<(K, V)> {
self.path.next(self.pool)
}
/// Move cursor to the previous key-value pair and return it.
///
/// If the cursor is already pointing at the first entry, leave it there and return `None`.
pub fn prev(&mut self) -> Option<(K, V)> {
self.root
.expand()
.and_then(|root| self.path.prev(root, self.pool))
}
/// Get the current key, or `None` if the cursor is at the end.
pub fn key(&self) -> Option<K> {
self.path
.leaf_pos()
.and_then(|(node, entry)| self.pool[node].unwrap_leaf().0.get(entry).cloned())
}
/// Get the current value, or `None` if the cursor is at the end.
pub fn value(&self) -> Option<V> {
self.path
.leaf_pos()
.and_then(|(node, entry)| self.pool[node].unwrap_leaf().1.get(entry).cloned())
}
/// Get a mutable reference to the current value, or `None` if the cursor is at the end.
pub fn value_mut(&mut self) -> Option<&mut V> {
self.path
.leaf_pos()
.and_then(move |(node, entry)| self.pool[node].unwrap_leaf_mut().1.get_mut(entry))
}
/// Move this cursor to `key`.
///
/// If `key` is in the map, place the cursor at `key` and return the corresponding value.
///
/// If `key` is not in the set, place the cursor at the next larger element (or the end) and
/// return `None`.
pub fn goto(&mut self, elem: K) -> Option<V> {
self.root.expand().and_then(|root| {
let v = self.path.find(elem, root, self.pool, self.comp);
if v.is_none() {
self.path.normalize(self.pool);
}
v
})
}
/// Move this cursor to the first element.
pub fn goto_first(&mut self) -> Option<V> {
self.root.map(|root| self.path.first(root, self.pool).1)
}
/// Insert `(key, value))` into the map and leave the cursor at the inserted pair.
///
/// If the map did not contain `key`, return `None`.
///
/// If `key` is already present, replace the existing with `value` and return the old value.
pub fn insert(&mut self, key: K, value: V) -> Option<V> {
match self.root.expand() {
None => {
let root = self.pool.alloc_node(NodeData::leaf(key, value));
*self.root = root.into();
self.path.set_root_node(root);
None
}
Some(root) => {
// TODO: Optimize the case where `self.path` is already at the correct insert pos.
let old = self.path.find(key, root, self.pool, self.comp);
if old.is_some() {
*self.path.value_mut(self.pool) = value;
} else {
*self.root = self.path.insert(key, value, self.pool).into();
}
old
}
}
}
/// Remove the current entry (if any) and return the mapped value.
/// This advances the cursor to the next entry after the removed one.
pub fn remove(&mut self) -> Option<V> {
let value = self.value();
if value.is_some() {
*self.root = self.path.remove(self.pool).into();
}
value
}
}
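// Illustrative sketch (not part of the original source): typical cursor usage,
// assuming the `Map`, `MapForest`, and `cursor` APIs defined in this module.
//
//     let mut f = MapForest::<u32, u32>::new();
//     let mut m = Map::<u32, u32>::new();
//     let mut c = m.cursor(&mut f, &());
//     assert_eq!(c.insert(10, 100), None);   // fresh key: returns None
//     assert_eq!(c.goto_first(), Some(100)); // cursor now at the first pair
//     assert_eq!(c.key(), Some(10));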
/// An iterator visiting the key-value pairs of a `Map`.
pub struct MapIter<'a, K, V>
where
K: 'a + Copy,
V: 'a + Copy,
{
root: PackedOption<Node>,
pool: &'a NodePool<MapTypes<K, V>>,
path: Path<MapTypes<K, V>>,
}
impl<'a, K, V> Iterator for MapIter<'a, K, V>
where
K: 'a + Copy,
V: 'a + Copy,
{
type Item = (K, V);
fn next(&mut self) -> Option<Self::Item> {
// We use `self.root` to indicate if we need to go to the first element. Reset to `None`
// once we've returned the first element. This also works for an empty tree since the
// `path.next()` call returns `None` when the path is empty. This also fuses the iterator.
match self.root.take() {
Some(root) => Some(self.path.first(root, self.pool)),
None => self.path.next(self.pool),
}
}
}
#[cfg(test)]
impl<'a, K, V, C> MapCursor<'a, K, V, C>
where
K: Copy + fmt::Display,
V: Copy + fmt::Display,
C: Comparator<K>,
{
fn verify(&self) {
self.path.verify(self.pool);
self.root.map(|root| self.pool.verify_tree(root, self.comp));
}
    /// Get a text version of the path to the current position.
    fn tpath(&self) -> String {
        use alloc::string::ToString;
self.path.to_string()
}
}
#[cfg(test)]
mod tests {
use super::super::NodeData;
use super::*;
use alloc::vec::Vec;
use core::mem;
#[test]
fn node_size() {
// check that nodes are cache line sized when keys and values are 32 bits.
type F = MapTypes<u32, u32>;
assert_eq!(mem::size_of::<NodeData<F>>(), 64);
}
#[test]
fn empty() {
let mut f = MapForest::<u32, f32>::new();
f.clear();
let mut m = Map::<u32, f32>::new();
assert!(m.is_empty());
m.clear(&mut f);
assert_eq!(m.get(7, &f, &()), None);
assert_eq!(m.iter(&f).next(), None);
assert_eq!(m.get_or_less(7, &f, &()), None);
m.retain(&mut f, |_, _| unreachable!());
let mut c = m.cursor(&mut f, &());
assert!(c.is_empty());
assert_eq!(c.key(), None);
assert_eq!(c.value(), None);
assert_eq!(c.next(), None);
assert_eq!(c.prev(), None);
c.verify();
assert_eq!(c.tpath(), "<empty path>");
assert_eq!(c.goto_first(), None);
assert_eq!(c.tpath(), "<empty path>");
}
#[test]
fn inserting() {
let f = &mut MapForest::<u32, f32>::new();
let mut m = Map::<u32, f32>::new();
// The first seven values stay in a single leaf node.
assert_eq!(m.insert(50, 5.0, f, &()), None);
assert_eq!(m.insert(50, 5.5, f, &()), Some(5.0));
assert_eq!(m.insert(20, 2.0, f, &()), None);
assert_eq!(m.insert(80, 8.0, f, &()), None);
assert_eq!(m.insert(40, 4.0, f, &()), None);
assert_eq!(m.insert(60, 6.0, f, &()), None);
assert_eq!(m.insert(90, 9.0, f, &()), None);
assert_eq!(m.insert(200, 20.0, f, &()), None);
m.verify(f, &());
assert_eq!(
m.iter(f).collect::<Vec<_>>(),
[
(20, 2.0),
(40, 4.0),
(50, 5.5),
(60, 6.0),
(80, 8.0),
(90, 9.0),
(200, 20.0),
]
);
assert_eq!(m.get(0, f, &()), None);
assert_eq!(m.get(20, f, &()), Some(2.0));
assert_eq!(m.get(30, f, &()), None);
assert_eq!(m.get(40, f, &()), Some(4.0));
assert_eq!(m.get(50, f, &()), Some(5.5));
assert_eq!(m.get(60, f, &()), Some(6.0));
assert_eq!(m.get(70, f, &()), None);
assert_eq!(m.get(80, f, &()), Some(8.0));
assert_eq!(m.get(100, f, &()), None);
assert_eq!(m.get_or_less(0, f, &()), None);
assert_eq!(m.get_or_less(20, f, &()), Some((20, 2.0)));
assert_eq!(m.get_or_less(30, f, &()), Some((20, 2.0)));
assert_eq!(m.get_or_less(40, f, &()), Some((40, 4.0)));
assert_eq!(m.get_or_less(200, f, &()), Some((200, 20.0)));
assert_eq!(m.get_or_less(201, f, &()), Some((200, 20.0)));
{
let mut c = m.cursor(f, &());
assert_eq!(c.prev(), Some((200, 20.0)));
assert_eq!(c.prev(), Some((90, 9.0)));
assert_eq!(c.prev(), Some((80, 8.0)));
assert_eq!(c.prev(), Some((60, 6.0)));
assert_eq!(c.prev(), Some((50, 5.5)));
assert_eq!(c.prev(), Some((40, 4.0)));
assert_eq!(c.prev(), Some((20, 2.0)));
assert_eq!(c.prev(), None);
}
// Test some removals where the node stays healthy.
assert_eq!(m.tpath(50, f, &()), "node0[2]");
assert_eq!(m.tpath(80, f, &()), "node0[4]");
assert_eq!(m.tpath(200, f, &()), "node0[6]");
assert_eq!(m.remove(80, f, &()), Some(8.0));
assert_eq!(m.tpath(50, f, &()), "node0[2]");
assert_eq!(m.tpath(80, f, &()), "node0[4]");
assert_eq!(m.tpath(200, f, &()), "node0[5]");
assert_eq!(m.remove(80, f, &()), None);
m.verify(f, &());
assert_eq!(m.remove(20, f, &()), Some(2.0));
assert_eq!(m.tpath(50, f, &()), "node0[1]");
assert_eq!(m.tpath(80, f, &()), "node0[3]");
assert_eq!(m.tpath(200, f, &()), "node0[4]");
assert_eq!(m.remove(20, f, &()), None);
m.verify(f, &());
// [ 40 50 60 90 200 ]
{
let mut c = m.cursor(f, &());
assert_eq!(c.goto_first(), Some(4.0));
assert_eq!(c.key(), Some(40));
assert_eq!(c.value(), Some(4.0));
assert_eq!(c.next(), Some((50, 5.5)));
assert_eq!(c.next(), Some((60, 6.0)));
assert_eq!(c.next(), Some((90, 9.0)));
assert_eq!(c.next(), Some((200, 20.0)));
c.verify();
assert_eq!(c.next(), None);
c.verify();
}
// Removals from the root leaf node beyond underflow.
assert_eq!(m.remove(200, f, &()), Some(20.0));
assert_eq!(m.remove(40, f, &()), Some(4.0));
assert_eq!(m.remove(60, f, &()), Some(6.0));
m.verify(f, &());
assert_eq!(m.remove(50, f, &()), Some(5.5));
m.verify(f, &());
assert_eq!(m.remove(90, f, &()), Some(9.0));
m.verify(f, &());
assert!(m.is_empty());
}
#[test]
fn split_level0_leaf() {
// Various ways of splitting a full leaf node at level 0.
let f = &mut MapForest::<u32, f32>::new();
fn full_leaf(f: &mut MapForest<u32, f32>) -> Map<u32, f32> {
let mut m = Map::new();
for n in 1..8 {
m.insert(n * 10, n as f32 * 1.1, f, &());
}
m
}
// Insert at front of leaf.
let mut m = full_leaf(f);
m.insert(5, 4.2, f, &());
m.verify(f, &());
assert_eq!(m.get(5, f, &()), Some(4.2));
// Retain even entries, with altered values.
m.retain(f, |k, v| {
*v = (k / 10) as f32;
(k % 20) == 0
});
assert_eq!(
m.iter(f).collect::<Vec<_>>(),
[(20, 2.0), (40, 4.0), (60, 6.0)]
);
// Insert at back of leaf.
let mut m = full_leaf(f);
m.insert(80, 4.2, f, &());
m.verify(f, &());
assert_eq!(m.get(80, f, &()), Some(4.2));
// Insert before middle (40).
let mut m = full_leaf(f);
m.insert(35, 4.2, f, &());
m.verify(f, &());
assert_eq!(m.get(35, f, &()), Some(4.2));
// Insert after middle (40).
let mut m = full_leaf(f);
m.insert(45, 4.2, f, &());
m.verify(f, &());
assert_eq!(m.get(45, f, &()), Some(4.2));
m.clear(f);
assert!(m.is_empty());
}
#[test]
fn split_level1_leaf() {
// Various ways of splitting a full leaf node at level 1.
let f = &mut MapForest::<u32, f32>::new();
// Return a map whose root node is a full inner node, and the leaf nodes are all full
// containing:
//
// 110, 120, ..., 170
// 210, 220, ..., 270
// ...
// 810, 820, ..., 870
fn full(f: &mut MapForest<u32, f32>) -> Map<u32, f32> {
let mut m = Map::new();
// Start by inserting elements in order.
// This should leave 8 leaf nodes with 4 elements in each.
for row in 1..9 {
for col in 1..5 {
m.insert(row * 100 + col * 10, row as f32 + col as f32 * 0.1, f, &());
}
}
// Then top up the leaf nodes without splitting them.
for row in 1..9 {
for col in 5..8 {
m.insert(row * 100 + col * 10, row as f32 + col as f32 * 0.1, f, &());
}
}
m
}
let mut m = full(f);
        // Verify geometry. We get node2 as the root and leaves node0, 1, 3, ...
m.verify(f, &());
assert_eq!(m.tpath(110, f, &()), "node2[0]--node0[0]");
assert_eq!(m.tpath(140, f, &()), "node2[0]--node0[3]");
assert_eq!(m.tpath(210, f, &()), "node2[1]--node1[0]");
assert_eq!(m.tpath(270, f, &()), "node2[1]--node1[6]");
assert_eq!(m.tpath(310, f, &()), "node2[2]--node3[0]");
assert_eq!(m.tpath(810, f, &()), "node2[7]--node8[0]");
assert_eq!(m.tpath(870, f, &()), "node2[7]--node8[6]");
{
let mut c = m.cursor(f, &());
assert_eq!(c.goto_first(), Some(1.1));
assert_eq!(c.key(), Some(110));
}
// Front of first leaf.
m.insert(0, 4.2, f, &());
m.verify(f, &());
assert_eq!(m.get(0, f, &()), Some(4.2));
// First leaf split 4-4 after appending to LHS.
f.clear();
m = full(f);
m.insert(135, 4.2, f, &());
m.verify(f, &());
assert_eq!(m.get(135, f, &()), Some(4.2));
// First leaf split 4-4 after prepending to RHS.
f.clear();
m = full(f);
m.insert(145, 4.2, f, &());
m.verify(f, &());
assert_eq!(m.get(145, f, &()), Some(4.2));
// First leaf split 4-4 after appending to RHS.
f.clear();
m = full(f);
m.insert(175, 4.2, f, &());
m.verify(f, &());
assert_eq!(m.get(175, f, &()), Some(4.2));
// Left-middle leaf split, ins LHS.
f.clear();
m = full(f);
m.insert(435, 4.2, f, &());
m.verify(f, &());
assert_eq!(m.get(435, f, &()), Some(4.2));
// Left-middle leaf split, ins RHS.
f.clear();
m = full(f);
m.insert(445, 4.2, f, &());
m.verify(f, &());
assert_eq!(m.get(445, f, &()), Some(4.2));
// Right-middle leaf split, ins LHS.
f.clear();
m = full(f);
m.insert(535, 4.2, f, &());
m.verify(f, &());
assert_eq!(m.get(535, f, &()), Some(4.2));
// Right-middle leaf split, ins RHS.
f.clear();
m = full(f);
m.insert(545, 4.2, f, &());
m.verify(f, &());
assert_eq!(m.get(545, f, &()), Some(4.2));
// Last leaf split, ins LHS.
f.clear();
m = full(f);
m.insert(835, 4.2, f, &());
m.verify(f, &());
assert_eq!(m.get(835, f, &()), Some(4.2));
// Last leaf split, ins RHS.
f.clear();
m = full(f);
m.insert(845, 4.2, f, &());
m.verify(f, &());
assert_eq!(m.get(845, f, &()), Some(4.2));
// Front of last leaf.
f.clear();
m = full(f);
m.insert(805, 4.2, f, &());
m.verify(f, &());
assert_eq!(m.get(805, f, &()), Some(4.2));
m.clear(f);
m.verify(f, &());
}
// Make a tree with two barely healthy leaf nodes:
// [ 10 20 30 40 ] [ 50 60 70 80 ]
fn two_leaf(f: &mut MapForest<u32, f32>) -> Map<u32, f32> {
f.clear();
let mut m = Map::new();
for n in 1..9 {
m.insert(n * 10, n as f32, f, &());
}
m
}
#[test]
fn remove_level1() {
let f = &mut MapForest::<u32, f32>::new();
let mut m = two_leaf(f);
// Verify geometry.
m.verify(f, &());
assert_eq!(m.tpath(10, f, &()), "node2[0]--node0[0]");
assert_eq!(m.tpath(40, f, &()), "node2[0]--node0[3]");
assert_eq!(m.tpath(49, f, &()), "node2[0]--node0[4]");
assert_eq!(m.tpath(50, f, &()), "node2[1]--node1[0]");
assert_eq!(m.tpath(80, f, &()), "node2[1]--node1[3]");
// Remove the front entry from a node that stays healthy.
assert_eq!(m.insert(55, 5.5, f, &()), None);
assert_eq!(m.remove(50, f, &()), Some(5.0));
m.verify(f, &());
assert_eq!(m.tpath(49, f, &()), "node2[0]--node0[4]");
assert_eq!(m.tpath(50, f, &()), "node2[0]--node0[4]");
assert_eq!(m.tpath(55, f, &()), "node2[1]--node1[0]");
// Remove the front entry from the first leaf node: No critical key to update.
assert_eq!(m.insert(15, 1.5, f, &()), None);
assert_eq!(m.remove(10, f, &()), Some(1.0));
m.verify(f, &());
// [ 15 20 30 40 ] [ 55 60 70 80 ]
// Remove the front entry from a right-most node that underflows.
// No rebalancing for the right-most node. Still need critical key update.
assert_eq!(m.remove(55, f, &()), Some(5.5));
m.verify(f, &());
assert_eq!(m.tpath(55, f, &()), "node2[0]--node0[4]");
assert_eq!(m.tpath(60, f, &()), "node2[1]--node1[0]");
// [ 15 20 30 40 ] [ 60 70 80 ]
// Replenish the right leaf.
assert_eq!(m.insert(90, 9.0, f, &()), None);
assert_eq!(m.insert(100, 10.0, f, &()), None);
m.verify(f, &());
assert_eq!(m.tpath(55, f, &()), "node2[0]--node0[4]");
assert_eq!(m.tpath(60, f, &()), "node2[1]--node1[0]");
// [ 15 20 30 40 ] [ 60 70 80 90 100 ]
// Removing one entry from the left leaf should trigger a rebalancing from the right
// sibling.
assert_eq!(m.remove(20, f, &()), Some(2.0));
m.verify(f, &());
// [ 15 30 40 60 ] [ 70 80 90 100 ]
// Check that the critical key was updated correctly.
assert_eq!(m.tpath(50, f, &()), "node2[0]--node0[3]");
assert_eq!(m.tpath(60, f, &()), "node2[0]--node0[3]");
assert_eq!(m.tpath(70, f, &()), "node2[1]--node1[0]");
// Remove front entry from the left-most leaf node, underflowing.
// This should cause two leaf nodes to be merged and the root node to go away.
assert_eq!(m.remove(15, f, &()), Some(1.5));
m.verify(f, &());
}
#[test]
fn remove_level1_rightmost() {
let f = &mut MapForest::<u32, f32>::new();
let mut m = two_leaf(f);
// [ 10 20 30 40 ] [ 50 60 70 80 ]
// Remove entries from the right leaf. This doesn't trigger a rebalancing.
assert_eq!(m.remove(60, f, &()), Some(6.0));
assert_eq!(m.remove(80, f, &()), Some(8.0));
assert_eq!(m.remove(50, f, &()), Some(5.0));
m.verify(f, &());
// [ 10 20 30 40 ] [ 70 ]
assert_eq!(m.tpath(50, f, &()), "node2[0]--node0[4]");
assert_eq!(m.tpath(70, f, &()), "node2[1]--node1[0]");
// Removing the last entry from the right leaf should cause a collapse.
assert_eq!(m.remove(70, f, &()), Some(7.0));
m.verify(f, &());
}
// Make a 3-level tree with barely healthy nodes.
// 1 root, 8 inner nodes, 7*4+5=33 leaf nodes, 4 entries each.
fn level3_sparse(f: &mut MapForest<u32, f32>) -> Map<u32, f32> {
f.clear();
let mut m = Map::new();
for n in 1..133 {
m.insert(n * 10, n as f32, f, &());
}
m
}
#[test]
fn level3_removes() {
let f = &mut MapForest::<u32, f32>::new();
let mut m = level3_sparse(f);
m.verify(f, &());
// Check geometry.
// Root: node11
// [ node2 170 node10 330 node16 490 node21 650 node26 810 node31 970 node36 1130 node41 ]
// L1: node11
assert_eq!(m.tpath(0, f, &()), "node11[0]--node2[0]--node0[0]");
assert_eq!(m.tpath(10000, f, &()), "node11[7]--node41[4]--node40[4]");
// 650 is a critical key in the middle of the root.
assert_eq!(m.tpath(640, f, &()), "node11[3]--node21[3]--node19[3]");
assert_eq!(m.tpath(650, f, &()), "node11[4]--node26[0]--node20[0]");
// Deleting 640 triggers a rebalance from node19 to node 20, cascading to n21 -> n26.
assert_eq!(m.remove(640, f, &()), Some(64.0));
m.verify(f, &());
assert_eq!(m.tpath(650, f, &()), "node11[3]--node26[3]--node20[3]");
// 1130 is in the first leaf of the last L1 node. Deleting it triggers a rebalance node35
// -> node37, but no rebalance above where there is no right sibling.
assert_eq!(m.tpath(1130, f, &()), "node11[6]--node41[0]--node35[0]");
assert_eq!(m.tpath(1140, f, &()), "node11[6]--node41[0]--node35[1]");
assert_eq!(m.remove(1130, f, &()), Some(113.0));
m.verify(f, &());
assert_eq!(m.tpath(1140, f, &()), "node11[6]--node41[0]--node37[0]");
}
#[test]
fn insert_many() {
let f = &mut MapForest::<u32, f32>::new();
let mut m = Map::<u32, f32>::new();
let mm = 4096;
let mut x = 0;
for n in 0..mm {
assert_eq!(m.insert(x, n as f32, f, &()), None);
m.verify(f, &());
x = (x + n + 1) % mm;
}
x = 0;
for n in 0..mm {
assert_eq!(m.get(x, f, &()), Some(n as f32));
x = (x + n + 1) % mm;
}
x = 0;
for n in 0..mm {
assert_eq!(m.remove(x, f, &()), Some(n as f32));
m.verify(f, &());
x = (x + n + 1) % mm;
}
assert!(m.is_empty());
}
}
index.ts
export * from './defs/channel';
export * from './defs/chat';
export * from './defs/interactive';
export * from './defs/oauth';
export * from './defs/user';
export * from './providers/OAuth';
export * from './providers/Provider';
export * from './services/Channel';
export * from './services/Chat';
export * from './services/Clips';
export * from './services/Game';
export * from './services/Service';
export * from './ws/Socket';
export * from './ws/Reply';
export * from './Client';
export * from './errors';
export * from './RequestRunner';
file_systems_api.py
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.3 Python SDK
Pure Storage FlashBlade REST 1.3 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class FileSystemsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_file_systems(self, file_system, **kwargs):
"""
Create a new file system
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_file_systems(file_system, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param FileSystem file_system: the attribute map used to create the file system (required)
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_file_systems_with_http_info(file_system, **kwargs)
else:
(data) = self.create_file_systems_with_http_info(file_system, **kwargs)
return data
def create_file_systems_with_http_info(self, file_system, **kwargs):
"""
Create a new file system
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_file_systems_with_http_info(file_system, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param FileSystem file_system: the attribute map used to create the file system (required)
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['file_system']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_file_systems" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'file_system' is set
if ('file_system' not in params) or (params['file_system'] is None):
raise ValueError("Missing the required parameter `file_system` when calling `create_file_systems`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'file_system' in params:
body_params = params['file_system']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['AuthTokenHeader']
        return self.api_client.call_api('/1.3/file-systems', 'POST',
                                        path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_file_systems(self, name, **kwargs):
"""
Delete a file system
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_file_systems(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the file system to be deleted (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_file_systems_with_http_info(name, **kwargs)
else:
(data) = self.delete_file_systems_with_http_info(name, **kwargs)
return data
def delete_file_systems_with_http_info(self, name, **kwargs):
"""
Delete a file system
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_file_systems_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the file system to be deleted (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_file_systems" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_file_systems`")
collection_formats = {}
path_params = {}
query_params = []
if 'name' in params:
query_params.append(('name', params['name']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/file-systems', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_file_systems(self, **kwargs):
"""
List file systems
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_file_systems(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: The way to order the results.
:param int start: start
:param int limit: limit, should be >= 0
:param str token: token
:param bool total: Return a total object in addition to the other results.
:param bool total_only: Return only the total object.
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_file_systems_with_http_info(**kwargs)
else:
(data) = self.list_file_systems_with_http_info(**kwargs)
return data
def list_file_systems_with_http_info(self, **kwargs):
"""
List file systems
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_file_systems_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: The way to order the results.
:param int start: start
:param int limit: limit, should be >= 0
:param str token: token
:param bool total: Return a total object in addition to the other results.
:param bool total_only: Return only the total object.
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'filter', 'sort', 'start', 'limit', 'token', 'total', 'total_only']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_file_systems" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'start' in params:
query_params.append(('start', params['start']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'token' in params:
query_params.append(('token', params['token']))
if 'total' in params:
query_params.append(('total', params['total']))
if 'total_only' in params:
query_params.append(('total_only', params['total_only']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/file-systems', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_file_systems(self, name, attributes, **kwargs):
"""
Update an existing file system
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_file_systems(name, attributes, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: the name of the file system to be updated (required)
        :param FileSystem attributes: the new attributes; only modifiable fields may be used. (required)
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_file_systems_with_http_info(name, attributes, **kwargs)
else:
(data) = self.update_file_systems_with_http_info(name, attributes, **kwargs)
return data
def update_file_systems_with_http_info(self, name, attributes, **kwargs):
"""
Update an existing file system
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_file_systems_with_http_info(name, attributes, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: the name of the file system to be updated (required)
        :param FileSystem attributes: the new attributes; only modifiable fields may be used. (required)
:return: FileSystemResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'attributes']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_file_systems" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `update_file_systems`")
# verify the required parameter 'attributes' is set
if ('attributes' not in params) or (params['attributes'] is None):
raise ValueError("Missing the required parameter `attributes` when calling `update_file_systems`")
collection_formats = {}
path_params = {}
query_params = []
if 'name' in params:
query_params.append(('name', params['name']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'attributes' in params:
body_params = params['attributes']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.3/file-systems', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSystemResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
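# Illustrative usage sketch (not generated code): how a client might call this
# API. `PurityFb`, the array address, and the API token below are assumptions
# for illustration only.
#
#   from purity_fb import PurityFb, FileSystem
#   fb = PurityFb("flashblade.example.com")
#   fb.login("T-0b2c...")
#   res = fb.file_systems.create_file_systems(FileSystem(name="myfs"))
#   listing = fb.file_systems.list_file_systems(limit=10)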
enum_set.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A structure for holding a set of enum variants
//!
//! This module defines a container which uses an efficient bit mask
//! representation to hold C-like enum variants.
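//!
//! A minimal usage sketch (the enum and its `CLike` impl are hypothetical; see
//! the tests below for a complete implementation):
//!
//!     let mut s: EnumSet<MyEnum> = EnumSet::empty();
//!     s.add(VariantA);
//!     assert!(s.contains_elem(VariantA));
//!     assert!(!s.is_empty());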
use std::num::Bitwise;
#[deriving(Clone, PartialEq, TotalEq, Hash, Show)]
/// A specialized Set implementation to use enum types.
pub struct EnumSet<E> {
// We must maintain the invariant that no bits are set
// for which no variant exists
bits: uint
}
/// An interface for casting C-like enum to uint and back.
pub trait CLike {
/// Converts C-like enum to uint.
fn to_uint(&self) -> uint;
/// Converts uint to C-like enum.
fn from_uint(uint) -> Self;
}
fn bit<E:CLike>(e: E) -> uint {
1 << e.to_uint()
}
impl<E:CLike> EnumSet<E> {
/// Returns an empty EnumSet.
pub fn empty() -> EnumSet<E> {
EnumSet {bits: 0}
}
/// Returns true if an EnumSet is empty.
pub fn is_empty(&self) -> bool {
self.bits == 0
}
/// Returns true if an EnumSet contains any enum of a given EnumSet
pub fn intersects(&self, e: EnumSet<E>) -> bool {
(self.bits & e.bits) != 0
}
/// Returns an intersection of both EnumSets.
pub fn intersection(&self, e: EnumSet<E>) -> EnumSet<E> {
EnumSet {bits: self.bits & e.bits}
}
/// Returns true if a given EnumSet is included in an EnumSet.
pub fn contains(&self, e: EnumSet<E>) -> bool {
(self.bits & e.bits) == e.bits
}
/// Returns a union of both EnumSets.
pub fn union(&self, e: EnumSet<E>) -> EnumSet<E> {
EnumSet {bits: self.bits | e.bits}
}
/// Add an enum to an EnumSet
pub fn add(&mut self, e: E) {
self.bits |= bit(e);
}
/// Returns true if an EnumSet contains a given enum
pub fn contains_elem(&self, e: E) -> bool {
(self.bits & bit(e)) != 0
}
/// Returns an iterator over an EnumSet
pub fn iter(&self) -> Items<E> {
Items::new(self.bits)
}
}
impl<E:CLike> Sub<EnumSet<E>, EnumSet<E>> for EnumSet<E> {
fn sub(&self, e: &EnumSet<E>) -> EnumSet<E> {
EnumSet {bits: self.bits & !e.bits}
}
}
impl<E:CLike> BitOr<EnumSet<E>, EnumSet<E>> for EnumSet<E> {
fn bitor(&self, e: &EnumSet<E>) -> EnumSet<E> {
EnumSet {bits: self.bits | e.bits}
}
}
impl<E:CLike> BitAnd<EnumSet<E>, EnumSet<E>> for EnumSet<E> {
fn bitand(&self, e: &EnumSet<E>) -> EnumSet<E> {
EnumSet {bits: self.bits & e.bits}
}
}
/// An iterator over an EnumSet
pub struct Items<E> {
index: uint,
bits: uint,
}
impl<E:CLike> Items<E> {
fn new(bits: uint) -> Items<E> {
Items { index: 0, bits: bits }
}
}
impl<E:CLike> Iterator<E> for Items<E> {
fn next(&mut self) -> Option<E> {
if self.bits == 0 {
return None;
}
while (self.bits & 1) == 0 {
self.index += 1;
self.bits >>= 1;
}
let elem = CLike::from_uint(self.index);
self.index += 1;
self.bits >>= 1;
Some(elem)
}
fn size_hint(&self) -> (uint, Option<uint>) {
let exact = self.bits.count_ones();
(exact, Some(exact))
}
}
#[cfg(test)]
mod test {
use std::mem;
use enum_set::{EnumSet, CLike};
#[deriving(PartialEq, Show)]
#[repr(uint)]
enum Foo {
A, B, C
}
impl CLike for Foo {
fn to_uint(&self) -> uint {
*self as uint
}
fn from_uint(v: uint) -> Foo {
unsafe { mem::transmute(v) }
}
}
#[test]
fn test_empty() {
let e: EnumSet<Foo> = EnumSet::empty();
assert!(e.is_empty());
}
///////////////////////////////////////////////////////////////////////////
// intersect
#[test]
    fn test_two_empties_do_not_intersect() {
let e1: EnumSet<Foo> = EnumSet::empty();
let e2: EnumSet<Foo> = EnumSet::empty();
assert!(!e1.intersects(e2));
}
#[test]
fn test_empty_does_not_intersect_with_full() {
let e1: EnumSet<Foo> = EnumSet::empty();
let mut e2: EnumSet<Foo> = EnumSet::empty();
e2.add(A);
e2.add(B);
e2.add(C);
assert!(!e1.intersects(e2));
}
#[test]
fn test_disjoint_intersects() {
let mut e1: EnumSet<Foo> = EnumSet::empty();
e1.add(A);
let mut e2: EnumSet<Foo> = EnumSet::empty();
e2.add(B);
assert!(!e1.intersects(e2));
}
#[test]
fn test_overlapping_intersects() {
let mut e1: EnumSet<Foo> = EnumSet::empty();
e1.add(A);
let mut e2: EnumSet<Foo> = EnumSet::empty();
e2.add(A);
e2.add(B);
assert!(e1.intersects(e2));
}
///////////////////////////////////////////////////////////////////////////
// contains and contains_elem
#[test]
fn test_contains() {
let mut e1: EnumSet<Foo> = EnumSet::empty();
e1.add(A);
let mut e2: EnumSet<Foo> = EnumSet::empty();
e2.add(A);
e2.add(B);
assert!(!e1.contains(e2));
assert!(e2.contains(e1));
}
#[test]
fn test_contains_elem() {
let mut e1: EnumSet<Foo> = EnumSet::empty();
e1.add(A);
assert!(e1.contains_elem(A));
assert!(!e1.contains_elem(B));
assert!(!e1.contains_elem(C));
e1.add(A);
e1.add(B);
assert!(e1.contains_elem(A));
assert!(e1.contains_elem(B));
assert!(!e1.contains_elem(C));
}
///////////////////////////////////////////////////////////////////////////
// iter
#[test]
fn test_iterator() {
let mut e1: EnumSet<Foo> = EnumSet::empty();
let elems: Vec<Foo> = e1.iter().collect();
assert!(elems.is_empty())
e1.add(A);
let elems = e1.iter().collect();
assert_eq!(vec![A], elems)
e1.add(C);
let elems = e1.iter().collect();
assert_eq!(vec![A,C], elems)
e1.add(C);
let elems = e1.iter().collect();
assert_eq!(vec![A,C], elems)
e1.add(B);
let elems = e1.iter().collect();
assert_eq!(vec![A,B,C], elems)
}
///////////////////////////////////////////////////////////////////////////
// operators
#[test]
fn test_operators() {
let mut e1: EnumSet<Foo> = EnumSet::empty();
e1.add(A);
e1.add(C);
let mut e2: EnumSet<Foo> = EnumSet::empty();
e2.add(B);
e2.add(C);
let e_union = e1 | e2;
let elems = e_union.iter().collect();
assert_eq!(vec![A,B,C], elems)
let e_intersection = e1 & e2;
let elems = e_intersection.iter().collect();
assert_eq!(vec![C], elems)
let e_subtract = e1 - e2;
let elems = e_subtract.iter().collect();
assert_eq!(vec![A], elems)
}
}
__init__.py
import os
from flask import Flask


def create_app(test_config=None):
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='dev',
DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed in
app.config.from_mapping(test_config)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
# a simple page that says hello
@app.route('/hello')
def hello():
return 'Hello, World!'
from . import db
db.init_app(app)
from . import auth
app.register_blueprint(auth.bp)
from . import blog
app.register_blueprint(blog.bp)
app.add_url_rule('/', endpoint='index')
    return app
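# Illustrative usage sketch (assumes this package is installed as `flaskr`):
#
#   $ export FLASK_APP=flaskr
#   $ flask run
#
# or, programmatically (e.g. in tests):
#
#   from flaskr import create_app
#   app = create_app({'TESTING': True})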
create-class.dto.ts
import {
IsDate,
IsNumber,
IsObject,
IsString,
Max,
Min
} from 'class-validator';
import {Type} from "class-transformer";
import {ApiProperty} from "@nestjs/swagger";
class Duration {
@IsDate()
started: Date;
@IsDate()
closed: Date;
}
export class CreateClassDto {
@ApiProperty({ example: 'Backend' })
@IsString()
readonly title: string;
@ApiProperty({ example: 'Backend Online Course' })
@IsString()
readonly description: string;
@ApiProperty({ example: '2' })
@IsNumber()
@Min(1)
@Max(9999)
readonly order: number;
@ApiProperty({ example: '{ started: 2019-06-19T07:44:06.353Z, closed: 2019-06-19T07:44:06.353Z}' })
@Type(() => Duration)
@IsObject()
duration: Duration;
}
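// Illustrative request body this DTO would accept (values are examples only,
// taken from the @ApiProperty annotations above):
//
// {
//   "title": "Backend",
//   "description": "Backend Online Course",
//   "order": 2,
//   "duration": {
//     "started": "2019-06-19T07:44:06.353Z",
//     "closed": "2019-06-19T07:44:06.353Z"
//   }
// }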
ze_generated_example_gatewayhostnameconfiguration_client_test.go
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
package armapimanagement_test
import (
"context"
"log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/apimanagement/armapimanagement"
)
// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/tree/main/specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2021-08-01/examples/ApiManagementListGatewayHostnameConfigurations.json
func ExampleGatewayHostnameConfigurationClient_NewListByServicePager() {
cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
ctx := context.Background()
client, err := armapimanagement.NewGatewayHostnameConfigurationClient("subid", cred, nil)
if err != nil {
log.Fatalf("failed to create client: %v", err)
}
pager := client.NewListByServicePager("rg1",
"apimService1",
"gw1",
&armapimanagement.GatewayHostnameConfigurationClientListByServiceOptions{Filter: nil,
Top: nil,
Skip: nil,
})
for pager.More() {
nextResult, err := pager.NextPage(ctx)
if err != nil {
log.Fatalf("failed to advance page: %v", err)
}
for _, v := range nextResult.Value {
// TODO: use page item
_ = v
}
}
}
// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/tree/main/specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2021-08-01/examples/ApiManagementHeadGatewayHostnameConfiguration.json
func ExampleGatewayHostnameConfigurationClient_GetEntityTag() {
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
log.Fatalf("failed to obtain a credential: %v", err)
}
ctx := context.Background()
client, err := armapimanagement.NewGatewayHostnameConfigurationClient("subid", cred, nil)
if err != nil {
log.Fatalf("failed to create client: %v", err)
}
_, err = client.GetEntityTag(ctx,
"rg1",
"apimService1",
"gw1",
"default",
nil)
if err != nil {
log.Fatalf("failed to finish the request: %v", err)
}
}
// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/tree/main/specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2021-08-01/examples/ApiManagementGetGatewayHostnameConfiguration.json
func ExampleGatewayHostnameConfigurationClient_Get() {
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
log.Fatalf("failed to obtain a credential: %v", err)
}
ctx := context.Background()
client, err := armapimanagement.NewGatewayHostnameConfigurationClient("subid", cred, nil)
if err != nil {
log.Fatalf("failed to create client: %v", err)
}
res, err := client.Get(ctx,
"rg1",
"apimService1",
"gw1",
"default",
nil)
if err != nil {
log.Fatalf("failed to finish the request: %v", err)
}
// TODO: use response item
_ = res
}
// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/tree/main/specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2021-08-01/examples/ApiManagementCreateGatewayHostnameConfiguration.json
func ExampleGatewayHostnameConfigurationClient_CreateOrUpdate() {
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
log.Fatalf("failed to obtain a credential: %v", err)
}
ctx := context.Background()
client, err := armapimanagement.NewGatewayHostnameConfigurationClient("subid", cred, nil)
if err != nil {
log.Fatalf("failed to create client: %v", err)
}
res, err := client.CreateOrUpdate(ctx,
"rg1",
"apimService1",
"gw1",
"default",
armapimanagement.GatewayHostnameConfigurationContract{
Properties: &armapimanagement.GatewayHostnameConfigurationContractProperties{
CertificateID: to.Ptr("/subscriptions/subid/resourceGroups/rg1/providers/Microsoft.ApiManagement/service/apimService1/certificates/cert1"),
Hostname: to.Ptr("*"),
HTTP2Enabled: to.Ptr(true),
NegotiateClientCertificate: to.Ptr(false),
Tls10Enabled: to.Ptr(false),
Tls11Enabled: to.Ptr(false),
},
},
&armapimanagement.GatewayHostnameConfigurationClientCreateOrUpdateOptions{IfMatch: nil})
if err != nil {
log.Fatalf("failed to finish the request: %v", err)
}
// TODO: use response item
_ = res
}
// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/tree/main/specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2021-08-01/examples/ApiManagementDeleteGatewayHostnameConfiguration.json
func ExampleGatewayHostnameConfigurationClient_Delete() {
cred, err := azidentity.NewDefaultAzureCredential(nil)
if err != nil {
log.Fatalf("failed to obtain a credential: %v", err)
}
ctx := context.Background()
client, err := armapimanagement.NewGatewayHostnameConfigurationClient("subid", cred, nil)
if err != nil {
log.Fatalf("failed to create client: %v", err)
}
_, err = client.Delete(ctx,
"rg1",
"apimService1",
"gw1",
"default",
"*",
nil)
if err != nil {
log.Fatalf("failed to finish the request: %v", err)
}
}
| {
log.Fatalf("failed to obtain a credential: %v", err)
} |
mount_management_section.ts
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { CoreSetup } from 'src/core/public';
import { ManagementAppMountParams } from 'src/plugins/management/public';
import { StartDependencies } from '../types';
import { documentationService, uiMetricService, apiService, breadcrumbService } from './services';
import { renderApp } from '.';
export async function mountManagementSection(
{ http, getStartServices, notifications }: CoreSetup<StartDependencies>,
params: ManagementAppMountParams
) {
const { element, setBreadcrumbs, history } = params;
const [coreStart, depsStart] = await getStartServices();
const {
docLinks,
        i18n: { Context: I18nContext },
    } = coreStart;
    documentationService.setup(docLinks);
breadcrumbService.setup(setBreadcrumbs);
const services = {
breadcrumbs: breadcrumbService,
metric: uiMetricService,
documentation: documentationService,
api: apiService,
notifications,
history,
uiSettings: coreStart.uiSettings,
urlGenerators: depsStart.share.urlGenerators,
};
return renderApp(element, I18nContext, services, { http });
}
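// Illustrative registration sketch (hypothetical plugin.ts; the section id and
// title below are assumptions, not taken from this file):
//
//   management.sections.section.ingest.registerApp({
//     id: 'ingest_pipelines',
//     title: 'Ingest Pipelines',
//     mount: (params) => mountManagementSection(coreSetup, params),
//   });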
|
web.rs
//! Test suite for the Web and headless browsers.
#![cfg(target_arch = "wasm32")]
extern crate wasm_bindgen_test;
use tendermint::Time;
use tendermint_light_client_js::{verify, Error, JsOptions};
use tendermint_light_client_verifier::{types::LightBlock, Verdict};
use wasm_bindgen::JsValue;
use wasm_bindgen_test::*;
wasm_bindgen_test_configure!(run_in_browser);
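// These tests are typically run with wasm-pack (invocation is illustrative;
// exact flags depend on the local setup):
//
//     wasm-pack test --headless --chrome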
const UNTRUSTED_BLOCK: &str = r#"{
"signed_header": {
"header": {
"version": {
"block": "11",
"app": "0"
},
"chain_id": "test-chain",
"height": "4",
"time": "1970-01-01T00:00:04Z",
"last_block_id": null,
"last_commit_hash": null,
"data_hash": null,
"validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068",
"next_validators_hash": "C8CFFADA9808F685C4111693E1ADFDDBBEE9B9493493BEF805419F143C5B0D0A",
"consensus_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068",
"app_hash": "",
"last_results_hash": null,
"evidence_hash": null,
"proposer_address": "6AE5C701F508EB5B63343858E068C5843F28105F"
},
"commit": {
"height": "4",
"round": 1,
"block_id": {
"hash": "D0E7B0C678E290DA835BB26EE826472D66B6A306801E5FE0803C5320C554610A",
"part_set_header": {
"total": 1,
"hash": "D0E7B0C678E290DA835BB26EE826472D66B6A306801E5FE0803C5320C554610A"
}
},
"signatures": [
{
"block_id_flag": 2,
"validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F",
"timestamp": "1970-01-01T00:00:04Z",
"signature": "lTGBsjVI6YwIRcxQ6Lct4Q+xrtJc9h3648c42uWe4MpSgy4rUI5g71AEpG90Tbn0PRizjKgCPhokPpQoQLiqAg=="
}
]
}
},
"validator_set": {
"total_voting_power": "0",
"validators": [
{
"address": "6AE5C701F508EB5B63343858E068C5843F28105F",
"pub_key": {
"type": "tendermint/PubKeyEd25519",
"value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ="
},
"voting_power": "50",
"proposer_priority": null
}
]
},
"next_validator_set": {
"total_voting_power": "0",
"validators": [
{
"address": "C479DB6F37AB9757035CFBE10B687E27668EE7DF",
"pub_key": {
"type": "tendermint/PubKeyEd25519",
"value": "3wf60CidQcsIO7TksXzEZsJefMUFF73k6nP1YeEo9to="
},
"voting_power": "50",
"proposer_priority": null
}
]
},
"provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE"
}"#;
const TRUSTED_BLOCK: &str = r#"{
"signed_header": {
"header": {
"version": {
"block": "11",
"app": "0"
},
"chain_id": "test-chain",
"height": "3",
"time": "1970-01-01T00:00:03Z",
"last_block_id": null,
"last_commit_hash": null,
"data_hash": null,
"validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068",
"next_validators_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068",
"consensus_hash": "75E6DD63C2DC2B58FE0ED82792EAB369C4308C7EC16B69446382CC4B41D46068",
"app_hash": "",
"last_results_hash": null,
"evidence_hash": null,
"proposer_address": "6AE5C701F508EB5B63343858E068C5843F28105F"
},
"commit": {
"height": "3",
"round": 1,
"block_id": {
"hash": "AAB1B09D5FADAAE7CDF3451961A63F810DB73BF3214A7B74DBA36C52EDF1A793",
"part_set_header": {
"total": 1,
"hash": "AAB1B09D5FADAAE7CDF3451961A63F810DB73BF3214A7B74DBA36C52EDF1A793"
}
},
"signatures": [
{
"block_id_flag": 2,
"validator_address": "6AE5C701F508EB5B63343858E068C5843F28105F",
"timestamp": "1970-01-01T00:00:03Z",
"signature": "xn0eSsHYIsqUbmfAiJq1R0hqZbfuIjs5Na1c88EC1iPTuQAesKg9I7nXG4pk8d6U5fU4GysNLk5I4f7aoefOBA=="
}
]
}
},
"validator_set": {
"total_voting_power": "0",
"validators": [
{
"address": "6AE5C701F508EB5B63343858E068C5843F28105F",
"pub_key": {
"type": "tendermint/PubKeyEd25519",
"value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ="
},
"voting_power": "50",
"proposer_priority": null
}
]
},
"next_validator_set": {
"total_voting_power": "0",
"validators": [
{
"address": "6AE5C701F508EB5B63343858E068C5843F28105F",
"pub_key": {
"type": "tendermint/PubKeyEd25519",
"value": "GQEC/HB4sDBAVhHtUzyv4yct9ZGnudaP209QQBSTfSQ="
},
"voting_power": "50",
"proposer_priority": null
}
]
},
"provider": "BADFADAD0BEFEEDC0C0ADEADBEEFC0FFEEFACADE"
}"#;
#[wasm_bindgen_test]
fn successful_verification() {
let (untrusted_block, trusted_block) = test_blocks();
let options = test_options();
// Choose a "now" value within the trusting period
let now =
JsValue::from_serde(&Time::parse_from_rfc3339("1970-01-07T00:00:00Z").unwrap()).unwrap();
let js_result = verify(&untrusted_block, &trusted_block, &options, &now);
console_log!("js_result = {:?}", js_result);
let verdict = JsValue::into_serde::<Result<Verdict, Error>>(&js_result)
.unwrap()
.unwrap();
assert_eq!(verdict, Verdict::Success);
}
#[wasm_bindgen_test]
fn failed_verification_outside_trusting_period() {
    let (untrusted_block, trusted_block) = test_blocks();
    let options = test_options();
    // Choose a "now" value outside the trusting period
    let now =
        JsValue::from_serde(&Time::parse_from_rfc3339("1970-01-16T00:00:00Z").unwrap()).unwrap();
    let js_result = verify(&untrusted_block, &trusted_block, &options, &now);
    console_log!("js_result = {:?}", js_result);
    // The result is Ok because we successfully obtained a verdict, even if the
    // verdict isn't Verdict::Success.
    let verdict = JsValue::into_serde::<Result<Verdict, Error>>(&js_result)
        .unwrap()
        .unwrap();
    match verdict {
        Verdict::Success | Verdict::NotEnoughTrust(_) => panic!("unexpected verdict"),
        _ => {},
    }
}
fn test_blocks() -> (JsValue, JsValue) {
let untrusted_block =
JsValue::from_serde(&serde_json::from_str::<LightBlock>(UNTRUSTED_BLOCK).unwrap()).unwrap();
let trusted_block =
JsValue::from_serde(&serde_json::from_str::<LightBlock>(TRUSTED_BLOCK).unwrap()).unwrap();
(untrusted_block, trusted_block)
}
fn test_options() -> JsValue {
JsValue::from_serde(&JsOptions {
trust_threshold: (1, 3),
trusting_period: 1209600, // 2 weeks
clock_drift: 5, // 5 seconds
})
.unwrap()
}
FilterByPage.js
const pageSizeInput = document.getElementById('pageSize');
const pageInput = document.getElementById('page');
const searchBtn = document.getElementById('searchBtn');
const clearBtn = document.getElementById('clearBtn');
const tableBody = document.getElementById('tableBody');
const paginationBox = document.getElementById('pagination')
const proxy = "http://localhost:5000"
let queryParam = document.getElementById('queryParam')
let pagination
let count;
let query;
// Fetching data without search parameters
query = proxy + '/api/Gridify'
fetch(query)
.then(response => response.json())
.then(data => {
data.items.forEach((q, index) => {
const headerOne = document.createElement('tr');
headerOne.innerHTML = `
<th scope="row">${index + 1} </th>
<th scope="row">${q.firstName} </th>
<th scope="row">${q.lastName} </th>
<th scope="row">${q.age} </th>
<th scope="row">${q.phoneNumber} </th>
<th scope="row">${q.address} </th>
`
tableBody.append(headerOne)
});
})
// Search data with params
async function search() {
query = proxy + '/api/Gridify/Paging'
let pQuery = "";
if (pageSizeInput.value && pageInput.value == "") {
pageInput.value = 1;
}
if (pageInput.value && pageSizeInput.value == "") {
pQuery = `?Page=${pageInput.value}`;
}
if (pageSizeInput.value != "") {
pQuery = `?Page=${pageInput.value}&PageSize=${pageSizeInput.value}`;
}
query = proxy + `/api/Gridify/Paging${pQuery}`;
queryParam.innerHTML = 'query: ' + query;
tableBody.innerHTML = ''
await fetch(query)
.then(response => response.json())
.then(data => {
data.items.forEach((q, index) => {
const headerOne = document.createElement('tr');
headerOne.innerHTML = `
<th scope="row">${index + 1} </th>
<th scope="row">${q.firstName} </th>
<th scope="row">${q.lastName} </th>
<th scope="row">${q.age} </th>
<th scope="row">${q.phoneNumber} </th>
<th scope="row">${q.address} </th>
`
tableBody.append(headerOne)
});
count = data.totalItems
})
pageMaker()
}
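// For example, with page = 2 and pageSize = 10 the request above becomes
// (host is the local dev proxy defined at the top of this file):
//   http://localhost:5000/api/Gridify/Paging?Page=2&PageSize=10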
// No Search param
function noSearchParam() {
query = proxy + '/api/Gridify'
queryParam.innerHTML = 'query: ' + query;
tableBody.innerHTML = ''
fetch(query)
.then(response => response.json())
.then(data => {
data.items.forEach((q, index) => {
const headerOne = document.createElement('tr');
headerOne.innerHTML = `
<th scope="row">${index + 1} </th>
<th scope="row">${q.firstName} </th>
<th scope="row">${q.lastName} </th>
<th scope="row">${q.age} </th>
<th scope="row">${q.phoneNumber} </th>
<th scope="row">${q.address} </th>
`
tableBody.append(headerOne)
});
})
}
//pagination method
function pageMaker() {
if (pageSizeInput.value > 0) {
pagination = [];
pageNum = Math.ceil(count / pageSizeInput.value);
paginationBox.innerHTML = ``
for (let i = 1; i <= pageNum; i++) {
pagination.push(i);
const pageNums = document.createElement('span');
pageNums.setAttribute('id', 'num-' + i)
pageNums.innerHTML = `${i}`
paginationBox.append(pageNums)
document.getElementById('num-' + i).addEventListener('click', () => {
pageInput.value = i;
search()
})
}
}
}
// Change the page on click from the DOM
function pageChanger(page) {
pageInput.value = page;
console.log(pageInput.value);
search()
}
// Clear Input values
function clear() {
pageSizeInput.value = '';
pageInput.value = '';
paginationBox.innerHTML = ``
noSearchParam();
}
queryParam.innerHTML = 'query: ' + query;
// Event listeners for buttons
clearBtn.addEventListener('click', clear)
searchBtn.addEventListener('click', search)
gallery.directive.ts
import {
Directive,
ComponentRef,
HostListener,
Input,
ElementRef,
OnDestroy,
OnInit
} from '@angular/core';
import { NbDialogService } from '@nebular/theme';
import { sortBy } from 'underscore';
import { GalleryComponent } from './gallery.component';
import { GalleryService } from './gallery.service';
export interface GalleryItem {
thumbUrl: string;
fullUrl: string;
recordedAt?: Date;
employeeId?: string;
}
@Directive({
selector: '[ngxGallery]'
})
export class GalleryDirective implements OnDestroy, OnInit {
disableClick: boolean = false;
dialogRef: ComponentRef<GalleryComponent>;
@Input() employeeId: string;
@Input() item: GalleryItem | GalleryItem[];
@Input() items: GalleryItem[] = [];
@Input() set disabled(value: any) {
this.disableClick = value || false;
if (this.disableClick) {
this.el.nativeElement.classList.add('disabled');
} else {
this.el.nativeElement.classList.remove('disabled');
}
}
constructor(
private readonly el: ElementRef,
private readonly nbDialogService: NbDialogService,
private readonly galleryService: GalleryService
) {}
@HostListener('click', [])
onClick(): void {
if (this.disableClick) {
return;
}
let items = JSON.parse(JSON.stringify(this.item));
items = sortBy(items, 'createdAt').reverse();
const item = items instanceof Array ? items[0] : items;
this.nbDialogService.open(GalleryComponent, {
context: {
items: this.items,
item,
employeeId: this.employeeId
},
dialogClass: 'fullscreen'
});
}
ngOnInit() {
const item = this.item instanceof Array ? this.item : [this.item];
this.item = sortBy(item, 'createdAt');
this.galleryService.appendItems(this.item);
}
ngOnDestroy() {}
}
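// Illustrative template usage (bindings are examples; `photo`/`photos` are
// hypothetical component fields):
//
//   <img [src]="photo.thumbUrl" ngxGallery [item]="photo" [items]="photos" [employeeId]="employeeId" />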
error.rs
use std::error;
use std::fmt;
use std::fmt::Display;
use std::io;
use std::result;
use std::string::FromUtf8Error;
use crate::compression::CompressionError;
use crate::frame::frame_error::CDRSError;
use uuid::BytesError;
pub type Result<T> = result::Result<T, Error>;
/// CDRS custom error type. CDRS expects two types of error - errors returned by the server
/// and internal errors that occur within the driver itself. An `io::Error` represents an
/// internal error, because by implementation IO errors can only be raised by the CDRS
/// driver. A `Server` error is one returned by a server via result error frames.
#[derive(Debug)]
pub enum Error {
/// Internal IO error.
Io(io::Error),
/// Internal error that may be raised during `uuid::Uuid::from_bytes`
UUIDParse(BytesError),
/// General error
General(String),
/// Internal error that may be raised during `String::from_utf8`
FromUtf8(FromUtf8Error),
/// Internal Compression/Decompression error
Compression(CompressionError),
/// Server error.
Server(CDRSError),
}
pub fn column_is_empty_err<T: Display>(column_name: T) -> Error {
Error::General(format!("Column or UDT property '{}' is empty", column_name))
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Io(ref err) => write!(f, "IO error: {}", err),
Error::Compression(ref err) => write!(f, "Compressor error: {}", err),
Error::Server(ref err) => write!(f, "Server error: {:?}", err.message),
Error::FromUtf8(ref err) => write!(f, "FromUtf8Error error: {:?}", err),
Error::UUIDParse(ref err) => write!(f, "UUIDParse error: {:?}", err),
Error::General(ref err) => write!(f, "GeneralParsing error: {:?}", err),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
match *self {
Error::Io(ref err) => err.description(),
Error::Compression(ref err) => err.description(),
Error::Server(ref err) => err.message.as_str(),
Error::FromUtf8(ref err) => err.description(),
// FIXME: err.description not found in current scope, std::error::Error not satisfied
Error::UUIDParse(_) => "UUID Parse Error",
Error::General(ref err) => err.as_str(),
}
}
}
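// The `From` conversions below let callers bubble any wrapped source error up
// with the `?` operator. A minimal sketch (the function below is hypothetical):
//
//     fn decode_body(raw: Vec<u8>) -> Result<String> {
//         let s = String::from_utf8(raw)?; // FromUtf8Error -> Error::FromUtf8
//         Ok(s)
//     }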
impl From<io::Error> for Error {
fn from(err: io::Error) -> Error {
Error::Io(err)
}
}
impl From<CDRSError> for Error {
fn from(err: CDRSError) -> Error {
Error::Server(err)
}
}
impl From<CompressionError> for Error {
fn from(err: CompressionError) -> Error {
Error::Compression(err)
}
}
impl From<FromUtf8Error> for Error {
fn from(err: FromUtf8Error) -> Error {
Error::FromUtf8(err)
}
}
impl From<BytesError> for Error {
fn from(err: BytesError) -> Error {
Error::UUIDParse(err)
}
}
impl From<String> for Error {
fn from(err: String) -> Error {
Error::General(err)
}
}
| fn from(err: &str) -> Error {
Error::General(err.to_string())
}
} | impl<'a> From<&'a str> for Error { |
xfr.go | package file
import (
"context"
"fmt"
"sync"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/file/tree"
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
)
// Xfr serves up an AXFR.
type Xfr struct {
*Zone
}
// ServeDNS implements the plugin.Handler interface.
func (x Xfr) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
state := request.Request{W: w, Req: r}
if !x.TransferAllowed(state) {
return dns.RcodeServerFailure, nil
}
if state.QType() != dns.TypeAXFR && state.QType() != dns.TypeIXFR {
return 0, plugin.Error(x.Name(), fmt.Errorf("xfr called with non transfer type: %d", state.QType()))
}
// For IXFR we take the SOA in the IXFR message (if present), compare it to what we have and then decide to do an
// AXFR or just reply with a single SOA message.
if state.QType() == dns.TypeIXFR {
code, _ := x.ServeIxfr(ctx, w, r)
if plugin.ClientWrite(code) {
return code, nil
}
}
// get soa and apex
apex, err := x.ApexIfDefined()
if err != nil {
return dns.RcodeServerFailure, nil
}
ch := make(chan *dns.Envelope)
tr := new(dns.Transfer)
wg := new(sync.WaitGroup)
wg.Add(1)
go func() {
tr.Out(w, r, ch) | l := len(apex)
ch <- &dns.Envelope{RR: apex}
x.Walk(func(e *tree.Elem, _ map[uint16][]dns.RR) error {
rrs = append(rrs, e.All()...)
if len(rrs) > 500 {
ch <- &dns.Envelope{RR: rrs}
l += len(rrs)
rrs = []dns.RR{}
}
return nil
})
if len(rrs) > 0 {
ch <- &dns.Envelope{RR: rrs}
l += len(rrs)
rrs = []dns.RR{}
}
ch <- &dns.Envelope{RR: []dns.RR{apex[0]}} // closing SOA.
l++
close(ch) // Even though we close the channel here, we still have
wg.Wait() // to wait before we can return and close the connection.
log.Infof("Outgoing transfer of %d records of zone %s to %s done with %d SOA serial", l, x.origin, state.IP(), apex[0].(*dns.SOA).Serial)
return dns.RcodeSuccess, nil
}
// Name implements the plugin.Handler interface.
func (x Xfr) Name() string { return "xfr" }
// ServeIxfr checks if we need to serve a simpler IXFR for the incoming message.
// See RFC 1995 Section 3: "... and the authority section containing the SOA record of client's version of the zone."
// and Section 2, paragraph 4 where we only need to echo the SOA record back.
// This function must be called when the qtype is IXFR. It returns a code for which
// plugin.ClientWrite(code) == false when it didn't write anything, in which case an AXFR should be performed.
func (x Xfr) ServeIxfr(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
if len(r.Ns) != 1 {
return dns.RcodeServerFailure, nil
}
soa, ok := r.Ns[0].(*dns.SOA)
if !ok {
return dns.RcodeServerFailure, nil
}
x.RLock()
if x.Apex.SOA == nil {
x.RUnlock()
return dns.RcodeServerFailure, nil
}
serial := x.Apex.SOA.Serial
x.RUnlock()
if soa.Serial == serial { // Section 2, para 4; echo SOA back. We have the same zone
m := new(dns.Msg)
m.SetReply(r)
m.Answer = []dns.RR{soa}
w.WriteMsg(m)
return 0, nil
}
return dns.RcodeServerFailure, nil
} | wg.Done()
}()
rrs := []dns.RR{} |
AntennaSignalRounded.tsx | import * as React from 'react';
import Svg, { SvgProps, Path } from 'react-native-svg';
function | (
props: SvgProps,
svgRef?: React.Ref<React.Component<SvgProps>>
) {
return (
<Svg
width="1.5em"
height="1.5em"
strokeWidth={1.5}
viewBox="0 0 24 24"
fill="none"
color="currentColor"
ref={svgRef}
{...props}
>
<Path
d="M2 15V9a6 6 0 016-6h8a6 6 0 016 6v6a6 6 0 01-6 6H8a6 6 0 01-6-6z"
stroke="currentColor"
/>
<Path
d="M15 9s1 1.125 1 3-1 3-1 3M12 12.01l.01-.011M17 7s2 1.786 2 5-2 5-2 5M9 9s-1 1.125-1 3 1 3 1 3M7 7s-2 1.786-2 5 2 5 2 5"
stroke="currentColor"
strokeLinecap="round"
strokeLinejoin="round"
/>
</Svg>
);
}
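// Usage sketch (assumed consumer code; props are standard react-native-svg SvgProps):
//   <AntennaSignalRounded width={32} height={32} color="#4a4a4a" />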
const ForwardRef = React.forwardRef(SvgAntennaSignalRounded);
export default ForwardRef;
| SvgAntennaSignalRounded |
annotation_gui.py | # coding: utf-8
import wx
from enum import IntEnum
from bookworm import speech
from bookworm.gui.settings import SettingsPanel
from bookworm.structured_text import TextRange
from bookworm.logger import logger
from .annotator import Bookmarker, NoteTaker, Quoter
from .annotation_dialogs import (
BookmarksViewer,
CommentsDialog,
QuotesDialog,
GenericAnnotationWithContentDialog,
)
log = logger.getChild(__name__)
class AnnotationSettingsPanel(SettingsPanel):
config_section = "annotation"
def addControls(self):
# Translators: the title of a group of controls in the settings panel
UIBox = self.make_static_box(_("Annotation"))
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Speak the bookmark when jumping"),
name="annotation.speak_bookmarks_on_jumping",
)
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Select the bookmarked line when jumping"),
name="annotation.select_bookmarked_line_on_jumping",
)
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Use visual styles to indicate annotations"),
name="annotation.use_visuals",
)
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Use sounds to indicate the presence of comments"),
name="annotation.play_sound_for_comments",
)
class AnnotationsMenuIds(IntEnum):
addBookmark = 241
addNamedBookmark = 242
addNote = 243
quoteSelection = 244
viewBookmarks = 245
class StatelessAnnotationsMenuIds(IntEnum):
viewNotes = 246
viewQuotes = 247
ANNOTATIONS_KEYBOARD_SHORTCUTS = {
AnnotationsMenuIds.addBookmark: "Ctrl-B",
AnnotationsMenuIds.addNamedBookmark: "Ctrl-Shift-B",
AnnotationsMenuIds.addNote: "Ctrl-M",
AnnotationsMenuIds.quoteSelection: "Ctrl-H",
}
class AnnotationMenu(wx.Menu):
"""Annotation menu."""
def __init__(self, service):
super().__init__()
self.service = service
self.view = service.view
self.reader = service.reader
# Add menu items
self.Append(
AnnotationsMenuIds.addBookmark,
# Translators: the label of an item in the application menubar
_("Add &Bookmark\tCtrl-B"),
# Translators: the help text of an item in the application menubar
_("Add a bookmark at the current position"),
)
self.Append(
AnnotationsMenuIds.addNamedBookmark,
# Translators: the label of an item in the application menubar
_("Add &Named Bookmark...\tCtrl-Shift-B"),
# Translators: the help text of an item in the application menubar
_("Add a named bookmark at the current position"),
)
self.Append(
AnnotationsMenuIds.addNote,
# Translators: the label of an item in the application menubar
_("Add Co&mment...\tCtrl-M"),
# Translators: the help text of an item in the application menubar
_("Add a comment at the current position"),
)
self.Append(
AnnotationsMenuIds.quoteSelection,
# Translators: the label of an item in the application menubar
_("&Highlight Selection\tCtrl-H"),
# Translators: the help text of an item in the application menubar
_("Highlight selected text and save it."),
)
self.Append(
AnnotationsMenuIds.viewBookmarks,
# Translators: the label of an item in the application menubar
_("Saved &Bookmarks..."),
# Translators: the help text of an item in the application menubar
_("View added bookmarks"),
)
self.Append(
StatelessAnnotationsMenuIds.viewNotes,
# Translators: the label of an item in the application menubar
_("Saved Co&mments..."),
# Translators: the help text of an item in the application menubar
_("View, edit, and remove comments."),
)
self.Append(
StatelessAnnotationsMenuIds.viewQuotes,
# Translators: the label of an item in the application menubar
_("Saved &Highlights..."),
# Translators: the help text of an item in the application menubar
_("View saved highlights."),
)
# EventHandlers
self.view.Bind(
wx.EVT_MENU, self.onAddBookmark, id=AnnotationsMenuIds.addBookmark
)
self.view.Bind(
wx.EVT_MENU, self.onAddNamedBookmark, id=AnnotationsMenuIds.addNamedBookmark
)
self.view.Bind(wx.EVT_MENU, self.onAddNote, id=AnnotationsMenuIds.addNote)
self.view.Bind(
wx.EVT_MENU, self.onQuoteSelection, id=AnnotationsMenuIds.quoteSelection
)
self.view.Bind(
wx.EVT_MENU, self.onViewBookmarks, id=AnnotationsMenuIds.viewBookmarks
)
self.view.Bind(
wx.EVT_MENU, self.onViewNotes, id=StatelessAnnotationsMenuIds.viewNotes
)
self.view.Bind(
wx.EVT_MENU, self.onViewQuotes, id=StatelessAnnotationsMenuIds.viewQuotes
)
def | (self, name=""):
bookmarker = Bookmarker(self.reader)
insertionPoint = self.view.contentTextCtrl.GetInsertionPoint()
__, __, current_lino = self.view.contentTextCtrl.PositionToXY(insertionPoint)
count = 0
for bkm in bookmarker.get_for_page(self.reader.current_page):
__, __, lino = self.view.contentTextCtrl.PositionToXY(bkm.position)
if lino == current_lino:
count += 1
bookmarker.delete(bkm.id)
self.service.style_bookmark(self.view, bkm.position, enable=False)
if count and not name:
return speech.announce(_("Bookmark removed"))
Bookmarker(self.reader).create(title=name, position=insertionPoint)
# Translators: spoken message
speech.announce(_("Bookmark Added"))
self.service.style_bookmark(self.view, insertionPoint)
def onAddBookmark(self, event):
self._add_bookmark()
def onAddNamedBookmark(self, event):
bookmark_name = self.view.get_text_from_user(
# Translators: title of a dialog
_("Add Named Bookmark"),
# Translators: label of a text entry
_("Bookmark name:"),
)
if bookmark_name:
self._add_bookmark(bookmark_name)
def onAddNote(self, event):
_with_tags = wx.GetKeyState(wx.WXK_SHIFT)
insertionPoint = self.view.contentTextCtrl.GetInsertionPoint()
comment_text = self.view.get_text_from_user(
# Translators: the title of a dialog to add a comment
_("New Comment"),
# Translators: the label of an edit field to enter a comment
_("Comment:"),
style=wx.OK | wx.CANCEL | wx.TE_MULTILINE | wx.CENTER,
)
if not comment_text:
return
note = NoteTaker(self.reader).create(
title="", content=comment_text, position=insertionPoint
)
self.service.style_comment(self.view, insertionPoint)
if _with_tags:
# add tags
tags_text = self.view.get_text_from_user(
# Translators: title of a dialog
_("Tag Comment"),
# Translators: label of a text entry
_("Tags:"),
)
if tags_text:
for tag in tags_text.split():
note.tags.append(tag.strip())
NoteTaker.model.session.commit()
def onQuoteSelection(self, event):
_with_tags = wx.GetKeyState(wx.WXK_SHIFT)
quoter = Quoter(self.reader)
selected_text = self.view.contentTextCtrl.GetStringSelection()
if not selected_text:
return speech.announce(_("No selection"))
x, y = self.view.get_selection_range()
for q in quoter.get_for_page():
q_range = TextRange(q.start_pos, q.end_pos)
if (q_range.start == x) and (q_range.stop == y):
quoter.delete(q.id)
self.service.style_highlight(self.view, x, y, enable=False)
# Translators: spoken message
return speech.announce(_("Highlight removed"))
elif (q.start_pos < x) and (q.end_pos > y):
# Translators: spoken message
speech.announce(_("Already highlighted"))
return wx.Bell()
if (x in q_range) or (y in q_range):
if x not in q_range:
q.start_pos = x
q.session.commit()
self.service.style_highlight(self.view, x, q_range.stop)
return speech.announce(_("Highlight extended"))
elif y not in q_range:
q.end_pos = y
q.session.commit()
self.service.style_highlight(self.view, q_range.start, y)
# Translators: spoken message
return speech.announce(_("Highlight extended"))
quote = quoter.create(title="", content=selected_text, start_pos=x, end_pos=y)
# Translators: spoken message
speech.announce(_("Selection highlighted"))
self.service.style_highlight(self.view, x, y)
if _with_tags:
# add tags
tags_text = self.view.get_text_from_user(
# Translators: title of a dialog
_("Tag Highlight"),
# Translators: label of a text entry
_("Tags:"),
)
if tags_text:
for tag in tags_text.split():
quote.tags.append(tag.strip())
Quoter.model.session.commit()
def onViewBookmarks(self, event):
with BookmarksViewer(
parent=self.view,
reader=self.reader,
annotator=Bookmarker,
# Translators: the title of a dialog to view bookmarks
title=_("Bookmarks | {book}").format(book=self.reader.current_book.title),
) as dlg:
dlg.ShowModal()
def onViewNotes(self, event):
Dialog = (
CommentsDialog if self.reader.ready else GenericAnnotationWithContentDialog
)
with Dialog(
parent=self.view,
title=_("Comments"),
reader=self.reader,
annotator_cls=NoteTaker,
can_edit=True,
) as dlg:
dlg.ShowModal()
def onViewQuotes(self, event):
Dialog = (
QuotesDialog if self.reader.ready else GenericAnnotationWithContentDialog
)
with Dialog(
parent=self.view,
title=_("Highlights"),
reader=self.reader,
annotator_cls=Quoter,
) as dlg:
dlg.ShowModal()
| _add_bookmark |
defaults.go | /**
* Copyright (C) 2015 Red Hat, Inc. | * You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import kruntime "k8s.io/apimachinery/pkg/runtime"
func addDefaultingFuncs(scheme *kruntime.Scheme) error {
return RegisterDefaults(scheme)
}
func SetDefaults_Configuration(obj *Configuration) {} | *
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. |
default.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The `Default` trait for types which may have meaningful default values.
//!
//! Sometimes, you want to fall back to some kind of default value, and
//! don't particularly care what it is. This comes up often with `struct`s
//! that define a set of options:
//!
//! ```
//! struct SomeOptions {
//! foo: i32,
//! bar: f32,
//! }
//! ```
//!
//! How can we define some default values? You can use `Default`:
//!
//! ```
//! #[derive(Default)]
//! struct SomeOptions {
//! foo: i32,
//! bar: f32,
//! }
//!
//!
//! fn main() {
//! let options: SomeOptions = Default::default();
//! }
//! ```
//!
//! Now, you get all of the default values. Rust implements `Default` for various primitive types.
//! If you have your own type, you need to implement `Default` yourself:
//!
//! ```
//! enum Kind {
//! A,
//! B,
//! C,
//! }
//!
//! impl Default for Kind {
//! fn default() -> Kind { Kind::A }
//! }
//!
//! #[derive(Default)]
//! struct SomeOptions {
//! foo: i32,
//! bar: f32,
//! baz: Kind,
//! }
//!
//!
//! fn main() {
//! let options: SomeOptions = Default::default(); | //! ```
//!
//! If you want to override a particular option, but still retain the other defaults:
//!
//! ```
//! # #[derive(Default)]
//! # struct SomeOptions {
//! # foo: i32,
//! # bar: f32,
//! # }
//! fn main() {
//! let options = SomeOptions { foo: 42, ..Default::default() };
//! }
//! ```
#![stable(feature = "rust1", since = "1.0.0")]
use marker::Sized;
/// A trait for giving a type a useful default value.
///
/// A struct can derive default implementations of `Default` for basic types using
/// `#[derive(Default)]`.
///
/// # Examples
///
/// ```
/// #[derive(Default)]
/// struct SomeOptions {
/// foo: i32,
/// bar: f32,
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Default: Sized {
/// Returns the "default value" for a type.
///
/// Default values are often some kind of initial value, identity value, or anything else that
/// may make sense as a default.
///
/// # Examples
///
/// Using built-in default values:
///
/// ```
/// let i: i8 = Default::default();
/// let (x, y): (Option<String>, f64) = Default::default();
/// let (a, b, (c, d)): (i32, u32, (bool, bool)) = Default::default();
/// ```
///
/// Making your own:
///
/// ```
/// enum Kind {
/// A,
/// B,
/// C,
/// }
///
/// impl Default for Kind {
/// fn default() -> Kind { Kind::A }
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn default() -> Self;
}
macro_rules! default_impl {
($t:ty, $v:expr) => {
#[stable(feature = "rust1", since = "1.0.0")]
impl Default for $t {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn default() -> $t { $v }
}
}
}
default_impl! { (), () }
default_impl! { bool, false }
default_impl! { char, '\x00' }
default_impl! { usize, 0 }
default_impl! { u8, 0 }
default_impl! { u16, 0 }
default_impl! { u32, 0 }
default_impl! { u64, 0 }
default_impl! { isize, 0 }
default_impl! { i8, 0 }
default_impl! { i16, 0 }
default_impl! { i32, 0 }
default_impl! { i64, 0 }
default_impl! { f32, 0.0f32 }
default_impl! { f64, 0.0f64 } | //! } |
thermo.py | # Copyright (c) 2008,2015,2016,2017,2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Contains a collection of thermodynamic calculations."""
import warnings
import numpy as np
import scipy.integrate as si
import scipy.optimize as so
from .tools import (_greater_or_close, _less_or_close, _remove_nans, find_bounding_indices,
find_intersections, first_derivative, get_layer)
from .. import constants as mpconsts
from ..cbook import broadcast_indices
from ..interpolate.one_dimension import interpolate_1d
from ..package_tools import Exporter
from ..units import check_units, concatenate, units
from ..xarray import preprocess_xarray
exporter = Exporter(globals())
sat_pressure_0c = 6.112 * units.millibar
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[temperature]')
def relative_humidity_from_dewpoint(temperature, dewpoint):
r"""Calculate the relative humidity.
Uses temperature and dewpoint to calculate relative
humidity as the ratio of vapor pressure to saturation vapor pressure.
Parameters
----------
temperature : `pint.Quantity`
air temperature
dewpoint : `pint.Quantity`
dewpoint temperature
Returns
-------
`pint.Quantity`
relative humidity
See Also
--------
saturation_vapor_pressure
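Examples
--------
A minimal sketch (exact printed form depends on pint formatting):
>>> from metpy.units import units
>>> rh = relative_humidity_from_dewpoint(25. * units.degC, 12. * units.degC)
>>> # rh is a dimensionless Quantity, roughly 0.44 here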
"""
e = saturation_vapor_pressure(dewpoint)
e_s = saturation_vapor_pressure(temperature)
return (e / e_s)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[pressure]')
def exner_function(pressure, reference_pressure=mpconsts.P0):
r"""Calculate the Exner function.
.. math:: \Pi = \left( \frac{p}{p_0} \right)^\kappa
This can be used to calculate potential temperature from temperature (and vice versa),
since
.. math:: \Pi = \frac{T}{\theta}
Parameters
----------
pressure : `pint.Quantity`
total atmospheric pressure
reference_pressure : `pint.Quantity`, optional
The reference pressure against which to calculate the Exner function, defaults to
metpy.constants.P0
Returns
-------
`pint.Quantity`
The value of the Exner function at the given pressure
See Also
--------
potential_temperature
temperature_from_potential_temperature
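Examples
--------
Illustrative only:
>>> from metpy.units import units
>>> pi = exner_function(850. * units.hPa)
>>> # pi is dimensionless, about 0.955, so T is about 0.955 * theta at 850 hPa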
"""
return (pressure / reference_pressure).to('dimensionless')**mpconsts.kappa
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def potential_temperature(pressure, temperature):
r"""Calculate the potential temperature.
Uses the Poisson equation to calculate the potential temperature
given `pressure` and `temperature`.
Parameters
----------
pressure : `pint.Quantity`
total atmospheric pressure
temperature : `pint.Quantity`
air temperature
Returns
-------
`pint.Quantity`
The potential temperature corresponding to the temperature and
pressure.
See Also
--------
dry_lapse
Notes
-----
Formula:
.. math:: \Theta = T (P_0 / P)^\kappa
Examples
--------
>>> from metpy.units import units
>>> metpy.calc.potential_temperature(800. * units.mbar, 273. * units.kelvin)
<Quantity(290.9665329591884, 'kelvin')>
"""
return temperature / exner_function(pressure)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def temperature_from_potential_temperature(pressure, potential_temperature):
r"""Calculate the temperature from a given potential temperature.
Uses the inverse of the Poisson equation to calculate the temperature from a
given potential temperature at a specific pressure level.
Parameters
----------
pressure : `pint.Quantity`
total atmospheric pressure
potential_temperature : `pint.Quantity`
potential temperature
Returns
-------
`pint.Quantity`
The temperature corresponding to the potential temperature and pressure.
See Also
--------
dry_lapse
potential_temperature
Notes
-----
Formula:
.. math:: T = \Theta (P / P_0)^\kappa
Examples
--------
>>> import numpy as np
>>> from metpy.units import units
>>> from metpy.calc import temperature_from_potential_temperature
>>> # potential temperature
>>> theta = np.array([ 286.12859679, 288.22362587]) * units.kelvin
>>> p = 850 * units.mbar
>>> T = temperature_from_potential_temperature(p, theta)
"""
return potential_temperature * exner_function(pressure)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[pressure]')
def dry_lapse(pressure, temperature, reference_pressure=None):
r"""Calculate the temperature at a level assuming only dry processes.
This function lifts a parcel starting at `temperature`, conserving
potential temperature. The starting pressure can be given by `reference_pressure`.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest
temperature : `pint.Quantity`
The starting temperature
reference_pressure : `pint.Quantity`, optional
The reference pressure. If not given, it defaults to the first element of the
pressure array.
Returns
-------
`pint.Quantity`
The resulting parcel temperature at levels given by `pressure`
See Also
--------
moist_lapse : Calculate parcel temperature assuming liquid saturation processes
parcel_profile : Calculate complete parcel profile
potential_temperature
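Examples
--------
A quick sketch (values approximate):
>>> from metpy.units import units
>>> t = dry_lapse(700. * units.hPa, 20. * units.degC, 1000. * units.hPa)
>>> # t is roughly -8 degC: the parcel cools dry-adiabatically from 1000 to 700 hPa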
"""
if reference_pressure is None:
reference_pressure = pressure[0]
return temperature * (pressure / reference_pressure)**mpconsts.kappa
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[pressure]')
def moist_lapse(pressure, temperature, reference_pressure=None):
r"""Calculate the temperature at a level assuming liquid saturation processes.
This function lifts a parcel starting at `temperature`. The starting pressure can
be given by `reference_pressure`. Essentially, this function is calculating moist
pseudo-adiabats.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest
temperature : `pint.Quantity`
The starting temperature
reference_pressure : `pint.Quantity`, optional
The reference pressure. If not given, it defaults to the first element of the
pressure array.
Returns
-------
`pint.Quantity`
The temperature corresponding to the starting temperature and
pressure levels.
See Also
--------
dry_lapse : Calculate parcel temperature assuming dry adiabatic processes
parcel_profile : Calculate complete parcel profile
Notes
-----
This function is implemented by integrating the following differential
equation:
.. math:: \frac{dT}{dP} = \frac{1}{P} \frac{R_d T + L_v r_s}
{C_{pd} + \frac{L_v^2 r_s \epsilon}{R_d T^2}}
This equation comes from [Bakhshaii2013]_.
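Examples
--------
A quick sketch (values approximate):
>>> from metpy.units import units
>>> t = moist_lapse(600. * units.hPa, 10. * units.degC, 1000. * units.hPa)
>>> # t cools along the pseudo-adiabat, noticeably less than the ~38 K
>>> # dry-adiabatic cooling over the same layer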
"""
def dt(t, p):
t = units.Quantity(t, temperature.units)
p = units.Quantity(p, pressure.units)
rs = saturation_mixing_ratio(p, t)
frac = ((mpconsts.Rd * t + mpconsts.Lv * rs)
/ (mpconsts.Cp_d + (mpconsts.Lv * mpconsts.Lv * rs * mpconsts.epsilon
/ (mpconsts.Rd * t * t)))).to('kelvin')
return (frac / p).magnitude
if reference_pressure is None:
reference_pressure = pressure[0]
pressure = pressure.to('mbar')
reference_pressure = reference_pressure.to('mbar')
temperature = np.atleast_1d(temperature)
side = 'left'
pres_decreasing = (pressure[0] > pressure[-1])
if pres_decreasing:
# Everything is easier if pressures are in increasing order
pressure = pressure[::-1]
side = 'right'
ref_pres_idx = np.searchsorted(pressure.m, reference_pressure.m, side=side)
ret_temperatures = np.empty((0, temperature.shape[0]))
if reference_pressure > pressure.min():
# Integrate downward in pressure
pres_down = np.append(reference_pressure.m, pressure[(ref_pres_idx - 1)::-1].m)
trace_down = si.odeint(dt, temperature.m.squeeze(), pres_down.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_down[:0:-1]))
if reference_pressure < pressure.max():
# Integrate upward in pressure
pres_up = np.append(reference_pressure.m, pressure[ref_pres_idx:].m)
trace_up = si.odeint(dt, temperature.m.squeeze(), pres_up.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_up[1:]))
if pres_decreasing:
ret_temperatures = ret_temperatures[::-1]
return units.Quantity(ret_temperatures.T.squeeze(), temperature.units)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def lcl(pressure, temperature, dewpoint, max_iters=50, eps=1e-5):
r"""Calculate the lifted condensation level (LCL) using from the starting point.
The starting state for the parcel is defined by `temperature`, `dewpoint`,
and `pressure`. If these are arrays, this function will return an LCL
for every index; as a result, this function also works with surface grids.
Parameters
----------
pressure : `pint.Quantity`
The starting atmospheric pressure
temperature : `pint.Quantity`
The starting temperature
dewpoint : `pint.Quantity`
The starting dewpoint
Returns
-------
`pint.Quantity`
The LCL pressure
`pint.Quantity`
The LCL temperature
Other Parameters
----------------
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired relative error in the calculated value, defaults to 1e-5.
See Also
--------
parcel_profile
Notes
-----
This function is implemented using an iterative approach to solve for the
LCL. The basic algorithm is:
1. Find the dewpoint from the LCL pressure and starting mixing ratio
2. Find the LCL pressure from the starting temperature and dewpoint
3. Iterate until convergence
The function is guaranteed to finish by virtue of the `max_iters` counter.
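Examples
--------
Illustrative only (values approximate):
>>> from metpy.units import units
>>> p_lcl, t_lcl = lcl(1000. * units.hPa, 30. * units.degC, 20. * units.degC)
>>> # roughly 870 hPa and 18 degC for this warm, moist surface parcel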
"""
def _lcl_iter(p, p0, w, t):
td = globals()['dewpoint'](vapor_pressure(units.Quantity(p, pressure.units), w))
return (p0 * (td / t) ** (1. / mpconsts.kappa)).m
w = mixing_ratio(saturation_vapor_pressure(dewpoint), pressure)
lcl_p = so.fixed_point(_lcl_iter, pressure.m, args=(pressure.m, w, temperature),
xtol=eps, maxiter=max_iters)
# np.isclose needed if surface is LCL due to precision error with np.log in dewpoint.
# Causes issues with parcel_profile_with_lcl if removed. Issue #1187
lcl_p = np.where(np.isclose(lcl_p, pressure.m), pressure.m, lcl_p) * pressure.units
return lcl_p, globals()['dewpoint'](vapor_pressure(lcl_p, w)).to(temperature.units)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')
def lfc(pressure, temperature, dewpoint, parcel_temperature_profile=None, dewpoint_start=None,
which='top'):
r"""Calculate the level of free convection (LFC).
This works by finding the first intersection of the ideal parcel path and
the measured parcel temperature. If this intersection occurs below the LCL,
the LFC is determined to be the same as the LCL, based upon the conditions
set forth in [USAF1990]_, pg 4-14, where a parcel must be lifted dry adiabatically
to saturation before it can freely rise.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpoint : `pint.Quantity`
The dewpoint at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the LFC. Defaults to the
surface parcel profile.
dewpoint_start: `pint.Quantity`, optional
The dewpoint of the parcel for which to calculate the LFC. Defaults to the surface
dewpoint.
which: str, optional
Pick which LFC to return. Options are 'top', 'bottom', 'wide', 'most_cape', and 'all'.
'top' returns the lowest-pressure LFC, default.
'bottom' returns the highest-pressure LFC.
'wide' returns the LFC whose corresponding EL is farthest away.
'most_cape' returns the LFC that results in the most CAPE in the profile.
'all' returns an array of every LFC found.
Returns
-------
`pint.Quantity`
The LFC pressure, or array of same if which='all'
`pint.Quantity`
The LFC temperature, or array of same if which='all'
See Also
--------
parcel_profile
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpoint)
pressure, temperature, dewpoint, parcel_temperature_profile = new_stuff
parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)
if dewpoint_start is None:
dewpoint_start = dewpoint[0]
# The parcel profile and data may have the same first data point.
# If that is the case, ignore that point to get the real first
# intersection for the LFC calculation. Use logarithmic interpolation.
if np.isclose(parcel_temperature_profile[0].to(temperature.units).m, temperature[0].m):
x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='increasing', log_x=True)
else:
x, y = find_intersections(pressure, parcel_temperature_profile,
temperature, direction='increasing', log_x=True)
# Compute LCL for this parcel for future comparisons
this_lcl = lcl(pressure[0], parcel_temperature_profile[0], dewpoint_start)
# The LFC could:
# 1) Not exist
# 2) Exist but be equal to the LCL
# 3) Exist and be above the LCL
# LFC does not exist or is LCL
if len(x) == 0:
# Is there any positive area above the LCL?
mask = pressure < this_lcl[0]
if np.all(_less_or_close(parcel_temperature_profile[mask], temperature[mask])):
# LFC doesn't exist
x, y = np.nan * pressure.units, np.nan * temperature.units
else: # LFC = LCL
x, y = this_lcl
return x, y
# LFC exists. Make sure it is no lower than the LCL
else:
idx = x < this_lcl[0]
# LFC height < LCL height, so set LFC = LCL
if not any(idx):
el_pres, _ = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='decreasing',
log_x=True)
if np.min(el_pres) > this_lcl[0]:
x, y = np.nan * pressure.units, np.nan * temperature.units
else:
x, y = this_lcl
return x, y
# Otherwise, find all LFCs that exist above the LCL
# What is returned depends on which flag as described in the docstring
else:
return _multiple_el_lfc_options(x, y, idx, which, pressure,
parcel_temperature_profile, temperature,
dewpoint, intersect_type='LFC')
def _multiple_el_lfc_options(intersect_pressures, intersect_temperatures, valid_x,
which, pressure, parcel_temperature_profile, temperature,
dewpoint, intersect_type):
"""Choose which ELs and LFCs to return from a sounding."""
p_list, t_list = intersect_pressures[valid_x], intersect_temperatures[valid_x]
if which == 'all':
x, y = p_list, t_list
elif which == 'bottom':
x, y = p_list[0], t_list[0]
elif which == 'top':
x, y = p_list[-1], t_list[-1]
elif which == 'wide':
x, y = _wide_option(intersect_type, p_list, t_list, pressure,
parcel_temperature_profile, temperature)
elif which == 'most_cape':
x, y = _most_cape_option(intersect_type, p_list, t_list, pressure, temperature,
dewpoint, parcel_temperature_profile)
else:
raise ValueError('Invalid option for "which". Valid options are "top", "bottom", '
'"wide", "most_cape", and "all".')
return x, y
def _wide_option(intersect_type, p_list, t_list, pressure, parcel_temperature_profile,
temperature):
"""Calculate the LFC or EL that produces the greatest distance between these points."""
# zip the LFC and EL lists together and find greatest difference
if intersect_type == 'LFC':
# Find EL intersection pressure values
lfc_p_list = p_list
el_p_list, _ = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='decreasing',
log_x=True)
else: # intersect_type == 'EL'
el_p_list = p_list
# Find LFC intersection pressure values
lfc_p_list, _ = find_intersections(pressure, parcel_temperature_profile,
temperature, direction='increasing',
log_x=True)
diff = [lfc_p.m - el_p.m for lfc_p, el_p in zip(lfc_p_list, el_p_list)]
return (p_list[np.where(diff == np.max(diff))][0],
t_list[np.where(diff == np.max(diff))][0])
def _most_cape_option(intersect_type, p_list, t_list, pressure, temperature, dewpoint,
parcel_temperature_profile):
"""Calculate the LFC or EL that produces the most CAPE in the profile."""
# Need to loop through all possible combinations of cape, find greatest cape profile
cape_list, pair_list = [], []
for which_lfc in ['top', 'bottom']:
for which_el in ['top', 'bottom']:
cape, _ = cape_cin(pressure, temperature, dewpoint, parcel_temperature_profile,
which_lfc=which_lfc, which_el=which_el)
cape_list.append(cape.m)
pair_list.append([which_lfc, which_el])
(lfc_chosen, el_chosen) = pair_list[np.where(cape_list == np.max(cape_list))[0][0]]
if intersect_type == 'LFC':
if lfc_chosen == 'top':
x, y = p_list[-1], t_list[-1]
else: # 'bottom' is returned
x, y = p_list[0], t_list[0]
else: # EL is returned
if el_chosen == 'top':
x, y = p_list[-1], t_list[-1]
else:
x, y = p_list[0], t_list[0]
return x, y
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')
def el(pressure, temperature, dewpoint, parcel_temperature_profile=None, which='top'):
r"""Calculate the equilibrium level.
This works by finding the last intersection of the ideal parcel path and
the measured environmental temperature. If there is one or fewer intersections, there is
no equilibrium level.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure profile
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpoint : `pint.Quantity`
The dewpoint at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the EL. Defaults to the
surface parcel profile.
which: str, optional
Pick which EL to return. Options are 'top', 'bottom', 'wide', 'most_cape', and 'all'.
'top' returns the lowest-pressure EL, default.
'bottom' returns the highest-pressure EL.
'wide' returns the EL whose corresponding LFC is farthest away.
'most_cape' returns the EL that results in the most CAPE in the profile.
'all' returns an array of every EL found.
Returns
-------
`pint.Quantity`
The EL pressure, or array of same if which='all'
`pint.Quantity`
The EL temperature, or array of same if which='all'
See Also
--------
parcel_profile
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpoint)
pressure, temperature, dewpoint, parcel_temperature_profile = new_stuff
parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)
# If the top of the sounding parcel is warmer than the environment, there is no EL
if parcel_temperature_profile[-1] > temperature[-1]:
return np.nan * pressure.units, np.nan * temperature.units
# Interpolate in log space to find the appropriate pressure - units have to be stripped
# and reassigned to allow np.log() to function properly.
x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:], temperature[1:],
direction='decreasing', log_x=True)
lcl_p, _ = lcl(pressure[0], temperature[0], dewpoint[0])
idx = x < lcl_p
if len(x) > 0 and x[-1] < lcl_p:
return _multiple_el_lfc_options(x, y, idx, which, pressure,
parcel_temperature_profile, temperature, dewpoint,
intersect_type='EL')
else:
return np.nan * pressure.units, np.nan * temperature.units
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def parcel_profile(pressure, temperature, dewpoint):
r"""Calculate the profile a parcel takes through the atmosphere.
The parcel starts at `temperature`, and `dewpoint`, lifted up
dry adiabatically to the LCL, and then moist adiabatically from there.
`pressure` specifies the pressure levels for the profile.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest. This array must be from
high to low pressure.
temperature : `pint.Quantity`
The starting temperature
dewpoint : `pint.Quantity`
The starting dewpoint
Returns
-------
`pint.Quantity`
The parcel temperatures at the specified pressure levels.
See Also
--------
lcl, moist_lapse, dry_lapse
"""
_, _, _, t_l, _, t_u = _parcel_profile_helper(pressure, temperature, dewpoint)
return concatenate((t_l, t_u))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def parcel_profile_with_lcl(pressure, temperature, dewpoint):
r"""Calculate the profile a parcel takes through the atmosphere.
The parcel starts at `temperature`, and `dewpoint`, lifted up
dry adiabatically to the LCL, and then moist adiabatically from there.
`pressure` specifies the pressure levels for the profile. This function returns
a profile that includes the LCL.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest. This array must be from
high to low pressure.
temperature : `pint.Quantity`
The atmospheric temperature at the levels in `pressure`. The first entry should be at
the same level as the first `pressure` data point.
dewpoint : `pint.Quantity`
The atmospheric dewpoint at the levels in `pressure`. The first entry should be at
the same level as the first `pressure` data point.
Returns
-------
pressure : `pint.Quantity`
The parcel profile pressures, which includes the specified levels and the LCL
ambient_temperature : `pint.Quantity`
The atmospheric temperature values, including the value interpolated to the LCL level
ambient_dew_point : `pint.Quantity`
The atmospheric dewpoint values, including the value interpolated to the LCL level
profile_temperature : `pint.Quantity`
The parcel profile temperatures at all of the levels in the returned pressures array,
including the LCL.
See Also
--------
lcl, moist_lapse, dry_lapse, parcel_profile
"""
p_l, p_lcl, p_u, t_l, t_lcl, t_u = _parcel_profile_helper(pressure, temperature[0],
dewpoint[0])
new_press = concatenate((p_l, p_lcl, p_u))
prof_temp = concatenate((t_l, t_lcl, t_u))
new_temp = _insert_lcl_level(pressure, temperature, p_lcl)
new_dewp = _insert_lcl_level(pressure, dewpoint, p_lcl)
return new_press, new_temp, new_dewp, prof_temp
def _parcel_profile_helper(pressure, temperature, dewpoint):
"""Help calculate parcel profiles.
Returns the temperature and pressure, above, below, and including the LCL. The
other calculation functions decide what to do with the pieces.
"""
# Find the LCL
press_lcl, temp_lcl = lcl(pressure[0], temperature, dewpoint)
press_lcl = press_lcl.to(pressure.units)
# Find the dry adiabatic profile, *including* the LCL. We need >= the LCL in case the
# LCL is included in the levels. It's slightly redundant in that case, but simplifies
# the logic for removing it later.
press_lower = concatenate((pressure[pressure >= press_lcl], press_lcl))
temp_lower = dry_lapse(press_lower, temperature)
# If the pressure profile doesn't make it to the lcl, we can stop here
if _greater_or_close(np.nanmin(pressure.m), press_lcl.m):
return (press_lower[:-1], press_lcl, units.Quantity(np.array([]), press_lower.units),
temp_lower[:-1], temp_lcl, units.Quantity(np.array([]), temp_lower.units))
# Find moist pseudo-adiabatic profile starting at the LCL
press_upper = concatenate((press_lcl, pressure[pressure < press_lcl]))
temp_upper = moist_lapse(press_upper, temp_lower[-1]).to(temp_lower.units)
# Return profile pieces
return (press_lower[:-1], press_lcl, press_upper[1:],
temp_lower[:-1], temp_lcl, temp_upper[1:])
def _insert_lcl_level(pressure, temperature, lcl_pressure):
"""Insert the LCL pressure into the profile."""
interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)
# Pressure needs to be increasing for searchsorted, so flip it and then convert
# the index back to the original array
loc = pressure.size - pressure[::-1].searchsorted(lcl_pressure)
return np.insert(temperature.m, loc, interp_temp.m) * temperature.units
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[dimensionless]')
def vapor_pressure(pressure, mixing_ratio):
r"""Calculate water vapor (partial) pressure.
Given total `pressure` and water vapor `mixing_ratio`, calculates the
partial pressure of water vapor.
Parameters
----------
pressure : `pint.Quantity`
total atmospheric pressure
mixing_ratio : `pint.Quantity`
dimensionless mass mixing ratio
Returns
-------
`pint.Quantity`
The ambient water vapor (partial) pressure in the same units as
`pressure`.
Notes
-----
This function is a straightforward implementation of the equation given in many places,
such as [Hobbs1977]_ pg.71:
.. math:: e = p \frac{r}{r + \epsilon}
See Also
--------
saturation_vapor_pressure, dewpoint
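Examples
--------
Illustrative only:
>>> from metpy.units import units
>>> e = vapor_pressure(1000. * units.hPa, 0.01 * units.dimensionless)
>>> # e is about 15.8 hPa for a 10 g/kg mixing ratio at 1000 hPa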
"""
return pressure * mixing_ratio / (mpconsts.epsilon + mixing_ratio)
@exporter.export
@preprocess_xarray
@check_units('[temperature]')
def saturation_vapor_pressure(temperature):
r"""Calculate the saturation water vapor (partial) pressure.
Parameters
----------
temperature : `pint.Quantity`
air temperature
Returns
-------
`pint.Quantity`
The saturation water vapor (partial) pressure
See Also
--------
vapor_pressure, dewpoint
Notes
-----
Instead of temperature, dewpoint may be used in order to calculate
the actual (ambient) water vapor (partial) pressure.
The formula used is that from [Bolton1980]_ for T in degrees Celsius:
.. math:: 6.112 e^\frac{17.67T}{T + 243.5}
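Examples
--------
Illustrative only:
>>> from metpy.units import units
>>> es = saturation_vapor_pressure(25. * units.degC)
>>> # es is about 31.7 hPa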
"""
# Converted from original in terms of C to use kelvin. Using raw absolute values of C in
# a formula plays havoc with units support.
return sat_pressure_0c * np.exp(17.67 * (temperature - 273.15 * units.kelvin)
/ (temperature - 29.65 * units.kelvin))
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[dimensionless]')
def dewpoint_from_relative_humidity(temperature, relative_humidity):
r"""Calculate the ambient dewpoint given air temperature and relative humidity.
Parameters
----------
temperature : `pint.Quantity`
air temperature
relative_humidity : `pint.Quantity`
relative humidity expressed as a ratio in the range 0 < relative_humidity <= 1
Returns
-------
`pint.Quantity`
The dewpoint temperature
See Also
--------
dewpoint, saturation_vapor_pressure
"""
if np.any(relative_humidity > 1.2):
warnings.warn('Relative humidity >120%, ensure proper units.')
return dewpoint(relative_humidity * saturation_vapor_pressure(temperature))
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def dewpoint(vapor_pressure):
r"""Calculate the ambient dewpoint given the vapor pressure.
Parameters
----------
vapor_pressure : `pint.Quantity`
Water vapor partial pressure
Returns
-------
`pint.Quantity`
dewpoint temperature
See Also
--------
dewpoint_from_relative_humidity, saturation_vapor_pressure, vapor_pressure
Notes
-----
This function inverts the [Bolton1980]_ formula for saturation vapor
pressure to instead calculate the temperature. This yields the following
formula for dewpoint in degrees Celsius:
.. math:: T = \frac{243.5 \log(e / 6.112)}{17.67 - \log(e / 6.112)}
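Examples
--------
Illustrative only:
>>> from metpy.units import units
>>> td = dewpoint(17. * units.hPa)
>>> # td is roughly 15 degC, consistent with a saturation vapor pressure near 17 hPa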
"""
val = np.log(vapor_pressure / sat_pressure_0c)
return 0. * units.degC + 243.5 * units.delta_degC * val / (17.67 - val)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[pressure]', '[dimensionless]')
def mixing_ratio(partial_press, total_press, molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate the mixing ratio of a gas.
This calculates mixing ratio given its partial pressure and the total pressure of
the air. There are no required units for the input arrays, other than that
they have the same units.
Parameters
----------
partial_press : `pint.Quantity`
Partial pressure of the constituent gas
total_press : `pint.Quantity`
Total air pressure
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The (mass) mixing ratio, dimensionless (e.g. Kg/Kg or g/g)
Notes
-----
This function is a straightforward implementation of the equation given in many places,
such as [Hobbs1977]_ pg.73:
.. math:: r = \epsilon \frac{e}{p - e}
See Also | saturation_mixing_ratio, vapor_pressure
"""
return (molecular_weight_ratio * partial_press
/ (total_press - partial_press)).to('dimensionless')
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def saturation_mixing_ratio(total_press, temperature):
r"""Calculate the saturation mixing ratio of water vapor.
This calculation is given total pressure and the temperature. The implementation
uses the formula outlined in [Hobbs1977]_ pg.73.
Parameters
----------
total_press: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
air temperature
Returns
-------
`pint.Quantity`
The saturation mixing ratio, dimensionless
"""
return mixing_ratio(saturation_vapor_pressure(temperature), total_press)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def equivalent_potential_temperature(pressure, temperature, dewpoint):
r"""Calculate equivalent potential temperature.
This calculation must be given an air parcel's pressure, temperature, and dewpoint.
The implementation uses the formula outlined in [Bolton1980]_:
First, the LCL temperature is calculated:
.. math:: T_{L}=\frac{1}{\frac{1}{T_{D}-56}+\frac{ln(T_{K}/T_{D})}{800}}+56
Which is then used to calculate the potential temperature at the LCL:
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
\left(\frac{T_{K}}{T_{L}}\right)^{.28r}
Both of these are used to calculate the final equivalent potential temperature:
.. math:: \theta_{E}=\theta_{DL}\exp\left[\left(\frac{3036.}{T_{L}}
-1.78\right)*r(1+.448r)\right]
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Temperature of parcel
dewpoint: `pint.Quantity`
Dewpoint of parcel
Returns
-------
`pint.Quantity`
The equivalent potential temperature of the parcel
Notes
-----
[Bolton1980]_ formula for Theta-e is used, since according to
[DaviesJones2009]_ it is the most accurate non-iterative formulation
available.
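Examples
--------
A rough illustration (values approximate):
>>> from metpy.units import units
>>> theta_e = equivalent_potential_temperature(850. * units.hPa, 20. * units.degC,
...                                            15. * units.degC)
>>> # theta_e is roughly 345 K for this parcel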
"""
t = temperature.to('kelvin').magnitude
td = dewpoint.to('kelvin').magnitude
p = pressure.to('hPa').magnitude
e = saturation_vapor_pressure(dewpoint).to('hPa').magnitude
r = saturation_mixing_ratio(pressure, dewpoint).magnitude
t_l = 56 + 1. / (1. / (td - 56) + np.log(t / td) / 800.)
th_l = t * (1000 / (p - e)) ** mpconsts.kappa * (t / t_l) ** (0.28 * r)
th_e = th_l * np.exp((3036. / t_l - 1.78) * r * (1 + 0.448 * r))
return th_e * units.kelvin
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def saturation_equivalent_potential_temperature(pressure, temperature):
r"""Calculate saturation equivalent potential temperature.
This calculation must be given an air parcel's pressure and temperature.
The implementation uses the formula outlined in [Bolton1980]_ for the
equivalent potential temperature, and assumes a saturated process.
First, because we assume a saturated process, the temperature at the LCL is
equivalent to the current temperature. Therefore the following equation
.. math:: T_{L}=\frac{1}{\frac{1}{T_{D}-56}+\frac{ln(T_{K}/T_{D})}{800}}+56
reduces to
.. math:: T_{L} = T_{K}
Then the potential temperature at the temperature/LCL is calculated:
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
\left(\frac{T_{K}}{T_{L}}\right)^{.28r}
However, because
.. math:: T_{L} = T_{K}
it follows that
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
Both of these are used to calculate the final equivalent potential temperature:
.. math:: \theta_{E}=\theta_{DL}\exp\left[\left(\frac{3036.}{T_{K}}
-1.78\right)*r(1+.448r)\right]
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Temperature of parcel
Returns
-------
`pint.Quantity`
The saturation equivalent potential temperature of the parcel
Notes
-----
[Bolton1980]_ formula for Theta-e is used (for saturated case), since according to
[DaviesJones2009]_ it is the most accurate non-iterative formulation
available.
"""
t = temperature.to('kelvin').magnitude
p = pressure.to('hPa').magnitude
e = saturation_vapor_pressure(temperature).to('hPa').magnitude
r = saturation_mixing_ratio(pressure, temperature).magnitude
th_l = t * (1000 / (p - e)) ** mpconsts.kappa
th_es = th_l * np.exp((3036. / t - 1.78) * r * (1 + 0.448 * r))
return th_es * units.kelvin
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[dimensionless]', '[dimensionless]')
def virtual_temperature(temperature, mixing_ratio, molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate virtual temperature.
This calculation must be given an air parcel's temperature and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.80.
Parameters
----------
temperature: `pint.Quantity`
air temperature
mixing_ratio : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding virtual temperature of the parcel
Notes
-----
.. math:: T_v = T \frac{\text{w} + \epsilon}{\epsilon\,(1 + \text{w})}
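Examples
--------
Illustrative only:
>>> from metpy.units import units
>>> tv = virtual_temperature(300. * units.kelvin, 0.01 * units.dimensionless)
>>> # tv is about 301.8 K; moist air is slightly less dense than dry air at the same T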
"""
return temperature * ((mixing_ratio + molecular_weight_ratio)
/ (molecular_weight_ratio * (1 + mixing_ratio)))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]', '[dimensionless]')
def virtual_potential_temperature(pressure, temperature, mixing_ratio,
molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate virtual potential temperature.
This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
The implementation uses the formula outlined in [Markowski2010]_ pg.13.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
air temperature
mixing_ratio : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding virtual potential temperature of the parcel
Notes
-----
.. math:: \Theta_v = \Theta \frac{\text{w} + \epsilon}{\epsilon\,(1 + \text{w})}
"""
pottemp = potential_temperature(pressure, temperature)
return virtual_temperature(pottemp, mixing_ratio, molecular_weight_ratio)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]', '[dimensionless]')
def density(pressure, temperature, mixing_ratio, molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate density.
This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.67.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
air temperature
mixing_ratio : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding density of the parcel
Notes
-----
.. math:: \rho = \frac{p}{R_dT_v}
"""
virttemp = virtual_temperature(temperature, mixing_ratio, molecular_weight_ratio)
return (pressure / (mpconsts.Rd * virttemp)).to(units.kilogram / units.meter ** 3)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def relative_humidity_wet_psychrometric(pressure, dry_bulb_temperature, wet_bulb_temperature,
**kwargs):
r"""Calculate the relative humidity with wet bulb and dry bulb temperatures.
This uses a psychrometric relationship as outlined in [WMO8-2014]_, with
coefficients from [Fan1987]_.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
dry_bulb_temperature: `pint.Quantity`
Dry bulb temperature
wet_bulb_temperature: `pint.Quantity`
Wet bulb temperature
Returns
-------
`pint.Quantity`
Relative humidity
Notes
-----
.. math:: RH = \frac{e}{e_s}
* :math:`RH` is relative humidity as a unitless ratio
* :math:`e` is vapor pressure from the wet psychrometric calculation
* :math:`e_s` is the saturation vapor pressure
See Also
--------
psychrometric_vapor_pressure_wet, saturation_vapor_pressure
"""
return (psychrometric_vapor_pressure_wet(pressure, dry_bulb_temperature,
wet_bulb_temperature, **kwargs)
/ saturation_vapor_pressure(dry_bulb_temperature))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def psychrometric_vapor_pressure_wet(pressure, dry_bulb_temperature, wet_bulb_temperature,
psychrometer_coefficient=6.21e-4 / units.kelvin):
r"""Calculate the vapor pressure with wet bulb and dry bulb temperatures.
This uses a psychrometric relationship as outlined in [WMO8-2014]_, with
coefficients from [Fan1987]_.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
dry_bulb_temperature: `pint.Quantity`
Dry bulb temperature
wet_bulb_temperature: `pint.Quantity`
Wet bulb temperature
psychrometer_coefficient: `pint.Quantity`, optional
Psychrometer coefficient. Defaults to 6.21e-4 K^-1.
Returns
-------
`pint.Quantity`
Vapor pressure
Notes
-----
.. math:: e' = e'_w(T_w) - A p (T - T_w)
* :math:`e'` is vapor pressure
* :math:`e'_w(T_w)` is the saturation vapor pressure with respect to water at temperature
:math:`T_w`
* :math:`p` is the pressure of the wet bulb
* :math:`T` is the temperature of the dry bulb
* :math:`T_w` is the temperature of the wet bulb
* :math:`A` is the psychrometer coefficient
Psychrometer coefficient depends on the specific instrument being used and the ventilation
of the instrument.
See Also
--------
saturation_vapor_pressure
"""
return (saturation_vapor_pressure(wet_bulb_temperature) - psychrometer_coefficient
* pressure * (dry_bulb_temperature - wet_bulb_temperature).to('kelvin'))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def mixing_ratio_from_relative_humidity(pressure, temperature, relative_humidity):
r"""Calculate the mixing ratio from relative humidity, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
relative_humidity: array_like
The relative humidity expressed as a unitless ratio in the range [0, 1]. Can also pass
a percentage if proper units are attached.
Returns
-------
`pint.Quantity`
Dimensionless mixing ratio
Notes
-----
Formula adapted from [Hobbs1977]_ pg. 74.
.. math:: w = RH \, w_s
* :math:`w` is mixing ratio
* :math:`RH` is relative humidity as a unitless ratio
* :math:`w_s` is the saturation mixing ratio
See Also
--------
relative_humidity_from_mixing_ratio, saturation_mixing_ratio
"""
return (relative_humidity
* saturation_mixing_ratio(pressure, temperature)).to('dimensionless')
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def relative_humidity_from_mixing_ratio(pressure, temperature, mixing_ratio):
r"""Calculate the relative humidity from mixing ratio, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
mixing_ratio: `pint.Quantity`
Dimensionless mass mixing ratio
Returns
-------
`pint.Quantity`
Relative humidity
Notes
-----
Formula based on that from [Hobbs1977]_ pg. 74.
.. math:: relative_humidity = \frac{w}{w_s}
* :math:`relative_humidity` is relative humidity as a unitless ratio
* :math:`w` is mixing ratio
* :math:`w_s` is the saturation mixing ratio
See Also
--------
mixing_ratio_from_relative_humidity, saturation_mixing_ratio
"""
return mixing_ratio / saturation_mixing_ratio(pressure, temperature)
@exporter.export
@preprocess_xarray
@check_units('[dimensionless]')
def mixing_ratio_from_specific_humidity(specific_humidity):
r"""Calculate the mixing ratio from specific humidity.
Parameters
----------
specific_humidity: `pint.Quantity`
Specific humidity of air
Returns
-------
`pint.Quantity`
Mixing ratio
Notes
-----
Formula from [Salby1996]_ pg. 118.
.. math:: w = \frac{q}{1-q}
* :math:`w` is mixing ratio
* :math:`q` is the specific humidity
See Also
--------
mixing_ratio, specific_humidity_from_mixing_ratio
"""
try:
specific_humidity = specific_humidity.to('dimensionless')
except AttributeError:
pass
return specific_humidity / (1 - specific_humidity)
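# Worked example (hypothetical value): for q = 0.012 kg/kg,
# w = 0.012 / (1 - 0.012) ~ 0.01215, slightly larger than q, since mixing ratio is
# vapor mass per unit mass of *dry* air rather than of total air.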
@exporter.export
@preprocess_xarray
@check_units('[dimensionless]')
def specific_humidity_from_mixing_ratio(mixing_ratio):
r"""Calculate the specific humidity from the mixing ratio.
Parameters
----------
mixing_ratio: `pint.Quantity`
Dimensionless mass mixing ratio
Returns
-------
`pint.Quantity`
Specific humidity
Notes
-----
Formula from [Salby1996]_ pg. 118.
.. math:: q = \frac{w}{1+w}
* :math:`w` is mixing ratio
* :math:`q` is the specific humidity
See Also
--------
mixing_ratio, mixing_ratio_from_specific_humidity
"""
try:
mixing_ratio = mixing_ratio.to('dimensionless')
except AttributeError:
pass
return mixing_ratio / (1 + mixing_ratio)
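# Worked example (hypothetical value): for w = 0.012, q = 0.012 / 1.012 ~ 0.01186;
# feeding that q back through mixing_ratio_from_specific_humidity recovers w, as the
# two transforms are exact inverses of each other.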
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def relative_humidity_from_specific_humidity(pressure, temperature, specific_humidity):
r"""Calculate the relative humidity from specific humidity, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
specific_humidity: `pint.Quantity`
Specific humidity of air
Returns
-------
`pint.Quantity`
Relative humidity
Notes
-----
Formula based on that from [Hobbs1977]_ pg. 74. and [Salby1996]_ pg. 118.
.. math:: relative_humidity = \frac{q}{(1-q)w_s}
* :math:`relative_humidity` is relative humidity as a unitless ratio
* :math:`q` is specific humidity
* :math:`w_s` is the saturation mixing ratio
See Also
--------
relative_humidity_from_mixing_ratio
"""
return (mixing_ratio_from_specific_humidity(specific_humidity)
/ saturation_mixing_ratio(pressure, temperature))
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')
def cape_cin(pressure, temperature, dewpoint, parcel_profile, which_lfc='bottom',
which_el='top'):
r"""Calculate CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile and parcel path. CIN is integrated between the surface and
LFC, CAPE is integrated between the LFC and EL (or top of sounding). Intersection points
of the measured temperature profile and parcel profile are logarithmically interpolated.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest, in order from highest to
lowest pressure.
temperature : `pint.Quantity`
The atmospheric temperature corresponding to pressure.
dewpoint : `pint.Quantity`
The atmospheric dewpoint corresponding to pressure.
parcel_profile : `pint.Quantity`
The temperature profile of the parcel.
which_lfc : str
Choose which LFC to integrate from. Valid options are 'top', 'bottom', 'wide',
and 'most_cape'. Default is 'bottom'.
which_el : str
Choose which EL to integrate to. Valid options are 'top', 'bottom', 'wide',
and 'most_cape'. Default is 'top'.
Returns
-------
`pint.Quantity`
Convective Available Potential Energy (CAPE).
`pint.Quantity`
Convective INhibition (CIN).
Notes
-----
Formula adopted from [Hobbs1977]_.
.. math:: \text{CAPE} = -R_d \int_{LFC}^{EL} (T_{parcel} - T_{env}) d\text{ln}(p)
.. math:: \text{CIN} = -R_d \int_{SFC}^{LFC} (T_{parcel} - T_{env}) d\text{ln}(p)
* :math:`CAPE` Convective available potential energy
* :math:`CIN` Convective inhibition
* :math:`LFC` Pressure of the level of free convection
* :math:`EL` Pressure of the equilibrium level
* :math:`SFC` Level of the surface or beginning of parcel path
* :math:`R_d` Gas constant
* :math:`g` Gravitational acceleration
* :math:`T_{parcel}` Parcel temperature
* :math:`T_{env}` Environment temperature
* :math:`p` Atmospheric pressure
See Also
--------
lfc, el
"""
pressure, temperature, dewpoint, parcel_profile = _remove_nans(pressure, temperature,
dewpoint, parcel_profile)
# Calculate LFC limit of integration
lfc_pressure, _ = lfc(pressure, temperature, dewpoint,
parcel_temperature_profile=parcel_profile, which=which_lfc)
# If there is no LFC, no need to proceed.
if np.isnan(lfc_pressure):
return 0 * units('J/kg'), 0 * units('J/kg')
else:
lfc_pressure = lfc_pressure.magnitude
# Calculate the EL limit of integration
el_pressure, _ = el(pressure, temperature, dewpoint,
parcel_temperature_profile=parcel_profile, which=which_el)
# If there is no EL, use the top reading of the sounding.
if np.isnan(el_pressure):
el_pressure = pressure[-1].magnitude
else:
el_pressure = el_pressure.magnitude
# Difference between the parcel path and measured temperature profiles
y = (parcel_profile - temperature).to(units.degK)
# Estimate zero crossings
x, y = _find_append_zero_crossings(np.copy(pressure), y)
# CAPE
# Only use data between the LFC and EL for calculation
p_mask = _less_or_close(x.m, lfc_pressure) & _greater_or_close(x.m, el_pressure)
x_clipped = x[p_mask].magnitude
y_clipped = y[p_mask].magnitude
cape = (mpconsts.Rd
* (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))
# CIN
# Only use data between the surface and LFC for calculation
p_mask = _greater_or_close(x.m, lfc_pressure)
x_clipped = x[p_mask].magnitude
y_clipped = y[p_mask].magnitude
cin = (mpconsts.Rd
* (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))
# Set CIN to 0 if it's returned as a positive value (#1190)
if cin > 0 * units('J/kg'):
cin = 0 * units('J/kg')
return cape, cin
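# A minimal usage sketch (hypothetical sounding arrays; parcel_profile is assumed
# available in this module):
# prof = parcel_profile(pressure, temperature[0], dewpoint[0])
# cape, cin = cape_cin(pressure, temperature, dewpoint, prof)
# Both values are returned in J/kg, and CIN is clamped to <= 0 above.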
def _find_append_zero_crossings(x, y):
r"""
Find and interpolate zero crossings.
Estimate the zero crossings of an x,y series and add estimated crossings to series,
returning a sorted array with no duplicate values.
Parameters
----------
x : `pint.Quantity`
x values of data
y : `pint.Quantity`
y values of data
Returns
-------
x : `pint.Quantity`
x values of data
y : `pint.Quantity`
y values of data
"""
crossings = find_intersections(x[1:], y[1:], np.zeros_like(y[1:]) * y.units, log_x=True)
x = concatenate((x, crossings[0]))
y = concatenate((y, crossings[1]))
# Resort so that data are in order
sort_idx = np.argsort(x)
x = x[sort_idx]
y = y[sort_idx]
# Remove duplicate data points if there are any
keep_idx = np.ediff1d(x.magnitude, to_end=[1]) > 1e-6
x = x[keep_idx]
y = y[keep_idx]
return x, y
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def most_unstable_parcel(pressure, temperature, dewpoint, height=None,
bottom=None, depth=300 * units.hPa):
"""
Determine the most unstable parcel in a layer.
Determines the most unstable parcel of air by calculating the equivalent
potential temperature and finding its maximum in the specified layer.
Parameters
----------
pressure: `pint.Quantity`
Atmospheric pressure profile
temperature: `pint.Quantity`
Atmospheric temperature profile
dewpoint: `pint.Quantity`
Atmospheric dewpoint profile
height: `pint.Quantity`, optional
Atmospheric height profile. Standard atmosphere assumed when None (the default).
bottom: `pint.Quantity`, optional
Bottom of the layer to consider for the calculation in pressure or height.
Defaults to using the bottom pressure or height.
depth: `pint.Quantity`, optional
Depth of the layer to consider for the calculation in pressure or height. Defaults
to 300 hPa.
Returns
-------
`pint.Quantity`
Pressure, temperature, and dewpoint of most unstable parcel in the profile.
integer
Index of the most unstable parcel in the given profile
See Also
--------
get_layer
"""
p_layer, t_layer, td_layer = get_layer(pressure, temperature, dewpoint, bottom=bottom,
depth=depth, height=height, interpolate=False)
theta_e = equivalent_potential_temperature(p_layer, t_layer, td_layer)
max_idx = np.argmax(theta_e)
return p_layer[max_idx], t_layer[max_idx], td_layer[max_idx], max_idx
@exporter.export
@preprocess_xarray
@check_units('[temperature]', '[pressure]', '[temperature]')
def isentropic_interpolation(levels, pressure, temperature, *args, axis=0,
temperature_out=False, max_iters=50, eps=1e-6,
bottom_up_search=True, **kwargs):
r"""Interpolate data in isobaric coordinates to isentropic coordinates.
Parameters
----------
levels : array
One-dimensional array of desired potential temperature surfaces
pressure : array
One-dimensional array of pressure levels
temperature : array
Array of temperature
axis : int, optional
The axis corresponding to the vertical in the temperature array, defaults to 0.
temperature_out : bool, optional
If true, will calculate temperature and output as the last item in the output list.
Defaults to False.
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired absolute error in the calculated value, defaults to 1e-6.
bottom_up_search : bool, optional
Controls whether to search for levels bottom-up, or top-down. Defaults to
True, which is bottom-up search.
args : array, optional
Any additional variables will be interpolated to each isentropic level.
Returns
-------
list
List with pressure at each isentropic level, followed by each additional
argument interpolated to isentropic coordinates.
Notes
-----
Input variable arrays must have the same number of vertical levels as the pressure levels
array. Pressure is calculated on isentropic surfaces by assuming that temperature varies
linearly with the natural log of pressure. Linear interpolation is then used in the
vertical to find the pressure at each isentropic level. Interpolation method from
[Ziv1994]_. Any additional arguments are assumed to vary linearly with temperature and will
be linearly interpolated to the new isentropic levels.
See Also
--------
potential_temperature
"""
# iteration function to be used later
# Calculates theta from linearly interpolated temperature and solves for pressure
def _isen_iter(iter_log_p, isentlevs_nd, ka, a, b, pok):
exner = pok * np.exp(-ka * iter_log_p)
t = a * iter_log_p + b
# Newton-Raphson iteration
f = isentlevs_nd - t * exner
fp = exner * (ka * t - a)
return iter_log_p - (f / fp)
# Get dimensions in temperature
ndim = temperature.ndim
# Convert units
pres = pressure.to('hPa')
temperature = temperature.to('kelvin')
slices = [np.newaxis] * ndim
slices[axis] = slice(None)
slices = tuple(slices)
pres = np.broadcast_to(pres[slices].magnitude, temperature.shape) * pres.units
# Sort input data
sort_pres = np.argsort(pres.m, axis=axis)
sort_pres = np.swapaxes(np.swapaxes(sort_pres, 0, axis)[::-1], 0, axis)
sorter = broadcast_indices(pres, sort_pres, ndim, axis)
levs = pres[sorter]
tmpk = temperature[sorter]
levels = np.asarray(levels.m_as('kelvin')).reshape(-1)
isentlevels = levels[np.argsort(levels)]
# Make the desired isentropic levels the same shape as temperature
shape = list(temperature.shape)
shape[axis] = isentlevels.size
isentlevs_nd = np.broadcast_to(isentlevels[slices], shape)
# exponent in Poisson's equation (kappa), from the constants imported above
ka = mpconsts.kappa.m_as('dimensionless')
# calculate theta for each point
pres_theta = potential_temperature(levs, tmpk)
# Raise error if input theta level is larger than pres_theta max
if np.max(pres_theta.m) < np.max(levels):
raise ValueError('Input theta level out of data bounds')
# Find log of pressure to implement assumption of linear temperature dependence on
# ln(p)
log_p = np.log(levs.m)
# Calculations for interpolation routine
pok = mpconsts.P0 ** ka
# index values for each point for the pressure level nearest to the desired theta level
above, below, good = find_bounding_indices(pres_theta.m, levels, axis,
from_below=bottom_up_search)
# calculate constants for the interpolation
a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
b = tmpk.m[above] - a * log_p[above]
# calculate first guess for interpolation
isentprs = 0.5 * (log_p[above] + log_p[below])
# Make sure we ignore any nans in the data for solving; checking a is enough since it
# combines log_p and tmpk.
good &= ~np.isnan(a)
# iterative interpolation using scipy.optimize.fixed_point and _isen_iter defined above
log_p_solved = so.fixed_point(_isen_iter, isentprs[good],
args=(isentlevs_nd[good], ka, a[good], b[good], pok.m),
xtol=eps, maxiter=max_iters)
# get back pressure from log p
isentprs[good] = np.exp(log_p_solved)
# Mask out points we know are bad as well as points that are beyond the max pressure
isentprs[~(good & _less_or_close(isentprs, np.max(pres.m)))] = np.nan
# create list for storing output data
ret = [isentprs * units.hPa]
# if temperature_out = true, calculate temperature and output as last item in list
if temperature_out:
ret.append((isentlevs_nd / ((mpconsts.P0.m / isentprs) ** ka)) * units.kelvin)
# do an interpolation for each additional argument
if args:
others = interpolate_1d(isentlevels, pres_theta.m, *(arr[sorter] for arr in args),
axis=axis, return_list_always=True)
ret.extend(others)
return ret
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def surface_based_cape_cin(pressure, temperature, dewpoint):
r"""Calculate surface-based CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile for a surface-based parcel. CIN is integrated
between the surface and LFC, CAPE is integrated between the LFC and EL (or top of
sounding). Intersection points of the measured temperature profile and parcel profile are
logarithmically interpolated.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile. The first entry should be the starting
(surface) observation, with the array going from high to low pressure.
temperature : `pint.Quantity`
Temperature profile corresponding to the `pressure` profile.
dewpoint : `pint.Quantity`
Dewpoint profile corresponding to the `pressure` profile.
Returns
-------
`pint.Quantity`
Surface based Convective Available Potential Energy (CAPE).
`pint.Quantity`
Surface based Convective INhibition (CIN).
See Also
--------
cape_cin, parcel_profile
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
p, t, td, profile = parcel_profile_with_lcl(pressure, temperature, dewpoint)
return cape_cin(p, t, td, profile)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def most_unstable_cape_cin(pressure, temperature, dewpoint, **kwargs):
r"""Calculate most unstable CAPE/CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile and most unstable parcel path. CIN is integrated between the
surface and LFC, CAPE is integrated between the LFC and EL (or top of sounding).
Intersection points of the measured temperature profile and parcel profile are
logarithmically interpolated.
Parameters
----------
pressure : `pint.Quantity`
Pressure profile
temperature : `pint.Quantity`
Temperature profile
dewpoint : `pint.Quantity`
Dew point profile
kwargs
Additional keyword arguments to pass to `most_unstable_parcel`
Returns
-------
`pint.Quantity`
Most unstable Convective Available Potential Energy (CAPE).
`pint.Quantity`
Most unstable Convective INhibition (CIN).
See Also
--------
cape_cin, most_unstable_parcel, parcel_profile
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
_, _, _, parcel_idx = most_unstable_parcel(pressure, temperature, dewpoint, **kwargs)
p, t, td, mu_profile = parcel_profile_with_lcl(pressure[parcel_idx:],
temperature[parcel_idx:],
dewpoint[parcel_idx:])
return cape_cin(p, t, td, mu_profile)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def mixed_layer_cape_cin(pressure, temperature, dewpoint, **kwargs):
r"""Calculate mixed-layer CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile and mixed-layer parcel path. CIN is integrated between the
surface and LFC, CAPE is integrated between the LFC and EL (or top of sounding).
Intersection points of the measured temperature profile and parcel profile are
logarithmically interpolated. Kwargs for `mixed_parcel` can be provided, such as `depth`.
Default mixed-layer depth is 100 hPa.
Parameters
----------
pressure : `pint.Quantity`
Pressure profile
temperature : `pint.Quantity`
Temperature profile
dewpoint : `pint.Quantity`
Dewpoint profile
kwargs
Additional keyword arguments to pass to `mixed_parcel`
Returns
-------
`pint.Quantity`
Mixed-layer Convective Available Potential Energy (CAPE).
`pint.Quantity`
Mixed-layer Convective INhibition (CIN).
See Also
--------
cape_cin, mixed_parcel, parcel_profile
"""
depth = kwargs.get('depth', 100 * units.hPa)
parcel_pressure, parcel_temp, parcel_dewpoint = mixed_parcel(pressure, temperature,
dewpoint, **kwargs)
# Remove values below top of mixed layer and add in the mixed layer values
pressure_prof = pressure[pressure < (pressure[0] - depth)]
temp_prof = temperature[pressure < (pressure[0] - depth)]
dew_prof = dewpoint[pressure < (pressure[0] - depth)]
pressure_prof = concatenate([parcel_pressure, pressure_prof])
temp_prof = concatenate([parcel_temp, temp_prof])
dew_prof = concatenate([parcel_dewpoint, dew_prof])
p, t, td, ml_profile = parcel_profile_with_lcl(pressure_prof, temp_prof, dew_prof)
return cape_cin(p, t, td, ml_profile)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def mixed_parcel(pressure, temperature, dewpoint, parcel_start_pressure=None,
height=None, bottom=None, depth=100 * units.hPa, interpolate=True):
r"""Calculate the properties of a parcel mixed from a layer.
Determines the properties of an air parcel that is the result of complete mixing of a
given atmospheric layer.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Atmospheric temperature profile
dewpoint : `pint.Quantity`
Atmospheric dewpoint profile
parcel_start_pressure : `pint.Quantity`, optional
Pressure at which the mixed parcel should begin (default None)
height: `pint.Quantity`, optional
Atmospheric heights corresponding to the given pressures (default None)
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure
(default None)
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer
(default 100 hPa)
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data
Returns
-------
`pint.Quantity`
The pressure of the mixed parcel
`pint.Quantity`
The temperature of the mixed parcel
`pint.Quantity`
The dewpoint of the mixed parcel
"""
# If a parcel starting pressure is not provided, use the surface
if not parcel_start_pressure:
parcel_start_pressure = pressure[0]
# Calculate the potential temperature and mixing ratio over the layer
theta = potential_temperature(pressure, temperature)
mixing_ratio = saturation_mixing_ratio(pressure, dewpoint)
# Mix the variables over the layer
mean_theta, mean_mixing_ratio = mixed_layer(pressure, theta, mixing_ratio, bottom=bottom,
height=height, depth=depth,
interpolate=interpolate)
# Convert back to temperature
mean_temperature = mean_theta * exner_function(parcel_start_pressure)
# Convert back to dewpoint
mean_vapor_pressure = vapor_pressure(parcel_start_pressure, mean_mixing_ratio)
# Using globals() here allows us to keep the dewpoint parameter but still call the
# function of the same name.
mean_dewpoint = globals()['dewpoint'](mean_vapor_pressure)
return (parcel_start_pressure, mean_temperature.to(temperature.units),
mean_dewpoint.to(dewpoint.units))
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def mixed_layer(pressure, *args, height=None, bottom=None, depth=100 * units.hPa,
interpolate=True):
r"""Mix variable(s) over a layer, yielding a mass-weighted average.
This function will integrate a data variable with respect to pressure and determine the
average value using the mean value theorem.
Parameters
----------
pressure : array-like
Atmospheric pressure profile
datavar : array-like
Atmospheric variable measured at the given pressures
height: array-like, optional
Atmospheric heights corresponding to the given pressures (default None)
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure
(default None)
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer
(default 100 hPa)
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data (default True)
Returns
-------
`pint.Quantity`
The mixed value of the data variable.
"""
layer = get_layer(pressure, *args, height=height, bottom=bottom,
depth=depth, interpolate=interpolate)
p_layer = layer[0]
datavars_layer = layer[1:]
ret = []
for datavar_layer in datavars_layer:
actual_depth = abs(p_layer[0] - p_layer[-1])
ret.append((-1. / actual_depth.m) * np.trapz(datavar_layer.m, p_layer.m)
* datavar_layer.units)
return ret
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def dry_static_energy(height, temperature):
r"""Calculate the dry static energy of parcels.
This function will calculate the dry static energy following the first two terms of
equation 3.72 in [Hobbs2006]_.
Notes
-----
.. math:: \text{dry static energy} = c_{pd} T + g z
* :math:`T` is temperature
* :math:`z` is height
Parameters
----------
height : `pint.Quantity`
Atmospheric height
temperature : `pint.Quantity`
Air temperature
Returns
-------
`pint.Quantity`
The dry static energy
"""
return (mpconsts.g * height + mpconsts.Cp_d * temperature).to('kJ/kg')
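# Rough magnitude check (hypothetical values): with T = 290 K at z = 1000 m,
# s = c_pd * T + g * z ~ 1005 * 290 + 9.81 * 1000 ~ 3.01e5 J/kg ~ 301 kJ/kg,
# taking c_pd ~ 1005 J/(kg K) and g ~ 9.81 m/s^2.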
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]', '[dimensionless]')
def moist_static_energy(height, temperature, specific_humidity):
r"""Calculate the moist static energy of parcels.
This function will calculate the moist static energy following
equation 3.72 in [Hobbs2006]_.
Notes
-----
.. math:: \text{moist static energy} = c_{pd} T + g z + L_v q
* :math:`T` is temperature
* :math:`z` is height
* :math:`q` is specific humidity
Parameters
----------
height : `pint.Quantity`
Atmospheric height
temperature : `pint.Quantity`
Air temperature
specific_humidity : `pint.Quantity`
Atmospheric specific humidity
Returns
-------
`pint.Quantity`
The moist static energy
"""
return (dry_static_energy(height, temperature)
+ mpconsts.Lv * specific_humidity.to('dimensionless')).to('kJ/kg')
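# Continuing the hypothetical example above with q = 0.010 and L_v ~ 2.5e6 J/kg,
# the moisture term adds L_v * q ~ 25 kJ/kg, for a moist static energy of roughly
# 326 kJ/kg.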
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def thickness_hydrostatic(pressure, temperature, mixing_ratio=None,
molecular_weight_ratio=mpconsts.epsilon, bottom=None, depth=None):
r"""Calculate the thickness of a layer via the hypsometric equation.
This thickness calculation uses the pressure and temperature profiles (and optionally
mixing ratio) via the hypsometric equation with virtual temperature adjustment
.. math:: Z_2 - Z_1 = -\frac{R_d}{g} \int_{p_1}^{p_2} T_v d\ln p,
which is based off of Equation 3.24 in [Hobbs2006]_.
This assumes a hydrostatic atmosphere.
Layer bottom and depth specified in pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Atmospheric temperature profile
mixing_ratio : `pint.Quantity`, optional
Profile of dimensionless mass mixing ratio. If none is given, virtual temperature
is simply set to be the given temperature.
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
bottom : `pint.Quantity`, optional
The bottom of the layer in pressure. Defaults to the first observation.
depth : `pint.Quantity`, optional
The depth of the layer in hPa. Defaults to the full profile if bottom is not given,
and 100 hPa if bottom is given.
Returns
-------
`pint.Quantity`
The thickness of the layer in meters.
See Also
--------
thickness_hydrostatic_from_relative_humidity, pressure_to_height_std, virtual_temperature
"""
# Get the data for the layer, conditional upon bottom/depth being specified and mixing
# ratio being given
if bottom is None and depth is None:
if mixing_ratio is None:
layer_p, layer_virttemp = pressure, temperature
else:
layer_p = pressure
layer_virttemp = virtual_temperature(temperature, mixing_ratio,
molecular_weight_ratio)
else:
if mixing_ratio is None:
layer_p, layer_virttemp = get_layer(pressure, temperature, bottom=bottom,
depth=depth)
else:
layer_p, layer_temp, layer_w = get_layer(pressure, temperature, mixing_ratio,
bottom=bottom, depth=depth)
layer_virttemp = virtual_temperature(layer_temp, layer_w, molecular_weight_ratio)
# Take the integral (with unit handling) and return the result in meters
return (- mpconsts.Rd / mpconsts.g * np.trapz(
layer_virttemp.m_as('K'), x=np.log(layer_p.m_as('hPa'))) * units.K).to('m')
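# A minimal usage sketch (hypothetical profile): the 1000-500 hPa thickness is
# thickness_hydrostatic(pressure, temperature, bottom=1000 * units.hPa,
#                       depth=500 * units.hPa)
# and typically comes out in the 5,100-5,900 m range for the real atmosphere.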
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def thickness_hydrostatic_from_relative_humidity(pressure, temperature, relative_humidity,
bottom=None, depth=None):
r"""Calculate the thickness of a layer given pressure, temperature and relative humidity.
Similar to ``thickness_hydrostatic``, this thickness calculation uses the pressure,
temperature, and relative humidity profiles via the hypsometric equation with virtual
temperature adjustment.
.. math:: Z_2 - Z_1 = -\frac{R_d}{g} \int_{p_1}^{p_2} T_v d\ln p,
which is based off of Equation 3.24 in [Hobbs2006]_. Virtual temperature is calculated
from the profiles of temperature and relative humidity.
This assumes a hydrostatic atmosphere.
Layer bottom and depth specified in pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Atmospheric temperature profile
relative_humidity : `pint.Quantity`
Atmospheric relative humidity profile. The relative humidity is expressed as a
unitless ratio in the range [0, 1]. Can also pass a percentage if proper units are
attached.
bottom : `pint.Quantity`, optional
The bottom of the layer in pressure. Defaults to the first observation.
depth : `pint.Quantity`, optional
The depth of the layer in hPa. Defaults to the full profile if bottom is not given,
and 100 hPa if bottom is given.
Returns
-------
`pint.Quantity`
The thickness of the layer in meters.
See Also
--------
thickness_hydrostatic, pressure_to_height_std, virtual_temperature,
mixing_ratio_from_relative_humidity
"""
mixing = mixing_ratio_from_relative_humidity(pressure, temperature, relative_humidity)
return thickness_hydrostatic(pressure, temperature, mixing_ratio=mixing, bottom=bottom,
depth=depth)
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def brunt_vaisala_frequency_squared(height, potential_temperature, axis=0):
r"""Calculate the square of the Brunt-Vaisala frequency.
Brunt-Vaisala frequency squared (a measure of atmospheric stability) is given by the
formula:
.. math:: N^2 = \frac{g}{\theta} \frac{d\theta}{dz}
This formula is based off of Equations 3.75 and 3.77 in [Hobbs2006]_.
Parameters
----------
height : `pint.Quantity`
One-dimensional profile of atmospheric height
potential_temperature : `pint.Quantity`
Atmospheric potential temperature
axis : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0.
Returns
-------
`pint.Quantity`
The square of the Brunt-Vaisala frequency.
See Also
--------
brunt_vaisala_frequency, brunt_vaisala_period, potential_temperature
"""
# Ensure validity of temperature units
potential_temperature = potential_temperature.to('K')
# Calculate and return the square of Brunt-Vaisala frequency
return mpconsts.g / potential_temperature * first_derivative(potential_temperature,
x=height, axis=axis)
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def brunt_vaisala_frequency(height, potential_temperature, axis=0):
r"""Calculate the Brunt-Vaisala frequency.
This function will calculate the Brunt-Vaisala frequency as follows:
.. math:: N = \left( \frac{g}{\theta} \frac{d\theta}{dz} \right)^\frac{1}{2}
This formula is based off of Equations 3.75 and 3.77 in [Hobbs2006]_.
This function is a wrapper for `brunt_vaisala_frequency_squared` that filters out negative
(unstable) quantities and takes the square root.
Parameters
----------
height : `pint.Quantity`
One-dimensional profile of atmospheric height
potential_temperature : `pint.Quantity`
Atmospheric potential temperature
axis : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0.
Returns
-------
`pint.Quantity`
Brunt-Vaisala frequency.
See Also
--------
brunt_vaisala_frequency_squared, brunt_vaisala_period, potential_temperature
"""
bv_freq_squared = brunt_vaisala_frequency_squared(height, potential_temperature,
axis=axis)
bv_freq_squared[bv_freq_squared.magnitude < 0] = np.nan
return np.sqrt(bv_freq_squared)
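# Rough magnitude check (hypothetical values): for theta = 300 K increasing at
# 3 K/km, N^2 = (9.81 / 300) * 0.003 ~ 9.8e-5 s^-2, so N ~ 9.9e-3 s^-1, a typical
# tropospheric value.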
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]')
def brunt_vaisala_period(height, potential_temperature, axis=0):
r"""Calculate the Brunt-Vaisala period.
This function is a helper function for `brunt_vaisala_frequency` that calculates the
period of oscillation as in Exercise 3.13 of [Hobbs2006]_:
.. math:: \tau = \frac{2\pi}{N}
Returns `NaN` when :math:`N^2 \leq 0`.
Parameters
----------
height : `pint.Quantity`
One-dimensional profile of atmospheric height
potential_temperature : `pint.Quantity`
Atmospheric potential temperature
axis : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0.
Returns
-------
`pint.Quantity`
Brunt-Vaisala period.
See Also
--------
brunt_vaisala_frequency, brunt_vaisala_frequency_squared, potential_temperature
"""
bv_freq_squared = brunt_vaisala_frequency_squared(height, potential_temperature,
axis=axis)
bv_freq_squared[bv_freq_squared.magnitude <= 0] = np.nan
return 2 * np.pi / np.sqrt(bv_freq_squared)
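# For the hypothetical N above (~9.9e-3 s^-1), the period is 2 * pi / N ~ 630 s,
# i.e. roughly ten minutes.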
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def wet_bulb_temperature(pressure, temperature, dewpoint):
"""Calculate the wet-bulb temperature using Normand's rule.
This function calculates the wet-bulb temperature using the Normand method. The LCL is
computed, and that parcel is brought back down to the starting pressure along a moist
adiabat. The Normand method (and others) is described and compared by [Knox2017]_.
Parameters
----------
pressure : `pint.Quantity`
Initial atmospheric pressure
temperature : `pint.Quantity`
Initial atmospheric temperature
dewpoint : `pint.Quantity`
Initial atmospheric dewpoint
Returns
-------
`pint.Quantity`
Wet-bulb temperature
See Also
--------
lcl, moist_lapse
"""
if not hasattr(pressure, 'shape'):
pressure = np.atleast_1d(pressure)
temperature = np.atleast_1d(temperature)
dewpoint = np.atleast_1d(dewpoint)
it = np.nditer([pressure, temperature, dewpoint, None],
op_dtypes=['float', 'float', 'float', 'float'],
flags=['buffered'])
for press, temp, dewp, ret in it:
press = press * pressure.units
temp = temp * temperature.units
dewp = dewp * dewpoint.units
lcl_pressure, lcl_temperature = lcl(press, temp, dewp)
moist_adiabat_temperatures = moist_lapse(concatenate([lcl_pressure, press]),
lcl_temperature)
ret[...] = moist_adiabat_temperatures[-1].magnitude
# If we started with a scalar, return a scalar
if it.operands[3].size == 1:
return it.operands[3][0] * moist_adiabat_temperatures.units
return it.operands[3] * moist_adiabat_temperatures.units
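# A minimal usage sketch (hypothetical values):
# wet_bulb_temperature(1000. * units.hPa, 25. * units.degC, 15. * units.degC)
# returns a value between the dewpoint and the dry-bulb temperature (roughly
# 18-19 degC here), since T_d <= T_w <= T always holds.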
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def static_stability(pressure, temperature, axis=0):
r"""Calculate the static stability within a vertical profile.
.. math:: \sigma = -\frac{RT}{p} \frac{\partial \ln \theta}{\partial p}
This formula is based on equation 4.3.6 in [Bluestein1992]_.
Parameters
----------
pressure : `pint.Quantity`
Profile of atmospheric pressure
temperature : `pint.Quantity`
Profile of temperature
axis : int, optional
The axis corresponding to vertical in the pressure and temperature arrays, defaults
to 0.
Returns
-------
`pint.Quantity`
The profile of static stability.
"""
theta = potential_temperature(pressure, temperature)
return - mpconsts.Rd * temperature / pressure * first_derivative(np.log(theta.m_as('K')),
x=pressure, axis=axis)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def dewpoint_from_specific_humidity(pressure, temperature, specific_humidity):
r"""Calculate the dewpoint from specific humidity, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
specific_humidity: `pint.Quantity`
Specific humidity of air
Returns
-------
`pint.Quantity`
Dew point temperature
See Also
--------
relative_humidity_from_mixing_ratio, dewpoint_from_relative_humidity
"""
return dewpoint_from_relative_humidity(temperature,
relative_humidity_from_specific_humidity(
pressure, temperature, specific_humidity))
@exporter.export
@preprocess_xarray
@check_units('[length]/[time]', '[pressure]', '[temperature]')
def vertical_velocity_pressure(w, pressure, temperature, mixing_ratio=0):
r"""Calculate omega from w assuming hydrostatic conditions.
This function converts vertical velocity with respect to height
:math:`\left(w = \frac{Dz}{Dt}\right)` to that
with respect to pressure :math:`\left(\omega = \frac{Dp}{Dt}\right)`
assuming hydrostatic conditions on the synoptic scale.
By Equation 7.33 in [Hobbs2006]_,
.. math:: \omega \simeq -\rho g w
Density (:math:`\rho`) is calculated using the :func:`density` function,
from the given pressure and temperature. If `mixing_ratio` is given, the virtual
temperature correction is used, otherwise, dry air is assumed.
Parameters
----------
w: `pint.Quantity`
Vertical velocity in terms of height
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
mixing_ratio: `pint.Quantity`, optional
Mixing ratio of air
Returns
-------
`pint.Quantity`
Vertical velocity in terms of pressure (in Pascals / second)
See Also
--------
density, vertical_velocity
"""
rho = density(pressure, temperature, mixing_ratio)
return (-mpconsts.g * rho * w).to('Pa/s')
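# Rough magnitude check (hypothetical values): near the surface rho ~ 1.2 kg/m^3,
# so w = 1 m/s maps to omega ~ -1.2 * 9.81 ~ -11.8 Pa/s; rising air (w > 0) yields
# negative omega, as expected under hydrostatic balance.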
@exporter.export
@preprocess_xarray
@check_units('[pressure]/[time]', '[pressure]', '[temperature]')
def vertical_velocity(omega, pressure, temperature, mixing_ratio=0):
r"""Calculate w from omega assuming hydrostatic conditions.
This function converts vertical velocity with respect to pressure
:math:`\left(\omega = \frac{Dp}{Dt}\right)` to that with respect to height
:math:`\left(w = \frac{Dz}{Dt}\right)` assuming hydrostatic conditions on
the synoptic scale. By Equation 7.33 in [Hobbs2006]_,
.. math:: \omega \simeq -\rho g w
so that
.. math:: w \simeq \frac{- \omega}{\rho g}
Density (:math:`\rho`) is calculated using the :func:`density` function,
from the given pressure and temperature. If `mixing_ratio` is given, the virtual
temperature correction is used, otherwise, dry air is assumed.
Parameters
----------
omega: `pint.Quantity`
Vertical velocity in terms of pressure
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
mixing_ratio: `pint.Quantity`, optional
Mixing ratio of air
Returns
-------
`pint.Quantity`
Vertical velocity in terms of height (in meters / second)
See Also
--------
density, vertical_velocity_pressure
"""
rho = density(pressure, temperature, mixing_ratio)
return (omega / (- mpconsts.g * rho)).to('m/s')
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]')
def specific_humidity_from_dewpoint(pressure, dewpoint):
r"""Calculate the specific humidity from the dewpoint temperature and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
dewpoint: `pint.Quantity`
Dewpoint temperature
Returns
-------
`pint.Quantity`
Specific humidity
See Also
--------
mixing_ratio, saturation_mixing_ratio
"""
mixing_ratio = saturation_mixing_ratio(pressure, dewpoint)
return specific_humidity_from_mixing_ratio(mixing_ratio)
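# A minimal usage sketch (hypothetical values, standard saturation vapor pressure):
# specific_humidity_from_dewpoint(1000. * units.hPa, 15. * units.degC) gives
# q ~ 0.0107, since w_s(1000 hPa, 15 degC) ~ 0.0108.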
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[temperature]', '[temperature]')
def lifted_index(pressure, temperature, parcel_profile):
"""Calculate Lifted Index from the pressure temperature and parcel profile.
Lifted index formula derived from [Galway1956]_ and referenced by [Doswell-Schultz2006]_:
LI = T500 - Tp500
where:
T500 is the measured temperature at 500 hPa.
Tp500 is the temperature of the lifted parcel at 500 hPa.
Calculation of the lifted index is defined as the temperature difference between the
observed 500 hPa temperature and the temperature of a parcel lifted from the
surface to 500 hPa.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest, in order from highest to
lowest pressure.
temperature : `pint.Quantity`
The atmospheric temperature corresponding to pressure.
parcel_profile : `pint.Quantity`
The temperature profile of the parcel.
Returns
-------
`pint.Quantity`
Lifted Index.
"""
# find the index for the 500 hPa pressure level.
idx = np.where(pressure == 500 * units.hPa)
# find the measured temperature at 500 hPa.
T500 = temperature[idx]
# find the parcel profile temperature at 500 hPa.
Tp500 = parcel_profile[idx]
# calculate the lifted index.
lifted_index = T500 - Tp500.to(units.degC)
return lifted_index
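# Worked example (hypothetical values): if the observed 500 hPa temperature is
# -14 degC and the lifted parcel arrives there at -10 degC,
# LI = -14 - (-10) = -4 K, a negative (unstable) value.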
@exporter.export
@preprocess_xarray
@check_units('[length]', '[temperature]', '[speed]', '[speed]')
def gradient_richardson_number(height, potential_temperature, u, v, axis=0):
r"""Calculate the gradient (or flux) Richardson number.
.. math:: Ri = \frac{(g/\theta) \left(\partial \theta/\partial z\right)}
{\left(\partial u / \partial z\right)^2 + \left(\partial v / \partial z\right)^2}
See [Holton2004]_ pg. 121-122. As noted by [Holton2004]_, flux Richardson
number values below 0.25 indicate turbulence.
Parameters
----------
height : `pint.Quantity`
Atmospheric height
potential_temperature : `pint.Quantity`
Atmospheric potential temperature
u : `pint.Quantity`
x component of the wind
v : `pint.Quantity`
y component of the wind
axis : int, optional
The axis corresponding to vertical, defaults to 0.
Returns
-------
`pint.Quantity`
Gradient Richardson number
"""
dthetadz = first_derivative(potential_temperature, x=height, axis=axis)
dudz = first_derivative(u, x=height, axis=axis)
dvdz = first_derivative(v, x=height, axis=axis)
return (mpconsts.g / potential_temperature) * (dthetadz / (dudz ** 2 + dvdz ** 2)) | -------- |
helpers.ts | import {
TABLE_TOTAL_SUMMARY_TYPE, TABLE_GROUP_SUMMARY_TYPE, TABLE_TREE_SUMMARY_TYPE,
} from './constants';
import { TABLE_DATA_TYPE } from '../table/constants';
import { GetColumnSummariesFn, IsSpecificCellFn, IsSpecificRowFn, SummaryItem } from '../../types';
export const isTotalSummaryTableCell: IsSpecificCellFn = (
tableRow, tableColumn,
) => tableRow.type === TABLE_TOTAL_SUMMARY_TYPE && tableColumn.type === TABLE_DATA_TYPE;
export const isGroupSummaryTableCell: IsSpecificCellFn = (
tableRow, tableColumn,
) => tableRow.type === TABLE_GROUP_SUMMARY_TYPE && tableColumn.type === TABLE_DATA_TYPE;
export const isTreeSummaryTableCell: IsSpecificCellFn = (
tableRow, tableColumn,
) => tableRow.type === TABLE_TREE_SUMMARY_TYPE && tableColumn.type === TABLE_DATA_TYPE;
export const isTotalSummaryTableRow: IsSpecificRowFn = tableRow => (
tableRow.type === TABLE_TOTAL_SUMMARY_TYPE
);
export const isGroupSummaryTableRow: IsSpecificRowFn = tableRow => (
tableRow.type === TABLE_GROUP_SUMMARY_TYPE
);
export const isTreeSummaryTableRow: IsSpecificRowFn = tableRow => (
tableRow.type === TABLE_TREE_SUMMARY_TYPE
);
export const getColumnSummaries: GetColumnSummariesFn = ( | .map((item, index) => [item, index] as [SummaryItem, number])
.filter(([item]) => item.columnName === columnName)
.map(([item, index]) => ({
type: item.type,
value: summaryValues[index],
})); | summaryItems, columnName, summaryValues,
) => summaryItems |
server.ts | import express from 'express'; | import { routes } from './routes';
const app = express();
app.use(express.json());
app.use(routes);
export { app as Server }; | |
test_write_metrics_reports.py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import tempfile
import unittest
import torch
from monai.handlers.utils import write_metrics_reports
class TestWriteMetricsReports(unittest.TestCase):
def | (self):
with tempfile.TemporaryDirectory() as tempdir:
write_metrics_reports(
save_dir=tempdir,
images=["filepath1", "filepath2"],
metrics={"metric1": 1, "metric2": 2},
metric_details={"metric3": torch.tensor([[1, 2], [2, 3]]), "metric4": torch.tensor([[5, 6], [7, 8]])},
summary_ops=["mean", "median", "max", "90percentile"],
deli="\t",
output_type="csv",
)
# check the metrics.csv and content
self.assertTrue(os.path.exists(os.path.join(tempdir, "metrics.csv")))
with open(os.path.join(tempdir, "metrics.csv")) as f:
f_csv = csv.reader(f)
for i, row in enumerate(f_csv):
self.assertEqual(row, [f"metric{i + 1}\t{i + 1}"])
self.assertTrue(os.path.exists(os.path.join(tempdir, "metric3_raw.csv")))
# check the metric_raw.csv and content
with open(os.path.join(tempdir, "metric3_raw.csv")) as f:
f_csv = csv.reader(f)
for i, row in enumerate(f_csv):
if i > 0:
self.assertEqual(row, [f"filepath{i}\t{float(i)}\t{float(i + 1)}\t{i + 0.5}"])
self.assertTrue(os.path.exists(os.path.join(tempdir, "metric3_summary.csv")))
# check the metric_summary.csv and content
with open(os.path.join(tempdir, "metric3_summary.csv")) as f:
f_csv = csv.reader(f)
for i, row in enumerate(f_csv):
if i == 1:
self.assertEqual(row, ["class0\t1.5000\t1.5000\t2.0000\t1.9000"])
elif i == 2:
self.assertEqual(row, ["class1\t2.5000\t2.5000\t3.0000\t2.9000"])
elif i == 3:
self.assertEqual(row, ["mean\t2.0000\t2.0000\t2.5000\t2.4000"])
self.assertTrue(os.path.exists(os.path.join(tempdir, "metric4_raw.csv")))
self.assertTrue(os.path.exists(os.path.join(tempdir, "metric4_summary.csv")))
if __name__ == "__main__":
unittest.main()
| test_content |
image_corruptor.py | import functools
import random
from math import cos, pi
import cv2
import kornia
import numpy as np
import torch
from kornia.augmentation import ColorJitter
from data.util import read_img
from PIL import Image
from io import BytesIO
# Get a rough visualization of the above distribution. (Y-axis is meaningless, just spreads data)
from utils.util import opt_get
'''
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
data = np.asarray([get_rand() for _ in range(5000)])
plt.plot(data, np.random.uniform(size=(5000,)), 'x')
plt.show()
'''
def kornia_color_jitter_numpy(img, setting):
if setting * 255 > 1:
# I'm using Kornia's ColorJitter, which requires pytorch arrays in b,c,h,w format.
img = torch.from_numpy(img).permute(2,0,1).unsqueeze(0)
img = ColorJitter(setting, setting, setting, setting)(img)
img = img.squeeze(0).permute(1,2,0).numpy()
return img
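# A minimal usage sketch (hypothetical array): expects an HWC float image in [0, 1],
# e.g.
# img = np.random.rand(64, 64, 3).astype(np.float32)
# jittered = kornia_color_jitter_numpy(img, 0.1)
# Settings at or below 1/255 pass through unchanged via the guard above.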
# Performs image corruption on a list of images from a configurable set of corruption
# options.
class | :
def __init__(self, opt):
self.opt = opt
self.reset_random()
self.blur_scale = opt['corruption_blur_scale'] if 'corruption_blur_scale' in opt.keys() else 1
self.fixed_corruptions = opt['fixed_corruptions'] if 'fixed_corruptions' in opt.keys() else []
self.num_corrupts = opt['num_corrupts_per_image'] if 'num_corrupts_per_image' in opt.keys() else 0
self.cosine_bias = opt_get(opt, ['cosine_bias'], True)
if self.num_corrupts == 0:
return
else:
self.random_corruptions = opt['random_corruptions'] if 'random_corruptions' in opt.keys() else []
def reset_random(self):
if 'random_seed' in self.opt.keys():
self.rand = random.Random(self.opt['random_seed'])
else:
self.rand = random.Random()
# Feeds a random uniform through a cosine distribution to slightly bias corruptions towards "uncorrupted".
# Return is on [0,1] with a bias towards 0.
def get_rand(self):
r = self.rand.random()
if self.cosine_bias:
return 1 - cos(r * pi / 2)
else:
return r
def corrupt_images(self, imgs, return_entropy=False):
if self.num_corrupts == 0 and not self.fixed_corruptions:
if return_entropy:
return imgs, []
else:
return imgs
if self.num_corrupts == 0:
augmentations = []
else:
augmentations = random.choices(self.random_corruptions, k=self.num_corrupts)
# Sources of entropy
corrupted_imgs = []
entropy = []
undo_fns = []
applied_augs = augmentations + self.fixed_corruptions
for img in imgs:
for aug in augmentations:
r = self.get_rand()
img, undo_fn = self.apply_corruption(img, aug, r, applied_augs)
if undo_fn is not None:
undo_fns.append(undo_fn)
for aug in self.fixed_corruptions:
r = self.get_rand()
img, undo_fn = self.apply_corruption(img, aug, r, applied_augs)
entropy.append(r)
if undo_fn is not None:
undo_fns.append(undo_fn)
# Apply undo_fns after all corruptions are finished, in same order.
for ufn in undo_fns:
img = ufn(img)
corrupted_imgs.append(img)
if return_entropy:
return corrupted_imgs, entropy
else:
return corrupted_imgs
def apply_corruption(self, img, aug, rand_val, applied_augmentations):
undo_fn = None
if 'color_quantization' in aug:
# Color quantization
quant_div = 2 ** (int(rand_val * 10 / 3) + 2)
img = img * 255
img = (img // quant_div) * quant_div
img = img / 255
elif 'color_jitter' in aug:
lo_end = 0
hi_end = .2
setting = rand_val * (hi_end - lo_end) + lo_end
img = kornia_color_jitter_numpy(img, setting)
elif 'gaussian_blur' in aug:
img = cv2.GaussianBlur(img, (0,0), self.blur_scale*rand_val*1.5)
elif 'motion_blur' in aug:
# Motion blur
intensity = int(self.blur_scale * rand_val * 3) + 1
angle = random.randint(0,360)
k = np.zeros((intensity, intensity), dtype=np.float32)
k[(intensity - 1) // 2, :] = np.ones(intensity, dtype=np.float32)
k = cv2.warpAffine(k, cv2.getRotationMatrix2D((intensity / 2 - 0.5, intensity / 2 - 0.5), angle, 1.0),
(intensity, intensity))
k = k * (1.0 / np.sum(k))
img = cv2.filter2D(img, -1, k)
elif 'block_noise' in aug:
# Large distortion blocks in part of an img, such as is used to mask out a face.
pass
elif 'lq_resampling' in aug:
# Random mode interpolation HR->LR->HR
if 'lq_resampling4x' == aug:
scale = 4
else:
if rand_val < .3:
scale = 1
elif rand_val < .7:
scale = 2
else:
scale = 4
if scale > 1:
interpolation_modes = [cv2.INTER_NEAREST, cv2.INTER_CUBIC, cv2.INTER_LINEAR, cv2.INTER_LANCZOS4]
mode = random.choice(interpolation_modes)
# Downsample first, then upsample using the random mode.
img = cv2.resize(img, dsize=(img.shape[1]//scale, img.shape[0]//scale), interpolation=mode)
def lq_resampling_undo_fn(scale, img):
return cv2.resize(img, dsize=(img.shape[1]*scale, img.shape[0]*scale), interpolation=cv2.INTER_LINEAR)
undo_fn = functools.partial(lq_resampling_undo_fn, scale)
elif 'color_shift' in aug:
# Color shift
pass
elif 'interlacing' in aug:
# Interlacing distortion
pass
elif 'chromatic_aberration' in aug:
# Chromatic aberration
pass
elif 'noise' in aug:
# Random noise
if 'noise-5' == aug:
noise_intensity = 5 / 255.0
else:
noise_intensity = (rand_val*6) / 255.0
img += np.random.rand(*img.shape) * noise_intensity
elif 'jpeg' in aug:
if 'noise' not in applied_augmentations and 'noise-5' not in applied_augmentations:
if aug == 'jpeg':
lo = 10
span = 20
elif aug == 'jpeg-low':
lo = 15
span = 10
elif aug == 'jpeg-medium':
lo = 23
span = 25
elif aug == 'jpeg-broad':
lo = 15
span = 60
elif aug == 'jpeg-normal':
lo = 47
span = 35
else:
raise NotImplementedError("specified jpeg corruption doesn't exist")
# JPEG compression
qf = int((1 - rand_val) * span) + lo
# Use PIL to perform a mock compression to a data buffer, then swap back to cv2.
img = (img * 255).astype(np.uint8)
img = Image.fromarray(img)
buffer = BytesIO()
img.save(buffer, "JPEG", quality=qf, optimize=True)
buffer.seek(0)
jpeg_img_bytes = np.asarray(bytearray(buffer.read()), dtype="uint8")
img = read_img("buffer", jpeg_img_bytes, rgb=True)
elif 'saturation' in aug:
# Lightening / saturation
saturation = rand_val * .3
img = np.clip(img + saturation, a_max=1, a_min=0)
elif 'greyscale' in aug:
img = np.tile(np.mean(img, axis=2, keepdims=True), [1,1,3])
elif 'none' not in aug:
raise NotImplementedError("Augmentation doesn't exist")
return img, undo_fn
| ImageCorruptor |
DeleteSnapshotPolicy.go | // Copyright 2018 JDCLOUD.COM
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// NOTE: This class is auto generated by the jdcloud code generator program.
package apis
import (
"github.com/jdcloud-api/jdcloud-sdk-go/core"
)
type DeleteSnapshotPolicyRequest struct {
core.JDCloudRequest
/* Region ID */
RegionId string `json:"regionId"`
/* Policy ID */
PolicyId string `json:"policyId"`
}
/*
* param regionId: Region ID (Required)
* param policyId: Policy ID (Required)
*
* @Deprecated, not compatible when mandatory parameters changed
*/
func NewDeleteSnapshotPolicyRequest(
regionId string,
policyId string,
) *DeleteSnapshotPolicyRequest {
return &DeleteSnapshotPolicyRequest{
JDCloudRequest: core.JDCloudRequest{
URL: "/regions/{regionId}/snapshotPolicy/{policyId}",
Method: "DELETE",
Header: nil,
Version: "v1",
},
RegionId: regionId,
PolicyId: policyId,
}
}
/*
* param regionId: Region ID (Required)
* param policyId: Policy ID (Required)
*/
func NewDeleteSnapshotPolicyR | policyId string,
) *DeleteSnapshotPolicyRequest {
return &DeleteSnapshotPolicyRequest{
JDCloudRequest: core.JDCloudRequest{
URL: "/regions/{regionId}/snapshotPolicy/{policyId}",
Method: "DELETE",
Header: nil,
Version: "v1",
},
RegionId: regionId,
PolicyId: policyId,
}
}
/* This constructor has better compatible ability when API parameters changed */
func NewDeleteSnapshotPolicyRequestWithoutParam() *DeleteSnapshotPolicyRequest {
return &DeleteSnapshotPolicyRequest{
JDCloudRequest: core.JDCloudRequest{
URL: "/regions/{regionId}/snapshotPolicy/{policyId}",
Method: "DELETE",
Header: nil,
Version: "v1",
},
}
}
/* param regionId: Region ID (Required) */
func (r *DeleteSnapshotPolicyRequest) SetRegionId(regionId string) {
r.RegionId = regionId
}
/* param policyId: Policy ID (Required) */
func (r *DeleteSnapshotPolicyRequest) SetPolicyId(policyId string) {
r.PolicyId = policyId
}
// GetRegionId returns path parameter 'regionId' if exist,
// otherwise return empty string
func (r DeleteSnapshotPolicyRequest) GetRegionId() string {
return r.RegionId
}
type DeleteSnapshotPolicyResponse struct {
RequestID string `json:"requestId"`
Error core.ErrorResponse `json:"error"`
Result DeleteSnapshotPolicyResult `json:"result"`
}
type DeleteSnapshotPolicyResult struct {
} | equestWithAllParams(
regionId string,
|
Drawer.d.ts | // Elix is a JavaScript project, but we define TypeScript declarations so we can
// confirm our code is type safe, and to support TypeScript users.
import Dialog from './Dialog.js';
import EffectMixin from './EffectMixin.js';
import LanguageDirectionMixin from './LanguageDirectionMixin.js';
import TouchSwipeMixin from './TouchSwipeMixin.js';
import TrackpadSwipeMixin from './TrackpadSwipeMixin.js';
import TransitionEffectMixin from './TransitionEffectMixin.js';
export default class | extends
LanguageDirectionMixin(
TouchSwipeMixin(
TrackpadSwipeMixin(
EffectMixin(
TransitionEffectMixin(
Dialog
))))) {
fromEdge: 'bottom'|'end'|'left'|'right'|'start'|'top';
}
| Drawer |
dirs.rs | use std::path::{Path, PathBuf};
static TEMPLATES: &str = "templates";
static PAGES: &str = "pages";
static ASSETS: &str = "assets";
static OUTPUT: &str = "out";
pub fn get_template_dir<P>(root: P) -> PathBuf
where P: AsRef<Path>
{
let mut b = PathBuf::from(root.as_ref());
b.push(TEMPLATES);
b
}
pub fn get_page_dir<P>(root: P) -> PathBuf
where P: AsRef<Path>
{
let mut b = PathBuf::from(root.as_ref());
b.push(PAGES);
b
}
pub fn get_asset_dir<P>(root: P) -> PathBuf
where P: AsRef<Path>
{
let mut b = PathBuf::from(root.as_ref()); | b.push(ASSETS);
b
}
pub fn get_output_dir<P>(root: P) -> PathBuf
where P: AsRef<Path>
{
let mut b = PathBuf::from(root.as_ref());
b.push(OUTPUT);
b
} | |
focus_outline_manager.js | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
cr.define('cr.ui', function() {
/**
* The class name to set on the document element.
* @const
*/
var CLASS_NAME = 'focus-outline-visible';
/**
* This class sets a CSS class name on the HTML element of |doc| when the user
* presses the tab key. It removes the class name when the user clicks
* anywhere.
*
* This allows you to write CSS like this:
*
* html.focus-outline-visible my-element:focus {
* outline: 5px auto -webkit-focus-ring-color;
* }
*
* And the outline will only be shown if the user uses the keyboard to get to
* it.
*
* @param {Document} doc The document to attach the focus outline manager to.
* @constructor
*/
function | (doc) {
this.classList_ = doc.documentElement.classList;
var self = this;
doc.addEventListener('keydown', function(e) {
if (e.keyCode == 9) // Tab
self.focusByKeyboard_ = true;
}, true);
doc.addEventListener('mousedown', function(e) {
self.focusByKeyboard_ = false;
}, true);
doc.addEventListener('focus', function(event) {
// Update visibility only when focus is actually changed.
self.visible = self.focusByKeyboard_;
}, true);
}
FocusOutlineManager.prototype = {
/**
* Whether focus change is triggered by TAB key.
* @type {boolean}
* @private
*/
focusByKeyboard_: true,
/**
* Whether the focus outline should be visible.
* @type {boolean}
*/
set visible(visible) {
if (visible)
this.classList_.add(CLASS_NAME);
else
this.classList_.remove(CLASS_NAME);
},
get visible() {
return this.classList_.contains(CLASS_NAME);
}
};
/**
* Array of Document and FocusOutlineManager pairs.
* @type {Array}
*/
var docsToManager = [];
/**
* Gets a per-document singleton focus outline manager.
* @param {Document} doc The document to get the |FocusOutlineManager| for.
* @return {cr.ui.FocusOutlineManager} The per document singleton focus
* outline manager.
*/
FocusOutlineManager.forDocument = function(doc) {
for (var i = 0; i < docsToManager.length; i++) {
if (doc == docsToManager[i][0])
return docsToManager[i][1];
}
var manager = new FocusOutlineManager(doc);
docsToManager.push([doc, manager]);
return manager;
};
return {
FocusOutlineManager: FocusOutlineManager
};
});
| FocusOutlineManager |
server.go | package network
import (
"net"
"google.golang.org/grpc"
)
type ServiceProvider func(server *grpc.Server)
type Server struct {
Addr string
serviceProviders []ServiceProvider
}
func NewServer(addr string) *Server {
return &Server{Addr: addr}
}
func (s *Server) ListenAndServe() error {
addr := s.Addr
if addr == "" {
panic("unable to start server without an address")
}
l, err := net.Listen("tcp", addr)
if err != nil {
return err
}
| return s.Serve(l)
}
func (s *Server) Serve(l net.Listener) error {
grpcServer := grpc.NewServer()
for _, sp := range s.serviceProviders {
sp(grpcServer)
}
return grpcServer.Serve(l)
}
func (s *Server) RegisterServices(sp ServiceProvider) {
s.serviceProviders = append(s.serviceProviders, sp)
} | |
swed.rs | extern crate serde;
use serde::Deserialize;
#[derive(Debug, Deserialize)]
pub enum EntryType {
#[serde(rename = "K")]
Credit,
#[serde(rename = "D")]
Debit,
}
#[derive(Debug, Deserialize)]
pub enum RecordType {
#[serde(rename = "10")]
StartBalance,
#[serde(rename = "20")]
Transaction,
#[serde(rename = "82")]
Turnover,
#[serde(rename = "86")]
EndBalance,
#[serde(rename = "900")]
Interest,
}
#[derive(Debug, Deserialize)]
pub struct SwedbankCsv {
#[serde(rename = "Ieraksta tips")] // 1
pub record_type: RecordType,
#[serde(rename = "Datums")] // 2
pub date: String, | pub memo: String,
#[serde(rename = "Summa")] // 5
pub amount: String,
#[serde(rename = "Valūta")] // 5
pub currency: String,
#[serde(rename = "Debets/Kredīts")] // 7
pub debit_or_credit: EntryType,
#[serde(rename = "Arhīva kods")] // 7
pub transaction_id: String,
#[serde(rename = "Maksājuma veids")] // 9
pub payment_type: String,
} | #[serde(rename = "Saņēmējs/Maksātājs")] // 3
pub payee: String,
#[serde(rename = "Informācija saņēmējam")] // 4 |
cci.py | import click
from backend.bots import CCIBot, CGroupBot
@click.command()
@click.argument("bot_name", nargs=1)
def | (bot_name):
if bot_name == "cci_bot":
CCIBot().run()
elif bot_name == "cgroup_bot":
CGroupBot().run()
else:
click.echo("No such bot yet...")
if __name__ == '__main__':
cci()
| cci |