file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes: random_line_split, identifier_name, identifier_body, conditional_block)
---|---|---|---|---|
mod.rs | extern crate android_glue;
use libc;
use std::ffi::{CString};
use std::sync::mpsc::{Receiver, channel};
use {CreationError, Event, MouseCursor};
use CreationError::OsError;
use events::ElementState::{Pressed, Released};
use events::Event::{MouseInput, MouseMoved};
use events::MouseButton;
use std::collections::VecDeque;
use Api;
use BuilderAttribs;
use GlRequest;
use native_monitor::NativeMonitorId;
pub struct Window {
display: ffi::egl::types::EGLDisplay,
context: ffi::egl::types::EGLContext,
surface: ffi::egl::types::EGLSurface,
event_rx: Receiver<android_glue::Event>,
}
pub struct MonitorID;
mod ffi;
pub fn get_available_monitors() -> VecDeque<MonitorID> {
let mut rb = VecDeque::new();
rb.push_back(MonitorID);
rb
}
pub fn get_primary_monitor() -> MonitorID {
MonitorID
}
impl MonitorID {
pub fn get_name(&self) -> Option<String> {
Some("Primary".to_string())
}
pub fn get_native_identifier(&self) -> NativeMonitorId {
NativeMonitorId::Unavailable
}
pub fn get_dimensions(&self) -> (u32, u32) {
unimplemented!()
}
}
#[cfg(feature = "headless")]
pub struct HeadlessContext(i32);
#[cfg(feature = "headless")]
impl HeadlessContext {
/// See the docs in the crate root file.
pub fn new(_builder: BuilderAttribs) -> Result<HeadlessContext, CreationError> {
unimplemented!()
}
/// See the docs in the crate root file.
pub unsafe fn make_current(&self) {
unimplemented!()
}
/// See the docs in the crate root file.
pub fn is_current(&self) -> bool {
unimplemented!()
}
/// See the docs in the crate root file.
pub fn get_proc_address(&self, _addr: &str) -> *const () {
unimplemented!()
}
pub fn get_api(&self) -> ::Api {
::Api::OpenGlEs
}
}
#[cfg(feature = "headless")]
unsafe impl Send for HeadlessContext {}
#[cfg(feature = "headless")]
unsafe impl Sync for HeadlessContext {}
pub struct PollEventsIterator<'a> {
window: &'a Window,
}
impl<'a> Iterator for PollEventsIterator<'a> {
type Item = Event;
fn next(&mut self) -> Option<Event> {
match self.window.event_rx.try_recv() {
Ok(event) => {
match event {
android_glue::Event::EventDown => Some(MouseInput(Pressed, MouseButton::Left)),
android_glue::Event::EventUp => Some(MouseInput(Released, MouseButton::Left)),
android_glue::Event::EventMove(x, y) => Some(MouseMoved((x as i32, y as i32))),
_ => None,
}
}
Err(_) => {
None
}
}
}
}
pub struct WaitEventsIterator<'a> {
window: &'a Window,
}
impl<'a> Iterator for WaitEventsIterator<'a> {
type Item = Event;
fn next(&mut self) -> Option<Event> {
use std::time::Duration;
use std::old_io::timer;
loop {
// first drain any event already queued, via poll_events()
if let Some(ev) = self.window.poll_events().next() {
return Some(ev);
}
// TODO: Implement a proper way of sleeping on the event queue
timer::sleep(Duration::milliseconds(16));
}
}
}
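// Illustrative consumer of the two iterators above (a sketch only; the
// surrounding event-loop code is assumed and not part of this module):
//
// for event in window.wait_events() {
//     match event {
//         MouseInput(Pressed, MouseButton::Left) => { /* touch began */ }
//         MouseMoved((x, y)) => { /* finger moved to (x, y) */ }
//         _ => {}
//     }
// }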
impl Window {
pub fn new(builder: BuilderAttribs) -> Result<Window, CreationError> {
use std::{mem, ptr};
if builder.sharing.is_some() {
unimplemented!()
}
let native_window = unsafe { android_glue::get_native_window() };
if native_window.is_null() {
return Err(OsError(format!("Android's native window is null")));
}
let display = unsafe {
let display = ffi::egl::GetDisplay(mem::transmute(ffi::egl::DEFAULT_DISPLAY));
if display.is_null() {
return Err(OsError("No EGL display connection available".to_string()));
}
display
};
android_glue::write_log("eglGetDisplay succeeded");
let (_major, _minor) = unsafe {
let mut major: ffi::egl::types::EGLint = mem::uninitialized();
let mut minor: ffi::egl::types::EGLint = mem::uninitialized();
if ffi::egl::Initialize(display, &mut major, &mut minor) == 0 {
return Err(OsError(format!("eglInitialize failed")))
}
(major, minor)
};
android_glue::write_log("eglInitialize succeeded");
let use_gles2 = match builder.gl_version {
GlRequest::Specific(Api::OpenGlEs, (2, _)) => true,
GlRequest::Specific(Api::OpenGlEs, _) => false,
GlRequest::Specific(_, _) => panic!("Only OpenGL ES is supported"), // FIXME: return a result
GlRequest::GlThenGles { opengles_version: (2, _), .. } => true,
_ => false,
};
let mut attribute_list = vec!();
if use_gles2 {
attribute_list.push_all(&[
ffi::egl::RENDERABLE_TYPE as i32,
ffi::egl::OPENGL_ES2_BIT as i32,
]);
}
{
let (red, green, blue) = match builder.color_bits.unwrap_or(24) {
24 => (8, 8, 8),
16 => (5, 6, 5), // RGB565
_ => panic!("Bad color_bits"),
};
attribute_list.push_all(&[ffi::egl::RED_SIZE as i32, red]);
attribute_list.push_all(&[ffi::egl::GREEN_SIZE as i32, green]);
attribute_list.push_all(&[ffi::egl::BLUE_SIZE as i32, blue]);
}
attribute_list.push_all(&[
ffi::egl::DEPTH_SIZE as i32,
builder.depth_bits.unwrap_or(8) as i32,
]);
attribute_list.push(ffi::egl::NONE as i32);
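// With a GLES 2 request and the defaults above (24-bit color, 8-bit depth),
// the finished list is, illustratively:
//   [RENDERABLE_TYPE, OPENGL_ES2_BIT,
//    RED_SIZE, 8, GREEN_SIZE, 8, BLUE_SIZE, 8,
//    DEPTH_SIZE, 8, NONE]
// i.e. flat key/value pairs terminated by NONE, as eglChooseConfig expects.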
let config = unsafe {
let mut num_config: ffi::egl::types::EGLint = mem::uninitialized();
let mut config: ffi::egl::types::EGLConfig = mem::uninitialized();
if ffi::egl::ChooseConfig(display, attribute_list.as_ptr(), &mut config, 1,
&mut num_config) == 0
{
return Err(OsError(format!("eglChooseConfig failed")))
}
if num_config <= 0 {
return Err(OsError(format!("eglChooseConfig returned no available config")))
}
config
};
android_glue::write_log("eglChooseConfig succeeded");
let context = unsafe {
let mut context_attributes = vec!();
if use_gles2 {
context_attributes.push_all(&[ffi::egl::CONTEXT_CLIENT_VERSION as i32, 2]);
}
context_attributes.push(ffi::egl::NONE as i32);
let context = ffi::egl::CreateContext(display, config, ptr::null(),
context_attributes.as_ptr());
if context.is_null() {
return Err(OsError(format!("eglCreateContext failed")))
}
context
};
android_glue::write_log("eglCreateContext succeeded");
let surface = unsafe {
let surface = ffi::egl::CreateWindowSurface(display, config, native_window, ptr::null());
if surface.is_null() {
return Err(OsError(format!("eglCreateWindowSurface failed")))
}
surface
};
android_glue::write_log("eglCreateWindowSurface succeeded");
let (tx, rx) = channel();
android_glue::add_sender(tx);
Ok(Window {
display: display,
context: context,
surface: surface,
event_rx: rx,
})
}
pub fn is_closed(&self) -> bool {
false
}
pub fn set_title(&self, _: &str) {
}
pub fn show(&self) {
}
pub fn hide(&self) {
}
pub fn get_position(&self) -> Option<(i32, i32)> {
None
}
pub fn set_position(&self, _x: i32, _y: i32) {
}
| } else {
Some((
unsafe { ffi::ANativeWindow_getWidth(native_window) } as u32,
unsafe { ffi::ANativeWindow_getHeight(native_window) } as u32
))
}
}
pub fn get_outer_size(&self) -> Option<(u32, u32)> {
self.get_inner_size()
}
pub fn set_inner_size(&self, _x: u32, _y: u32) {
}
pub fn create_window_proxy(&self) -> WindowProxy {
WindowProxy
}
pub fn poll_events(&self) -> PollEventsIterator {
PollEventsIterator {
window: self
}
}
pub fn wait_events(&self) -> WaitEventsIterator {
WaitEventsIterator {
window: self
}
}
pub fn make_current(&self) {
unsafe {
ffi::egl::MakeCurrent(self.display, self.surface, self.surface, self.context);
}
}
pub fn is_current(&self) -> bool {
unsafe { ffi::egl::GetCurrentContext() == self.context }
}
pub fn get_proc_address(&self, addr: &str) -> *const () {
let addr = CString::from_slice(addr.as_bytes());
let addr = addr.as_ptr();
unsafe {
ffi::egl::GetProcAddress(addr) as *const ()
}
}
pub fn swap_buffers(&self) {
unsafe {
ffi::egl::SwapBuffers(self.display, self.surface);
}
}
pub fn platform_display(&self) -> *mut libc::c_void {
self.display as *mut libc::c_void
}
pub fn platform_window(&self) -> *mut libc::c_void {
unimplemented!()
}
pub fn get_api(&self) -> ::Api {
::Api::OpenGlEs
}
pub fn set_window_resize_callback(&mut self, _: Option<fn(u32, u32)>) {
}
pub fn set_cursor(&self, _: MouseCursor) {
}
pub fn hidpi_factor(&self) -> f32 {
1.0
}
pub fn set_cursor_position(&self, _x: i32, _y: i32) -> Result<(), ()> {
unimplemented!();
}
}
unsafe impl Send for Window {}
unsafe impl Sync for Window {}
#[cfg(feature = "window")]
#[derive(Clone)]
pub struct WindowProxy;
impl WindowProxy {
pub fn wakeup_event_loop(&self) {
unimplemented!()
}
}
#[unsafe_destructor]
impl Drop for Window {
fn drop(&mut self) {
use std::ptr;
unsafe {
// we don't call MakeCurrent(0, 0) because we are not sure that the context
// is still the current one
android_glue::write_log("Destroying gl-init window");
ffi::egl::DestroySurface(self.display, self.surface);
ffi::egl::DestroyContext(self.display, self.context);
ffi::egl::Terminate(self.display);
}
}
} | pub fn get_inner_size(&self) -> Option<(u32, u32)> {
let native_window = unsafe { android_glue::get_native_window() };
if native_window.is_null() {
None | random_line_split |
deriving-meta-multiple.rs | // xfail-fast
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[deriving(Eq)]
#[deriving(Clone)]
#[deriving(IterBytes)]
struct Foo {
bar: uint,
baz: int
}
pub fn | () {
use core::hash::{Hash, HashUtil}; // necessary for IterBytes check
let a = Foo {bar: 4, baz: -3};
a == a; // check for Eq impl w/o testing its correctness
a.clone(); // check for Clone impl w/o testing its correctness
a.hash(); // check for IterBytes impl w/o testing its correctness
}
| main | identifier_name |
deriving-meta-multiple.rs | // xfail-fast
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
// | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[deriving(Eq)]
#[deriving(Clone)]
#[deriving(IterBytes)]
struct Foo {
bar: uint,
baz: int
}
pub fn main() {
use core::hash::{Hash, HashUtil}; // necessary for IterBytes check
let a = Foo {bar: 4, baz: -3};
a == a; // check for Eq impl w/o testing its correctness
a.clone(); // check for Clone impl w/o testing its correctness
a.hash(); // check for IterBytes impl w/o testing its correctness
} | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | random_line_split |
deriving-meta-multiple.rs | // xfail-fast
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[deriving(Eq)]
#[deriving(Clone)]
#[deriving(IterBytes)]
struct Foo {
bar: uint,
baz: int
}
pub fn main() | {
use core::hash::{Hash, HashUtil}; // necessary for IterBytes check
let a = Foo {bar: 4, baz: -3};
a == a; // check for Eq impl w/o testing its correctness
a.clone(); // check for Clone impl w/o testing its correctness
a.hash(); // check for IterBytes impl w/o testing its correctness
} | identifier_body |
|
derive_input_object.rs | #![allow(clippy::match_wild_err_arm)]
use crate::{
result::{GraphQLScope, UnsupportedAttribute},
util::{self, span_container::SpanContainer, RenameRule},
};
use proc_macro2::TokenStream;
use quote::{quote, ToTokens};
use syn::{self, ext::IdentExt, spanned::Spanned, Data, Fields};
pub fn impl_input_object(ast: syn::DeriveInput, error: GraphQLScope) -> syn::Result<TokenStream> {
let ast_span = ast.span();
let fields = match ast.data {
Data::Struct(data) => match data.fields {
Fields::Named(named) => named.named,
_ => {
return Err(
error.custom_error(ast_span, "all fields must be named, e.g., `test: String`")
)
}
},
_ => return Err(error.custom_error(ast_span, "can only be used on structs with fields")),
};
// Parse attributes.
let attrs = util::ObjectAttributes::from_attrs(&ast.attrs)?;
// Determine the exposed GraphQL type name.
let ident = &ast.ident;
let name = attrs
.name
.clone()
.map(SpanContainer::into_inner)
.unwrap_or_else(|| ident.to_string());
let fields = fields
.into_iter()
.filter_map(|field| {
let span = field.span();
let field_attrs = match util::FieldAttributes::from_attrs(
&field.attrs,
util::FieldAttributeParseMode::Object,
) {
Ok(attrs) => attrs,
Err(e) => {
proc_macro_error::emit_error!(e);
return None;
}
};
let field_ident = field.ident.as_ref().unwrap();
let name = match field_attrs.name {
Some(ref name) => name.to_string(),
None => attrs
.rename
.unwrap_or(RenameRule::CamelCase)
.apply(&field_ident.unraw().to_string()),
};
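// e.g. an unannotated field `user_name` is exposed as "userName";
// CamelCase is the default rename rule applied just above.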
if let Some(span) = field_attrs.skip {
error.unsupported_attribute_within(span.span(), UnsupportedAttribute::Skip)
}
if let Some(span) = field_attrs.deprecation {
error.unsupported_attribute_within(
span.span_ident(),
UnsupportedAttribute::Deprecation,
)
}
if name.starts_with("__") {
error.no_double_underscore(if let Some(name) = field_attrs.name {
name.span_ident()
} else {
name.span()
});
}
let resolver_code = quote!(#field_ident);
let default = field_attrs
.default
.map(|default| match default.into_inner() {
Some(expr) => expr.into_token_stream(),
None => quote! { Default::default() },
});
Some(util::GraphQLTypeDefinitionField {
name,
_type: field.ty,
args: Vec::new(),
description: field_attrs.description.map(SpanContainer::into_inner),
deprecation: None,
resolver_code,
is_type_inferred: true,
is_async: false,
default,
span,
})
})
.collect::<Vec<_>>();
proc_macro_error::abort_if_dirty();
if fields.is_empty() |
if let Some(duplicates) =
crate::util::duplicate::Duplicate::find_by_key(&fields, |field| &field.name)
{
error.duplicate(duplicates.iter())
}
if !attrs.interfaces.is_empty() {
attrs.interfaces.iter().for_each(|elm| {
error.unsupported_attribute(elm.span(), UnsupportedAttribute::Interface)
});
}
if let Some(duplicates) =
crate::util::duplicate::Duplicate::find_by_key(&fields, |field| field.name.as_str())
{
error.duplicate(duplicates.iter());
}
if !attrs.is_internal && name.starts_with("__") {
error.no_double_underscore(if let Some(name) = attrs.name {
name.span_ident()
} else {
ident.span()
});
}
proc_macro_error::abort_if_dirty();
let definition = util::GraphQLTypeDefiniton {
name,
_type: syn::parse_str(&ast.ident.to_string()).unwrap(),
context: attrs.context.map(SpanContainer::into_inner),
scalar: attrs.scalar.map(SpanContainer::into_inner),
description: attrs.description.map(SpanContainer::into_inner),
fields,
generics: ast.generics,
interfaces: vec![],
include_type_generics: true,
generic_scalar: true,
no_async: attrs.no_async.is_some(),
};
Ok(definition.into_input_object_tokens())
}
| {
error.not_empty(ast_span);
} | conditional_block |
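// For orientation, this macro backs a derive of roughly the following shape
// (an illustrative sketch; attribute spellings vary between crate versions):
//
// #[derive(GraphQLInputObject)]
// struct Coordinate {
//     latitude: f64,
//     longitude: f64,
// }
//
// Each named field becomes an input-object field, camelCased by default.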
derive_input_object.rs | #![allow(clippy::match_wild_err_arm)]
use crate::{
result::{GraphQLScope, UnsupportedAttribute},
util::{self, span_container::SpanContainer, RenameRule},
};
use proc_macro2::TokenStream;
use quote::{quote, ToTokens};
use syn::{self, ext::IdentExt, spanned::Spanned, Data, Fields};
pub fn | (ast: syn::DeriveInput, error: GraphQLScope) -> syn::Result<TokenStream> {
let ast_span = ast.span();
let fields = match ast.data {
Data::Struct(data) => match data.fields {
Fields::Named(named) => named.named,
_ => {
return Err(
error.custom_error(ast_span, "all fields must be named, e.g., `test: String`")
)
}
},
_ => return Err(error.custom_error(ast_span, "can only be used on structs with fields")),
};
// Parse attributes.
let attrs = util::ObjectAttributes::from_attrs(&ast.attrs)?;
// Determine the exposed GraphQL type name.
let ident = &ast.ident;
let name = attrs
.name
.clone()
.map(SpanContainer::into_inner)
.unwrap_or_else(|| ident.to_string());
let fields = fields
.into_iter()
.filter_map(|field| {
let span = field.span();
let field_attrs = match util::FieldAttributes::from_attrs(
&field.attrs,
util::FieldAttributeParseMode::Object,
) {
Ok(attrs) => attrs,
Err(e) => {
proc_macro_error::emit_error!(e);
return None;
}
};
let field_ident = field.ident.as_ref().unwrap();
let name = match field_attrs.name {
Some(ref name) => name.to_string(),
None => attrs
.rename
.unwrap_or(RenameRule::CamelCase)
.apply(&field_ident.unraw().to_string()),
};
if let Some(span) = field_attrs.skip {
error.unsupported_attribute_within(span.span(), UnsupportedAttribute::Skip)
}
if let Some(span) = field_attrs.deprecation {
error.unsupported_attribute_within(
span.span_ident(),
UnsupportedAttribute::Deprecation,
)
}
if name.starts_with("__") {
error.no_double_underscore(if let Some(name) = field_attrs.name {
name.span_ident()
} else {
name.span()
});
}
let resolver_code = quote!(#field_ident);
let default = field_attrs
.default
.map(|default| match default.into_inner() {
Some(expr) => expr.into_token_stream(),
None => quote! { Default::default() },
});
Some(util::GraphQLTypeDefinitionField {
name,
_type: field.ty,
args: Vec::new(),
description: field_attrs.description.map(SpanContainer::into_inner),
deprecation: None,
resolver_code,
is_type_inferred: true,
is_async: false,
default,
span,
})
})
.collect::<Vec<_>>();
proc_macro_error::abort_if_dirty();
if fields.is_empty() {
error.not_empty(ast_span);
}
if let Some(duplicates) =
crate::util::duplicate::Duplicate::find_by_key(&fields, |field| &field.name)
{
error.duplicate(duplicates.iter())
}
if !attrs.interfaces.is_empty() {
attrs.interfaces.iter().for_each(|elm| {
error.unsupported_attribute(elm.span(), UnsupportedAttribute::Interface)
});
}
if let Some(duplicates) =
crate::util::duplicate::Duplicate::find_by_key(&fields, |field| field.name.as_str())
{
error.duplicate(duplicates.iter());
}
if !attrs.is_internal && name.starts_with("__") {
error.no_double_underscore(if let Some(name) = attrs.name {
name.span_ident()
} else {
ident.span()
});
}
proc_macro_error::abort_if_dirty();
let definition = util::GraphQLTypeDefiniton {
name,
_type: syn::parse_str(&ast.ident.to_string()).unwrap(),
context: attrs.context.map(SpanContainer::into_inner),
scalar: attrs.scalar.map(SpanContainer::into_inner),
description: attrs.description.map(SpanContainer::into_inner),
fields,
generics: ast.generics,
interfaces: vec![],
include_type_generics: true,
generic_scalar: true,
no_async: attrs.no_async.is_some(),
};
Ok(definition.into_input_object_tokens())
}
| impl_input_object | identifier_name |
derive_input_object.rs | #![allow(clippy::match_wild_err_arm)]
use crate::{
result::{GraphQLScope, UnsupportedAttribute},
util::{self, span_container::SpanContainer, RenameRule},
};
use proc_macro2::TokenStream;
use quote::{quote, ToTokens};
use syn::{self, ext::IdentExt, spanned::Spanned, Data, Fields};
pub fn impl_input_object(ast: syn::DeriveInput, error: GraphQLScope) -> syn::Result<TokenStream> | {
let ast_span = ast.span();
let fields = match ast.data {
Data::Struct(data) => match data.fields {
Fields::Named(named) => named.named,
_ => {
return Err(
error.custom_error(ast_span, "all fields must be named, e.g., `test: String`")
)
}
},
_ => return Err(error.custom_error(ast_span, "can only be used on structs with fields")),
};
// Parse attributes.
let attrs = util::ObjectAttributes::from_attrs(&ast.attrs)?;
// Determine the exposed GraphQL type name.
let ident = &ast.ident;
let name = attrs
.name
.clone()
.map(SpanContainer::into_inner)
.unwrap_or_else(|| ident.to_string());
let fields = fields
.into_iter()
.filter_map(|field| {
let span = field.span();
let field_attrs = match util::FieldAttributes::from_attrs(
&field.attrs,
util::FieldAttributeParseMode::Object,
) {
Ok(attrs) => attrs,
Err(e) => {
proc_macro_error::emit_error!(e);
return None;
}
};
let field_ident = field.ident.as_ref().unwrap();
let name = match field_attrs.name {
Some(ref name) => name.to_string(),
None => attrs
.rename
.unwrap_or(RenameRule::CamelCase)
.apply(&field_ident.unraw().to_string()),
};
if let Some(span) = field_attrs.skip {
error.unsupported_attribute_within(span.span(), UnsupportedAttribute::Skip)
}
if let Some(span) = field_attrs.deprecation {
error.unsupported_attribute_within(
span.span_ident(),
UnsupportedAttribute::Deprecation,
)
}
if name.starts_with("__") {
error.no_double_underscore(if let Some(name) = field_attrs.name {
name.span_ident()
} else {
name.span()
});
}
let resolver_code = quote!(#field_ident);
let default = field_attrs
.default
.map(|default| match default.into_inner() {
Some(expr) => expr.into_token_stream(),
None => quote! { Default::default() },
});
Some(util::GraphQLTypeDefinitionField {
name,
_type: field.ty,
args: Vec::new(),
description: field_attrs.description.map(SpanContainer::into_inner),
deprecation: None,
resolver_code,
is_type_inferred: true,
is_async: false,
default,
span,
})
})
.collect::<Vec<_>>();
proc_macro_error::abort_if_dirty();
if fields.is_empty() {
error.not_empty(ast_span);
}
if let Some(duplicates) =
crate::util::duplicate::Duplicate::find_by_key(&fields, |field| &field.name)
{
error.duplicate(duplicates.iter())
}
if !attrs.interfaces.is_empty() {
attrs.interfaces.iter().for_each(|elm| {
error.unsupported_attribute(elm.span(), UnsupportedAttribute::Interface)
});
}
if let Some(duplicates) =
crate::util::duplicate::Duplicate::find_by_key(&fields, |field| field.name.as_str())
{
error.duplicate(duplicates.iter());
}
if !attrs.is_internal && name.starts_with("__") {
error.no_double_underscore(if let Some(name) = attrs.name {
name.span_ident()
} else {
ident.span()
});
}
proc_macro_error::abort_if_dirty();
let definition = util::GraphQLTypeDefiniton {
name,
_type: syn::parse_str(&ast.ident.to_string()).unwrap(),
context: attrs.context.map(SpanContainer::into_inner),
scalar: attrs.scalar.map(SpanContainer::into_inner),
description: attrs.description.map(SpanContainer::into_inner),
fields,
generics: ast.generics,
interfaces: vec![],
include_type_generics: true,
generic_scalar: true,
no_async: attrs.no_async.is_some(),
};
Ok(definition.into_input_object_tokens())
} | identifier_body |
|
derive_input_object.rs | #![allow(clippy::match_wild_err_arm)]
use crate::{
result::{GraphQLScope, UnsupportedAttribute},
util::{self, span_container::SpanContainer, RenameRule},
};
use proc_macro2::TokenStream;
use quote::{quote, ToTokens};
use syn::{self, ext::IdentExt, spanned::Spanned, Data, Fields};
pub fn impl_input_object(ast: syn::DeriveInput, error: GraphQLScope) -> syn::Result<TokenStream> {
let ast_span = ast.span();
let fields = match ast.data {
Data::Struct(data) => match data.fields {
Fields::Named(named) => named.named,
_ => {
return Err(
error.custom_error(ast_span, "all fields must be named, e.g., `test: String`")
)
}
},
_ => return Err(error.custom_error(ast_span, "can only be used on structs with fields")),
};
// Parse attributes.
let attrs = util::ObjectAttributes::from_attrs(&ast.attrs)?;
// Determine the exposed GraphQL type name.
let ident = &ast.ident;
let name = attrs
.name
.clone()
.map(SpanContainer::into_inner)
.unwrap_or_else(|| ident.to_string());
let fields = fields
.into_iter()
.filter_map(|field| {
let span = field.span();
let field_attrs = match util::FieldAttributes::from_attrs(
&field.attrs,
util::FieldAttributeParseMode::Object,
) {
Ok(attrs) => attrs,
Err(e) => {
proc_macro_error::emit_error!(e);
return None;
}
};
let field_ident = field.ident.as_ref().unwrap();
let name = match field_attrs.name {
Some(ref name) => name.to_string(),
None => attrs
.rename
.unwrap_or(RenameRule::CamelCase)
.apply(&field_ident.unraw().to_string()),
};
if let Some(span) = field_attrs.skip {
error.unsupported_attribute_within(span.span(), UnsupportedAttribute::Skip)
}
if let Some(span) = field_attrs.deprecation {
error.unsupported_attribute_within(
span.span_ident(),
UnsupportedAttribute::Deprecation,
)
}
if name.starts_with("__") {
error.no_double_underscore(if let Some(name) = field_attrs.name {
name.span_ident()
} else {
name.span()
});
}
let resolver_code = quote!(#field_ident);
let default = field_attrs
.default
.map(|default| match default.into_inner() {
Some(expr) => expr.into_token_stream(),
None => quote! { Default::default() },
});
Some(util::GraphQLTypeDefinitionField {
name,
_type: field.ty, | description: field_attrs.description.map(SpanContainer::into_inner),
deprecation: None,
resolver_code,
is_type_inferred: true,
is_async: false,
default,
span,
})
})
.collect::<Vec<_>>();
proc_macro_error::abort_if_dirty();
if fields.is_empty() {
error.not_empty(ast_span);
}
if let Some(duplicates) =
crate::util::duplicate::Duplicate::find_by_key(&fields, |field| &field.name)
{
error.duplicate(duplicates.iter())
}
if !attrs.interfaces.is_empty() {
attrs.interfaces.iter().for_each(|elm| {
error.unsupported_attribute(elm.span(), UnsupportedAttribute::Interface)
});
}
if let Some(duplicates) =
crate::util::duplicate::Duplicate::find_by_key(&fields, |field| field.name.as_str())
{
error.duplicate(duplicates.iter());
}
if !attrs.is_internal && name.starts_with("__") {
error.no_double_underscore(if let Some(name) = attrs.name {
name.span_ident()
} else {
ident.span()
});
}
proc_macro_error::abort_if_dirty();
let definition = util::GraphQLTypeDefiniton {
name,
_type: syn::parse_str(&ast.ident.to_string()).unwrap(),
context: attrs.context.map(SpanContainer::into_inner),
scalar: attrs.scalar.map(SpanContainer::into_inner),
description: attrs.description.map(SpanContainer::into_inner),
fields,
generics: ast.generics,
interfaces: vec![],
include_type_generics: true,
generic_scalar: true,
no_async: attrs.no_async.is_some(),
};
Ok(definition.into_input_object_tokens())
} | args: Vec::new(), | random_line_split |
config.js | /**
* #config
*
* Copyright (c)2011, by Branko Vukelic
*
* Configuration methods and settings for Postfinance. All startup configuration
* settings are set using the `config.configure()` and `config.option()`
* methods. Most options can only be set once, and subsequent attempts to set
* them will result in an error. To avoid this, pass the
* `allowMultipleSetOption` option to `config.configure()` and set it to
* `true`. (The option has a long name to prevent accidental usage.)
*
* Copyright (c)2014, by Olivier Evalet <[email protected]>
* Copyright (c)2011, by Branko Vukelic <[email protected]>
* Licensed under GPL license (see LICENSE)
*/
var config = exports;
var util = require('util');
var PostFinanceError = require('./error');
var samurayKeyRe = /^[0-9a-f]{4}$/;
var isConfigured = false;
config.POSTFINANCE_VERSION = '0.0.1';
/**
* ## settings
* *Master configuration settings for Postfinance*
*
* The `settings` object contains all the core configuration options that
* affect the way certain things work (or not), and the Postfinance gateway
* credentials. You should _not_ access this object directly. The correct way
* to access and set the settings is through either ``configure()`` or
* ``option()`` methods.
*
* Settings are expected to contain the following keys with their default values:
*
* + _pspid_: Postfinance gateway Merchant Key (default: `''`)
* + _apiPassword_: Postfinance gateway API Password (default: `''`)
* + _apiUser_: Processor (gateway) ID; be sure to set this to a sandbox
* ID for testing (default: `''`)
* + _currency_: Default currency for all transactions (can be overriden by
* specifying the appropriate options in transaction objects)
* + _allowedCurrencies_: Array containing the currencies that can be used
* in transactions. (default: ['CHF'])
* + _sandbox_: All new payment methods will be sandbox payment methods
* (default: false)
* + _enabled_: Whether to actually make requests to gateway (default: true)
* + _debug_: Whether to log to STDOUT; it is highly recommended that
* you disable this in production, to remain PCI compliant, and to
* avoid performance issues (default: true)
* | * treated as a constant (i.e., read-only).
*/
var settings = {};
settings.pmlist=['creditcart','postfinance card','paypal']
settings.pspid = '';
settings.apiPassword = '';
settings.apiUser = '';
settings.currency = 'CHF';
settings.allowedCurrencies = ['CHF'];
settings.shaWithSecret=true; // when true, the secret is appended to the SHA signature string (PostFinance configuration)
settings.operation='RES'
settings.path = {
ecommerce:'/ncol/test/orderstandard_utf8.asp',
order:'/ncol/test/orderdirect_utf8.asp',
maintenance:'/ncol/test/maintenancedirect.asp',
query:'/ncol/test/querydirect_utf8.asp',
};
settings.host = 'e-payment.postfinance.ch';
settings.allowMaxAmount=400.00; // block payments above this amount
settings.sandbox = false;
settings.enabled = true; // Does not make any actual API calls if false
settings.debug = false; // Enables *blocking* debug output to STDOUT
settings.apiVersion = 1; // Don't change this... unless you need to
settings.allowMultipleSetOption = false;
config.reset=function(){
if(process.env.NODE_ENV=='test'){
settings.sandbox = false;
settings.enabled = true;
settings.pspid = '';
settings.apiPassword = '';
settings.apiUser = '';
settings.currency = 'CHF';
settings.allowedCurrencies = ['CHF'];
settings.shaWithSecret=true;
settings.operation='RES'
isConfigured=false;
}
else throw new Error('Reset is not possible here')
}
/**
* ## config.debug(message)
* *Wrapper around `util.debug` to log items in debug mode*
*
* This method is typically used by Postfinance implementation to output debug
* messages. There is no need to call this method outside of Postfinance.
*
* Note that any debug messages output using this function will block
* execution temporarily. It is advised to disable debug setting in production
* to prevent this logger from running.
*
* @param {Object} message Object to be output as a message
* @private
*/
config.debug = debug = function(message) {
if (settings.debug) {
util.debug(message);
}
};
/**
* ## config.configure(opts)
* *Set global Postfinance configuration options*
*
* This method should be used before using any of the Postfinance's functions. It
* sets the options in the `settings` object, and performs basic validation
* of the options before doing so.
*
* Unless you also pass it the `allowMultipleSetOption` option with value set
* to `true`, you will only be able to call this method once. This is done to
* prevent accidental calls to this method to modify critical options that may
* affect the security and/or correct operation of your system.
*
* This method depends on ``config.option()`` method to set the individual
* options.
*
* If an invalid option is passed, it will throw an error.
*
* @param {Object} Configuration options
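*
* @example
* // Illustrative startup call; all credential values are placeholders.
* config.configure({
*   pspid: 'MYSHOP',
*   apiUser: 'api-user',
*   apiPassword: 'secret',
*   currency: 'CHF'
* });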
*/
config.configure = function(opts) {
debug('Configuring Postfinance with: \n' + util.inspect(opts));
if (!opts.pspid || (opts.apiUser && !opts.apiPassword)) {
throw new PostFinanceError('system', 'Incomplete Postfinance API credentials', opts);
}
Object.keys(opts).forEach(function(key) {
config.option(key, opts[key]);
});
isConfigured = true;
if(config.option('shaWithSecret'))
debug("append sha with secret")
//print settings?
//debug("settings "+util.inspect(settings))
};
/**
* ## config.option(name, [value])
* *Returns or sets a single configuration option*
*
* If value is not provided this method returns the value of the named
* configuration option key. Otherwise, it sets the value and returns it.
*
* Setting values can only be set once for most options. An error will be
* thrown if you try to set an option more than once. This restriction exist
* to prevent accidental and/or malicious manipulation of critical Postfinance
* configuration options.
*
* During testing, you may set the `allowMultipleSetOption` to `true` in order
* to enable multiple setting of protected options. Note that once this option
* is set to `false` it can no longer be set to true.
*
* Postfinance API credentials are additionally checked for consistency. If they
* do not appear to be valid keys, an error will be thrown.
*
* @param {String} option Name of the option key
* @param {Object} value New value of the option
* @returns {Object} Value of the `option` key
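*
* @example
* // Illustrative get/set round trip.
* config.option('currency', 'CHF'); // set ('currency' may be set repeatedly)
* config.option('currency'); // get, returns 'CHF'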
*/
config.option = function(option, value) {
if (typeof value !== 'undefined') {
debug('Setting Postfinance key `' + option + '` to `' + value.toString() + '`');
// Do not allow an option to be set twice unless it's `currency`
if (isConfigured &&
!settings.allowMultipleSetOption &&
option !== 'currency') {
throw new PostFinanceError(
'system',
'Option ' + option + ' is already locked',
option);
}
switch (option) {
case 'pspid':
case 'apiPassword':
case 'apiUser':
case 'currency':
case 'shaSecret':
case 'host':
case 'path':
case 'operation':
case 'acceptUrl':
case 'declineUrl':
case 'exceptionUrl':
case 'cancelUrl':
case 'backUrl':
settings[option] = value;
break;
case 'allowMaxAmount':
settings[option] = parseFloat(value)
break;
case 'sandbox':
case 'enabled':
case 'debug':
case 'shaWithSecret':
case 'allowMultipleSetOption':
settings[option] = Boolean(value);
break;
case 'allowedCurrencies':
if (!Array.isArray(value)) {
throw new PostFinanceError('system', 'Allowed currencies must be an array', null);
}
if (value.indexOf(settings.currency) < 0) {
value.push(settings.currency);
}
settings.allowedCurrencies = value;
break;
default:
// Do not allow unknown options to be set
throw new PostFinanceError('system', 'Unrecognized configuration option', option);
}
}
return settings[option];
}; | * Only `currency` option can be set multiple times. All other options can only
* be set once using the ``config.configure()`` method.
*
* The ``apiVersion`` setting is present for convenience and should be | random_line_split
config.js | /**
* #config
*
* Copyright (c)2011, by Branko Vukelic
*
* Configuration methods and settings for Postfinance. All startup configuration
* settings are set using the `config.configure()` and `config.option()`
* methods. Most options can only be set once, and subsequent attempts to set
* them will result in an error. To avoid this, pass the
* `allowMultipleSetOption` option to `config.configure()` and set it to
* `true`. (The option has a long name to prevent accidental usage.)
*
* Copyright (c)2014, by Olivier Evalet <[email protected]>
* Copyright (c)2011, by Branko Vukelic <[email protected]>
* Licensed under GPL license (see LICENSE)
*/
var config = exports;
var util = require('util');
var PostFinanceError = require('./error');
var samurayKeyRe = /^[0-9a-f]{4}$/;
var isConfigured = false;
config.POSTFINANCE_VERSION = '0.0.1';
/**
* ## settings
* *Master configuration settings for Postfinance*
*
* The `settings` object contains all the core configuration options that
* affect the way certain things work (or not), and the Postfinance gateway
* credentials. You should _not_ access this object directly. The correct way
* to access and set the settings is through either ``configure()`` or
* ``option()`` methods.
*
* Settings are expected to contain the following keys with their default values:
*
* + _pspid_: Postfinance gateway Merchant Key (default: `''`)
* + _apiPassword_: Postfinance gateway API Password (default: `''`)
* + _apiUser_: Processor (gateway) ID; be sure to set this to a sandbox
* ID for testing (default: `''`)
* + _currency_: Default currency for all transactions (can be overriden by
* specifying the appropriate options in transaction objects)
* + _allowedCurrencies_: Array containing the currencies that can be used
* in transactions. (default: ['CHF'])
* + _sandbox_: All new payment methods will be sandbox payment methods
* (default: false)
* + _enabled_: Whether to actually make requests to gateway (default: true)
* + _debug_: Whether to log to STDOUT; it is highly recommended that
* you disable this in production, to remain PCI compliant, and to
* avoid performance issues (default: true)
*
* Only `currency` option can be set multiple times. All other options can only
* be set once using the ``config.configure()`` method.
*
* The ``apiVersion`` setting is present for convenience and should be
* treated as a constant (i.e., read-only).
*/
var settings = {};
settings.pmlist=['creditcart','postfinance card','paypal']
settings.pspid = '';
settings.apiPassword = '';
settings.apiUser = '';
settings.currency = 'CHF';
settings.allowedCurrencies = ['CHF'];
settings.shaWithSecret=true; // when true, the secret is appended to the SHA signature string (PostFinance configuration)
settings.operation='RES'
settings.path = {
ecommerce:'/ncol/test/orderstandard_utf8.asp',
order:'/ncol/test/orderdirect_utf8.asp',
maintenance:'/ncol/test/maintenancedirect.asp',
query:'/ncol/test/querydirect_utf8.asp',
};
settings.host = 'e-payment.postfinance.ch';
settings.allowMaxAmount=400.00; // block payments above this amount
settings.sandbox = false;
settings.enabled = true; // Does not make any actual API calls if false
settings.debug = false; // Enables *blocking* debug output to STDOUT
settings.apiVersion = 1; // Don't change this... unless you need to
settings.allowMultipleSetOption = false;
config.reset=function(){
if(process.env.NODE_ENV=='test'){
settings.sandbox = false;
settings.enabled = true;
settings.pspid = '';
settings.apiPassword = '';
settings.apiUser = '';
settings.currency = 'CHF';
settings.allowedCurrencies = ['CHF'];
settings.shaWithSecret=true;
settings.operation='RES'
isConfigured=false;
}
else throw new Error('Reset is not possible here')
}
/**
* ## config.debug(message)
* *Wrapper around `util.debug` to log items in debug mode*
*
* This method is typically used by Postfinance implementation to output debug
* messages. There is no need to call this method outside of Postfinance.
*
* Note that any debug messages output using this function will block
* execution temporarily. It is advised to disable debug setting in production
* to prevent this logger from running.
*
* @param {Object} message Object to be output as a message
* @private
*/
config.debug = debug = function(message) {
if (settings.debug) |
};
/**
* ## config.configure(opts)
* *Set global Postfinance configuration options*
*
* This method should be used before using any of the Postfinance's functions. It
* sets the options in the `settings` object, and performs basic validation
* of the options before doing so.
*
* Unless you also pass it the `allowMultipleSetOption` option with value set
* to `true`, you will only be able to call this method once. This is done to
* prevent accidental calls to this method to modify critical options that may
* affect the security and/or correct operation of your system.
*
* This method depends on ``config.option()`` method to set the individual
* options.
*
* If an invalid option is passed, it will throw an error.
*
* @param {Object} Configuration options
*/
config.configure = function(opts) {
debug('Configuring Postfinance with: \n' + util.inspect(opts));
if (!opts.pspid || (opts.apiUser && !opts.apiPassword)) {
throw new PostFinanceError('system', 'Incomplete Postfinance API credentials', opts);
}
Object.keys(opts).forEach(function(key) {
config.option(key, opts[key]);
});
isConfigured = true;
if(config.option('shaWithSecret'))
debug("append sha with secret")
//print settings?
//debug("settings "+util.inspect(settings))
};
/**
* ## config.option(name, [value])
* *Returns or sets a single configuration option*
*
* If value is not provided this method returns the value of the named
* configuration option key. Otherwise, it sets the value and returns it.
*
* Setting values can only be set once for most options. An error will be
* thrown if you try to set an option more than once. This restriction exist
* to prevent accidental and/or malicious manipulation of critical Postfinance
* configuration options.
*
* During testing, you may set the `allowMultipleSetOption` to `true` in order
* to enable multiple setting of protected options. Note that once this option
* is set to `false` it can no longer be set to true.
*
* Postfinance API credentials are additionally checked for consistency. If they
* do not appear to be valid keys, an error will be thrown.
*
* @param {String} option Name of the option key
* @param {Object} value New value of the option
* @returns {Object} Value of the `option` key
*/
config.option = function(option, value) {
if (typeof value !== 'undefined') {
debug('Setting Postfinance key `' + option + '` to `' + value.toString() + '`');
// Do not allow an option to be set twice unless it's `currency`
if (isConfigured &&
!settings.allowMultipleSetOption &&
option !== 'currency') {
throw new PostFinanceError(
'system',
'Option ' + option + ' is already locked',
option);
}
switch (option) {
case 'pspid':
case 'apiPassword':
case 'apiUser':
case 'currency':
case 'shaSecret':
case 'host':
case 'path':
case 'operation':
case 'acceptUrl':
case 'declineUrl':
case 'exceptionUrl':
case 'cancelUrl':
case 'backUrl':
settings[option] = value;
break;
case 'allowMaxAmount':
settings[option] = parseFloat(value)
break;
case 'sandbox':
case 'enabled':
case 'debug':
case 'shaWithSecret':
case 'allowMultipleSetOption':
settings[option] = Boolean(value);
break;
case 'allowedCurrencies':
if (!Array.isArray(value)) {
throw new PostFinanceError('system', 'Allowed currencies must be an array', null);
}
if (value.indexOf(settings.currency) < 0) {
value.push(settings.currency);
}
settings.allowedCurrencies = value;
break;
default:
// Do not allow unknown options to be set
throw new PostFinanceError('system', 'Unrecognized configuration option', option);
}
}
return settings[option];
};
| {
util.debug(message);
} | conditional_block |
get.rs | extern crate tftp;
use std::io::BufWriter;
use std::fs::OpenOptions;
use std::path::Path;
use std::net::{SocketAddr, IpAddr, Ipv4Addr};
use std::process::exit;
use std::env;
use tftp::client::get;
use tftp::packet::Mode;
fn | () {
let args: Vec<_> = env::args().collect();
if args.len() != 2 {
println!("Usage: {} PATH", args.get(0).unwrap());
return
}
let file_path = args[1].clone();
let mut file_options = OpenOptions::new();
file_options.truncate(true).create(true).write(true);
let file = match file_options.open(Path::new("result")) {
Ok(f) => f,
Err(_) => {
exit(1);
},
};
let mut writer = BufWriter::new(file);
get(&Path::new(&file_path), Mode::Octet, &mut writer);
}
| main | identifier_name |
get.rs | extern crate tftp;
use std::io::BufWriter;
use std::fs::OpenOptions;
use std::path::Path;
use std::net::{SocketAddr, IpAddr, Ipv4Addr};
use std::process::exit;
use std::env;
use tftp::client::get;
use tftp::packet::Mode;
fn main() {
let args: Vec<_> = env::args().collect();
if args.len() != 2 {
println!("Usage: {} PATH", args.get(0).unwrap());
return
}
let file_path = args[1].clone();
let mut file_options = OpenOptions::new();
file_options.truncate(true).create(true).write(true);
let file = match file_options.open(Path::new("result")) {
Ok(f) => f,
Err(_) => {
exit(1);
},
};
let mut writer = BufWriter::new(file); | get(&Path::new(&file_path), Mode::Octet, &mut writer);
} | random_line_split |
|
get.rs | extern crate tftp;
use std::io::BufWriter;
use std::fs::OpenOptions;
use std::path::Path;
use std::net::{SocketAddr, IpAddr, Ipv4Addr};
use std::process::exit;
use std::env;
use tftp::client::get;
use tftp::packet::Mode;
fn main() | {
let args: Vec<_> = env::args().collect();
if args.len() != 2 {
println!("Usage: {} PATH", args.get(0).unwrap());
return
}
let file_path = args[1].clone();
let mut file_options = OpenOptions::new();
file_options.truncate(true).create(true).write(true);
let file = match file_options.open(Path::new("result")) {
Ok(f) => f,
Err(_) => {
exit(1);
},
};
let mut writer = BufWriter::new(file);
get(&Path::new(&file_path), Mode::Octet, &mut writer);
} | identifier_body |
|
vcr.py | # Copyright 2016 Osvaldo Santana Neto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from vcr import VCR
USER_REGEX = re.compile(r'<usuario>\w+</usuario>')
PASS_REGEX = re.compile(r'<senha>.*</senha>')
def | (request):
if not request.body:
return request
body = request.body.decode()
body = USER_REGEX.sub(r'<usuario>teste</usuario>', body)
body = PASS_REGEX.sub(r'<senha>****</senha>', body)
request.body = body.encode()
return request
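# Illustrative before/after for the scrubbing above (bodies assumed):
#   in:  b'<usuario>realuser</usuario><senha>s3cret</senha>'
#   out: b'<usuario>teste</usuario><senha>****</senha>'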
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), "fixtures")
vcr = VCR(
record_mode='once',
serializer='yaml',
cassette_library_dir=os.path.join(FIXTURES_DIR, 'cassettes'),
path_transformer=VCR.ensure_suffix('.yaml'),
match_on=['method'],
before_record_request=replace_auth,
)
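# Hypothetical use of the `vcr` instance above in a test module (cassette
# and test names are assumptions, not taken from this code):
#
#   from .vcr import vcr
#
#   @vcr.use_cassette('request_tracking')
#   def test_request_tracking():
#       ...  # HTTP traffic is recorded once, then replayed from the YAML cassette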
| replace_auth | identifier_name |
vcr.py | # Copyright 2016 Osvaldo Santana Neto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from vcr import VCR
USER_REGEX = re.compile(r'<usuario>\w+</usuario>')
PASS_REGEX = re.compile(r'<senha>.*</senha>')
def replace_auth(request):
|
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), "fixtures")
vcr = VCR(
record_mode='once',
serializer='yaml',
cassette_library_dir=os.path.join(FIXTURES_DIR, 'cassettes'),
path_transformer=VCR.ensure_suffix('.yaml'),
match_on=['method'],
before_record_request=replace_auth,
)
| if not request.body:
return request
body = request.body.decode()
body = USER_REGEX.sub(r'<usuario>teste</usuario>', body)
body = PASS_REGEX.sub(r'<senha>****</senha>', body)
request.body = body.encode()
return request | identifier_body |
vcr.py | # Copyright 2016 Osvaldo Santana Neto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from vcr import VCR
USER_REGEX = re.compile(r'<usuario>\w+</usuario>')
PASS_REGEX = re.compile(r'<senha>.*</senha>')
def replace_auth(request):
if not request.body:
return request
body = request.body.decode()
body = USER_REGEX.sub(r'<usuario>teste</usuario>', body)
body = PASS_REGEX.sub(r'<senha>****</senha>', body)
request.body = body.encode()
return request
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), "fixtures")
vcr = VCR(
record_mode='once',
serializer='yaml',
cassette_library_dir=os.path.join(FIXTURES_DIR, 'cassettes'), | ) | path_transformer=VCR.ensure_suffix('.yaml'),
match_on=['method'],
before_record_request=replace_auth, | random_line_split |
vcr.py | # Copyright 2016 Osvaldo Santana Neto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from vcr import VCR
USER_REGEX = re.compile(r'<usuario>\w+</usuario>')
PASS_REGEX = re.compile(r'<senha>.*</senha>')
def replace_auth(request):
if not request.body:
|
body = request.body.decode()
body = USER_REGEX.sub(r'<usuario>teste</usuario>', body)
body = PASS_REGEX.sub(r'<senha>****</senha>', body)
request.body = body.encode()
return request
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), "fixtures")
vcr = VCR(
record_mode='once',
serializer='yaml',
cassette_library_dir=os.path.join(FIXTURES_DIR, 'cassettes'),
path_transformer=VCR.ensure_suffix('.yaml'),
match_on=['method'],
before_record_request=replace_auth,
)
| return request | conditional_block |
root.d.ts | import PreviousMap from './previous-map';
import Container from './container';
import * as postcss from './postcss';
import Result from './result';
import Node from './node';
export default class | extends Container implements postcss.Root {
/**
* Returns a string representing the node's type. Possible values are
* root, atrule, rule, decl or comment.
*/
type: string;
rawCache: {
[key: string]: any;
};
/**
* Represents a CSS file and contains all its parsed nodes.
*/
constructor(defaults?: postcss.RootNewProps);
/**
* @param overrides New properties to override in the clone.
* @returns A clone of this node. The node and its (cloned) children will
* have a clean parent and code style properties.
*/
clone(overrides?: Object): Root;
toJSON(): postcss.JsonRoot;
/**
* Removes child from the root node, and the parent properties of node and
* its children.
* @param child Child or child's index.
* @returns This root node for chaining.
*/
removeChild(child: Node | number): this;
protected normalize(node: Node | string, sample: Node, type?: string): Node[];
protected normalize(props: postcss.AtRuleNewProps | postcss.RuleNewProps | postcss.DeclarationNewProps | postcss.CommentNewProps, sample: Node, type?: string): Node[];
/**
* @returns A Result instance representing the root's CSS.
*/
toResult(options?: {
/**
* The path where you'll put the output CSS file. You should always
* set "to" to generate correct source maps.
*/
to?: string;
map?: postcss.SourceMapOptions;
}): Result;
/**
* Deprecated. Use Root#removeChild.
*/
remove(child?: Node | number): Root;
/**
* Deprecated. Use Root#source.input.map.
*/
prevMap(): PreviousMap;
}
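// Illustrative use of toResult() above (file names are placeholders):
//
//   const result = root.toResult({ to: 'app.css', map: { inline: false } });
//   // result.css holds the stringified CSS, result.map the source map.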
| Root | identifier_name |
root.d.ts | import PreviousMap from './previous-map';
import Container from './container';
import * as postcss from './postcss';
import Result from './result';
import Node from './node';
export default class Root extends Container implements postcss.Root {
/**
* Returns a string representing the node's type. Possible values are
* root, atrule, rule, decl or comment.
*/
type: string;
rawCache: {
[key: string]: any;
};
/**
* Represents a CSS file and contains all its parsed nodes.
*/
constructor(defaults?: postcss.RootNewProps);
/**
* @param overrides New properties to override in the clone.
* @returns A clone of this node. The node and its (cloned) children will
* have a clean parent and code style properties.
*/
clone(overrides?: Object): Root;
toJSON(): postcss.JsonRoot;
/**
* Removes child from the root node, and the parent properties of node and
* its children.
* @param child Child or child's index.
* @returns This root node for chaining.
*/
removeChild(child: Node | number): this;
protected normalize(node: Node | string, sample: Node, type?: string): Node[];
protected normalize(props: postcss.AtRuleNewProps | postcss.RuleNewProps | postcss.DeclarationNewProps | postcss.CommentNewProps, sample: Node, type?: string): Node[];
/**
* @returns A Result instance representing the root's CSS.
*/
toResult(options?: {
/**
* The path where you'll put the output CSS file. You should always
* set "to" to generate correct source maps.
*/
to?: string;
map?: postcss.SourceMapOptions;
}): Result;
/** | */
remove(child?: Node | number): Root;
/**
* Deprecated. Use Root#source.input.map.
*/
prevMap(): PreviousMap;
} | * Deprecated. Use Root#removeChild. | random_line_split |
config.py | #!/usr/bin/env python
#
# Original filename: config.py
#
# Author: Tim Brandt
# Email: [email protected]
# Date: August 2011
#
# Summary: Set configuration parameters to sensible values.
#
import re
from subprocess import * |
def config(nframes, framesize):
###################################################################
# Fetch the total amount of physical system memory in bytes.
# This is the second entry on the second line of the standard
# output of the 'free' command.
###################################################################
print "\nGetting system parameters, setting pipeline execution parameters..."
osver = Popen(["uname", "-a"], stdout=PIPE).stdout.read()
if osver.startswith("Linux"):
print "You are running Linux."
elif osver.startswith("Darwin"):
print "You are running Mac OS-X."
else:
print "Your operating system is not recognized."
if osver.startswith("Linux"):
mem = Popen(["free", "-b"], stdout=PIPE).stdout.read()
mem = int(mem.split('\n')[1].split()[1])
elif osver.startswith("Darwin"):
mem = Popen(["vm_stat"], stdout=PIPE).stdout.read().split('\n')
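# vm_stat reports page counts such as "Pages active: 123456.".
# Multiplying each interesting counter by the page size parsed from the
# header line recovers bytes; they are summed into totmem below.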
blocksize = re.search('.*size of ([0-9]+) bytes.*', mem[0]).group(1)
totmem = 0.
for line in mem:
if np.any(["Pages free:" in line, "Pages active:" in line,
"Pages inactive:" in line, "Pages speculative:" in line,
"Pages wired down:" in line]):
totmem += float(line.split(':')[1]) * float(blocksize)
mem = int(totmem)
ncpus = multiprocessing.cpu_count()
hostname = Popen("hostname", stdout=PIPE).stdout.read().split()[0]
print "\n You are running on " + hostname + "."
print " You have " + str(mem / 2**20) + " megabytes of memory and " + \
str(ncpus) + " threads available."
datasize = framesize * nframes * 4
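# Illustrative numbers for the line above (assumed, not from a real run):
# 100 frames of 2048x2048 float32 pixels occupy
# 2048 * 2048 * 100 * 4 bytes, i.e. about 1.6 GB, so storeall below is
# True only when physical RAM exceeds roughly 8 GB.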
print " The dataset consists of " + str(nframes) + " frames, " + \
str(datasize * 100 / mem) + "% of your physical RAM."
storeall = False
if datasize * 100 / mem < 20:
storeall = True
print " --> You have enough RAM to store all data."
print " The pipeline will not need to write all intermediate files."
else:
print " --> You do not have enough RAM to store all data."
print " The pipeline will need to write all intermediate files"
print " and do the reduction in pieces."
return mem, ncpus, storeall | import multiprocessing
import numpy as np | random_line_split |
config.py | #!/usr/bin/env python
#
# Original filename: config.py
#
# Author: Tim Brandt
# Email: [email protected]
# Date: August 2011
#
# Summary: Set configuration parameters to sensible values.
#
import re
from subprocess import *
import multiprocessing
import numpy as np
def config(nframes, framesize):
###################################################################
# Fetch the total amount of physical system memory in bytes.
# This is the second entry on the second line of the standard
# output of the 'free' command.
###################################################################
| print "\nGetting system parameters, setting pipeline execution parameters..."
osver = Popen(["uname", "-a"], stdout=PIPE).stdout.read()
if osver.startswith("Linux"):
print "You are running Linux."
elif osver.startswith("Darwin"):
print "You are running Mac OS-X."
else:
print "Your operating system is not recognized."
if osver.startswith("Linux"):
mem = Popen(["free", "-b"], stdout=PIPE).stdout.read()
mem = int(mem.split('\n')[1].split()[1])
elif osver.startswith("Darwin"):
mem = Popen(["vm_stat"], stdout=PIPE).stdout.read().split('\n')
blocksize = re.search('.*size of ([0-9]+) bytes.*', mem[0]).group(1)
totmem = 0.
for line in mem:
if np.any(["Pages free:" in line, "Pages active:" in line,
"Pages inactive:" in line, "Pages speculative:" in line,
"Pages wired down:" in line]):
totmem += float(line.split(':')[1]) * float(blocksize)
mem = int(totmem)
ncpus = multiprocessing.cpu_count()
hostname = Popen("hostname", stdout=PIPE).stdout.read().split()[0]
print "\n You are running on " + hostname + "."
print " You have " + str(mem / 2**20) + " megabytes of memory and " + \
str(ncpus) + " threads available."
datasize = framesize * nframes * 4
print " The dataset consists of " + str(nframes) + " frames, " + \
str(datasize * 100 / mem) + "% of your physical RAM."
storeall = False
if datasize * 100 / mem < 20:
storeall = True
print " --> You have enough RAM to store all data."
print " The pipeline will not need to write all intermediate files."
else:
print " --> You do not have enough RAM to store all data."
print " The pipeline will need to write all intermediate files"
print " and do the reduction in pieces."
return mem, ncpus, storeall | identifier_body |
|
config.py | #!/usr/bin/env python
#
# Original filename: config.py
#
# Author: Tim Brandt
# Email: [email protected]
# Date: August 2011
#
# Summary: Set configuration parameters to sensible values.
#
import re
from subprocess import *
import multiprocessing
import numpy as np
def | (nframes, framesize):
###################################################################
# Fetch the total amount of physical system memory in bytes.
# This is the second entry on the second line of the standard
# output of the 'free' command.
###################################################################
print "\nGetting system parameters, setting pipeline execution parameters..."
osver = Popen(["uname", "-a"], stdout=PIPE).stdout.read()
if osver.startswith("Linux"):
print "You are running Linux."
elif osver.startswith("Darwin"):
print "You are running Mac OS-X."
else:
print "Your operating system is not recognized."
if osver.startswith("Linux"):
mem = Popen(["free", "-b"], stdout=PIPE).stdout.read()
mem = int(mem.split('\n')[1].split()[1])
elif osver.startswith("Darwin"):
mem = Popen(["vm_stat"], stdout=PIPE).stdout.read().split('\n')
blocksize = re.search('.*size of ([0-9]+) bytes.*', mem[0]).group(1)
totmem = 0.
for line in mem:
if np.any(["Pages free:" in line, "Pages active:" in line,
"Pages inactive:" in line, "Pages speculative:" in line,
"Pages wired down:" in line]):
totmem += float(line.split(':')[1]) * float(blocksize)
mem = int(totmem)
ncpus = multiprocessing.cpu_count()
hostname = Popen("hostname", stdout=PIPE).stdout.read().split()[0]
print "\n You are running on " + hostname + "."
print " You have " + str(mem / 2**20) + " megabytes of memory and " + \
str(ncpus) + " threads available."
datasize = framesize * nframes * 4
print " The dataset consists of " + str(nframes) + " frames, " + \
str(datasize * 100 / mem) + "% of your physical RAM."
storeall = False
if datasize * 100 / mem < 20:
storeall = True
print " --> You have enough RAM to store all data."
print " The pipeline will not need to write all intermediate files."
else:
print " --> You do not have enough RAM to store all data."
print " The pipeline will need to write all intermediate files"
print " and do the reduction in pieces."
return mem, ncpus, storeall
| config | identifier_name |
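# A hedged usage sketch of config() as a pipeline driver might call it; the
# frame count and frame size are made-up values, not taken from the source:
nframes, framesize = 500, 2048 * 2048  # hypothetical: 500 frames of 2048x2048 pixels
mem, ncpus, storeall = config(nframes, framesize)
# storeall is True when datasize stays under 20% of physical RAM, meaning the
# reduction can keep all frames in memory and skip the intermediate files.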
config.py | #!/usr/bin/env python
#
# Original filename: config.py
#
# Author: Tim Brandt
# Email: [email protected]
# Date: August 2011
#
# Summary: Set configuration parameters to sensible values.
#
import re
from subprocess import *
import multiprocessing
import numpy as np
def config(nframes, framesize):
###################################################################
# Fetch the total amount of physical system memory in bytes.
# This is the second entry on the second line of the standard
# output of the 'free' command.
###################################################################
print "\nGetting system parameters, setting pipeline execution parameters..."
osver = Popen(["uname", "-a"], stdout=PIPE).stdout.read()
if osver.startswith("Linux"):
print "You are running Linux."
elif osver.startswith("Darwin"):
|
else:
print "Your operating system is not recognized."
if osver.startswith("Linux"):
mem = Popen(["free", "-b"], stdout=PIPE).stdout.read()
mem = int(mem.split('\n')[1].split()[1])
elif osver.startswith("Darwin"):
mem = Popen(["vm_stat"], stdout=PIPE).stdout.read().split('\n')
blocksize = re.search('.*size of ([0-9]+) bytes.*', mem[0]).group(1)
totmem = 0.
for line in mem:
if np.any(["Pages free:" in line, "Pages active:" in line,
"Pages inactive:" in line, "Pages speculative:" in line,
"Pages wired down:" in line]):
totmem += float(line.split(':')[1]) * float(blocksize)
mem = int(totmem)
ncpus = multiprocessing.cpu_count()
hostname = Popen("hostname", stdout=PIPE).stdout.read().split()[0]
print "\n You are running on " + hostname + "."
print " You have " + str(mem / 2**20) + " megabytes of memory and " + \
str(ncpus) + " threads available."
datasize = framesize * nframes * 4
print " The dataset consists of " + str(nframes) + " frames, " + \
str(datasize * 100 / mem) + "% of your physical RAM."
storeall = False
if datasize * 100 / mem < 20:
storeall = True
print " --> You have enough RAM to store all data."
print " The pipeline will not need to write all intermediate files."
else:
print " --> You do not have enough RAM to store all data."
print " The pipeline will need to write all intermediate files"
print " and do the reduction in pieces."
return mem, ncpus, storeall
| print "You are running Mac OS-X." | conditional_block |
Final_P4_1and2.py | #Final Exam Problem 4-2
import random, pylab
# You are given this function
def getMeanAndStd(X):
mean = sum(X)/float(len(X))
tot = 0.0
for x in X:
tot += (x - mean)**2
std = (tot/len(X))**0.5
return mean, std
# You are given this class
class Die(object):
def __init__(self, valList):
""" valList is not empty """
self.possibleVals = valList[:]
def roll(self):
return random.choice(self.possibleVals)
# Implement this -- Coding Part 1 of 2
def makeHistogram(values, numBins, xLabel, yLabel, title=None):
"""
- values, a sequence of numbers
- numBins, a positive int
- xLabel, yLabel, title, are strings
- Produces a histogram of values with numBins bins and the indicated labels
      for the x and y axes
- If title is provided by caller, puts that title on the figure and otherwise
does not title the figure
"""
pylab.hist(values, numBins)
pylab.xlabel(xLabel)
pylab.ylabel(yLabel)
    if title is not None:
pylab.title(title)
pylab.show()
# Implement this -- Coding Part 2 of 2
def getAverage(die, numRolls, numTrials):
"""
- die, a Die
- numRolls, numTrials, are positive ints
- Calculates the expected mean value of the longest run of a number
over numTrials runs of numRolls rolls.
- Calls makeHistogram to produce a histogram of the longest runs for all
the trials. There should be 10 bins in the histogram
- Choose appropriate labels for the x and y axes.
- Returns the mean calculated to 3 decimal places
"""
longest_runs = []
for x in range(numTrials):
rolls = [die.roll() for x in range(numRolls)]
run = 0
longest = 0
for i in range(len(rolls)):
if i == 0:
run += 1
longest += 1
else:
if rolls[i] == rolls[i-1]:
|
else:
run = 1
longest_runs.append(longest)
makeHistogram(longest_runs, 10, 'Longest Run', 'Frequency', \
'Frequency of Longest Consecutive Dice Rolls')
    return round(sum(longest_runs)/len(longest_runs), 3)  # 3 decimal places, as the docstring promises
# One test case
print(getAverage(Die([1,2,3,4,5,6,6,6,7]), 1, 1000)) | run += 1
if run > longest:
longest = run | conditional_block |
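# The run-length scan above can be cross-checked against itertools.groupby;
# this equivalent is illustrative only and not part of the exercise solution:
from itertools import groupby
def longest_run(rolls):
    # length of the longest block of equal consecutive values
    return max(sum(1 for _ in group) for _, group in groupby(rolls))
assert longest_run([6, 6, 2, 6, 6, 6, 1]) == 3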
Final_P4_1and2.py | #Final Exam Problem 4-2
import random, pylab
# You are given this function
def getMeanAndStd(X):
mean = sum(X)/float(len(X))
tot = 0.0
for x in X:
tot += (x - mean)**2
std = (tot/len(X))**0.5
return mean, std
# You are given this class
class Die(object):
def __init__(self, valList):
""" valList is not empty """
self.possibleVals = valList[:]
def roll(self):
|
# Implement this -- Coding Part 1 of 2
def makeHistogram(values, numBins, xLabel, yLabel, title=None):
"""
- values, a sequence of numbers
- numBins, a positive int
- xLabel, yLabel, title, are strings
- Produces a histogram of values with numBins bins and the indicated labels
      for the x and y axes
- If title is provided by caller, puts that title on the figure and otherwise
does not title the figure
"""
pylab.hist(values, numBins)
pylab.xlabel(xLabel)
pylab.ylabel(yLabel)
    if title is not None:
pylab.title(title)
pylab.show()
# Implement this -- Coding Part 2 of 2
def getAverage(die, numRolls, numTrials):
"""
- die, a Die
- numRolls, numTrials, are positive ints
- Calculates the expected mean value of the longest run of a number
over numTrials runs of numRolls rolls.
- Calls makeHistogram to produce a histogram of the longest runs for all
the trials. There should be 10 bins in the histogram
- Choose appropriate labels for the x and y axes.
- Returns the mean calculated to 3 decimal places
"""
longest_runs = []
for x in range(numTrials):
rolls = [die.roll() for x in range(numRolls)]
run = 0
longest = 0
for i in range(len(rolls)):
if i == 0:
run += 1
longest += 1
else:
if rolls[i] == rolls[i-1]:
run += 1
if run > longest:
longest = run
else:
run = 1
longest_runs.append(longest)
makeHistogram(longest_runs, 10, 'Longest Run', 'Frequency', \
'Frequency of Longest Consecutive Dice Rolls')
    return round(sum(longest_runs)/len(longest_runs), 3)  # 3 decimal places, as the docstring promises
# One test case
print(getAverage(Die([1,2,3,4,5,6,6,6,7]), 1, 1000)) | return random.choice(self.possibleVals) | identifier_body |
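# A quick, hedged illustration of the two given pieces above; for a fair
# six-sided die the sample mean approaches 3.5 and the std approaches ~1.708:
d = Die([1, 2, 3, 4, 5, 6])
sample = [d.roll() for _ in range(10000)]
mean, std = getMeanAndStd(sample)
assert 3.3 < mean < 3.7  # loose bound; the estimate is stochastic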
Final_P4_1and2.py | #Final Exam Problem 4-2
import random, pylab
# You are given this function
def getMeanAndStd(X):
mean = sum(X)/float(len(X))
tot = 0.0
for x in X:
tot += (x - mean)**2
std = (tot/len(X))**0.5
return mean, std
# You are given this class
class Die(object):
def __init__(self, valList):
""" valList is not empty """
self.possibleVals = valList[:]
def roll(self):
return random.choice(self.possibleVals)
# Implement this -- Coding Part 1 of 2
def makeHistogram(values, numBins, xLabel, yLabel, title=None):
"""
- values, a sequence of numbers
- numBins, a positive int
- xLabel, yLabel, title, are strings
- Produces a histogram of values with numBins bins and the indicated labels
      for the x and y axes
- If title is provided by caller, puts that title on the figure and otherwise
does not title the figure
"""
pylab.hist(values, numBins)
pylab.xlabel(xLabel)
pylab.ylabel(yLabel)
    if title is not None:
pylab.title(title)
pylab.show()
# Implement this -- Coding Part 2 of 2
def | (die, numRolls, numTrials):
"""
- die, a Die
- numRolls, numTrials, are positive ints
- Calculates the expected mean value of the longest run of a number
over numTrials runs of numRolls rolls.
- Calls makeHistogram to produce a histogram of the longest runs for all
the trials. There should be 10 bins in the histogram
- Choose appropriate labels for the x and y axes.
- Returns the mean calculated to 3 decimal places
"""
longest_runs = []
for x in range(numTrials):
rolls = [die.roll() for x in range(numRolls)]
run = 0
longest = 0
for i in range(len(rolls)):
if i == 0:
run += 1
longest += 1
else:
if rolls[i] == rolls[i-1]:
run += 1
if run > longest:
longest = run
else:
run = 1
longest_runs.append(longest)
makeHistogram(longest_runs, 10, 'Longest Run', 'Frequency', \
'Frequency of Longest Consecutive Dice Rolls')
    return round(sum(longest_runs)/len(longest_runs), 3)  # 3 decimal places, as the docstring promises
# One test case
print(getAverage(Die([1,2,3,4,5,6,6,6,7]), 1, 1000)) | getAverage | identifier_name |
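# A deterministic hand-trace of the same run-length update rule implemented
# above, on a fixed sequence instead of random die rolls:
rolls = [6, 6, 2, 6, 6, 6, 1]
run = longest = 1
for prev, cur in zip(rolls, rolls[1:]):
    run = run + 1 if cur == prev else 1
    longest = max(longest, run)
assert longest == 3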
Final_P4_1and2.py | #Final Exam Problem 4-2
import random, pylab
# You are given this function
def getMeanAndStd(X):
mean = sum(X)/float(len(X))
tot = 0.0
for x in X: | std = (tot/len(X))**0.5
return mean, std
# You are given this class
class Die(object):
def __init__(self, valList):
""" valList is not empty """
self.possibleVals = valList[:]
def roll(self):
return random.choice(self.possibleVals)
# Implement this -- Coding Part 1 of 2
def makeHistogram(values, numBins, xLabel, yLabel, title=None):
"""
- values, a sequence of numbers
- numBins, a positive int
- xLabel, yLabel, title, are strings
- Produces a histogram of values with numBins bins and the indicated labels
      for the x and y axes
- If title is provided by caller, puts that title on the figure and otherwise
does not title the figure
"""
pylab.hist(values, numBins)
pylab.xlabel(xLabel)
pylab.ylabel(yLabel)
    if title is not None:
pylab.title(title)
pylab.show()
# Implement this -- Coding Part 2 of 2
def getAverage(die, numRolls, numTrials):
"""
- die, a Die
- numRolls, numTrials, are positive ints
- Calculates the expected mean value of the longest run of a number
over numTrials runs of numRolls rolls.
- Calls makeHistogram to produce a histogram of the longest runs for all
the trials. There should be 10 bins in the histogram
- Choose appropriate labels for the x and y axes.
- Returns the mean calculated to 3 decimal places
"""
longest_runs = []
for x in range(numTrials):
rolls = [die.roll() for x in range(numRolls)]
run = 0
longest = 0
for i in range(len(rolls)):
if i == 0:
run += 1
longest += 1
else:
if rolls[i] == rolls[i-1]:
run += 1
if run > longest:
longest = run
else:
run = 1
longest_runs.append(longest)
makeHistogram(longest_runs, 10, 'Longest Run', 'Frequency', \
'Frequency of Longest Consecutive Dice Rolls')
    return round(sum(longest_runs)/len(longest_runs), 3)  # 3 decimal places, as the docstring promises
# One test case
print(getAverage(Die([1,2,3,4,5,6,6,6,7]), 1, 1000)) | tot += (x - mean)**2 | random_line_split |
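# getMeanAndStd above computes the population standard deviation; a classic
# worked example where the numbers come out exact:
mean, std = getMeanAndStd([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
assert (mean, std) == (5.0, 2.0)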
htmlTest.js | /*globals describe, it, beforeEach, afterEach */
var assert = require("assert"),
fs = require('fs'),
_ = require('underscore'),
path = require("path"),
Html = require("../lib/html.js"),
describeReporting = require("../../../test/helpers.js").describeReporting;
describeReporting(path.join(__dirname, "../../"), ["html"], function(reporter) {
describe('html', function() {
beforeEach(function() {
this.html = new Html(reporter);
});
it('should be rendered', function(done) {
var request = {
options: { type: "html", timeout: 5000 },
reporter: reporter,
template: { content: "Hey" },
data: null
};
var response = { headers: {}};
_.findWhere(reporter.extensionsManager.recipes, { name: "html" }).execute(request, response).then(function () {
assert.equal("Hey", response.result); | });
});
}); | done();
}); | random_line_split |
profile-addresses.component.ts | /// <reference types="@types/google-maps" />
import { AfterViewInit, ChangeDetectionStrategy, Component, ElementRef, Injector, Input, OnInit, ViewChild } from '@angular/core';
import { AddressView } from 'app/api/models';
import { Breakpoint } from 'app/core/layout.service';
import { BaseComponent } from 'app/shared/base.component';
import { AddressHelperService } from 'app/ui/core/address-helper.service';
import { MapsService } from 'app/ui/core/maps.service';
import { UiLayoutService } from 'app/ui/core/ui-layout.service';
import { CountriesResolve } from 'app/ui/countries.resolve';
/**
* Shows the user / advertisement address(es) in the view page
*/
@Component({
selector: 'profile-addresses',
templateUrl: 'profile-addresses.component.html',
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class ProfileAddressesComponent extends BaseComponent implements OnInit, AfterViewInit {
constructor(
injector: Injector,
private uiLayout: UiLayoutService,
public addressHelper: AddressHelperService,
public maps: MapsService,
public countriesResolve: CountriesResolve,
) {
super(injector);
}
@ViewChild('mapContainer') mapContainer: ElementRef;
map: google.maps.Map;
private allInfoWindows: google.maps.InfoWindow[] = [];
@Input() addresses: AddressView[];
locatedAddresses: AddressView[];
ngOnInit() {
super.ngOnInit();
this.locatedAddresses = (this.addresses || []).filter(a => a.location);
}
ngAfterViewInit() {
// We'll only use a dynamic map with multiple located addresses
if (this.locatedAddresses.length > 1) {
this.addSub(this.maps.ensureScriptLoaded().subscribe(() => this.showMap()));
}
}
closeAllInfoWindows() {
this.allInfoWindows.forEach(iw => iw.close());
}
singleMapWidth(breakpoints: Set<Breakpoint>): number | 'auto' { | if (breakpoints.has('xl')) {
return 400;
} else if (breakpoints.has('gt-xs')) {
return 340;
} else {
return 'auto';
}
}
private showMap() {
const container = this.mapContainer.nativeElement as HTMLElement;
this.map = new google.maps.Map(container, {
mapTypeControl: false,
streetViewControl: false,
minZoom: 2,
maxZoom: 17,
styles: this.uiLayout.googleMapStyles,
});
const bounds = new google.maps.LatLngBounds();
this.locatedAddresses.map(a => {
const marker = new google.maps.Marker({
title: a.name,
icon: this.dataForFrontendHolder.dataForFrontend.mapMarkerUrl,
position: new google.maps.LatLng(a.location.latitude, a.location.longitude),
});
bounds.extend(marker.getPosition());
marker.addListener('click', () => {
this.closeAllInfoWindows();
let infoWindow = marker['infoWindow'] as google.maps.InfoWindow;
if (!infoWindow) {
infoWindow = new google.maps.InfoWindow({
content: marker.getTitle(),
});
this.allInfoWindows.push(infoWindow);
}
infoWindow.open(marker.getMap(), marker);
});
marker.setMap(this.map);
});
this.map.addListener('click', () => this.closeAllInfoWindows());
this.map.fitBounds(bounds);
}
} | random_line_split |
|
profile-addresses.component.ts | /// <reference types="@types/google-maps" />
import { AfterViewInit, ChangeDetectionStrategy, Component, ElementRef, Injector, Input, OnInit, ViewChild } from '@angular/core';
import { AddressView } from 'app/api/models';
import { Breakpoint } from 'app/core/layout.service';
import { BaseComponent } from 'app/shared/base.component';
import { AddressHelperService } from 'app/ui/core/address-helper.service';
import { MapsService } from 'app/ui/core/maps.service';
import { UiLayoutService } from 'app/ui/core/ui-layout.service';
import { CountriesResolve } from 'app/ui/countries.resolve';
/**
* Shows the user / advertisement address(es) in the view page
*/
@Component({
selector: 'profile-addresses',
templateUrl: 'profile-addresses.component.html',
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class ProfileAddressesComponent extends BaseComponent implements OnInit, AfterViewInit {
constructor(
injector: Injector,
private uiLayout: UiLayoutService,
public addressHelper: AddressHelperService,
public maps: MapsService,
public countriesResolve: CountriesResolve,
) |
@ViewChild('mapContainer') mapContainer: ElementRef;
map: google.maps.Map;
private allInfoWindows: google.maps.InfoWindow[] = [];
@Input() addresses: AddressView[];
locatedAddresses: AddressView[];
ngOnInit() {
super.ngOnInit();
this.locatedAddresses = (this.addresses || []).filter(a => a.location);
}
ngAfterViewInit() {
// We'll only use a dynamic map with multiple located addresses
if (this.locatedAddresses.length > 1) {
this.addSub(this.maps.ensureScriptLoaded().subscribe(() => this.showMap()));
}
}
closeAllInfoWindows() {
this.allInfoWindows.forEach(iw => iw.close());
}
singleMapWidth(breakpoints: Set<Breakpoint>): number | 'auto' {
if (breakpoints.has('xl')) {
return 400;
} else if (breakpoints.has('gt-xs')) {
return 340;
} else {
return 'auto';
}
}
private showMap() {
const container = this.mapContainer.nativeElement as HTMLElement;
this.map = new google.maps.Map(container, {
mapTypeControl: false,
streetViewControl: false,
minZoom: 2,
maxZoom: 17,
styles: this.uiLayout.googleMapStyles,
});
const bounds = new google.maps.LatLngBounds();
this.locatedAddresses.map(a => {
const marker = new google.maps.Marker({
title: a.name,
icon: this.dataForFrontendHolder.dataForFrontend.mapMarkerUrl,
position: new google.maps.LatLng(a.location.latitude, a.location.longitude),
});
bounds.extend(marker.getPosition());
marker.addListener('click', () => {
this.closeAllInfoWindows();
let infoWindow = marker['infoWindow'] as google.maps.InfoWindow;
if (!infoWindow) {
infoWindow = new google.maps.InfoWindow({
content: marker.getTitle(),
});
this.allInfoWindows.push(infoWindow);
}
infoWindow.open(marker.getMap(), marker);
});
marker.setMap(this.map);
});
this.map.addListener('click', () => this.closeAllInfoWindows());
this.map.fitBounds(bounds);
}
}
| {
super(injector);
} | identifier_body |
profile-addresses.component.ts | /// <reference types="@types/google-maps" />
import { AfterViewInit, ChangeDetectionStrategy, Component, ElementRef, Injector, Input, OnInit, ViewChild } from '@angular/core';
import { AddressView } from 'app/api/models';
import { Breakpoint } from 'app/core/layout.service';
import { BaseComponent } from 'app/shared/base.component';
import { AddressHelperService } from 'app/ui/core/address-helper.service';
import { MapsService } from 'app/ui/core/maps.service';
import { UiLayoutService } from 'app/ui/core/ui-layout.service';
import { CountriesResolve } from 'app/ui/countries.resolve';
/**
* Shows the user / advertisement address(es) in the view page
*/
@Component({
selector: 'profile-addresses',
templateUrl: 'profile-addresses.component.html',
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class ProfileAddressesComponent extends BaseComponent implements OnInit, AfterViewInit {
constructor(
injector: Injector,
private uiLayout: UiLayoutService,
public addressHelper: AddressHelperService,
public maps: MapsService,
public countriesResolve: CountriesResolve,
) {
super(injector);
}
@ViewChild('mapContainer') mapContainer: ElementRef;
map: google.maps.Map;
private allInfoWindows: google.maps.InfoWindow[] = [];
@Input() addresses: AddressView[];
locatedAddresses: AddressView[];
ngOnInit() {
super.ngOnInit();
this.locatedAddresses = (this.addresses || []).filter(a => a.location);
}
ngAfterViewInit() {
// We'll only use a dynamic map with multiple located addresses
if (this.locatedAddresses.length > 1) {
this.addSub(this.maps.ensureScriptLoaded().subscribe(() => this.showMap()));
}
}
closeAllInfoWindows() {
this.allInfoWindows.forEach(iw => iw.close());
}
singleMapWidth(breakpoints: Set<Breakpoint>): number | 'auto' {
if (breakpoints.has('xl')) {
return 400;
} else if (breakpoints.has('gt-xs')) | else {
return 'auto';
}
}
private showMap() {
const container = this.mapContainer.nativeElement as HTMLElement;
this.map = new google.maps.Map(container, {
mapTypeControl: false,
streetViewControl: false,
minZoom: 2,
maxZoom: 17,
styles: this.uiLayout.googleMapStyles,
});
const bounds = new google.maps.LatLngBounds();
this.locatedAddresses.map(a => {
const marker = new google.maps.Marker({
title: a.name,
icon: this.dataForFrontendHolder.dataForFrontend.mapMarkerUrl,
position: new google.maps.LatLng(a.location.latitude, a.location.longitude),
});
bounds.extend(marker.getPosition());
marker.addListener('click', () => {
this.closeAllInfoWindows();
let infoWindow = marker['infoWindow'] as google.maps.InfoWindow;
if (!infoWindow) {
infoWindow = new google.maps.InfoWindow({
content: marker.getTitle(),
});
this.allInfoWindows.push(infoWindow);
}
infoWindow.open(marker.getMap(), marker);
});
marker.setMap(this.map);
});
this.map.addListener('click', () => this.closeAllInfoWindows());
this.map.fitBounds(bounds);
}
}
| {
return 340;
} | conditional_block |
profile-addresses.component.ts | /// <reference types="@types/google-maps" />
import { AfterViewInit, ChangeDetectionStrategy, Component, ElementRef, Injector, Input, OnInit, ViewChild } from '@angular/core';
import { AddressView } from 'app/api/models';
import { Breakpoint } from 'app/core/layout.service';
import { BaseComponent } from 'app/shared/base.component';
import { AddressHelperService } from 'app/ui/core/address-helper.service';
import { MapsService } from 'app/ui/core/maps.service';
import { UiLayoutService } from 'app/ui/core/ui-layout.service';
import { CountriesResolve } from 'app/ui/countries.resolve';
/**
* Shows the user / advertisement address(es) in the view page
*/
@Component({
selector: 'profile-addresses',
templateUrl: 'profile-addresses.component.html',
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class ProfileAddressesComponent extends BaseComponent implements OnInit, AfterViewInit {
constructor(
injector: Injector,
private uiLayout: UiLayoutService,
public addressHelper: AddressHelperService,
public maps: MapsService,
public countriesResolve: CountriesResolve,
) {
super(injector);
}
@ViewChild('mapContainer') mapContainer: ElementRef;
map: google.maps.Map;
private allInfoWindows: google.maps.InfoWindow[] = [];
@Input() addresses: AddressView[];
locatedAddresses: AddressView[];
ngOnInit() {
super.ngOnInit();
this.locatedAddresses = (this.addresses || []).filter(a => a.location);
}
ngAfterViewInit() {
// We'll only use a dynamic map with multiple located addresses
if (this.locatedAddresses.length > 1) {
this.addSub(this.maps.ensureScriptLoaded().subscribe(() => this.showMap()));
}
}
closeAllInfoWindows() {
this.allInfoWindows.forEach(iw => iw.close());
}
| (breakpoints: Set<Breakpoint>): number | 'auto' {
if (breakpoints.has('xl')) {
return 400;
} else if (breakpoints.has('gt-xs')) {
return 340;
} else {
return 'auto';
}
}
private showMap() {
const container = this.mapContainer.nativeElement as HTMLElement;
this.map = new google.maps.Map(container, {
mapTypeControl: false,
streetViewControl: false,
minZoom: 2,
maxZoom: 17,
styles: this.uiLayout.googleMapStyles,
});
const bounds = new google.maps.LatLngBounds();
this.locatedAddresses.map(a => {
const marker = new google.maps.Marker({
title: a.name,
icon: this.dataForFrontendHolder.dataForFrontend.mapMarkerUrl,
position: new google.maps.LatLng(a.location.latitude, a.location.longitude),
});
bounds.extend(marker.getPosition());
marker.addListener('click', () => {
this.closeAllInfoWindows();
let infoWindow = marker['infoWindow'] as google.maps.InfoWindow;
if (!infoWindow) {
infoWindow = new google.maps.InfoWindow({
content: marker.getTitle(),
});
this.allInfoWindows.push(infoWindow);
}
infoWindow.open(marker.getMap(), marker);
});
marker.setMap(this.map);
});
this.map.addListener('click', () => this.closeAllInfoWindows());
this.map.fitBounds(bounds);
}
}
| singleMapWidth | identifier_name |
f.js | /**
* for(var p in Script.scripts) {
*
* var script = Script.scripts[p];
* var handle = script.handle;
* var base = script.base;
* var limit = base + script.extent;
*
* print(script+"\n");
*
* for(var i = base; i < limit; i++) {
* var pc = jsd.GetClosestPC(handle,i)
* var hascode = String(pc).length && i == jsd.GetClosestLine(handle,pc);
* print("line "+i+" "+ (hascode ? "has code" : "has NO code"));
* }
* print("...............................\n");
* }
*/
function rlocals()
{
var retval = "";
var name = "___UNIQUE_NAME__";
var fun = ""+
"var text = \\\"\\\";"+ | "{"+
" if(text != \\\"\\\")"+
" text += \\\",\\\";"+
" text += p;"+
"}"+
"return text;";
reval(name+" = new Function(\"ob\",\""+fun+"\")");
// show(name);
retval = _reval([name+"("+"arguments.callee"+")"]);
reval("delete "+name);
return retval;
}
function e(a)
{
return eval(a);
} | "for(var p in ob)"+ | random_line_split |
f.js |
/**
* for(var p in Script.scripts) {
*
* var script = Script.scripts[p];
* var handle = script.handle;
* var base = script.base;
* var limit = base + script.extent;
*
* print(script+"\n");
*
* for(var i = base; i < limit; i++) {
* var pc = jsd.GetClosestPC(handle,i)
* var hascode = String(pc).length && i == jsd.GetClosestLine(handle,pc);
* print("line "+i+" "+ (hascode ? "has code" : "has NO code"));
* }
* print("...............................\n");
* }
*/
function rlocals()
|
function e(a)
{
return eval(a);
} | {
var retval = "";
var name = "___UNIQUE_NAME__";
var fun = ""+
"var text = \\\"\\\";"+
"for(var p in ob)"+
"{"+
" if(text != \\\"\\\")"+
" text += \\\",\\\";"+
" text += p;"+
"}"+
"return text;";
reval(name+" = new Function(\"ob\",\""+fun+"\")");
// show(name);
retval = _reval([name+"("+"arguments.callee"+")"]);
reval("delete "+name);
return retval;
} | identifier_body |
f.js |
/**
* for(var p in Script.scripts) {
*
* var script = Script.scripts[p];
* var handle = script.handle;
* var base = script.base;
* var limit = base + script.extent;
*
* print(script+"\n");
*
* for(var i = base; i < limit; i++) {
* var pc = jsd.GetClosestPC(handle,i)
* var hascode = String(pc).length && i == jsd.GetClosestLine(handle,pc);
* print("line "+i+" "+ (hascode ? "has code" : "has NO code"));
* }
* print("...............................\n");
* }
*/
function rlocals()
{
var retval = "";
var name = "___UNIQUE_NAME__";
var fun = ""+
"var text = \\\"\\\";"+
"for(var p in ob)"+
"{"+
" if(text != \\\"\\\")"+
" text += \\\",\\\";"+
" text += p;"+
"}"+
"return text;";
reval(name+" = new Function(\"ob\",\""+fun+"\")");
// show(name);
retval = _reval([name+"("+"arguments.callee"+")"]);
reval("delete "+name);
return retval;
}
function | (a)
{
return eval(a);
} | e | identifier_name |
chat_markers.py | # Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.structs import StanzaHandler
from nbxmpp.structs import ChatMarker
from nbxmpp.modules.base import BaseModule
class C | BaseModule):
def __init__(self, client):
BaseModule.__init__(self, client)
self._client = client
self.handlers = [
StanzaHandler(name='message',
callback=self._process_message_marker,
ns=Namespace.CHATMARKERS,
priority=15),
]
def _process_message_marker(self, _client, stanza, properties):
type_ = stanza.getTag('received', namespace=Namespace.CHATMARKERS)
if type_ is None:
type_ = stanza.getTag('displayed', namespace=Namespace.CHATMARKERS)
if type_ is None:
type_ = stanza.getTag('acknowledged',
namespace=Namespace.CHATMARKERS)
if type_ is None:
return
name = type_.getName()
id_ = type_.getAttr('id')
if id_ is None:
self._log.warning('Chatmarker without id')
self._log.warning(stanza)
return
properties.marker = ChatMarker(name, id_)
| hatMarkers( | identifier_name |
chat_markers.py | # Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.structs import StanzaHandler
from nbxmpp.structs import ChatMarker
from nbxmpp.modules.base import BaseModule
class ChatMarkers(BaseModule):
def __init__(self, client):
B |
def _process_message_marker(self, _client, stanza, properties):
type_ = stanza.getTag('received', namespace=Namespace.CHATMARKERS)
if type_ is None:
type_ = stanza.getTag('displayed', namespace=Namespace.CHATMARKERS)
if type_ is None:
type_ = stanza.getTag('acknowledged',
namespace=Namespace.CHATMARKERS)
if type_ is None:
return
name = type_.getName()
id_ = type_.getAttr('id')
if id_ is None:
self._log.warning('Chatmarker without id')
self._log.warning(stanza)
return
properties.marker = ChatMarker(name, id_)
| aseModule.__init__(self, client)
self._client = client
self.handlers = [
StanzaHandler(name='message',
callback=self._process_message_marker,
ns=Namespace.CHATMARKERS,
priority=15),
]
| identifier_body |
chat_markers.py | # Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.structs import StanzaHandler
from nbxmpp.structs import ChatMarker
from nbxmpp.modules.base import BaseModule
class ChatMarkers(BaseModule):
def __init__(self, client):
BaseModule.__init__(self, client)
self._client = client
self.handlers = [
StanzaHandler(name='message',
callback=self._process_message_marker,
ns=Namespace.CHATMARKERS,
priority=15),
]
def _process_message_marker(self, _client, stanza, properties):
type_ = stanza.getTag('received', namespace=Namespace.CHATMARKERS)
if type_ is None:
type_ = stanza.getTag('displayed', namespace=Namespace.CHATMARKERS)
if type_ is None:
type_ = stanza.getTag('acknowledged',
namespace=Namespace.CHATMARKERS)
if type_ is None:
return
name = type_.getName() | id_ = type_.getAttr('id')
if id_ is None:
self._log.warning('Chatmarker without id')
self._log.warning(stanza)
return
properties.marker = ChatMarker(name, id_) | random_line_split |
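# For reference, the kind of XEP-0333 element the handler above matches --
# a hypothetical stanza fragment, shown as a plain string (the JIDs and id
# are invented):
marker_xml = ('<message from="[email protected]" to="[email protected]">'
              '<displayed xmlns="urn:xmpp:chat-markers:0" id="msg-42"/>'
              '</message>')
# _process_message_marker would set properties.marker = ChatMarker('displayed', 'msg-42')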
|
chat_markers.py | # Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.structs import StanzaHandler
from nbxmpp.structs import ChatMarker
from nbxmpp.modules.base import BaseModule
class ChatMarkers(BaseModule):
def __init__(self, client):
BaseModule.__init__(self, client)
self._client = client
self.handlers = [
StanzaHandler(name='message',
callback=self._process_message_marker,
ns=Namespace.CHATMARKERS,
priority=15),
]
def _process_message_marker(self, _client, stanza, properties):
type_ = stanza.getTag('received', namespace=Namespace.CHATMARKERS)
if type_ is None:
type_ = stanza.getTag('displayed', namespace=Namespace.CHATMARKERS)
if type_ is None:
t |
name = type_.getName()
id_ = type_.getAttr('id')
if id_ is None:
self._log.warning('Chatmarker without id')
self._log.warning(stanza)
return
properties.marker = ChatMarker(name, id_)
| ype_ = stanza.getTag('acknowledged',
namespace=Namespace.CHATMARKERS)
if type_ is None:
return
| conditional_block |
plot_holoviews.py | # Use holoviews to plot the particle distribution at given time
from pathlib import Path
import numpy as np
import xarray as xr
import holoviews as hv
from postladim import ParticleFile
hv.extension("bokeh")
# --- Settings ---
tstep = 40 # Time step to show
# Output file (and type)
output_file = "line_hv.png"
#output_file = "line_hv.html"
scale = 5 # Figure size factor
# --- Data files ---
ladim_dir = Path("../../")
grid_file = ladim_dir / "examples/data/ocean_avg_0014.nc"
particle_file = ladim_dir / "examples/line/line.nc"
# --- Read particle data ---
pf = ParticleFile(particle_file)
X, Y = pf.position(tstep)
# --- Background bathymetry data ---
# Read bathymetry and land mask
with xr.open_dataset(grid_file) as A:
H = A.h
M = A.mask_rho
jmax, imax = M.shape
H = H.where(M > 0) # Mask out land
M = M.where(M < 1) # Mask out sea
# --- Holoviews elements ---
# Land image
land = hv.Image(data=M, kdims=["xi_rho", "eta_rho"], group="Land")
# Bathymetry image
topo = hv.Image(data=-np.log10(H), kdims=["xi_rho", "eta_rho"], group="Topo")
# Particle distribution
spread = hv.Scatter(data=(X, Y))
# Overlay
h = topo * land * spread
# --- Plot options ---
h.opts(frame_width=scale * imax, frame_height=scale * jmax)
h.opts("Scatter", color="red")
h.opts("Image.Topo", cmap="blues_r", alpha=0.7)
h.opts("Image.Land", cmap=["#80B040"])
# --- Save output ---
if output_file.endswith("png"):
|
hv.save(h, filename=output_file)
| h.opts(toolbar=None) | conditional_block |
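# The two .where() calls above implement the usual ROMS masking trick
# (mask_rho: 1 = water, 0 = land). A tiny self-contained sketch on a toy grid:
import numpy as np
import xarray as xr
M = xr.DataArray(np.array([[1, 1, 0], [1, 0, 0]]), dims=("eta_rho", "xi_rho"))
H = xr.DataArray(np.full((2, 3), 50.0), dims=("eta_rho", "xi_rho"))
H_sea = H.where(M > 0)   # depth kept over water, NaN over land
M_land = M.where(M < 1)  # land cells keep 0, water becomes NaN
assert int(H_sea.notnull().sum()) == 3 and int(M_land.notnull().sum()) == 3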
plot_holoviews.py | # Use holoviews to plot the particle distribution at given time
from pathlib import Path
import numpy as np
import xarray as xr
import holoviews as hv
from postladim import ParticleFile
hv.extension("bokeh")
# --- Settings ---
tstep = 40 # Time step to show
# Output file (and type)
output_file = "line_hv.png"
#output_file = "line_hv.html"
scale = 5 # Figure size factor
# --- Data files ---
ladim_dir = Path("../../")
grid_file = ladim_dir / "examples/data/ocean_avg_0014.nc"
particle_file = ladim_dir / "examples/line/line.nc"
# --- Read particle data ---
pf = ParticleFile(particle_file)
X, Y = pf.position(tstep)
# --- Background bathymetry data ---
# Read bathymetry and land mask
with xr.open_dataset(grid_file) as A:
H = A.h
M = A.mask_rho
jmax, imax = M.shape
H = H.where(M > 0) # Mask out land
M = M.where(M < 1) # Mask out sea
# --- Holoviews elements ---
# Land image
land = hv.Image(data=M, kdims=["xi_rho", "eta_rho"], group="Land")
# Bathymetry image
topo = hv.Image(data=-np.log10(H), kdims=["xi_rho", "eta_rho"], group="Topo")
# Particle distribution
spread = hv.Scatter(data=(X, Y))
# Overlay
h = topo * land * spread
# --- Plot options ---
h.opts(frame_width=scale * imax, frame_height=scale * jmax)
h.opts("Scatter", color="red")
h.opts("Image.Topo", cmap="blues_r", alpha=0.7)
h.opts("Image.Land", cmap=["#80B040"])
# --- Save output ---
if output_file.endswith("png"):
h.opts(toolbar=None) | hv.save(h, filename=output_file) | random_line_split |
|
require-dependencies.test.ts | import {lint, ruleType} from '../../../src/rules/require-dependencies';
import {Severity} from '../../../src/types/severity';
describe('require-dependencies Unit Tests', () => {
describe('a rule type value should be exported', () => {
test('it should equal "standard"', () => {
expect(ruleType).toStrictEqual('standard');
});
});
describe('when package.json has node', () => {
test('true should be returned', () => {
const packageJsonData = {
dependencies: 'dependencies',
};
const response = lint(packageJsonData, Severity.Error);
expect(response).toBeNull();
});
});
describe('when package.json does not have node', () => {
test('LintIssue object should be returned', () => {
const packageJsonData = {};
const response = lint(packageJsonData, Severity.Error);
expect(response.lintId).toStrictEqual('require-dependencies');
expect(response.severity).toStrictEqual('error');
expect(response.node).toStrictEqual('dependencies');
expect(response.lintMessage).toStrictEqual('dependencies is required');
});
}); | }); | random_line_split |
|
lifetimes_as_part_of_type.rs | #![allow(warnings)]
// **Exercise 1.** For the method `get`, identify at least 4 lifetimes
// that must be inferred.
//
// **Exercise 2.** Modify the signature of `get` such that the method
// `get` fails to compile with a lifetime inference error.
//
// **Exercise 3.** Modify the signature of `get` such that the
// `do_not_compile` test fails to compile with a lifetime error
// (but `get` does not have any errors).
//
// **Exercise 4.** There are actually two ways to achieve Exercise 3.
// Can you find the other one?
pub struct Map<K: Eq, V> {
elements: Vec<(K, V)>,
}
impl<K: Eq, V> Map<K, V> {
pub fn new() -> Self {
Map { elements: vec![] }
}
pub fn get(&self, key: &K) -> Option<&V> |
}
#[test]
// START SOLUTION
#[should_panic]
// END SOLUTION
fn do_not_compile() {
let map: Map<char, String> = Map::new();
let r;
let key = &'c';
r = map.get(key);
panic!("If this test is running, your program compiled, and that's bad!");
}
| {
let matching_pair: Option<&(K, V)> = <[_]>::iter(&self.elements)
.rev()
.find(|pair| pair.0 == *key);
matching_pair.map(|pair| &pair.1)
} | identifier_body |
lifetimes_as_part_of_type.rs | #![allow(warnings)]
// **Exercise 1.** For the method `get`, identify at least 4 lifetimes
// that must be inferred.
//
// **Exercise 2.** Modify the signature of `get` such that the method
// `get` fails to compile with a lifetime inference error.
//
// **Exercise 3.** Modify the signature of `get` such that the
// `do_not_compile` test fails to compile with a lifetime error
// (but `get` does not have any errors).
//
// **Exercise 4.** There are actually two ways to achieve Exercise 3.
// Can you find the other one?
pub struct Map<K: Eq, V> {
elements: Vec<(K, V)>,
}
impl<K: Eq, V> Map<K, V> {
pub fn new() -> Self {
Map { elements: vec![] }
}
pub fn get(&self, key: &K) -> Option<&V> {
let matching_pair: Option<&(K, V)> = <[_]>::iter(&self.elements)
.rev()
.find(|pair| pair.0 == *key);
matching_pair.map(|pair| &pair.1)
}
}
#[test]
// START SOLUTION
#[should_panic]
// END SOLUTION
fn | () {
let map: Map<char, String> = Map::new();
let r;
let key = &'c';
r = map.get(key);
panic!("If this test is running, your program compiled, and that's bad!");
}
| do_not_compile | identifier_name |
lifetimes_as_part_of_type.rs | #![allow(warnings)]
// **Exercise 1.** For the method `get`, identify at least 4 lifetimes | //
// **Exercise 2.** Modify the signature of `get` such that the method
// `get` fails to compile with a lifetime inference error.
//
// **Exercise 3.** Modify the signature of `get` such that the
// `do_not_compile` test fails to compile with a lifetime error
// (but `get` does not have any errors).
//
// **Exercise 4.** There are actually two ways to achieve Exercise 3.
// Can you find the other one?
pub struct Map<K: Eq, V> {
elements: Vec<(K, V)>,
}
impl<K: Eq, V> Map<K, V> {
pub fn new() -> Self {
Map { elements: vec![] }
}
pub fn get(&self, key: &K) -> Option<&V> {
let matching_pair: Option<&(K, V)> = <[_]>::iter(&self.elements)
.rev()
.find(|pair| pair.0 == *key);
matching_pair.map(|pair| &pair.1)
}
}
#[test]
// START SOLUTION
#[should_panic]
// END SOLUTION
fn do_not_compile() {
let map: Map<char, String> = Map::new();
let r;
let key = &'c';
r = map.get(key);
panic!("If this test is running, your program compiled, and that's bad!");
} | // that must be inferred. | random_line_split |
issue-14589.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// All 3 expressions should work in that the argument gets
// coerced to a trait object
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
fn main() {
send::<Box<Foo>>(Box::new(Output(0)));
Test::<Box<Foo>>::foo(Box::new(Output(0)));
Test::<Box<Foo>>::new().send(Box::new(Output(0)));
}
fn send<T>(_: T) {}
struct Test<T> { marker: std::marker::PhantomData<T> }
impl<T> Test<T> {
fn new() -> Test<T> { Test { marker: ::std::marker::PhantomData } }
fn foo(_: T) |
fn send(&self, _: T) {}
}
trait Foo { fn dummy(&self) { }}
struct Output(int);
impl Foo for Output {}
| {} | identifier_body |
issue-14589.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// All 3 expressions should work in that the argument gets
// coerced to a trait object
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
fn main() {
send::<Box<Foo>>(Box::new(Output(0)));
Test::<Box<Foo>>::foo(Box::new(Output(0)));
Test::<Box<Foo>>::new().send(Box::new(Output(0)));
}
fn | <T>(_: T) {}
struct Test<T> { marker: std::marker::PhantomData<T> }
impl<T> Test<T> {
fn new() -> Test<T> { Test { marker: ::std::marker::PhantomData } }
fn foo(_: T) {}
fn send(&self, _: T) {}
}
trait Foo { fn dummy(&self) { }}
struct Output(int);
impl Foo for Output {}
| send | identifier_name |
issue-14589.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// All 3 expressions should work in that the argument gets
// coerced to a trait object
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
fn main() {
send::<Box<Foo>>(Box::new(Output(0)));
Test::<Box<Foo>>::foo(Box::new(Output(0)));
Test::<Box<Foo>>::new().send(Box::new(Output(0)));
}
fn send<T>(_: T) {}
struct Test<T> { marker: std::marker::PhantomData<T> }
impl<T> Test<T> {
fn new() -> Test<T> { Test { marker: ::std::marker::PhantomData } }
fn foo(_: T) {}
fn send(&self, _: T) {}
}
trait Foo { fn dummy(&self) { }} | struct Output(int);
impl Foo for Output {} | random_line_split |
|
base_inequality_op.py | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions._utils import builtin_ops
from jx_base.expressions.expression import Expression
from jx_base.expressions.false_op import FALSE
from jx_base.expressions.literal import is_literal, Literal
from jx_base.expressions.variable import Variable
from jx_base.language import is_op
from mo_json.types import T_BOOLEAN
class BaseInequalityOp(Expression):
has_simple_form = True
data_type = T_BOOLEAN
op = None
def __init__(self, terms):
Expression.__init__(self, terms)
self.lhs, self.rhs = terms
@property
def name(self):
return self.op
def __data__(self):
if is_op(self.lhs, Variable) and is_literal(self.rhs):
return {self.op: {self.lhs.var, self.rhs.value}}
else:
return {self.op: [self.lhs.__data__(), self.rhs.__data__()]}
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.op == other.op and self.lhs == other.lhs and self.rhs == other.rhs
def vars(self):
return self.lhs.vars() | self.rhs.vars()
def | (self, map_):
return self.__class__([self.lhs.map(map_), self.rhs.map(map_)])
def missing(self, lang):
return FALSE
def partial_eval(self, lang):
lhs = self.lhs.partial_eval(lang)
rhs = self.rhs.partial_eval(lang)
if is_literal(lhs) and is_literal(rhs):
return Literal(builtin_ops[self.op](lhs, rhs))
return self.__class__([lhs, rhs])
| map | identifier_name |
base_inequality_op.py | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions._utils import builtin_ops
from jx_base.expressions.expression import Expression
from jx_base.expressions.false_op import FALSE
from jx_base.expressions.literal import is_literal, Literal
from jx_base.expressions.variable import Variable
from jx_base.language import is_op
from mo_json.types import T_BOOLEAN | op = None
def __init__(self, terms):
Expression.__init__(self, terms)
self.lhs, self.rhs = terms
@property
def name(self):
return self.op
def __data__(self):
if is_op(self.lhs, Variable) and is_literal(self.rhs):
return {self.op: {self.lhs.var, self.rhs.value}}
else:
return {self.op: [self.lhs.__data__(), self.rhs.__data__()]}
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.op == other.op and self.lhs == other.lhs and self.rhs == other.rhs
def vars(self):
return self.lhs.vars() | self.rhs.vars()
def map(self, map_):
return self.__class__([self.lhs.map(map_), self.rhs.map(map_)])
def missing(self, lang):
return FALSE
def partial_eval(self, lang):
lhs = self.lhs.partial_eval(lang)
rhs = self.rhs.partial_eval(lang)
if is_literal(lhs) and is_literal(rhs):
return Literal(builtin_ops[self.op](lhs, rhs))
return self.__class__([lhs, rhs]) |
class BaseInequalityOp(Expression):
has_simple_form = True
data_type = T_BOOLEAN | random_line_split |
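# To make the constant folding in partial_eval concrete: builtin_ops maps the
# operator name to a Python comparison. A stand-in table (the real one lives
# in jx_base.expressions._utils; these key names are assumptions):
import operator
builtin_ops_sketch = {"gt": operator.gt, "gte": operator.ge,
                      "lt": operator.lt, "lte": operator.le}
assert builtin_ops_sketch["gt"](3, 2) is True  # two literals fold to a boolean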
base_inequality_op.py | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions._utils import builtin_ops
from jx_base.expressions.expression import Expression
from jx_base.expressions.false_op import FALSE
from jx_base.expressions.literal import is_literal, Literal
from jx_base.expressions.variable import Variable
from jx_base.language import is_op
from mo_json.types import T_BOOLEAN
class BaseInequalityOp(Expression):
has_simple_form = True
data_type = T_BOOLEAN
op = None
def __init__(self, terms):
Expression.__init__(self, terms)
self.lhs, self.rhs = terms
@property
def name(self):
return self.op
def __data__(self):
if is_op(self.lhs, Variable) and is_literal(self.rhs):
return {self.op: {self.lhs.var, self.rhs.value}}
else:
|
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.op == other.op and self.lhs == other.lhs and self.rhs == other.rhs
def vars(self):
return self.lhs.vars() | self.rhs.vars()
def map(self, map_):
return self.__class__([self.lhs.map(map_), self.rhs.map(map_)])
def missing(self, lang):
return FALSE
def partial_eval(self, lang):
lhs = self.lhs.partial_eval(lang)
rhs = self.rhs.partial_eval(lang)
if is_literal(lhs) and is_literal(rhs):
return Literal(builtin_ops[self.op](lhs, rhs))
return self.__class__([lhs, rhs])
| return {self.op: [self.lhs.__data__(), self.rhs.__data__()]} | conditional_block |
base_inequality_op.py | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions._utils import builtin_ops
from jx_base.expressions.expression import Expression
from jx_base.expressions.false_op import FALSE
from jx_base.expressions.literal import is_literal, Literal
from jx_base.expressions.variable import Variable
from jx_base.language import is_op
from mo_json.types import T_BOOLEAN
class BaseInequalityOp(Expression):
| has_simple_form = True
data_type = T_BOOLEAN
op = None
def __init__(self, terms):
Expression.__init__(self, terms)
self.lhs, self.rhs = terms
@property
def name(self):
return self.op
def __data__(self):
if is_op(self.lhs, Variable) and is_literal(self.rhs):
return {self.op: {self.lhs.var, self.rhs.value}}
else:
return {self.op: [self.lhs.__data__(), self.rhs.__data__()]}
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.op == other.op and self.lhs == other.lhs and self.rhs == other.rhs
def vars(self):
return self.lhs.vars() | self.rhs.vars()
def map(self, map_):
return self.__class__([self.lhs.map(map_), self.rhs.map(map_)])
def missing(self, lang):
return FALSE
def partial_eval(self, lang):
lhs = self.lhs.partial_eval(lang)
rhs = self.rhs.partial_eval(lang)
if is_literal(lhs) and is_literal(rhs):
return Literal(builtin_ops[self.op](lhs, rhs))
return self.__class__([lhs, rhs]) | identifier_body |
|
cexceptions.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Netheos (http://www.netheos.net)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, unicode_literals, print_function
class CStorageError(Exception):
"""Base class for all cloud storage errors.
Such exceptions have an optional 'message' and 'cause' attribute."""
def __init__(self, message, cause=None):
super(CStorageError, self).__init__(message)
self.cause = cause
def __str__(self):
ret = "%s(%s)" % (self.__class__, self.message)
if self.cause:
ret += " (caused by %r)" % (self.cause,)
return ret
class CInvalidFileTypeError(CStorageError):
"""Raised when performing an operation on a folder when a blob is expected,
or when operating on a blob and a folder is expected.
Also raised when downloading provider special files (eg google drive native docs)."""
def __init__(self, c_path, expected_blob, message=None):
""":param c_path: the problematic path
:param expected_blob: if True, a blob was expected but a folder was found.
if False, a folder was expected but a blob was found
:param message: optional message"""
if not message:
message = 'Invalid file type at %r (expected %s)' % \
(c_path, 'blob' if expected_blob else 'folder')
super(CInvalidFileTypeError, self).__init__(message)
self.path = c_path
self.expected_blob = expected_blob
class CRetriableError(CStorageError):
"""Raised by RequestInvoker validation method, when request
has failed but should be retried.
This class is only a marker ; the underlying root exception
is given by the 'cause' attribute.
The optional 'delay' specifies how much one should wait before retrying"""
def __init__(self, cause, delay=None):
super(CRetriableError, self).__init__(message=None, cause=cause)
self.delay = delay
def get_delay(self):
return self.delay
#def __str__(self):
# return "%s(%s)" % (self.__class__, self.cause)
class CFileNotFoundError(CStorageError):
"""File has not been found (sometimes consecutive to http 404 error)"""
def __init__(self, message, c_path):
super(CFileNotFoundError, self).__init__(message)
self.path = c_path
class CHttpError(CStorageError):
"""Raised when providers server answers non OK answers"""
def __init__(self, request_method,
request_path,
status_code, reason,
message=None):
super(CHttpError, self).__init__(message)
self.request_method = request_method
self.request_path = request_path
self.status_code = status_code
self.reason = reason
def __str__(self):
ret = "%s(%d %s) %s %s" % (
self.__class__.__name__, self.status_code, self.reason, self.request_method, self.request_path )
if self.message:
|
return ret
class CAuthenticationError(CHttpError):
"""http 401 error"""
pass
| ret += ' msg=%s' % self.message | conditional_block |
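# A hedged sketch of how a request invoker might honor CRetriableError above;
# invoke_with_retries and its defaults are invented for illustration:
import time
def invoke_with_retries(request, max_attempts=3):
    for attempt in range(max_attempts):
        try:
            return request()
        except CRetriableError as err:
            if attempt == max_attempts - 1:
                raise err.cause or err  # out of retries: surface the root cause
            time.sleep(err.get_delay() or 1.0)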
cexceptions.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Netheos (http://www.netheos.net)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, unicode_literals, print_function
class CStorageError(Exception):
"""Base class for all cloud storage errors.
Such exceptions have an optional 'message' and 'cause' attribute."""
def __init__(self, message, cause=None):
super(CStorageError, self).__init__(message)
self.cause = cause
def __str__(self):
ret = "%s(%s)" % (self.__class__, self.message)
if self.cause:
ret += " (caused by %r)" % (self.cause,)
return ret
class CInvalidFileTypeError(CStorageError):
"""Raised when performing an operation on a folder when a blob is expected,
or when operating on a blob and a folder is expected.
Also raised when downloading provider special files (eg google drive native docs)."""
def __init__(self, c_path, expected_blob, message=None):
""":param c_path: the problematic path
:param expected_blob: if True, a blob was expected but a folder was found.
if False, a folder was expected but a blob was found
:param message: optional message"""
if not message:
message = 'Invalid file type at %r (expected %s)' % \
(c_path, 'blob' if expected_blob else 'folder')
super(CInvalidFileTypeError, self).__init__(message)
self.path = c_path
self.expected_blob = expected_blob
class CRetriableError(CStorageError):
"""Raised by RequestInvoker validation method, when request
has failed but should be retried.
This class is only a marker ; the underlying root exception
is given by the 'cause' attribute.
The optional 'delay' specifies how much one should wait before retrying"""
def __init__(self, cause, delay=None):
super(CRetriableError, self).__init__(message=None, cause=cause)
self.delay = delay
def get_delay(self):
return self.delay
#def __str__(self):
# return "%s(%s)" % (self.__class__, self.cause)
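# A minimal, hypothetical retry-loop sketch showing how a caller might consume
# CRetriableError; 'invoke_with_retries' and its defaults are illustrative
# assumptions, not part of the original module.
def invoke_with_retries(request, max_attempts=3):
    """Calls |request| (a zero-argument callable), honoring retry delays."""
    import time
    for attempt in range(max_attempts):
        try:
            return request()
        except CRetriableError as e:
            if attempt == max_attempts - 1:
                raise
            time.sleep(e.get_delay() or 1)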
class CFileNotFoundError(CStorageError):
"""File has not been found (sometimes consecutive to http 404 error)"""
def __init__(self, message, c_path):
|
class CHttpError(CStorageError):
"""Raised when providers server answers non OK answers"""
def __init__(self, request_method,
request_path,
status_code, reason,
message=None):
super(CHttpError, self).__init__(message)
self.request_method = request_method
self.request_path = request_path
self.status_code = status_code
self.reason = reason
def __str__(self):
ret = "%s(%d %s) %s %s" % (
self.__class__.__name__, self.status_code, self.reason, self.request_method, self.request_path )
if self.message:
ret += ' msg=%s' % self.message
return ret
class CAuthenticationError(CHttpError):
"""http 401 error"""
pass
| super(CFileNotFoundError, self).__init__(message)
self.path = c_path | identifier_body |
cexceptions.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Netheos (http://www.netheos.net)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, unicode_literals, print_function
class CStorageError(Exception):
"""Base class for all cloud storage errors.
Such exceptions have an optional 'message' and 'cause' attribute."""
def | (self, message, cause=None):
super(CStorageError, self).__init__(message)
self.cause = cause
def __str__(self):
ret = "%s(%s)" % (self.__class__, self.message)
if self.cause:
ret += " (caused by %r)" % (self.cause,)
return ret
class CInvalidFileTypeError(CStorageError):
"""Raised when performing an operation on a folder when a blob is expected,
or when operating on a blob and a folder is expected.
    Also raised when downloading provider special files (e.g. Google Drive native docs)."""
def __init__(self, c_path, expected_blob, message=None):
""":param c_path: the problematic path
:param expected_blob: if True, a blob was expected but a folder was found.
if False, a folder was expected but a blob was found
:param message: optional message"""
if not message:
message = 'Invalid file type at %r (expected %s)' % \
(c_path, 'blob' if expected_blob else 'folder')
super(CInvalidFileTypeError, self).__init__(message)
self.path = c_path
self.expected_blob = expected_blob
class CRetriableError(CStorageError):
"""Raised by RequestInvoker validation method, when request
has failed but should be retried.
This class is only a marker ; the underlying root exception
is given by the 'cause' attribute.
The optional 'delay' specifies how much one should wait before retrying"""
def __init__(self, cause, delay=None):
super(CRetriableError, self).__init__(message=None, cause=cause)
self.delay = delay
def get_delay(self):
return self.delay
#def __str__(self):
# return "%s(%s)" % (self.__class__, self.cause)
class CFileNotFoundError(CStorageError):
"""File has not been found (sometimes consecutive to http 404 error)"""
def __init__(self, message, c_path):
super(CFileNotFoundError, self).__init__(message)
self.path = c_path
class CHttpError(CStorageError):
"""Raised when providers server answers non OK answers"""
def __init__(self, request_method,
request_path,
status_code, reason,
message=None):
super(CHttpError, self).__init__(message)
self.request_method = request_method
self.request_path = request_path
self.status_code = status_code
self.reason = reason
def __str__(self):
ret = "%s(%d %s) %s %s" % (
self.__class__.__name__, self.status_code, self.reason, self.request_method, self.request_path )
if self.message:
ret += ' msg=%s' % self.message
return ret
class CAuthenticationError(CHttpError):
"""http 401 error"""
pass
| __init__ | identifier_name |
cexceptions.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Netheos (http://www.netheos.net)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, unicode_literals, print_function
class CStorageError(Exception):
"""Base class for all cloud storage errors.
Such exceptions have an optional 'message' and 'cause' attribute."""
def __init__(self, message, cause=None):
super(CStorageError, self).__init__(message)
self.cause = cause
def __str__(self):
ret = "%s(%s)" % (self.__class__, self.message)
if self.cause:
ret += " (caused by %r)" % (self.cause,)
return ret
| """Raised when performing an operation on a folder when a blob is expected,
or when operating on a blob and a folder is expected.
    Also raised when downloading provider special files (e.g. Google Drive native docs)."""
def __init__(self, c_path, expected_blob, message=None):
""":param c_path: the problematic path
:param expected_blob: if True, a blob was expected but a folder was found.
if False, a folder was expected but a blob was found
:param message: optional message"""
if not message:
message = 'Invalid file type at %r (expected %s)' % \
(c_path, 'blob' if expected_blob else 'folder')
super(CInvalidFileTypeError, self).__init__(message)
self.path = c_path
self.expected_blob = expected_blob
class CRetriableError(CStorageError):
"""Raised by RequestInvoker validation method, when request
has failed but should be retried.
This class is only a marker ; the underlying root exception
is given by the 'cause' attribute.
The optional 'delay' specifies how much one should wait before retrying"""
def __init__(self, cause, delay=None):
super(CRetriableError, self).__init__(message=None, cause=cause)
self.delay = delay
def get_delay(self):
return self.delay
#def __str__(self):
# return "%s(%s)" % (self.__class__, self.cause)
class CFileNotFoundError(CStorageError):
"""File has not been found (sometimes consecutive to http 404 error)"""
def __init__(self, message, c_path):
super(CFileNotFoundError, self).__init__(message)
self.path = c_path
class CHttpError(CStorageError):
"""Raised when providers server answers non OK answers"""
def __init__(self, request_method,
request_path,
status_code, reason,
message=None):
super(CHttpError, self).__init__(message)
self.request_method = request_method
self.request_path = request_path
self.status_code = status_code
self.reason = reason
def __str__(self):
ret = "%s(%d %s) %s %s" % (
self.__class__.__name__, self.status_code, self.reason, self.request_method, self.request_path )
if self.message:
ret += ' msg=%s' % self.message
return ret
class CAuthenticationError(CHttpError):
"""http 401 error"""
pass |
class CInvalidFileTypeError(CStorageError): | random_line_split |
s3.js |
import("crypto");
jimport("com.amazonaws.services.s3.model.CannedAccessControlList");
jimport("com.amazonaws.services.s3.model.GeneratePresignedUrlRequest");
jimport("org.apache.commons.codec.binary.Hex");
jimport("java.io.ByteArrayInputStream");
jimport("java.io.ByteArrayOutputStream");
S3 = null;
function _init() {
if (S3 == null) {
S3 = new com.amazonaws.services.s3.AmazonS3Client(
new com.amazonaws.auth.BasicAWSCredentials(
appjet.config.awsUser, appjet.config.awsPass));
}
}
function getBucketName(bucketName) {
if (appjet.config["s3." + bucketName]) {
return appjet.config["s3." + bucketName];
}
return bucketName;
}
function list(bucketName) {
_init();
return S3.listObjects(getBucketName(bucketName)).getObjectSummaries().toArray();
}
function put(bucketName, keyName, bytes, isPublicRead, contentType) {
_init();
if (!(bytes instanceof java.io.InputStream)) {
bytes = new java.io.ByteArrayInputStream(new java.lang.String(bytes).getBytes());
}
var meta = null;
if (contentType) {
meta = new com.amazonaws.services.s3.model.ObjectMetadata();
meta.setContentType(contentType);
}
S3.putObject(getBucketName(bucketName), keyName, bytes, meta);
if (isPublicRead) {
S3.setObjectAcl(getBucketName(bucketName), keyName, CannedAccessControlList.PublicRead);
}
}
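// Hypothetical usage sketch: the bucket and key names below are invented for
// illustration and are not part of the original module.
function examplePutUsage() {
  // Upload a small public JSON document with an explicit content type, then
  // return its public HTTPS URL.
  put('user-uploads', 'examples/hello.json', '{"ok": true}', true, 'application/json');
  return getURL('user-uploads', 'examples/hello.json', false);
}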
function getURL(bucketName, keyName, useHTTP) {
return (useHTTP?"http":"https") + "://s3.amazonaws.com/" + getBucketName(bucketName) + "/" + keyName;
}
function getPresignedURL(bucketName, keyName, durationValidMs) {
  _init();  // ensure the S3 client exists before it is used below
  var expiration = new java.util.Date();
expiration.setTime(expiration.getTime() + durationValidMs);
var generatePresignedUrlRequest = new GeneratePresignedUrlRequest(getBucketName(bucketName), keyName);
generatePresignedUrlRequest.setExpiration(expiration);
return S3.generatePresignedUrl(generatePresignedUrlRequest);
}
function getBytes(bucketName, keyName) {
_init();
var obj = S3.getObject(getBucketName(bucketName), keyName);
var inStream = obj.getObjectContent();
  try {
    // java.io.ByteArrayOutputStream cannot be constructed from an InputStream,
    // so drain the stream manually in chunks.
    var outStream = new java.io.ByteArrayOutputStream();
    var buf = java.lang.reflect.Array.newInstance(java.lang.Byte.TYPE, 4096);
    var n;
    while ((n = inStream.read(buf)) > 0) {
      outStream.write(buf, 0, n);
    }
    return outStream.toByteArray();
  } finally {
    inStream.close();
  }
}
var AWS_SERVICE = 's3';
var AWS_REQUEST = 'aws4_request';
/**
* This signature allows the user to upload a file to the bucket that begins with a specific
* key (domain_localPadId_userId_), enforces only image uploads up to a max size of 20MB.
*/
function _getS3Policy(domain, localPadId, userId, expirationDate, utcDateStr) {
var isoDate = expirationDate.toISOString();
// Amazon wants two types of date strings, one like:
// "2015-04-28T00:36:03.092Z"
// and one like:
// "20150428T003603Z"
// :facepalm:
var alternateWTFAreYouSeriousISOAmazonDate = isoDate.replace(/[:\-]|\.\d{3}/g, '');
return {
"expiration": isoDate,
"conditions": [
{"bucket": getBucketName(appjet.config.s3Bucket)},
["starts-with", "$key", domain + '_' + localPadId + '_' + userId + '_'],
{"acl": "public-read"},
["starts-with", "$Content-Type", "image/"],
["content-length-range", 0, 1024*1024*20 /* 20 MB for animated gifs! */],
{"x-amz-credential":
appjet.config.awsUser + "/" +
utcDateStr + "/" +
appjet.config.s3Region + "/" +
AWS_SERVICE + "/" +
AWS_REQUEST
},
{"x-amz-algorithm": "AWS4-HMAC-SHA256"},
{"x-amz-date": alternateWTFAreYouSeriousISOAmazonDate }
]
};
}
/**
* We must sign requests to Amazon, otherwise you could have people arbitrarily:
* - uploading files of random content types to our bucket
* - uploading files with whatever key they want
* - uploading files with an acl that they can choose
* - uploading files of unbounded size
*/
function getS3PolicyAndSig(domain, localPadId, userId) {
var expirationDate = new Date();
expirationDate.setDate(expirationDate.getDate() + 1);
function | (n) { return n < 10 ? '0' + n.toString() : n.toString() }
var utcDateStr = pad(expirationDate.getUTCFullYear()) +
pad(expirationDate.getUTCMonth() + 1) +
pad(expirationDate.getUTCDate());
var s3Policy = crypto.convertToBase64(JSON.stringify(
_getS3Policy(domain, localPadId, userId, expirationDate, utcDateStr)));
var AWSAccessKeyId = appjet.config.awsUser;
var AWSSecretAccessKey = appjet.config.awsPass;
var awsSecretKey = new java.lang.String("AWS4" + AWSSecretAccessKey).getBytes("UTF8");
var dateKey = crypto.signString(awsSecretKey, utcDateStr);
var dateRegionKey = crypto.signString(dateKey, appjet.config.s3Region);
var dateRegionServiceKey = crypto.signString(dateRegionKey, AWS_SERVICE);
var signingKey = crypto.signString(dateRegionServiceKey, AWS_REQUEST);
var signature = crypto.signString(signingKey, s3Policy);
// XXX Amazon's documentation lies. I've emailed them to correct it.
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-UsingHTTPPOST.html
// says to take the final result and base64-encode it.
// But what they really want is *hex* encoding, not base64.
// See also: http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-post-example.html
// and: http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-other
return {
s3Policy: s3Policy,
s3PolicySig: Hex.encodeHexString(signature)
};
}
| pad | identifier_name |
s3.js |
import("crypto");
jimport("com.amazonaws.services.s3.model.CannedAccessControlList");
jimport("com.amazonaws.services.s3.model.GeneratePresignedUrlRequest");
jimport("org.apache.commons.codec.binary.Hex");
jimport("java.io.ByteArrayInputStream");
jimport("java.io.ByteArrayOutputStream");
S3 = null;
function _init() |
function getBucketName(bucketName) {
if (appjet.config["s3." + bucketName]) {
return appjet.config["s3." + bucketName];
}
return bucketName;
}
function list(bucketName) {
_init();
return S3.listObjects(getBucketName(bucketName)).getObjectSummaries().toArray();
}
function put(bucketName, keyName, bytes, isPublicRead, contentType) {
_init();
if (!(bytes instanceof java.io.InputStream)) {
bytes = new java.io.ByteArrayInputStream(new java.lang.String(bytes).getBytes());
}
var meta = null;
if (contentType) {
meta = new com.amazonaws.services.s3.model.ObjectMetadata();
meta.setContentType(contentType);
}
S3.putObject(getBucketName(bucketName), keyName, bytes, meta);
if (isPublicRead) {
S3.setObjectAcl(getBucketName(bucketName), keyName, CannedAccessControlList.PublicRead);
}
}
function getURL(bucketName, keyName, useHTTP) {
return (useHTTP?"http":"https") + "://s3.amazonaws.com/" + getBucketName(bucketName) + "/" + keyName;
}
function getPresignedURL(bucketName, keyName, durationValidMs) {
  _init();  // ensure the S3 client exists before it is used below
  var expiration = new java.util.Date();
expiration.setTime(expiration.getTime() + durationValidMs);
var generatePresignedUrlRequest = new GeneratePresignedUrlRequest(getBucketName(bucketName), keyName);
generatePresignedUrlRequest.setExpiration(expiration);
return S3.generatePresignedUrl(generatePresignedUrlRequest);
}
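// Hypothetical usage sketch (bucket/key names invented): a presigned GET link
// that stays valid for ten minutes.
function examplePresignedUsage() {
  var tenMinutesMs = 10 * 60 * 1000;
  return getPresignedURL('user-uploads', 'examples/hello.json', tenMinutesMs);
}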
function getBytes(bucketName, keyName) {
_init();
var obj = S3.getObject(getBucketName(bucketName), keyName);
var inStream = obj.getObjectContent();
  try {
    // java.io.ByteArrayOutputStream cannot be constructed from an InputStream,
    // so drain the stream manually in chunks.
    var outStream = new java.io.ByteArrayOutputStream();
    var buf = java.lang.reflect.Array.newInstance(java.lang.Byte.TYPE, 4096);
    var n;
    while ((n = inStream.read(buf)) > 0) {
      outStream.write(buf, 0, n);
    }
    return outStream.toByteArray();
  } finally {
    inStream.close();
  }
}
var AWS_SERVICE = 's3';
var AWS_REQUEST = 'aws4_request';
/**
* This signature allows the user to upload a file to the bucket that begins with a specific
* key (domain_localPadId_userId_), enforces only image uploads up to a max size of 20MB.
*/
function _getS3Policy(domain, localPadId, userId, expirationDate, utcDateStr) {
var isoDate = expirationDate.toISOString();
// Amazon wants two types of date strings, one like:
// "2015-04-28T00:36:03.092Z"
// and one like:
// "20150428T003603Z"
// :facepalm:
var alternateWTFAreYouSeriousISOAmazonDate = isoDate.replace(/[:\-]|\.\d{3}/g, '');
return {
"expiration": isoDate,
"conditions": [
{"bucket": getBucketName(appjet.config.s3Bucket)},
["starts-with", "$key", domain + '_' + localPadId + '_' + userId + '_'],
{"acl": "public-read"},
["starts-with", "$Content-Type", "image/"],
["content-length-range", 0, 1024*1024*20 /* 20 MB for animated gifs! */],
{"x-amz-credential":
appjet.config.awsUser + "/" +
utcDateStr + "/" +
appjet.config.s3Region + "/" +
AWS_SERVICE + "/" +
AWS_REQUEST
},
{"x-amz-algorithm": "AWS4-HMAC-SHA256"},
{"x-amz-date": alternateWTFAreYouSeriousISOAmazonDate }
]
};
}
/**
* We must sign requests to Amazon, otherwise you could have people arbitrarily:
* - uploading files of random content types to our bucket
* - uploading files with whatever key they want
* - uploading files with an acl that they can choose
* - uploading files of unbounded size
*/
function getS3PolicyAndSig(domain, localPadId, userId) {
var expirationDate = new Date();
expirationDate.setDate(expirationDate.getDate() + 1);
function pad(n) { return n < 10 ? '0' + n.toString() : n.toString() }
var utcDateStr = pad(expirationDate.getUTCFullYear()) +
pad(expirationDate.getUTCMonth() + 1) +
pad(expirationDate.getUTCDate());
var s3Policy = crypto.convertToBase64(JSON.stringify(
_getS3Policy(domain, localPadId, userId, expirationDate, utcDateStr)));
var AWSAccessKeyId = appjet.config.awsUser;
var AWSSecretAccessKey = appjet.config.awsPass;
var awsSecretKey = new java.lang.String("AWS4" + AWSSecretAccessKey).getBytes("UTF8");
var dateKey = crypto.signString(awsSecretKey, utcDateStr);
var dateRegionKey = crypto.signString(dateKey, appjet.config.s3Region);
var dateRegionServiceKey = crypto.signString(dateRegionKey, AWS_SERVICE);
var signingKey = crypto.signString(dateRegionServiceKey, AWS_REQUEST);
var signature = crypto.signString(signingKey, s3Policy);
// XXX Amazon's documentation lies. I've emailed them to correct it.
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-UsingHTTPPOST.html
// says to take the final result and base64-encode it.
// But what they really want is *hex* encoding, not base64.
// See also: http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-post-example.html
// and: http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-other
return {
s3Policy: s3Policy,
s3PolicySig: Hex.encodeHexString(signature)
};
}
| {
if (S3 == null) {
S3 = new com.amazonaws.services.s3.AmazonS3Client(
new com.amazonaws.auth.BasicAWSCredentials(
appjet.config.awsUser, appjet.config.awsPass));
}
} | identifier_body |
s3.js |
import("crypto");
jimport("com.amazonaws.services.s3.model.CannedAccessControlList");
jimport("com.amazonaws.services.s3.model.GeneratePresignedUrlRequest");
jimport("org.apache.commons.codec.binary.Hex");
jimport("java.io.ByteArrayInputStream");
jimport("java.io.ByteArrayOutputStream");
S3 = null;
function _init() {
if (S3 == null) {
S3 = new com.amazonaws.services.s3.AmazonS3Client(
new com.amazonaws.auth.BasicAWSCredentials(
appjet.config.awsUser, appjet.config.awsPass));
}
}
function getBucketName(bucketName) {
if (appjet.config["s3." + bucketName]) {
return appjet.config["s3." + bucketName];
}
return bucketName;
}
function list(bucketName) {
_init();
return S3.listObjects(getBucketName(bucketName)).getObjectSummaries().toArray();
}
function put(bucketName, keyName, bytes, isPublicRead, contentType) {
_init();
if (!(bytes instanceof java.io.InputStream)) {
bytes = new java.io.ByteArrayInputStream(new java.lang.String(bytes).getBytes());
}
var meta = null;
if (contentType) {
meta = new com.amazonaws.services.s3.model.ObjectMetadata();
meta.setContentType(contentType);
}
S3.putObject(getBucketName(bucketName), keyName, bytes, meta);
if (isPublicRead) |
}
function getURL(bucketName, keyName, useHTTP) {
return (useHTTP?"http":"https") + "://s3.amazonaws.com/" + getBucketName(bucketName) + "/" + keyName;
}
function getPresignedURL(bucketName, keyName, durationValidMs) {
  _init();  // ensure the S3 client exists before it is used below
  var expiration = new java.util.Date();
expiration.setTime(expiration.getTime() + durationValidMs);
var generatePresignedUrlRequest = new GeneratePresignedUrlRequest(getBucketName(bucketName), keyName);
generatePresignedUrlRequest.setExpiration(expiration);
return S3.generatePresignedUrl(generatePresignedUrlRequest);
}
function getBytes(bucketName, keyName) {
_init();
var obj = S3.getObject(getBucketName(bucketName), keyName);
var inStream = obj.getObjectContent();
  try {
    // java.io.ByteArrayOutputStream cannot be constructed from an InputStream,
    // so drain the stream manually in chunks.
    var outStream = new java.io.ByteArrayOutputStream();
    var buf = java.lang.reflect.Array.newInstance(java.lang.Byte.TYPE, 4096);
    var n;
    while ((n = inStream.read(buf)) > 0) {
      outStream.write(buf, 0, n);
    }
    return outStream.toByteArray();
  } finally {
    inStream.close();
  }
}
var AWS_SERVICE = 's3';
var AWS_REQUEST = 'aws4_request';
/**
* This signature allows the user to upload a file to the bucket that begins with a specific
* key (domain_localPadId_userId_), enforces only image uploads up to a max size of 20MB.
*/
function _getS3Policy(domain, localPadId, userId, expirationDate, utcDateStr) {
var isoDate = expirationDate.toISOString();
// Amazon wants two types of date strings, one like:
// "2015-04-28T00:36:03.092Z"
// and one like:
// "20150428T003603Z"
// :facepalm:
var alternateWTFAreYouSeriousISOAmazonDate = isoDate.replace(/[:\-]|\.\d{3}/g, '');
return {
"expiration": isoDate,
"conditions": [
{"bucket": getBucketName(appjet.config.s3Bucket)},
["starts-with", "$key", domain + '_' + localPadId + '_' + userId + '_'],
{"acl": "public-read"},
["starts-with", "$Content-Type", "image/"],
["content-length-range", 0, 1024*1024*20 /* 20 MB for animated gifs! */],
{"x-amz-credential":
appjet.config.awsUser + "/" +
utcDateStr + "/" +
appjet.config.s3Region + "/" +
AWS_SERVICE + "/" +
AWS_REQUEST
},
{"x-amz-algorithm": "AWS4-HMAC-SHA256"},
{"x-amz-date": alternateWTFAreYouSeriousISOAmazonDate }
]
};
}
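// An illustrative sketch (not from the original source) of the browser-side
// POST form fields that pair with the policy above; field names follow AWS's
// SigV4 POST documentation, and the values are abbreviated placeholders.
//   key:              <domain>_<localPadId>_<userId>_<filename>
//   acl:              public-read
//   Content-Type:     image/<subtype>
//   x-amz-credential: <awsUser>/<yyyymmdd>/<region>/s3/aws4_request
//   x-amz-algorithm:  AWS4-HMAC-SHA256
//   x-amz-date:       <yyyymmdd>T<hhmmss>Z
//   policy:           <base64 policy from getS3PolicyAndSig>
//   x-amz-signature:  <hex signature from getS3PolicyAndSig>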
/**
* We must sign requests to Amazon, otherwise you could have people arbitrarily:
* - uploading files of random content types to our bucket
* - uploading files with whatever key they want
* - uploading files with an acl that they can choose
* - uploading files of unbounded size
*/
function getS3PolicyAndSig(domain, localPadId, userId) {
var expirationDate = new Date();
expirationDate.setDate(expirationDate.getDate() + 1);
function pad(n) { return n < 10 ? '0' + n.toString() : n.toString() }
var utcDateStr = pad(expirationDate.getUTCFullYear()) +
pad(expirationDate.getUTCMonth() + 1) +
pad(expirationDate.getUTCDate());
var s3Policy = crypto.convertToBase64(JSON.stringify(
_getS3Policy(domain, localPadId, userId, expirationDate, utcDateStr)));
var AWSAccessKeyId = appjet.config.awsUser;
var AWSSecretAccessKey = appjet.config.awsPass;
var awsSecretKey = new java.lang.String("AWS4" + AWSSecretAccessKey).getBytes("UTF8");
var dateKey = crypto.signString(awsSecretKey, utcDateStr);
var dateRegionKey = crypto.signString(dateKey, appjet.config.s3Region);
var dateRegionServiceKey = crypto.signString(dateRegionKey, AWS_SERVICE);
var signingKey = crypto.signString(dateRegionServiceKey, AWS_REQUEST);
var signature = crypto.signString(signingKey, s3Policy);
// XXX Amazon's documentation lies. I've emailed them to correct it.
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-UsingHTTPPOST.html
// says to take the final result and base64-encode it.
// But what they really want is *hex* encoding, not base64.
// See also: http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-post-example.html
// and: http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-other
return {
s3Policy: s3Policy,
s3PolicySig: Hex.encodeHexString(signature)
};
}
| {
S3.setObjectAcl(getBucketName(bucketName), keyName, CannedAccessControlList.PublicRead);
} | conditional_block |
s3.js | import("crypto");
jimport("com.amazonaws.services.s3.model.CannedAccessControlList");
jimport("com.amazonaws.services.s3.model.GeneratePresignedUrlRequest");
jimport("org.apache.commons.codec.binary.Hex");
jimport("java.io.ByteArrayInputStream");
jimport("java.io.ByteArrayOutputStream");
S3 = null;
function _init() {
if (S3 == null) {
S3 = new com.amazonaws.services.s3.AmazonS3Client(
new com.amazonaws.auth.BasicAWSCredentials(
appjet.config.awsUser, appjet.config.awsPass));
}
}
function getBucketName(bucketName) {
if (appjet.config["s3." + bucketName]) {
return appjet.config["s3." + bucketName];
}
return bucketName;
}
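// A hedged illustration of the aliasing above. Assuming
// appjet.config['s3.user-uploads'] = 'prod-user-uploads', then
// getBucketName('user-uploads') returns 'prod-user-uploads'; any bucket
// without an 's3.<name>' config entry passes through unchanged.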
function list(bucketName) {
_init();
return S3.listObjects(getBucketName(bucketName)).getObjectSummaries().toArray();
}
function put(bucketName, keyName, bytes, isPublicRead, contentType) {
_init();
if (!(bytes instanceof java.io.InputStream)) {
bytes = new java.io.ByteArrayInputStream(new java.lang.String(bytes).getBytes());
}
var meta = null;
if (contentType) {
meta = new com.amazonaws.services.s3.model.ObjectMetadata();
meta.setContentType(contentType);
}
S3.putObject(getBucketName(bucketName), keyName, bytes, meta);
if (isPublicRead) {
S3.setObjectAcl(getBucketName(bucketName), keyName, CannedAccessControlList.PublicRead);
}
}
function getURL(bucketName, keyName, useHTTP) {
return (useHTTP?"http":"https") + "://s3.amazonaws.com/" + getBucketName(bucketName) + "/" + keyName;
}
function getPresignedURL(bucketName, keyName, durationValidMs) {
  _init();  // ensure the S3 client exists before it is used below
  var expiration = new java.util.Date();
expiration.setTime(expiration.getTime() + durationValidMs);
var generatePresignedUrlRequest = new GeneratePresignedUrlRequest(getBucketName(bucketName), keyName);
generatePresignedUrlRequest.setExpiration(expiration);
return S3.generatePresignedUrl(generatePresignedUrlRequest);
}
function getBytes(bucketName, keyName) {
_init();
var obj = S3.getObject(getBucketName(bucketName), keyName);
var inStream = obj.getObjectContent();
  try {
    // java.io.ByteArrayOutputStream cannot be constructed from an InputStream,
    // so drain the stream manually in chunks.
    var outStream = new java.io.ByteArrayOutputStream();
    var buf = java.lang.reflect.Array.newInstance(java.lang.Byte.TYPE, 4096);
    var n;
    while ((n = inStream.read(buf)) > 0) {
      outStream.write(buf, 0, n);
    }
    return outStream.toByteArray();
  } finally {
    inStream.close();
  }
}
var AWS_SERVICE = 's3'; | /**
* This signature allows the user to upload a file to the bucket that begins with a specific
* key (domain_localPadId_userId_), enforces only image uploads up to a max size of 20MB.
*/
function _getS3Policy(domain, localPadId, userId, expirationDate, utcDateStr) {
var isoDate = expirationDate.toISOString();
// Amazon wants two types of date strings, one like:
// "2015-04-28T00:36:03.092Z"
// and one like:
// "20150428T003603Z"
// :facepalm:
var alternateWTFAreYouSeriousISOAmazonDate = isoDate.replace(/[:\-]|\.\d{3}/g, '');
return {
"expiration": isoDate,
"conditions": [
{"bucket": getBucketName(appjet.config.s3Bucket)},
["starts-with", "$key", domain + '_' + localPadId + '_' + userId + '_'],
{"acl": "public-read"},
["starts-with", "$Content-Type", "image/"],
["content-length-range", 0, 1024*1024*20 /* 20 MB for animated gifs! */],
{"x-amz-credential":
appjet.config.awsUser + "/" +
utcDateStr + "/" +
appjet.config.s3Region + "/" +
AWS_SERVICE + "/" +
AWS_REQUEST
},
{"x-amz-algorithm": "AWS4-HMAC-SHA256"},
{"x-amz-date": alternateWTFAreYouSeriousISOAmazonDate }
]
};
}
/**
* We must sign requests to Amazon, otherwise you could have people arbitrarily:
* - uploading files of random content types to our bucket
* - uploading files with whatever key they want
* - uploading files with an acl that they can choose
* - uploading files of unbounded size
*/
function getS3PolicyAndSig(domain, localPadId, userId) {
var expirationDate = new Date();
expirationDate.setDate(expirationDate.getDate() + 1);
function pad(n) { return n < 10 ? '0' + n.toString() : n.toString() }
var utcDateStr = pad(expirationDate.getUTCFullYear()) +
pad(expirationDate.getUTCMonth() + 1) +
pad(expirationDate.getUTCDate());
var s3Policy = crypto.convertToBase64(JSON.stringify(
_getS3Policy(domain, localPadId, userId, expirationDate, utcDateStr)));
var AWSAccessKeyId = appjet.config.awsUser;
var AWSSecretAccessKey = appjet.config.awsPass;
var awsSecretKey = new java.lang.String("AWS4" + AWSSecretAccessKey).getBytes("UTF8");
var dateKey = crypto.signString(awsSecretKey, utcDateStr);
var dateRegionKey = crypto.signString(dateKey, appjet.config.s3Region);
var dateRegionServiceKey = crypto.signString(dateRegionKey, AWS_SERVICE);
var signingKey = crypto.signString(dateRegionServiceKey, AWS_REQUEST);
var signature = crypto.signString(signingKey, s3Policy);
// XXX Amazon's documentation lies. I've emailed them to correct it.
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-UsingHTTPPOST.html
// says to take the final result and base64-encode it.
// But what they really want is *hex* encoding, not base64.
// See also: http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-post-example.html
// and: http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-other
return {
s3Policy: s3Policy,
s3PolicySig: Hex.encodeHexString(signature)
};
} | var AWS_REQUEST = 'aws4_request'; | random_line_split |
test_crossdomain.py | # Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.object_storage import base
from tempest.common import custom_matchers
from tempest.common import utils
from tempest.lib import decorators
class CrossdomainTest(base.BaseObjectTest):
@classmethod
def resource_setup(cls):
super(CrossdomainTest, cls).resource_setup()
cls.xml_start = '<?xml version="1.0"?>\n' \
'<!DOCTYPE cross-domain-policy SYSTEM ' \
'"http://www.adobe.com/xml/dtds/cross-domain-policy.' \
'dtd" >\n<cross-domain-policy>\n'
cls.xml_end = "</cross-domain-policy>"
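        # An illustrative, hypothetical example of a complete crossdomain.xml
        # body matching the prefix/suffix checked below; the allow-access-from
        # line is a common Swift default and is not asserted by this test:
        #
        #   <?xml version="1.0"?>
        #   <!DOCTYPE cross-domain-policy SYSTEM
        #       "http://www.adobe.com/xml/dtds/cross-domain-policy.dtd" >
        #   <cross-domain-policy>
        #       <allow-access-from domain="*" secure="false" />
        #   </cross-domain-policy>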
def setUp(self):
super(CrossdomainTest, self).setUp()
@decorators.idempotent_id('d1b8b031-b622-4010-82f9-ff78a9e915c7')
@utils.requires_ext(extension='crossdomain', service='object')
def | (self):
url = self.account_client._get_base_version_url() + "crossdomain.xml"
resp, body = self.account_client.raw_request(url, "GET")
self.account_client._error_checker(resp, body)
body = body.decode()
self.assertTrue(body.startswith(self.xml_start) and
body.endswith(self.xml_end))
# The target of the request is not any Swift resource. Therefore, the
        # existence of response headers is checked without a custom matcher.
self.assertIn('content-length', resp)
self.assertIn('content-type', resp)
self.assertIn('x-trans-id', resp)
self.assertIn('date', resp)
# Check only the format of common headers with custom matcher
self.assertThat(resp, custom_matchers.AreAllWellFormatted())
| test_get_crossdomain_policy | identifier_name |
test_crossdomain.py | # Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.object_storage import base
from tempest.common import custom_matchers
from tempest.common import utils
from tempest.lib import decorators
class CrossdomainTest(base.BaseObjectTest):
@classmethod
def resource_setup(cls):
super(CrossdomainTest, cls).resource_setup()
cls.xml_start = '<?xml version="1.0"?>\n' \
'<!DOCTYPE cross-domain-policy SYSTEM ' \
'"http://www.adobe.com/xml/dtds/cross-domain-policy.' \
'dtd" >\n<cross-domain-policy>\n'
cls.xml_end = "</cross-domain-policy>"
def setUp(self):
super(CrossdomainTest, self).setUp()
@decorators.idempotent_id('d1b8b031-b622-4010-82f9-ff78a9e915c7')
@utils.requires_ext(extension='crossdomain', service='object')
def test_get_crossdomain_policy(self):
| url = self.account_client._get_base_version_url() + "crossdomain.xml"
resp, body = self.account_client.raw_request(url, "GET")
self.account_client._error_checker(resp, body)
body = body.decode()
self.assertTrue(body.startswith(self.xml_start) and
body.endswith(self.xml_end))
# The target of the request is not any Swift resource. Therefore, the
        # existence of response headers is checked without a custom matcher.
self.assertIn('content-length', resp)
self.assertIn('content-type', resp)
self.assertIn('x-trans-id', resp)
self.assertIn('date', resp)
# Check only the format of common headers with custom matcher
self.assertThat(resp, custom_matchers.AreAllWellFormatted()) | identifier_body |
|
test_crossdomain.py | # Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.object_storage import base
from tempest.common import custom_matchers
from tempest.common import utils
from tempest.lib import decorators
class CrossdomainTest(base.BaseObjectTest):
@classmethod
def resource_setup(cls):
super(CrossdomainTest, cls).resource_setup()
cls.xml_start = '<?xml version="1.0"?>\n' \
'<!DOCTYPE cross-domain-policy SYSTEM ' \
'"http://www.adobe.com/xml/dtds/cross-domain-policy.' \
'dtd" >\n<cross-domain-policy>\n'
cls.xml_end = "</cross-domain-policy>"
def setUp(self):
super(CrossdomainTest, self).setUp()
@decorators.idempotent_id('d1b8b031-b622-4010-82f9-ff78a9e915c7')
@utils.requires_ext(extension='crossdomain', service='object')
def test_get_crossdomain_policy(self):
url = self.account_client._get_base_version_url() + "crossdomain.xml"
resp, body = self.account_client.raw_request(url, "GET")
self.account_client._error_checker(resp, body)
body = body.decode()
self.assertTrue(body.startswith(self.xml_start) and
body.endswith(self.xml_end))
# The target of the request is not any Swift resource. Therefore, the | self.assertIn('content-length', resp)
self.assertIn('content-type', resp)
self.assertIn('x-trans-id', resp)
self.assertIn('date', resp)
# Check only the format of common headers with custom matcher
self.assertThat(resp, custom_matchers.AreAllWellFormatted()) | # existence of response header is checked without a custom matcher. | random_line_split |
data_types.py | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Various custom data types for use throughout the unexpected pass finder."""
from __future__ import print_function
import collections
import copy
import fnmatch
import logging
import six
FULL_PASS = 1
NEVER_PASS = 2
PARTIAL_PASS = 3
# Allow different unexpected pass finder implementations to register custom
# data types if necessary. These are set to the base versions at the end of the
# file.
Expectation = None
Result = None
BuildStats = None
TestExpectationMap = None
def SetExpectationImplementation(impl):
global Expectation
assert issubclass(impl, BaseExpectation)
Expectation = impl
def SetResultImplementation(impl):
global Result
assert issubclass(impl, BaseResult)
Result = impl
def SetBuildStatsImplementation(impl):
global BuildStats
assert issubclass(impl, BaseBuildStats)
BuildStats = impl
def SetTestExpectationMapImplementation(impl):
global TestExpectationMap
assert issubclass(impl, BaseTestExpectationMap)
TestExpectationMap = impl
class BaseExpectation(object):
"""Container for a test expectation.
Similar to typ's expectations_parser.Expectation class, but with unnecessary
data stripped out and made hashable.
The data contained in an Expectation is equivalent to a single line in an
expectation file.
"""
def __init__(self, test, tags, expected_results, bug=None):
self.test = test
self.tags = frozenset(tags)
self.bug = bug or ''
if isinstance(expected_results, str):
self.expected_results = frozenset([expected_results])
else:
self.expected_results = frozenset(expected_results)
# We're going to be making a lot of comparisons, and fnmatch is *much*
# slower (~40x from rough testing) than a straight comparison, so only use
# it if necessary.
if '*' in test:
self._comp = self._CompareWildcard
else:
self._comp = self._CompareNonWildcard
def __eq__(self, other):
return (isinstance(other, BaseExpectation) and self.test == other.test
and self.tags == other.tags
and self.expected_results == other.expected_results
and self.bug == other.bug)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.test, self.tags, self.expected_results, self.bug))
def _CompareWildcard(self, result_test_name):
return fnmatch.fnmatch(result_test_name, self.test)
def _CompareNonWildcard(self, result_test_name):
return result_test_name == self.test
def AppliesToResult(self, result):
"""Checks whether this expectation should have applied to |result|.
An expectation applies to a result if the test names match (including
wildcard expansion) and the expectation's tags are a subset of the result's
tags.
Args:
result: A Result instance to check against.
Returns:
True if |self| applies to |result|, otherwise False.
"""
assert isinstance(result, BaseResult)
return (self._comp(result.test) and self.tags <= result.tags)
def MaybeAppliesToTest(self, test_name):
"""Similar to AppliesToResult, but used to do initial filtering.
Args:
test_name: A string containing the name of a test.
Returns:
True if |self| could apply to a test named |test_name|, otherwise False.
"""
return self._comp(test_name)
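# A hedged illustration of the matching rules above; the test names, tags, and
# build ID below are invented for the example.
def _example_applies_to_result():
  e = BaseExpectation('foo/bar_*', ['win'], 'Failure')
  r = BaseResult('foo/bar_baz', ['win', 'nvidia'], 'Failure', 'step', '123')
  # True: the wildcard test name matches and {'win'} is a subset of the
  # result's tags.
  return e.AppliesToResult(r)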
class BaseResult(object):
"""Container for a test result.
Contains the minimal amount of data necessary to describe/identify a result
from ResultDB for the purposes of the unexpected pass finder.
"""
def __init__(self, test, tags, actual_result, step, build_id):
"""
Args:
test: A string containing the name of the test.
tags: An iterable containing the typ tags for the result.
actual_result: The actual result of the test as a string.
step: A string containing the name of the step on the builder.
build_id: A string containing the Buildbucket ID for the build this result
came from.
"""
self.test = test
self.tags = frozenset(tags)
self.actual_result = actual_result
self.step = step
self.build_id = build_id
def __eq__(self, other):
return (isinstance(other, BaseResult) and self.test == other.test
and self.tags == other.tags
and self.actual_result == other.actual_result
and self.step == other.step and self.build_id == other.build_id)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(
(self.test, self.tags, self.actual_result, self.step, self.build_id))
class BaseBuildStats(object):
"""Container for keeping track of a builder's pass/fail stats."""
def __init__(self):
self.passed_builds = 0
self.total_builds = 0
self.failure_links = frozenset()
@property
def failed_builds(self):
return self.total_builds - self.passed_builds
@property
def did_fully_pass(self):
return self.passed_builds == self.total_builds
@property
def did_never_pass(self):
return self.failed_builds == self.total_builds
def AddPassedBuild(self):
self.passed_builds += 1
self.total_builds += 1
def AddFailedBuild(self, build_id):
self.total_builds += 1
build_link = BuildLinkFromBuildId(build_id)
self.failure_links = frozenset([build_link]) | self.failure_links
def GetStatsAsString(self):
return '(%d/%d passed)' % (self.passed_builds, self.total_builds)
def NeverNeededExpectation(self, expectation): # pylint:disable=unused-argument
"""Returns whether the results tallied in |self| never needed |expectation|.
Args:
expectation: An Expectation object that |stats| is located under.
Returns:
True if all the results tallied in |self| would have passed without
|expectation| being present. Otherwise, False.
"""
return self.did_fully_pass
def AlwaysNeededExpectation(self, expectation): # pylint:disable=unused-argument
"""Returns whether the results tallied in |self| always needed |expectation.
Args:
expectation: An Expectation object that |stats| is located under.
Returns:
True if all the results tallied in |self| would have failed without
|expectation| being present. Otherwise, False.
"""
return self.did_never_pass
def __eq__(self, other):
return (isinstance(other, BuildStats)
and self.passed_builds == other.passed_builds
and self.total_builds == other.total_builds
and self.failure_links == other.failure_links)
def __ne__(self, other):
return not self.__eq__(other)
def BuildLinkFromBuildId(build_id):
return 'http://ci.chromium.org/b/%s' % build_id
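# A minimal, hedged sketch of how pass/fail tallies accumulate; the build ID
# below is invented.
def _example_build_stats():
  stats = BaseBuildStats()
  stats.AddPassedBuild()
  stats.AddFailedBuild('8900000000000')
  # Yields '(1/2 passed)' and a single ci.chromium.org failure link.
  return stats.GetStatsAsString(), sorted(stats.failure_links)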
# These explicit overrides could likely be replaced by using regular dicts with
# type hinting in Python 3. Based on https://stackoverflow.com/a/2588648, this
# should cover all cases where the dict can be modified.
class BaseTypedMap(dict):
"""A base class for typed dictionaries.
  Any child class that overrides __setitem__ will have all modifications to
  the dictionary go through the type checking in __setitem__.
"""
def __init__(self, *args, **kwargs): # pylint:disable=super-init-not-called
self.update(*args, **kwargs)
def update(self, *args, **kwargs):
if args:
assert len(args) == 1
other = dict(args[0])
for k, v in other.items():
self[k] = v
for k, v in kwargs.items():
self[k] = v
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
def _value_type(self):
raise NotImplementedError()
def IterToValueType(self, value_type):
"""Recursively iterates over contents until |value_type| is found.
Used to get rid of nested loops, instead using a single loop that
automatically iterates through all the contents at a certain depth.
Args:
value_type: The type to recurse to and then iterate over. For example,
"BuilderStepMap" would result in iterating over the BuilderStepMap
values, meaning that the returned generator would create tuples in the
form (test_name, expectation, builder_map).
Returns:
A generator that yields tuples. The length and content of the tuples will
vary depending on |value_type|. For example, using "BuilderStepMap" would
result in tuples of the form (test_name, expectation, builder_map), while
"BuildStats" would result in (test_name, expectation, builder_name,
step_name, build_stats).
"""
if self._value_type() == value_type:
for k, v in self.items():
yield k, v
else:
for k, v in self.items():
for nested_value in v.IterToValueType(value_type):
yield (k, ) + nested_value
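  # Hedged usage sketch: assuming |test_map| is a populated TestExpectationMap
  # whose nesting matches the class docstring, a single loop reaches every
  # BuildStats leaf:
  #
  #   for ef, exp, builder, step, stats in test_map.IterToValueType(BuildStats):
  #     print(ef, exp.test, builder, step, stats.GetStatsAsString())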
def Merge(self, other_map, reference_map=None):
"""Merges |other_map| into self.
Args:
other_map: A BaseTypedMap whose contents will be merged into self.
reference_map: A dict containing the information that was originally in
self. Used for ensuring that a single expectation/builder/step
combination is only ever updated once. If None, a copy of self will be
used.
"""
assert isinstance(other_map, self.__class__)
# We should only ever encounter a single updated BuildStats for an
# expectation/builder/step combination. Use the reference map to determine
# if a particular BuildStats has already been updated or not.
reference_map = reference_map or copy.deepcopy(self)
for key, value in other_map.items():
if key not in self:
self[key] = value
else:
if isinstance(value, dict):
self[key].Merge(value, reference_map.get(key, {}))
else:
assert isinstance(value, BuildStats)
# Ensure we haven't updated this BuildStats already. If the reference
# map doesn't have a corresponding BuildStats, then base_map shouldn't
# have initially either, and thus it would have been added before
# reaching this point. Otherwise, the two values must match, meaning
# that base_map's BuildStats hasn't been updated yet.
reference_stats = reference_map.get(key, None)
assert reference_stats is not None
assert reference_stats == self[key]
self[key] = value
class BaseTestExpectationMap(BaseTypedMap): | """Typed map for string types -> ExpectationBuilderMap.
This results in a dict in the following format:
{
expectation_file1 (str): {
expectation1 (data_types.Expectation): {
builder_name1 (str): {
step_name1 (str): stats1 (data_types.BuildStats),
step_name2 (str): stats2 (data_types.BuildStats),
...
},
builder_name2 (str): { ... },
},
expectation2 (data_types.Expectation): { ... },
...
},
expectation_file2 (str): { ... },
...
}
"""
def __setitem__(self, key, value):
assert IsStringType(key)
assert isinstance(value, ExpectationBuilderMap)
super(BaseTestExpectationMap, self).__setitem__(key, value)
def _value_type(self):
return ExpectationBuilderMap
def IterBuilderStepMaps(self):
"""Iterates over all BuilderStepMaps contained in the map.
Returns:
A generator yielding tuples in the form (expectation_file (str),
expectation (Expectation), builder_map (BuilderStepMap))
"""
return self.IterToValueType(BuilderStepMap)
def AddResultList(self, builder, results, expectation_files=None):
"""Adds |results| to |self|.
Args:
builder: A string containing the builder |results| came from. Should be
prefixed with something to distinguish between identically named CI
and try builders.
results: A list of data_types.Result objects corresponding to the ResultDB
data queried for |builder|.
expectation_files: An iterable of expectation file names that these
results could possibly apply to. If None, then expectations from all
known expectation files will be used.
Returns:
A list of data_types.Result objects who did not have a matching
expectation in |self|.
"""
failure_results = set()
pass_results = set()
unmatched_results = []
for r in results:
if r.actual_result == 'Pass':
pass_results.add(r)
else:
failure_results.add(r)
# Remove any cases of failure -> pass from the passing set. If a test is
# flaky, we get both pass and failure results for it, so we need to remove
# the any cases of a pass result having a corresponding, earlier failure
# result.
modified_failing_retry_results = set()
for r in failure_results:
modified_failing_retry_results.add(
Result(r.test, r.tags, 'Pass', r.step, r.build_id))
pass_results -= modified_failing_retry_results
# Group identically named results together so we reduce the number of
# comparisons we have to make.
all_results = pass_results | failure_results
grouped_results = collections.defaultdict(list)
for r in all_results:
grouped_results[r.test].append(r)
matched_results = self._AddGroupedResults(grouped_results, builder,
expectation_files)
unmatched_results = list(all_results - matched_results)
return unmatched_results
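  # A descriptive illustration of the de-duplication above: if one build
  # reports both a Failure and a retry Pass for the same test and step, the
  # synthesized Pass key removes that Pass from |pass_results|, so the flaky
  # run is tallied as a failure rather than double-counted.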
def _AddGroupedResults(self, grouped_results, builder, expectation_files):
"""Adds all results in |grouped_results| to |self|.
Args:
grouped_results: A dict mapping test name (str) to a list of
data_types.Result objects for that test.
builder: A string containing the name of the builder |grouped_results|
came from.
expectation_files: An iterable of expectation file names that these
results could possibly apply to. If None, then expectations from all
known expectation files will be used.
Returns:
A set of data_types.Result objects that had at least one matching
expectation.
"""
matched_results = set()
for test_name, result_list in grouped_results.items():
for ef, expectation_map in self.items():
if expectation_files is not None and ef not in expectation_files:
continue
for expectation, builder_map in expectation_map.items():
if not expectation.MaybeAppliesToTest(test_name):
continue
for r in result_list:
if expectation.AppliesToResult(r):
matched_results.add(r)
step_map = builder_map.setdefault(builder, StepBuildStatsMap())
stats = step_map.setdefault(r.step, BuildStats())
self._AddSingleResult(r, stats)
return matched_results
def _AddSingleResult(self, result, stats):
"""Adds |result| to |self|.
Args:
result: A data_types.Result object to add.
stats: A data_types.BuildStats object to add the result to.
"""
if result.actual_result == 'Pass':
stats.AddPassedBuild()
else:
stats.AddFailedBuild(result.build_id)
def SplitByStaleness(self):
"""Separates stored data based on expectation staleness.
Returns:
Three TestExpectationMaps (stale_dict, semi_stale_dict, active_dict). All
three combined contain the information of |self|. |stale_dict| contains
entries for expectations that are no longer being helpful,
|semi_stale_dict| contains entries for expectations that might be
removable or modifiable, but have at least one failed test run.
|active_dict| contains entries for expectations that are preventing
failures on all builders they're active on, and thus shouldn't be removed.
"""
stale_dict = TestExpectationMap()
semi_stale_dict = TestExpectationMap()
active_dict = TestExpectationMap()
# This initially looks like a good target for using
# TestExpectationMap's iterators since there are many nested loops.
# However, we need to reset state in different loops, and the alternative of
# keeping all the state outside the loop and resetting under certain
# conditions ends up being less readable than just using nested loops.
for expectation_file, expectation_map in self.items():
for expectation, builder_map in expectation_map.items():
# A temporary map to hold data so we can later determine whether an
# expectation is stale, semi-stale, or active.
tmp_map = {
FULL_PASS: BuilderStepMap(),
NEVER_PASS: BuilderStepMap(),
PARTIAL_PASS: BuilderStepMap(),
}
split_stats_map = builder_map.SplitBuildStatsByPass(expectation)
for builder_name, (fully_passed, never_passed,
partially_passed) in split_stats_map.items():
if fully_passed:
tmp_map[FULL_PASS][builder_name] = fully_passed
if never_passed:
tmp_map[NEVER_PASS][builder_name] = never_passed
if partially_passed:
tmp_map[PARTIAL_PASS][builder_name] = partially_passed
def _CopyPassesIntoBuilderMap(builder_map, pass_types):
for pt in pass_types:
for builder, steps in tmp_map[pt].items():
builder_map.setdefault(builder, StepBuildStatsMap()).update(steps)
# Handle the case of a stale expectation.
if not (tmp_map[NEVER_PASS] or tmp_map[PARTIAL_PASS]):
builder_map = stale_dict.setdefault(
expectation_file,
ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
_CopyPassesIntoBuilderMap(builder_map, [FULL_PASS])
# Handle the case of an active expectation.
elif not tmp_map[FULL_PASS]:
builder_map = active_dict.setdefault(
expectation_file,
ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
_CopyPassesIntoBuilderMap(builder_map, [NEVER_PASS, PARTIAL_PASS])
# Handle the case of a semi-stale expectation.
else:
# TODO(crbug.com/998329): Sort by pass percentage so it's easier to
# find problematic builders without highlighting.
builder_map = semi_stale_dict.setdefault(
expectation_file,
ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
_CopyPassesIntoBuilderMap(builder_map,
[FULL_PASS, PARTIAL_PASS, NEVER_PASS])
return stale_dict, semi_stale_dict, active_dict
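  # Hedged usage sketch of the three-way split above:
  #
  #   stale, semi_stale, active = test_map.SplitByStaleness()
  #   # |stale| entries always passed and are candidates for removal;
  #   # |active| entries never fully passed anywhere and should be kept.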
def FilterOutUnusedExpectations(self):
"""Filters out any unused Expectations from stored data.
An Expectation is considered unused if its corresponding dictionary is
empty. If removing Expectations results in a top-level test key having an
empty dictionary, that test entry will also be removed.
Returns:
A dict from expectation file name (str) to set of unused expectations
(str) from that file.
"""
logging.info('Filtering out unused expectations')
unused = collections.defaultdict(list)
unused_count = 0
for (expectation_file, expectation,
builder_map) in self.IterBuilderStepMaps():
if not builder_map:
unused[expectation_file].append(expectation)
unused_count += 1
for expectation_file, expectations in unused.items():
for e in expectations:
del self[expectation_file][e]
logging.debug('Found %d unused expectations', unused_count)
empty_files = []
for expectation_file, expectation_map in self.items():
if not expectation_map:
empty_files.append(expectation_file)
for empty in empty_files:
del self[empty]
logging.debug('Found %d empty files: %s', len(empty_files), empty_files)
return unused
class ExpectationBuilderMap(BaseTypedMap):
"""Typed map for Expectation -> BuilderStepMap."""
def __setitem__(self, key, value):
assert isinstance(key, BaseExpectation)
assert isinstance(value, self._value_type())
super(ExpectationBuilderMap, self).__setitem__(key, value)
def _value_type(self):
return BuilderStepMap
class BuilderStepMap(BaseTypedMap):
"""Typed map for string types -> StepBuildStatsMap."""
def __setitem__(self, key, value):
assert IsStringType(key)
assert isinstance(value, self._value_type())
super(BuilderStepMap, self).__setitem__(key, value)
def _value_type(self):
return StepBuildStatsMap
def SplitBuildStatsByPass(self, expectation):
"""Splits the underlying BuildStats data by passing-ness.
Args:
expectation: The Expectation that this BuilderStepMap is located under.
Returns:
A dict mapping builder name to a tuple (fully_passed, never_passed,
partially_passed). Each *_passed is a StepBuildStatsMap containing data
for the steps that either fully passed on all builds, never passed on any
builds, or passed some of the time.
"""
retval = {}
for builder_name, step_map in self.items():
fully_passed = StepBuildStatsMap()
never_passed = StepBuildStatsMap()
partially_passed = StepBuildStatsMap()
for step_name, stats in step_map.items():
if stats.NeverNeededExpectation(expectation):
assert step_name not in fully_passed
fully_passed[step_name] = stats
elif stats.AlwaysNeededExpectation(expectation):
assert step_name not in never_passed
never_passed[step_name] = stats
else:
assert step_name not in partially_passed
partially_passed[step_name] = stats
retval[builder_name] = (fully_passed, never_passed, partially_passed)
return retval
def IterBuildStats(self):
"""Iterates over all BuildStats contained in the map.
Returns:
A generator yielding tuples in the form (builder_name (str), step_name
(str), build_stats (BuildStats)).
"""
return self.IterToValueType(BuildStats)
class StepBuildStatsMap(BaseTypedMap):
"""Typed map for string types -> BuildStats"""
def __setitem__(self, key, value):
assert IsStringType(key)
assert isinstance(value, self._value_type())
super(StepBuildStatsMap, self).__setitem__(key, value)
def _value_type(self):
return BuildStats
def IsStringType(s):
return isinstance(s, six.string_types)
Expectation = BaseExpectation
Result = BaseResult
BuildStats = BaseBuildStats
TestExpectationMap = BaseTestExpectationMap | random_line_split |
|
data_types.py | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Various custom data types for use throughout the unexpected pass finder."""
from __future__ import print_function
import collections
import copy
import fnmatch
import logging
import six
FULL_PASS = 1
NEVER_PASS = 2
PARTIAL_PASS = 3
# Allow different unexpected pass finder implementations to register custom
# data types if necessary. These are set to the base versions at the end of the
# file.
Expectation = None
Result = None
BuildStats = None
TestExpectationMap = None
def SetExpectationImplementation(impl):
global Expectation
assert issubclass(impl, BaseExpectation)
Expectation = impl
def SetResultImplementation(impl):
global Result
assert issubclass(impl, BaseResult)
Result = impl
def SetBuildStatsImplementation(impl):
global BuildStats
assert issubclass(impl, BaseBuildStats)
BuildStats = impl
def SetTestExpectationMapImplementation(impl):
global TestExpectationMap
assert issubclass(impl, BaseTestExpectationMap)
TestExpectationMap = impl
class BaseExpectation(object):
"""Container for a test expectation.
Similar to typ's expectations_parser.Expectation class, but with unnecessary
data stripped out and made hashable.
The data contained in an Expectation is equivalent to a single line in an
expectation file.
"""
def __init__(self, test, tags, expected_results, bug=None):
self.test = test
self.tags = frozenset(tags)
self.bug = bug or ''
if isinstance(expected_results, str):
self.expected_results = frozenset([expected_results])
else:
self.expected_results = frozenset(expected_results)
# We're going to be making a lot of comparisons, and fnmatch is *much*
# slower (~40x from rough testing) than a straight comparison, so only use
# it if necessary.
if '*' in test:
self._comp = self._CompareWildcard
else:
self._comp = self._CompareNonWildcard
def __eq__(self, other):
return (isinstance(other, BaseExpectation) and self.test == other.test
and self.tags == other.tags
and self.expected_results == other.expected_results
and self.bug == other.bug)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.test, self.tags, self.expected_results, self.bug))
def _CompareWildcard(self, result_test_name):
return fnmatch.fnmatch(result_test_name, self.test)
def _CompareNonWildcard(self, result_test_name):
return result_test_name == self.test
def AppliesToResult(self, result):
"""Checks whether this expectation should have applied to |result|.
An expectation applies to a result if the test names match (including
wildcard expansion) and the expectation's tags are a subset of the result's
tags.
Args:
result: A Result instance to check against.
Returns:
True if |self| applies to |result|, otherwise False.
"""
assert isinstance(result, BaseResult)
return (self._comp(result.test) and self.tags <= result.tags)
def MaybeAppliesToTest(self, test_name):
"""Similar to AppliesToResult, but used to do initial filtering.
Args:
test_name: A string containing the name of a test.
Returns:
True if |self| could apply to a test named |test_name|, otherwise False.
"""
return self._comp(test_name)
class BaseResult(object):
"""Container for a test result.
Contains the minimal amount of data necessary to describe/identify a result
from ResultDB for the purposes of the unexpected pass finder.
"""
def __init__(self, test, tags, actual_result, step, build_id):
"""
Args:
test: A string containing the name of the test.
tags: An iterable containing the typ tags for the result.
actual_result: The actual result of the test as a string.
step: A string containing the name of the step on the builder.
build_id: A string containing the Buildbucket ID for the build this result
came from.
"""
self.test = test
self.tags = frozenset(tags)
self.actual_result = actual_result
self.step = step
self.build_id = build_id
def __eq__(self, other):
return (isinstance(other, BaseResult) and self.test == other.test
and self.tags == other.tags
and self.actual_result == other.actual_result
and self.step == other.step and self.build_id == other.build_id)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(
(self.test, self.tags, self.actual_result, self.step, self.build_id))
class BaseBuildStats(object):
"""Container for keeping track of a builder's pass/fail stats."""
def __init__(self):
self.passed_builds = 0
self.total_builds = 0
self.failure_links = frozenset()
@property
def failed_builds(self):
return self.total_builds - self.passed_builds
@property
def did_fully_pass(self):
return self.passed_builds == self.total_builds
@property
def did_never_pass(self):
return self.failed_builds == self.total_builds
def AddPassedBuild(self):
self.passed_builds += 1
self.total_builds += 1
def AddFailedBuild(self, build_id):
self.total_builds += 1
build_link = BuildLinkFromBuildId(build_id)
self.failure_links = frozenset([build_link]) | self.failure_links
def GetStatsAsString(self):
return '(%d/%d passed)' % (self.passed_builds, self.total_builds)
def NeverNeededExpectation(self, expectation): # pylint:disable=unused-argument
"""Returns whether the results tallied in |self| never needed |expectation|.
Args:
expectation: An Expectation object that |stats| is located under.
Returns:
True if all the results tallied in |self| would have passed without
|expectation| being present. Otherwise, False.
"""
return self.did_fully_pass
def AlwaysNeededExpectation(self, expectation): # pylint:disable=unused-argument
"""Returns whether the results tallied in |self| always needed |expectation.
Args:
expectation: An Expectation object that |stats| is located under.
Returns:
True if all the results tallied in |self| would have failed without
|expectation| being present. Otherwise, False.
"""
return self.did_never_pass
def __eq__(self, other):
return (isinstance(other, BuildStats)
and self.passed_builds == other.passed_builds
and self.total_builds == other.total_builds
and self.failure_links == other.failure_links)
def __ne__(self, other):
return not self.__eq__(other)
def BuildLinkFromBuildId(build_id):
return 'http://ci.chromium.org/b/%s' % build_id
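# Tallying sketch (made-up build ID): two builds, one pass and one failure.
#   stats = BaseBuildStats()
#   stats.AddPassedBuild()
#   stats.AddFailedBuild('1234567890')
#   stats.GetStatsAsString()  # '(1/2 passed)'
#   stats.failure_links       # frozenset({'http://ci.chromium.org/b/1234567890'})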
# These explicit overrides could likely be replaced by using regular dicts with
# type hinting in Python 3. Based on https://stackoverflow.com/a/2588648, this
# should cover all cases where the dict can be modified.
class BaseTypedMap(dict):
"""A base class for typed dictionaries.
Any child classes that override __setitem__ will have any modifications to the
dictionary go through the type checking in __setitem__.
"""
def __init__(self, *args, **kwargs): # pylint:disable=super-init-not-called
self.update(*args, **kwargs)
def update(self, *args, **kwargs):
if args:
assert len(args) == 1
other = dict(args[0])
for k, v in other.items():
self[k] = v
for k, v in kwargs.items():
self[k] = v
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
def _value_type(self):
raise NotImplementedError()
def IterToValueType(self, value_type):
"""Recursively iterates over contents until |value_type| is found.
Used to get rid of nested loops, instead using a single loop that
automatically iterates through all the contents at a certain depth.
Args:
value_type: The type to recurse to and then iterate over. For example,
"BuilderStepMap" would result in iterating over the BuilderStepMap
values, meaning that the returned generator would create tuples in the
form (expectation_file, expectation, builder_map).
Returns:
A generator that yields tuples. The length and content of the tuples will
vary depending on |value_type|. For example, using "BuilderStepMap" would
result in tuples of the form (expectation_file, expectation, builder_map),
while "BuildStats" would result in (expectation_file, expectation,
builder_name, step_name, build_stats).
"""
if self._value_type() == value_type:
for k, v in self.items():
yield k, v
else:
for k, v in self.items():
for nested_value in v.IterToValueType(value_type):
yield (k, ) + nested_value
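# Flattening sketch: starting from a full BaseTestExpectationMap |m|,
# recursing all the way down to BuildStats yields 5-tuples in one loop:
#   for ef, exp, builder, step, stats in m.IterToValueType(BuildStats):
#     print(ef, exp.test, builder, step, stats.GetStatsAsString())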
def Merge(self, other_map, reference_map=None):
"""Merges |other_map| into self.
Args:
other_map: A BaseTypedMap whose contents will be merged into self.
reference_map: A dict containing the information that was originally in
self. Used for ensuring that a single expectation/builder/step
combination is only ever updated once. If None, a copy of self will be
used.
"""
assert isinstance(other_map, self.__class__)
# We should only ever encounter a single updated BuildStats for an
# expectation/builder/step combination. Use the reference map to determine
# if a particular BuildStats has already been updated or not.
reference_map = reference_map or copy.deepcopy(self)
for key, value in other_map.items():
if key not in self:
self[key] = value
else:
if isinstance(value, dict):
self[key].Merge(value, reference_map.get(key, {}))
else:
assert isinstance(value, BuildStats)
# Ensure we haven't updated this BuildStats already. If the reference
# map doesn't have a corresponding BuildStats, then |self| shouldn't have
# had one initially either, and thus it would have been added before
# reaching this point. Otherwise, the two values must match, meaning
# that the BuildStats in |self| hasn't been updated yet.
reference_stats = reference_map.get(key, None)
assert reference_stats is not None
assert reference_stats == self[key]
self[key] = value
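# Merge sketch (hypothetical BuildStats values): disjoint keys are copied,
# shared dict values recurse, and a shared BuildStats leaf is only replaced
# after the reference check confirms it still holds its pre-merge value.
#   base = StepBuildStatsMap({'step_a': stats_a})
#   base.Merge(StepBuildStatsMap({'step_a': updated_a, 'step_b': stats_b}))
#   # 'step_a' is replaced with updated_a, 'step_b' is added.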
class BaseTestExpectationMap(BaseTypedMap):
"""Typed map for string types -> ExpectationBuilderMap.
This results in a dict in the following format:
{
expectation_file1 (str): {
expectation1 (data_types.Expectation): {
builder_name1 (str): {
step_name1 (str): stats1 (data_types.BuildStats),
step_name2 (str): stats2 (data_types.BuildStats),
...
},
builder_name2 (str): { ... },
},
expectation2 (data_types.Expectation): { ... },
...
},
expectation_file2 (str): { ... },
...
}
"""
def __setitem__(self, key, value):
assert IsStringType(key)
assert isinstance(value, ExpectationBuilderMap)
super(BaseTestExpectationMap, self).__setitem__(key, value)
def _value_type(self):
return ExpectationBuilderMap
def IterBuilderStepMaps(self):
"""Iterates over all BuilderStepMaps contained in the map.
Returns:
A generator yielding tuples in the form (expectation_file (str),
expectation (Expectation), builder_map (BuilderStepMap))
"""
return self.IterToValueType(BuilderStepMap)
def AddResultList(self, builder, results, expectation_files=None):
"""Adds |results| to |self|.
Args:
builder: A string containing the builder |results| came from. Should be
prefixed with something to distinguish between identically named CI
and try builders.
results: A list of data_types.Result objects corresponding to the ResultDB
data queried for |builder|.
expectation_files: An iterable of expectation file names that these
results could possibly apply to. If None, then expectations from all
known expectation files will be used.
Returns:
A list of data_types.Result objects that did not have a matching
expectation in |self|.
"""
failure_results = set()
pass_results = set()
unmatched_results = []
for r in results:
if r.actual_result == 'Pass':
pass_results.add(r)
else:
failure_results.add(r)
# Remove any cases of failure -> pass from the passing set. If a test is
# flaky, we get both pass and failure results for it, so we need to remove
# any cases of a pass result having a corresponding, earlier failure
# result.
modified_failing_retry_results = set()
for r in failure_results:
modified_failing_retry_results.add(
Result(r.test, r.tags, 'Pass', r.step, r.build_id))
pass_results -= modified_failing_retry_results
# Group identically named results together so we reduce the number of
# comparisons we have to make.
all_results = pass_results | failure_results
grouped_results = collections.defaultdict(list)
for r in all_results:
grouped_results[r.test].append(r)
matched_results = self._AddGroupedResults(grouped_results, builder,
expectation_files)
unmatched_results = list(all_results - matched_results)
return unmatched_results
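# Retry-handling sketch (hypothetical data): a flaky test that failed and
# then passed on retry in the same build yields both
#   Result('t', tags, 'Failure', 'step', 'id') and
#   Result('t', tags, 'Pass', 'step', 'id').
# Rewriting each failure as a 'Pass' and subtracting that set from
# |pass_results| drops the retry pass, so the build is tallied as a failure.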
def | (self, grouped_results, builder, expectation_files):
"""Adds all results in |grouped_results| to |self|.
Args:
grouped_results: A dict mapping test name (str) to a list of
data_types.Result objects for that test.
builder: A string containing the name of the builder |grouped_results|
came from.
expectation_files: An iterable of expectation file names that these
results could possibly apply to. If None, then expectations from all
known expectation files will be used.
Returns:
A set of data_types.Result objects that had at least one matching
expectation.
"""
matched_results = set()
for test_name, result_list in grouped_results.items():
for ef, expectation_map in self.items():
if expectation_files is not None and ef not in expectation_files:
continue
for expectation, builder_map in expectation_map.items():
if not expectation.MaybeAppliesToTest(test_name):
continue
for r in result_list:
if expectation.AppliesToResult(r):
matched_results.add(r)
step_map = builder_map.setdefault(builder, StepBuildStatsMap())
stats = step_map.setdefault(r.step, BuildStats())
self._AddSingleResult(r, stats)
return matched_results
def _AddSingleResult(self, result, stats):
"""Adds |result| to |self|.
Args:
result: A data_types.Result object to add.
stats: A data_types.BuildStats object to add the result to.
"""
if result.actual_result == 'Pass':
stats.AddPassedBuild()
else:
stats.AddFailedBuild(result.build_id)
def SplitByStaleness(self):
"""Separates stored data based on expectation staleness.
Returns:
Three TestExpectationMaps (stale_dict, semi_stale_dict, active_dict). All
three combined contain the information of |self|. |stale_dict| contains
entries for expectations that are no longer helpful,
|semi_stale_dict| contains entries for expectations that might be
removable or modifiable, but have at least one failed test run.
|active_dict| contains entries for expectations that are preventing
failures on all builders they're active on, and thus shouldn't be removed.
"""
stale_dict = TestExpectationMap()
semi_stale_dict = TestExpectationMap()
active_dict = TestExpectationMap()
# This initially looks like a good target for using
# TestExpectationMap's iterators since there are many nested loops.
# However, we need to reset state in different loops, and the alternative of
# keeping all the state outside the loop and resetting under certain
# conditions ends up being less readable than just using nested loops.
for expectation_file, expectation_map in self.items():
for expectation, builder_map in expectation_map.items():
# A temporary map to hold data so we can later determine whether an
# expectation is stale, semi-stale, or active.
tmp_map = {
FULL_PASS: BuilderStepMap(),
NEVER_PASS: BuilderStepMap(),
PARTIAL_PASS: BuilderStepMap(),
}
split_stats_map = builder_map.SplitBuildStatsByPass(expectation)
for builder_name, (fully_passed, never_passed,
partially_passed) in split_stats_map.items():
if fully_passed:
tmp_map[FULL_PASS][builder_name] = fully_passed
if never_passed:
tmp_map[NEVER_PASS][builder_name] = never_passed
if partially_passed:
tmp_map[PARTIAL_PASS][builder_name] = partially_passed
def _CopyPassesIntoBuilderMap(builder_map, pass_types):
for pt in pass_types:
for builder, steps in tmp_map[pt].items():
builder_map.setdefault(builder, StepBuildStatsMap()).update(steps)
# Handle the case of a stale expectation.
if not (tmp_map[NEVER_PASS] or tmp_map[PARTIAL_PASS]):
builder_map = stale_dict.setdefault(
expectation_file,
ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
_CopyPassesIntoBuilderMap(builder_map, [FULL_PASS])
# Handle the case of an active expectation.
elif not tmp_map[FULL_PASS]:
builder_map = active_dict.setdefault(
expectation_file,
ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
_CopyPassesIntoBuilderMap(builder_map, [NEVER_PASS, PARTIAL_PASS])
# Handle the case of a semi-stale expectation.
else:
# TODO(crbug.com/998329): Sort by pass percentage so it's easier to
# find problematic builders without highlighting.
builder_map = semi_stale_dict.setdefault(
expectation_file,
ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
_CopyPassesIntoBuilderMap(builder_map,
[FULL_PASS, PARTIAL_PASS, NEVER_PASS])
return stale_dict, semi_stale_dict, active_dict
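# Classification sketch (hypothetical per-builder stats for one expectation):
#   builder_a (2/2 passed), builder_b (3/3 passed)  -> stale_dict
#   builder_a (0/2 passed), builder_b (1/3 passed)  -> active_dict
#   builder_a (2/2 passed), builder_b (0/3 passed)  -> semi_stale_dict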
def FilterOutUnusedExpectations(self):
"""Filters out any unused Expectations from stored data.
An Expectation is considered unused if its corresponding dictionary is
empty. If removing Expectations results in a top-level test key having an
empty dictionary, that test entry will also be removed.
Returns:
A dict from expectation file name (str) to a list of unused Expectations
from that file.
"""
logging.info('Filtering out unused expectations')
unused = collections.defaultdict(list)
unused_count = 0
for (expectation_file, expectation,
builder_map) in self.IterBuilderStepMaps():
if not builder_map:
unused[expectation_file].append(expectation)
unused_count += 1
for expectation_file, expectations in unused.items():
for e in expectations:
del self[expectation_file][e]
logging.debug('Found %d unused expectations', unused_count)
empty_files = []
for expectation_file, expectation_map in self.items():
if not expectation_map:
empty_files.append(expectation_file)
for empty in empty_files:
del self[empty]
logging.debug('Found %d empty files: %s', len(empty_files), empty_files)
return unused
class ExpectationBuilderMap(BaseTypedMap):
"""Typed map for Expectation -> BuilderStepMap."""
def __setitem__(self, key, value):
assert isinstance(key, BaseExpectation)
assert isinstance(value, self._value_type())
super(ExpectationBuilderMap, self).__setitem__(key, value)
def _value_type(self):
return BuilderStepMap
class BuilderStepMap(BaseTypedMap):
"""Typed map for string types -> StepBuildStatsMap."""
def __setitem__(self, key, value):
assert IsStringType(key)
assert isinstance(value, self._value_type())
super(BuilderStepMap, self).__setitem__(key, value)
def _value_type(self):
return StepBuildStatsMap
def SplitBuildStatsByPass(self, expectation):
"""Splits the underlying BuildStats data by passing-ness.
Args:
expectation: The Expectation that this BuilderStepMap is located under.
Returns:
A dict mapping builder name to a tuple (fully_passed, never_passed,
partially_passed). Each *_passed is a StepBuildStatsMap containing data
for the steps that either fully passed on all builds, never passed on any
builds, or passed some of the time.
"""
retval = {}
for builder_name, step_map in self.items():
fully_passed = StepBuildStatsMap()
never_passed = StepBuildStatsMap()
partially_passed = StepBuildStatsMap()
for step_name, stats in step_map.items():
if stats.NeverNeededExpectation(expectation):
assert step_name not in fully_passed
fully_passed[step_name] = stats
elif stats.AlwaysNeededExpectation(expectation):
assert step_name not in never_passed
never_passed[step_name] = stats
else:
assert step_name not in partially_passed
partially_passed[step_name] = stats
retval[builder_name] = (fully_passed, never_passed, partially_passed)
return retval
def IterBuildStats(self):
"""Iterates over all BuildStats contained in the map.
Returns:
A generator yielding tuples in the form (builder_name (str), step_name
(str), build_stats (BuildStats)).
"""
return self.IterToValueType(BuildStats)
class StepBuildStatsMap(BaseTypedMap):
"""Typed map for string types -> BuildStats"""
def __setitem__(self, key, value):
assert IsStringType(key)
assert isinstance(value, self._value_type())
super(StepBuildStatsMap, self).__setitem__(key, value)
def _value_type(self):
return BuildStats
def IsStringType(s):
return isinstance(s, six.string_types)
Expectation = BaseExpectation
Result = BaseResult
BuildStats = BaseBuildStats
TestExpectationMap = BaseTestExpectationMap
| _AddGroupedResults | identifier_name |
data_types.py | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Various custom data types for use throughout the unexpected pass finder."""
from __future__ import print_function
import collections
import copy
import fnmatch
import logging
import six
FULL_PASS = 1
NEVER_PASS = 2
PARTIAL_PASS = 3
# Allow different unexpected pass finder implementations to register custom
# data types if necessary. These are set to the base versions at the end of the
# file.
Expectation = None
Result = None
BuildStats = None
TestExpectationMap = None
def SetExpectationImplementation(impl):
global Expectation
assert issubclass(impl, BaseExpectation)
Expectation = impl
def SetResultImplementation(impl):
global Result
assert issubclass(impl, BaseResult)
Result = impl
def SetBuildStatsImplementation(impl):
global BuildStats
assert issubclass(impl, BaseBuildStats)
BuildStats = impl
def SetTestExpectationMapImplementation(impl):
global TestExpectationMap
assert issubclass(impl, BaseTestExpectationMap)
TestExpectationMap = impl
class BaseExpectation(object):
"""Container for a test expectation.
Similar to typ's expectations_parser.Expectation class, but with unnecessary
data stripped out and made hashable.
The data contained in an Expectation is equivalent to a single line in an
expectation file.
"""
def __init__(self, test, tags, expected_results, bug=None):
self.test = test
self.tags = frozenset(tags)
self.bug = bug or ''
if isinstance(expected_results, str):
self.expected_results = frozenset([expected_results])
else:
self.expected_results = frozenset(expected_results)
# We're going to be making a lot of comparisons, and fnmatch is *much*
# slower (~40x from rough testing) than a straight comparison, so only use
# it if necessary.
if '*' in test:
self._comp = self._CompareWildcard
else:
self._comp = self._CompareNonWildcard
def __eq__(self, other):
return (isinstance(other, BaseExpectation) and self.test == other.test
and self.tags == other.tags
and self.expected_results == other.expected_results
and self.bug == other.bug)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.test, self.tags, self.expected_results, self.bug))
def _CompareWildcard(self, result_test_name):
return fnmatch.fnmatch(result_test_name, self.test)
def _CompareNonWildcard(self, result_test_name):
return result_test_name == self.test
def AppliesToResult(self, result):
"""Checks whether this expectation should have applied to |result|.
An expectation applies to a result if the test names match (including
wildcard expansion) and the expectation's tags are a subset of the result's
tags.
Args:
result: A Result instance to check against.
Returns:
True if |self| applies to |result|, otherwise False.
"""
assert isinstance(result, BaseResult)
return (self._comp(result.test) and self.tags <= result.tags)
def MaybeAppliesToTest(self, test_name):
"""Similar to AppliesToResult, but used to do initial filtering.
Args:
test_name: A string containing the name of a test.
Returns:
True if |self| could apply to a test named |test_name|, otherwise False.
"""
return self._comp(test_name)
class BaseResult(object):
"""Container for a test result.
Contains the minimal amount of data necessary to describe/identify a result
from ResultDB for the purposes of the unexpected pass finder.
"""
def __init__(self, test, tags, actual_result, step, build_id):
"""
Args:
test: A string containing the name of the test.
tags: An iterable containing the typ tags for the result.
actual_result: The actual result of the test as a string.
step: A string containing the name of the step on the builder.
build_id: A string containing the Buildbucket ID for the build this result
came from.
"""
self.test = test
self.tags = frozenset(tags)
self.actual_result = actual_result
self.step = step
self.build_id = build_id
def __eq__(self, other):
return (isinstance(other, BaseResult) and self.test == other.test
and self.tags == other.tags
and self.actual_result == other.actual_result
and self.step == other.step and self.build_id == other.build_id)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(
(self.test, self.tags, self.actual_result, self.step, self.build_id))
class BaseBuildStats(object):
"""Container for keeping track of a builder's pass/fail stats."""
def __init__(self):
self.passed_builds = 0
self.total_builds = 0
self.failure_links = frozenset()
@property
def failed_builds(self):
return self.total_builds - self.passed_builds
@property
def did_fully_pass(self):
return self.passed_builds == self.total_builds
@property
def did_never_pass(self):
return self.failed_builds == self.total_builds
def AddPassedBuild(self):
self.passed_builds += 1
self.total_builds += 1
def AddFailedBuild(self, build_id):
self.total_builds += 1
build_link = BuildLinkFromBuildId(build_id)
self.failure_links = frozenset([build_link]) | self.failure_links
def GetStatsAsString(self):
return '(%d/%d passed)' % (self.passed_builds, self.total_builds)
def NeverNeededExpectation(self, expectation): # pylint:disable=unused-argument
"""Returns whether the results tallied in |self| never needed |expectation|.
Args:
expectation: An Expectation object that |stats| is located under.
Returns:
True if all the results tallied in |self| would have passed without
|expectation| being present. Otherwise, False.
"""
return self.did_fully_pass
def AlwaysNeededExpectation(self, expectation): # pylint:disable=unused-argument
"""Returns whether the results tallied in |self| always needed |expectation.
Args:
expectation: An Expectation object that |stats| is located under.
Returns:
True if all the results tallied in |self| would have failed without
|expectation| being present. Otherwise, False.
"""
return self.did_never_pass
def __eq__(self, other):
return (isinstance(other, BuildStats)
and self.passed_builds == other.passed_builds
and self.total_builds == other.total_builds
and self.failure_links == other.failure_links)
def __ne__(self, other):
return not self.__eq__(other)
def BuildLinkFromBuildId(build_id):
return 'http://ci.chromium.org/b/%s' % build_id
# These explicit overrides could likely be replaced by using regular dicts with
# type hinting in Python 3. Based on https://stackoverflow.com/a/2588648, this
# should cover all cases where the dict can be modified.
class BaseTypedMap(dict):
"""A base class for typed dictionaries.
Any child classes that override __setitem__ will have any modifications to the
dictionary go through the type checking in __setitem__.
"""
def __init__(self, *args, **kwargs): # pylint:disable=super-init-not-called
self.update(*args, **kwargs)
def update(self, *args, **kwargs):
if args:
assert len(args) == 1
other = dict(args[0])
for k, v in other.items():
self[k] = v
for k, v in kwargs.items():
self[k] = v
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
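# Because update() and setdefault() above route through __setitem__, the
# subclass type checks apply on every mutation path. Sketch:
#   m = StepBuildStatsMap()
#   m.update({'step': BuildStats()})      # ok
#   m.update({'step': 'not BuildStats'})  # AssertionError from __setitem__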
def _value_type(self):
raise NotImplementedError()
def IterToValueType(self, value_type):
"""Recursively iterates over contents until |value_type| is found.
Used to get rid of nested loops, instead using a single loop that
automatically iterates through all the contents at a certain depth.
Args:
value_type: The type to recurse to and then iterate over. For example,
"BuilderStepMap" would result in iterating over the BuilderStepMap
values, meaning that the returned generator would create tuples in the
form (expectation_file, expectation, builder_map).
Returns:
A generator that yields tuples. The length and content of the tuples will
vary depending on |value_type|. For example, using "BuilderStepMap" would
result in tuples of the form (expectation_file, expectation, builder_map),
while "BuildStats" would result in (expectation_file, expectation,
builder_name, step_name, build_stats).
"""
if self._value_type() == value_type:
for k, v in self.items():
yield k, v
else:
|
def Merge(self, other_map, reference_map=None):
"""Merges |other_map| into self.
Args:
other_map: A BaseTypedMap whose contents will be merged into self.
reference_map: A dict containing the information that was originally in
self. Used for ensuring that a single expectation/builder/step
combination is only ever updated once. If None, a copy of self will be
used.
"""
assert isinstance(other_map, self.__class__)
# We should only ever encounter a single updated BuildStats for an
# expectation/builder/step combination. Use the reference map to determine
# if a particular BuildStats has already been updated or not.
reference_map = reference_map or copy.deepcopy(self)
for key, value in other_map.items():
if key not in self:
self[key] = value
else:
if isinstance(value, dict):
self[key].Merge(value, reference_map.get(key, {}))
else:
assert isinstance(value, BuildStats)
# Ensure we haven't updated this BuildStats already. If the reference
# map doesn't have a corresponding BuildStats, then |self| shouldn't have
# had one initially either, and thus it would have been added before
# reaching this point. Otherwise, the two values must match, meaning
# that the BuildStats in |self| hasn't been updated yet.
reference_stats = reference_map.get(key, None)
assert reference_stats is not None
assert reference_stats == self[key]
self[key] = value
class BaseTestExpectationMap(BaseTypedMap):
"""Typed map for string types -> ExpectationBuilderMap.
This results in a dict in the following format:
{
expectation_file1 (str): {
expectation1 (data_types.Expectation): {
builder_name1 (str): {
step_name1 (str): stats1 (data_types.BuildStats),
step_name2 (str): stats2 (data_types.BuildStats),
...
},
builder_name2 (str): { ... },
},
expectation2 (data_types.Expectation): { ... },
...
},
expectation_file2 (str): { ... },
...
}
"""
def __setitem__(self, key, value):
assert IsStringType(key)
assert isinstance(value, ExpectationBuilderMap)
super(BaseTestExpectationMap, self).__setitem__(key, value)
def _value_type(self):
return ExpectationBuilderMap
def IterBuilderStepMaps(self):
"""Iterates over all BuilderStepMaps contained in the map.
Returns:
A generator yielding tuples in the form (expectation_file (str),
expectation (Expectation), builder_map (BuilderStepMap))
"""
return self.IterToValueType(BuilderStepMap)
def AddResultList(self, builder, results, expectation_files=None):
"""Adds |results| to |self|.
Args:
builder: A string containing the builder |results| came from. Should be
prefixed with something to distinguish between identically named CI
and try builders.
results: A list of data_types.Result objects corresponding to the ResultDB
data queried for |builder|.
expectation_files: An iterable of expectation file names that these
results could possibly apply to. If None, then expectations from all
known expectation files will be used.
Returns:
A list of data_types.Result objects that did not have a matching
expectation in |self|.
"""
failure_results = set()
pass_results = set()
unmatched_results = []
for r in results:
if r.actual_result == 'Pass':
pass_results.add(r)
else:
failure_results.add(r)
# Remove any cases of failure -> pass from the passing set. If a test is
# flaky, we get both pass and failure results for it, so we need to remove
# any cases of a pass result having a corresponding, earlier failure
# result.
modified_failing_retry_results = set()
for r in failure_results:
modified_failing_retry_results.add(
Result(r.test, r.tags, 'Pass', r.step, r.build_id))
pass_results -= modified_failing_retry_results
# Group identically named results together so we reduce the number of
# comparisons we have to make.
all_results = pass_results | failure_results
grouped_results = collections.defaultdict(list)
for r in all_results:
grouped_results[r.test].append(r)
matched_results = self._AddGroupedResults(grouped_results, builder,
expectation_files)
unmatched_results = list(all_results - matched_results)
return unmatched_results
def _AddGroupedResults(self, grouped_results, builder, expectation_files):
"""Adds all results in |grouped_results| to |self|.
Args:
grouped_results: A dict mapping test name (str) to a list of
data_types.Result objects for that test.
builder: A string containing the name of the builder |grouped_results|
came from.
expectation_files: An iterable of expectation file names that these
results could possibly apply to. If None, then expectations from all
known expectation files will be used.
Returns:
A set of data_types.Result objects that had at least one matching
expectation.
"""
matched_results = set()
for test_name, result_list in grouped_results.items():
for ef, expectation_map in self.items():
if expectation_files is not None and ef not in expectation_files:
continue
for expectation, builder_map in expectation_map.items():
if not expectation.MaybeAppliesToTest(test_name):
continue
for r in result_list:
if expectation.AppliesToResult(r):
matched_results.add(r)
step_map = builder_map.setdefault(builder, StepBuildStatsMap())
stats = step_map.setdefault(r.step, BuildStats())
self._AddSingleResult(r, stats)
return matched_results
def _AddSingleResult(self, result, stats):
"""Adds |result| to |self|.
Args:
result: A data_types.Result object to add.
stats: A data_types.BuildStats object to add the result to.
"""
if result.actual_result == 'Pass':
stats.AddPassedBuild()
else:
stats.AddFailedBuild(result.build_id)
def SplitByStaleness(self):
"""Separates stored data based on expectation staleness.
Returns:
Three TestExpectationMaps (stale_dict, semi_stale_dict, active_dict). All
three combined contain the information of |self|. |stale_dict| contains
entries for expectations that are no longer helpful,
|semi_stale_dict| contains entries for expectations that might be
removable or modifiable, but have at least one failed test run.
|active_dict| contains entries for expectations that are preventing
failures on all builders they're active on, and thus shouldn't be removed.
"""
stale_dict = TestExpectationMap()
semi_stale_dict = TestExpectationMap()
active_dict = TestExpectationMap()
# This initially looks like a good target for using
# TestExpectationMap's iterators since there are many nested loops.
# However, we need to reset state in different loops, and the alternative of
# keeping all the state outside the loop and resetting under certain
# conditions ends up being less readable than just using nested loops.
for expectation_file, expectation_map in self.items():
for expectation, builder_map in expectation_map.items():
# A temporary map to hold data so we can later determine whether an
# expectation is stale, semi-stale, or active.
tmp_map = {
FULL_PASS: BuilderStepMap(),
NEVER_PASS: BuilderStepMap(),
PARTIAL_PASS: BuilderStepMap(),
}
split_stats_map = builder_map.SplitBuildStatsByPass(expectation)
for builder_name, (fully_passed, never_passed,
partially_passed) in split_stats_map.items():
if fully_passed:
tmp_map[FULL_PASS][builder_name] = fully_passed
if never_passed:
tmp_map[NEVER_PASS][builder_name] = never_passed
if partially_passed:
tmp_map[PARTIAL_PASS][builder_name] = partially_passed
def _CopyPassesIntoBuilderMap(builder_map, pass_types):
for pt in pass_types:
for builder, steps in tmp_map[pt].items():
builder_map.setdefault(builder, StepBuildStatsMap()).update(steps)
# Handle the case of a stale expectation.
if not (tmp_map[NEVER_PASS] or tmp_map[PARTIAL_PASS]):
builder_map = stale_dict.setdefault(
expectation_file,
ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
_CopyPassesIntoBuilderMap(builder_map, [FULL_PASS])
# Handle the case of an active expectation.
elif not tmp_map[FULL_PASS]:
builder_map = active_dict.setdefault(
expectation_file,
ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
_CopyPassesIntoBuilderMap(builder_map, [NEVER_PASS, PARTIAL_PASS])
# Handle the case of a semi-stale expectation.
else:
# TODO(crbug.com/998329): Sort by pass percentage so it's easier to
# find problematic builders without highlighting.
builder_map = semi_stale_dict.setdefault(
expectation_file,
ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
_CopyPassesIntoBuilderMap(builder_map,
[FULL_PASS, PARTIAL_PASS, NEVER_PASS])
return stale_dict, semi_stale_dict, active_dict
def FilterOutUnusedExpectations(self):
"""Filters out any unused Expectations from stored data.
An Expectation is considered unused if its corresponding dictionary is
empty. If removing Expectations results in a top-level test key having an
empty dictionary, that test entry will also be removed.
Returns:
A dict from expectation file name (str) to a list of unused Expectations
from that file.
"""
logging.info('Filtering out unused expectations')
unused = collections.defaultdict(list)
unused_count = 0
for (expectation_file, expectation,
builder_map) in self.IterBuilderStepMaps():
if not builder_map:
unused[expectation_file].append(expectation)
unused_count += 1
for expectation_file, expectations in unused.items():
for e in expectations:
del self[expectation_file][e]
logging.debug('Found %d unused expectations', unused_count)
empty_files = []
for expectation_file, expectation_map in self.items():
if not expectation_map:
empty_files.append(expectation_file)
for empty in empty_files:
del self[empty]
logging.debug('Found %d empty files: %s', len(empty_files), empty_files)
return unused
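# Filtering sketch (hypothetical layout): given
#   {'file.txt': {exp_used: <non-empty map>, exp_unused: <empty map>}}
# the call deletes exp_unused, returns {'file.txt': [exp_unused]}, and would
# also drop 'file.txt' itself if the deletion left it empty.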
class ExpectationBuilderMap(BaseTypedMap):
"""Typed map for Expectation -> BuilderStepMap."""
def __setitem__(self, key, value):
assert isinstance(key, BaseExpectation)
assert isinstance(value, self._value_type())
super(ExpectationBuilderMap, self).__setitem__(key, value)
def _value_type(self):
return BuilderStepMap
class BuilderStepMap(BaseTypedMap):
"""Typed map for string types -> StepBuildStatsMap."""
def __setitem__(self, key, value):
assert IsStringType(key)
assert isinstance(value, self._value_type())
super(BuilderStepMap, self).__setitem__(key, value)
def _value_type(self):
return StepBuildStatsMap
def SplitBuildStatsByPass(self, expectation):
"""Splits the underlying BuildStats data by passing-ness.
Args:
expectation: The Expectation that this BuilderStepMap is located under.
Returns:
A dict mapping builder name to a tuple (fully_passed, never_passed,
partially_passed). Each *_passed is a StepBuildStatsMap containing data
for the steps that either fully passed on all builds, never passed on any
builds, or passed some of the time.
"""
retval = {}
for builder_name, step_map in self.items():
fully_passed = StepBuildStatsMap()
never_passed = StepBuildStatsMap()
partially_passed = StepBuildStatsMap()
for step_name, stats in step_map.items():
if stats.NeverNeededExpectation(expectation):
assert step_name not in fully_passed
fully_passed[step_name] = stats
elif stats.AlwaysNeededExpectation(expectation):
assert step_name not in never_passed
never_passed[step_name] = stats
else:
assert step_name not in partially_passed
partially_passed[step_name] = stats
retval[builder_name] = (fully_passed, never_passed, partially_passed)
return retval
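# Splitting sketch (hypothetical stats for one builder): steps with
#   'step_a' (2/2 passed), 'step_b' (0/2 passed), 'step_c' (1/2 passed)
# produce ({'step_a': ...}, {'step_b': ...}, {'step_c': ...}), i.e. fully
# passed, never passed, and partially passed respectively.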
def IterBuildStats(self):
"""Iterates over all BuildStats contained in the map.
Returns:
A generator yielding tuples in the form (builder_name (str), step_name
(str), build_stats (BuildStats)).
"""
return self.IterToValueType(BuildStats)
class StepBuildStatsMap(BaseTypedMap):
"""Typed map for string types -> BuildStats"""
def __setitem__(self, key, value):
assert IsStringType(key)
assert isinstance(value, self._value_type())
super(StepBuildStatsMap, self).__setitem__(key, value)
def _value_type(self):
return BuildStats
def IsStringType(s):
return isinstance(s, six.string_types)
Expectation = BaseExpectation
Result = BaseResult
BuildStats = BaseBuildStats
TestExpectationMap = BaseTestExpectationMap
| for k, v in self.items():
for nested_value in v.IterToValueType(value_type):
yield (k, ) + nested_value | conditional_block |
data_types.py | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Various custom data types for use throughout the unexpected pass finder."""
from __future__ import print_function
import collections
import copy
import fnmatch
import logging
import six
FULL_PASS = 1
NEVER_PASS = 2
PARTIAL_PASS = 3
# Allow different unexpected pass finder implementations to register custom
# data types if necessary. These are set to the base versions at the end of the
# file.
Expectation = None
Result = None
BuildStats = None
TestExpectationMap = None
def SetExpectationImplementation(impl):
global Expectation
assert issubclass(impl, BaseExpectation)
Expectation = impl
def SetResultImplementation(impl):
global Result
assert issubclass(impl, BaseResult)
Result = impl
def SetBuildStatsImplementation(impl):
global BuildStats
assert issubclass(impl, BaseBuildStats)
BuildStats = impl
def SetTestExpectationMapImplementation(impl):
global TestExpectationMap
assert issubclass(impl, BaseTestExpectationMap)
TestExpectationMap = impl
class BaseExpectation(object):
"""Container for a test expectation.
Similar to typ's expectations_parser.Expectation class, but with unnecessary
data stripped out and made hashable.
The data contained in an Expectation is equivalent to a single line in an
expectation file.
"""
def __init__(self, test, tags, expected_results, bug=None):
self.test = test
self.tags = frozenset(tags)
self.bug = bug or ''
if isinstance(expected_results, str):
self.expected_results = frozenset([expected_results])
else:
self.expected_results = frozenset(expected_results)
# We're going to be making a lot of comparisons, and fnmatch is *much*
# slower (~40x from rough testing) than a straight comparison, so only use
# it if necessary.
if '*' in test:
self._comp = self._CompareWildcard
else:
self._comp = self._CompareNonWildcard
def __eq__(self, other):
return (isinstance(other, BaseExpectation) and self.test == other.test
and self.tags == other.tags
and self.expected_results == other.expected_results
and self.bug == other.bug)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.test, self.tags, self.expected_results, self.bug))
def _CompareWildcard(self, result_test_name):
return fnmatch.fnmatch(result_test_name, self.test)
def _CompareNonWildcard(self, result_test_name):
return result_test_name == self.test
def AppliesToResult(self, result):
"""Checks whether this expectation should have applied to |result|.
An expectation applies to a result if the test names match (including
wildcard expansion) and the expectation's tags are a subset of the result's
tags.
Args:
result: A Result instance to check against.
Returns:
True if |self| applies to |result|, otherwise False.
"""
assert isinstance(result, BaseResult)
return (self._comp(result.test) and self.tags <= result.tags)
def MaybeAppliesToTest(self, test_name):
"""Similar to AppliesToResult, but used to do initial filtering.
Args:
test_name: A string containing the name of a test.
Returns:
True if |self| could apply to a test named |test_name|, otherwise False.
"""
return self._comp(test_name)
class BaseResult(object):
"""Container for a test result.
Contains the minimal amount of data necessary to describe/identify a result
from ResultDB for the purposes of the unexpected pass finder.
"""
def __init__(self, test, tags, actual_result, step, build_id):
"""
Args:
test: A string containing the name of the test.
tags: An iterable containing the typ tags for the result.
actual_result: The actual result of the test as a string.
step: A string containing the name of the step on the builder.
build_id: A string containing the Buildbucket ID for the build this result
came from.
"""
self.test = test
self.tags = frozenset(tags)
self.actual_result = actual_result
self.step = step
self.build_id = build_id
def __eq__(self, other):
return (isinstance(other, BaseResult) and self.test == other.test
and self.tags == other.tags
and self.actual_result == other.actual_result
and self.step == other.step and self.build_id == other.build_id)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(
(self.test, self.tags, self.actual_result, self.step, self.build_id))
class BaseBuildStats(object):
"""Container for keeping track of a builder's pass/fail stats."""
def __init__(self):
self.passed_builds = 0
self.total_builds = 0
self.failure_links = frozenset()
@property
def failed_builds(self):
return self.total_builds - self.passed_builds
@property
def did_fully_pass(self):
return self.passed_builds == self.total_builds
@property
def did_never_pass(self):
return self.failed_builds == self.total_builds
def AddPassedBuild(self):
self.passed_builds += 1
self.total_builds += 1
def AddFailedBuild(self, build_id):
self.total_builds += 1
build_link = BuildLinkFromBuildId(build_id)
self.failure_links = frozenset([build_link]) | self.failure_links
def GetStatsAsString(self):
return '(%d/%d passed)' % (self.passed_builds, self.total_builds)
def NeverNeededExpectation(self, expectation): # pylint:disable=unused-argument
"""Returns whether the results tallied in |self| never needed |expectation|.
Args:
expectation: An Expectation object that |stats| is located under.
Returns:
True if all the results tallied in |self| would have passed without
|expectation| being present. Otherwise, False.
"""
return self.did_fully_pass
def AlwaysNeededExpectation(self, expectation): # pylint:disable=unused-argument
"""Returns whether the results tallied in |self| always needed |expectation.
Args:
expectation: An Expectation object that |stats| is located under.
Returns:
True if all the results tallied in |self| would have failed without
|expectation| being present. Otherwise, False.
"""
return self.did_never_pass
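# Semantics sketch: with 3/3 builds passed, NeverNeededExpectation() is True;
# with 0/3 passed, AlwaysNeededExpectation() is True. Any mix returns False
# from both, which SplitBuildStatsByPass later buckets as partially passed.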
def __eq__(self, other):
return (isinstance(other, BuildStats)
and self.passed_builds == other.passed_builds
and self.total_builds == other.total_builds
and self.failure_links == other.failure_links)
def __ne__(self, other):
return not self.__eq__(other)
def BuildLinkFromBuildId(build_id):
return 'http://ci.chromium.org/b/%s' % build_id
# These explicit overrides could likely be replaced by using regular dicts with
# type hinting in Python 3. Based on https://stackoverflow.com/a/2588648, this
# should cover all cases where the dict can be modified.
class BaseTypedMap(dict):
"""A base class for typed dictionaries.
Any child classes that override __setitem__ will have any modifications to the
dictionary go through the type checking in __setitem__.
"""
def __init__(self, *args, **kwargs): # pylint:disable=super-init-not-called
self.update(*args, **kwargs)
def update(self, *args, **kwargs):
if args:
assert len(args) == 1
other = dict(args[0])
for k, v in other.items():
self[k] = v
for k, v in kwargs.items():
self[k] = v
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
def _value_type(self):
raise NotImplementedError()
def IterToValueType(self, value_type):
"""Recursively iterates over contents until |value_type| is found.
Used to get rid of nested loops, instead using a single loop that
automatically iterates through all the contents at a certain depth.
Args:
value_type: The type to recurse to and then iterate over. For example,
"BuilderStepMap" would result in iterating over the BuilderStepMap
values, meaning that the returned generator would create tuples in the
form (expectation_file, expectation, builder_map).
Returns:
A generator that yields tuples. The length and content of the tuples will
vary depending on |value_type|. For example, using "BuilderStepMap" would
result in tuples of the form (expectation_file, expectation, builder_map),
while "BuildStats" would result in (expectation_file, expectation,
builder_name, step_name, build_stats).
"""
if self._value_type() == value_type:
for k, v in self.items():
yield k, v
else:
for k, v in self.items():
for nested_value in v.IterToValueType(value_type):
yield (k, ) + nested_value
def Merge(self, other_map, reference_map=None):
"""Merges |other_map| into self.
Args:
other_map: A BaseTypedMap whose contents will be merged into self.
reference_map: A dict containing the information that was originally in
self. Used for ensuring that a single expectation/builder/step
combination is only ever updated once. If None, a copy of self will be
used.
"""
assert isinstance(other_map, self.__class__)
# We should only ever encounter a single updated BuildStats for an
# expectation/builder/step combination. Use the reference map to determine
# if a particular BuildStats has already been updated or not.
reference_map = reference_map or copy.deepcopy(self)
for key, value in other_map.items():
if key not in self:
self[key] = value
else:
if isinstance(value, dict):
self[key].Merge(value, reference_map.get(key, {}))
else:
assert isinstance(value, BuildStats)
# Ensure we haven't updated this BuildStats already. If the reference
# map doesn't have a corresponding BuildStats, then |self| shouldn't have
# had one initially either, and thus it would have been added before
# reaching this point. Otherwise, the two values must match, meaning
# that the BuildStats in |self| hasn't been updated yet.
reference_stats = reference_map.get(key, None)
assert reference_stats is not None
assert reference_stats == self[key]
self[key] = value
class BaseTestExpectationMap(BaseTypedMap):
"""Typed map for string types -> ExpectationBuilderMap.
This results in a dict in the following format:
{
expectation_file1 (str): {
expectation1 (data_types.Expectation): {
builder_name1 (str): {
step_name1 (str): stats1 (data_types.BuildStats),
step_name2 (str): stats2 (data_types.BuildStats),
...
},
builder_name2 (str): { ... },
},
expectation2 (data_types.Expectation): { ... },
...
},
expectation_file2 (str): { ... },
...
}
"""
def __setitem__(self, key, value):
assert IsStringType(key)
assert isinstance(value, ExpectationBuilderMap)
super(BaseTestExpectationMap, self).__setitem__(key, value)
def _value_type(self):
|
def IterBuilderStepMaps(self):
"""Iterates over all BuilderStepMaps contained in the map.
Returns:
A generator yielding tuples in the form (expectation_file (str),
expectation (Expectation), builder_map (BuilderStepMap))
"""
return self.IterToValueType(BuilderStepMap)
def AddResultList(self, builder, results, expectation_files=None):
"""Adds |results| to |self|.
Args:
builder: A string containing the builder |results| came from. Should be
prefixed with something to distinguish between identically named CI
and try builders.
results: A list of data_types.Result objects corresponding to the ResultDB
data queried for |builder|.
expectation_files: An iterable of expectation file names that these
results could possibly apply to. If None, then expectations from all
known expectation files will be used.
Returns:
A list of data_types.Result objects that did not have a matching
expectation in |self|.
"""
failure_results = set()
pass_results = set()
unmatched_results = []
for r in results:
if r.actual_result == 'Pass':
pass_results.add(r)
else:
failure_results.add(r)
# Remove any cases of failure -> pass from the passing set. If a test is
# flaky, we get both pass and failure results for it, so we need to remove
# any cases of a pass result having a corresponding, earlier failure
# result.
modified_failing_retry_results = set()
for r in failure_results:
modified_failing_retry_results.add(
Result(r.test, r.tags, 'Pass', r.step, r.build_id))
pass_results -= modified_failing_retry_results
# Group identically named results together so we reduce the number of
# comparisons we have to make.
all_results = pass_results | failure_results
grouped_results = collections.defaultdict(list)
for r in all_results:
grouped_results[r.test].append(r)
matched_results = self._AddGroupedResults(grouped_results, builder,
expectation_files)
unmatched_results = list(all_results - matched_results)
return unmatched_results
def _AddGroupedResults(self, grouped_results, builder, expectation_files):
"""Adds all results in |grouped_results| to |self|.
Args:
grouped_results: A dict mapping test name (str) to a list of
data_types.Result objects for that test.
builder: A string containing the name of the builder |grouped_results|
came from.
expectation_files: An iterable of expectation file names that these
results could possibly apply to. If None, then expectations from all
known expectation files will be used.
Returns:
A set of data_types.Result objects that had at least one matching
expectation.
"""
matched_results = set()
for test_name, result_list in grouped_results.items():
for ef, expectation_map in self.items():
if expectation_files is not None and ef not in expectation_files:
continue
for expectation, builder_map in expectation_map.items():
if not expectation.MaybeAppliesToTest(test_name):
continue
for r in result_list:
if expectation.AppliesToResult(r):
matched_results.add(r)
step_map = builder_map.setdefault(builder, StepBuildStatsMap())
stats = step_map.setdefault(r.step, BuildStats())
self._AddSingleResult(r, stats)
return matched_results
def _AddSingleResult(self, result, stats):
"""Adds |result| to |self|.
Args:
result: A data_types.Result object to add.
stats: A data_types.BuildStats object to add the result to.
"""
if result.actual_result == 'Pass':
stats.AddPassedBuild()
else:
stats.AddFailedBuild(result.build_id)
def SplitByStaleness(self):
"""Separates stored data based on expectation staleness.
Returns:
Three TestExpectationMaps (stale_dict, semi_stale_dict, active_dict). All
three combined contain the information of |self|. |stale_dict| contains
entries for expectations that are no longer helpful,
|semi_stale_dict| contains entries for expectations that might be
removable or modifiable, but have at least one failed test run.
|active_dict| contains entries for expectations that are preventing
failures on all builders they're active on, and thus shouldn't be removed.
"""
stale_dict = TestExpectationMap()
semi_stale_dict = TestExpectationMap()
active_dict = TestExpectationMap()
# This initially looks like a good target for using
# TestExpectationMap's iterators since there are many nested loops.
# However, we need to reset state in different loops, and the alternative of
# keeping all the state outside the loop and resetting under certain
# conditions ends up being less readable than just using nested loops.
for expectation_file, expectation_map in self.items():
for expectation, builder_map in expectation_map.items():
# A temporary map to hold data so we can later determine whether an
# expectation is stale, semi-stale, or active.
tmp_map = {
FULL_PASS: BuilderStepMap(),
NEVER_PASS: BuilderStepMap(),
PARTIAL_PASS: BuilderStepMap(),
}
split_stats_map = builder_map.SplitBuildStatsByPass(expectation)
for builder_name, (fully_passed, never_passed,
partially_passed) in split_stats_map.items():
if fully_passed:
tmp_map[FULL_PASS][builder_name] = fully_passed
if never_passed:
tmp_map[NEVER_PASS][builder_name] = never_passed
if partially_passed:
tmp_map[PARTIAL_PASS][builder_name] = partially_passed
def _CopyPassesIntoBuilderMap(builder_map, pass_types):
for pt in pass_types:
for builder, steps in tmp_map[pt].items():
builder_map.setdefault(builder, StepBuildStatsMap()).update(steps)
# Handle the case of a stale expectation.
if not (tmp_map[NEVER_PASS] or tmp_map[PARTIAL_PASS]):
builder_map = stale_dict.setdefault(
expectation_file,
ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
_CopyPassesIntoBuilderMap(builder_map, [FULL_PASS])
# Handle the case of an active expectation.
elif not tmp_map[FULL_PASS]:
builder_map = active_dict.setdefault(
expectation_file,
ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
_CopyPassesIntoBuilderMap(builder_map, [NEVER_PASS, PARTIAL_PASS])
# Handle the case of a semi-stale expectation.
else:
# TODO(crbug.com/998329): Sort by pass percentage so it's easier to
# find problematic builders without highlighting.
builder_map = semi_stale_dict.setdefault(
expectation_file,
ExpectationBuilderMap()).setdefault(expectation, BuilderStepMap())
_CopyPassesIntoBuilderMap(builder_map,
[FULL_PASS, PARTIAL_PASS, NEVER_PASS])
return stale_dict, semi_stale_dict, active_dict
def FilterOutUnusedExpectations(self):
"""Filters out any unused Expectations from stored data.
An Expectation is considered unused if its corresponding dictionary is
empty. If removing Expectations results in a top-level test key having an
empty dictionary, that test entry will also be removed.
Returns:
A dict from expectation file name (str) to a list of unused Expectations
from that file.
"""
logging.info('Filtering out unused expectations')
unused = collections.defaultdict(list)
unused_count = 0
for (expectation_file, expectation,
builder_map) in self.IterBuilderStepMaps():
if not builder_map:
unused[expectation_file].append(expectation)
unused_count += 1
for expectation_file, expectations in unused.items():
for e in expectations:
del self[expectation_file][e]
logging.debug('Found %d unused expectations', unused_count)
empty_files = []
for expectation_file, expectation_map in self.items():
if not expectation_map:
empty_files.append(expectation_file)
for empty in empty_files:
del self[empty]
logging.debug('Found %d empty files: %s', len(empty_files), empty_files)
return unused
class ExpectationBuilderMap(BaseTypedMap):
"""Typed map for Expectation -> BuilderStepMap."""
def __setitem__(self, key, value):
assert isinstance(key, BaseExpectation)
assert isinstance(value, self._value_type())
super(ExpectationBuilderMap, self).__setitem__(key, value)
def _value_type(self):
return BuilderStepMap
class BuilderStepMap(BaseTypedMap):
"""Typed map for string types -> StepBuildStatsMap."""
def __setitem__(self, key, value):
assert IsStringType(key)
assert isinstance(value, self._value_type())
super(BuilderStepMap, self).__setitem__(key, value)
def _value_type(self):
return StepBuildStatsMap
def SplitBuildStatsByPass(self, expectation):
"""Splits the underlying BuildStats data by passing-ness.
Args:
expectation: The Expectation that this BuilderStepMap is located under.
Returns:
A dict mapping builder name to a tuple (fully_passed, never_passed,
partially_passed). Each *_passed is a StepBuildStatsMap containing data
for the steps that either fully passed on all builds, never passed on any
builds, or passed some of the time.
"""
retval = {}
for builder_name, step_map in self.items():
fully_passed = StepBuildStatsMap()
never_passed = StepBuildStatsMap()
partially_passed = StepBuildStatsMap()
for step_name, stats in step_map.items():
if stats.NeverNeededExpectation(expectation):
assert step_name not in fully_passed
fully_passed[step_name] = stats
elif stats.AlwaysNeededExpectation(expectation):
assert step_name not in never_passed
never_passed[step_name] = stats
else:
assert step_name not in partially_passed
partially_passed[step_name] = stats
retval[builder_name] = (fully_passed, never_passed, partially_passed)
return retval
def IterBuildStats(self):
"""Iterates over all BuildStats contained in the map.
Returns:
A generator yielding tuples in the form (builder_name (str), step_name
(str), build_stats (BuildStats)).
"""
return self.IterToValueType(BuildStats)
class StepBuildStatsMap(BaseTypedMap):
"""Typed map for string types -> BuildStats"""
def __setitem__(self, key, value):
assert IsStringType(key)
assert isinstance(value, self._value_type())
super(StepBuildStatsMap, self).__setitem__(key, value)
def _value_type(self):
return BuildStats
def IsStringType(s):
return isinstance(s, six.string_types)
Expectation = BaseExpectation
Result = BaseResult
BuildStats = BaseBuildStats
TestExpectationMap = BaseTestExpectationMap
| return ExpectationBuilderMap | identifier_body |
MediaMetadata.ts | import _ from 'lodash';
MediaMetadata.$inject = ['userList', 'archiveService', 'metadata'];
export function MediaMetadata(userList, archiveService, metadata) {
return {
scope: {
item: '=',
},
templateUrl: 'scripts/apps/archive/views/metadata-view.html',
link: function(scope, elem) {
scope.$watch('item', reloadData);
function reloadData() {
var qcodes = [];
metadata.getFilteredCustomVocabularies(qcodes).then((cvs) => {
scope.cvs = _.sortBy(cvs, 'priority');
scope.genreInCvs = _.map(cvs, 'schema_field').indexOf('genre') !== -1;
scope.placeInCvs = _.map(cvs, 'schema_field').indexOf('place') !== -1;
});
scope.originalCreator = scope.item.original_creator;
scope.versionCreator = scope.item.version_creator;
if (!archiveService.isLegal(scope.item)) {
if (scope.item.original_creator) {
userList.getUser(scope.item.original_creator)
.then((user) => {
scope.originalCreator = user.display_name;
});
}
if (scope.item.version_creator) {
userList.getUser(scope.item.version_creator)
.then((user) => {
scope.versionCreator = user.display_name;
});
}
}
}
scope.getLocaleName = function(terms, scheme) {
const term = terms.find((element) => element.scheme === scheme);
if (!term) {
return 'None';
}
if (term.translations && scope.item.language
&& term.translations.name[scope.item.language]) {
return term.translations.name[scope.item.language];
}
return term.name;
}; | },
};
} | random_line_split |
|
MediaMetadata.ts | import _ from 'lodash';
MediaMetadata.$inject = ['userList', 'archiveService', 'metadata'];
export function MediaMetadata(userList, archiveService, metadata) {
return {
scope: {
item: '=',
},
templateUrl: 'scripts/apps/archive/views/metadata-view.html',
link: function(scope, elem) {
scope.$watch('item', reloadData);
function | () {
var qcodes = [];
metadata.getFilteredCustomVocabularies(qcodes).then((cvs) => {
scope.cvs = _.sortBy(cvs, 'priority');
scope.genreInCvs = _.map(cvs, 'schema_field').indexOf('genre') !== -1;
scope.placeInCvs = _.map(cvs, 'schema_field').indexOf('place') !== -1;
});
scope.originalCreator = scope.item.original_creator;
scope.versionCreator = scope.item.version_creator;
if (!archiveService.isLegal(scope.item)) {
if (scope.item.original_creator) {
userList.getUser(scope.item.original_creator)
.then((user) => {
scope.originalCreator = user.display_name;
});
}
if (scope.item.version_creator) {
userList.getUser(scope.item.version_creator)
.then((user) => {
scope.versionCreator = user.display_name;
});
}
}
}
scope.getLocaleName = function(terms, scheme) {
const term = terms.find((element) => element.scheme === scheme);
if (!term) {
return 'None';
}
if (term.translations && scope.item.language
&& term.translations.name[scope.item.language]) {
return term.translations.name[scope.item.language];
}
return term.name;
};
},
};
}
| reloadData | identifier_name |
MediaMetadata.ts | import _ from 'lodash';
MediaMetadata.$inject = ['userList', 'archiveService', 'metadata'];
export function MediaMetadata(userList, archiveService, metadata) {
return {
scope: {
item: '=',
},
templateUrl: 'scripts/apps/archive/views/metadata-view.html',
link: function(scope, elem) {
scope.$watch('item', reloadData);
function reloadData() |
scope.getLocaleName = function(terms, scheme) {
const term = terms.find((element) => element.scheme === scheme);
if (!term) {
return 'None';
}
if (term.translations && scope.item.language
&& term.translations.name[scope.item.language]) {
return term.translations.name[scope.item.language];
}
return term.name;
};
},
};
}
| {
var qcodes = [];
metadata.getFilteredCustomVocabularies(qcodes).then((cvs) => {
scope.cvs = _.sortBy(cvs, 'priority');
scope.genreInCvs = _.map(cvs, 'schema_field').indexOf('genre') !== -1;
scope.placeInCvs = _.map(cvs, 'schema_field').indexOf('place') !== -1;
});
scope.originalCreator = scope.item.original_creator;
scope.versionCreator = scope.item.version_creator;
if (!archiveService.isLegal(scope.item)) {
if (scope.item.original_creator) {
userList.getUser(scope.item.original_creator)
.then((user) => {
scope.originalCreator = user.display_name;
});
}
if (scope.item.version_creator) {
userList.getUser(scope.item.version_creator)
.then((user) => {
scope.versionCreator = user.display_name;
});
}
}
} | identifier_body |
MediaMetadata.ts | import _ from 'lodash';
MediaMetadata.$inject = ['userList', 'archiveService', 'metadata'];
export function MediaMetadata(userList, archiveService, metadata) {
return {
scope: {
item: '=',
},
templateUrl: 'scripts/apps/archive/views/metadata-view.html',
link: function(scope, elem) {
scope.$watch('item', reloadData);
function reloadData() {
var qcodes = [];
metadata.getFilteredCustomVocabularies(qcodes).then((cvs) => {
scope.cvs = _.sortBy(cvs, 'priority');
scope.genreInCvs = _.map(cvs, 'schema_field').indexOf('genre') !== -1;
scope.placeInCvs = _.map(cvs, 'schema_field').indexOf('place') !== -1;
});
scope.originalCreator = scope.item.original_creator;
scope.versionCreator = scope.item.version_creator;
if (!archiveService.isLegal(scope.item)) {
if (scope.item.original_creator) {
userList.getUser(scope.item.original_creator)
.then((user) => {
scope.originalCreator = user.display_name;
});
}
if (scope.item.version_creator) {
userList.getUser(scope.item.version_creator)
.then((user) => {
scope.versionCreator = user.display_name;
});
}
}
}
scope.getLocaleName = function(terms, scheme) {
const term = terms.find((element) => element.scheme === scheme);
if (!term) {
return 'None';
}
if (term.translations && scope.item.language
&& term.translations.name[scope.item.language]) |
return term.name;
};
},
};
}
| {
return term.translations.name[scope.item.language];
} | conditional_block |
index.js | /**
* CORS middleware for koa2
*
* @param {Object} [options]
* - {String|Function(ctx)} origin `Access-Control-Allow-Origin`, default is request Origin header
* - {Array} exposeHeaders `Access-Control-Expose-Headers`
* - {String|Number} maxAge `Access-Control-Max-Age` in seconds
* - {Boolean} credentials `Access-Control-Allow-Credentials`
* - {Array} allowMethods `Access-Control-Allow-Methods`, default is ['GET', 'PUT', 'POST', 'DELETE', 'HEAD', 'OPTIONS']
* - {Array} allowHeaders `Access-Control-Allow-Headers`
* @return {Function}
* @api public
*/
module.exports = function crossOrigin(options = {}) {
const defaultOptions = {
allowMethods: ['GET', 'PUT', 'POST', 'DELETE', 'HEAD', 'OPTIONS'],
};
// fill in any options the caller did not provide with the defaults
for (let key in defaultOptions) {
if (!Object.prototype.hasOwnProperty.call(options, key)) {
options[key] = defaultOptions[key];
}
}
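// The returned middleware resolves the allowed origin per request: an
// origin() callback takes precedence, then a fixed origin string, then the
// request's own Origin header, and finally '*'.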
return async function (ctx, next) {
let origin;
if (typeof options.origin === 'function') {
origin = options.origin(ctx);
} else {
origin = options.origin || ctx.get('Origin') || '*';
}
if (!origin) {
return await next();
}
// Access-Control-Allow-Origin
ctx.set('Access-Control-Allow-Origin', origin);
if (ctx.method === 'OPTIONS') {
// Preflight Request
if (!ctx.get('Access-Control-Request-Method')) {
return await next();
}
// Access-Control-Max-Age
if (options.maxAge) {
ctx.set('Access-Control-Max-Age', String(options.maxAge));
}
// Access-Control-Allow-Credentials
if (options.credentials === true) {
// When used as part of a response to a preflight request,
// this indicates whether or not the actual request can be made using credentials.
ctx.set('Access-Control-Allow-Credentials', 'true');
}
// Access-Control-Allow-Methods
if (options.allowMethods) {
ctx.set('Access-Control-Allow-Methods', options.allowMethods.join(','));
}
// Access-Control-Allow-Headers
if (options.allowHeaders) {
ctx.set('Access-Control-Allow-Headers', options.allowHeaders.join(','));
} else {
ctx.set('Access-Control-Allow-Headers', ctx.get('Access-Control-Request-Headers'));
}
ctx.status = 204; // No Content
} else {
// Request | ctx.remove('Access-Control-Allow-Credentials');
} else {
ctx.set('Access-Control-Allow-Credentials', 'true');
}
}
// Access-Control-Expose-Headers
if (options.exposeHeaders) {
ctx.set('Access-Control-Expose-Headers', options.exposeHeaders.join(','));
}
try {
await next();
} catch (err) {
throw err;
}
}
};
}; | // Access-Control-Allow-Credentials
if (options.credentials === true) {
if (origin === '*') {
// `credentials` can't be true when the `origin` is set to `*` | random_line_split |
index.js | /**
* CORS middleware for koa2
*
* @param {Object} [options]
* - {String|Function(ctx)} origin `Access-Control-Allow-Origin`, default is request Origin header
* - {Array} exposeHeaders `Access-Control-Expose-Headers`
* - {String|Number} maxAge `Access-Control-Max-Age` in seconds
* - {Boolean} credentials `Access-Control-Allow-Credentials`
* - {Array} allowMethods `Access-Control-Allow-Methods`, default is ['GET', 'PUT', 'POST', 'DELETE', 'HEAD', 'OPTIONS']
* - {Array} allowHeaders `Access-Control-Allow-Headers`
* @return {Function}
* @api public
*/
module.exports = function crossOrigin(options = {}) {
const defaultOptions = {
allowMethods: ['GET', 'PUT', 'POST', 'DELETE', 'HEAD', 'OPTIONS'],
};
// fill in any options the caller did not provide with the defaults
for (let key in defaultOptions) {
if (!Object.prototype.hasOwnProperty.call(options, key)) {
options[key] = defaultOptions[key];
}
}
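// The returned middleware resolves the allowed origin per request: an
// origin() callback takes precedence, then a fixed origin string, then the
// request's own Origin header, and finally '*'.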
return async function (ctx, next) {
let origin;
if (typeof options.origin === 'function') {
origin = options.origin(ctx);
} else |
if (!origin) {
return await next();
}
// Access-Control-Allow-Origin
ctx.set('Access-Control-Allow-Origin', origin);
if (ctx.method === 'OPTIONS') {
// Preflight Request
if (!ctx.get('Access-Control-Request-Method')) {
return await next();
}
// Access-Control-Max-Age
if (options.maxAge) {
ctx.set('Access-Control-Max-Age', String(options.maxAge));
}
// Access-Control-Allow-Credentials
if (options.credentials === true) {
// When used as part of a response to a preflight request,
// this indicates whether or not the actual request can be made using credentials.
ctx.set('Access-Control-Allow-Credentials', 'true');
}
// Access-Control-Allow-Methods
if (options.allowMethods) {
ctx.set('Access-Control-Allow-Methods', options.allowMethods.join(','));
}
// Access-Control-Allow-Headers
if (options.allowHeaders) {
ctx.set('Access-Control-Allow-Headers', options.allowHeaders.join(','));
} else {
ctx.set('Access-Control-Allow-Headers', ctx.get('Access-Control-Request-Headers'));
}
ctx.status = 204; // No Content
} else {
// Request
// Access-Control-Allow-Credentials
if (options.credentials === true) {
if (origin === '*') {
// `credentials` can't be true when the `origin` is set to `*`
ctx.remove('Access-Control-Allow-Credentials');
} else {
ctx.set('Access-Control-Allow-Credentials', 'true');
}
}
// Access-Control-Expose-Headers
if (options.exposeHeaders) {
ctx.set('Access-Control-Expose-Headers', options.exposeHeaders.join(','));
}
try {
await next();
} catch (err) {
throw err;
}
}
};
};
| {
origin = options.origin || ctx.get('Origin') || '*';
} | conditional_block |
builder.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use {
super::{
Elem, ElemTypes, Grammar, GrammarErrors, Name, ProdElement,
ProdInner, RuleInner,
},
std::collections::BTreeMap,
};
/// A helper trait to allow builder methods to either take a type `T`, or a
/// reference to `T` if it is cloneable.
pub trait BuilderInto<T> {
/// Consumes self and produces a value of type `T`.
fn builder_into(self) -> T;
}
impl<T> BuilderInto<T> for T {
fn builder_into(self) -> T {
self
}
}
impl<'a, T> BuilderInto<T> for &'a T
where
T: Clone,
{
fn builder_into(self) -> T {
self.clone()
}
}
impl BuilderInto<Name> for &'_ str {
fn builder_into(self) -> Name {
Name::new(self)
}
}
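// ProductionBuilder accumulates the (optionally named) elements of a single
// production; RuleBuilder::add_prod drives it through the closure it is given
// and then stores the finished ProdInner.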
pub struct ProductionBuilder<E: ElemTypes> {
action_key: E::ActionKey,
elems: Vec<ProdElement<E>>,
}
impl<E: ElemTypes> ProductionBuilder<E> {
fn new(action_key: E::ActionKey) -> Self {
ProductionBuilder {
action_key,
elems: Vec::new(),
}
}
fn build(self) -> ProdInner<E> {
let ProductionBuilder { action_key, elems } = self;
ProdInner::new(action_key, elems)
}
pub fn add_term(&mut self, term: impl BuilderInto<E::Term>) -> &mut Self {
self.elems.push(ProdElement::new_empty(Elem::Term(
term.builder_into(),
)));
self
}
pub fn add_named_term(
&mut self,
name: impl BuilderInto<Name>,
term: impl BuilderInto<E::Term>,
) -> &mut Self {
self.elems.push(ProdElement::new_with_name(
name.builder_into(),
Elem::Term(term.builder_into()),
));
self
}
pub fn add_nonterm(
&mut self, | nonterm.builder_into(),
)));
self
}
pub fn add_named_nonterm(
&mut self,
name: impl BuilderInto<Name>,
nonterm: impl BuilderInto<E::NonTerm>,
) -> &mut Self {
self.elems.push(ProdElement::new_with_name(
name.builder_into(),
Elem::NonTerm(nonterm.builder_into()),
));
self
}
}
// ----------------
pub struct RuleBuilder<'a, E: ElemTypes> {
action_map: &'a mut BTreeMap<E::ActionKey, E::ActionValue>,
head: E::NonTerm,
prods: Vec<ProdInner<E>>,
}
impl<'a, E: ElemTypes> RuleBuilder<'a, E> {
fn new(
action_map: &'a mut BTreeMap<E::ActionKey, E::ActionValue>,
head: E::NonTerm,
) -> Self {
RuleBuilder {
action_map,
head,
prods: Vec::new(),
}
}
fn build(self) -> RuleInner<E> {
let RuleBuilder { head, prods, .. } = self;
RuleInner::new(head, prods)
}
pub fn add_prod(
&mut self,
action_key: impl BuilderInto<E::ActionKey>,
action_value: impl BuilderInto<E::ActionValue>,
build_fn: impl FnOnce(&mut ProductionBuilder<E>),
) -> &mut Self {
let action_key = action_key.builder_into();
self
.action_map
.insert(action_key.clone(), action_value.builder_into());
let mut builder = ProductionBuilder::new(action_key);
build_fn(&mut builder);
self.prods.push(builder.build());
self
}
pub fn add_prod_with_elems(
&mut self,
action_key: impl BuilderInto<E::ActionKey>,
action_value: impl BuilderInto<E::ActionValue>,
elems: impl BuilderInto<Vec<ProdElement<E>>>,
) -> &mut Self {
let action_key = action_key.builder_into();
self
.action_map
.insert(action_key.clone(), action_value.builder_into());
self.prods.push(ProdInner {
action_key,
elements: elems.builder_into(),
});
self
}
}
// ----------------
pub struct GrammarBuilder<E: ElemTypes> {
start: E::NonTerm,
rules: Vec<RuleInner<E>>,
action_map: BTreeMap<E::ActionKey, E::ActionValue>,
}
impl<E: ElemTypes> GrammarBuilder<E> {
fn new(start: E::NonTerm) -> Self {
GrammarBuilder {
start,
rules: Vec::new(),
action_map: BTreeMap::new(),
}
}
fn build(self) -> Result<Grammar<E>, GrammarErrors<E>> {
let GrammarBuilder {
start,
rules,
action_map,
} = self;
Grammar::new(start, rules, action_map)
}
pub fn add_rule<F>(
&mut self,
head: impl BuilderInto<E::NonTerm>,
build_fn: F,
) -> &mut Self
where
F: FnOnce(&mut RuleBuilder<E>),
{
let mut rule_builder =
RuleBuilder::new(&mut self.action_map, head.builder_into());
build_fn(&mut rule_builder);
self.rules.push(rule_builder.build());
self
}
}
/// Builds a grammar using a builder function.
///
/// Example:
///
/// ```rust
/// # use bongo::grammar::{Terminal, NonTerminal, BaseElementTypes,
/// # Grammar};
/// # use bongo::utils::Name;
/// let t_a = Terminal::new("A");
/// let nt_x = NonTerminal::new("x");
/// let g: Grammar<BaseElementTypes> =
/// bongo::grammar::build(&nt_x, |gb| {
/// gb.add_rule(&nt_x, |rb| {
/// rb.add_prod(Name::new("Recursive"), (), |pb| {
/// pb.add_term(&t_a).add_nonterm(&nt_x).add_term(&t_a);
/// })
/// .add_prod(Name::new("Empty"), (), |_pb| {});
/// });
/// }).unwrap();
/// ```
///
/// Note that arguments that take `E::Term`, `E::NonTerm`, `E::ActionKey`, or
/// `E::ActionValue` can either take a non-reference value, or a cloneable
/// reference value.
pub fn build<E>(
start: impl BuilderInto<E::NonTerm>,
build_fn: impl FnOnce(&mut GrammarBuilder<E>),
) -> Result<Grammar<E>, GrammarErrors<E>>
where
E: ElemTypes,
{
let mut builder = GrammarBuilder::new(start.builder_into());
build_fn(&mut builder);
builder.build()
} | nonterm: impl BuilderInto<E::NonTerm>,
) -> &mut Self {
self
.elems
.push(ProdElement::new_empty(Elem::NonTerm( | random_line_split |
builder.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use {
super::{
Elem, ElemTypes, Grammar, GrammarErrors, Name, ProdElement,
ProdInner, RuleInner,
},
std::collections::BTreeMap,
};
/// A helper trait to allow builder methods to either take a type `T`, or a
/// reference to `T` if it is cloneable.
pub trait BuilderInto<T> {
/// Consumes self and produces a value of type `T`.
fn builder_into(self) -> T;
}
impl<T> BuilderInto<T> for T {
fn builder_into(self) -> T {
self
}
}
impl<'a, T> BuilderInto<T> for &'a T
where
T: Clone,
{
fn builder_into(self) -> T {
self.clone()
}
}
impl BuilderInto<Name> for &'_ str {
fn builder_into(self) -> Name {
Name::new(self)
}
}
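// ProductionBuilder accumulates the (optionally named) elements of a single
// production; RuleBuilder::add_prod drives it through the closure it is given
// and then stores the finished ProdInner.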
pub struct ProductionBuilder<E: ElemTypes> {
action_key: E::ActionKey,
elems: Vec<ProdElement<E>>,
}
impl<E: ElemTypes> ProductionBuilder<E> {
fn new(action_key: E::ActionKey) -> Self {
ProductionBuilder {
action_key,
elems: Vec::new(),
}
}
fn build(self) -> ProdInner<E> {
let ProductionBuilder { action_key, elems } = self;
ProdInner::new(action_key, elems)
}
pub fn add_term(&mut self, term: impl BuilderInto<E::Term>) -> &mut Self {
self.elems.push(ProdElement::new_empty(Elem::Term(
term.builder_into(),
)));
self
}
pub fn add_named_term(
&mut self,
name: impl BuilderInto<Name>,
term: impl BuilderInto<E::Term>,
) -> &mut Self {
self.elems.push(ProdElement::new_with_name(
name.builder_into(),
Elem::Term(term.builder_into()),
));
self
}
pub fn add_nonterm(
&mut self,
nonterm: impl BuilderInto<E::NonTerm>,
) -> &mut Self {
self
.elems
.push(ProdElement::new_empty(Elem::NonTerm(
nonterm.builder_into(),
)));
self
}
pub fn add_named_nonterm(
&mut self,
name: impl BuilderInto<Name>,
nonterm: impl BuilderInto<E::NonTerm>,
) -> &mut Self {
self.elems.push(ProdElement::new_with_name(
name.builder_into(),
Elem::NonTerm(nonterm.builder_into()),
));
self
}
}
// ----------------
pub struct RuleBuilder<'a, E: ElemTypes> {
action_map: &'a mut BTreeMap<E::ActionKey, E::ActionValue>,
head: E::NonTerm,
prods: Vec<ProdInner<E>>,
}
impl<'a, E: ElemTypes> RuleBuilder<'a, E> {
fn new(
action_map: &'a mut BTreeMap<E::ActionKey, E::ActionValue>,
head: E::NonTerm,
) -> Self {
RuleBuilder {
action_map,
head,
prods: Vec::new(),
}
}
fn build(self) -> RuleInner<E> {
let RuleBuilder { head, prods, .. } = self;
RuleInner::new(head, prods)
}
pub fn add_prod(
&mut self,
action_key: impl BuilderInto<E::ActionKey>,
action_value: impl BuilderInto<E::ActionValue>,
build_fn: impl FnOnce(&mut ProductionBuilder<E>),
) -> &mut Self {
let action_key = action_key.builder_into();
self
.action_map
.insert(action_key.clone(), action_value.builder_into());
let mut builder = ProductionBuilder::new(action_key);
build_fn(&mut builder);
self.prods.push(builder.build());
self
}
pub fn add_prod_with_elems(
&mut self,
action_key: impl BuilderInto<E::ActionKey>,
action_value: impl BuilderInto<E::ActionValue>,
elems: impl BuilderInto<Vec<ProdElement<E>>>,
) -> &mut Self |
}
// ----------------
pub struct GrammarBuilder<E: ElemTypes> {
start: E::NonTerm,
rules: Vec<RuleInner<E>>,
action_map: BTreeMap<E::ActionKey, E::ActionValue>,
}
impl<E: ElemTypes> GrammarBuilder<E> {
fn new(start: E::NonTerm) -> Self {
GrammarBuilder {
start,
rules: Vec::new(),
action_map: BTreeMap::new(),
}
}
fn build(self) -> Result<Grammar<E>, GrammarErrors<E>> {
let GrammarBuilder {
start,
rules,
action_map,
} = self;
Grammar::new(start, rules, action_map)
}
pub fn add_rule<F>(
&mut self,
head: impl BuilderInto<E::NonTerm>,
build_fn: F,
) -> &mut Self
where
F: FnOnce(&mut RuleBuilder<E>),
{
let mut rule_builder =
RuleBuilder::new(&mut self.action_map, head.builder_into());
build_fn(&mut rule_builder);
self.rules.push(rule_builder.build());
self
}
}
/// Builds a grammar using a builder function.
///
/// Example:
///
/// ```rust
/// # use bongo::grammar::{Terminal, NonTerminal, BaseElementTypes,
/// # Grammar};
/// # use bongo::utils::Name;
/// let t_a = Terminal::new("A");
/// let nt_x = NonTerminal::new("x");
/// let g: Grammar<BaseElementTypes> =
/// bongo::grammar::build(&nt_x, |gb| {
/// gb.add_rule(&nt_x, |rb| {
/// rb.add_prod(Name::new("Recursive"), (), |pb| {
/// pb.add_term(&t_a).add_nonterm(&nt_x).add_term(&t_a);
/// })
/// .add_prod(Name::new("Empty"), (), |_pb| {});
/// });
/// }).unwrap();
/// ```
///
/// Note that arguments that take `E::Term`, `E::NonTerm`, `E::ActionKey`, or
/// `E::ActionValue` can either take a non-reference value, or a cloneable
/// reference value.
pub fn build<E>(
start: impl BuilderInto<E::NonTerm>,
build_fn: impl FnOnce(&mut GrammarBuilder<E>),
) -> Result<Grammar<E>, GrammarErrors<E>>
where
E: ElemTypes,
{
let mut builder = GrammarBuilder::new(start.builder_into());
build_fn(&mut builder);
builder.build()
}
| {
let action_key = action_key.builder_into();
self
.action_map
.insert(action_key.clone(), action_value.builder_into());
self.prods.push(ProdInner {
action_key,
elements: elems.builder_into(),
});
self
} | identifier_body |
builder.rs | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use {
super::{
Elem, ElemTypes, Grammar, GrammarErrors, Name, ProdElement,
ProdInner, RuleInner,
},
std::collections::BTreeMap,
};
/// A helper trait to allow builder methods to either take a type `T`, or a
/// reference to `T` if it is cloneable.
pub trait BuilderInto<T> {
/// Consumes self and produces a value of type `T`.
fn builder_into(self) -> T;
}
impl<T> BuilderInto<T> for T {
fn builder_into(self) -> T {
self
}
}
impl<'a, T> BuilderInto<T> for &'a T
where
T: Clone,
{
fn builder_into(self) -> T {
self.clone()
}
}
impl BuilderInto<Name> for &'_ str {
fn builder_into(self) -> Name {
Name::new(self)
}
}
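// ProductionBuilder accumulates the (optionally named) elements of a single
// production; RuleBuilder::add_prod drives it through the closure it is given
// and then stores the finished ProdInner.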
pub struct ProductionBuilder<E: ElemTypes> {
action_key: E::ActionKey,
elems: Vec<ProdElement<E>>,
}
impl<E: ElemTypes> ProductionBuilder<E> {
fn new(action_key: E::ActionKey) -> Self {
ProductionBuilder {
action_key,
elems: Vec::new(),
}
}
fn build(self) -> ProdInner<E> {
let ProductionBuilder { action_key, elems } = self;
ProdInner::new(action_key, elems)
}
pub fn add_term(&mut self, term: impl BuilderInto<E::Term>) -> &mut Self {
self.elems.push(ProdElement::new_empty(Elem::Term(
term.builder_into(),
)));
self
}
pub fn add_named_term(
&mut self,
name: impl BuilderInto<Name>,
term: impl BuilderInto<E::Term>,
) -> &mut Self {
self.elems.push(ProdElement::new_with_name(
name.builder_into(),
Elem::Term(term.builder_into()),
));
self
}
pub fn add_nonterm(
&mut self,
nonterm: impl BuilderInto<E::NonTerm>,
) -> &mut Self {
self
.elems
.push(ProdElement::new_empty(Elem::NonTerm(
nonterm.builder_into(),
)));
self
}
pub fn add_named_nonterm(
&mut self,
name: impl BuilderInto<Name>,
nonterm: impl BuilderInto<E::NonTerm>,
) -> &mut Self {
self.elems.push(ProdElement::new_with_name(
name.builder_into(),
Elem::NonTerm(nonterm.builder_into()),
));
self
}
}
// ----------------
pub struct RuleBuilder<'a, E: ElemTypes> {
action_map: &'a mut BTreeMap<E::ActionKey, E::ActionValue>,
head: E::NonTerm,
prods: Vec<ProdInner<E>>,
}
impl<'a, E: ElemTypes> RuleBuilder<'a, E> {
fn new(
action_map: &'a mut BTreeMap<E::ActionKey, E::ActionValue>,
head: E::NonTerm,
) -> Self {
RuleBuilder {
action_map,
head,
prods: Vec::new(),
}
}
fn build(self) -> RuleInner<E> {
let RuleBuilder { head, prods, .. } = self;
RuleInner::new(head, prods)
}
pub fn add_prod(
&mut self,
action_key: impl BuilderInto<E::ActionKey>,
action_value: impl BuilderInto<E::ActionValue>,
build_fn: impl FnOnce(&mut ProductionBuilder<E>),
) -> &mut Self {
let action_key = action_key.builder_into();
self
.action_map
.insert(action_key.clone(), action_value.builder_into());
let mut builder = ProductionBuilder::new(action_key);
build_fn(&mut builder);
self.prods.push(builder.build());
self
}
pub fn | (
&mut self,
action_key: impl BuilderInto<E::ActionKey>,
action_value: impl BuilderInto<E::ActionValue>,
elems: impl BuilderInto<Vec<ProdElement<E>>>,
) -> &mut Self {
let action_key = action_key.builder_into();
self
.action_map
.insert(action_key.clone(), action_value.builder_into());
self.prods.push(ProdInner {
action_key,
elements: elems.builder_into(),
});
self
}
}
// ----------------
pub struct GrammarBuilder<E: ElemTypes> {
start: E::NonTerm,
rules: Vec<RuleInner<E>>,
action_map: BTreeMap<E::ActionKey, E::ActionValue>,
}
impl<E: ElemTypes> GrammarBuilder<E> {
fn new(start: E::NonTerm) -> Self {
GrammarBuilder {
start,
rules: Vec::new(),
action_map: BTreeMap::new(),
}
}
fn build(self) -> Result<Grammar<E>, GrammarErrors<E>> {
let GrammarBuilder {
start,
rules,
action_map,
} = self;
Grammar::new(start, rules, action_map)
}
pub fn add_rule<F>(
&mut self,
head: impl BuilderInto<E::NonTerm>,
build_fn: F,
) -> &mut Self
where
F: FnOnce(&mut RuleBuilder<E>),
{
let mut rule_builder =
RuleBuilder::new(&mut self.action_map, head.builder_into());
build_fn(&mut rule_builder);
self.rules.push(rule_builder.build());
self
}
}
/// Builds a grammar using a builder function.
///
/// Example:
///
/// ```rust
/// # use bongo::grammar::{Terminal, NonTerminal, BaseElementTypes,
/// # Grammar};
/// # use bongo::utils::Name;
/// let t_a = Terminal::new("A");
/// let nt_x = NonTerminal::new("x");
/// let g: Grammar<BaseElementTypes> =
/// bongo::grammar::build(&nt_x, |gb| {
/// gb.add_rule(&nt_x, |rb| {
/// rb.add_prod(Name::new("Recursive"), (), |pb| {
/// pb.add_term(&t_a).add_nonterm(&nt_x).add_term(&t_a);
/// })
/// .add_prod(Name::new("Empty"), (), |_pb| {});
/// });
/// }).unwrap();
/// ```
///
/// Note that arguments that take `E::Term`, `E::NonTerm`, `E::ActionKey`, or
/// `E::ActionValue` can either take a non-reference value, or a cloneable
/// reference value.
pub fn build<E>(
start: impl BuilderInto<E::NonTerm>,
build_fn: impl FnOnce(&mut GrammarBuilder<E>),
) -> Result<Grammar<E>, GrammarErrors<E>>
where
E: ElemTypes,
{
let mut builder = GrammarBuilder::new(start.builder_into());
build_fn(&mut builder);
builder.build()
}
| add_prod_with_elems | identifier_name |
module.ts | import { PostgresDatasource } from './datasource';
import { PostgresQueryCtrl } from './query_ctrl';
import { PostgresConfigCtrl } from './config_ctrl';
import { PostgresQuery } from './types';
import { DataSourcePlugin } from '@grafana/data';
const defaultQuery = `SELECT
extract(epoch from time_column) AS time,
text_column as text,
tags_column as tags
FROM
metric_table
WHERE
$__timeFilter(time_column)
`;
class PostgresAnnotationsQueryCtrl {
static templateUrl = 'partials/annotations.editor.html';
declare annotation: any;
| this.annotation.rawQuery = this.annotation.rawQuery || defaultQuery;
}
}
export const plugin = new DataSourcePlugin<PostgresDatasource, PostgresQuery>(PostgresDatasource)
.setQueryCtrl(PostgresQueryCtrl)
.setConfigCtrl(PostgresConfigCtrl)
.setAnnotationQueryCtrl(PostgresAnnotationsQueryCtrl); | /** @ngInject */
constructor($scope: any) {
this.annotation = $scope.ctrl.annotation; | random_line_split |
module.ts | import { PostgresDatasource } from './datasource';
import { PostgresQueryCtrl } from './query_ctrl';
import { PostgresConfigCtrl } from './config_ctrl';
import { PostgresQuery } from './types';
import { DataSourcePlugin } from '@grafana/data';
const defaultQuery = `SELECT
extract(epoch from time_column) AS time,
text_column as text,
tags_column as tags
FROM
metric_table
WHERE
$__timeFilter(time_column)
`;
class PostgresAnnotationsQueryCtrl {
static templateUrl = 'partials/annotations.editor.html';
declare annotation: any;
/** @ngInject */
| ($scope: any) {
this.annotation = $scope.ctrl.annotation;
this.annotation.rawQuery = this.annotation.rawQuery || defaultQuery;
}
}
export const plugin = new DataSourcePlugin<PostgresDatasource, PostgresQuery>(PostgresDatasource)
.setQueryCtrl(PostgresQueryCtrl)
.setConfigCtrl(PostgresConfigCtrl)
.setAnnotationQueryCtrl(PostgresAnnotationsQueryCtrl);
| constructor | identifier_name |
qidian_ranking.py | #!/usr/bin/env python
import time
from talonspider import Spider, Item, TextField, AttrField
from talonspider.utils import get_random_user_agent
import os
os.environ['MODE'] = 'PRO'
from owllook.database.mongodb import MotorBase
from owllook.utils.tools import async_callback
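# Spider for Qidian ranking pages: each category page is parsed into a
# per-ranking list of the top books and upserted into MongoDB.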
class RankingItem(Item):
target_item = TextField(css_select='.rank-list')
ranking_title = TextField(css_select='h3.wrap-title')
more = AttrField(css_select='h3>a.more', attr='href')
book_list = TextField(css_select='div.book-list>ul>li')
def tal_more(self, more):
return "http:" + more
class | (Item):
top_name = TextField(css_select='h4>a')
other_name = TextField(css_select='a.name')
class QidianRankingSpider(Spider):
start_urls = ["http://r.qidian.com/?chn=" + str(url) for url in [-1, 21, 1, 2, 22, 4, 15, 6, 5, 7, 8, 9, 10, 12]]
headers = {
"User-Agent": get_random_user_agent()
}
set_mul = True
qidian_type = {
'-1': '全部类别',
'21': '玄幻',
'1': '奇幻',
'2': '武侠',
'22': '仙侠',
'4': '都市',
'15': '职场',
'6': '军事',
'5': '历史',
'7': '游戏',
'8': '体育',
'9': '科幻',
'10': '灵异',
'12': '二次元',
}
def parse(self, res):
items_data = RankingItem.get_items(html=res.html)
result = []
res_dic = {}
for item in items_data:
each_book_list = []
# Only keep the top ten ranked books
for index, value in enumerate(item.book_list[:10]):
item_data = NameItem.get_item(html_etree=value)
name = item_data.get('top_name') or item_data.get('other_name')
each_book_list.append({
'num': index + 1,
'name': name
})
data = {
'title': item.ranking_title,
'more': item.more,
'book_list': each_book_list,
'updated_at': time.strftime("%Y-%m-%d %X", time.localtime()),
}
result.append(data)
res_dic['data'] = result
res_dic['target_url'] = res.url
res_dic['type'] = self.qidian_type.get(res.url.split('=')[-1])
res_dic['spider'] = "qidian"
async_callback(self.save, res_dic=res_dic)
async def save(self, **kwargs):
# Save the results to the database
res_dic = kwargs.get('res_dic')
try:
motor_db = MotorBase().db
await motor_db.novels_ranking.update_one({
'target_url': res_dic['target_url']},
{'$set': {
'data': res_dic['data'],
'spider': res_dic['spider'],
'type': res_dic['type'],
'finished_at': time.strftime("%Y-%m-%d %X", time.localtime())
}},
upsert=True)
except Exception as e:
self.logger.exception(e)
if __name__ == '__main__':
QidianRankingSpider().start()
| NameItem | identifier_name |
qidian_ranking.py | #!/usr/bin/env python
import time
from talonspider import Spider, Item, TextField, AttrField
from talonspider.utils import get_random_user_agent
import os
os.environ['MODE'] = 'PRO'
from owllook.database.mongodb import MotorBase
from owllook.utils.tools import async_callback
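# Spider for Qidian ranking pages: each category page is parsed into a
# per-ranking list of the top books and upserted into MongoDB.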
class RankingItem(Item):
target_item = TextField(css_select='.rank-list')
ranking_title = TextField(css_select='h3.wrap-title')
more = AttrField(css_select='h3>a.more', attr='href')
book_list = TextField(css_select='div.book-list>ul>li')
def tal_more(self, more):
return "http:" + more
class NameItem(Item):
|
class QidianRankingSpider(Spider):
start_urls = ["http://r.qidian.com/?chn=" + str(url) for url in [-1, 21, 1, 2, 22, 4, 15, 6, 5, 7, 8, 9, 10, 12]]
headers = {
"User-Agent": get_random_user_agent()
}
set_mul = True
qidian_type = {
'-1': '全部类别',
'21': '玄幻',
'1': '奇幻',
'2': '武侠',
'22': '仙侠',
'4': '都市',
'15': '职场',
'6': '军事',
'5': '历史',
'7': '游戏',
'8': '体育',
'9': '科幻',
'10': '灵异',
'12': '二次元',
}
def parse(self, res):
items_data = RankingItem.get_items(html=res.html)
result = []
res_dic = {}
for item in items_data:
each_book_list = []
# Only keep the top ten ranked books
for index, value in enumerate(item.book_list[:10]):
item_data = NameItem.get_item(html_etree=value)
name = item_data.get('top_name') or item_data.get('other_name')
each_book_list.append({
'num': index + 1,
'name': name
})
data = {
'title': item.ranking_title,
'more': item.more,
'book_list': each_book_list,
'updated_at': time.strftime("%Y-%m-%d %X", time.localtime()),
}
result.append(data)
res_dic['data'] = result
res_dic['target_url'] = res.url
res_dic['type'] = self.qidian_type.get(res.url.split('=')[-1])
res_dic['spider'] = "qidian"
async_callback(self.save, res_dic=res_dic)
async def save(self, **kwargs):
# Save the results to the database
res_dic = kwargs.get('res_dic')
try:
motor_db = MotorBase().db
await motor_db.novels_ranking.update_one({
'target_url': res_dic['target_url']},
{'$set': {
'data': res_dic['data'],
'spider': res_dic['spider'],
'type': res_dic['type'],
'finished_at': time.strftime("%Y-%m-%d %X", time.localtime())
}},
upsert=True)
except Exception as e:
self.logger.exception(e)
if __name__ == '__main__':
QidianRankingSpider().start()
| top_name = TextField(css_select='h4>a')
other_name = TextField(css_select='a.name') | identifier_body |
qidian_ranking.py | #!/usr/bin/env python
import time
from talonspider import Spider, Item, TextField, AttrField
from talonspider.utils import get_random_user_agent
import os
os.environ['MODE'] = 'PRO'
from owllook.database.mongodb import MotorBase
from owllook.utils.tools import async_callback
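# Spider for Qidian ranking pages: each category page is parsed into a
# per-ranking list of the top books and upserted into MongoDB.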
class RankingItem(Item):
target_item = TextField(css_select='.rank-list')
ranking_title = TextField(css_select='h3.wrap-title')
more = AttrField(css_select='h3>a.more', attr='href')
book_list = TextField(css_select='div.book-list>ul>li')
def tal_more(self, more):
return "http:" + more
class NameItem(Item):
top_name = TextField(css_select='h4>a')
other_name = TextField(css_select='a.name')
class QidianRankingSpider(Spider):
start_urls = ["http://r.qidian.com/?chn=" + str(url) for url in [-1, 21, 1, 2, 22, 4, 15, 6, 5, 7, 8, 9, 10, 12]]
headers = {
"User-Agent": get_random_user_agent()
}
set_mul = True
qidian_type = {
'-1': '全部类别',
'21': '玄幻',
'1': '奇幻',
'2': '武侠',
'22': '仙侠',
'4': '都市',
'15': '职场',
'6': '军事',
'5': '历史',
'7': '游戏',
'8': '体育',
'9': '科幻',
'10': '灵异',
'12': '二次元',
}
def parse(self, res):
items_data = RankingItem.get_items(html=res.html)
result = []
res_dic = {}
for item in items_data:
each_book_list = []
# Only keep the top ten ranked books
for index, value in enumerate(item.book_list[:10]):
item_data = NameItem.get_item(html_etree=value)
name = item_data.get('top_name') or item_data.get('other_name')
each_book_list.append({
'num': index + 1,
'name': name
})
data = {
'title': item.ranking_title,
'more': item.more,
'book_list': each_book_list,
'updated_at': time.strftime("%Y-%m-%d %X", time.localtime()), | res_dic['data'] = result
res_dic['target_url'] = res.url
res_dic['type'] = self.qidian_type.get(res.url.split('=')[-1])
res_dic['spider'] = "qidian"
async_callback(self.save, res_dic=res_dic)
async def save(self, **kwargs):
# Save the results to the database
res_dic = kwargs.get('res_dic')
try:
motor_db = MotorBase().db
await motor_db.novels_ranking.update_one({
'target_url': res_dic['target_url']},
{'$set': {
'data': res_dic['data'],
'spider': res_dic['spider'],
'type': res_dic['type'],
'finished_at': time.strftime("%Y-%m-%d %X", time.localtime())
}},
upsert=True)
except Exception as e:
self.logger.exception(e)
if __name__ == '__main__':
QidianRankingSpider().start() | }
result.append(data) | random_line_split |
qidian_ranking.py | #!/usr/bin/env python
import time
from talonspider import Spider, Item, TextField, AttrField
from talonspider.utils import get_random_user_agent
import os
os.environ['MODE'] = 'PRO'
from owllook.database.mongodb import MotorBase
from owllook.utils.tools import async_callback
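# Spider for Qidian ranking pages: each category page is parsed into a
# per-ranking list of the top books and upserted into MongoDB.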
class RankingItem(Item):
target_item = TextField(css_select='.rank-list')
ranking_title = TextField(css_select='h3.wrap-title')
more = AttrField(css_select='h3>a.more', attr='href')
book_list = TextField(css_select='div.book-list>ul>li')
def tal_more(self, more):
return "http:" + more
class NameItem(Item):
top_name = TextField(css_select='h4>a')
other_name = TextField(css_select='a.name')
class QidianRankingSpider(Spider):
start_urls = ["http://r.qidian.com/?chn=" + str(url) for url in [-1, 21, 1, 2, 22, 4, 15, 6, 5, 7, 8, 9, 10, 12]]
headers = {
"User-Agent": get_random_user_agent()
}
set_mul = True
qidian_type = {
'-1': '全部类别',
'21': '玄幻',
'1': '奇幻',
'2': '武侠',
'22': '仙侠',
'4': '都市',
'15': '职场',
'6': '军事',
'5': '历史',
'7': '游戏',
'8': '体育',
'9': '科幻',
'10': '灵异',
'12': '二次元',
}
def parse(self, res):
items_data = RankingItem.get_items(html=res.html)
result = []
res_dic = {}
for item in items_data:
each_book_list = []
# Only keep the top ten ranked books
for | s_dic['type'] = self.qidian_type.get(res.url.split('=')[-1])
res_dic['spider'] = "qidian"
async_callback(self.save, res_dic=res_dic)
async def save(self, **kwargs):
# Save the results to the database
res_dic = kwargs.get('res_dic')
try:
motor_db = MotorBase().db
await motor_db.novels_ranking.update_one({
'target_url': res_dic['target_url']},
{'$set': {
'data': res_dic['data'],
'spider': res_dic['spider'],
'type': res_dic['type'],
'finished_at': time.strftime("%Y-%m-%d %X", time.localtime())
}},
upsert=True)
except Exception as e:
self.logger.exception(e)
if __name__ == '__main__':
QidianRankingSpider().start()
| index, value in enumerate(item.book_list[:10]):
item_data = NameItem.get_item(html_etree=value)
name = item_data.get('top_name') or item_data.get('other_name')
each_book_list.append({
'num': index + 1,
'name': name
})
data = {
'title': item.ranking_title,
'more': item.more,
'book_list': each_book_list,
'updated_at': time.strftime("%Y-%m-%d %X", time.localtime()),
}
result.append(data)
res_dic['data'] = result
res_dic['target_url'] = res.url
re | conditional_block |
utils.rs | use snafu::{ResultExt, Snafu};
use std::path::{Path, PathBuf};
use tokio::fs;
use tokio::io::AsyncWriteExt;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
#[snafu(display("Invalid Download URL: {} ({})", source, details))]
InvalidUrl {
details: String,
source: url::ParseError,
},
#[snafu(display("Invalid IO: {} ({})", source, details))]
InvalidIO {
details: String,
source: std::io::Error,
},
#[snafu(display("Download Error: {} ({})", source, details))]
Download {
details: String,
source: reqwest::Error,
},
}
pub async fn file_exists(path: &Path) -> bool {
fs::metadata(path).await.is_ok()
}
pub async fn create_dir_if_not_exists(path: &Path) -> Result<(), Error> {
if !file_exists(path).await {
fs::create_dir(path).await.context(InvalidIOSnafu {
details: format!("could no create directory {}", path.display()),
})?;
}
Ok(())
}
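/// Creates `path` and any missing ancestors, one component at a time.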
pub async fn create_dir_if_not_exists_rec(path: &Path) -> Result<(), Error> {
let mut head = PathBuf::new();
for fragment in path {
head.push(fragment);
create_dir_if_not_exists(&head).await?;
}
Ok(())
}
/// Downloads the file identified by the url and saves it to the given path.
/// If a file is already present, it will append to that file.
pub async fn download_to_file(path: &Path, url: &str) -> Result<(), Error> {
let mut file = tokio::io::BufWriter::new({
fs::OpenOptions::new()
.append(true)
.create(true)
.open(&path)
.await |
let mut resp = reqwest::get(url)
.await
.context(DownloadSnafu {
details: format!("could not download url {}", url),
})?
.error_for_status()
.context(DownloadSnafu {
details: format!("download response error for {}", url),
})?;
while let Some(chunk) = resp.chunk().await.context(DownloadSnafu {
details: format!("read chunk error during download of {}", url),
})? {
file.write_all(&chunk).await.context(InvalidIOSnafu {
details: format!("write chunk error during download of {}", url),
})?;
}
file.flush().await.context(InvalidIOSnafu {
details: format!("flush error during download of {}", url),
})?;
Ok(())
} | .context(InvalidIOSnafu {
details: format!("could no create file for download {}", path.display()),
})?
}); | random_line_split |
utils.rs | use snafu::{ResultExt, Snafu};
use std::path::{Path, PathBuf};
use tokio::fs;
use tokio::io::AsyncWriteExt;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
#[snafu(display("Invalid Download URL: {} ({})", source, details))]
InvalidUrl {
details: String,
source: url::ParseError,
},
#[snafu(display("Invalid IO: {} ({})", source, details))]
InvalidIO {
details: String,
source: std::io::Error,
},
#[snafu(display("Download Error: {} ({})", source, details))]
Download {
details: String,
source: reqwest::Error,
},
}
pub async fn file_exists(path: &Path) -> bool {
fs::metadata(path).await.is_ok()
}
pub async fn create_dir_if_not_exists(path: &Path) -> Result<(), Error> {
if !file_exists(path).await |
Ok(())
}
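/// Creates `path` and any missing ancestors, one component at a time.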
pub async fn create_dir_if_not_exists_rec(path: &Path) -> Result<(), Error> {
let mut head = PathBuf::new();
for fragment in path {
head.push(fragment);
create_dir_if_not_exists(&head).await?;
}
Ok(())
}
/// Downloads the file identified by the url and saves it to the given path.
/// If a file is already present, it will append to that file.
pub async fn download_to_file(path: &Path, url: &str) -> Result<(), Error> {
let mut file = tokio::io::BufWriter::new({
fs::OpenOptions::new()
.append(true)
.create(true)
.open(&path)
.await
.context(InvalidIOSnafu {
details: format!("could no create file for download {}", path.display()),
})?
});
let mut resp = reqwest::get(url)
.await
.context(DownloadSnafu {
details: format!("could not download url {}", url),
})?
.error_for_status()
.context(DownloadSnafu {
details: format!("download response error for {}", url),
})?;
while let Some(chunk) = resp.chunk().await.context(DownloadSnafu {
details: format!("read chunk error during download of {}", url),
})? {
file.write_all(&chunk).await.context(InvalidIOSnafu {
details: format!("write chunk error during download of {}", url),
})?;
}
file.flush().await.context(InvalidIOSnafu {
details: format!("flush error during download of {}", url),
})?;
Ok(())
}
| {
fs::create_dir(path).await.context(InvalidIOSnafu {
details: format!("could no create directory {}", path.display()),
})?;
} | conditional_block |
utils.rs | use snafu::{ResultExt, Snafu};
use std::path::{Path, PathBuf};
use tokio::fs;
use tokio::io::AsyncWriteExt;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
#[snafu(display("Invalid Download URL: {} ({})", source, details))]
InvalidUrl {
details: String,
source: url::ParseError,
},
#[snafu(display("Invalid IO: {} ({})", source, details))]
InvalidIO {
details: String,
source: std::io::Error,
},
#[snafu(display("Download Error: {} ({})", source, details))]
Download {
details: String,
source: reqwest::Error,
},
}
pub async fn file_exists(path: &Path) -> bool {
fs::metadata(path).await.is_ok()
}
pub async fn create_dir_if_not_exists(path: &Path) -> Result<(), Error> {
if !file_exists(path).await {
fs::create_dir(path).await.context(InvalidIOSnafu {
details: format!("could no create directory {}", path.display()),
})?;
}
Ok(())
}
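/// Creates `path` and any missing ancestors, one component at a time.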
pub async fn create_dir_if_not_exists_rec(path: &Path) -> Result<(), Error> {
let mut head = PathBuf::new();
for fragment in path {
head.push(fragment);
create_dir_if_not_exists(&head).await?;
}
Ok(())
}
/// Downloads the file identified by the url and saves it to the given path.
/// If a file is already present, it will append to that file.
pub async fn | (path: &Path, url: &str) -> Result<(), Error> {
let mut file = tokio::io::BufWriter::new({
fs::OpenOptions::new()
.append(true)
.create(true)
.open(&path)
.await
.context(InvalidIOSnafu {
details: format!("could no create file for download {}", path.display()),
})?
});
let mut resp = reqwest::get(url)
.await
.context(DownloadSnafu {
details: format!("could not download url {}", url),
})?
.error_for_status()
.context(DownloadSnafu {
details: format!("download response error for {}", url),
})?;
while let Some(chunk) = resp.chunk().await.context(DownloadSnafu {
details: format!("read chunk error during download of {}", url),
})? {
file.write_all(&chunk).await.context(InvalidIOSnafu {
details: format!("write chunk error during download of {}", url),
})?;
}
file.flush().await.context(InvalidIOSnafu {
details: format!("flush error during download of {}", url),
})?;
Ok(())
}
| download_to_file | identifier_name |
cluster.py | # -*- coding: utf-8 -*-
"""
Clustering and the EM algorithm
~~~~~~~~~~~~~~~~
Clustering
:copyright: (c) 2016 by the huaxz1986.
:license: lgpl-3.0, see LICENSE for more details.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.samples_generator import make_blobs
# from .agglomerative_clustering import test_AgglomerativeClustering,test_AgglomerativeClustering_nclusters,test_AgglomerativeClustering_linkage
# from .dbscan import test_DBSCAN,test_DBSCAN_epsilon,test_DBSCAN_min_samples
from chapters.Cluster_EM.gmm import test_GMM,test_GMM_cov_type,test_GMM_n_components
# from .kmeans import test_Kmeans,test_Kmeans_n_init,test_Kmeans_nclusters
def create_data(centers,num=100,std=0.7):
'''
Generate a data set for clustering.
:param centers: array made up of the cluster centers. If the centers are two-dimensional, every generated sample is two-dimensional as well.
:param num: number of samples
:param std: standard deviation of the samples within each cluster
:return: the data set for clustering, as a tuple: the first element is the sample set, the second element is the true cluster labels of the samples
'''
X, labels_true = make_blobs(n_samples=num, centers=centers, cluster_std=std)
return X,labels_true
def plot_data(*data):
'''
Plot the data set used for clustering.
:param data: variadic argument holding a tuple: the first element is the sample set, the second element is the true cluster labels of the samples
:return: None | labels=np.unique(labels_true)
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
colors='rgbyckm' # mark each cluster's samples with a different color
for i,label in enumerate(labels):
position=labels_true==label
ax.scatter(X[position,0],X[position,1],label="cluster %d"%label,
color=colors[i%len(colors)])
ax.legend(loc="best",framealpha=0.5)
ax.set_xlabel("X[0]")
ax.set_ylabel("Y[1]")
ax.set_title("data")
plt.show()
if __name__=='__main__':
centers=[[1,1],[2,2],[1,2],[10,20]] # centers used to generate the clusters
X,labels_true=create_data(centers,1000,0.5) # generate the data set for clustering
# plot_data(X,labels_true) # plot the data set for clustering
# test_Kmeans(X,labels_true) # call the test_Kmeans function
# test_Kmeans_nclusters(X,labels_true) # call the test_Kmeans_nclusters function
# test_Kmeans_n_init(X,labels_true) # call the test_Kmeans_n_init function
# test_DBSCAN(X,labels_true) # call the test_DBSCAN function
# test_DBSCAN_epsilon(X,labels_true) # call the test_DBSCAN_epsilon function
# test_DBSCAN_min_samples(X,labels_true) # call the test_DBSCAN_min_samples function
# test_AgglomerativeClustering(X,labels_true) # call the test_AgglomerativeClustering function
# test_AgglomerativeClustering_nclusters(X,labels_true) # call the test_AgglomerativeClustering_nclusters function
# test_AgglomerativeClustering_linkage(X,labels_true) # call the test_AgglomerativeClustering_linkage function
# test_GMM(X,labels_true) # call the test_GMM function
# test_GMM_n_components(X,labels_true) # call the test_GMM_n_components function
test_GMM_cov_type(X,labels_true) # call the test_GMM_cov_type function | '''
X,labels_true=data | random_line_split |
cluster.py | # -*- coding: utf-8 -*-
"""
Clustering and the EM algorithm
~~~~~~~~~~~~~~~~
Clustering
:copyright: (c) 2016 by the huaxz1986.
:license: lgpl-3.0, see LICENSE for more details.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.samples_generator import make_blobs
# from .agglomerative_clustering import test_AgglomerativeClustering,test_AgglomerativeClustering_nclusters,test_AgglomerativeClustering_linkage
# from .dbscan import test_DBSCAN,test_DBSCAN_epsilon,test_DBSCAN_min_samples
from chapters.Cluster_EM.gmm import test_GMM,test_GMM_cov_type,test_GMM_n_components
# from .kmeans import test_Kmeans,test_Kmeans_n_init,test_Kmeans_nclusters
def create_data(centers,num=100,std=0.7):
'''
Generate a data set for clustering.
:param centers: array made up of the cluster centers. If the centers are two-dimensional, every generated sample is two-dimensional as well.
:param num: number of samples
:param std: standard deviation of the samples within each cluster
:return: the data set for clustering, as a tuple: the first element is the sample set, the second element is the true cluster labels of the samples
'''
X, labels_true = make_blobs(n_samples=num, centers=centers, cluster_std=std)
return X,labels_true
def plot_data(*data):
'''
Plot the data set used for clustering.
:param data: variadic argument holding a tuple: the first element is the sample set, the second element is the true cluster labels of the samples
:return: None
'''
X,labels_true=data
labels=np.unique(labels_true)
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
colors='rgbyckm' # mark each cluster's samples with a different color
for i,label in enumerate(labels):
position=labels_true==label
ax.scatter(X[position,0],X[position,1],label="cluster %d"%label,
color=colors[i%len(colors)])
ax.legend(loc="best",framealpha=0.5)
ax.set_xlabel("X[0]")
ax.set_ylabel("Y[1]")
ax.set_title("data")
plt.show()
if __name__=='__main__':
centers=[[1,1],[2,2],[1,2],[10,20]] # centers used to generate the clusters
X,labels_true=create_data(centers,1000,0.5) # generate the data set for clustering
# plot_data(X,labels_true) # plot the data set for clustering
# test_Kmeans(X,labels_true) # call the test_Kmeans function
# test_Kmeans_nclusters(X,labels_true) # call the test_Kmeans_nclusters function
# test_Kmeans_n_init(X,labels_true) # call the test_Kmeans_n_init function
# | test_DBSCAN(X,labels_true) # 调用 test_DBSCAN 函数
# test_DBSCAN_epsilon(X,labels_true) # call the test_DBSCAN_epsilon function
# test_DBSCAN_min_samples(X,labels_true) # call the test_DBSCAN_min_samples function
# test_AgglomerativeClustering(X,labels_true) # call the test_AgglomerativeClustering function
# test_AgglomerativeClustering_nclusters(X,labels_true) # call the test_AgglomerativeClustering_nclusters function
# test_AgglomerativeClustering_linkage(X,labels_true) # call the test_AgglomerativeClustering_linkage function
# test_GMM(X,labels_true) # call the test_GMM function
# test_GMM_n_components(X,labels_true) # call the test_GMM_n_components function
test_GMM_cov_type(X,labels_true) # call the test_GMM_cov_type function
| conditional_block |
|
cluster.py | # -*- coding: utf-8 -*-
"""
Clustering and the EM algorithm
~~~~~~~~~~~~~~~~
Clustering
:copyright: (c) 2016 by the huaxz1986.
:license: lgpl-3.0, see LICENSE for more details.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.samples_generator import make_blobs
# from .agglomerative_clustering import test_AgglomerativeClustering,test_AgglomerativeClustering_nclusters,test_AgglomerativeClustering_linkage
# from .dbscan import test_DBSCAN,test_DBSCAN_epsilon,test_DBSCAN_min_samples
from chapters.Cluster_EM.gmm import test_GMM,test_GMM_cov_type,test_GMM_n_components
# from .kmeans import test_Kmeans,test_Kmeans_n_init,test_Kmeans_nclusters
def create_data(centers,num=100,std=0.7):
'''
Generate a data set for clustering.
:param centers: array made up of the cluster centers. If the centers are two-dimensional, every generated sample is two-dimensional as well.
:param num: number of samples
:param std: standard deviation of the samples within each cluster
:return: the data set for clustering, as a tuple: the first element is the sample set, the second element is the true cluster labels of the samples
'''
X, labels_true = make_blobs(n_samples=num, centers=centers, cluster_std=std)
return X,labels_true
def plot_data(*data):
'''
Plot the data set used for clustering.
:param data: variadic argument holding a tuple: the first element is the sample set, the second element is the true cluster labels of the samples
:return: None
'''
X,labels_true=data
labels=np.unique(labels_true)
fig=plt.figure()
ax=fig.add_subplot(1,1, | call the test_Kmeans_n_init function
# test_DBSCAN(X,labels_true) # call the test_DBSCAN function
# test_DBSCAN_epsilon(X,labels_true) # call the test_DBSCAN_epsilon function
# test_DBSCAN_min_samples(X,labels_true) # call the test_DBSCAN_min_samples function
# test_AgglomerativeClustering(X,labels_true) # call the test_AgglomerativeClustering function
# test_AgglomerativeClustering_nclusters(X,labels_true) # call the test_AgglomerativeClustering_nclusters function
# test_AgglomerativeClustering_linkage(X,labels_true) # call the test_AgglomerativeClustering_linkage function
# test_GMM(X,labels_true) # call the test_GMM function
# test_GMM_n_components(X,labels_true) # call the test_GMM_n_components function
test_GMM_cov_type(X,labels_true) # call the test_GMM_cov_type function
| 1)
colors='rgbyckm' # mark each cluster's samples with a different color
for i,label in enumerate(labels):
position=labels_true==label
ax.scatter(X[position,0],X[position,1],label="cluster %d"%label,
color=colors[i%len(colors)])
ax.legend(loc="best",framealpha=0.5)
ax.set_xlabel("X[0]")
ax.set_ylabel("Y[1]")
ax.set_title("data")
plt.show()
if __name__=='__main__':
centers=[[1,1],[2,2],[1,2],[10,20]] # centers used to generate the clusters
X,labels_true=create_data(centers,1000,0.5) # generate the data set for clustering
# plot_data(X,labels_true) # plot the data set for clustering
# test_Kmeans(X,labels_true) # call the test_Kmeans function
# test_Kmeans_nclusters(X,labels_true) # call the test_Kmeans_nclusters function
# test_Kmeans_n_init(X,labels_true) # | identifier_body |
cluster.py | # -*- coding: utf-8 -*-
"""
Clustering and the EM algorithm
~~~~~~~~~~~~~~~~
Clustering
:copyright: (c) 2016 by the huaxz1986.
:license: lgpl-3.0, see LICENSE for more details.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.samples_generator import make_blobs
# from .agglomerative_clustering import test_AgglomerativeClustering,test_AgglomerativeClustering_nclusters,test_AgglomerativeClustering_linkage
# from .dbscan import test_DBSCAN,test_DBSCAN_epsilon,test_DBSCAN_min_samples
from chapters.Cluster_EM.gmm import test_GMM,test_GMM_cov_type,test_GMM_n_components
# from .kmeans import test_Kmeans,test_Kmeans_n_init,test_Kmeans_nclusters
def create_data(centers,num=100,std=0.7):
'''
Generate a data set for clustering.
:param centers: array made up of the cluster centers. If the centers are two-dimensional, every generated sample is two-dimensional as well.
:param num: number of samples
:param std: standard deviation of the samples within each cluster
:return: the data set for clustering, as a tuple: the first element is the sample set, the second element is the true cluster labels of the samples
'''
X, labels_true = make_blobs(n_samples=num, centers=centers, cluster_std=std)
return X,labels_true
def plot_data(*data):
'''
Plot the data set used for clustering.
:param data: variadic argument holding a tuple: the first element is the sample set, the second element is the true cluster labels of the samples
:return: None
'''
X,labels_true=data
labels=np.unique(labels_true)
fig=plt.figure()
a | _subplot(1,1,1)
colors='rgbyckm' # mark each cluster's samples with a different color
for i,label in enumerate(labels):
position=labels_true==label
ax.scatter(X[position,0],X[position,1],label="cluster %d"%label,
color=colors[i%len(colors)])
ax.legend(loc="best",framealpha=0.5)
ax.set_xlabel("X[0]")
ax.set_ylabel("Y[1]")
ax.set_title("data")
plt.show()
if __name__=='__main__':
centers=[[1,1],[2,2],[1,2],[10,20]] # centers used to generate the clusters
X,labels_true=create_data(centers,1000,0.5) # generate the data set for clustering
# plot_data(X,labels_true) # plot the data set for clustering
# test_Kmeans(X,labels_true) # call the test_Kmeans function
# test_Kmeans_nclusters(X,labels_true) # call the test_Kmeans_nclusters function
# test_Kmeans_n_init(X,labels_true) # call the test_Kmeans_n_init function
# test_DBSCAN(X,labels_true) # call the test_DBSCAN function
# test_DBSCAN_epsilon(X,labels_true) # call the test_DBSCAN_epsilon function
# test_DBSCAN_min_samples(X,labels_true) # call the test_DBSCAN_min_samples function
# test_AgglomerativeClustering(X,labels_true) # call the test_AgglomerativeClustering function
# test_AgglomerativeClustering_nclusters(X,labels_true) # call the test_AgglomerativeClustering_nclusters function
# test_AgglomerativeClustering_linkage(X,labels_true) # call the test_AgglomerativeClustering_linkage function
# test_GMM(X,labels_true) # call the test_GMM function
# test_GMM_n_components(X,labels_true) # call the test_GMM_n_components function
test_GMM_cov_type(X,labels_true) # call the test_GMM_cov_type function
| x=fig.add | identifier_name |
sr.js | /*
| For licensing, see LICENSE.md or http://ckeditor.com/license
*/
CKEDITOR.plugins.setLang( 'colorbutton', 'sr', {
auto: 'Аутоматски',
bgColorTitle: 'Боја позадине',
colors: {
'000': 'Black',
'800000': 'Maroon',
'8B4513': 'Saddle Brown',
'2F4F4F': 'Dark Slate Gray',
'008080': 'Teal',
'000080': 'Navy',
'4B0082': 'Indigo',
'696969': 'Dark Gray',
B22222: 'Fire Brick',
A52A2A: 'Brown',
DAA520: 'Golden Rod',
'006400': 'Dark Green',
'40E0D0': 'Turquoise',
'0000CD': 'Medium Blue',
'800080': 'Purple',
'808080': 'Gray',
F00: 'Red',
FF8C00: 'Dark Orange',
FFD700: 'Gold',
'008000': 'Green',
'0FF': 'Cyan',
'00F': 'Blue',
EE82EE: 'Violet',
A9A9A9: 'Dim Gray',
FFA07A: 'Light Salmon',
FFA500: 'Orange',
FFFF00: 'Yellow',
'00FF00': 'Lime',
AFEEEE: 'Pale Turquoise',
ADD8E6: 'Light Blue',
DDA0DD: 'Plum',
D3D3D3: 'Light Grey',
FFF0F5: 'Lavender Blush',
FAEBD7: 'Antique White',
FFFFE0: 'Light Yellow',
F0FFF0: 'Honeydew',
F0FFFF: 'Azure',
F0F8FF: 'Alice Blue',
E6E6FA: 'Lavender',
FFF: 'White'
},
more: 'Више боја...',
panelTitle: 'Colors',
textColorTitle: 'Боја текста'
} ); | Copyright (c) 2003-2014, CKSource - Frederico Knabben. All rights reserved.
| random_line_split |
capture-clauses-boxed-closures.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn each<T>(x: &[T], f: |&T|) {
for val in x.iter() {
f(val)
}
}
fn | () {
let mut sum = 0u;
let elems = [ 1u, 2, 3, 4, 5 ];
each(elems, |val| sum += *val);
assert_eq!(sum, 15);
}
| main | identifier_name |
ComputerPanel.js | /*
* Copyright (C) 2019 - present Instructure, Inc.
*
* This file is part of Canvas.
*
* Canvas is free software: you can redistribute it and/or modify it under
* the terms of the GNU Affero General Public License as published by the Free
* Software Foundation, version 3 of the License.
*
* Canvas is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
* details.
*
* You should have received a copy of the GNU Affero General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import React, {useEffect, useRef, useState} from 'react'
import {arrayOf, bool, func, instanceOf, oneOfType, string} from 'prop-types'
import {StyleSheet, css} from 'aphrodite'
import {FileDrop} from '@instructure/ui-forms'
import {Billboard} from '@instructure/ui-billboard'
import {Button} from '@instructure/ui-buttons'
import {PresentationContent, ScreenReaderContent} from '@instructure/ui-a11y'
import {IconTrashLine} from '@instructure/ui-icons'
import {Img, Text, TruncateText} from '@instructure/ui-elements'
import {Flex, View} from '@instructure/ui-layout'
import {VideoPlayer} from '@instructure/ui-media-player'
import RocketSVG from '@instructure/canvas-media/lib/RocketSVG'
import useComputerPanelFocus from '@instructure/canvas-media/lib/useComputerPanelFocus'
import useSizeVideoPlayer from '@instructure/canvas-media/lib/useSizeVideoPlayer'
import formatMessage from '../../../../format-message'
import {getIconFromType, isAudioOrVideo, isImage, isText} from '../fileTypeUtils'
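// Builds a preview for the selected file: a data URL for images, a truncated
// string for text files, a <VideoPlayer> for audio/video, and a type icon otherwise.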
function readFile(theFile) {
const p = new Promise((resolve, reject) => {
const reader = new FileReader()
reader.onload = () => {
let result = reader.result
if (isText(theFile.type) && result.length > 1000) {
result = `${result.substr(0, 1000)}...`
}
resolve(result)
}
reader.onerror = () => {
reject()
}
if (isImage(theFile.type)) {
reader.readAsDataURL(theFile)
} else if (isText(theFile.type)) {
reader.readAsText(theFile)
} else if (isAudioOrVideo(theFile.type)) {
const sources = [{label: theFile.name, src: URL.createObjectURL(theFile)}]
resolve(<VideoPlayer sources={sources} />)
} else {
const icon = getIconFromType(theFile.type)
resolve(icon)
}
})
return p
}
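// Local-file picker panel: shows a FileDrop zone until a file is chosen, then a
// preview with a clear button; `setFile`/`setHasUploadedFile` lift state to the parent.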
export default function ComputerPanel({
theFile,
setFile,
hasUploadedFile,
setHasUploadedFile,
accept,
label
}) {
const [messages, setMessages] = useState([])
const [preview, setPreview] = useState({preview: null, isLoading: false})
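  // Generate the preview once per selected file; the guard skips re-running while
  // a preview is loading or after one (or an error) has already been produced.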
useEffect(() => {
if (!theFile || preview.isLoading || preview.preview || preview.error) return
async function getPreview() {
setPreview({preview: null, isLoading: true})
try {
const preview = await readFile(theFile)
setPreview({preview, isLoading: false})
if (isImage(theFile.type)) {
// we need the preview to know the image size to show the placeholder
theFile.preview = preview
setFile(theFile)
}
} catch (ex) {
setPreview({
preview: null,
error: formatMessage('An error occurred generating the file preview'),
isLoading: false
})
}
}
getPreview()
})
const previewPanelRef = useRef(null)
const {playerWidth, playerHeight} = useSizeVideoPlayer(
theFile,
previewPanelRef,
preview.isLoading
)
const clearButtonRef = useRef(null)
const panelRef = useRef(null)
useComputerPanelFocus(theFile, panelRef, clearButtonRef)
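  // Choose how to render the preview area based on load state, error state,
  // and the file's MIME type.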
function renderPreview() {
if (preview.isLoading) {
return (
<div aria-live="polite">
<Text color="secondary">{formatMessage('Generating preview...')}</Text>
</div>
)
} else if (preview.error) {
return (
<div className={css(styles.previewContainer)} aria-live="polite">
<Text color="error">{preview.error}</Text>
</div>
)
} else if (preview.preview) {
if (isImage(theFile.type)) {
return (
<Img
aria-label={formatMessage('{filename} image preview', {filename: theFile.name})}
src={preview.preview}
constrain="contain"
inline={false}
/>
)
} else if (isText(theFile.type)) {
return (
<View
as="pre"
display="block"
padding="x-small"
aria-label={formatMessage('{filename} text preview', {filename: theFile.name})}
>
<TruncateText maxLines={21}>{preview.preview}</TruncateText>
</View>
)
} else if (isAudioOrVideo(theFile.type)) {
return preview.preview
} else {
return (
<div
aria-label={formatMessage('{filename} file icon', {filename: theFile.name})}
className={css(styles.previewContainer)}
style={{textAlign: 'center'}}
>
<preview.preview size="medium" />
</div>
)
}
}
}
if (hasUploadedFile) {
return (
<div style={{position: 'relative'}} ref={previewPanelRef}>
<Flex direction="row-reverse" margin="none none medium">
<Flex.Item>
<Button
buttonRef={el => {
clearButtonRef.current = el
}}
onClick={() => {
setFile(null)
setPreview({preview: null, isLoading: false, error: null})
setHasUploadedFile(false)
}}
icon={IconTrashLine}
>
<ScreenReaderContent>
{formatMessage('Clear selected file: {filename}', {filename: theFile.name})}
</ScreenReaderContent>
</Button>
</Flex.Item>
<Flex.Item grow shrink>
<PresentationContent>
<Text>{theFile.name}</Text>
</PresentationContent>
</Flex.Item>
</Flex>
{isAudioOrVideo(theFile.type) ? (
<View
as="div"
height={playerHeight}
width={playerWidth}
textAlign="center"
margin="0 auto"
>
{renderPreview()}
</View>
) : (
<View as="div" height="300px" width="300px" margin="0 auto">
{renderPreview()}
</View>
)}
</div>
)
}
return (
<div ref={panelRef}>
<FileDrop
accept={accept}
onDropAccepted={([file]) => {
if (messages.length) {
setMessages([])
}
setFile(file)
setHasUploadedFile(true)
}}
onDropRejected={() => {
setMessages(
messages.concat({
text: formatMessage('Invalid file type'),
type: 'error'
})
)
}}
messages={messages}
label={
<Billboard
heading={label}
hero={<RocketSVG width="3em" height="3em" />}
message={formatMessage('Drag and drop, or click to browse your computer')}
/>
}
/>
</div>
)
}
ComputerPanel.propTypes = {
theFile: instanceOf(File),
setFile: func.isRequired,
hasUploadedFile: bool,
setHasUploadedFile: func.isRequired,
accept: oneOfType([string, arrayOf(string)]),
label: string.isRequired
}
export const styles = StyleSheet.create({
previewContainer: {
maxHeight: '250px',
overflow: 'hidden',
boxSizing: 'border-box',
margin: '5rem .375rem 0',
position: 'relative'
},
  previewArea: {
    width: '100%',
    height: '100%',
    maxHeight: '250px',
    boxSizing: 'border-box',
    objectFit: 'contain',
    overflow: 'hidden'
  }
})
no-capture-arc.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern: use of moved value
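// `arc_v` is moved into the spawned task below, so the later `arc_v.get()`
// in the parent is the "use of moved value" this test expects.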
extern mod extra;
use extra::arc;
use std::task;
fn main() {
let v = ~[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let arc_v = arc::Arc::new(v);
do task::spawn() {
let v = arc_v.get();
assert_eq!(v[3], 4);
};
assert_eq!((arc_v.get())[2], 3);
info2!("{:?}", arc_v);
}