file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
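Each row below is one fill-in-the-middle (FIM) sample: a source file is cut into `prefix`, `middle`, and `suffix`, and `fim_type` records how the cut was chosen (the four classes that appear in this section are `identifier_body`, `identifier_name`, `random_line_split`, and `conditional_block`). As a hedged sketch of how a consumer might use a row, reassembly is plain concatenation; the helper name here is hypothetical:

```rust
/// Hypothetical helper: rebuild the original file text from one FIM row.
/// A model trained on this data is given `prefix` and `suffix` and asked to
/// produce `middle`.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    format!("{prefix}{middle}{suffix}")
}

fn main() {
    assert_eq!(
        reassemble("fn add(a: i32, b: i32) -> i32 { ", "a + b", " }"),
        "fn add(a: i32, b: i32) -> i32 { a + b }"
    );
}
```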
main.rs
|
//! This expands upon the implementation defined on [Rosetta Code][element definition] and consists
//! of the relevant lines from the `LinkedList` implementation in the Rust standard library.
//!
//! [element definition]: http://rosettacode.org/wiki/Doubly-linked_list/Element_definition
#![allow(dead_code)]
use std::mem;
use std::ptr;
pub struct LinkedList<T> {
length: usize,
list_head: Link<T>,
list_tail: Rawlink<Node<T>>,
}
type Link<T> = Option<Box<Node<T>>>;
struct Rawlink<T> {
p: *mut T,
}
struct Node<T> {
next: Link<T>,
prev: Rawlink<Node<T>>,
value: T,
}
impl<T> Node<T> {
fn new(v: T) -> Node<T> {
Node {
value: v,
next: None,
prev: Rawlink::none(),
}
}
}
impl<T> Rawlink<T> {
fn none() -> Self {
Rawlink { p: ptr::null_mut() }
}
fn some(n: &mut T) -> Rawlink<T> {
Rawlink { p: n }
}
}
impl<'a, T> From<&'a mut Link<T>> for Rawlink<Node<T>> {
fn from(node: &'a mut Link<T>) -> Self {
match node.as_mut() {
None => Rawlink::none(),
Some(ptr) => Rawlink::some(ptr),
}
}
}
fn link_no_prev<T>(mut next: Box<Node<T>>) -> Link<T> {
next.prev = Rawlink::none();
Some(next)
}
impl<T> LinkedList<T> {
pub fn new() -> LinkedList<T> {
LinkedList {
length: 0,
list_head: None,
list_tail: Rawlink { p: ptr::null_mut() },
}
}
#[inline]
fn push_front_node(&mut self, mut new_head: Box<Node<T>>) {
match self.list_head {
None => {
self.list_head = link_no_prev(new_head);
self.list_tail = Rawlink::from(&mut self.list_head);
}
Some(ref mut head) => {
new_head.prev = Rawlink::none();
head.prev = Rawlink::some(&mut *new_head);
mem::swap(head, &mut new_head);
head.next = Some(new_head);
}
}
self.length += 1;
}
pub fn push_front(&mut self, elt: T) {
self.push_front_node(Box::new(Node::new(elt)));
}
}
impl<T> Default for LinkedList<T> {
fn default() -> Self
|
}
fn main() {
use std::collections;
let mut list1 = collections::LinkedList::new();
list1.push_front(8);
let mut list2 = LinkedList::new();
list2.push_front(8);
}
|
{
Self::new()
}
|
identifier_body
|
main.rs
|
//! This expands upon the implementation defined on [Rosetta Code][element definition] and consists
//! of the relevant lines from the `LinkedList` implementation in the Rust standard library.
//!
//! [element definition]: http://rosettacode.org/wiki/Doubly-linked_list/Element_definition
#![allow(dead_code)]
use std::mem;
use std::ptr;
pub struct LinkedList<T> {
length: usize,
list_head: Link<T>,
list_tail: Rawlink<Node<T>>,
}
type Link<T> = Option<Box<Node<T>>>;
struct Rawlink<T> {
p: *mut T,
}
struct Node<T> {
next: Link<T>,
prev: Rawlink<Node<T>>,
value: T,
}
impl<T> Node<T> {
fn new(v: T) -> Node<T> {
Node {
value: v,
next: None,
prev: Rawlink::none(),
}
}
}
impl<T> Rawlink<T> {
fn none() -> Self {
Rawlink { p: ptr::null_mut() }
}
fn some(n: &mut T) -> Rawlink<T> {
Rawlink { p: n }
}
}
impl<'a, T> From<&'a mut Link<T>> for Rawlink<Node<T>> {
fn from(node: &'a mut Link<T>) -> Self {
match node.as_mut() {
None => Rawlink::none(),
Some(ptr) => Rawlink::some(ptr),
}
}
}
fn link_no_prev<T>(mut next: Box<Node<T>>) -> Link<T> {
next.prev = Rawlink::none();
Some(next)
}
impl<T> LinkedList<T> {
pub fn new() -> LinkedList<T> {
LinkedList {
length: 0,
list_head: None,
list_tail: Rawlink { p: ptr::null_mut() },
}
}
#[inline]
fn push_front_node(&mut self, mut new_head: Box<Node<T>>) {
match self.list_head {
None => {
self.list_head = link_no_prev(new_head);
self.list_tail = Rawlink::from(&mut self.list_head);
}
Some(ref mut head) => {
new_head.prev = Rawlink::none();
head.prev = Rawlink::some(&mut *new_head);
mem::swap(head, &mut new_head);
head.next = Some(new_head);
}
}
self.length += 1;
}
pub fn push_front(&mut self, elt: T) {
self.push_front_node(Box::new(Node::new(elt)));
}
}
impl<T> Default for LinkedList<T> {
fn default() -> Self {
Self::new()
}
}
fn
|
() {
use std::collections;
let mut list1 = collections::LinkedList::new();
list1.push_front(8);
let mut list2 = LinkedList::new();
list2.push_front(8);
}
|
main
|
identifier_name
|
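The two rows above are splits of the same `main.rs`: the first elides the body of `default`, the second the identifier `main`. As a quick, hedged usage sketch of the API the prefix defines (it assumes it sits in the same file as the `LinkedList` above, so the private `length` field is visible; nothing beyond the listed items is relied on):

```rust
// Sketch only: assumes the LinkedList<T>, push_front and Default impl shown above.
fn demo() {
    let mut list: LinkedList<i32> = LinkedList::default(); // same as LinkedList::new()
    list.push_front(2);
    list.push_front(1); // 1 becomes the new head, 2 stays as the tail
    assert_eq!(list.length, 2);
}
```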
output.rs
|
use escape_grapheme::{escape_grapheme, lang, CharEncoder, Named};
use std::io;
pub fn write_to<W: io::Write>(writer: &mut W, grapheme: &str) -> std::io::Result<()> {
writeln!(writer)?;
writeln!(writer, "{}", language_output(grapheme, lang::css::Css))?;
|
writeln!(writer)?;
writeln!(writer, "{}", language_output(grapheme, lang::html::Html))?;
writeln!(writer)?;
writeln!(writer, "{}", language_output(grapheme, lang::js::Js))?;
Ok(())
}
fn language_output<T: CharEncoder + Named>(grapheme: &str, t: T) -> String {
let grey = ansi_term::Colour::Black.bold();
let escape = escape_grapheme(grapheme, t);
let lang = grey.paint(format!("-- {}", T::name()));
format!(" {:<10} {}", escape, lang)
}
#[cfg(test)]
mod tests {
use super::language_output;
use escape_grapheme::{CharEncoder, Named};
#[test]
fn test_output() {
struct AlwaysHello;
impl CharEncoder for AlwaysHello {
fn encode(iter: &mut dyn Iterator<Item = char>) -> Option<String> {
iter.next().map(|_| "hello".to_string())
}
fn wrap_in_quotes() -> bool {
false
}
}
impl Named for AlwaysHello {
fn name() -> &'static str {
"AlwaysHello"
}
}
let grey = ansi_term::Colour::Black.bold();
let actual = language_output("a", AlwaysHello);
let expected = format!(" hello {}", grey.paint("-- AlwaysHello"));
assert_eq!(actual, expected);
}
}
|
random_line_split
|
|
output.rs
|
use escape_grapheme::{escape_grapheme, lang, CharEncoder, Named};
use std::io;
pub fn write_to<W: io::Write>(writer: &mut W, grapheme: &str) -> std::io::Result<()> {
writeln!(writer)?;
writeln!(writer, "{}", language_output(grapheme, lang::css::Css))?;
writeln!(writer)?;
writeln!(writer, "{}", language_output(grapheme, lang::html::Html))?;
writeln!(writer)?;
writeln!(writer, "{}", language_output(grapheme, lang::js::Js))?;
Ok(())
}
fn language_output<T: CharEncoder + Named>(grapheme: &str, t: T) -> String {
let grey = ansi_term::Colour::Black.bold();
let escape = escape_grapheme(grapheme, t);
let lang = grey.paint(format!("-- {}", T::name()));
format!(" {:<10} {}", escape, lang)
}
#[cfg(test)]
mod tests {
use super::language_output;
use escape_grapheme::{CharEncoder, Named};
#[test]
fn test_output() {
struct AlwaysHello;
impl CharEncoder for AlwaysHello {
fn encode(iter: &mut dyn Iterator<Item = char>) -> Option<String> {
iter.next().map(|_| "hello".to_string())
}
fn wrap_in_quotes() -> bool {
false
}
}
impl Named for AlwaysHello {
fn
|
() -> &'static str {
"AlwaysHello"
}
}
let grey = ansi_term::Colour::Black.bold();
let actual = language_output("a", AlwaysHello);
let expected = format!(" hello {}", grey.paint("-- AlwaysHello"));
assert_eq!(actual, expected);
}
}
|
name
|
identifier_name
|
output.rs
|
use escape_grapheme::{escape_grapheme, lang, CharEncoder, Named};
use std::io;
pub fn write_to<W: io::Write>(writer: &mut W, grapheme: &str) -> std::io::Result<()>
|
fn language_output<T: CharEncoder + Named>(grapheme: &str, t: T) -> String {
let grey = ansi_term::Colour::Black.bold();
let escape = escape_grapheme(grapheme, t);
let lang = grey.paint(format!("-- {}", T::name()));
format!(" {:<10} {}", escape, lang)
}
#[cfg(test)]
mod tests {
use super::language_output;
use escape_grapheme::{CharEncoder, Named};
#[test]
fn test_output() {
struct AlwaysHello;
impl CharEncoder for AlwaysHello {
fn encode(iter: &mut dyn Iterator<Item = char>) -> Option<String> {
iter.next().map(|_| "hello".to_string())
}
fn wrap_in_quotes() -> bool {
false
}
}
impl Named for AlwaysHello {
fn name() -> &'static str {
"AlwaysHello"
}
}
let grey = ansi_term::Colour::Black.bold();
let actual = language_output("a", AlwaysHello);
let expected = format!(" hello {}", grey.paint("-- AlwaysHello"));
assert_eq!(actual, expected);
}
}
|
{
writeln!(writer)?;
writeln!(writer, "{}", language_output(grapheme, lang::css::Css))?;
writeln!(writer)?;
writeln!(writer, "{}", language_output(grapheme, lang::html::Html))?;
writeln!(writer)?;
writeln!(writer, "{}", language_output(grapheme, lang::js::Js))?;
Ok(())
}
|
identifier_body
|
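All three `output.rs` rows show the same writer: `write_to` prints a blank line followed by one escaped form per target language (CSS, HTML, JS), and `language_output` left-aligns the escape in a 10-character field before the grey `-- <name>` label. A hedged sketch of driving it with an in-memory buffer; only the signatures visible above are assumed:

```rust
// Sketch only: assumes write_to from the listing is in scope.
fn print_escapes(grapheme: &str) -> std::io::Result<()> {
    let mut buf: Vec<u8> = Vec::new(); // Vec<u8> implements io::Write
    write_to(&mut buf, grapheme)?;     // emits the CSS, HTML and JS escapes
    print!("{}", String::from_utf8_lossy(&buf));
    Ok(())
}
```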
flags.rs
|
// Copyright 2017 Jeremy Wall <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains code for converting a UCG Val into the command line flag output target.
use std::io::Write;
use std::rc::Rc;
use crate::build::Val;
use crate::convert::traits::{ConvertResult, Converter};
use crate::error::{BuildError, ErrorType};
/// FlagConverter implements the conversion logic for converting a Val into a set
/// of command line flags.
pub struct FlagConverter {
sep: &'static str,
}
impl FlagConverter {
pub fn new() -> Self {
FlagConverter { sep: "." }
}
pub fn with_sep(mut self, sep: &'static str) -> Self {
self.sep = sep;
self
}
fn write_flag_name(&self, pfx: &str, name: &str, w: &mut dyn Write) -> ConvertResult {
if name.chars().count() > 1 || pfx.chars().count() > 0 {
write!(w, "--{}{} ", pfx, name)?;
} else {
write!(w, "-{} ", name)?;
}
return Ok(());
}
fn write_list_flag(
&self,
pfx: &str,
name: &str,
def: &Vec<Rc<Val>>,
w: &mut dyn Write,
) -> ConvertResult {
// first of all we need to make sure that each &Val is only a primitive type.
for v in def.iter() {
let vref = v.as_ref();
if vref.is_list() || vref.is_tuple() {
eprintln!(
"Skipping non primitive val in list for flag {}{}",
pfx, name
);
} else {
self.write_flag_name(pfx, name, w)?;
self.write_simple_value(vref, w)?;
}
}
return Ok(());
}
fn write_simple_value(&self, v: &Val, w: &mut dyn Write) -> ConvertResult {
match v {
&Val::Empty => {
// Empty is a noop.
return Ok(());
}
&Val::Boolean(b) => {
write!(w, "{} ", if b { "true" } else { "false" })?;
}
&Val::Float(ref f) => {
write!(w, "{} ", f)?;
}
&Val::Int(ref i) => {
write!(w, "{} ", i)?;
}
&Val::Str(ref s) => {
write!(w, "'{}' ", s)?;
}
&Val::List(_) | &Val::Tuple(_) | &Val::Env(_) => {
// This is ignored
eprintln!("Skipping {}...", v.type_name());
}
}
Ok(())
}
fn write(&self, pfx: &str, flds: &Vec<(String, Rc<Val>)>, w: &mut dyn Write) -> ConvertResult {
for &(ref name, ref val) in flds.iter() {
if let &Val::Empty = val.as_ref() {
self.write_flag_name(pfx, name, w)?;
continue;
}
match val.as_ref() {
&Val::Tuple(_) | &Val::Env(_) => {
eprintln!("Skipping {} in flag output tuple.", val.type_name());
}
&Val::List(ref def) =>
|
&Val::Boolean(_) | &Val::Empty | &Val::Float(_) | &Val::Int(_) | &Val::Str(_) => {
self.write_flag_name(pfx, name, w)?;
self.write_simple_value(val, w)?;
}
}
}
Ok(())
}
}
impl Converter for FlagConverter {
fn convert(&self, v: Rc<Val>, mut w: &mut dyn Write) -> ConvertResult {
if let &Val::Tuple(ref flds) = v.as_ref() {
self.write("", flds, &mut w)
} else {
return Err(Box::new(BuildError::new(
"Flag outputs must be a tuple",
ErrorType::ConvertError,
)));
}
}
fn file_ext(&self) -> String {
String::from("txt")
}
fn description(&self) -> String {
"Convert ucg Vals into command line flags.".to_string()
}
#[allow(unused_must_use)]
fn help(&self) -> String {
include_str!("flags_help.txt").to_string()
}
}
|
{
self.write_list_flag(pfx, name, def, w)?;
}
|
conditional_block
|
flags.rs
|
// Copyright 2017 Jeremy Wall <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains code for converting a UCG Val into the command line flag output target.
use std::io::Write;
use std::rc::Rc;
use crate::build::Val;
use crate::convert::traits::{ConvertResult, Converter};
use crate::error::{BuildError, ErrorType};
/// FlagConverter implements the conversion logic for converting a Val into a set
/// of command line flags.
pub struct FlagConverter {
sep: &'static str,
}
impl FlagConverter {
pub fn new() -> Self {
FlagConverter { sep: "." }
}
pub fn with_sep(mut self, sep: &'static str) -> Self {
self.sep = sep;
self
}
fn write_flag_name(&self, pfx: &str, name: &str, w: &mut dyn Write) -> ConvertResult {
if name.chars().count() > 1 || pfx.chars().count() > 0 {
write!(w, "--{}{} ", pfx, name)?;
} else {
write!(w, "-{} ", name)?;
}
return Ok(());
}
fn write_list_flag(
&self,
pfx: &str,
name: &str,
def: &Vec<Rc<Val>>,
w: &mut dyn Write,
) -> ConvertResult {
// first of all we need to make sure that each &Val is only a primitive type.
for v in def.iter() {
let vref = v.as_ref();
if vref.is_list() || vref.is_tuple() {
eprintln!(
"Skipping non primitive val in list for flag {}{}",
pfx, name
);
} else {
self.write_flag_name(pfx, name, w)?;
self.write_simple_value(vref, w)?;
}
}
return Ok(());
}
fn
|
(&self, v: &Val, w: &mut dyn Write) -> ConvertResult {
match v {
&Val::Empty => {
// Empty is a noop.
return Ok(());
}
&Val::Boolean(b) => {
write!(w, "{} ", if b { "true" } else { "false" })?;
}
&Val::Float(ref f) => {
write!(w, "{} ", f)?;
}
&Val::Int(ref i) => {
write!(w, "{} ", i)?;
}
&Val::Str(ref s) => {
write!(w, "'{}' ", s)?;
}
&Val::List(_) | &Val::Tuple(_) | &Val::Env(_) => {
// This is ignored
eprintln!("Skipping {}...", v.type_name());
}
}
Ok(())
}
fn write(&self, pfx: &str, flds: &Vec<(String, Rc<Val>)>, w: &mut dyn Write) -> ConvertResult {
for &(ref name, ref val) in flds.iter() {
if let &Val::Empty = val.as_ref() {
self.write_flag_name(pfx, name, w)?;
continue;
}
match val.as_ref() {
&Val::Tuple(_) | &Val::Env(_) => {
eprintln!("Skipping {} in flag output tuple.", val.type_name());
}
&Val::List(ref def) => {
self.write_list_flag(pfx, name, def, w)?;
}
&Val::Boolean(_) | &Val::Empty | &Val::Float(_) | &Val::Int(_) | &Val::Str(_) => {
self.write_flag_name(pfx, name, w)?;
self.write_simple_value(val, w)?;
}
}
}
Ok(())
}
}
impl Converter for FlagConverter {
fn convert(&self, v: Rc<Val>, mut w: &mut dyn Write) -> ConvertResult {
if let &Val::Tuple(ref flds) = v.as_ref() {
self.write("", flds, &mut w)
} else {
return Err(Box::new(BuildError::new(
"Flag outputs must be a tuple",
ErrorType::ConvertError,
)));
}
}
fn file_ext(&self) -> String {
String::from("txt")
}
fn description(&self) -> String {
"Convert ucg Vals into command line flags.".to_string()
}
#[allow(unused_must_use)]
fn help(&self) -> String {
include_str!("flags_help.txt").to_string()
}
}
|
write_simple_value
|
identifier_name
|
flags.rs
|
// Copyright 2017 Jeremy Wall <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
|
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains code for converting a UCG Val into the command line flag output target.
use std::io::Write;
use std::rc::Rc;
use crate::build::Val;
use crate::convert::traits::{ConvertResult, Converter};
use crate::error::{BuildError, ErrorType};
/// FlagConverter implements the conversion logic for converting a Val into a set
/// of command line flags.
pub struct FlagConverter {
sep: &'static str,
}
impl FlagConverter {
pub fn new() -> Self {
FlagConverter { sep: "." }
}
pub fn with_sep(mut self, sep: &'static str) -> Self {
self.sep = sep;
self
}
fn write_flag_name(&self, pfx: &str, name: &str, w: &mut dyn Write) -> ConvertResult {
if name.chars().count() > 1 || pfx.chars().count() > 0 {
write!(w, "--{}{} ", pfx, name)?;
} else {
write!(w, "-{} ", name)?;
}
return Ok(());
}
fn write_list_flag(
&self,
pfx: &str,
name: &str,
def: &Vec<Rc<Val>>,
w: &mut dyn Write,
) -> ConvertResult {
// first of all we need to make sure that each &Val is only a primitive type.
for v in def.iter() {
let vref = v.as_ref();
if vref.is_list() || vref.is_tuple() {
eprintln!(
"Skipping non primitive val in list for flag {}{}",
pfx, name
);
} else {
self.write_flag_name(pfx, name, w)?;
self.write_simple_value(vref, w)?;
}
}
return Ok(());
}
fn write_simple_value(&self, v: &Val, w: &mut dyn Write) -> ConvertResult {
match v {
&Val::Empty => {
// Empty is a noop.
return Ok(());
}
&Val::Boolean(b) => {
write!(w, "{} ", if b { "true" } else { "false" })?;
}
&Val::Float(ref f) => {
write!(w, "{} ", f)?;
}
&Val::Int(ref i) => {
write!(w, "{} ", i)?;
}
&Val::Str(ref s) => {
write!(w, "'{}' ", s)?;
}
&Val::List(_) | &Val::Tuple(_) | &Val::Env(_) => {
// This is ignored
eprintln!("Skipping {}...", v.type_name());
}
}
Ok(())
}
fn write(&self, pfx: &str, flds: &Vec<(String, Rc<Val>)>, w: &mut dyn Write) -> ConvertResult {
for &(ref name, ref val) in flds.iter() {
if let &Val::Empty = val.as_ref() {
self.write_flag_name(pfx, name, w)?;
continue;
}
match val.as_ref() {
&Val::Tuple(_) | &Val::Env(_) => {
eprintln!("Skipping {} in flag output tuple.", val.type_name());
}
&Val::List(ref def) => {
self.write_list_flag(pfx, name, def, w)?;
}
&Val::Boolean(_) | &Val::Empty | &Val::Float(_) | &Val::Int(_) | &Val::Str(_) => {
self.write_flag_name(pfx, name, w)?;
self.write_simple_value(val, w)?;
}
}
}
Ok(())
}
}
impl Converter for FlagConverter {
fn convert(&self, v: Rc<Val>, mut w: &mut dyn Write) -> ConvertResult {
if let &Val::Tuple(ref flds) = v.as_ref() {
self.write("", flds, &mut w)
} else {
return Err(Box::new(BuildError::new(
"Flag outputs must be a tuple",
ErrorType::ConvertError,
)));
}
}
fn file_ext(&self) -> String {
String::from("txt")
}
fn description(&self) -> String {
"Convert ucg Vals into command line flags.".to_string()
}
#[allow(unused_must_use)]
fn help(&self) -> String {
include_str!("flags_help.txt").to_string()
}
}
|
// you may not use this file except in compliance with the License.
|
random_line_split
|
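The rule buried in `write_flag_name` is worth calling out: a name longer than one character, or any non-empty prefix, produces a long flag (`--pfx.name `), while a bare single-character name produces a short flag (`-n `), each followed by a trailing space so the value can be appended. A standalone, hedged restatement of just that branch, with no dependency on the `ucg` crate:

```rust
// Illustration only: mirrors the branch in FlagConverter::write_flag_name above.
fn flag_name(pfx: &str, name: &str) -> String {
    if name.chars().count() > 1 || pfx.chars().count() > 0 {
        format!("--{}{} ", pfx, name) // long flag; prefix (if any) is included
    } else {
        format!("-{} ", name)         // bare single-character flag
    }
}

fn main() {
    assert_eq!(flag_name("", "v"), "-v ");
    assert_eq!(flag_name("", "verbose"), "--verbose ");
    assert_eq!(flag_name("db.", "port"), "--db.port ");
}
```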
get_filesystem_type.rs
|
// Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::syscall;
use crate::Result;
use libc::fstatfs;
use std::fs::File;
use std::mem::MaybeUninit;
use std::os::unix::io::AsRawFd;
/// Obtain file system type of the file system that the file is served from.
pub fn get_filesystem_type(file: &File) -> Result<i64>
|
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn simple_test() {
let file = File::open("/dev/null").unwrap();
let _fstype = get_filesystem_type(&file).unwrap();
}
}
|
{
let mut statfs_buf = MaybeUninit::<libc::statfs>::uninit();
// Safe because we just obtained memory of exactly the required size and are
// passing that pointer on.
syscall!(unsafe { fstatfs(file.as_raw_fd(), statfs_buf.as_mut_ptr()) })?;
// Safe because the kernel guarantees the struct is initialized.
let statfs_buf = unsafe { statfs_buf.assume_init() };
Ok(statfs_buf.f_type as i64)
}
|
identifier_body
|
get_filesystem_type.rs
|
// Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::syscall;
use crate::Result;
use libc::fstatfs;
use std::fs::File;
use std::mem::MaybeUninit;
use std::os::unix::io::AsRawFd;
/// Obtain file system type of the file system that the file is served from.
pub fn get_filesystem_type(file: &File) -> Result<i64> {
let mut statfs_buf = MaybeUninit::<libc::statfs>::uninit();
// Safe because we just obtained memory of exactly the required size and are
// passing that pointer on.
syscall!(unsafe { fstatfs(file.as_raw_fd(), statfs_buf.as_mut_ptr()) })?;
// Safe because the kernel guarantees the struct is initialized.
let statfs_buf = unsafe { statfs_buf.assume_init() };
Ok(statfs_buf.f_type as i64)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn
|
() {
let file = File::open("/dev/null").unwrap();
let _fstype = get_filesystem_type(&file).unwrap();
}
}
|
simple_test
|
identifier_name
|
get_filesystem_type.rs
|
// Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
|
use crate::Result;
use libc::fstatfs;
use std::fs::File;
use std::mem::MaybeUninit;
use std::os::unix::io::AsRawFd;
/// Obtain file system type of the file system that the file is served from.
pub fn get_filesystem_type(file: &File) -> Result<i64> {
let mut statfs_buf = MaybeUninit::<libc::statfs>::uninit();
// Safe because we just obtained memory of exactly the required size and are
// passing that pointer on.
syscall!(unsafe { fstatfs(file.as_raw_fd(), statfs_buf.as_mut_ptr()) })?;
// Safe because the kernel guarantees the struct is initialized.
let statfs_buf = unsafe { statfs_buf.assume_init() };
Ok(statfs_buf.f_type as i64)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn simple_test() {
let file = File::open("/dev/null").unwrap();
let _fstype = get_filesystem_type(&file).unwrap();
}
}
|
// found in the LICENSE file.
use crate::syscall;
|
random_line_split
|
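The three `get_filesystem_type.rs` rows all wrap the same idea: `fstatfs` fills a `MaybeUninit<libc::statfs>` through the crate's `syscall!` macro and the `f_type` field is returned widened to `i64`. A hedged sketch of one way a caller might interpret the result; comparing against `libc::TMPFS_MAGIC` is an assumption about intended use and requires a Linux target:

```rust
// Sketch only: assumes the get_filesystem_type shown above and a Linux libc.
fn is_tmpfs(file: &std::fs::File) -> bool {
    match get_filesystem_type(file) {
        Ok(fs_type) => fs_type == libc::TMPFS_MAGIC as i64, // tmpfs magic number
        Err(_) => false,
    }
}
```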
display_list_builder.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Constructs display lists from boxes.
use layout::box_::Box;
use layout::context::LayoutContext;
use layout::util::OpaqueNode;
use gfx;
use style;
pub trait ExtraDisplayListData {
fn new(box_: &Box) -> Self;
}
pub type Nothing = ();
impl ExtraDisplayListData for OpaqueNode {
fn new(box_: &Box) -> OpaqueNode {
box_.node
}
}
impl ExtraDisplayListData for Nothing {
fn new(_: &Box) -> Nothing
|
}
/// A builder object that manages display list building. It should mainly hold information about
/// the initial request and desired result--for example, whether the `DisplayList` is to be used
/// for painting or hit testing. This can affect which boxes are created.
///
/// Right now, the builder isn't used for much, but it establishes the pattern we'll need once we
/// support display-list-based hit testing and so forth.
pub struct DisplayListBuilder<'a> {
ctx: &'a LayoutContext,
}
//
// Miscellaneous useful routines
//
/// Allows a CSS color to be converted into a graphics color.
pub trait ToGfxColor {
/// Converts a CSS color to a graphics color.
fn to_gfx_color(&self) -> gfx::color::Color;
}
impl ToGfxColor for style::computed_values::RGBA {
fn to_gfx_color(&self) -> gfx::color::Color {
gfx::color::rgba(self.red, self.green, self.blue, self.alpha)
}
}
|
{
()
}
|
identifier_body
|
display_list_builder.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Constructs display lists from boxes.
use layout::box_::Box;
use layout::context::LayoutContext;
use layout::util::OpaqueNode;
use gfx;
use style;
pub trait ExtraDisplayListData {
fn new(box_: &Box) -> Self;
}
pub type Nothing = ();
impl ExtraDisplayListData for OpaqueNode {
fn new(box_: &Box) -> OpaqueNode {
box_.node
}
}
|
()
}
}
/// A builder object that manages display list building. It should mainly hold information about
/// the initial request and desired result--for example, whether the `DisplayList` is to be used
/// for painting or hit testing. This can affect which boxes are created.
///
/// Right now, the builder isn't used for much, but it establishes the pattern we'll need once we
/// support display-list-based hit testing and so forth.
pub struct DisplayListBuilder<'a> {
ctx: &'a LayoutContext,
}
//
// Miscellaneous useful routines
//
/// Allows a CSS color to be converted into a graphics color.
pub trait ToGfxColor {
/// Converts a CSS color to a graphics color.
fn to_gfx_color(&self) -> gfx::color::Color;
}
impl ToGfxColor for style::computed_values::RGBA {
fn to_gfx_color(&self) -> gfx::color::Color {
gfx::color::rgba(self.red, self.green, self.blue, self.alpha)
}
}
|
impl ExtraDisplayListData for Nothing {
fn new(_: &Box) -> Nothing {
|
random_line_split
|
display_list_builder.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Constructs display lists from boxes.
use layout::box_::Box;
use layout::context::LayoutContext;
use layout::util::OpaqueNode;
use gfx;
use style;
pub trait ExtraDisplayListData {
fn new(box_: &Box) -> Self;
}
pub type Nothing = ();
impl ExtraDisplayListData for OpaqueNode {
fn new(box_: &Box) -> OpaqueNode {
box_.node
}
}
impl ExtraDisplayListData for Nothing {
fn new(_: &Box) -> Nothing {
()
}
}
/// A builder object that manages display list building. It should mainly hold information about
/// the initial request and desired result--for example, whether the `DisplayList` is to be used
/// for painting or hit testing. This can affect which boxes are created.
///
/// Right now, the builder isn't used for much, but it establishes the pattern we'll need once we
/// support display-list-based hit testing and so forth.
pub struct DisplayListBuilder<'a> {
ctx: &'a LayoutContext,
}
//
// Miscellaneous useful routines
//
/// Allows a CSS color to be converted into a graphics color.
pub trait ToGfxColor {
/// Converts a CSS color to a graphics color.
fn to_gfx_color(&self) -> gfx::color::Color;
}
impl ToGfxColor for style::computed_values::RGBA {
fn
|
(&self) -> gfx::color::Color {
gfx::color::rgba(self.red, self.green, self.blue, self.alpha)
}
}
|
to_gfx_color
|
identifier_name
|
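`ToGfxColor` in the rows above is a small adapter trait: it converts a style-system color into the renderer's color type, keeping the two crates decoupled. Since `gfx` and `style` are Servo-internal, here is a self-contained, hedged sketch of the same pattern with stand-in types (every name in it is hypothetical):

```rust
// Hypothetical stand-ins for style::computed_values::RGBA and gfx::color::Color.
struct Rgba { red: f32, green: f32, blue: f32, alpha: f32 }
struct GfxColor { r: f32, g: f32, b: f32, a: f32 }

// Same shape as the ToGfxColor trait in the listing.
trait ToGfxColor {
    fn to_gfx_color(&self) -> GfxColor;
}

impl ToGfxColor for Rgba {
    fn to_gfx_color(&self) -> GfxColor {
        GfxColor { r: self.red, g: self.green, b: self.blue, a: self.alpha }
    }
}

fn main() {
    let c = Rgba { red: 1.0, green: 0.5, blue: 0.0, alpha: 1.0 }.to_gfx_color();
    assert_eq!(c.g, 0.5);
}
```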
object_safety.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! "Object safety" refers to the ability for a trait to be converted
//! to an object. In general, traits may only be converted to an
//! object if all of their methods meet certain criteria. In particular,
//! they must:
//!
//! - have a suitable receiver from which we can extract a vtable;
//! - not reference the erased type `Self` except for in this receiver;
//! - not have generic type parameters
use super::supertraits;
use super::elaborate_predicates;
use middle::subst::{self, SelfSpace, TypeSpace};
use middle::traits;
use middle::ty::{self, ToPolyTraitRef, Ty};
use std::rc::Rc;
use syntax::ast;
#[derive(Debug)]
pub enum ObjectSafetyViolation<'tcx> {
/// Self : Sized declared on the trait
SizedSelf,
/// Supertrait reference references `Self` in an illegal location
/// (e.g. `trait Foo : Bar<Self>`)
SupertraitSelf,
/// Method has something illegal
Method(Rc<ty::Method<'tcx>>, MethodViolationCode),
}
/// Reasons a method might not be object-safe.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum MethodViolationCode {
/// e.g., `fn foo()`
StaticMethod,
/// e.g., `fn foo(&self, x: Self)` or `fn foo(&self) -> Self`
ReferencesSelf,
/// e.g., `fn foo<A>()`
Generic,
}
pub fn is_object_safe<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
// Because we query yes/no results frequently, we keep a cache:
let def = tcx.lookup_trait_def(trait_def_id);
let result = def.object_safety().unwrap_or_else(|| {
let result = object_safety_violations(tcx, trait_def_id).is_empty();
// Record just a yes/no result in the cache; this is what is
// queried most frequently. Note that this may overwrite a
// previous result, but always with the same thing.
def.set_object_safety(result);
result
});
debug!("is_object_safe({:?}) = {}", trait_def_id, result);
result
}
pub fn object_safety_violations<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
traits::supertrait_def_ids(tcx, trait_def_id)
.flat_map(|def_id| object_safety_violations_for_trait(tcx, def_id))
.collect()
}
fn object_safety_violations_for_trait<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
// Check methods for violations.
let mut violations: Vec<_> =
tcx.trait_items(trait_def_id).iter()
.flat_map(|item| {
match *item {
ty::MethodTraitItem(ref m) => {
object_safety_violation_for_method(tcx, trait_def_id, &**m)
.map(|code| ObjectSafetyViolation::Method(m.clone(), code))
.into_iter()
}
_ => None.into_iter(),
}
})
.collect();
// Check the trait itself.
if trait_has_sized_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SizedSelf);
}
if supertraits_reference_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SupertraitSelf);
}
debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
trait_def_id,
violations);
violations
}
fn supertraits_reference_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = trait_def.trait_ref.clone();
let trait_ref = trait_ref.to_poly_trait_ref();
let predicates = tcx.lookup_super_predicates(trait_def_id);
predicates
.predicates
.into_iter()
.map(|predicate| predicate.subst_supertrait(tcx, &trait_ref))
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref data) => {
// In the case of a trait predicate, we can skip the "self" type.
data.0.trait_ref.substs.types.get_slice(TypeSpace)
.iter()
.cloned()
.any(is_self)
}
ty::Predicate::Projection(..) |
ty::Predicate::WellFormed(..) |
ty::Predicate::ObjectSafe(..) |
ty::Predicate::TypeOutlives(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::Equate(..) => {
false
}
}
})
}
fn trait_has_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_predicates = tcx.lookup_predicates(trait_def_id);
generics_require_sized_self(tcx, &trait_def.generics, &trait_predicates)
}
fn generics_require_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
generics: &ty::Generics<'tcx>,
predicates: &ty::GenericPredicates<'tcx>)
-> bool
{
let sized_def_id = match tcx.lang_items.sized_trait() {
Some(def_id) => def_id,
None => { return false; /* No Sized trait, can't require it! */ }
};
// Search for a predicate like `Self : Sized` amongst the trait bounds.
let free_substs = tcx.construct_free_substs(generics, ast::DUMMY_NODE_ID);
let predicates = predicates.instantiate(tcx, &free_substs).predicates.into_vec();
elaborate_predicates(tcx, predicates)
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => {
is_self(trait_pred.0.self_ty())
}
ty::Predicate::Projection(..) |
ty::Predicate::Trait(..) |
ty::Predicate::Equate(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::WellFormed(..) |
ty::Predicate::ObjectSafe(..) |
ty::Predicate::TypeOutlives(..) => {
false
}
}
})
}
/// Returns `Some(_)` if this method makes the containing trait not object safe.
fn object_safety_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// Any method that has a `Self : Sized` requisite is otherwise
// exempt from the regulations.
if generics_require_sized_self(tcx, &method.generics, &method.predicates) {
return None;
}
virtual_call_violation_for_method(tcx, trait_def_id, method)
}
/// We say a method is *vtable safe* if it can be invoked on a trait
/// object. Note that object-safe traits can have some
/// non-vtable-safe methods, so long as they require `Self:Sized` or
/// otherwise ensure that they cannot be used when `Self=Trait`.
pub fn is_vtable_safe_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> bool
{
virtual_call_violation_for_method(tcx, trait_def_id, method).is_none()
}
/// Returns `Some(_)` if this method cannot be called on a trait
/// object; this does not necessarily imply that the enclosing trait
/// is not object safe, because the method might have a where clause
/// `Self:Sized`.
fn virtual_call_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// The method's first parameter must be something that derefs (or
// autorefs) to `&self`. For now, we only accept `self`, `&self`
// and `Box<Self>`.
match method.explicit_self {
ty::StaticExplicitSelfCategory => {
return Some(MethodViolationCode::StaticMethod);
}
ty::ByValueExplicitSelfCategory |
ty::ByReferenceExplicitSelfCategory(..) |
ty::ByBoxExplicitSelfCategory => {
}
}
// The `Self` type is erased, so it should not appear in list of
// arguments or return type apart from the receiver.
let ref sig = method.fty.sig;
for &input_ty in &sig.0.inputs[1..] {
if contains_illegal_self_type_reference(tcx, trait_def_id, input_ty) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
if let ty::FnConverging(result_type) = sig.0.output {
if contains_illegal_self_type_reference(tcx, trait_def_id, result_type) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
// We can't monomorphize things like `fn foo<A>(...)`.
if !method.generics.types.is_empty_in(subst::FnSpace) {
return Some(MethodViolationCode::Generic);
}
None
}
fn contains_illegal_self_type_reference<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
ty: Ty<'tcx>)
-> bool
{
// This is somewhat subtle. In general, we want to forbid
// references to `Self` in the argument and return types,
// since the value of `Self` is erased. However, there is one
// exception: it is ok to reference `Self` in order to access
// an associated type of the current trait, since we retain
// the value of those associated types in the object type
// itself.
//
// ```rust
// trait SuperTrait {
// type X;
// }
//
// trait Trait : SuperTrait {
// type Y;
// fn foo(&self, x: Self) // bad
// fn foo(&self) -> Self // bad
// fn foo(&self) -> Option<Self> // bad
// fn foo(&self) -> Self::Y // OK, desugars to next example
// fn foo(&self) -> <Self as Trait>::Y // OK
|
// ```
//
// However, it is not as simple as allowing `Self` in a projected
// type, because there are illegal ways to use `Self` as well:
//
// ```rust
// trait Trait : SuperTrait {
// ...
// fn foo(&self) -> <Self as SomeOtherTrait>::X;
// }
// ```
//
// Here we will not have the type of `X` recorded in the
// object type, and we cannot resolve `Self as SomeOtherTrait`
// without knowing what `Self` is.
let mut supertraits: Option<Vec<ty::PolyTraitRef<'tcx>>> = None;
let mut error = false;
ty.maybe_walk(|ty| {
match ty.sty {
ty::TyParam(ref param_ty) => {
if param_ty.space == SelfSpace {
error = true;
}
false // no contained types to walk
}
ty::TyProjection(ref data) => {
// This is a projected type `<Foo as SomeTrait>::X`.
// Compute supertraits of current trait lazily.
if supertraits.is_none() {
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = ty::Binder(trait_def.trait_ref.clone());
supertraits = Some(traits::supertraits(tcx, trait_ref).collect());
}
// Determine whether the trait reference `Foo as
// SomeTrait` is in fact a supertrait of the
// current trait. In that case, this type is
// legal, because the type `X` will be specified
// in the object type. Note that we can just use
// direct equality here because all of these types
// are part of the formal parameter listing, and
// hence there should be no inference variables.
let projection_trait_ref = ty::Binder(data.trait_ref.clone());
let is_supertrait_of_current_trait =
supertraits.as_ref().unwrap().contains(&projection_trait_ref);
if is_supertrait_of_current_trait {
false // do not walk contained types, do not report error, do collect $200
} else {
true // DO walk contained types, POSSIBLY reporting an error
}
}
_ => true, // walk contained types, if any
}
});
error
}
fn is_self<'tcx>(ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyParam(ref data) => data.space == subst::SelfSpace,
_ => false,
}
}
|
// fn foo(&self) -> Self::X // OK, desugars to next example
// fn foo(&self) -> <Self as SuperTrait>::X // OK
// }
|
random_line_split
|
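The module docs above state the object-safety criteria abstractly; in surface Rust they mean a trait method may not mention the erased `Self` (outside the receiver) or take generic type parameters unless it is fenced off with a `Self: Sized` bound, in which case it simply cannot be called through the trait object. A hedged illustration in modern Rust (current compilers report violations as object-safety / dyn-compatibility errors):

```rust
trait Draw {
    fn draw(&self);

    // Returning `Self` references the erased type, so the method must opt out
    // of dynamic dispatch via `Self: Sized` for the trait to stay object safe.
    fn duplicate(&self) -> Self
    where
        Self: Sized;
}

struct Circle;

impl Draw for Circle {
    fn draw(&self) {}
    fn duplicate(&self) -> Self { Circle }
}

fn main() {
    // OK: `draw` is vtable safe; `duplicate` is not callable on `dyn Draw`.
    let obj: Box<dyn Draw> = Box::new(Circle);
    obj.draw();
}
```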
object_safety.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! "Object safety" refers to the ability for a trait to be converted
//! to an object. In general, traits may only be converted to an
//! object if all of their methods meet certain criteria. In particular,
//! they must:
//!
//! - have a suitable receiver from which we can extract a vtable;
//! - not reference the erased type `Self` except for in this receiver;
//! - not have generic type parameters
use super::supertraits;
use super::elaborate_predicates;
use middle::subst::{self, SelfSpace, TypeSpace};
use middle::traits;
use middle::ty::{self, ToPolyTraitRef, Ty};
use std::rc::Rc;
use syntax::ast;
#[derive(Debug)]
pub enum ObjectSafetyViolation<'tcx> {
/// Self : Sized declared on the trait
SizedSelf,
/// Supertrait reference references `Self` in an illegal location
/// (e.g. `trait Foo : Bar<Self>`)
SupertraitSelf,
/// Method has something illegal
Method(Rc<ty::Method<'tcx>>, MethodViolationCode),
}
/// Reasons a method might not be object-safe.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum MethodViolationCode {
/// e.g., `fn foo()`
StaticMethod,
/// e.g., `fn foo(&self, x: Self)` or `fn foo(&self) -> Self`
ReferencesSelf,
/// e.g., `fn foo<A>()`
Generic,
}
pub fn is_object_safe<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
// Because we query yes/no results frequently, we keep a cache:
let def = tcx.lookup_trait_def(trait_def_id);
let result = def.object_safety().unwrap_or_else(|| {
let result = object_safety_violations(tcx, trait_def_id).is_empty();
// Record just a yes/no result in the cache; this is what is
// queried most frequently. Note that this may overwrite a
// previous result, but always with the same thing.
def.set_object_safety(result);
result
});
debug!("is_object_safe({:?}) = {}", trait_def_id, result);
result
}
pub fn object_safety_violations<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
traits::supertrait_def_ids(tcx, trait_def_id)
.flat_map(|def_id| object_safety_violations_for_trait(tcx, def_id))
.collect()
}
fn object_safety_violations_for_trait<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
// Check methods for violations.
let mut violations: Vec<_> =
tcx.trait_items(trait_def_id).iter()
.flat_map(|item| {
match *item {
ty::MethodTraitItem(ref m) => {
object_safety_violation_for_method(tcx, trait_def_id, &**m)
.map(|code| ObjectSafetyViolation::Method(m.clone(), code))
.into_iter()
}
_ => None.into_iter(),
}
})
.collect();
// Check the trait itself.
if trait_has_sized_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SizedSelf);
}
if supertraits_reference_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SupertraitSelf);
}
debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
trait_def_id,
violations);
violations
}
fn supertraits_reference_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = trait_def.trait_ref.clone();
let trait_ref = trait_ref.to_poly_trait_ref();
let predicates = tcx.lookup_super_predicates(trait_def_id);
predicates
.predicates
.into_iter()
.map(|predicate| predicate.subst_supertrait(tcx, &trait_ref))
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref data) => {
// In the case of a trait predicate, we can skip the "self" type.
data.0.trait_ref.substs.types.get_slice(TypeSpace)
.iter()
.cloned()
.any(is_self)
}
ty::Predicate::Projection(..) |
ty::Predicate::WellFormed(..) |
ty::Predicate::ObjectSafe(..) |
ty::Predicate::TypeOutlives(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::Equate(..) => {
false
}
}
})
}
fn trait_has_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
|
fn generics_require_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
generics: &ty::Generics<'tcx>,
predicates: &ty::GenericPredicates<'tcx>)
-> bool
{
let sized_def_id = match tcx.lang_items.sized_trait() {
Some(def_id) => def_id,
None => { return false; /* No Sized trait, can't require it! */ }
};
// Search for a predicate like `Self : Sized` amongst the trait bounds.
let free_substs = tcx.construct_free_substs(generics, ast::DUMMY_NODE_ID);
let predicates = predicates.instantiate(tcx, &free_substs).predicates.into_vec();
elaborate_predicates(tcx, predicates)
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => {
is_self(trait_pred.0.self_ty())
}
ty::Predicate::Projection(..) |
ty::Predicate::Trait(..) |
ty::Predicate::Equate(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::WellFormed(..) |
ty::Predicate::ObjectSafe(..) |
ty::Predicate::TypeOutlives(..) => {
false
}
}
})
}
/// Returns `Some(_)` if this method makes the containing trait not object safe.
fn object_safety_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// Any method that has a `Self : Sized` requisite is otherwise
// exempt from the regulations.
if generics_require_sized_self(tcx, &method.generics, &method.predicates) {
return None;
}
virtual_call_violation_for_method(tcx, trait_def_id, method)
}
/// We say a method is *vtable safe* if it can be invoked on a trait
/// object. Note that object-safe traits can have some
/// non-vtable-safe methods, so long as they require `Self:Sized` or
/// otherwise ensure that they cannot be used when `Self=Trait`.
pub fn is_vtable_safe_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> bool
{
virtual_call_violation_for_method(tcx, trait_def_id, method).is_none()
}
/// Returns `Some(_)` if this method cannot be called on a trait
/// object; this does not necessarily imply that the enclosing trait
/// is not object safe, because the method might have a where clause
/// `Self:Sized`.
fn virtual_call_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// The method's first parameter must be something that derefs (or
// autorefs) to `&self`. For now, we only accept `self`, `&self`
// and `Box<Self>`.
match method.explicit_self {
ty::StaticExplicitSelfCategory => {
return Some(MethodViolationCode::StaticMethod);
}
ty::ByValueExplicitSelfCategory |
ty::ByReferenceExplicitSelfCategory(..) |
ty::ByBoxExplicitSelfCategory => {
}
}
// The `Self` type is erased, so it should not appear in list of
// arguments or return type apart from the receiver.
let ref sig = method.fty.sig;
for &input_ty in &sig.0.inputs[1..] {
if contains_illegal_self_type_reference(tcx, trait_def_id, input_ty) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
if let ty::FnConverging(result_type) = sig.0.output {
if contains_illegal_self_type_reference(tcx, trait_def_id, result_type) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
// We can't monomorphize things like `fn foo<A>(...)`.
if !method.generics.types.is_empty_in(subst::FnSpace) {
return Some(MethodViolationCode::Generic);
}
None
}
fn contains_illegal_self_type_reference<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
ty: Ty<'tcx>)
-> bool
{
// This is somewhat subtle. In general, we want to forbid
// references to `Self` in the argument and return types,
// since the value of `Self` is erased. However, there is one
// exception: it is ok to reference `Self` in order to access
// an associated type of the current trait, since we retain
// the value of those associated types in the object type
// itself.
//
// ```rust
// trait SuperTrait {
// type X;
// }
//
// trait Trait : SuperTrait {
// type Y;
// fn foo(&self, x: Self) // bad
// fn foo(&self) -> Self // bad
// fn foo(&self) -> Option<Self> // bad
// fn foo(&self) -> Self::Y // OK, desugars to next example
// fn foo(&self) -> <Self as Trait>::Y // OK
// fn foo(&self) -> Self::X // OK, desugars to next example
// fn foo(&self) -> <Self as SuperTrait>::X // OK
// }
// ```
//
// However, it is not as simple as allowing `Self` in a projected
// type, because there are illegal ways to use `Self` as well:
//
// ```rust
// trait Trait : SuperTrait {
// ...
// fn foo(&self) -> <Self as SomeOtherTrait>::X;
// }
// ```
//
// Here we will not have the type of `X` recorded in the
// object type, and we cannot resolve `Self as SomeOtherTrait`
// without knowing what `Self` is.
let mut supertraits: Option<Vec<ty::PolyTraitRef<'tcx>>> = None;
let mut error = false;
ty.maybe_walk(|ty| {
match ty.sty {
ty::TyParam(ref param_ty) => {
if param_ty.space == SelfSpace {
error = true;
}
false // no contained types to walk
}
ty::TyProjection(ref data) => {
// This is a projected type `<Foo as SomeTrait>::X`.
// Compute supertraits of current trait lazily.
if supertraits.is_none() {
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = ty::Binder(trait_def.trait_ref.clone());
supertraits = Some(traits::supertraits(tcx, trait_ref).collect());
}
// Determine whether the trait reference `Foo as
// SomeTrait` is in fact a supertrait of the
// current trait. In that case, this type is
// legal, because the type `X` will be specified
// in the object type. Note that we can just use
// direct equality here because all of these types
// are part of the formal parameter listing, and
// hence there should be no inference variables.
let projection_trait_ref = ty::Binder(data.trait_ref.clone());
let is_supertrait_of_current_trait =
supertraits.as_ref().unwrap().contains(&projection_trait_ref);
if is_supertrait_of_current_trait {
false // do not walk contained types, do not report error, do collect $200
} else {
true // DO walk contained types, POSSIBLY reporting an error
}
}
_ => true, // walk contained types, if any
}
});
error
}
fn is_self<'tcx>(ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyParam(ref data) => data.space == subst::SelfSpace,
_ => false,
}
}
|
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_predicates = tcx.lookup_predicates(trait_def_id);
generics_require_sized_self(tcx, &trait_def.generics, &trait_predicates)
}
|
identifier_body
|
object_safety.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! "Object safety" refers to the ability for a trait to be converted
//! to an object. In general, traits may only be converted to an
//! object if all of their methods meet certain criteria. In particular,
//! they must:
//!
//! - have a suitable receiver from which we can extract a vtable;
//! - not reference the erased type `Self` except for in this receiver;
//! - not have generic type parameters
use super::supertraits;
use super::elaborate_predicates;
use middle::subst::{self, SelfSpace, TypeSpace};
use middle::traits;
use middle::ty::{self, ToPolyTraitRef, Ty};
use std::rc::Rc;
use syntax::ast;
#[derive(Debug)]
pub enum ObjectSafetyViolation<'tcx> {
/// Self : Sized declared on the trait
SizedSelf,
/// Supertrait reference references `Self` in an illegal location
/// (e.g. `trait Foo : Bar<Self>`)
SupertraitSelf,
/// Method has something illegal
Method(Rc<ty::Method<'tcx>>, MethodViolationCode),
}
/// Reasons a method might not be object-safe.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum MethodViolationCode {
/// e.g., `fn foo()`
StaticMethod,
/// e.g., `fn foo(&self, x: Self)` or `fn foo(&self) -> Self`
ReferencesSelf,
/// e.g., `fn foo<A>()`
Generic,
}
pub fn is_object_safe<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
// Because we query yes/no results frequently, we keep a cache:
let def = tcx.lookup_trait_def(trait_def_id);
let result = def.object_safety().unwrap_or_else(|| {
let result = object_safety_violations(tcx, trait_def_id).is_empty();
// Record just a yes/no result in the cache; this is what is
// queried most frequently. Note that this may overwrite a
// previous result, but always with the same thing.
def.set_object_safety(result);
result
});
debug!("is_object_safe({:?}) = {}", trait_def_id, result);
result
}
pub fn object_safety_violations<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
traits::supertrait_def_ids(tcx, trait_def_id)
.flat_map(|def_id| object_safety_violations_for_trait(tcx, def_id))
.collect()
}
fn object_safety_violations_for_trait<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
// Check methods for violations.
let mut violations: Vec<_> =
tcx.trait_items(trait_def_id).iter()
.flat_map(|item| {
match *item {
ty::MethodTraitItem(ref m) => {
object_safety_violation_for_method(tcx, trait_def_id, &**m)
.map(|code| ObjectSafetyViolation::Method(m.clone(), code))
.into_iter()
}
_ => None.into_iter(),
}
})
.collect();
// Check the trait itself.
if trait_has_sized_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SizedSelf);
}
if supertraits_reference_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SupertraitSelf);
}
debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
trait_def_id,
violations);
violations
}
fn supertraits_reference_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = trait_def.trait_ref.clone();
let trait_ref = trait_ref.to_poly_trait_ref();
let predicates = tcx.lookup_super_predicates(trait_def_id);
predicates
.predicates
.into_iter()
.map(|predicate| predicate.subst_supertrait(tcx, &trait_ref))
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref data) => {
// In the case of a trait predicate, we can skip the "self" type.
data.0.trait_ref.substs.types.get_slice(TypeSpace)
.iter()
.cloned()
.any(is_self)
}
ty::Predicate::Projection(..) |
ty::Predicate::WellFormed(..) |
ty::Predicate::ObjectSafe(..) |
ty::Predicate::TypeOutlives(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::Equate(..) => {
false
}
}
})
}
fn trait_has_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_predicates = tcx.lookup_predicates(trait_def_id);
generics_require_sized_self(tcx, &trait_def.generics, &trait_predicates)
}
fn generics_require_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
generics: &ty::Generics<'tcx>,
predicates: &ty::GenericPredicates<'tcx>)
-> bool
{
let sized_def_id = match tcx.lang_items.sized_trait() {
Some(def_id) => def_id,
None => { return false; /* No Sized trait, can't require it! */ }
};
// Search for a predicate like `Self : Sized` amongst the trait bounds.
let free_substs = tcx.construct_free_substs(generics, ast::DUMMY_NODE_ID);
let predicates = predicates.instantiate(tcx, &free_substs).predicates.into_vec();
elaborate_predicates(tcx, predicates)
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => {
is_self(trait_pred.0.self_ty())
}
ty::Predicate::Projection(..) |
ty::Predicate::Trait(..) |
ty::Predicate::Equate(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::WellFormed(..) |
ty::Predicate::ObjectSafe(..) |
ty::Predicate::TypeOutlives(..) => {
false
}
}
})
}
/// Returns `Some(_)` if this method makes the containing trait not object safe.
fn object_safety_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// Any method that has a `Self : Sized` requisite is otherwise
// exempt from the regulations.
if generics_require_sized_self(tcx, &method.generics, &method.predicates) {
return None;
}
virtual_call_violation_for_method(tcx, trait_def_id, method)
}
/// We say a method is *vtable safe* if it can be invoked on a trait
/// object. Note that object-safe traits can have some
/// non-vtable-safe methods, so long as they require `Self:Sized` or
/// otherwise ensure that they cannot be used when `Self=Trait`.
pub fn is_vtable_safe_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> bool
{
virtual_call_violation_for_method(tcx, trait_def_id, method).is_none()
}
/// Returns `Some(_)` if this method cannot be called on a trait
/// object; this does not necessarily imply that the enclosing trait
/// is not object safe, because the method might have a where clause
/// `Self:Sized`.
fn virtual_call_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// The method's first parameter must be something that derefs (or
// autorefs) to `&self`. For now, we only accept `self`, `&self`
// and `Box<Self>`.
match method.explicit_self {
ty::StaticExplicitSelfCategory => {
return Some(MethodViolationCode::StaticMethod);
}
ty::ByValueExplicitSelfCategory |
ty::ByReferenceExplicitSelfCategory(..) |
ty::ByBoxExplicitSelfCategory => {
}
}
// The `Self` type is erased, so it should not appear in list of
// arguments or return type apart from the receiver.
let ref sig = method.fty.sig;
for &input_ty in &sig.0.inputs[1..] {
if contains_illegal_self_type_reference(tcx, trait_def_id, input_ty) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
if let ty::FnConverging(result_type) = sig.0.output {
if contains_illegal_self_type_reference(tcx, trait_def_id, result_type)
|
}
// We can't monomorphize things like `fn foo<A>(...)`.
if !method.generics.types.is_empty_in(subst::FnSpace) {
return Some(MethodViolationCode::Generic);
}
None
}
fn contains_illegal_self_type_reference<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
ty: Ty<'tcx>)
-> bool
{
// This is somewhat subtle. In general, we want to forbid
// references to `Self` in the argument and return types,
// since the value of `Self` is erased. However, there is one
// exception: it is ok to reference `Self` in order to access
// an associated type of the current trait, since we retain
// the value of those associated types in the object type
// itself.
//
// ```rust
// trait SuperTrait {
// type X;
// }
//
// trait Trait : SuperTrait {
// type Y;
// fn foo(&self, x: Self) // bad
// fn foo(&self) -> Self // bad
// fn foo(&self) -> Option<Self> // bad
// fn foo(&self) -> Self::Y // OK, desugars to next example
// fn foo(&self) -> <Self as Trait>::Y // OK
// fn foo(&self) -> Self::X // OK, desugars to next example
// fn foo(&self) -> <Self as SuperTrait>::X // OK
// }
// ```
//
// However, it is not as simple as allowing `Self` in a projected
// type, because there are illegal ways to use `Self` as well:
//
// ```rust
// trait Trait : SuperTrait {
// ...
// fn foo(&self) -> <Self as SomeOtherTrait>::X;
// }
// ```
//
// Here we will not have the type of `X` recorded in the
// object type, and we cannot resolve `Self as SomeOtherTrait`
// without knowing what `Self` is.
let mut supertraits: Option<Vec<ty::PolyTraitRef<'tcx>>> = None;
let mut error = false;
ty.maybe_walk(|ty| {
match ty.sty {
ty::TyParam(ref param_ty) => {
if param_ty.space == SelfSpace {
error = true;
}
false // no contained types to walk
}
ty::TyProjection(ref data) => {
// This is a projected type `<Foo as SomeTrait>::X`.
// Compute supertraits of current trait lazily.
if supertraits.is_none() {
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = ty::Binder(trait_def.trait_ref.clone());
supertraits = Some(traits::supertraits(tcx, trait_ref).collect());
}
// Determine whether the trait reference `Foo as
// SomeTrait` is in fact a supertrait of the
// current trait. In that case, this type is
// legal, because the type `X` will be specified
// in the object type. Note that we can just use
// direct equality here because all of these types
// are part of the formal parameter listing, and
// hence there should be no inference variables.
let projection_trait_ref = ty::Binder(data.trait_ref.clone());
let is_supertrait_of_current_trait =
supertraits.as_ref().unwrap().contains(&projection_trait_ref);
if is_supertrait_of_current_trait {
false // do not walk contained types, do not report error, do collect $200
} else {
true // DO walk contained types, POSSIBLY reporting an error
}
}
_ => true, // walk contained types, if any
}
});
error
}
fn is_self<'tcx>(ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyParam(ref data) => data.space == subst::SelfSpace,
_ => false,
}
}
|
{
return Some(MethodViolationCode::ReferencesSelf);
}
|
conditional_block
|
object_safety.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! "Object safety" refers to the ability for a trait to be converted
//! to an object. In general, traits may only be converted to an
//! object if all of their methods meet certain criteria. In particular,
//! they must:
//!
//! - have a suitable receiver from which we can extract a vtable;
//! - not reference the erased type `Self` except for in this receiver;
//! - not have generic type parameters
use super::supertraits;
use super::elaborate_predicates;
use middle::subst::{self, SelfSpace, TypeSpace};
use middle::traits;
use middle::ty::{self, ToPolyTraitRef, Ty};
use std::rc::Rc;
use syntax::ast;
#[derive(Debug)]
pub enum ObjectSafetyViolation<'tcx> {
/// Self : Sized declared on the trait
SizedSelf,
/// Supertrait reference references `Self` in an illegal location
/// (e.g. `trait Foo : Bar<Self>`)
SupertraitSelf,
/// Method has something illegal
Method(Rc<ty::Method<'tcx>>, MethodViolationCode),
}
/// Reasons a method might not be object-safe.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum MethodViolationCode {
/// e.g., `fn foo()`
StaticMethod,
/// e.g., `fn foo(&self, x: Self)` or `fn foo(&self) -> Self`
ReferencesSelf,
/// e.g., `fn foo<A>()`
Generic,
}
pub fn
|
<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
// Because we query yes/no results frequently, we keep a cache:
let def = tcx.lookup_trait_def(trait_def_id);
let result = def.object_safety().unwrap_or_else(|| {
let result = object_safety_violations(tcx, trait_def_id).is_empty();
// Record just a yes/no result in the cache; this is what is
// queried most frequently. Note that this may overwrite a
// previous result, but always with the same thing.
def.set_object_safety(result);
result
});
debug!("is_object_safe({:?}) = {}", trait_def_id, result);
result
}
pub fn object_safety_violations<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
traits::supertrait_def_ids(tcx, trait_def_id)
.flat_map(|def_id| object_safety_violations_for_trait(tcx, def_id))
.collect()
}
fn object_safety_violations_for_trait<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> Vec<ObjectSafetyViolation<'tcx>>
{
// Check methods for violations.
let mut violations: Vec<_> =
tcx.trait_items(trait_def_id).iter()
.flat_map(|item| {
match *item {
ty::MethodTraitItem(ref m) => {
object_safety_violation_for_method(tcx, trait_def_id, &**m)
.map(|code| ObjectSafetyViolation::Method(m.clone(), code))
.into_iter()
}
_ => None.into_iter(),
}
})
.collect();
// Check the trait itself.
if trait_has_sized_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SizedSelf);
}
if supertraits_reference_self(tcx, trait_def_id) {
violations.push(ObjectSafetyViolation::SupertraitSelf);
}
debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
trait_def_id,
violations);
violations
}
fn supertraits_reference_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = trait_def.trait_ref.clone();
let trait_ref = trait_ref.to_poly_trait_ref();
let predicates = tcx.lookup_super_predicates(trait_def_id);
predicates
.predicates
.into_iter()
.map(|predicate| predicate.subst_supertrait(tcx, &trait_ref))
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref data) => {
// In the case of a trait predicate, we can skip the "self" type.
data.0.trait_ref.substs.types.get_slice(TypeSpace)
.iter()
.cloned()
.any(is_self)
}
ty::Predicate::Projection(..) |
ty::Predicate::WellFormed(..) |
ty::Predicate::ObjectSafe(..) |
ty::Predicate::TypeOutlives(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::Equate(..) => {
false
}
}
})
}
fn trait_has_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId)
-> bool
{
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_predicates = tcx.lookup_predicates(trait_def_id);
generics_require_sized_self(tcx, &trait_def.generics, &trait_predicates)
}
fn generics_require_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>,
generics: &ty::Generics<'tcx>,
predicates: &ty::GenericPredicates<'tcx>)
-> bool
{
let sized_def_id = match tcx.lang_items.sized_trait() {
Some(def_id) => def_id,
None => { return false; /* No Sized trait, can't require it! */ }
};
// Search for a predicate like `Self : Sized` amongst the trait bounds.
let free_substs = tcx.construct_free_substs(generics, ast::DUMMY_NODE_ID);
let predicates = predicates.instantiate(tcx, &free_substs).predicates.into_vec();
elaborate_predicates(tcx, predicates)
.any(|predicate| {
match predicate {
ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => {
is_self(trait_pred.0.self_ty())
}
ty::Predicate::Projection(..) |
ty::Predicate::Trait(..) |
ty::Predicate::Equate(..) |
ty::Predicate::RegionOutlives(..) |
ty::Predicate::WellFormed(..) |
ty::Predicate::ObjectSafe(..) |
ty::Predicate::TypeOutlives(..) => {
false
}
}
})
}
/// Returns `Some(_)` if this method makes the containing trait not object safe.
fn object_safety_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// Any method that has a `Self : Sized` requisite is otherwise
// exempt from the regulations.
if generics_require_sized_self(tcx, &method.generics, &method.predicates) {
return None;
}
virtual_call_violation_for_method(tcx, trait_def_id, method)
}
/// We say a method is *vtable safe* if it can be invoked on a trait
/// object. Note that object-safe traits can have some
/// non-vtable-safe methods, so long as they require `Self:Sized` or
/// otherwise ensure that they cannot be used when `Self=Trait`.
pub fn is_vtable_safe_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> bool
{
virtual_call_violation_for_method(tcx, trait_def_id, method).is_none()
}
/// Returns `Some(_)` if this method cannot be called on a trait
/// object; this does not necessarily imply that the enclosing trait
/// is not object safe, because the method might have a where clause
/// `Self:Sized`.
fn virtual_call_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
method: &ty::Method<'tcx>)
-> Option<MethodViolationCode>
{
// The method's first parameter must be something that derefs (or
// autorefs) to `&self`. For now, we only accept `self`, `&self`
// and `Box<Self>`.
match method.explicit_self {
ty::StaticExplicitSelfCategory => {
return Some(MethodViolationCode::StaticMethod);
}
ty::ByValueExplicitSelfCategory |
ty::ByReferenceExplicitSelfCategory(..) |
ty::ByBoxExplicitSelfCategory => {
}
}
// The `Self` type is erased, so it should not appear in list of
// arguments or return type apart from the receiver.
let ref sig = method.fty.sig;
for &input_ty in &sig.0.inputs[1..] {
if contains_illegal_self_type_reference(tcx, trait_def_id, input_ty) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
if let ty::FnConverging(result_type) = sig.0.output {
if contains_illegal_self_type_reference(tcx, trait_def_id, result_type) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
// We can't monomorphize things like `fn foo<A>(...)`.
if !method.generics.types.is_empty_in(subst::FnSpace) {
return Some(MethodViolationCode::Generic);
}
None
}
fn contains_illegal_self_type_reference<'tcx>(tcx: &ty::ctxt<'tcx>,
trait_def_id: ast::DefId,
ty: Ty<'tcx>)
-> bool
{
// This is somewhat subtle. In general, we want to forbid
// references to `Self` in the argument and return types,
// since the value of `Self` is erased. However, there is one
// exception: it is ok to reference `Self` in order to access
// an associated type of the current trait, since we retain
// the value of those associated types in the object type
// itself.
//
// ```rust
// trait SuperTrait {
// type X;
// }
//
// trait Trait : SuperTrait {
// type Y;
// fn foo(&self, x: Self) // bad
// fn foo(&self) -> Self // bad
// fn foo(&self) -> Option<Self> // bad
// fn foo(&self) -> Self::Y // OK, desugars to next example
// fn foo(&self) -> <Self as Trait>::Y // OK
// fn foo(&self) -> Self::X // OK, desugars to next example
// fn foo(&self) -> <Self as SuperTrait>::X // OK
// }
// ```
//
// However, it is not as simple as allowing `Self` in a projected
// type, because there are illegal ways to use `Self` as well:
//
// ```rust
// trait Trait : SuperTrait {
// ...
// fn foo(&self) -> <Self as SomeOtherTrait>::X;
// }
// ```
//
// Here we will not have the type of `X` recorded in the
// object type, and we cannot resolve `Self as SomeOtherTrait`
// without knowing what `Self` is.
let mut supertraits: Option<Vec<ty::PolyTraitRef<'tcx>>> = None;
let mut error = false;
ty.maybe_walk(|ty| {
match ty.sty {
ty::TyParam(ref param_ty) => {
if param_ty.space == SelfSpace {
error = true;
}
false // no contained types to walk
}
ty::TyProjection(ref data) => {
// This is a projected type `<Foo as SomeTrait>::X`.
// Compute supertraits of current trait lazily.
if supertraits.is_none() {
let trait_def = tcx.lookup_trait_def(trait_def_id);
let trait_ref = ty::Binder(trait_def.trait_ref.clone());
supertraits = Some(traits::supertraits(tcx, trait_ref).collect());
}
// Determine whether the trait reference `Foo as
// SomeTrait` is in fact a supertrait of the
// current trait. In that case, this type is
// legal, because the type `X` will be specified
// in the object type. Note that we can just use
// direct equality here because all of these types
// are part of the formal parameter listing, and
// hence there should be no inference variables.
let projection_trait_ref = ty::Binder(data.trait_ref.clone());
let is_supertrait_of_current_trait =
supertraits.as_ref().unwrap().contains(&projection_trait_ref);
if is_supertrait_of_current_trait {
false // do not walk contained types, do not report error, do collect $200
} else {
true // DO walk contained types, POSSIBLY reporting an error
}
}
_ => true, // walk contained types, if any
}
});
error
}
fn is_self<'tcx>(ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyParam(ref data) => data.space == subst::SelfSpace,
_ => false,
}
}
|
is_object_safe
|
identifier_name
|
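The object-safety rules described in the `object_safety.rs` doc comments above (no `Self` outside the receiver, no generic methods, a `Self: Sized` escape hatch for everything else) can be seen in a small self-contained sketch. This is illustration only, written in current Rust rather than the older rustc internals, and every name in it is made up.

```rust
// A trait that passes the checks above: `Self` only appears as the receiver,
// no method is generic, and the one associated function that does return
// `Self` is fenced off with `where Self: Sized`.
trait ObjectSafe {
    fn describe(&self) -> String;
    fn make() -> Self
    where
        Self: Sized;
}

// A trait that would fail them: `clone_box` returns `Self` by value and
// `compare` is generic, so neither can be dispatched through a vtable.
// trait NotObjectSafe {
//     fn clone_box(&self) -> Self;
//     fn compare<T>(&self, other: T) -> bool;
// }

fn main() {
    struct Widget;
    impl ObjectSafe for Widget {
        fn describe(&self) -> String {
            "widget".to_string()
        }
        fn make() -> Self {
            Widget
        }
    }
    // Because `ObjectSafe` satisfies the rules, it can be used as a trait object.
    let obj: Box<dyn ObjectSafe> = Box::new(Widget);
    println!("{}", obj.describe());
}
```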
opeq.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// -*- rust -*-
pub fn main()
|
{
let mut x: int = 1;
x *= 2;
info2!("{}", x);
assert_eq!(x, 2);
x += 3;
info2!("{}", x);
assert_eq!(x, 5);
x *= x;
info2!("{}", x);
assert_eq!(x, 25);
x /= 5;
info2!("{}", x);
assert_eq!(x, 5);
}
|
identifier_body
|
|
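The `opeq.rs` rows use pre-1.0 Rust (`int`, `info2!`), which no longer compiles. As a side note for readers who want to run the same compound-assignment test today, a rough modern equivalent would be the following; the `i64` and `println!` substitutions are mine, not part of the dataset.

```rust
// Modern restatement of the historical opeq.rs test: `int` -> `i64`,
// `info2!` -> `println!`; the arithmetic and assertions are unchanged.
pub fn main() {
    let mut x: i64 = 1;
    x *= 2;
    println!("{}", x);
    assert_eq!(x, 2);
    x += 3;
    println!("{}", x);
    assert_eq!(x, 5);
    x *= x;
    println!("{}", x);
    assert_eq!(x, 25);
    x /= 5;
    println!("{}", x);
    assert_eq!(x, 5);
}
```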
opeq.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// -*- rust -*-
pub fn main() {
let mut x: int = 1;
x *= 2;
info2!("{}", x);
assert_eq!(x, 2);
x += 3;
info2!("{}", x);
assert_eq!(x, 5);
x *= x;
info2!("{}", x);
assert_eq!(x, 25);
x /= 5;
info2!("{}", x);
|
}
|
assert_eq!(x, 5);
|
random_line_split
|
opeq.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// -*- rust -*-
pub fn
|
() {
let mut x: int = 1;
x *= 2;
info2!("{}", x);
assert_eq!(x, 2);
x += 3;
info2!("{}", x);
assert_eq!(x, 5);
x *= x;
info2!("{}", x);
assert_eq!(x, 25);
x /= 5;
info2!("{}", x);
assert_eq!(x, 5);
}
|
main
|
identifier_name
|
xhci_backend_device.rs
|
// Copyright 2019 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use super::xhci_transfer::XhciTransfer;
use crate::usb::host_backend::error::Result;
/// Address of this usb device, as in Set Address standard usb device request.
pub type UsbDeviceAddress = u32;
/// The type of USB device provided by the backend device.
#[derive(PartialEq, Eq)]
pub enum BackendType {
Usb2,
Usb3,
}
/// Xhci backend device is a virtual device connected to xHCI controller. It handles xhci transfers.
pub trait XhciBackendDevice: Send {
/// Returns the type of USB device provided by this device.
fn get_backend_type(&self) -> BackendType;
/// Get vendor id of this device.
fn get_vid(&self) -> u16;
/// Get product id of this device.
fn get_pid(&self) -> u16;
|
/// Reset the backend device.
fn reset(&mut self) -> Result<()>;
}
|
/// Submit a xhci transfer to backend.
fn submit_transfer(&mut self, transfer: XhciTransfer) -> Result<()>;
/// Set address of this backend.
fn set_address(&mut self, address: UsbDeviceAddress);
|
random_line_split
|
xhci_backend_device.rs
|
// Copyright 2019 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use super::xhci_transfer::XhciTransfer;
use crate::usb::host_backend::error::Result;
/// Address of this usb device, as in Set Address standard usb device request.
pub type UsbDeviceAddress = u32;
/// The type of USB device provided by the backend device.
#[derive(PartialEq, Eq)]
pub enum
|
{
Usb2,
Usb3,
}
/// Xhci backend device is a virtual device connected to xHCI controller. It handles xhci transfers.
pub trait XhciBackendDevice: Send {
/// Returns the type of USB device provided by this device.
fn get_backend_type(&self) -> BackendType;
/// Get vendor id of this device.
fn get_vid(&self) -> u16;
/// Get product id of this device.
fn get_pid(&self) -> u16;
/// Submit a xhci transfer to backend.
fn submit_transfer(&mut self, transfer: XhciTransfer) -> Result<()>;
/// Set address of this backend.
fn set_address(&mut self, address: UsbDeviceAddress);
/// Reset the backend device.
fn reset(&mut self) -> Result<()>;
}
|
BackendType
|
identifier_name
|
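The `xhci_backend_device.rs` rows only declare the `XhciBackendDevice` trait. As a rough sketch of what an implementor looks like, here is a fake test-style backend; it assumes the trait and its companion types (`BackendType`, `UsbDeviceAddress`, `XhciTransfer`, `Result`) are in scope, and the vendor/product IDs and behaviour are invented for illustration, not taken from crosvm.

```rust
/// A stand-in backend that records the address it was given and
/// acknowledges every transfer without touching real hardware.
struct FakeDevice {
    address: Option<UsbDeviceAddress>,
}

impl XhciBackendDevice for FakeDevice {
    fn get_backend_type(&self) -> BackendType {
        BackendType::Usb2
    }
    fn get_vid(&self) -> u16 {
        0x1234 // made-up vendor id
    }
    fn get_pid(&self) -> u16 {
        0xabcd // made-up product id
    }
    fn submit_transfer(&mut self, _transfer: XhciTransfer) -> Result<()> {
        // A real backend would hand the transfer to the host device here.
        Ok(())
    }
    fn set_address(&mut self, address: UsbDeviceAddress) {
        self.address = Some(address);
    }
    fn reset(&mut self) -> Result<()> {
        self.address = None;
        Ok(())
    }
}
```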
unix.rs
|
use std::io;
use std::os::raw::{c_int, c_void};
use std::os::unix::io::{AsRawFd, RawFd};
use ::{Rawv, Writev, Readv};
struct
|
(RawFd);
impl<T: AsRawFd> Rawv for T {
fn readv(&mut self, buffers: &[&mut [u8]]) -> io::Result<usize> {
UnixFd(self.as_raw_fd()).readv(buffers)
}
fn writev(&mut self, buffers: &[&[u8]]) -> io::Result<usize> {
UnixFd(self.as_raw_fd()).writev(buffers)
}
}
impl Writev for UnixFd {
fn writev(&mut self, buffers: &[&[u8]]) -> io::Result<usize> {
unsafe {
let ret = writev(self.0, buffers.as_ptr() as *const IoVec, buffers.len() as c_int);
if ret == -1 {
Err(io::Error::last_os_error())
} else {
Ok(ret as usize)
}
}
}
}
impl Readv for UnixFd {
fn readv(&mut self, buffers: &[&mut [u8]]) -> io::Result<usize> {
unsafe {
let ret = readv(self.0, buffers.as_ptr() as *const IoVec, buffers.len() as c_int);
if ret == -1 {
Err(io::Error::last_os_error())
} else {
Ok(ret as usize)
}
}
}
}
#[repr(C)]
struct IoVec {
iov_base: *mut c_void,
iov_len: c_int,
}
extern {
fn readv(fd: RawFd, bufs: *const IoVec, count: c_int) -> c_int;
fn writev(fd: RawFd, bufs: *const IoVec, count: c_int) -> c_int;
}
#[test]
fn test_unix() {
use std::io::Write;
use std::fs::File;
let mut f = File::create("foo.txt").unwrap();
assert_eq!(f.writev(&[b"foo", b"bar"]).unwrap(), 6);
f.flush().unwrap();
let mut f = File::open("foo.txt").unwrap();
let mut first = [0u8; 2];
let mut second = [0u8; 4];
assert_eq!(f.readv(&[&mut first, &mut second]).unwrap(), 6);
assert_eq!(&first, b"fo");
assert_eq!(&second, b"obar");
}
|
UnixFd
|
identifier_name
|
unix.rs
|
use std::io;
use std::os::raw::{c_int, c_void};
use std::os::unix::io::{AsRawFd, RawFd};
use ::{Rawv, Writev, Readv};
struct UnixFd(RawFd);
impl<T: AsRawFd> Rawv for T {
fn readv(&mut self, buffers: &[&mut [u8]]) -> io::Result<usize> {
UnixFd(self.as_raw_fd()).readv(buffers)
}
fn writev(&mut self, buffers: &[&[u8]]) -> io::Result<usize> {
UnixFd(self.as_raw_fd()).writev(buffers)
}
}
impl Writev for UnixFd {
fn writev(&mut self, buffers: &[&[u8]]) -> io::Result<usize> {
unsafe {
let ret = writev(self.0, buffers.as_ptr() as *const IoVec, buffers.len() as c_int);
if ret == -1 {
Err(io::Error::last_os_error())
} else {
Ok(ret as usize)
}
}
}
}
impl Readv for UnixFd {
fn readv(&mut self, buffers: &[&mut [u8]]) -> io::Result<usize> {
unsafe {
let ret = readv(self.0, buffers.as_ptr() as *const IoVec, buffers.len() as c_int);
if ret == -1
|
else {
Ok(ret as usize)
}
}
}
}
#[repr(C)]
struct IoVec {
iov_base: *mut c_void,
iov_len: c_int,
}
extern {
fn readv(fd: RawFd, bufs: *const IoVec, count: c_int) -> c_int;
fn writev(fd: RawFd, bufs: *const IoVec, count: c_int) -> c_int;
}
#[test]
fn test_unix() {
use std::io::Write;
use std::fs::File;
let mut f = File::create("foo.txt").unwrap();
assert_eq!(f.writev(&[b"foo", b"bar"]).unwrap(), 6);
f.flush().unwrap();
let mut f = File::open("foo.txt").unwrap();
let mut first = [0u8; 2];
let mut second = [0u8; 4];
assert_eq!(f.readv(&[&mut first, &mut second]).unwrap(), 6);
assert_eq!(&first, b"fo");
assert_eq!(&second, b"obar");
}
|
{
Err(io::Error::last_os_error())
}
|
conditional_block
|
unix.rs
|
use std::io;
use std::os::raw::{c_int, c_void};
use std::os::unix::io::{AsRawFd, RawFd};
use ::{Rawv, Writev, Readv};
struct UnixFd(RawFd);
impl<T: AsRawFd> Rawv for T {
fn readv(&mut self, buffers: &[&mut [u8]]) -> io::Result<usize> {
UnixFd(self.as_raw_fd()).readv(buffers)
}
fn writev(&mut self, buffers: &[&[u8]]) -> io::Result<usize> {
UnixFd(self.as_raw_fd()).writev(buffers)
|
unsafe {
let ret = writev(self.0, buffers.as_ptr() as *const IoVec, buffers.len() as c_int);
if ret == -1 {
Err(io::Error::last_os_error())
} else {
Ok(ret as usize)
}
}
}
}
impl Readv for UnixFd {
fn readv(&mut self, buffers: &[&mut [u8]]) -> io::Result<usize> {
unsafe {
let ret = readv(self.0, buffers.as_ptr() as *const IoVec, buffers.len() as c_int);
if ret == -1 {
Err(io::Error::last_os_error())
} else {
Ok(ret as usize)
}
}
}
}
#[repr(C)]
struct IoVec {
iov_base: *mut c_void,
iov_len: c_int,
}
extern {
fn readv(fd: RawFd, bufs: *const IoVec, count: c_int) -> c_int;
fn writev(fd: RawFd, bufs: *const IoVec, count: c_int) -> c_int;
}
#[test]
fn test_unix() {
use std::io::Write;
use std::fs::File;
let mut f = File::create("foo.txt").unwrap();
assert_eq!(f.writev(&[b"foo", b"bar"]).unwrap(), 6);
f.flush().unwrap();
let mut f = File::open("foo.txt").unwrap();
let mut first = [0u8; 2];
let mut second = [0u8; 4];
assert_eq!(f.readv(&[&mut first, &mut second]).unwrap(), 6);
assert_eq!(&first, b"fo");
assert_eq!(&second, b"obar");
}
|
}
}
impl Writev for UnixFd {
fn writev(&mut self, buffers: &[&[u8]]) -> io::Result<usize> {
|
random_line_split
|
unix.rs
|
use std::io;
use std::os::raw::{c_int, c_void};
use std::os::unix::io::{AsRawFd, RawFd};
use ::{Rawv, Writev, Readv};
struct UnixFd(RawFd);
impl<T: AsRawFd> Rawv for T {
fn readv(&mut self, buffers: &[&mut [u8]]) -> io::Result<usize> {
UnixFd(self.as_raw_fd()).readv(buffers)
}
fn writev(&mut self, buffers: &[&[u8]]) -> io::Result<usize> {
UnixFd(self.as_raw_fd()).writev(buffers)
}
}
impl Writev for UnixFd {
fn writev(&mut self, buffers: &[&[u8]]) -> io::Result<usize>
|
}
impl Readv for UnixFd {
fn readv(&mut self, buffers: &[&mut [u8]]) -> io::Result<usize> {
unsafe {
let ret = readv(self.0, buffers.as_ptr() as *const IoVec, buffers.len() as c_int);
if ret == -1 {
Err(io::Error::last_os_error())
} else {
Ok(ret as usize)
}
}
}
}
#[repr(C)]
struct IoVec {
iov_base: *mut c_void,
iov_len: c_int,
}
extern {
fn readv(fd: RawFd, bufs: *const IoVec, count: c_int) -> c_int;
fn writev(fd: RawFd, bufs: *const IoVec, count: c_int) -> c_int;
}
#[test]
fn test_unix() {
use std::io::Write;
use std::fs::File;
let mut f = File::create("foo.txt").unwrap();
assert_eq!(f.writev(&[b"foo", b"bar"]).unwrap(), 6);
f.flush().unwrap();
let mut f = File::open("foo.txt").unwrap();
let mut first = [0u8; 2];
let mut second = [0u8; 4];
assert_eq!(f.readv(&[&mut first, &mut second]).unwrap(), 6);
assert_eq!(&first, b"fo");
assert_eq!(&second, b"obar");
}
|
{
unsafe {
let ret = writev(self.0, buffers.as_ptr() as *const IoVec, buffers.len() as c_int);
if ret == -1 {
Err(io::Error::last_os_error())
} else {
Ok(ret as usize)
}
}
}
|
identifier_body
|
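Because of the blanket `impl<T: AsRawFd> Rawv for T` in `unix.rs`, any Unix file-descriptor handle gets vectored I/O without extra code. A hedged usage sketch, assuming the crate's `Rawv` trait has been brought into scope with a `use` at the top:

```rust
use std::io::{self, stdout};

// With `Rawv` in scope, `Stdout` (which is `AsRawFd` on Unix) picks up
// `writev`; one writev(2) syscall gathers both buffers instead of two writes.
fn demo() -> io::Result<()> {
    let mut out = stdout();
    let n = out.writev(&[&b"hello, "[..], &b"world\n"[..]])?;
    eprintln!("wrote {} bytes in a single syscall", n);
    Ok(())
}
```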
task.rs
|
use std::ffi::c_void;
use std::future::Future;
use std::pin::Pin;
use std::ptr;
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc, Mutex, Weak,
};
use std::task::{Context, Poll};
use futures_util::stream::{FuturesUnordered, Stream};
use libc::c_int;
use super::error::hyper_code;
use super::UserDataPointer;
type BoxFuture<T> = Pin<Box<dyn Future<Output = T> + Send>>;
type BoxAny = Box<dyn AsTaskType + Send + Sync>;
/// Return in a poll function to indicate it was ready.
pub const HYPER_POLL_READY: c_int = 0;
/// Return in a poll function to indicate it is still pending.
///
/// The passed in `hyper_waker` should be registered to wake up the task at
/// some later point.
pub const HYPER_POLL_PENDING: c_int = 1;
/// Return in a poll function to indicate an error.
pub const HYPER_POLL_ERROR: c_int = 3;
/// A task executor for `hyper_task`s.
pub struct hyper_executor {
/// The executor of all task futures.
///
/// There should never be contention on the mutex, as it is only locked
/// to drive the futures. However, we cannot guarantee proper usage from
/// `hyper_executor_poll()`, which in C could potentially be called inside
/// one of the stored futures. The mutex isn't re-entrant, so doing so
/// would result in a deadlock, but that's better than data corruption.
driver: Mutex<FuturesUnordered<TaskFuture>>,
/// The queue of futures that need to be pushed into the `driver`.
///
/// This has a separate mutex since `spawn` could be called from inside
/// a future, which would mean the driver's mutex is already locked.
spawn_queue: Mutex<Vec<TaskFuture>>,
/// This is used to track when a future calls `wake` while we are within
/// `hyper_executor::poll_next`.
is_woken: Arc<ExecWaker>,
}
#[derive(Clone)]
pub(crate) struct WeakExec(Weak<hyper_executor>);
struct ExecWaker(AtomicBool);
/// An async task.
pub struct hyper_task {
future: BoxFuture<BoxAny>,
output: Option<BoxAny>,
userdata: UserDataPointer,
}
struct TaskFuture {
task: Option<Box<hyper_task>>,
}
/// An async context for a task that contains the related waker.
pub struct hyper_context<'a>(Context<'a>);
/// A waker that is saved and used to waken a pending task.
pub struct
|
{
waker: std::task::Waker,
}
/// A descriptor for what type a `hyper_task` value is.
#[repr(C)]
pub enum hyper_task_return_type {
/// The value of this task is null (does not imply an error).
HYPER_TASK_EMPTY,
/// The value of this task is `hyper_error *`.
HYPER_TASK_ERROR,
/// The value of this task is `hyper_clientconn *`.
HYPER_TASK_CLIENTCONN,
/// The value of this task is `hyper_response *`.
HYPER_TASK_RESPONSE,
/// The value of this task is `hyper_buf *`.
HYPER_TASK_BUF,
}
pub(crate) unsafe trait AsTaskType {
fn as_task_type(&self) -> hyper_task_return_type;
}
pub(crate) trait IntoDynTaskType {
fn into_dyn_task_type(self) -> BoxAny;
}
// ===== impl hyper_executor =====
impl hyper_executor {
fn new() -> Arc<hyper_executor> {
Arc::new(hyper_executor {
driver: Mutex::new(FuturesUnordered::new()),
spawn_queue: Mutex::new(Vec::new()),
is_woken: Arc::new(ExecWaker(AtomicBool::new(false))),
})
}
pub(crate) fn downgrade(exec: &Arc<hyper_executor>) -> WeakExec {
WeakExec(Arc::downgrade(exec))
}
fn spawn(&self, task: Box<hyper_task>) {
self.spawn_queue
.lock()
.unwrap()
.push(TaskFuture { task: Some(task) });
}
fn poll_next(&self) -> Option<Box<hyper_task>> {
// Drain the queue first.
self.drain_queue();
let waker = futures_util::task::waker_ref(&self.is_woken);
let mut cx = Context::from_waker(&waker);
loop {
match Pin::new(&mut *self.driver.lock().unwrap()).poll_next(&mut cx) {
Poll::Ready(val) => return val,
Poll::Pending => {
// Check if any of the pending tasks tried to spawn
// some new tasks. If so, drain into the driver and loop.
if self.drain_queue() {
continue;
}
// If the driver called `wake` while we were polling,
// we should poll again immediately!
if self.is_woken.0.swap(false, Ordering::SeqCst) {
continue;
}
return None;
}
}
}
}
fn drain_queue(&self) -> bool {
let mut queue = self.spawn_queue.lock().unwrap();
if queue.is_empty() {
return false;
}
let driver = self.driver.lock().unwrap();
for task in queue.drain(..) {
driver.push(task);
}
true
}
}
impl futures_util::task::ArcWake for ExecWaker {
fn wake_by_ref(me: &Arc<ExecWaker>) {
me.0.store(true, Ordering::SeqCst);
}
}
// ===== impl WeakExec =====
impl WeakExec {
pub(crate) fn new() -> Self {
WeakExec(Weak::new())
}
}
impl crate::rt::Executor<BoxFuture<()>> for WeakExec {
fn execute(&self, fut: BoxFuture<()>) {
if let Some(exec) = self.0.upgrade() {
exec.spawn(hyper_task::boxed(fut));
}
}
}
ffi_fn! {
/// Creates a new task executor.
fn hyper_executor_new() -> *const hyper_executor {
Arc::into_raw(hyper_executor::new())
}?= ptr::null()
}
ffi_fn! {
/// Frees an executor and any incomplete tasks still part of it.
fn hyper_executor_free(exec: *const hyper_executor) {
drop(non_null!(Arc::from_raw(exec)?= ()));
}
}
ffi_fn! {
/// Push a task onto the executor.
///
/// The executor takes ownership of the task, it should not be accessed
/// again unless returned back to the user with `hyper_executor_poll`.
fn hyper_executor_push(exec: *const hyper_executor, task: *mut hyper_task) -> hyper_code {
let exec = non_null!(&*exec?= hyper_code::HYPERE_INVALID_ARG);
let task = non_null!(Box::from_raw(task)?= hyper_code::HYPERE_INVALID_ARG);
exec.spawn(task);
hyper_code::HYPERE_OK
}
}
ffi_fn! {
/// Polls the executor, trying to make progress on any tasks that have notified
/// that they are ready again.
///
/// If ready, returns a task from the executor that has completed.
///
/// If there are no ready tasks, this returns `NULL`.
fn hyper_executor_poll(exec: *const hyper_executor) -> *mut hyper_task {
let exec = non_null!(&*exec?= ptr::null_mut());
match exec.poll_next() {
Some(task) => Box::into_raw(task),
None => ptr::null_mut(),
}
}?= ptr::null_mut()
}
// ===== impl hyper_task =====
impl hyper_task {
pub(crate) fn boxed<F>(fut: F) -> Box<hyper_task>
where
F: Future + Send +'static,
F::Output: IntoDynTaskType + Send + Sync +'static,
{
Box::new(hyper_task {
future: Box::pin(async move { fut.await.into_dyn_task_type() }),
output: None,
userdata: UserDataPointer(ptr::null_mut()),
})
}
fn output_type(&self) -> hyper_task_return_type {
match self.output {
None => hyper_task_return_type::HYPER_TASK_EMPTY,
Some(ref val) => val.as_task_type(),
}
}
}
impl Future for TaskFuture {
type Output = Box<hyper_task>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match Pin::new(&mut self.task.as_mut().unwrap().future).poll(cx) {
Poll::Ready(val) => {
let mut task = self.task.take().unwrap();
task.output = Some(val);
Poll::Ready(task)
}
Poll::Pending => Poll::Pending,
}
}
}
ffi_fn! {
/// Free a task.
fn hyper_task_free(task: *mut hyper_task) {
drop(non_null!(Box::from_raw(task)?= ()));
}
}
ffi_fn! {
/// Takes the output value of this task.
///
/// This must only be called once polling the task on an executor has finished
/// this task.
///
/// Use `hyper_task_type` to determine the type of the `void *` return value.
fn hyper_task_value(task: *mut hyper_task) -> *mut c_void {
let task = non_null!(&mut *task?= ptr::null_mut());
if let Some(val) = task.output.take() {
let p = Box::into_raw(val) as *mut c_void;
// protect from returning fake pointers to empty types
if p == std::ptr::NonNull::<c_void>::dangling().as_ptr() {
ptr::null_mut()
} else {
p
}
} else {
ptr::null_mut()
}
}?= ptr::null_mut()
}
ffi_fn! {
/// Query the return type of this task.
fn hyper_task_type(task: *mut hyper_task) -> hyper_task_return_type {
// instead of blowing up spectacularly, just say this null task
// doesn't have a value to retrieve.
non_null!(&*task?= hyper_task_return_type::HYPER_TASK_EMPTY).output_type()
}
}
ffi_fn! {
/// Set a user data pointer to be associated with this task.
///
/// This value will be passed to task callbacks, and can be checked later
/// with `hyper_task_userdata`.
fn hyper_task_set_userdata(task: *mut hyper_task, userdata: *mut c_void) {
if task.is_null() {
return;
}
unsafe { (*task).userdata = UserDataPointer(userdata) };
}
}
ffi_fn! {
/// Retrieve the userdata that has been set via `hyper_task_set_userdata`.
fn hyper_task_userdata(task: *mut hyper_task) -> *mut c_void {
non_null!(&*task?= ptr::null_mut()).userdata.0
}?= ptr::null_mut()
}
// ===== impl AsTaskType =====
unsafe impl AsTaskType for () {
fn as_task_type(&self) -> hyper_task_return_type {
hyper_task_return_type::HYPER_TASK_EMPTY
}
}
unsafe impl AsTaskType for crate::Error {
fn as_task_type(&self) -> hyper_task_return_type {
hyper_task_return_type::HYPER_TASK_ERROR
}
}
impl<T> IntoDynTaskType for T
where
T: AsTaskType + Send + Sync +'static,
{
fn into_dyn_task_type(self) -> BoxAny {
Box::new(self)
}
}
impl<T> IntoDynTaskType for crate::Result<T>
where
T: IntoDynTaskType + Send + Sync +'static,
{
fn into_dyn_task_type(self) -> BoxAny {
match self {
Ok(val) => val.into_dyn_task_type(),
Err(err) => Box::new(err),
}
}
}
impl<T> IntoDynTaskType for Option<T>
where
T: IntoDynTaskType + Send + Sync +'static,
{
fn into_dyn_task_type(self) -> BoxAny {
match self {
Some(val) => val.into_dyn_task_type(),
None => ().into_dyn_task_type(),
}
}
}
// ===== impl hyper_context =====
impl hyper_context<'_> {
pub(crate) fn wrap<'a, 'b>(cx: &'a mut Context<'b>) -> &'a mut hyper_context<'b> {
// A struct with only one field has the same layout as that field.
unsafe { std::mem::transmute::<&mut Context<'_>, &mut hyper_context<'_>>(cx) }
}
}
ffi_fn! {
/// Copies a waker out of the task context.
fn hyper_context_waker(cx: *mut hyper_context<'_>) -> *mut hyper_waker {
let waker = non_null!(&mut *cx?= ptr::null_mut()).0.waker().clone();
Box::into_raw(Box::new(hyper_waker { waker }))
}?= ptr::null_mut()
}
// ===== impl hyper_waker =====
ffi_fn! {
/// Free a waker that hasn't been woken.
fn hyper_waker_free(waker: *mut hyper_waker) {
drop(non_null!(Box::from_raw(waker)?= ()));
}
}
ffi_fn! {
/// Wake up the task associated with a waker.
///
/// NOTE: This consumes the waker. You should not use or free the waker afterwards.
fn hyper_waker_wake(waker: *mut hyper_waker) {
let waker = non_null!(Box::from_raw(waker)?= ());
waker.waker.wake();
}
}
|
hyper_waker
|
identifier_name
|
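The `ffi_fn!` wrappers in this `task.rs` entry are meant to be driven from C, but the control flow is easier to follow as a crate-internal Rust sketch that uses the private items directly (something external callers cannot do; this is illustration only):

```rust
// Spawn a boxed future on the executor, then poll until it hands the
// finished task back, mirroring what hyper_executor_push / hyper_executor_poll
// do across the FFI boundary.
fn drive_once() {
    let exec = hyper_executor::new();            // Arc<hyper_executor>
    let task = hyper_task::boxed(async {});      // output is (), i.e. HYPER_TASK_EMPTY
    exec.spawn(task);                            // queued in spawn_queue

    // poll_next() drains spawn_queue into the driver and polls every stored
    // future; a completed one comes back with its output stashed for
    // hyper_task_value() / hyper_task_type().
    while let Some(done) = exec.poll_next() {
        assert!(matches!(
            done.output_type(),
            hyper_task_return_type::HYPER_TASK_EMPTY
        ));
    }
}
```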
task.rs
|
use std::ffi::c_void;
use std::future::Future;
use std::pin::Pin;
use std::ptr;
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc, Mutex, Weak,
};
use std::task::{Context, Poll};
use futures_util::stream::{FuturesUnordered, Stream};
use libc::c_int;
use super::error::hyper_code;
use super::UserDataPointer;
type BoxFuture<T> = Pin<Box<dyn Future<Output = T> + Send>>;
type BoxAny = Box<dyn AsTaskType + Send + Sync>;
/// Return in a poll function to indicate it was ready.
pub const HYPER_POLL_READY: c_int = 0;
/// Return in a poll function to indicate it is still pending.
///
/// The passed in `hyper_waker` should be registered to wake up the task at
/// some later point.
pub const HYPER_POLL_PENDING: c_int = 1;
/// Return in a poll function to indicate an error.
pub const HYPER_POLL_ERROR: c_int = 3;
/// A task executor for `hyper_task`s.
pub struct hyper_executor {
/// The executor of all task futures.
///
/// There should never be contention on the mutex, as it is only locked
/// to drive the futures. However, we cannot guarantee proper usage from
/// `hyper_executor_poll()`, which in C could potentially be called inside
/// one of the stored futures. The mutex isn't re-entrant, so doing so
/// would result in a deadlock, but that's better than data corruption.
driver: Mutex<FuturesUnordered<TaskFuture>>,
/// The queue of futures that need to be pushed into the `driver`.
///
/// This has a separate mutex since `spawn` could be called from inside
/// a future, which would mean the driver's mutex is already locked.
spawn_queue: Mutex<Vec<TaskFuture>>,
/// This is used to track when a future calls `wake` while we are within
/// `hyper_executor::poll_next`.
is_woken: Arc<ExecWaker>,
}
#[derive(Clone)]
pub(crate) struct WeakExec(Weak<hyper_executor>);
struct ExecWaker(AtomicBool);
/// An async task.
pub struct hyper_task {
future: BoxFuture<BoxAny>,
output: Option<BoxAny>,
userdata: UserDataPointer,
}
struct TaskFuture {
task: Option<Box<hyper_task>>,
}
/// An async context for a task that contains the related waker.
pub struct hyper_context<'a>(Context<'a>);
/// A waker that is saved and used to waken a pending task.
pub struct hyper_waker {
waker: std::task::Waker,
}
/// A descriptor for what type a `hyper_task` value is.
#[repr(C)]
pub enum hyper_task_return_type {
/// The value of this task is null (does not imply an error).
HYPER_TASK_EMPTY,
/// The value of this task is `hyper_error *`.
HYPER_TASK_ERROR,
/// The value of this task is `hyper_clientconn *`.
HYPER_TASK_CLIENTCONN,
/// The value of this task is `hyper_response *`.
HYPER_TASK_RESPONSE,
/// The value of this task is `hyper_buf *`.
HYPER_TASK_BUF,
}
pub(crate) unsafe trait AsTaskType {
fn as_task_type(&self) -> hyper_task_return_type;
}
pub(crate) trait IntoDynTaskType {
fn into_dyn_task_type(self) -> BoxAny;
}
// ===== impl hyper_executor =====
impl hyper_executor {
fn new() -> Arc<hyper_executor> {
Arc::new(hyper_executor {
driver: Mutex::new(FuturesUnordered::new()),
spawn_queue: Mutex::new(Vec::new()),
is_woken: Arc::new(ExecWaker(AtomicBool::new(false))),
})
}
pub(crate) fn downgrade(exec: &Arc<hyper_executor>) -> WeakExec {
WeakExec(Arc::downgrade(exec))
}
fn spawn(&self, task: Box<hyper_task>) {
self.spawn_queue
.lock()
.unwrap()
.push(TaskFuture { task: Some(task) });
}
fn poll_next(&self) -> Option<Box<hyper_task>> {
// Drain the queue first.
self.drain_queue();
let waker = futures_util::task::waker_ref(&self.is_woken);
let mut cx = Context::from_waker(&waker);
loop {
match Pin::new(&mut *self.driver.lock().unwrap()).poll_next(&mut cx) {
Poll::Ready(val) => return val,
Poll::Pending => {
// Check if any of the pending tasks tried to spawn
// some new tasks. If so, drain into the driver and loop.
if self.drain_queue() {
continue;
}
// If the driver called `wake` while we were polling,
// we should poll again immediately!
if self.is_woken.0.swap(false, Ordering::SeqCst) {
continue;
}
return None;
}
}
}
}
fn drain_queue(&self) -> bool {
let mut queue = self.spawn_queue.lock().unwrap();
if queue.is_empty() {
return false;
}
let driver = self.driver.lock().unwrap();
for task in queue.drain(..) {
driver.push(task);
}
true
}
}
impl futures_util::task::ArcWake for ExecWaker {
fn wake_by_ref(me: &Arc<ExecWaker>) {
me.0.store(true, Ordering::SeqCst);
}
}
// ===== impl WeakExec =====
impl WeakExec {
pub(crate) fn new() -> Self {
WeakExec(Weak::new())
}
}
impl crate::rt::Executor<BoxFuture<()>> for WeakExec {
fn execute(&self, fut: BoxFuture<()>) {
if let Some(exec) = self.0.upgrade() {
exec.spawn(hyper_task::boxed(fut));
}
}
}
ffi_fn! {
/// Creates a new task executor.
fn hyper_executor_new() -> *const hyper_executor {
Arc::into_raw(hyper_executor::new())
}?= ptr::null()
}
ffi_fn! {
/// Frees an executor and any incomplete tasks still part of it.
fn hyper_executor_free(exec: *const hyper_executor) {
drop(non_null!(Arc::from_raw(exec)?= ()));
}
}
ffi_fn! {
/// Push a task onto the executor.
///
/// The executor takes ownership of the task, it should not be accessed
/// again unless returned back to the user with `hyper_executor_poll`.
fn hyper_executor_push(exec: *const hyper_executor, task: *mut hyper_task) -> hyper_code {
let exec = non_null!(&*exec?= hyper_code::HYPERE_INVALID_ARG);
let task = non_null!(Box::from_raw(task)?= hyper_code::HYPERE_INVALID_ARG);
exec.spawn(task);
hyper_code::HYPERE_OK
}
}
ffi_fn! {
/// Polls the executor, trying to make progress on any tasks that have notified
/// that they are ready again.
///
/// If ready, returns a task from the executor that has completed.
///
/// If there are no ready tasks, this returns `NULL`.
fn hyper_executor_poll(exec: *const hyper_executor) -> *mut hyper_task {
let exec = non_null!(&*exec?= ptr::null_mut());
match exec.poll_next() {
Some(task) => Box::into_raw(task),
None => ptr::null_mut(),
}
}?= ptr::null_mut()
}
// ===== impl hyper_task =====
impl hyper_task {
pub(crate) fn boxed<F>(fut: F) -> Box<hyper_task>
where
F: Future + Send +'static,
F::Output: IntoDynTaskType + Send + Sync +'static,
|
fn output_type(&self) -> hyper_task_return_type {
match self.output {
None => hyper_task_return_type::HYPER_TASK_EMPTY,
Some(ref val) => val.as_task_type(),
}
}
}
impl Future for TaskFuture {
type Output = Box<hyper_task>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match Pin::new(&mut self.task.as_mut().unwrap().future).poll(cx) {
Poll::Ready(val) => {
let mut task = self.task.take().unwrap();
task.output = Some(val);
Poll::Ready(task)
}
Poll::Pending => Poll::Pending,
}
}
}
ffi_fn! {
/// Free a task.
fn hyper_task_free(task: *mut hyper_task) {
drop(non_null!(Box::from_raw(task)?= ()));
}
}
ffi_fn! {
/// Takes the output value of this task.
///
/// This must only be called once polling the task on an executor has finished
/// this task.
///
/// Use `hyper_task_type` to determine the type of the `void *` return value.
fn hyper_task_value(task: *mut hyper_task) -> *mut c_void {
let task = non_null!(&mut *task?= ptr::null_mut());
if let Some(val) = task.output.take() {
let p = Box::into_raw(val) as *mut c_void;
// protect from returning fake pointers to empty types
if p == std::ptr::NonNull::<c_void>::dangling().as_ptr() {
ptr::null_mut()
} else {
p
}
} else {
ptr::null_mut()
}
}?= ptr::null_mut()
}
ffi_fn! {
/// Query the return type of this task.
fn hyper_task_type(task: *mut hyper_task) -> hyper_task_return_type {
// instead of blowing up spectacularly, just say this null task
// doesn't have a value to retrieve.
non_null!(&*task?= hyper_task_return_type::HYPER_TASK_EMPTY).output_type()
}
}
ffi_fn! {
/// Set a user data pointer to be associated with this task.
///
/// This value will be passed to task callbacks, and can be checked later
/// with `hyper_task_userdata`.
fn hyper_task_set_userdata(task: *mut hyper_task, userdata: *mut c_void) {
if task.is_null() {
return;
}
unsafe { (*task).userdata = UserDataPointer(userdata) };
}
}
ffi_fn! {
/// Retrieve the userdata that has been set via `hyper_task_set_userdata`.
fn hyper_task_userdata(task: *mut hyper_task) -> *mut c_void {
non_null!(&*task?= ptr::null_mut()).userdata.0
}?= ptr::null_mut()
}
// ===== impl AsTaskType =====
unsafe impl AsTaskType for () {
fn as_task_type(&self) -> hyper_task_return_type {
hyper_task_return_type::HYPER_TASK_EMPTY
}
}
unsafe impl AsTaskType for crate::Error {
fn as_task_type(&self) -> hyper_task_return_type {
hyper_task_return_type::HYPER_TASK_ERROR
}
}
impl<T> IntoDynTaskType for T
where
T: AsTaskType + Send + Sync +'static,
{
fn into_dyn_task_type(self) -> BoxAny {
Box::new(self)
}
}
impl<T> IntoDynTaskType for crate::Result<T>
where
T: IntoDynTaskType + Send + Sync +'static,
{
fn into_dyn_task_type(self) -> BoxAny {
match self {
Ok(val) => val.into_dyn_task_type(),
Err(err) => Box::new(err),
}
}
}
impl<T> IntoDynTaskType for Option<T>
where
T: IntoDynTaskType + Send + Sync +'static,
{
fn into_dyn_task_type(self) -> BoxAny {
match self {
Some(val) => val.into_dyn_task_type(),
None => ().into_dyn_task_type(),
}
}
}
// ===== impl hyper_context =====
impl hyper_context<'_> {
pub(crate) fn wrap<'a, 'b>(cx: &'a mut Context<'b>) -> &'a mut hyper_context<'b> {
// A struct with only one field has the same layout as that field.
unsafe { std::mem::transmute::<&mut Context<'_>, &mut hyper_context<'_>>(cx) }
}
}
ffi_fn! {
/// Copies a waker out of the task context.
fn hyper_context_waker(cx: *mut hyper_context<'_>) -> *mut hyper_waker {
let waker = non_null!(&mut *cx?= ptr::null_mut()).0.waker().clone();
Box::into_raw(Box::new(hyper_waker { waker }))
}?= ptr::null_mut()
}
// ===== impl hyper_waker =====
ffi_fn! {
/// Free a waker that hasn't been woken.
fn hyper_waker_free(waker: *mut hyper_waker) {
drop(non_null!(Box::from_raw(waker)?= ()));
}
}
ffi_fn! {
/// Wake up the task associated with a waker.
///
/// NOTE: This consumes the waker. You should not use or free the waker afterwards.
fn hyper_waker_wake(waker: *mut hyper_waker) {
let waker = non_null!(Box::from_raw(waker)?= ());
waker.waker.wake();
}
}
|
{
Box::new(hyper_task {
future: Box::pin(async move { fut.await.into_dyn_task_type() }),
output: None,
userdata: UserDataPointer(ptr::null_mut()),
})
}
|
identifier_body
|
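The `AsTaskType` / `IntoDynTaskType` pair in these `task.rs` rows is what lets `hyper_task_type()` report what a finished task is holding. As a sketch of how a new value type would opt in, mirroring the existing impls for `()` and `crate::Error` (the type below is hypothetical and not part of hyper):

```rust
// Hypothetical FFI value type, named in hyper's non-camel-case style
// purely for illustration.
#[allow(non_camel_case_types, dead_code)]
struct hyper_example_buf {
    bytes: Vec<u8>,
}

// Declaring which tag describes this value; the blanket
// `impl<T: AsTaskType + Send + Sync + 'static> IntoDynTaskType for T`
// then boxes it automatically when a future resolves to it.
unsafe impl AsTaskType for hyper_example_buf {
    fn as_task_type(&self) -> hyper_task_return_type {
        // Reusing the BUF tag for the sketch.
        hyper_task_return_type::HYPER_TASK_BUF
    }
}
```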
task.rs
|
use std::ffi::c_void;
use std::future::Future;
use std::pin::Pin;
use std::ptr;
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc, Mutex, Weak,
};
use std::task::{Context, Poll};
use futures_util::stream::{FuturesUnordered, Stream};
use libc::c_int;
use super::error::hyper_code;
use super::UserDataPointer;
type BoxFuture<T> = Pin<Box<dyn Future<Output = T> + Send>>;
type BoxAny = Box<dyn AsTaskType + Send + Sync>;
/// Return in a poll function to indicate it was ready.
pub const HYPER_POLL_READY: c_int = 0;
/// Return in a poll function to indicate it is still pending.
///
/// The passed in `hyper_waker` should be registered to wake up the task at
/// some later point.
pub const HYPER_POLL_PENDING: c_int = 1;
/// Return in a poll function to indicate an error.
pub const HYPER_POLL_ERROR: c_int = 3;
/// A task executor for `hyper_task`s.
pub struct hyper_executor {
/// The executor of all task futures.
///
/// There should never be contention on the mutex, as it is only locked
/// to drive the futures. However, we cannot guarantee proper usage from
/// `hyper_executor_poll()`, which in C could potentially be called inside
/// one of the stored futures. The mutex isn't re-entrant, so doing so
/// would result in a deadlock, but that's better than data corruption.
driver: Mutex<FuturesUnordered<TaskFuture>>,
/// The queue of futures that need to be pushed into the `driver`.
///
/// This has a separate mutex since `spawn` could be called from inside
/// a future, which would mean the driver's mutex is already locked.
spawn_queue: Mutex<Vec<TaskFuture>>,
/// This is used to track when a future calls `wake` while we are within
/// `hyper_executor::poll_next`.
is_woken: Arc<ExecWaker>,
}
#[derive(Clone)]
pub(crate) struct WeakExec(Weak<hyper_executor>);
struct ExecWaker(AtomicBool);
/// An async task.
pub struct hyper_task {
future: BoxFuture<BoxAny>,
output: Option<BoxAny>,
userdata: UserDataPointer,
}
struct TaskFuture {
task: Option<Box<hyper_task>>,
}
/// An async context for a task that contains the related waker.
pub struct hyper_context<'a>(Context<'a>);
/// A waker that is saved and used to waken a pending task.
pub struct hyper_waker {
waker: std::task::Waker,
}
/// A descriptor for what type a `hyper_task` value is.
#[repr(C)]
pub enum hyper_task_return_type {
/// The value of this task is null (does not imply an error).
HYPER_TASK_EMPTY,
/// The value of this task is `hyper_error *`.
HYPER_TASK_ERROR,
/// The value of this task is `hyper_clientconn *`.
HYPER_TASK_CLIENTCONN,
/// The value of this task is `hyper_response *`.
HYPER_TASK_RESPONSE,
/// The value of this task is `hyper_buf *`.
HYPER_TASK_BUF,
}
pub(crate) unsafe trait AsTaskType {
fn as_task_type(&self) -> hyper_task_return_type;
}
pub(crate) trait IntoDynTaskType {
fn into_dyn_task_type(self) -> BoxAny;
}
// ===== impl hyper_executor =====
impl hyper_executor {
fn new() -> Arc<hyper_executor> {
Arc::new(hyper_executor {
driver: Mutex::new(FuturesUnordered::new()),
spawn_queue: Mutex::new(Vec::new()),
is_woken: Arc::new(ExecWaker(AtomicBool::new(false))),
})
}
pub(crate) fn downgrade(exec: &Arc<hyper_executor>) -> WeakExec {
WeakExec(Arc::downgrade(exec))
}
fn spawn(&self, task: Box<hyper_task>) {
self.spawn_queue
.lock()
.unwrap()
.push(TaskFuture { task: Some(task) });
}
fn poll_next(&self) -> Option<Box<hyper_task>> {
// Drain the queue first.
self.drain_queue();
let waker = futures_util::task::waker_ref(&self.is_woken);
let mut cx = Context::from_waker(&waker);
loop {
match Pin::new(&mut *self.driver.lock().unwrap()).poll_next(&mut cx) {
Poll::Ready(val) => return val,
Poll::Pending => {
// Check if any of the pending tasks tried to spawn
// some new tasks. If so, drain into the driver and loop.
if self.drain_queue() {
continue;
}
// If the driver called `wake` while we were polling,
// we should poll again immediately!
if self.is_woken.0.swap(false, Ordering::SeqCst) {
continue;
}
return None;
}
}
}
}
fn drain_queue(&self) -> bool {
let mut queue = self.spawn_queue.lock().unwrap();
if queue.is_empty() {
return false;
}
let driver = self.driver.lock().unwrap();
for task in queue.drain(..) {
driver.push(task);
}
true
}
}
impl futures_util::task::ArcWake for ExecWaker {
fn wake_by_ref(me: &Arc<ExecWaker>) {
me.0.store(true, Ordering::SeqCst);
}
}
// ===== impl WeakExec =====
impl WeakExec {
pub(crate) fn new() -> Self {
WeakExec(Weak::new())
}
}
impl crate::rt::Executor<BoxFuture<()>> for WeakExec {
fn execute(&self, fut: BoxFuture<()>) {
if let Some(exec) = self.0.upgrade() {
exec.spawn(hyper_task::boxed(fut));
}
}
}
ffi_fn! {
/// Creates a new task executor.
fn hyper_executor_new() -> *const hyper_executor {
Arc::into_raw(hyper_executor::new())
}?= ptr::null()
}
ffi_fn! {
/// Frees an executor and any incomplete tasks still part of it.
fn hyper_executor_free(exec: *const hyper_executor) {
drop(non_null!(Arc::from_raw(exec)?= ()));
}
}
ffi_fn! {
/// Push a task onto the executor.
///
/// The executor takes ownership of the task, it should not be accessed
/// again unless returned back to the user with `hyper_executor_poll`.
fn hyper_executor_push(exec: *const hyper_executor, task: *mut hyper_task) -> hyper_code {
let exec = non_null!(&*exec?= hyper_code::HYPERE_INVALID_ARG);
let task = non_null!(Box::from_raw(task)?= hyper_code::HYPERE_INVALID_ARG);
exec.spawn(task);
hyper_code::HYPERE_OK
}
}
ffi_fn! {
/// Polls the executor, trying to make progress on any tasks that have notified
/// that they are ready again.
///
/// If ready, returns a task from the executor that has completed.
///
/// If there are no ready tasks, this returns `NULL`.
fn hyper_executor_poll(exec: *const hyper_executor) -> *mut hyper_task {
let exec = non_null!(&*exec?= ptr::null_mut());
match exec.poll_next() {
Some(task) => Box::into_raw(task),
None => ptr::null_mut(),
}
}?= ptr::null_mut()
}
// ===== impl hyper_task =====
impl hyper_task {
pub(crate) fn boxed<F>(fut: F) -> Box<hyper_task>
where
F: Future + Send +'static,
F::Output: IntoDynTaskType + Send + Sync +'static,
{
Box::new(hyper_task {
future: Box::pin(async move { fut.await.into_dyn_task_type() }),
output: None,
userdata: UserDataPointer(ptr::null_mut()),
})
}
fn output_type(&self) -> hyper_task_return_type {
match self.output {
None => hyper_task_return_type::HYPER_TASK_EMPTY,
Some(ref val) => val.as_task_type(),
}
}
}
impl Future for TaskFuture {
type Output = Box<hyper_task>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match Pin::new(&mut self.task.as_mut().unwrap().future).poll(cx) {
Poll::Ready(val) => {
let mut task = self.task.take().unwrap();
task.output = Some(val);
Poll::Ready(task)
}
Poll::Pending => Poll::Pending,
}
}
}
ffi_fn! {
/// Free a task.
fn hyper_task_free(task: *mut hyper_task) {
drop(non_null!(Box::from_raw(task)?= ()));
}
}
ffi_fn! {
/// Takes the output value of this task.
///
/// This must only be called once polling the task on an executor has finished
/// this task.
///
/// Use `hyper_task_type` to determine the type of the `void *` return value.
fn hyper_task_value(task: *mut hyper_task) -> *mut c_void {
let task = non_null!(&mut *task?= ptr::null_mut());
if let Some(val) = task.output.take() {
let p = Box::into_raw(val) as *mut c_void;
// protect from returning fake pointers to empty types
if p == std::ptr::NonNull::<c_void>::dangling().as_ptr() {
ptr::null_mut()
} else {
p
}
} else {
ptr::null_mut()
}
}?= ptr::null_mut()
}
ffi_fn! {
/// Query the return type of this task.
fn hyper_task_type(task: *mut hyper_task) -> hyper_task_return_type {
// instead of blowing up spectacularly, just say this null task
// doesn't have a value to retrieve.
non_null!(&*task?= hyper_task_return_type::HYPER_TASK_EMPTY).output_type()
}
}
ffi_fn! {
/// Set a user data pointer to be associated with this task.
///
/// This value will be passed to task callbacks, and can be checked later
/// with `hyper_task_userdata`.
fn hyper_task_set_userdata(task: *mut hyper_task, userdata: *mut c_void) {
if task.is_null() {
return;
}
unsafe { (*task).userdata = UserDataPointer(userdata) };
}
}
ffi_fn! {
/// Retrieve the userdata that has been set via `hyper_task_set_userdata`.
fn hyper_task_userdata(task: *mut hyper_task) -> *mut c_void {
non_null!(&*task?= ptr::null_mut()).userdata.0
}?= ptr::null_mut()
}
// ===== impl AsTaskType =====
unsafe impl AsTaskType for () {
|
fn as_task_type(&self) -> hyper_task_return_type {
hyper_task_return_type::HYPER_TASK_EMPTY
}
}
unsafe impl AsTaskType for crate::Error {
fn as_task_type(&self) -> hyper_task_return_type {
hyper_task_return_type::HYPER_TASK_ERROR
}
}
impl<T> IntoDynTaskType for T
where
T: AsTaskType + Send + Sync +'static,
{
fn into_dyn_task_type(self) -> BoxAny {
Box::new(self)
}
}
impl<T> IntoDynTaskType for crate::Result<T>
where
T: IntoDynTaskType + Send + Sync +'static,
{
fn into_dyn_task_type(self) -> BoxAny {
match self {
Ok(val) => val.into_dyn_task_type(),
Err(err) => Box::new(err),
}
}
}
impl<T> IntoDynTaskType for Option<T>
where
T: IntoDynTaskType + Send + Sync +'static,
{
fn into_dyn_task_type(self) -> BoxAny {
match self {
Some(val) => val.into_dyn_task_type(),
None => ().into_dyn_task_type(),
}
}
}
// ===== impl hyper_context =====
impl hyper_context<'_> {
pub(crate) fn wrap<'a, 'b>(cx: &'a mut Context<'b>) -> &'a mut hyper_context<'b> {
// A struct with only one field has the same layout as that field.
unsafe { std::mem::transmute::<&mut Context<'_>, &mut hyper_context<'_>>(cx) }
}
}
ffi_fn! {
/// Copies a waker out of the task context.
fn hyper_context_waker(cx: *mut hyper_context<'_>) -> *mut hyper_waker {
let waker = non_null!(&mut *cx?= ptr::null_mut()).0.waker().clone();
Box::into_raw(Box::new(hyper_waker { waker }))
}?= ptr::null_mut()
}
// ===== impl hyper_waker =====
ffi_fn! {
/// Free a waker that hasn't been woken.
fn hyper_waker_free(waker: *mut hyper_waker) {
drop(non_null!(Box::from_raw(waker)?= ()));
}
}
ffi_fn! {
/// Wake up the task associated with a waker.
///
/// NOTE: This consumes the waker. You should not use or free the waker afterwards.
fn hyper_waker_wake(waker: *mut hyper_waker) {
let waker = non_null!(Box::from_raw(waker)?= ());
waker.waker.wake();
}
}
|
random_line_split
|
|
lib.rs
|
// DO NOT EDIT!
// This file was generated automatically from'src/mako/api/lib.rs.mako'
// DO NOT EDIT!
//! This documentation was generated from *DoubleClick Bid Manager* crate version *0.1.8+20150326*, where *20150326* is the exact revision of the *doubleclickbidmanager:v1* schema built by the [mako](http://www.makotemplates.org/) code generator *v0.1.8*.
//!
//! Everything else about the *DoubleClick Bid Manager* *v1* API can be found at the
//! [official documentation site](https://developers.google.com/bid-manager/).
//! The original source code is [on github](https://github.com/Byron/google-apis-rs/tree/master/gen/doubleclickbidmanager1).
//! # Features
//!
//! Handle the following *Resources* with ease from the central [hub](struct.DoubleClickBidManager.html)...
//!
//! * lineitems
//! * [*downloadlineitems*](struct.LineitemDownloadlineitemCall.html) and [*uploadlineitems*](struct.LineitemUploadlineitemCall.html)
//! * [queries](struct.Query.html)
//! * [*createquery*](struct.QueryCreatequeryCall.html), [*deletequery*](struct.QueryDeletequeryCall.html), [*getquery*](struct.QueryGetqueryCall.html), [*listqueries*](struct.QueryListqueryCall.html) and [*runquery*](struct.QueryRunqueryCall.html)
//! * [reports](struct.Report.html)
//! * [*listreports*](struct.ReportListreportCall.html)
//!
//!
//!
//!
//! Not what you are looking for? Find all other Google APIs in their Rust [documentation index](../index.html).
//!
//! # Structure of this Library
//!
//! The API is structured into the following primary items:
//!
//! * **[Hub](struct.DoubleClickBidManager.html)**
//! * a central object to maintain state and allow accessing all *Activities*
//! * creates [*Method Builders*](trait.MethodsBuilder.html) which in turn
//! allow access to individual [*Call Builders*](trait.CallBuilder.html)
//! * **[Resources](trait.Resource.html)**
//! * primary types that you can apply *Activities* to
//! * a collection of properties and *Parts*
//! * **[Parts](trait.Part.html)**
//! * a collection of properties
//! * never directly used in *Activities*
//! * **[Activities](trait.CallBuilder.html)**
//! * operations to apply to *Resources*
//!
//! All *structures* are marked with applicable traits to further categorize them and ease browsing.
//!
//! Generally speaking, you can invoke *Activities* like this:
//!
//! ```Rust,ignore
//! let r = hub.resource().activity(...).doit()
//! ```
//!
//! Or specifically...
//!
//! ```ignore
//! let r = hub.queries().getquery(...).doit()
//! let r = hub.queries().createquery(...).doit()
//! ```
//!
//! The `resource()` and `activity(...)` calls create [builders][builder-pattern]. The second one dealing with `Activities`
//! supports various methods to configure the impending operation (not shown here). It is made such that all required arguments have to be
//! specified right away (i.e. `(...)`), whereas all optional ones can be [build up][builder-pattern] as desired.
//! The `doit()` method performs the actual communication with the server and returns the respective result.
//!
//! # Usage
//!
//! ## Setting up your Project
//!
//! To use this library, you would put the following lines into your `Cargo.toml` file:
//!
//! ```toml
//! [dependencies]
//! google-doubleclickbidmanager1 = "*"
//! ```
//!
//! ## A complete example
//!
//! ```test_harness,no_run
//! extern crate hyper;
//! extern crate yup_oauth2 as oauth2;
//! extern crate google_doubleclickbidmanager1 as doubleclickbidmanager1;
//! use doubleclickbidmanager1::{Result, Error};
//! # #[test] fn egal() {
//! use std::default::Default;
//! use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
//! use doubleclickbidmanager1::DoubleClickBidManager;
//!
//! // Get an ApplicationSecret instance by some means. It contains the `client_id` and
//! // `client_secret`, among other things.
//! let secret: ApplicationSecret = Default::default();
//! // Instantiate the authenticator. It will choose a suitable authentication flow for you,
//! // unless you replace `None` with the desired Flow.
//! // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about
//! // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and
//! // retrieve them from storage.
//! let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
//! hyper::Client::new(),
//! <MemoryStorage as Default>::default(), None);
//! let mut hub = DoubleClickBidManager::new(hyper::Client::new(), auth);
//! // You can configure optional parameters by calling the respective setters at will, and
//! // execute the final call using `doit()`.
//! // Values shown here are possibly random and not representative!
//! let result = hub.queries().getquery("queryId")
//! .doit();
//!
//! match result {
//! Err(e) => match e {
//! // The Error enum provides details about what exactly happened.
//! // You can also just use its `Debug`, `Display` or `Error` traits
//! Error::HttpError(_)
//! |Error::MissingAPIKey
//! |Error::MissingToken(_)
//! |Error::Cancelled
//! |Error::UploadSizeLimitExceeded(_, _)
//! |Error::Failure(_)
//! |Error::BadRequest(_)
//! |Error::FieldClash(_)
//! |Error::JsonDecodeError(_, _) => println!("{}", e),
//! },
//! Ok(res) => println!("Success: {:?}", res),
//! }
//! # }
//! ```
//! ## Handling Errors
//!
//! All errors produced by the system are provided either as [Result](enum.Result.html) enumeration as return value of
//! the doit() methods, or handed as possibly intermediate results to either the
//! [Hub Delegate](trait.Delegate.html), or the [Authenticator Delegate](../yup-oauth2/trait.AuthenticatorDelegate.html).
//!
//! When delegates handle errors or intermediate values, they may have a chance to instruct the system to retry. This
//! makes the system potentially resilient to all kinds of errors.
//!
//! ## Uploads and Downloads
//! If a method supports downloads, the response body, which is part of the [Result](enum.Result.html), should be
//! read by you to obtain the media.
//! If such a method also supports a [Response Result](trait.ResponseResult.html), it will return that by default.
//! You can see it as meta-data for the actual media. To trigger a media download, you will have to set up the builder by making
//! this call: `.param("alt", "media")`.
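//!
//! As a minimal, hypothetical sketch (the generic `resource()`/`activity(...)` names stand in
//! for a call in this API that actually supports downloads):
//!
//! ```ignore
//! // Ask for the raw media instead of the JSON metadata; the response body then
//! // contains the media and must be read by you, as described above.
//! let result = hub.resource().activity(...)
//!              .param("alt", "media")
//!              .doit();
//! ```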
//!
//! Methods supporting uploads can do so using up to 2 different protocols:
//! *simple* and *resumable*. The distinctiveness of each is represented by customized
//! `doit(...)` methods, which are then named `upload(...)` and `upload_resumable(...)` respectively.
//!
//! ## Customization and Callbacks
//!
//! You may alter the way a `doit()` method is called by providing a [delegate](trait.Delegate.html) to the
//! [Method Builder](trait.CallBuilder.html) before making the final `doit()` call.
|
//! retry on failure.
//!
//! The [delegate trait](trait.Delegate.html) is default-implemented, allowing you to customize it with minimal effort.
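//!
//! A minimal sketch, assuming the usual shape of these generated clients (the unit struct,
//! the empty `Delegate` impl relying on the default methods, and the `delegate(...)` setter
//! are assumptions, not verified against this crate):
//!
//! ```ignore
//! struct MyDelegate;
//! impl Delegate for MyDelegate {} // keep the default behaviour, override only what you need
//!
//! let mut dlg = MyDelegate;
//! let result = hub.resource().activity(...)
//!              .delegate(&mut dlg)
//!              .doit();
//! ```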
//!
//! ## Optional Parts in Server-Requests
//!
//! All structures provided by this library are made to be [encodable](trait.RequestValue.html) and
//! [decodable](trait.ResponseResult.html) via *json*. Optionals are used to indicate that partial requests and responses
//! are valid.
//! Most optionals are considered [Parts](trait.Part.html), identifiable by name, which will be sent to
//! the server to indicate either the set parts of the request or the desired parts in the response.
//!
//! ## Builder Arguments
//!
//! Using [method builders](trait.CallBuilder.html), you are able to prepare an action call by repeatedly calling its methods.
//! These will always take a single argument, for which the following statements are true (a short sketch follows below).
//!
//! * [PODs][wiki-pod] are handed by copy
//! * strings are passed as `&str`
//! * [request values](trait.RequestValue.html) are moved
//!
//! Arguments will always be copied or cloned into the builder, to make them independent of their original lifetimes.
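//!
//! A short, hypothetical sketch of the three cases (the activity and its arguments are
//! placeholders, not a specific call of this API):
//!
//! ```ignore
//! let req = Query::default();            // request value: moved into the builder
//! let result = hub.resource().activity(req)
//!              .param("fields", "id")    // strings are passed as `&str`
//!              .doit();                  // PODs such as integers would be handed by copy
//! ```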
//!
//! [wiki-pod]: http://en.wikipedia.org/wiki/Plain_old_data_structure
//! [builder-pattern]: http://en.wikipedia.org/wiki/Builder_pattern
//! [google-go-api]: https://github.com/google/google-api-go-client
//!
//!
// Unused attributes happen thanks to defined, but unused structures
// We don't warn about this, as depending on the API, some data structures or facilities are never used.
// Instead of pre-determining this, we just disable the lint. It's manually tuned to not have any
// unused imports in fully featured APIs. Same with unused_mut....
#![allow(unused_imports, unused_mut, dead_code)]
include!(concat!(env!("OUT_DIR"), "/lib.rs"));
|
//! Respective methods will be called to provide progress information, as well as determine whether the system should
|
random_line_split
|
typeck-unsafe-always-share.rs
|
// Verify that UnsafeCell is *always*!Sync regardless if `T` is sync.
#![feature(negative_impls)]
use std::cell::UnsafeCell;
use std::marker::Sync;
|
u: UnsafeCell<T>
}
struct NoSync;
impl !Sync for NoSync {}
fn test<T: Sync>(s: T) {}
fn main() {
let us = UnsafeCell::new(MySync{u: UnsafeCell::new(0)});
test(us);
//~^ ERROR `UnsafeCell<MySync<{integer}>>` cannot be shared between threads safely
let uns = UnsafeCell::new(NoSync);
test(uns);
//~^ ERROR `UnsafeCell<NoSync>` cannot be shared between threads safely [E0277]
let ms = MySync{u: uns};
test(ms);
//~^ ERROR `UnsafeCell<NoSync>` cannot be shared between threads safely [E0277]
test(NoSync);
//~^ ERROR `NoSync` cannot be shared between threads safely [E0277]
}
|
struct MySync<T> {
|
random_line_split
|
typeck-unsafe-always-share.rs
|
// Verify that UnsafeCell is *always*!Sync regardless if `T` is sync.
#![feature(negative_impls)]
use std::cell::UnsafeCell;
use std::marker::Sync;
struct MySync<T> {
u: UnsafeCell<T>
}
struct NoSync;
impl !Sync for NoSync {}
fn
|
<T: Sync>(s: T) {}
fn main() {
let us = UnsafeCell::new(MySync{u: UnsafeCell::new(0)});
test(us);
//~^ ERROR `UnsafeCell<MySync<{integer}>>` cannot be shared between threads safely
let uns = UnsafeCell::new(NoSync);
test(uns);
//~^ ERROR `UnsafeCell<NoSync>` cannot be shared between threads safely [E0277]
let ms = MySync{u: uns};
test(ms);
//~^ ERROR `UnsafeCell<NoSync>` cannot be shared between threads safely [E0277]
test(NoSync);
//~^ ERROR `NoSync` cannot be shared between threads safely [E0277]
}
|
test
|
identifier_name
|
typeck-unsafe-always-share.rs
|
// Verify that UnsafeCell is *always*!Sync regardless if `T` is sync.
#![feature(negative_impls)]
use std::cell::UnsafeCell;
use std::marker::Sync;
struct MySync<T> {
u: UnsafeCell<T>
}
struct NoSync;
impl !Sync for NoSync {}
fn test<T: Sync>(s: T) {}
fn main()
|
{
let us = UnsafeCell::new(MySync{u: UnsafeCell::new(0)});
test(us);
//~^ ERROR `UnsafeCell<MySync<{integer}>>` cannot be shared between threads safely
let uns = UnsafeCell::new(NoSync);
test(uns);
//~^ ERROR `UnsafeCell<NoSync>` cannot be shared between threads safely [E0277]
let ms = MySync{u: uns};
test(ms);
//~^ ERROR `UnsafeCell<NoSync>` cannot be shared between threads safely [E0277]
test(NoSync);
//~^ ERROR `NoSync` cannot be shared between threads safely [E0277]
}
|
identifier_body
|
|
blkfile.rs
|
use std::collections::HashMap;
use std::convert::From;
use std::fs::{self, DirEntry, File};
use std::io::{self, BufReader, Seek, SeekFrom};
use std::path::{Path, PathBuf};
use byteorder::{LittleEndian, ReadBytesExt};
use crate::blockchain::parser::reader::BlockchainRead;
use crate::blockchain::proto::block::Block;
use crate::errors::{OpError, OpErrorKind, OpResult};
/// Holds all necessary data about a raw blk file
#[derive(Debug)]
pub struct BlkFile {
pub path: PathBuf,
pub size: u64,
}
impl BlkFile {
|
#[inline]
pub fn read_block(&self, offset: u64, version_id: u8) -> OpResult<Block> {
let mut f = BufReader::new(File::open(&self.path)?);
f.seek(SeekFrom::Start(offset - 4))?;
let block_size = f.read_u32::<LittleEndian>()?;
f.read_block(block_size, version_id)
}
/// Collects all blk*.dat paths in the given directory
pub fn from_path(path: &Path) -> OpResult<HashMap<usize, BlkFile>> {
info!(target: "blkfile", "Reading files from {}...", path.display());
let mut collected = HashMap::with_capacity(4000);
for entry in fs::read_dir(path)? {
match entry {
Ok(de) => {
let path = BlkFile::resolve_path(&de)?;
                    if !path.is_file() {
continue;
}
let file_name =
String::from(transform!(path.as_path().file_name().unwrap().to_str()));
// Check if it's a valid blk file
if let Some(index) = BlkFile::parse_blk_index(&file_name, "blk", ".dat") {
// Build BlkFile structures
let size = fs::metadata(path.as_path())?.len();
trace!(target: "blkfile", "Adding {}... (index: {}, size: {})", path.display(), index, size);
collected.insert(index, BlkFile::new(path, size));
}
}
Err(msg) => {
warn!(target: "blkfile", "Unable to read blk file!: {}", msg);
}
}
}
trace!(target: "blkfile", "Found {} blk files", collected.len());
if collected.is_empty() {
Err(OpError::new(OpErrorKind::RuntimeError).join_msg("No blk files found!"))
} else {
Ok(collected)
}
}
/// Resolves a PathBuf for the given entry.
/// Also resolves symlinks if present.
fn resolve_path(entry: &DirEntry) -> io::Result<PathBuf> {
if entry.file_type()?.is_symlink() {
fs::read_link(entry.path())
} else {
Ok(entry.path())
}
}
/// Identifies blk file and parses index
/// Returns None if this is no blk file
fn parse_blk_index(file_name: &str, prefix: &str, ext: &str) -> Option<usize> {
if file_name.starts_with(prefix) && file_name.ends_with(ext) {
// Parse blk_index, this means we extract 42 from blk000042.dat
file_name[prefix.len()..(file_name.len() - ext.len())]
.parse::<usize>()
.ok()
} else {
None
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_blk_index() {
let prefix = "blk";
let ext = ".dat";
assert_eq!(
0,
BlkFile::parse_blk_index("blk00000.dat", prefix, ext).unwrap()
);
assert_eq!(
6,
BlkFile::parse_blk_index("blk6.dat", prefix, ext).unwrap()
);
assert_eq!(
1202,
BlkFile::parse_blk_index("blk1202.dat", prefix, ext).unwrap()
);
assert_eq!(
13412451,
BlkFile::parse_blk_index("blk13412451.dat", prefix, ext).unwrap()
);
assert_eq!(
true,
BlkFile::parse_blk_index("blkindex.dat", prefix, ext).is_none()
);
assert_eq!(
true,
BlkFile::parse_blk_index("invalid.dat", prefix, ext).is_none()
);
}
}
|
#[inline]
fn new(path: PathBuf, size: u64) -> BlkFile {
BlkFile { path, size }
}
|
random_line_split
|
blkfile.rs
|
use std::collections::HashMap;
use std::convert::From;
use std::fs::{self, DirEntry, File};
use std::io::{self, BufReader, Seek, SeekFrom};
use std::path::{Path, PathBuf};
use byteorder::{LittleEndian, ReadBytesExt};
use crate::blockchain::parser::reader::BlockchainRead;
use crate::blockchain::proto::block::Block;
use crate::errors::{OpError, OpErrorKind, OpResult};
/// Holds all necessary data about a raw blk file
#[derive(Debug)]
pub struct BlkFile {
pub path: PathBuf,
pub size: u64,
}
impl BlkFile {
#[inline]
fn
|
(path: PathBuf, size: u64) -> BlkFile {
BlkFile { path, size }
}
#[inline]
pub fn read_block(&self, offset: u64, version_id: u8) -> OpResult<Block> {
let mut f = BufReader::new(File::open(&self.path)?);
f.seek(SeekFrom::Start(offset - 4))?;
let block_size = f.read_u32::<LittleEndian>()?;
f.read_block(block_size, version_id)
}
/// Collects all blk*.dat paths in the given directory
pub fn from_path(path: &Path) -> OpResult<HashMap<usize, BlkFile>> {
info!(target: "blkfile", "Reading files from {}...", path.display());
let mut collected = HashMap::with_capacity(4000);
for entry in fs::read_dir(path)? {
match entry {
Ok(de) => {
let path = BlkFile::resolve_path(&de)?;
                    if !path.is_file() {
continue;
}
let file_name =
String::from(transform!(path.as_path().file_name().unwrap().to_str()));
// Check if it's a valid blk file
if let Some(index) = BlkFile::parse_blk_index(&file_name, "blk", ".dat") {
// Build BlkFile structures
let size = fs::metadata(path.as_path())?.len();
trace!(target: "blkfile", "Adding {}... (index: {}, size: {})", path.display(), index, size);
collected.insert(index, BlkFile::new(path, size));
}
}
Err(msg) => {
warn!(target: "blkfile", "Unable to read blk file!: {}", msg);
}
}
}
trace!(target: "blkfile", "Found {} blk files", collected.len());
if collected.is_empty() {
Err(OpError::new(OpErrorKind::RuntimeError).join_msg("No blk files found!"))
} else {
Ok(collected)
}
}
/// Resolves a PathBuf for the given entry.
/// Also resolves symlinks if present.
fn resolve_path(entry: &DirEntry) -> io::Result<PathBuf> {
if entry.file_type()?.is_symlink() {
fs::read_link(entry.path())
} else {
Ok(entry.path())
}
}
/// Identifies blk file and parses index
/// Returns None if this is no blk file
fn parse_blk_index(file_name: &str, prefix: &str, ext: &str) -> Option<usize> {
if file_name.starts_with(prefix) && file_name.ends_with(ext) {
// Parse blk_index, this means we extract 42 from blk000042.dat
file_name[prefix.len()..(file_name.len() - ext.len())]
.parse::<usize>()
.ok()
} else {
None
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_blk_index() {
let prefix = "blk";
let ext = ".dat";
assert_eq!(
0,
BlkFile::parse_blk_index("blk00000.dat", prefix, ext).unwrap()
);
assert_eq!(
6,
BlkFile::parse_blk_index("blk6.dat", prefix, ext).unwrap()
);
assert_eq!(
1202,
BlkFile::parse_blk_index("blk1202.dat", prefix, ext).unwrap()
);
assert_eq!(
13412451,
BlkFile::parse_blk_index("blk13412451.dat", prefix, ext).unwrap()
);
assert_eq!(
true,
BlkFile::parse_blk_index("blkindex.dat", prefix, ext).is_none()
);
assert_eq!(
true,
BlkFile::parse_blk_index("invalid.dat", prefix, ext).is_none()
);
}
}
|
new
|
identifier_name
|
blkfile.rs
|
use std::collections::HashMap;
use std::convert::From;
use std::fs::{self, DirEntry, File};
use std::io::{self, BufReader, Seek, SeekFrom};
use std::path::{Path, PathBuf};
use byteorder::{LittleEndian, ReadBytesExt};
use crate::blockchain::parser::reader::BlockchainRead;
use crate::blockchain::proto::block::Block;
use crate::errors::{OpError, OpErrorKind, OpResult};
/// Holds all necessary data about a raw blk file
#[derive(Debug)]
pub struct BlkFile {
pub path: PathBuf,
pub size: u64,
}
impl BlkFile {
#[inline]
fn new(path: PathBuf, size: u64) -> BlkFile {
BlkFile { path, size }
}
#[inline]
pub fn read_block(&self, offset: u64, version_id: u8) -> OpResult<Block> {
let mut f = BufReader::new(File::open(&self.path)?);
f.seek(SeekFrom::Start(offset - 4))?;
let block_size = f.read_u32::<LittleEndian>()?;
f.read_block(block_size, version_id)
}
/// Collects all blk*.dat paths in the given directory
pub fn from_path(path: &Path) -> OpResult<HashMap<usize, BlkFile>> {
info!(target: "blkfile", "Reading files from {}...", path.display());
let mut collected = HashMap::with_capacity(4000);
for entry in fs::read_dir(path)? {
match entry {
Ok(de) => {
let path = BlkFile::resolve_path(&de)?;
                    if !path.is_file() {
continue;
}
let file_name =
String::from(transform!(path.as_path().file_name().unwrap().to_str()));
// Check if it's a valid blk file
if let Some(index) = BlkFile::parse_blk_index(&file_name, "blk", ".dat") {
// Build BlkFile structures
let size = fs::metadata(path.as_path())?.len();
trace!(target: "blkfile", "Adding {}... (index: {}, size: {})", path.display(), index, size);
collected.insert(index, BlkFile::new(path, size));
}
}
Err(msg) => {
warn!(target: "blkfile", "Unable to read blk file!: {}", msg);
}
}
}
trace!(target: "blkfile", "Found {} blk files", collected.len());
if collected.is_empty() {
Err(OpError::new(OpErrorKind::RuntimeError).join_msg("No blk files found!"))
} else {
Ok(collected)
}
}
/// Resolves a PathBuf for the given entry.
/// Also resolves symlinks if present.
fn resolve_path(entry: &DirEntry) -> io::Result<PathBuf> {
if entry.file_type()?.is_symlink() {
fs::read_link(entry.path())
} else {
Ok(entry.path())
}
}
/// Identifies blk file and parses index
/// Returns None if this is no blk file
fn parse_blk_index(file_name: &str, prefix: &str, ext: &str) -> Option<usize>
|
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_blk_index() {
let prefix = "blk";
let ext = ".dat";
assert_eq!(
0,
BlkFile::parse_blk_index("blk00000.dat", prefix, ext).unwrap()
);
assert_eq!(
6,
BlkFile::parse_blk_index("blk6.dat", prefix, ext).unwrap()
);
assert_eq!(
1202,
BlkFile::parse_blk_index("blk1202.dat", prefix, ext).unwrap()
);
assert_eq!(
13412451,
BlkFile::parse_blk_index("blk13412451.dat", prefix, ext).unwrap()
);
assert_eq!(
true,
BlkFile::parse_blk_index("blkindex.dat", prefix, ext).is_none()
);
assert_eq!(
true,
BlkFile::parse_blk_index("invalid.dat", prefix, ext).is_none()
);
}
}
|
{
if file_name.starts_with(prefix) && file_name.ends_with(ext) {
// Parse blk_index, this means we extract 42 from blk000042.dat
file_name[prefix.len()..(file_name.len() - ext.len())]
.parse::<usize>()
.ok()
} else {
None
}
}
|
identifier_body
|
blkfile.rs
|
use std::collections::HashMap;
use std::convert::From;
use std::fs::{self, DirEntry, File};
use std::io::{self, BufReader, Seek, SeekFrom};
use std::path::{Path, PathBuf};
use byteorder::{LittleEndian, ReadBytesExt};
use crate::blockchain::parser::reader::BlockchainRead;
use crate::blockchain::proto::block::Block;
use crate::errors::{OpError, OpErrorKind, OpResult};
/// Holds all necessary data about a raw blk file
#[derive(Debug)]
pub struct BlkFile {
pub path: PathBuf,
pub size: u64,
}
impl BlkFile {
#[inline]
fn new(path: PathBuf, size: u64) -> BlkFile {
BlkFile { path, size }
}
#[inline]
pub fn read_block(&self, offset: u64, version_id: u8) -> OpResult<Block> {
let mut f = BufReader::new(File::open(&self.path)?);
f.seek(SeekFrom::Start(offset - 4))?;
let block_size = f.read_u32::<LittleEndian>()?;
f.read_block(block_size, version_id)
}
/// Collects all blk*.dat paths in the given directory
pub fn from_path(path: &Path) -> OpResult<HashMap<usize, BlkFile>> {
info!(target: "blkfile", "Reading files from {}...", path.display());
let mut collected = HashMap::with_capacity(4000);
for entry in fs::read_dir(path)? {
match entry {
Ok(de) => {
let path = BlkFile::resolve_path(&de)?;
                    if !path.is_file() {
continue;
}
let file_name =
String::from(transform!(path.as_path().file_name().unwrap().to_str()));
// Check if it's a valid blk file
if let Some(index) = BlkFile::parse_blk_index(&file_name, "blk", ".dat") {
// Build BlkFile structures
let size = fs::metadata(path.as_path())?.len();
trace!(target: "blkfile", "Adding {}... (index: {}, size: {})", path.display(), index, size);
collected.insert(index, BlkFile::new(path, size));
}
}
Err(msg) => {
warn!(target: "blkfile", "Unable to read blk file!: {}", msg);
}
}
}
trace!(target: "blkfile", "Found {} blk files", collected.len());
if collected.is_empty() {
Err(OpError::new(OpErrorKind::RuntimeError).join_msg("No blk files found!"))
} else
|
}
/// Resolves a PathBuf for the given entry.
/// Also resolves symlinks if present.
fn resolve_path(entry: &DirEntry) -> io::Result<PathBuf> {
if entry.file_type()?.is_symlink() {
fs::read_link(entry.path())
} else {
Ok(entry.path())
}
}
/// Identifies blk file and parses index
/// Returns None if this is no blk file
fn parse_blk_index(file_name: &str, prefix: &str, ext: &str) -> Option<usize> {
if file_name.starts_with(prefix) && file_name.ends_with(ext) {
// Parse blk_index, this means we extract 42 from blk000042.dat
file_name[prefix.len()..(file_name.len() - ext.len())]
.parse::<usize>()
.ok()
} else {
None
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_blk_index() {
let prefix = "blk";
let ext = ".dat";
assert_eq!(
0,
BlkFile::parse_blk_index("blk00000.dat", prefix, ext).unwrap()
);
assert_eq!(
6,
BlkFile::parse_blk_index("blk6.dat", prefix, ext).unwrap()
);
assert_eq!(
1202,
BlkFile::parse_blk_index("blk1202.dat", prefix, ext).unwrap()
);
assert_eq!(
13412451,
BlkFile::parse_blk_index("blk13412451.dat", prefix, ext).unwrap()
);
assert_eq!(
true,
BlkFile::parse_blk_index("blkindex.dat", prefix, ext).is_none()
);
assert_eq!(
true,
BlkFile::parse_blk_index("invalid.dat", prefix, ext).is_none()
);
}
}
|
{
Ok(collected)
}
|
conditional_block
|
htmltablerowelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::HTMLTableRowElementBinding;
use dom::bindings::utils::{DOMString, ErrorResult};
use dom::document::AbstractDocument;
use dom::element::HTMLTableRowElementTypeId;
use dom::htmlelement::HTMLElement;
use dom::node::{AbstractNode, Node};
pub struct HTMLTableRowElement {
htmlelement: HTMLElement,
}
|
}
}
pub fn new(localName: ~str, document: AbstractDocument) -> AbstractNode {
let element = HTMLTableRowElement::new_inherited(localName, document);
Node::reflect_node(@mut element, document, HTMLTableRowElementBinding::Wrap)
}
}
impl HTMLTableRowElement {
pub fn RowIndex(&self) -> i32 {
0
}
pub fn GetRowIndex(&self) -> i32 {
0
}
pub fn SectionRowIndex(&self) -> i32 {
0
}
pub fn GetSectionRowIndex(&self) -> i32 {
0
}
pub fn DeleteCell(&mut self, _index: i32) -> ErrorResult {
Ok(())
}
pub fn Align(&self) -> DOMString {
~""
}
pub fn SetAlign(&self, _align: DOMString) -> ErrorResult {
Ok(())
}
pub fn Ch(&self) -> DOMString {
~""
}
pub fn SetCh(&self, _ch: DOMString) -> ErrorResult {
Ok(())
}
pub fn ChOff(&self) -> DOMString {
~""
}
pub fn SetChOff(&self, _ch_off: DOMString) -> ErrorResult {
Ok(())
}
pub fn VAlign(&self) -> DOMString {
~""
}
pub fn SetVAlign(&self, _v_align: DOMString) -> ErrorResult {
Ok(())
}
pub fn BgColor(&self) -> DOMString {
~""
}
pub fn SetBgColor(&self, _bg_color: DOMString) -> ErrorResult {
Ok(())
}
}
|
impl HTMLTableRowElement {
pub fn new_inherited(localName: ~str, document: AbstractDocument) -> HTMLTableRowElement {
HTMLTableRowElement {
htmlelement: HTMLElement::new_inherited(HTMLTableRowElementTypeId, localName, document)
|
random_line_split
|
htmltablerowelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::HTMLTableRowElementBinding;
use dom::bindings::utils::{DOMString, ErrorResult};
use dom::document::AbstractDocument;
use dom::element::HTMLTableRowElementTypeId;
use dom::htmlelement::HTMLElement;
use dom::node::{AbstractNode, Node};
pub struct HTMLTableRowElement {
htmlelement: HTMLElement,
}
impl HTMLTableRowElement {
pub fn new_inherited(localName: ~str, document: AbstractDocument) -> HTMLTableRowElement {
HTMLTableRowElement {
htmlelement: HTMLElement::new_inherited(HTMLTableRowElementTypeId, localName, document)
}
}
pub fn new(localName: ~str, document: AbstractDocument) -> AbstractNode {
let element = HTMLTableRowElement::new_inherited(localName, document);
Node::reflect_node(@mut element, document, HTMLTableRowElementBinding::Wrap)
}
}
impl HTMLTableRowElement {
pub fn RowIndex(&self) -> i32 {
0
}
pub fn GetRowIndex(&self) -> i32 {
0
}
pub fn SectionRowIndex(&self) -> i32 {
0
}
pub fn GetSectionRowIndex(&self) -> i32 {
0
}
pub fn DeleteCell(&mut self, _index: i32) -> ErrorResult {
Ok(())
}
pub fn Align(&self) -> DOMString {
~""
}
pub fn SetAlign(&self, _align: DOMString) -> ErrorResult {
Ok(())
}
pub fn Ch(&self) -> DOMString {
~""
}
pub fn SetCh(&self, _ch: DOMString) -> ErrorResult {
Ok(())
}
pub fn ChOff(&self) -> DOMString {
~""
}
pub fn SetChOff(&self, _ch_off: DOMString) -> ErrorResult {
Ok(())
}
pub fn VAlign(&self) -> DOMString {
~""
}
pub fn SetVAlign(&self, _v_align: DOMString) -> ErrorResult {
Ok(())
}
pub fn BgColor(&self) -> DOMString {
~""
}
pub fn
|
(&self, _bg_color: DOMString) -> ErrorResult {
Ok(())
}
}
|
SetBgColor
|
identifier_name
|
htmltablerowelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::HTMLTableRowElementBinding;
use dom::bindings::utils::{DOMString, ErrorResult};
use dom::document::AbstractDocument;
use dom::element::HTMLTableRowElementTypeId;
use dom::htmlelement::HTMLElement;
use dom::node::{AbstractNode, Node};
pub struct HTMLTableRowElement {
htmlelement: HTMLElement,
}
impl HTMLTableRowElement {
pub fn new_inherited(localName: ~str, document: AbstractDocument) -> HTMLTableRowElement {
HTMLTableRowElement {
htmlelement: HTMLElement::new_inherited(HTMLTableRowElementTypeId, localName, document)
}
}
pub fn new(localName: ~str, document: AbstractDocument) -> AbstractNode {
let element = HTMLTableRowElement::new_inherited(localName, document);
Node::reflect_node(@mut element, document, HTMLTableRowElementBinding::Wrap)
}
}
impl HTMLTableRowElement {
pub fn RowIndex(&self) -> i32 {
0
}
pub fn GetRowIndex(&self) -> i32 {
0
}
pub fn SectionRowIndex(&self) -> i32 {
0
}
pub fn GetSectionRowIndex(&self) -> i32 {
0
}
pub fn DeleteCell(&mut self, _index: i32) -> ErrorResult {
Ok(())
}
pub fn Align(&self) -> DOMString {
~""
}
pub fn SetAlign(&self, _align: DOMString) -> ErrorResult {
Ok(())
}
pub fn Ch(&self) -> DOMString {
~""
}
pub fn SetCh(&self, _ch: DOMString) -> ErrorResult {
Ok(())
}
pub fn ChOff(&self) -> DOMString {
~""
}
pub fn SetChOff(&self, _ch_off: DOMString) -> ErrorResult {
Ok(())
}
pub fn VAlign(&self) -> DOMString {
~""
}
pub fn SetVAlign(&self, _v_align: DOMString) -> ErrorResult
|
pub fn BgColor(&self) -> DOMString {
~""
}
pub fn SetBgColor(&self, _bg_color: DOMString) -> ErrorResult {
Ok(())
}
}
|
{
Ok(())
}
|
identifier_body
|
bug-2470-bounds-check-overflow.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:index out of bounds
use std::mem;
fn main()
|
}
|
{
// This should cause a bounds-check panic, but may not if we do our
// bounds checking by comparing the scaled index to the vector's
// address-bounds, since we've scaled the index to wrap around to the
// address of the 0th cell in the array (even though the index is
// huge).
let x = vec![1_usize, 2_usize, 3_usize];
let base = x.as_ptr() as usize;
let idx = base / mem::size_of::<usize>();
println!("ov1 base = 0x{:x}", base);
println!("ov1 idx = 0x{:x}", idx);
println!("ov1 sizeof::<usize>() = 0x{:x}", mem::size_of::<usize>());
println!("ov1 idx * sizeof::<usize>() = 0x{:x}",
idx * mem::size_of::<usize>());
// This should panic.
println!("ov1 0x{:x}", x[idx]);
|
identifier_body
|
bug-2470-bounds-check-overflow.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:index out of bounds
use std::mem;
fn main() {
// This should cause a bounds-check panic, but may not if we do our
// bounds checking by comparing the scaled index to the vector's
// address-bounds, since we've scaled the index to wrap around to the
// address of the 0th cell in the array (even though the index is
// huge).
let x = vec![1_usize, 2_usize, 3_usize];
|
println!("ov1 idx * sizeof::<usize>() = 0x{:x}",
idx * mem::size_of::<usize>());
// This should panic.
println!("ov1 0x{:x}", x[idx]);
}
|
let base = x.as_ptr() as usize;
let idx = base / mem::size_of::<usize>();
println!("ov1 base = 0x{:x}", base);
println!("ov1 idx = 0x{:x}", idx);
println!("ov1 sizeof::<usize>() = 0x{:x}", mem::size_of::<usize>());
|
random_line_split
|
bug-2470-bounds-check-overflow.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:index out of bounds
use std::mem;
fn
|
() {
// This should cause a bounds-check panic, but may not if we do our
// bounds checking by comparing the scaled index to the vector's
// address-bounds, since we've scaled the index to wrap around to the
// address of the 0th cell in the array (even though the index is
// huge).
let x = vec![1_usize, 2_usize, 3_usize];
let base = x.as_ptr() as usize;
let idx = base / mem::size_of::<usize>();
println!("ov1 base = 0x{:x}", base);
println!("ov1 idx = 0x{:x}", idx);
println!("ov1 sizeof::<usize>() = 0x{:x}", mem::size_of::<usize>());
println!("ov1 idx * sizeof::<usize>() = 0x{:x}",
idx * mem::size_of::<usize>());
// This should panic.
println!("ov1 0x{:x}", x[idx]);
}
|
main
|
identifier_name
|
svgelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::inheritance::Castable;
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::Element;
use dom::virtualmethods::VirtualMethods;
use dom_struct::dom_struct;
use html5ever_atoms::LocalName;
use style::element_state::ElementState;
#[dom_struct]
pub struct SVGElement {
element: Element,
}
impl SVGElement {
pub fn
|
(state: ElementState, tag_name: LocalName,
prefix: Option<DOMString>, document: &Document)
-> SVGElement {
SVGElement {
element:
Element::new_inherited_with_state(state, tag_name, ns!(svg), prefix, document),
}
}
}
impl VirtualMethods for SVGElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<Element>() as &VirtualMethods)
}
}
|
new_inherited_with_state
|
identifier_name
|
svgelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::inheritance::Castable;
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::Element;
use dom::virtualmethods::VirtualMethods;
use dom_struct::dom_struct;
use html5ever_atoms::LocalName;
use style::element_state::ElementState;
#[dom_struct]
pub struct SVGElement {
element: Element,
}
impl SVGElement {
|
Element::new_inherited_with_state(state, tag_name, ns!(svg), prefix, document),
}
}
}
impl VirtualMethods for SVGElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<Element>() as &VirtualMethods)
}
}
|
pub fn new_inherited_with_state(state: ElementState, tag_name: LocalName,
prefix: Option<DOMString>, document: &Document)
-> SVGElement {
SVGElement {
element:
|
random_line_split
|
slice.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused_variables)]
// Test slicing sugar.
extern crate core;
use core::ops::{Index, IndexMut, Range, RangeTo, RangeFrom, RangeFull};
static mut COUNT: usize = 0;
struct Foo;
impl Index<Range<Foo>> for Foo {
type Output = Foo;
fn
|
(&self, index: Range<Foo>) -> &Foo {
unsafe { COUNT += 1; }
self
}
}
impl Index<RangeTo<Foo>> for Foo {
type Output = Foo;
fn index(&self, index: RangeTo<Foo>) -> &Foo {
unsafe { COUNT += 1; }
self
}
}
impl Index<RangeFrom<Foo>> for Foo {
type Output = Foo;
fn index(&self, index: RangeFrom<Foo>) -> &Foo {
unsafe { COUNT += 1; }
self
}
}
impl Index<RangeFull> for Foo {
type Output = Foo;
fn index(&self, _index: RangeFull) -> &Foo {
unsafe { COUNT += 1; }
self
}
}
impl IndexMut<Range<Foo>> for Foo {
fn index_mut(&mut self, index: Range<Foo>) -> &mut Foo {
unsafe { COUNT += 1; }
self
}
}
impl IndexMut<RangeTo<Foo>> for Foo {
fn index_mut(&mut self, index: RangeTo<Foo>) -> &mut Foo {
unsafe { COUNT += 1; }
self
}
}
impl IndexMut<RangeFrom<Foo>> for Foo {
fn index_mut(&mut self, index: RangeFrom<Foo>) -> &mut Foo {
unsafe { COUNT += 1; }
self
}
}
impl IndexMut<RangeFull> for Foo {
fn index_mut(&mut self, _index: RangeFull) -> &mut Foo {
unsafe { COUNT += 1; }
self
}
}
fn main() {
let mut x = Foo;
&x[..];
&x[Foo..];
&x[..Foo];
&x[Foo..Foo];
&mut x[..];
&mut x[Foo..];
&mut x[..Foo];
&mut x[Foo..Foo];
unsafe {
assert_eq!(COUNT, 8);
}
}
|
index
|
identifier_name
|
slice.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused_variables)]
// Test slicing sugar.
extern crate core;
use core::ops::{Index, IndexMut, Range, RangeTo, RangeFrom, RangeFull};
static mut COUNT: usize = 0;
struct Foo;
impl Index<Range<Foo>> for Foo {
type Output = Foo;
fn index(&self, index: Range<Foo>) -> &Foo
|
}
impl Index<RangeTo<Foo>> for Foo {
type Output = Foo;
fn index(&self, index: RangeTo<Foo>) -> &Foo {
unsafe { COUNT += 1; }
self
}
}
impl Index<RangeFrom<Foo>> for Foo {
type Output = Foo;
fn index(&self, index: RangeFrom<Foo>) -> &Foo {
unsafe { COUNT += 1; }
self
}
}
impl Index<RangeFull> for Foo {
type Output = Foo;
fn index(&self, _index: RangeFull) -> &Foo {
unsafe { COUNT += 1; }
self
}
}
impl IndexMut<Range<Foo>> for Foo {
fn index_mut(&mut self, index: Range<Foo>) -> &mut Foo {
unsafe { COUNT += 1; }
self
}
}
impl IndexMut<RangeTo<Foo>> for Foo {
fn index_mut(&mut self, index: RangeTo<Foo>) -> &mut Foo {
unsafe { COUNT += 1; }
self
}
}
impl IndexMut<RangeFrom<Foo>> for Foo {
fn index_mut(&mut self, index: RangeFrom<Foo>) -> &mut Foo {
unsafe { COUNT += 1; }
self
}
}
impl IndexMut<RangeFull> for Foo {
fn index_mut(&mut self, _index: RangeFull) -> &mut Foo {
unsafe { COUNT += 1; }
self
}
}
fn main() {
let mut x = Foo;
&x[..];
&x[Foo..];
&x[..Foo];
&x[Foo..Foo];
&mut x[..];
&mut x[Foo..];
&mut x[..Foo];
&mut x[Foo..Foo];
unsafe {
assert_eq!(COUNT, 8);
}
}
|
{
unsafe { COUNT += 1; }
self
}
|
identifier_body
|
slice.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused_variables)]
// Test slicing sugar.
extern crate core;
use core::ops::{Index, IndexMut, Range, RangeTo, RangeFrom, RangeFull};
static mut COUNT: usize = 0;
struct Foo;
impl Index<Range<Foo>> for Foo {
type Output = Foo;
fn index(&self, index: Range<Foo>) -> &Foo {
unsafe { COUNT += 1; }
self
}
}
impl Index<RangeTo<Foo>> for Foo {
type Output = Foo;
fn index(&self, index: RangeTo<Foo>) -> &Foo {
unsafe { COUNT += 1; }
self
}
}
impl Index<RangeFrom<Foo>> for Foo {
type Output = Foo;
fn index(&self, index: RangeFrom<Foo>) -> &Foo {
unsafe { COUNT += 1; }
self
}
}
impl Index<RangeFull> for Foo {
type Output = Foo;
fn index(&self, _index: RangeFull) -> &Foo {
unsafe { COUNT += 1; }
self
}
}
impl IndexMut<Range<Foo>> for Foo {
fn index_mut(&mut self, index: Range<Foo>) -> &mut Foo {
unsafe { COUNT += 1; }
self
}
}
impl IndexMut<RangeTo<Foo>> for Foo {
fn index_mut(&mut self, index: RangeTo<Foo>) -> &mut Foo {
unsafe { COUNT += 1; }
self
}
}
impl IndexMut<RangeFrom<Foo>> for Foo {
fn index_mut(&mut self, index: RangeFrom<Foo>) -> &mut Foo {
unsafe { COUNT += 1; }
self
}
}
impl IndexMut<RangeFull> for Foo {
fn index_mut(&mut self, _index: RangeFull) -> &mut Foo {
unsafe { COUNT += 1; }
self
}
|
fn main() {
let mut x = Foo;
&x[..];
&x[Foo..];
&x[..Foo];
&x[Foo..Foo];
&mut x[..];
&mut x[Foo..];
&mut x[..Foo];
&mut x[Foo..Foo];
unsafe {
assert_eq!(COUNT, 8);
}
}
|
}
|
random_line_split
|
image.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic types for the handling of [images].
//!
//! [images]: https://drafts.csswg.org/css-images/#image-values
use custom_properties;
use servo_arc::Arc;
use std::fmt::{self, Write};
use style_traits::{CssWriter, ToCss};
use values::serialize_atom_identifier;
use Atom;
/// An [image].
///
/// [image]: https://drafts.csswg.org/css-images/#image-values
#[derive(Clone, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToComputedValue)]
pub enum Image<Gradient, MozImageRect, ImageUrl> {
/// A `<url()>` image.
Url(ImageUrl),
/// A `<gradient>` image. Gradients are rather large, and not nearly as
/// common as urls, so we box them here to keep the size of this enum sane.
Gradient(Box<Gradient>),
/// A `-moz-image-rect` image. Also fairly large and rare.
Rect(Box<MozImageRect>),
/// A `-moz-element(# <element-id>)`
#[css(function = "-moz-element")]
Element(Atom),
/// A paint worklet image.
/// <https://drafts.css-houdini.org/css-paint-api/>
#[cfg(feature = "servo")]
PaintWorklet(PaintWorklet),
}
/// A CSS gradient.
/// <https://drafts.csswg.org/css-images/#gradients>
#[derive(Clone, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
pub struct Gradient<LineDirection, Length, LengthOrPercentage, Position, Color, Angle> {
/// Gradients can be linear or radial.
pub kind: GradientKind<LineDirection, Length, LengthOrPercentage, Position, Angle>,
/// The color stops and interpolation hints.
pub items: Vec<GradientItem<Color, LengthOrPercentage>>,
/// True if this is a repeating gradient.
pub repeating: bool,
/// Compatibility mode.
pub compat_mode: CompatMode,
}
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
/// Whether we used the modern notation or the compatibility `-webkit`, `-moz` prefixes.
pub enum CompatMode {
/// Modern syntax.
Modern,
/// `-webkit` prefix.
WebKit,
/// `-moz` prefix
Moz,
}
/// A gradient kind.
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
pub enum GradientKind<LineDirection, Length, LengthOrPercentage, Position, Angle> {
/// A linear gradient.
Linear(LineDirection),
/// A radial gradient.
Radial(
EndingShape<Length, LengthOrPercentage>,
Position,
Option<Angle>,
),
}
/// A radial gradient's ending shape.
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue, ToCss)]
pub enum EndingShape<Length, LengthOrPercentage> {
/// A circular gradient.
Circle(Circle<Length>),
/// An elliptic gradient.
Ellipse(Ellipse<LengthOrPercentage>),
}
/// A circle shape.
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
pub enum Circle<Length> {
/// A circle radius.
Radius(Length),
/// A circle extent.
Extent(ShapeExtent),
}
/// An ellipse shape.
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue, ToCss)]
pub enum Ellipse<LengthOrPercentage> {
/// An ellipse pair of radii.
Radii(LengthOrPercentage, LengthOrPercentage),
/// An ellipse extent.
Extent(ShapeExtent),
}
/// <https://drafts.csswg.org/css-images/#typedef-extent-keyword>
#[allow(missing_docs)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
#[derive(Clone, Copy, Debug, Eq, MallocSizeOf, Parse, PartialEq, ToComputedValue, ToCss)]
pub enum ShapeExtent {
ClosestSide,
FarthestSide,
ClosestCorner,
FarthestCorner,
Contain,
Cover,
}
/// A gradient item.
/// <https://drafts.csswg.org/css-images-4/#color-stop-syntax>
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue, ToCss)]
pub enum GradientItem<Color, LengthOrPercentage> {
/// A color stop.
ColorStop(ColorStop<Color, LengthOrPercentage>),
/// An interpolation hint.
InterpolationHint(LengthOrPercentage),
}
/// A color stop.
/// <https://drafts.csswg.org/css-images/#typedef-color-stop-list>
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue, ToCss)]
pub struct ColorStop<Color, LengthOrPercentage> {
/// The color of this stop.
pub color: Color,
/// The position of this stop.
pub position: Option<LengthOrPercentage>,
}
/// Specified values for a paint worklet.
/// <https://drafts.css-houdini.org/css-paint-api/>
#[cfg_attr(feature = "servo", derive(MallocSizeOf))]
#[derive(Clone, Debug, PartialEq, ToComputedValue)]
pub struct PaintWorklet {
/// The name the worklet was registered with.
pub name: Atom,
/// The arguments for the worklet.
/// TODO: store a parsed representation of the arguments.
#[cfg_attr(feature = "servo", ignore_malloc_size_of = "Arc")]
pub arguments: Vec<Arc<custom_properties::SpecifiedValue>>,
}
impl ::style_traits::SpecifiedValueInfo for PaintWorklet {}
impl ToCss for PaintWorklet {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
dest.write_str("paint(")?;
serialize_atom_identifier(&self.name, dest)?;
for argument in &self.arguments {
dest.write_str(", ")?;
argument.to_css(dest)?;
}
dest.write_str(")")
}
}
/// Values for `moz-image-rect`.
///
/// `-moz-image-rect(<uri>, top, right, bottom, left);`
#[allow(missing_docs)]
#[css(comma, function)]
#[derive(Clone, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToComputedValue, ToCss)]
pub struct MozImageRect<NumberOrPercentage, MozImageRectUrl> {
pub url: MozImageRectUrl,
pub top: NumberOrPercentage,
pub right: NumberOrPercentage,
pub bottom: NumberOrPercentage,
pub left: NumberOrPercentage,
}
impl<G, R, U> fmt::Debug for Image<G, R, U>
where
G: ToCss,
R: ToCss,
U: ToCss,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.to_css(&mut CssWriter::new(f))
}
}
impl<G, R, U> ToCss for Image<G, R, U>
where
G: ToCss,
R: ToCss,
U: ToCss,
{
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
match *self {
Image::Url(ref url) => url.to_css(dest),
Image::Gradient(ref gradient) => gradient.to_css(dest),
Image::Rect(ref rect) => rect.to_css(dest),
#[cfg(feature = "servo")]
Image::PaintWorklet(ref paint_worklet) => paint_worklet.to_css(dest),
Image::Element(ref selector) => {
dest.write_str("-moz-element(#")?;
serialize_atom_identifier(selector, dest)?;
dest.write_str(")")
},
}
}
}
impl<D, L, LoP, P, C, A> ToCss for Gradient<D, L, LoP, P, C, A>
where
D: LineDirection,
L: ToCss,
LoP: ToCss,
P: ToCss,
C: ToCss,
A: ToCss,
{
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
match self.compat_mode {
CompatMode::WebKit => dest.write_str("-webkit-")?,
CompatMode::Moz => dest.write_str("-moz-")?,
_ => {},
}
if self.repeating {
dest.write_str("repeating-")?;
}
dest.write_str(self.kind.label())?;
dest.write_str("-gradient(")?;
let mut skip_comma = match self.kind {
GradientKind::Linear(ref direction) if direction.points_downwards(self.compat_mode) => {
true
},
GradientKind::Linear(ref direction) => {
direction.to_css(dest, self.compat_mode)?;
false
},
GradientKind::Radial(ref shape, ref position, ref angle) =>
|
dest.write_str(", ")?;
shape.to_css(dest)?;
}
}
false
}
,
};
for item in &self.items {
            if !skip_comma {
dest.write_str(", ")?;
}
skip_comma = false;
item.to_css(dest)?;
}
dest.write_str(")")
}
}
impl<D, L, LoP, P, A> GradientKind<D, L, LoP, P, A> {
fn label(&self) -> &str {
match *self {
GradientKind::Linear(..) => "linear",
GradientKind::Radial(..) => "radial",
}
}
}
/// The direction of a linear gradient.
pub trait LineDirection {
/// Whether this direction points towards, and thus can be omitted.
fn points_downwards(&self, compat_mode: CompatMode) -> bool;
/// Serialises this direction according to the compatibility mode.
fn to_css<W>(&self, dest: &mut CssWriter<W>, compat_mode: CompatMode) -> fmt::Result
where
W: Write;
}
impl<L> ToCss for Circle<L>
where
L: ToCss,
{
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
match *self {
Circle::Extent(ShapeExtent::FarthestCorner) | Circle::Extent(ShapeExtent::Cover) => {
dest.write_str("circle")
},
Circle::Extent(keyword) => {
dest.write_str("circle ")?;
keyword.to_css(dest)
},
Circle::Radius(ref length) => length.to_css(dest),
}
}
}
|
{
let omit_shape = match *shape {
EndingShape::Ellipse(Ellipse::Extent(ShapeExtent::Cover)) |
EndingShape::Ellipse(Ellipse::Extent(ShapeExtent::FarthestCorner)) => true,
_ => false,
};
if self.compat_mode == CompatMode::Modern {
if !omit_shape {
shape.to_css(dest)?;
dest.write_str(" ")?;
}
dest.write_str("at ")?;
position.to_css(dest)?;
} else {
position.to_css(dest)?;
if let Some(ref a) = *angle {
dest.write_str(" ")?;
a.to_css(dest)?;
}
if !omit_shape {
|
conditional_block
|
image.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic types for the handling of [images].
//!
//! [images]: https://drafts.csswg.org/css-images/#image-values
use custom_properties;
use servo_arc::Arc;
use std::fmt::{self, Write};
use style_traits::{CssWriter, ToCss};
use values::serialize_atom_identifier;
use Atom;
/// An [image].
///
/// [image]: https://drafts.csswg.org/css-images/#image-values
#[derive(Clone, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToComputedValue)]
pub enum Image<Gradient, MozImageRect, ImageUrl> {
/// A `<url()>` image.
Url(ImageUrl),
/// A `<gradient>` image. Gradients are rather large, and not nearly as
/// common as urls, so we box them here to keep the size of this enum sane.
Gradient(Box<Gradient>),
/// A `-moz-image-rect` image. Also fairly large and rare.
Rect(Box<MozImageRect>),
/// A `-moz-element(# <element-id>)`
#[css(function = "-moz-element")]
Element(Atom),
/// A paint worklet image.
/// <https://drafts.css-houdini.org/css-paint-api/>
#[cfg(feature = "servo")]
PaintWorklet(PaintWorklet),
}
/// A CSS gradient.
/// <https://drafts.csswg.org/css-images/#gradients>
#[derive(Clone, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
pub struct Gradient<LineDirection, Length, LengthOrPercentage, Position, Color, Angle> {
/// Gradients can be linear or radial.
pub kind: GradientKind<LineDirection, Length, LengthOrPercentage, Position, Angle>,
/// The color stops and interpolation hints.
pub items: Vec<GradientItem<Color, LengthOrPercentage>>,
/// True if this is a repeating gradient.
pub repeating: bool,
/// Compatibility mode.
pub compat_mode: CompatMode,
}
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
/// Whether we used the modern notation or the compatibility `-webkit`, `-moz` prefixes.
pub enum CompatMode {
/// Modern syntax.
Modern,
/// `-webkit` prefix.
WebKit,
/// `-moz` prefix
Moz,
}
/// A gradient kind.
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
pub enum GradientKind<LineDirection, Length, LengthOrPercentage, Position, Angle> {
/// A linear gradient.
Linear(LineDirection),
/// A radial gradient.
Radial(
EndingShape<Length, LengthOrPercentage>,
Position,
Option<Angle>,
),
}
/// A radial gradient's ending shape.
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue, ToCss)]
pub enum EndingShape<Length, LengthOrPercentage> {
/// A circular gradient.
Circle(Circle<Length>),
/// An elliptic gradient.
Ellipse(Ellipse<LengthOrPercentage>),
}
/// A circle shape.
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
pub enum Circle<Length> {
/// A circle radius.
Radius(Length),
/// A circle extent.
Extent(ShapeExtent),
}
/// An ellipse shape.
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue, ToCss)]
pub enum Ellipse<LengthOrPercentage> {
/// An ellipse pair of radii.
Radii(LengthOrPercentage, LengthOrPercentage),
/// An ellipse extent.
Extent(ShapeExtent),
}
/// <https://drafts.csswg.org/css-images/#typedef-extent-keyword>
#[allow(missing_docs)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
#[derive(Clone, Copy, Debug, Eq, MallocSizeOf, Parse, PartialEq, ToComputedValue, ToCss)]
pub enum ShapeExtent {
ClosestSide,
FarthestSide,
ClosestCorner,
FarthestCorner,
Contain,
Cover,
}
/// A gradient item.
/// <https://drafts.csswg.org/css-images-4/#color-stop-syntax>
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue, ToCss)]
pub enum GradientItem<Color, LengthOrPercentage> {
/// A color stop.
ColorStop(ColorStop<Color, LengthOrPercentage>),
/// An interpolation hint.
InterpolationHint(LengthOrPercentage),
}
/// A color stop.
/// <https://drafts.csswg.org/css-images/#typedef-color-stop-list>
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue, ToCss)]
pub struct ColorStop<Color, LengthOrPercentage> {
/// The color of this stop.
pub color: Color,
/// The position of this stop.
pub position: Option<LengthOrPercentage>,
}
/// Specified values for a paint worklet.
/// <https://drafts.css-houdini.org/css-paint-api/>
#[cfg_attr(feature = "servo", derive(MallocSizeOf))]
#[derive(Clone, Debug, PartialEq, ToComputedValue)]
pub struct PaintWorklet {
/// The name the worklet was registered with.
pub name: Atom,
/// The arguments for the worklet.
/// TODO: store a parsed representation of the arguments.
#[cfg_attr(feature = "servo", ignore_malloc_size_of = "Arc")]
pub arguments: Vec<Arc<custom_properties::SpecifiedValue>>,
}
impl ::style_traits::SpecifiedValueInfo for PaintWorklet {}
impl ToCss for PaintWorklet {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
dest.write_str("paint(")?;
serialize_atom_identifier(&self.name, dest)?;
for argument in &self.arguments {
dest.write_str(", ")?;
argument.to_css(dest)?;
}
dest.write_str(")")
}
}
/// Values for `moz-image-rect`.
///
/// `-moz-image-rect(<uri>, top, right, bottom, left);`
#[allow(missing_docs)]
#[css(comma, function)]
#[derive(Clone, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToComputedValue, ToCss)]
pub struct MozImageRect<NumberOrPercentage, MozImageRectUrl> {
pub url: MozImageRectUrl,
pub top: NumberOrPercentage,
pub right: NumberOrPercentage,
pub bottom: NumberOrPercentage,
pub left: NumberOrPercentage,
}
impl<G, R, U> fmt::Debug for Image<G, R, U>
where
G: ToCss,
R: ToCss,
U: ToCss,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.to_css(&mut CssWriter::new(f))
}
}
impl<G, R, U> ToCss for Image<G, R, U>
where
G: ToCss,
R: ToCss,
U: ToCss,
{
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
match *self {
Image::Url(ref url) => url.to_css(dest),
Image::Gradient(ref gradient) => gradient.to_css(dest),
Image::Rect(ref rect) => rect.to_css(dest),
#[cfg(feature = "servo")]
Image::PaintWorklet(ref paint_worklet) => paint_worklet.to_css(dest),
Image::Element(ref selector) => {
dest.write_str("-moz-element(#")?;
serialize_atom_identifier(selector, dest)?;
dest.write_str(")")
},
}
}
}
impl<D, L, LoP, P, C, A> ToCss for Gradient<D, L, LoP, P, C, A>
where
D: LineDirection,
L: ToCss,
LoP: ToCss,
P: ToCss,
C: ToCss,
A: ToCss,
{
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
match self.compat_mode {
CompatMode::WebKit => dest.write_str("-webkit-")?,
CompatMode::Moz => dest.write_str("-moz-")?,
_ => {},
}
if self.repeating {
dest.write_str("repeating-")?;
}
dest.write_str(self.kind.label())?;
dest.write_str("-gradient(")?;
let mut skip_comma = match self.kind {
GradientKind::Linear(ref direction) if direction.points_downwards(self.compat_mode) => {
true
},
GradientKind::Linear(ref direction) => {
direction.to_css(dest, self.compat_mode)?;
false
},
GradientKind::Radial(ref shape, ref position, ref angle) => {
let omit_shape = match *shape {
EndingShape::Ellipse(Ellipse::Extent(ShapeExtent::Cover)) |
EndingShape::Ellipse(Ellipse::Extent(ShapeExtent::FarthestCorner)) => true,
_ => false,
};
if self.compat_mode == CompatMode::Modern {
                    if !omit_shape {
shape.to_css(dest)?;
dest.write_str(" ")?;
}
dest.write_str("at ")?;
position.to_css(dest)?;
} else {
position.to_css(dest)?;
if let Some(ref a) = *angle {
dest.write_str(" ")?;
a.to_css(dest)?;
}
                    if !omit_shape {
dest.write_str(", ")?;
shape.to_css(dest)?;
}
}
false
},
};
for item in &self.items {
            if !skip_comma {
dest.write_str(", ")?;
}
skip_comma = false;
item.to_css(dest)?;
}
dest.write_str(")")
}
}
impl<D, L, LoP, P, A> GradientKind<D, L, LoP, P, A> {
fn label(&self) -> &str {
match *self {
GradientKind::Linear(..) => "linear",
GradientKind::Radial(..) => "radial",
}
}
}
/// The direction of a linear gradient.
pub trait LineDirection {
/// Whether this direction points towards, and thus can be omitted.
fn points_downwards(&self, compat_mode: CompatMode) -> bool;
|
impl<L> ToCss for Circle<L>
where
L: ToCss,
{
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
match *self {
Circle::Extent(ShapeExtent::FarthestCorner) | Circle::Extent(ShapeExtent::Cover) => {
dest.write_str("circle")
},
Circle::Extent(keyword) => {
dest.write_str("circle ")?;
keyword.to_css(dest)
},
Circle::Radius(ref length) => length.to_css(dest),
}
}
}
|
/// Serialises this direction according to the compatibility mode.
fn to_css<W>(&self, dest: &mut CssWriter<W>, compat_mode: CompatMode) -> fmt::Result
where
W: Write;
}
|
random_line_split
|
image.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic types for the handling of [images].
//!
//! [images]: https://drafts.csswg.org/css-images/#image-values
use custom_properties;
use servo_arc::Arc;
use std::fmt::{self, Write};
use style_traits::{CssWriter, ToCss};
use values::serialize_atom_identifier;
use Atom;
/// An [image].
///
/// [image]: https://drafts.csswg.org/css-images/#image-values
#[derive(Clone, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToComputedValue)]
pub enum Image<Gradient, MozImageRect, ImageUrl> {
/// A `<url()>` image.
Url(ImageUrl),
/// A `<gradient>` image. Gradients are rather large, and not nearly as
/// common as urls, so we box them here to keep the size of this enum sane.
Gradient(Box<Gradient>),
/// A `-moz-image-rect` image. Also fairly large and rare.
Rect(Box<MozImageRect>),
/// A `-moz-element(# <element-id>)`
#[css(function = "-moz-element")]
Element(Atom),
/// A paint worklet image.
/// <https://drafts.css-houdini.org/css-paint-api/>
#[cfg(feature = "servo")]
PaintWorklet(PaintWorklet),
}
/// A CSS gradient.
/// <https://drafts.csswg.org/css-images/#gradients>
#[derive(Clone, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
pub struct Gradient<LineDirection, Length, LengthOrPercentage, Position, Color, Angle> {
/// Gradients can be linear or radial.
pub kind: GradientKind<LineDirection, Length, LengthOrPercentage, Position, Angle>,
/// The color stops and interpolation hints.
pub items: Vec<GradientItem<Color, LengthOrPercentage>>,
/// True if this is a repeating gradient.
pub repeating: bool,
/// Compatibility mode.
pub compat_mode: CompatMode,
}
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
/// Whether we used the modern notation or the compatibility `-webkit`, `-moz` prefixes.
pub enum CompatMode {
/// Modern syntax.
Modern,
/// `-webkit` prefix.
WebKit,
/// `-moz` prefix
Moz,
}
/// A gradient kind.
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
pub enum
|
<LineDirection, Length, LengthOrPercentage, Position, Angle> {
/// A linear gradient.
Linear(LineDirection),
/// A radial gradient.
Radial(
EndingShape<Length, LengthOrPercentage>,
Position,
Option<Angle>,
),
}
/// A radial gradient's ending shape.
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue, ToCss)]
pub enum EndingShape<Length, LengthOrPercentage> {
/// A circular gradient.
Circle(Circle<Length>),
/// An elliptic gradient.
Ellipse(Ellipse<LengthOrPercentage>),
}
/// A circle shape.
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
pub enum Circle<Length> {
/// A circle radius.
Radius(Length),
/// A circle extent.
Extent(ShapeExtent),
}
/// An ellipse shape.
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue, ToCss)]
pub enum Ellipse<LengthOrPercentage> {
/// An ellipse pair of radii.
Radii(LengthOrPercentage, LengthOrPercentage),
/// An ellipse extent.
Extent(ShapeExtent),
}
/// <https://drafts.csswg.org/css-images/#typedef-extent-keyword>
#[allow(missing_docs)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize))]
#[derive(Clone, Copy, Debug, Eq, MallocSizeOf, Parse, PartialEq, ToComputedValue, ToCss)]
pub enum ShapeExtent {
ClosestSide,
FarthestSide,
ClosestCorner,
FarthestCorner,
Contain,
Cover,
}
/// A gradient item.
/// <https://drafts.csswg.org/css-images-4/#color-stop-syntax>
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue, ToCss)]
pub enum GradientItem<Color, LengthOrPercentage> {
/// A color stop.
ColorStop(ColorStop<Color, LengthOrPercentage>),
/// An interpolation hint.
InterpolationHint(LengthOrPercentage),
}
/// A color stop.
/// <https://drafts.csswg.org/css-images/#typedef-color-stop-list>
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue, ToCss)]
pub struct ColorStop<Color, LengthOrPercentage> {
/// The color of this stop.
pub color: Color,
/// The position of this stop.
pub position: Option<LengthOrPercentage>,
}
/// Specified values for a paint worklet.
/// <https://drafts.css-houdini.org/css-paint-api/>
#[cfg_attr(feature = "servo", derive(MallocSizeOf))]
#[derive(Clone, Debug, PartialEq, ToComputedValue)]
pub struct PaintWorklet {
/// The name the worklet was registered with.
pub name: Atom,
/// The arguments for the worklet.
/// TODO: store a parsed representation of the arguments.
#[cfg_attr(feature = "servo", ignore_malloc_size_of = "Arc")]
pub arguments: Vec<Arc<custom_properties::SpecifiedValue>>,
}
impl ::style_traits::SpecifiedValueInfo for PaintWorklet {}
impl ToCss for PaintWorklet {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
dest.write_str("paint(")?;
serialize_atom_identifier(&self.name, dest)?;
for argument in &self.arguments {
dest.write_str(", ")?;
argument.to_css(dest)?;
}
dest.write_str(")")
}
}
/// Values for `moz-image-rect`.
///
/// `-moz-image-rect(<uri>, top, right, bottom, left);`
#[allow(missing_docs)]
#[css(comma, function)]
#[derive(Clone, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToComputedValue, ToCss)]
pub struct MozImageRect<NumberOrPercentage, MozImageRectUrl> {
pub url: MozImageRectUrl,
pub top: NumberOrPercentage,
pub right: NumberOrPercentage,
pub bottom: NumberOrPercentage,
pub left: NumberOrPercentage,
}
impl<G, R, U> fmt::Debug for Image<G, R, U>
where
G: ToCss,
R: ToCss,
U: ToCss,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.to_css(&mut CssWriter::new(f))
}
}
impl<G, R, U> ToCss for Image<G, R, U>
where
G: ToCss,
R: ToCss,
U: ToCss,
{
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
match *self {
Image::Url(ref url) => url.to_css(dest),
Image::Gradient(ref gradient) => gradient.to_css(dest),
Image::Rect(ref rect) => rect.to_css(dest),
#[cfg(feature = "servo")]
Image::PaintWorklet(ref paint_worklet) => paint_worklet.to_css(dest),
Image::Element(ref selector) => {
dest.write_str("-moz-element(#")?;
serialize_atom_identifier(selector, dest)?;
dest.write_str(")")
},
}
}
}
impl<D, L, LoP, P, C, A> ToCss for Gradient<D, L, LoP, P, C, A>
where
D: LineDirection,
L: ToCss,
LoP: ToCss,
P: ToCss,
C: ToCss,
A: ToCss,
{
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
match self.compat_mode {
CompatMode::WebKit => dest.write_str("-webkit-")?,
CompatMode::Moz => dest.write_str("-moz-")?,
_ => {},
}
if self.repeating {
dest.write_str("repeating-")?;
}
dest.write_str(self.kind.label())?;
dest.write_str("-gradient(")?;
let mut skip_comma = match self.kind {
GradientKind::Linear(ref direction) if direction.points_downwards(self.compat_mode) => {
true
},
GradientKind::Linear(ref direction) => {
direction.to_css(dest, self.compat_mode)?;
false
},
GradientKind::Radial(ref shape, ref position, ref angle) => {
let omit_shape = match *shape {
EndingShape::Ellipse(Ellipse::Extent(ShapeExtent::Cover)) |
EndingShape::Ellipse(Ellipse::Extent(ShapeExtent::FarthestCorner)) => true,
_ => false,
};
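                // Modern syntax writes "<shape> at <position>"; the legacy
                // -webkit/-moz syntax writes the position (and optional angle)
                // first, followed by the shape.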
if self.compat_mode == CompatMode::Modern {
                if !omit_shape {
shape.to_css(dest)?;
dest.write_str(" ")?;
}
dest.write_str("at ")?;
position.to_css(dest)?;
} else {
position.to_css(dest)?;
if let Some(ref a) = *angle {
dest.write_str(" ")?;
a.to_css(dest)?;
}
                if !omit_shape {
dest.write_str(", ")?;
shape.to_css(dest)?;
}
}
false
},
};
for item in &self.items {
            if !skip_comma {
dest.write_str(", ")?;
}
skip_comma = false;
item.to_css(dest)?;
}
dest.write_str(")")
}
}
impl<D, L, LoP, P, A> GradientKind<D, L, LoP, P, A> {
fn label(&self) -> &str {
match *self {
GradientKind::Linear(..) => "linear",
GradientKind::Radial(..) => "radial",
}
}
}
/// The direction of a linear gradient.
pub trait LineDirection {
    /// Whether this direction points downwards (the default), and thus can be omitted.
fn points_downwards(&self, compat_mode: CompatMode) -> bool;
/// Serialises this direction according to the compatibility mode.
fn to_css<W>(&self, dest: &mut CssWriter<W>, compat_mode: CompatMode) -> fmt::Result
where
W: Write;
}
impl<L> ToCss for Circle<L>
where
L: ToCss,
{
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
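        // `farthest-corner` and its legacy alias `cover` are the default
        // extent, so they serialize as just `circle`.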
match *self {
Circle::Extent(ShapeExtent::FarthestCorner) | Circle::Extent(ShapeExtent::Cover) => {
dest.write_str("circle")
},
Circle::Extent(keyword) => {
dest.write_str("circle ")?;
keyword.to_css(dest)
},
Circle::Radius(ref length) => length.to_css(dest),
}
}
}
|
GradientKind
|
identifier_name
|
arm.rs
|
use PT_FIRSTMACH;
pub type c_long = i32;
pub type c_ulong = u32;
|
cfg_if! {
if #[cfg(libc_const_size_of)] {
#[doc(hidden)]
pub const _ALIGNBYTES: usize = ::mem::size_of::<::c_longlong>() - 1;
} else {
#[doc(hidden)]
pub const _ALIGNBYTES: usize = 8 - 1;
}
}
pub const PT_GETREGS: ::c_int = PT_FIRSTMACH + 1;
pub const PT_SETREGS: ::c_int = PT_FIRSTMACH + 2;
pub const PT_GETFPREGS: ::c_int = PT_FIRSTMACH + 3;
pub const PT_SETFPREGS: ::c_int = PT_FIRSTMACH + 4;
|
pub type c_char = u8;
pub type __cpu_simple_lock_nv_t = ::c_int;
// should be pub(crate), but that requires Rust 1.18.0
|
random_line_split
|
lib.rs
|
#![allow(clippy::unnecessary_wraps)] // FIXME: Should get redone with better errors
use crate::types::QueueOperation;
use actix_web::{
|
use anyhow::Result;
use hybrid_clocks::{Timestamp, WallT};
use lazy_static::lazy_static;
use log::{debug, info};
use potboiler_common::{clock, db, get_raw_timestamp, types::Log};
use serde_derive::Deserialize;
use serde_json::{self, Map, Value};
use std::{env, io::Cursor, ops::Deref};
use time::Duration;
use uuid::{self, Uuid};
mod types;
lazy_static! {
static ref SERVER_URL: String = env::var("SERVER_URL").expect("Needed SERVER_URL");
static ref HOST: String = env::var("HOST").unwrap_or_else(|_| "localhost".to_string());
pub static ref PORT: u16 = env::var("PORT")
.unwrap_or_else(|_| "8000".to_string())
.parse::<u16>()
.unwrap();
}
#[derive(Deserialize)]
struct NewLogResponse {
id: Uuid,
}
fn add_queue_operation(op: &QueueOperation) -> actix_web::Result<NewLogResponse> {
let client = reqwest::Client::new();
let mut res = client.post(SERVER_URL.deref()).json(op).send().expect("sender ok");
assert_eq!(res.status(), reqwest::StatusCode::CREATED);
Ok(res.json().unwrap())
}
fn create_queue(op: Json<types::QueueCreate>) -> actix_web::Result<HttpResponse> {
let name = op.name.clone();
match add_queue_operation(&QueueOperation::Create(op.into_inner())) {
Ok(_) => {
let new_url = format!("/queue/{}", &name);
Ok(HttpResponse::Created().header(header::LOCATION, new_url).finish())
}
Err(val) => Err(val),
}
}
#[derive(Deserialize)]
struct NamedQueueRoute {
queue_name: String,
}
fn delete_queue(path: Path<NamedQueueRoute>) -> actix_web::Result<HttpResponse> {
add_queue_operation(&QueueOperation::Delete(path.queue_name.clone()))?;
Ok(HttpResponse::Ok().finish())
}
fn row_to_state(row: &db::Row) -> Result<types::QueueState> {
let raw_state: String = row.get("state");
// FIXME: format! bit is a hacky workaround for https://github.com/serde-rs/serde/issues/251
Ok(serde_json::from_str(&format!("\"{}\"", raw_state)).unwrap())
}
fn parse_progress<F>(state: &AppState, progress: &types::QueueProgress, should_update: F) -> Result<HttpResponse>
where
F: Fn(&types::QueueState, &Timestamp<WallT>) -> Option<(Timestamp<WallT>, String)>,
{
let conn = state.pool.get().unwrap();
let results = conn.query(&format!(
"SELECT task_name, state, hlc_tstamp from {} where id='{}'",
&progress.queue_name, &progress.id
))?;
if results.is_empty() {
Ok(HttpResponse::NotFound().body(format!("No queue item {} in {}", &progress.id, &progress.queue_name)))
} else {
let row = results.get(0);
let state = row_to_state(&row)?;
let hlc_tstamp: Vec<u8> = row.get("hlc_tstamp");
let when = Timestamp::read_bytes(Cursor::new(hlc_tstamp))?;
if let Some((log_when, status)) = should_update(&state, &when) {
let raw_timestamp = get_raw_timestamp(&log_when)?;
conn.execute(&format!(
"UPDATE {} set hlc_tstamp='{}', worker='{}', state='{}' where id='{}'",
&progress.queue_name,
&raw_timestamp.sql(),
&progress.worker_id,
&status,
&progress.id
))?;
Ok(HttpResponse::NoContent().finish())
} else {
Ok(HttpResponse::Conflict().body("Out of date change"))
}
}
}
fn new_event(log: Json<Log>, state: State<AppState>) -> actix_web::Result<HttpResponse> {
info!("log: {:?}", log);
let log_when = log.when;
state.clock.observe_timestamp(log_when);
let op = serde_json::from_value::<QueueOperation>(log.data.clone())?;
info!("op: {:?}", op);
let conn = state.pool.get().unwrap();
match op {
QueueOperation::Create(create) => {
info!("create: {:?}", create);
let qc = types::QueueConfig {
timeout_ms: create.timeout_ms,
};
match conn.execute(&format!(
"INSERT INTO queues (key, config) VALUES('{}', '{}')",
&create.name,
&serde_json::to_value(&qc)?
)) {
Ok(_) => {
conn.execute(&format!(
"CREATE TABLE IF NOT EXISTS {} (id UUID PRIMARY KEY, task_name \
VARCHAR(2083) NOT NULL, state VARCHAR(8) NOT NULL, info JSONB NOT \
NULL, hlc_tstamp BYTEA NOT NULL, worker UUID NULL)",
&create.name
))
.unwrap();
}
Err(db::Error::UniqueViolation) => {}
Err(err) => Err(err).unwrap(),
};
}
QueueOperation::Add(add) => {
info!("add: {:?}", add);
let raw_timestamp = get_raw_timestamp(&log.when)?;
conn.execute(&format!(
"INSERT INTO {} (id, task_name, state, info, hlc_tstamp) VALUES('{}', '{}', \
'{}', '{}', {})",
add.queue_name,
&log.id,
&add.task_name,
"pending",
&serde_json::to_value(&add.info)?,
&raw_timestamp.sql()
))
.unwrap();
}
QueueOperation::Progress(progress) => {
info!("progress: {:?}", progress);
return Ok(parse_progress(state.deref(), &progress, |state, when| {
if state == &types::QueueState::Pending || (state == &types::QueueState::Working && log_when > *when) {
Some((log_when, String::from("working")))
} else {
None
}
})
.unwrap());
}
QueueOperation::Done(done) => {
info!("done: {:?}", done);
return Ok(parse_progress(state.deref(), &done, |state, when| {
                if state != &types::QueueState::Done || (state == &types::QueueState::Done && log_when > *when) {
Some((log_when, String::from("done")))
} else {
None
}
})
.unwrap());
}
QueueOperation::Delete(queue_name) => {
//let trans = conn.transaction()?;
let trans = conn;
trans.execute(&format!("DROP TABLE IF EXISTS {}", queue_name)).unwrap();
trans
.execute(&format!("DELETE FROM queues where key={}", &queue_name))
.unwrap();
//trans.commit()?;
}
};
Ok(HttpResponse::NoContent().finish())
}
fn get_queue_items(state: State<AppState>, path: Path<NamedQueueRoute>) -> actix_web::Result<HttpResponse> {
let conn = state.pool.get().unwrap();
let config_row = conn
.query(&format!("select config from queues where key='{}'", &path.queue_name))
.unwrap();
if config_row.is_empty() {
return Ok(HttpResponse::NotFound().body(format!("No queue {}", path.queue_name)));
}
let config: types::QueueConfig = serde_json::from_value(config_row.get(0).get("config"))?;
let results = conn
.query(&format!(
"select id, task_name, state, hlc_tstamp from {}",
&path.queue_name
))
.unwrap();
let mut queue = Map::new();
let now = state.clock.get_timestamp().time.as_timespec();
let max_diff = Duration::milliseconds(config.timeout_ms);
for row in &results {
let id: Uuid = row.get("id");
let mut state = row_to_state(&row).unwrap();
if state == types::QueueState::Done {
continue;
}
if state == types::QueueState::Working {
let hlc_tstamp: Vec<u8> = row.get("hlc_tstamp");
let when = Timestamp::read_bytes(Cursor::new(hlc_tstamp))?;
let diff = now - when.time.as_timespec();
if diff > max_diff {
debug!("{} is out of date, so marking as pending", id);
state = types::QueueState::Pending;
}
}
let item = types::QueueListItem {
task_name: row.get("task_name"),
state,
};
queue.insert(id.to_string(), serde_json::to_value(&item)?);
}
let value = Value::Object(queue);
Ok(HttpResponse::Ok().json(value))
}
#[derive(Deserialize)]
struct QueueItemRoute {
queue_name: String,
id: String,
}
fn get_queue_item(path: Path<QueueItemRoute>, state: State<AppState>) -> actix_web::Result<HttpResponse> {
let conn = state.pool.get().unwrap();
let results = conn
.query(&format!(
"select task_name, state, info, worker from {} where id='{}'",
&path.queue_name, &path.id
))
.unwrap();
if results.is_empty() {
Ok(HttpResponse::NotFound().body(format!("No queue item {} in {}", path.id, path.queue_name)))
} else {
let row = results.get(0);
let item = types::QueueItem {
task_name: row.get("task_name"),
state: row_to_state(&row).unwrap(),
info: row.get("info"),
worker: row.get("worker"),
};
Ok(HttpResponse::Ok().json(item))
}
}
fn add_queue_item(json: Json<Value>, path: Path<NamedQueueRoute>) -> actix_web::Result<HttpResponse> {
let mut json_mut = json.into_inner();
let map = json_mut.as_object_mut().unwrap();
map.insert("queue_name".to_string(), serde_json::to_value(&path.queue_name)?);
let op = serde_json::from_value::<types::QueueAdd>(json_mut)?;
match add_queue_operation(&QueueOperation::Add(op)) {
Ok(val) => {
let new_url = format!("http://{}:8000/queue/{}/{}", HOST.deref(), &path.queue_name, &val.id);
Ok(HttpResponse::Created().header(header::LOCATION, new_url).finish())
}
Err(val) => Err(val),
}
}
fn build_queue_progress(json: Json<Value>, path: &Path<QueueItemRoute>) -> Result<types::QueueProgress> {
let mut json_mut = json.into_inner();
let map = json_mut.as_object_mut().unwrap();
map.insert("queue_name".to_string(), serde_json::to_value(&path.queue_name)?);
map.insert("id".to_string(), serde_json::to_value(&path.id)?);
Ok(serde_json::from_value::<types::QueueProgress>(json_mut)?)
}
fn progress_queue_item(
state: State<AppState>,
json: Json<Value>,
path: Path<QueueItemRoute>,
) -> actix_web::Result<HttpResponse> {
let op = build_queue_progress(json, &path).unwrap();
match add_queue_operation(&QueueOperation::Progress(op)) {
Ok(_) => get_queue_item(path, state),
Err(val) => Err(val),
}
}
fn finish_queue_item(json: Json<Value>, path: Path<QueueItemRoute>) -> actix_web::Result<HttpResponse> {
let op = build_queue_progress(json, &path).unwrap();
match add_queue_operation(&QueueOperation::Done(op)) {
Ok(_) => Ok(HttpResponse::Ok().finish()),
Err(val) => Err(val),
}
}
fn make_queue_table(conn: &db::Connection) {
conn.execute("CREATE TABLE IF NOT EXISTS queues (key VARCHAR(1024) PRIMARY KEY, config JSONB NOT NULL)")
.unwrap();
}
#[derive(Debug, Clone)]
pub struct AppState {
clock: clock::SyncClock,
pool: db::Pool,
}
impl AppState {
pub fn new(pool: db::Pool) -> Result<AppState> {
let clock = clock::SyncClock::new();
let conn = pool.get().unwrap();
make_queue_table(&conn);
Ok(AppState { clock, pool })
}
}
pub fn app_router(state: AppState) -> Result<App<AppState>> {
Ok(App::with_state(state)
.resource("/create", |r| r.method(Method::POST).with(create_queue))
.resource("/event", |r| r.method(Method::POST).with(new_event))
.resource("/queue/{queue_name}", |r| {
r.method(Method::GET).with(get_queue_items);
r.method(Method::POST).with(add_queue_item);
r.method(Method::DELETE).with(delete_queue);
})
.resource("/queue/{queue_name}/{id}", |r| {
r.method(Method::GET).with(get_queue_item);
r.method(Method::PUT).with(progress_queue_item);
r.method(Method::DELETE).with(finish_queue_item);
}))
}
pub fn register() {
let client = reqwest::Client::new();
let mut map = serde_json::Map::new();
map.insert(
"url".to_string(),
serde_json::Value::String(format!("http://{}:{}/event", HOST.deref(), PORT.deref())),
);
let res = client
.post(&format!("{}/register", SERVER_URL.deref()))
.json(&map)
.send()
.expect("Register ok");
assert_eq!(res.status(), reqwest::StatusCode::CREATED);
}
|
http::{header, Method},
App, HttpResponse, Json, Path, State,
};
|
random_line_split
|
lib.rs
|
#![allow(clippy::unnecessary_wraps)] // FIXME: Should get redone with better errors
use crate::types::QueueOperation;
use actix_web::{
http::{header, Method},
App, HttpResponse, Json, Path, State,
};
use anyhow::Result;
use hybrid_clocks::{Timestamp, WallT};
use lazy_static::lazy_static;
use log::{debug, info};
use potboiler_common::{clock, db, get_raw_timestamp, types::Log};
use serde_derive::Deserialize;
use serde_json::{self, Map, Value};
use std::{env, io::Cursor, ops::Deref};
use time::Duration;
use uuid::{self, Uuid};
mod types;
lazy_static! {
static ref SERVER_URL: String = env::var("SERVER_URL").expect("Needed SERVER_URL");
static ref HOST: String = env::var("HOST").unwrap_or_else(|_| "localhost".to_string());
pub static ref PORT: u16 = env::var("PORT")
.unwrap_or_else(|_| "8000".to_string())
.parse::<u16>()
.unwrap();
}
#[derive(Deserialize)]
struct NewLogResponse {
id: Uuid,
}
fn add_queue_operation(op: &QueueOperation) -> actix_web::Result<NewLogResponse>
|
fn create_queue(op: Json<types::QueueCreate>) -> actix_web::Result<HttpResponse> {
let name = op.name.clone();
match add_queue_operation(&QueueOperation::Create(op.into_inner())) {
Ok(_) => {
let new_url = format!("/queue/{}", &name);
Ok(HttpResponse::Created().header(header::LOCATION, new_url).finish())
}
Err(val) => Err(val),
}
}
#[derive(Deserialize)]
struct NamedQueueRoute {
queue_name: String,
}
fn delete_queue(path: Path<NamedQueueRoute>) -> actix_web::Result<HttpResponse> {
add_queue_operation(&QueueOperation::Delete(path.queue_name.clone()))?;
Ok(HttpResponse::Ok().finish())
}
fn row_to_state(row: &db::Row) -> Result<types::QueueState> {
let raw_state: String = row.get("state");
// FIXME: format! bit is a hacky workaround for https://github.com/serde-rs/serde/issues/251
Ok(serde_json::from_str(&format!("\"{}\"", raw_state)).unwrap())
}
fn parse_progress<F>(state: &AppState, progress: &types::QueueProgress, should_update: F) -> Result<HttpResponse>
where
F: Fn(&types::QueueState, &Timestamp<WallT>) -> Option<(Timestamp<WallT>, String)>,
{
let conn = state.pool.get().unwrap();
let results = conn.query(&format!(
"SELECT task_name, state, hlc_tstamp from {} where id='{}'",
&progress.queue_name, &progress.id
))?;
if results.is_empty() {
Ok(HttpResponse::NotFound().body(format!("No queue item {} in {}", &progress.id, &progress.queue_name)))
} else {
let row = results.get(0);
let state = row_to_state(&row)?;
let hlc_tstamp: Vec<u8> = row.get("hlc_tstamp");
let when = Timestamp::read_bytes(Cursor::new(hlc_tstamp))?;
if let Some((log_when, status)) = should_update(&state, &when) {
let raw_timestamp = get_raw_timestamp(&log_when)?;
conn.execute(&format!(
"UPDATE {} set hlc_tstamp='{}', worker='{}', state='{}' where id='{}'",
&progress.queue_name,
&raw_timestamp.sql(),
&progress.worker_id,
&status,
&progress.id
))?;
Ok(HttpResponse::NoContent().finish())
} else {
Ok(HttpResponse::Conflict().body("Out of date change"))
}
}
}
fn new_event(log: Json<Log>, state: State<AppState>) -> actix_web::Result<HttpResponse> {
info!("log: {:?}", log);
let log_when = log.when;
state.clock.observe_timestamp(log_when);
let op = serde_json::from_value::<QueueOperation>(log.data.clone())?;
info!("op: {:?}", op);
let conn = state.pool.get().unwrap();
match op {
QueueOperation::Create(create) => {
info!("create: {:?}", create);
let qc = types::QueueConfig {
timeout_ms: create.timeout_ms,
};
match conn.execute(&format!(
"INSERT INTO queues (key, config) VALUES('{}', '{}')",
&create.name,
&serde_json::to_value(&qc)?
)) {
Ok(_) => {
conn.execute(&format!(
"CREATE TABLE IF NOT EXISTS {} (id UUID PRIMARY KEY, task_name \
VARCHAR(2083) NOT NULL, state VARCHAR(8) NOT NULL, info JSONB NOT \
NULL, hlc_tstamp BYTEA NOT NULL, worker UUID NULL)",
&create.name
))
.unwrap();
}
Err(db::Error::UniqueViolation) => {}
Err(err) => Err(err).unwrap(),
};
}
QueueOperation::Add(add) => {
info!("add: {:?}", add);
let raw_timestamp = get_raw_timestamp(&log.when)?;
conn.execute(&format!(
"INSERT INTO {} (id, task_name, state, info, hlc_tstamp) VALUES('{}', '{}', \
'{}', '{}', {})",
add.queue_name,
&log.id,
&add.task_name,
"pending",
&serde_json::to_value(&add.info)?,
&raw_timestamp.sql()
))
.unwrap();
}
QueueOperation::Progress(progress) => {
info!("progress: {:?}", progress);
return Ok(parse_progress(state.deref(), &progress, |state, when| {
if state == &types::QueueState::Pending || (state == &types::QueueState::Working && log_when > *when) {
Some((log_when, String::from("working")))
} else {
None
}
})
.unwrap());
}
QueueOperation::Done(done) => {
info!("done: {:?}", done);
return Ok(parse_progress(state.deref(), &done, |state, when| {
                if state != &types::QueueState::Done || (state == &types::QueueState::Done && log_when > *when) {
Some((log_when, String::from("done")))
} else {
None
}
})
.unwrap());
}
QueueOperation::Delete(queue_name) => {
//let trans = conn.transaction()?;
let trans = conn;
trans.execute(&format!("DROP TABLE IF EXISTS {}", queue_name)).unwrap();
trans
.execute(&format!("DELETE FROM queues where key={}", &queue_name))
.unwrap();
//trans.commit()?;
}
};
Ok(HttpResponse::NoContent().finish())
}
fn get_queue_items(state: State<AppState>, path: Path<NamedQueueRoute>) -> actix_web::Result<HttpResponse> {
let conn = state.pool.get().unwrap();
let config_row = conn
.query(&format!("select config from queues where key='{}'", &path.queue_name))
.unwrap();
if config_row.is_empty() {
return Ok(HttpResponse::NotFound().body(format!("No queue {}", path.queue_name)));
}
let config: types::QueueConfig = serde_json::from_value(config_row.get(0).get("config"))?;
let results = conn
.query(&format!(
"select id, task_name, state, hlc_tstamp from {}",
&path.queue_name
))
.unwrap();
let mut queue = Map::new();
let now = state.clock.get_timestamp().time.as_timespec();
let max_diff = Duration::milliseconds(config.timeout_ms);
for row in &results {
let id: Uuid = row.get("id");
let mut state = row_to_state(&row).unwrap();
if state == types::QueueState::Done {
continue;
}
if state == types::QueueState::Working {
let hlc_tstamp: Vec<u8> = row.get("hlc_tstamp");
let when = Timestamp::read_bytes(Cursor::new(hlc_tstamp))?;
let diff = now - when.time.as_timespec();
if diff > max_diff {
debug!("{} is out of date, so marking as pending", id);
state = types::QueueState::Pending;
}
}
let item = types::QueueListItem {
task_name: row.get("task_name"),
state,
};
queue.insert(id.to_string(), serde_json::to_value(&item)?);
}
let value = Value::Object(queue);
Ok(HttpResponse::Ok().json(value))
}
#[derive(Deserialize)]
struct QueueItemRoute {
queue_name: String,
id: String,
}
fn get_queue_item(path: Path<QueueItemRoute>, state: State<AppState>) -> actix_web::Result<HttpResponse> {
let conn = state.pool.get().unwrap();
let results = conn
.query(&format!(
"select task_name, state, info, worker from {} where id='{}'",
&path.queue_name, &path.id
))
.unwrap();
if results.is_empty() {
Ok(HttpResponse::NotFound().body(format!("No queue item {} in {}", path.id, path.queue_name)))
} else {
let row = results.get(0);
let item = types::QueueItem {
task_name: row.get("task_name"),
state: row_to_state(&row).unwrap(),
info: row.get("info"),
worker: row.get("worker"),
};
Ok(HttpResponse::Ok().json(item))
}
}
fn add_queue_item(json: Json<Value>, path: Path<NamedQueueRoute>) -> actix_web::Result<HttpResponse> {
let mut json_mut = json.into_inner();
let map = json_mut.as_object_mut().unwrap();
map.insert("queue_name".to_string(), serde_json::to_value(&path.queue_name)?);
let op = serde_json::from_value::<types::QueueAdd>(json_mut)?;
match add_queue_operation(&QueueOperation::Add(op)) {
Ok(val) => {
let new_url = format!("http://{}:8000/queue/{}/{}", HOST.deref(), &path.queue_name, &val.id);
Ok(HttpResponse::Created().header(header::LOCATION, new_url).finish())
}
Err(val) => Err(val),
}
}
fn build_queue_progress(json: Json<Value>, path: &Path<QueueItemRoute>) -> Result<types::QueueProgress> {
let mut json_mut = json.into_inner();
let map = json_mut.as_object_mut().unwrap();
map.insert("queue_name".to_string(), serde_json::to_value(&path.queue_name)?);
map.insert("id".to_string(), serde_json::to_value(&path.id)?);
Ok(serde_json::from_value::<types::QueueProgress>(json_mut)?)
}
fn progress_queue_item(
state: State<AppState>,
json: Json<Value>,
path: Path<QueueItemRoute>,
) -> actix_web::Result<HttpResponse> {
let op = build_queue_progress(json, &path).unwrap();
match add_queue_operation(&QueueOperation::Progress(op)) {
Ok(_) => get_queue_item(path, state),
Err(val) => Err(val),
}
}
fn finish_queue_item(json: Json<Value>, path: Path<QueueItemRoute>) -> actix_web::Result<HttpResponse> {
let op = build_queue_progress(json, &path).unwrap();
match add_queue_operation(&QueueOperation::Done(op)) {
Ok(_) => Ok(HttpResponse::Ok().finish()),
Err(val) => Err(val),
}
}
fn make_queue_table(conn: &db::Connection) {
conn.execute("CREATE TABLE IF NOT EXISTS queues (key VARCHAR(1024) PRIMARY KEY, config JSONB NOT NULL)")
.unwrap();
}
#[derive(Debug, Clone)]
pub struct AppState {
clock: clock::SyncClock,
pool: db::Pool,
}
impl AppState {
pub fn new(pool: db::Pool) -> Result<AppState> {
let clock = clock::SyncClock::new();
let conn = pool.get().unwrap();
make_queue_table(&conn);
Ok(AppState { clock, pool })
}
}
pub fn app_router(state: AppState) -> Result<App<AppState>> {
Ok(App::with_state(state)
.resource("/create", |r| r.method(Method::POST).with(create_queue))
.resource("/event", |r| r.method(Method::POST).with(new_event))
.resource("/queue/{queue_name}", |r| {
r.method(Method::GET).with(get_queue_items);
r.method(Method::POST).with(add_queue_item);
r.method(Method::DELETE).with(delete_queue);
})
.resource("/queue/{queue_name}/{id}", |r| {
r.method(Method::GET).with(get_queue_item);
r.method(Method::PUT).with(progress_queue_item);
r.method(Method::DELETE).with(finish_queue_item);
}))
}
pub fn register() {
let client = reqwest::Client::new();
let mut map = serde_json::Map::new();
map.insert(
"url".to_string(),
serde_json::Value::String(format!("http://{}:{}/event", HOST.deref(), PORT.deref())),
);
let res = client
.post(&format!("{}/register", SERVER_URL.deref()))
.json(&map)
.send()
.expect("Register ok");
assert_eq!(res.status(), reqwest::StatusCode::CREATED);
}
|
{
let client = reqwest::Client::new();
let mut res = client.post(SERVER_URL.deref()).json(op).send().expect("sender ok");
assert_eq!(res.status(), reqwest::StatusCode::CREATED);
Ok(res.json().unwrap())
}
|
identifier_body
|
lib.rs
|
#![allow(clippy::unnecessary_wraps)] // FIXME: Should get redone with better errors
use crate::types::QueueOperation;
use actix_web::{
http::{header, Method},
App, HttpResponse, Json, Path, State,
};
use anyhow::Result;
use hybrid_clocks::{Timestamp, WallT};
use lazy_static::lazy_static;
use log::{debug, info};
use potboiler_common::{clock, db, get_raw_timestamp, types::Log};
use serde_derive::Deserialize;
use serde_json::{self, Map, Value};
use std::{env, io::Cursor, ops::Deref};
use time::Duration;
use uuid::{self, Uuid};
mod types;
lazy_static! {
static ref SERVER_URL: String = env::var("SERVER_URL").expect("Needed SERVER_URL");
static ref HOST: String = env::var("HOST").unwrap_or_else(|_| "localhost".to_string());
pub static ref PORT: u16 = env::var("PORT")
.unwrap_or_else(|_| "8000".to_string())
.parse::<u16>()
.unwrap();
}
#[derive(Deserialize)]
struct NewLogResponse {
id: Uuid,
}
fn add_queue_operation(op: &QueueOperation) -> actix_web::Result<NewLogResponse> {
let client = reqwest::Client::new();
let mut res = client.post(SERVER_URL.deref()).json(op).send().expect("sender ok");
assert_eq!(res.status(), reqwest::StatusCode::CREATED);
Ok(res.json().unwrap())
}
fn create_queue(op: Json<types::QueueCreate>) -> actix_web::Result<HttpResponse> {
let name = op.name.clone();
match add_queue_operation(&QueueOperation::Create(op.into_inner())) {
Ok(_) => {
let new_url = format!("/queue/{}", &name);
Ok(HttpResponse::Created().header(header::LOCATION, new_url).finish())
}
Err(val) => Err(val),
}
}
#[derive(Deserialize)]
struct NamedQueueRoute {
queue_name: String,
}
fn delete_queue(path: Path<NamedQueueRoute>) -> actix_web::Result<HttpResponse> {
add_queue_operation(&QueueOperation::Delete(path.queue_name.clone()))?;
Ok(HttpResponse::Ok().finish())
}
fn row_to_state(row: &db::Row) -> Result<types::QueueState> {
let raw_state: String = row.get("state");
// FIXME: format! bit is a hacky workaround for https://github.com/serde-rs/serde/issues/251
Ok(serde_json::from_str(&format!("\"{}\"", raw_state)).unwrap())
}
fn parse_progress<F>(state: &AppState, progress: &types::QueueProgress, should_update: F) -> Result<HttpResponse>
where
F: Fn(&types::QueueState, &Timestamp<WallT>) -> Option<(Timestamp<WallT>, String)>,
{
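    // Look up the queue item and let `should_update` decide, from its stored
    // state and HLC timestamp, whether to apply the change; `None` means the
    // incoming update is stale and a 409 Conflict is returned.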
let conn = state.pool.get().unwrap();
let results = conn.query(&format!(
"SELECT task_name, state, hlc_tstamp from {} where id='{}'",
&progress.queue_name, &progress.id
))?;
if results.is_empty() {
Ok(HttpResponse::NotFound().body(format!("No queue item {} in {}", &progress.id, &progress.queue_name)))
} else {
let row = results.get(0);
let state = row_to_state(&row)?;
let hlc_tstamp: Vec<u8> = row.get("hlc_tstamp");
let when = Timestamp::read_bytes(Cursor::new(hlc_tstamp))?;
if let Some((log_when, status)) = should_update(&state, &when) {
let raw_timestamp = get_raw_timestamp(&log_when)?;
conn.execute(&format!(
"UPDATE {} set hlc_tstamp='{}', worker='{}', state='{}' where id='{}'",
&progress.queue_name,
&raw_timestamp.sql(),
&progress.worker_id,
&status,
&progress.id
))?;
Ok(HttpResponse::NoContent().finish())
} else {
Ok(HttpResponse::Conflict().body("Out of date change"))
}
}
}
fn new_event(log: Json<Log>, state: State<AppState>) -> actix_web::Result<HttpResponse> {
info!("log: {:?}", log);
let log_when = log.when;
state.clock.observe_timestamp(log_when);
let op = serde_json::from_value::<QueueOperation>(log.data.clone())?;
info!("op: {:?}", op);
let conn = state.pool.get().unwrap();
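    // Apply the replicated operation: create or drop a queue's backing table,
    // insert a new task row, or advance a task through the working/done
    // states via `parse_progress`.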
match op {
QueueOperation::Create(create) => {
info!("create: {:?}", create);
let qc = types::QueueConfig {
timeout_ms: create.timeout_ms,
};
match conn.execute(&format!(
"INSERT INTO queues (key, config) VALUES('{}', '{}')",
&create.name,
&serde_json::to_value(&qc)?
)) {
Ok(_) => {
conn.execute(&format!(
"CREATE TABLE IF NOT EXISTS {} (id UUID PRIMARY KEY, task_name \
VARCHAR(2083) NOT NULL, state VARCHAR(8) NOT NULL, info JSONB NOT \
NULL, hlc_tstamp BYTEA NOT NULL, worker UUID NULL)",
&create.name
))
.unwrap();
}
Err(db::Error::UniqueViolation) => {}
Err(err) => Err(err).unwrap(),
};
}
QueueOperation::Add(add) => {
info!("add: {:?}", add);
let raw_timestamp = get_raw_timestamp(&log.when)?;
conn.execute(&format!(
"INSERT INTO {} (id, task_name, state, info, hlc_tstamp) VALUES('{}', '{}', \
'{}', '{}', {})",
add.queue_name,
&log.id,
&add.task_name,
"pending",
&serde_json::to_value(&add.info)?,
&raw_timestamp.sql()
))
.unwrap();
}
QueueOperation::Progress(progress) => {
info!("progress: {:?}", progress);
return Ok(parse_progress(state.deref(), &progress, |state, when| {
if state == &types::QueueState::Pending || (state == &types::QueueState::Working && log_when > *when) {
Some((log_when, String::from("working")))
} else {
None
}
})
.unwrap());
}
QueueOperation::Done(done) => {
info!("done: {:?}", done);
return Ok(parse_progress(state.deref(), &done, |state, when| {
                if state != &types::QueueState::Done || (state == &types::QueueState::Done && log_when > *when) {
Some((log_when, String::from("done")))
} else {
None
}
})
.unwrap());
}
QueueOperation::Delete(queue_name) => {
//let trans = conn.transaction()?;
let trans = conn;
trans.execute(&format!("DROP TABLE IF EXISTS {}", queue_name)).unwrap();
trans
.execute(&format!("DELETE FROM queues where key={}", &queue_name))
.unwrap();
//trans.commit()?;
}
};
Ok(HttpResponse::NoContent().finish())
}
fn get_queue_items(state: State<AppState>, path: Path<NamedQueueRoute>) -> actix_web::Result<HttpResponse> {
let conn = state.pool.get().unwrap();
let config_row = conn
.query(&format!("select config from queues where key='{}'", &path.queue_name))
.unwrap();
if config_row.is_empty() {
return Ok(HttpResponse::NotFound().body(format!("No queue {}", path.queue_name)));
}
let config: types::QueueConfig = serde_json::from_value(config_row.get(0).get("config"))?;
let results = conn
.query(&format!(
"select id, task_name, state, hlc_tstamp from {}",
&path.queue_name
))
.unwrap();
let mut queue = Map::new();
let now = state.clock.get_timestamp().time.as_timespec();
let max_diff = Duration::milliseconds(config.timeout_ms);
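    // Items stuck in `working` longer than the queue's timeout are reported
    // as `pending` again so another worker can pick them up.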
for row in &results {
let id: Uuid = row.get("id");
let mut state = row_to_state(&row).unwrap();
if state == types::QueueState::Done {
continue;
}
if state == types::QueueState::Working {
let hlc_tstamp: Vec<u8> = row.get("hlc_tstamp");
let when = Timestamp::read_bytes(Cursor::new(hlc_tstamp))?;
let diff = now - when.time.as_timespec();
if diff > max_diff {
debug!("{} is out of date, so marking as pending", id);
state = types::QueueState::Pending;
}
}
let item = types::QueueListItem {
task_name: row.get("task_name"),
state,
};
queue.insert(id.to_string(), serde_json::to_value(&item)?);
}
let value = Value::Object(queue);
Ok(HttpResponse::Ok().json(value))
}
#[derive(Deserialize)]
struct QueueItemRoute {
queue_name: String,
id: String,
}
fn get_queue_item(path: Path<QueueItemRoute>, state: State<AppState>) -> actix_web::Result<HttpResponse> {
let conn = state.pool.get().unwrap();
let results = conn
.query(&format!(
"select task_name, state, info, worker from {} where id='{}'",
&path.queue_name, &path.id
))
.unwrap();
if results.is_empty() {
Ok(HttpResponse::NotFound().body(format!("No queue item {} in {}", path.id, path.queue_name)))
} else {
let row = results.get(0);
let item = types::QueueItem {
task_name: row.get("task_name"),
state: row_to_state(&row).unwrap(),
info: row.get("info"),
worker: row.get("worker"),
};
Ok(HttpResponse::Ok().json(item))
}
}
fn add_queue_item(json: Json<Value>, path: Path<NamedQueueRoute>) -> actix_web::Result<HttpResponse> {
let mut json_mut = json.into_inner();
let map = json_mut.as_object_mut().unwrap();
map.insert("queue_name".to_string(), serde_json::to_value(&path.queue_name)?);
let op = serde_json::from_value::<types::QueueAdd>(json_mut)?;
match add_queue_operation(&QueueOperation::Add(op)) {
Ok(val) => {
let new_url = format!("http://{}:8000/queue/{}/{}", HOST.deref(), &path.queue_name, &val.id);
Ok(HttpResponse::Created().header(header::LOCATION, new_url).finish())
}
Err(val) => Err(val),
}
}
fn build_queue_progress(json: Json<Value>, path: &Path<QueueItemRoute>) -> Result<types::QueueProgress> {
let mut json_mut = json.into_inner();
let map = json_mut.as_object_mut().unwrap();
map.insert("queue_name".to_string(), serde_json::to_value(&path.queue_name)?);
map.insert("id".to_string(), serde_json::to_value(&path.id)?);
Ok(serde_json::from_value::<types::QueueProgress>(json_mut)?)
}
fn progress_queue_item(
state: State<AppState>,
json: Json<Value>,
path: Path<QueueItemRoute>,
) -> actix_web::Result<HttpResponse> {
let op = build_queue_progress(json, &path).unwrap();
match add_queue_operation(&QueueOperation::Progress(op)) {
Ok(_) => get_queue_item(path, state),
Err(val) => Err(val),
}
}
fn finish_queue_item(json: Json<Value>, path: Path<QueueItemRoute>) -> actix_web::Result<HttpResponse> {
let op = build_queue_progress(json, &path).unwrap();
match add_queue_operation(&QueueOperation::Done(op)) {
Ok(_) => Ok(HttpResponse::Ok().finish()),
Err(val) => Err(val),
}
}
fn make_queue_table(conn: &db::Connection) {
conn.execute("CREATE TABLE IF NOT EXISTS queues (key VARCHAR(1024) PRIMARY KEY, config JSONB NOT NULL)")
.unwrap();
}
#[derive(Debug, Clone)]
pub struct AppState {
clock: clock::SyncClock,
pool: db::Pool,
}
impl AppState {
pub fn new(pool: db::Pool) -> Result<AppState> {
let clock = clock::SyncClock::new();
let conn = pool.get().unwrap();
make_queue_table(&conn);
Ok(AppState { clock, pool })
}
}
pub fn
|
(state: AppState) -> Result<App<AppState>> {
Ok(App::with_state(state)
.resource("/create", |r| r.method(Method::POST).with(create_queue))
.resource("/event", |r| r.method(Method::POST).with(new_event))
.resource("/queue/{queue_name}", |r| {
r.method(Method::GET).with(get_queue_items);
r.method(Method::POST).with(add_queue_item);
r.method(Method::DELETE).with(delete_queue);
})
.resource("/queue/{queue_name}/{id}", |r| {
r.method(Method::GET).with(get_queue_item);
r.method(Method::PUT).with(progress_queue_item);
r.method(Method::DELETE).with(finish_queue_item);
}))
}
pub fn register() {
let client = reqwest::Client::new();
let mut map = serde_json::Map::new();
map.insert(
"url".to_string(),
serde_json::Value::String(format!("http://{}:{}/event", HOST.deref(), PORT.deref())),
);
let res = client
.post(&format!("{}/register", SERVER_URL.deref()))
.json(&map)
.send()
.expect("Register ok");
assert_eq!(res.status(), reqwest::StatusCode::CREATED);
}
|
app_router
|
identifier_name
|
module_file.rs
|
// Copyright 2016 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use driver::Config;
use std::path::{Path, PathBuf};
use std::io::prelude::*;
use std::fs::{OpenOptions, DirBuilder};
#[derive(Clone, Debug)]
pub struct ModuleFile
{
input_path: PathBuf,
output_path: Option<PathBuf>,
module_name: String
}
impl ModuleFile
{
pub fn new(config: &Config, file_path: PathBuf, lib: bool) -> Option<Self> {
if let Some(mod_name) = Self::extract_mod_name(file_path.clone()) {
let mod_file = match lib {
false => Self::core_file(config, file_path, mod_name),
true => Self::lib_file(file_path, mod_name),
};
return Some(mod_file);
}
None
}
pub fn extract_mod_name(file_path: PathBuf) -> Option<String> {
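    // Bonsai modules are stored as `<Name>.bonsai.java`; peel off both
    // extensions to recover `<Name>`.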
if let Some(ext) = file_path.clone().extension() {
if ext == "java" {
let p = file_path.clone();
let bonsai_file = Path::new(p.file_stem().unwrap());
if let Some(bonsai_ext) = bonsai_file.extension() {
if bonsai_ext == "bonsai" {
let mod_name = String::from(bonsai_file.file_stem().unwrap().to_str().unwrap());
return Some(mod_name);
}
}
}
}
None
}
pub fn is_lib(&self) -> bool {
self.output_path.is_none()
}
fn core_file(config: &Config, file_path: PathBuf, mod_name: String) -> Self {
ModuleFile {
input_path: file_path.clone(),
output_path: Some(Self::build_output_path(config, file_path, mod_name.clone())),
module_name: mod_name
}
}
fn lib_file(file_path: PathBuf, mod_name: String) -> Self {
ModuleFile {
input_path: file_path.clone(),
output_path: None,
module_name: mod_name
}
}
fn build_output_path(config: &Config, mut file_path: PathBuf, mod_name: String) -> PathBuf {
file_path.pop();
let file_name = PathBuf::from(&mod_name).with_extension("java");
    // In testing mode, we do not deal with nested repositories (`strip_prefix` does not work because `file_path` is a file and not a directory).
let file_path =
if config.testing_mode {
file_name
}
else {
PathBuf::from(file_path.join(file_name).strip_prefix(&config.input).unwrap())
};
config.output.join(file_path)
}
pub fn mod_name(&self) -> String {
self.module_name.clone()
}
pub fn input_path_str(&self) -> String {
format!("{}", self.input_path.display())
}
pub fn input_path<'a>(&'a self) -> &'a Path {
self.input_path.as_path()
}
pub fn write_output(&self, output: String) {
let output_path = self.output_path.clone().expect(
"Try to compile a library file (this is a bug).");
self.build_output_directory(output_path.clone());
let mut file = OpenOptions::new()
.write(true)
|
.open(output_path.clone())
.expect(&format!("Output file ({})", output_path.to_str().unwrap_or("<invalid UTF8>")));
file.write_fmt(format_args!("{}", output)).unwrap();
}
fn build_output_directory(&self, output_path: PathBuf) {
if let Some(dir_path) = output_path.parent() {
DirBuilder::new()
.recursive(true)
.create(dir_path)
.expect("Recursive creation of directory for the output file.");
}
}
}
|
.truncate(true)
.create(true)
|
random_line_split
|
module_file.rs
|
// Copyright 2016 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use driver::Config;
use std::path::{Path, PathBuf};
use std::io::prelude::*;
use std::fs::{OpenOptions, DirBuilder};
#[derive(Clone, Debug)]
pub struct ModuleFile
{
input_path: PathBuf,
output_path: Option<PathBuf>,
module_name: String
}
impl ModuleFile
{
pub fn new(config: &Config, file_path: PathBuf, lib: bool) -> Option<Self> {
if let Some(mod_name) = Self::extract_mod_name(file_path.clone()) {
let mod_file = match lib {
false => Self::core_file(config, file_path, mod_name),
true => Self::lib_file(file_path, mod_name),
};
return Some(mod_file);
}
None
}
pub fn extract_mod_name(file_path: PathBuf) -> Option<String> {
if let Some(ext) = file_path.clone().extension() {
if ext == "java" {
let p = file_path.clone();
let bonsai_file = Path::new(p.file_stem().unwrap());
if let Some(bonsai_ext) = bonsai_file.extension() {
if bonsai_ext == "bonsai"
|
}
}
}
None
}
pub fn is_lib(&self) -> bool {
self.output_path.is_none()
}
fn core_file(config: &Config, file_path: PathBuf, mod_name: String) -> Self {
ModuleFile {
input_path: file_path.clone(),
output_path: Some(Self::build_output_path(config, file_path, mod_name.clone())),
module_name: mod_name
}
}
fn lib_file(file_path: PathBuf, mod_name: String) -> Self {
ModuleFile {
input_path: file_path.clone(),
output_path: None,
module_name: mod_name
}
}
fn build_output_path(config: &Config, mut file_path: PathBuf, mod_name: String) -> PathBuf {
file_path.pop();
let file_name = PathBuf::from(&mod_name).with_extension("java");
    // In testing mode, we do not deal with nested repositories (`strip_prefix` does not work because `file_path` is a file and not a directory).
let file_path =
if config.testing_mode {
file_name
}
else {
PathBuf::from(file_path.join(file_name).strip_prefix(&config.input).unwrap())
};
config.output.join(file_path)
}
pub fn mod_name(&self) -> String {
self.module_name.clone()
}
pub fn input_path_str(&self) -> String {
format!("{}", self.input_path.display())
}
pub fn input_path<'a>(&'a self) -> &'a Path {
self.input_path.as_path()
}
pub fn write_output(&self, output: String) {
let output_path = self.output_path.clone().expect(
"Try to compile a library file (this is a bug).");
self.build_output_directory(output_path.clone());
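    // Create the output file if needed and truncate any previous contents
    // before writing the generated Java source.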
let mut file = OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(output_path.clone())
.expect(&format!("Output file ({})", output_path.to_str().unwrap_or("<invalid UTF8>")));
file.write_fmt(format_args!("{}", output)).unwrap();
}
fn build_output_directory(&self, output_path: PathBuf) {
if let Some(dir_path) = output_path.parent() {
DirBuilder::new()
.recursive(true)
.create(dir_path)
.expect("Recursive creation of directory for the output file.");
}
}
}
|
{
let mod_name = String::from(bonsai_file.file_stem().unwrap().to_str().unwrap());
return Some(mod_name);
}
|
conditional_block
|
module_file.rs
|
// Copyright 2016 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use driver::Config;
use std::path::{Path, PathBuf};
use std::io::prelude::*;
use std::fs::{OpenOptions, DirBuilder};
#[derive(Clone, Debug)]
pub struct ModuleFile
{
input_path: PathBuf,
output_path: Option<PathBuf>,
module_name: String
}
impl ModuleFile
{
pub fn new(config: &Config, file_path: PathBuf, lib: bool) -> Option<Self> {
if let Some(mod_name) = Self::extract_mod_name(file_path.clone()) {
let mod_file = match lib {
false => Self::core_file(config, file_path, mod_name),
true => Self::lib_file(file_path, mod_name),
};
return Some(mod_file);
}
None
}
pub fn extract_mod_name(file_path: PathBuf) -> Option<String> {
if let Some(ext) = file_path.clone().extension() {
if ext == "java" {
let p = file_path.clone();
let bonsai_file = Path::new(p.file_stem().unwrap());
if let Some(bonsai_ext) = bonsai_file.extension() {
if bonsai_ext == "bonsai" {
let mod_name = String::from(bonsai_file.file_stem().unwrap().to_str().unwrap());
return Some(mod_name);
}
}
}
}
None
}
pub fn is_lib(&self) -> bool {
self.output_path.is_none()
}
fn
|
(config: &Config, file_path: PathBuf, mod_name: String) -> Self {
ModuleFile {
input_path: file_path.clone(),
output_path: Some(Self::build_output_path(config, file_path, mod_name.clone())),
module_name: mod_name
}
}
fn lib_file(file_path: PathBuf, mod_name: String) -> Self {
ModuleFile {
input_path: file_path.clone(),
output_path: None,
module_name: mod_name
}
}
fn build_output_path(config: &Config, mut file_path: PathBuf, mod_name: String) -> PathBuf {
file_path.pop();
let file_name = PathBuf::from(&mod_name).with_extension("java");
    // In testing mode, we do not deal with nested repositories (`strip_prefix` does not work because `file_path` is a file and not a directory).
let file_path =
if config.testing_mode {
file_name
}
else {
PathBuf::from(file_path.join(file_name).strip_prefix(&config.input).unwrap())
};
config.output.join(file_path)
}
pub fn mod_name(&self) -> String {
self.module_name.clone()
}
pub fn input_path_str(&self) -> String {
format!("{}", self.input_path.display())
}
pub fn input_path<'a>(&'a self) -> &'a Path {
self.input_path.as_path()
}
pub fn write_output(&self, output: String) {
let output_path = self.output_path.clone().expect(
"Try to compile a library file (this is a bug).");
self.build_output_directory(output_path.clone());
let mut file = OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(output_path.clone())
.expect(&format!("Output file ({})", output_path.to_str().unwrap_or("<invalid UTF8>")));
file.write_fmt(format_args!("{}", output)).unwrap();
}
fn build_output_directory(&self, output_path: PathBuf) {
if let Some(dir_path) = output_path.parent() {
DirBuilder::new()
.recursive(true)
.create(dir_path)
.expect("Recursive creation of directory for the output file.");
}
}
}
|
core_file
|
identifier_name
|
module_file.rs
|
// Copyright 2016 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use driver::Config;
use std::path::{Path, PathBuf};
use std::io::prelude::*;
use std::fs::{OpenOptions, DirBuilder};
#[derive(Clone, Debug)]
pub struct ModuleFile
{
input_path: PathBuf,
output_path: Option<PathBuf>,
module_name: String
}
impl ModuleFile
{
pub fn new(config: &Config, file_path: PathBuf, lib: bool) -> Option<Self> {
if let Some(mod_name) = Self::extract_mod_name(file_path.clone()) {
let mod_file = match lib {
false => Self::core_file(config, file_path, mod_name),
true => Self::lib_file(file_path, mod_name),
};
return Some(mod_file);
}
None
}
pub fn extract_mod_name(file_path: PathBuf) -> Option<String> {
if let Some(ext) = file_path.clone().extension() {
if ext == "java" {
let p = file_path.clone();
let bonsai_file = Path::new(p.file_stem().unwrap());
if let Some(bonsai_ext) = bonsai_file.extension() {
if bonsai_ext == "bonsai" {
let mod_name = String::from(bonsai_file.file_stem().unwrap().to_str().unwrap());
return Some(mod_name);
}
}
}
}
None
}
pub fn is_lib(&self) -> bool {
self.output_path.is_none()
}
fn core_file(config: &Config, file_path: PathBuf, mod_name: String) -> Self {
ModuleFile {
input_path: file_path.clone(),
output_path: Some(Self::build_output_path(config, file_path, mod_name.clone())),
module_name: mod_name
}
}
fn lib_file(file_path: PathBuf, mod_name: String) -> Self {
ModuleFile {
input_path: file_path.clone(),
output_path: None,
module_name: mod_name
}
}
fn build_output_path(config: &Config, mut file_path: PathBuf, mod_name: String) -> PathBuf {
file_path.pop();
let file_name = PathBuf::from(&mod_name).with_extension("java");
    // In testing mode, we do not deal with nested repositories (`strip_prefix` does not work because `file_path` is a file and not a directory).
let file_path =
if config.testing_mode {
file_name
}
else {
PathBuf::from(file_path.join(file_name).strip_prefix(&config.input).unwrap())
};
config.output.join(file_path)
}
pub fn mod_name(&self) -> String {
self.module_name.clone()
}
pub fn input_path_str(&self) -> String {
format!("{}", self.input_path.display())
}
pub fn input_path<'a>(&'a self) -> &'a Path {
self.input_path.as_path()
}
pub fn write_output(&self, output: String)
|
fn build_output_directory(&self, output_path: PathBuf) {
if let Some(dir_path) = output_path.parent() {
DirBuilder::new()
.recursive(true)
.create(dir_path)
.expect("Recursive creation of directory for the output file.");
}
}
}
|
{
let output_path = self.output_path.clone().expect(
"Try to compile a library file (this is a bug).");
self.build_output_directory(output_path.clone());
let mut file = OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(output_path.clone())
.expect(&format!("Output file ({})", output_path.to_str().unwrap_or("<invalid UTF8>")));
file.write_fmt(format_args!("{}", output)).unwrap();
}
|
identifier_body
|
branch.rs
|
//! Represents Git branches.
use object_id::ObjectId;
use object_database::find_object_by_id;
use error::GitError::CorruptRepository;
use error::GitError;
use repository::Repository;
use commit::Commit;
use commit;
use git_object::GitObject::GitCommit;
use commit_sort_strategy::CommitSortStrategy::MostRecent;
use commit_filter::CommitFilter;
pub struct Branch {
pub name: String,
pub canonical_name: String,
pub tip_id: ObjectId,
pub tracked_branch: Option<Box<Branch>>,
pub is_remote: bool,
pub is_detached: bool,
}
impl Branch {
pub fn is_tracking(&self) -> bool {
self.tracked_branch.is_some()
}
}
pub fn
|
(repository: &Repository, branch: &Branch) -> Result<Commit, GitError> {
match try!(find_object_by_id(repository, &branch.tip_id)) {
box GitCommit(c) => Ok(c),
_ => Err(CorruptRepository("Could not find the commit the branch points to".into_cow())),
}
}
pub fn commits(repository: &Repository, branch: &Branch) -> Result<Vec<Commit>, GitError> {
let filter = CommitFilter {
since: Some(vec![branch.tip_id.clone()]),
until: None,
limit: -1,
offset: 0,
sort: MostRecent,
};
commit::find(repository, filter)
}
|
tip
|
identifier_name
|
branch.rs
|
//! Represents Git branches.
use object_id::ObjectId;
use object_database::find_object_by_id;
use error::GitError::CorruptRepository;
use error::GitError;
use repository::Repository;
use commit::Commit;
use commit;
use git_object::GitObject::GitCommit;
use commit_sort_strategy::CommitSortStrategy::MostRecent;
use commit_filter::CommitFilter;
pub struct Branch {
pub name: String,
pub canonical_name: String,
pub tip_id: ObjectId,
pub tracked_branch: Option<Box<Branch>>,
pub is_remote: bool,
pub is_detached: bool,
}
impl Branch {
pub fn is_tracking(&self) -> bool {
self.tracked_branch.is_some()
}
}
pub fn tip(repository: &Repository, branch: &Branch) -> Result<Commit, GitError> {
match try!(find_object_by_id(repository, &branch.tip_id)) {
box GitCommit(c) => Ok(c),
_ => Err(CorruptRepository("Could not find the commit the branch points to".into_cow())),
}
}
pub fn commits(repository: &Repository, branch: &Branch) -> Result<Vec<Commit>, GitError>
|
{
let filter = CommitFilter {
since: Some(vec![branch.tip_id.clone()]),
until: None,
limit: -1,
offset: 0,
sort: MostRecent,
};
commit::find(repository, filter)
}
|
identifier_body
|
|
branch.rs
|
//! Represents Git branches.
use object_id::ObjectId;
use object_database::find_object_by_id;
use error::GitError::CorruptRepository;
use error::GitError;
|
use git_object::GitObject::GitCommit;
use commit_sort_strategy::CommitSortStrategy::MostRecent;
use commit_filter::CommitFilter;
pub struct Branch {
pub name: String,
pub canonical_name: String,
pub tip_id: ObjectId,
pub tracked_branch: Option<Box<Branch>>,
pub is_remote: bool,
pub is_detached: bool,
}
impl Branch {
pub fn is_tracking(&self) -> bool {
self.tracked_branch.is_some()
}
}
pub fn tip(repository: &Repository, branch: &Branch) -> Result<Commit, GitError> {
match try!(find_object_by_id(repository, &branch.tip_id)) {
box GitCommit(c) => Ok(c),
_ => Err(CorruptRepository("Could not find the commit the branch points to".into_cow())),
}
}
pub fn commits(repository: &Repository, branch: &Branch) -> Result<Vec<Commit>, GitError> {
let filter = CommitFilter {
since: Some(vec![branch.tip_id.clone()]),
until: None,
limit: -1,
offset: 0,
sort: MostRecent,
};
commit::find(repository, filter)
}
|
use repository::Repository;
use commit::Commit;
use commit;
|
random_line_split
|
mod.rs
|
//! Bounding volumes.
#[doc(inline)]
pub use bounding_volume::bounding_volume::{HasBoundingVolume, BoundingVolume};
#[doc(inline)]
pub use bounding_volume::aabb::{HasAABB, AABB, aabb};
#[doc(inline)]
pub use bounding_volume::bounding_sphere::{HasBoundingSphere, BoundingSphere, bounding_sphere};
pub use bounding_volume::aabb_utils::{implicit_shape_aabb, point_cloud_aabb};
pub use bounding_volume::aabb_ball::ball_aabb;
pub use bounding_volume::bounding_sphere_utils::{point_cloud_bounding_sphere_with_center, point_cloud_bounding_sphere};
pub use bounding_volume::bounding_volume_bvt::BoundingVolumeInterferencesCollector;
use na::{Pnt2, Pnt3};
#[doc(hidden)]
pub mod bounding_volume;
mod bounding_volume_bvt;
#[doc(hidden)]
pub mod aabb;
mod aabb_cuboid;
mod aabb_support_map;
mod aabb_ball;
mod aabb_plane;
mod aabb_convex;
mod aabb_compound;
mod aabb_mesh;
mod aabb_utils;
mod aabb_repr;
#[doc(hidden)]
pub mod bounding_sphere;
mod bounding_sphere_cuboid;
mod bounding_sphere_cone;
mod bounding_sphere_ball;
mod bounding_sphere_cylinder;
mod bounding_sphere_capsule;
mod bounding_sphere_plane;
mod bounding_sphere_convex;
mod bounding_sphere_compound;
mod bounding_sphere_triangle;
mod bounding_sphere_segment;
mod bounding_sphere_mesh;
mod bounding_sphere_utils;
mod bounding_sphere_repr;
/*
*
* Aliases.
*
*/
/// A 2D bounding sphere.
pub type BoundingSphere2<N> = BoundingSphere<Pnt2<N>>;
/// A 2D AABB.
pub type AABB2<N> = AABB<Pnt2<N>>;
/// A 3D bounding sphere.
pub type BoundingSphere3<N> = BoundingSphere<Pnt3<N>>;
/// A 3D AABB.
|
pub type AABB3<N> = AABB<Pnt3<N>>;
|
random_line_split
|
|
error.rs
|
extern crate iron;
extern crate time;
use iron::prelude::*;
use iron::{Handler, BeforeMiddleware};
use iron::status;
use std::error::Error;
use std::fmt::{self, Debug};
struct ErrorHandler;
struct ErrorProducer;
#[derive(Debug)]
struct StringError(String);
impl fmt::Display for StringError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Debug::fmt(self, f)
}
}
impl Error for StringError {
fn description(&self) -> &str { &*self.0 }
}
impl Handler for ErrorHandler {
fn handle(&self, _: &mut Request) -> IronResult<Response> {
Ok(Response::new())
}
}
impl BeforeMiddleware for ErrorProducer {
fn before(&self, _: &mut Request) -> IronResult<()>
|
}
fn main() {
// Handler is attached here.
let mut chain = Chain::new(ErrorHandler);
// Link our error maker.
chain.link_before(ErrorProducer);
Iron::new(chain).http("localhost:3000").unwrap();
}
|
{
Err(IronError::new(StringError("Error".to_string()), status::Ok))
}
|
identifier_body
|
error.rs
|
extern crate iron;
extern crate time;
use iron::prelude::*;
use iron::{Handler, BeforeMiddleware};
use iron::status;
use std::error::Error;
use std::fmt::{self, Debug};
struct ErrorHandler;
struct ErrorProducer;
#[derive(Debug)]
struct StringError(String);
impl fmt::Display for StringError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Debug::fmt(self, f)
}
}
impl Error for StringError {
fn description(&self) -> &str { &*self.0 }
}
impl Handler for ErrorHandler {
fn handle(&self, _: &mut Request) -> IronResult<Response> {
Ok(Response::new())
}
}
impl BeforeMiddleware for ErrorProducer {
fn before(&self, _: &mut Request) -> IronResult<()> {
Err(IronError::new(StringError("Error".to_string()), status::Ok))
}
}
fn main() {
// Handler is attached here.
|
// Link our error maker.
chain.link_before(ErrorProducer);
Iron::new(chain).http("localhost:3000").unwrap();
}
|
let mut chain = Chain::new(ErrorHandler);
|
random_line_split
|
error.rs
|
extern crate iron;
extern crate time;
use iron::prelude::*;
use iron::{Handler, BeforeMiddleware};
use iron::status;
use std::error::Error;
use std::fmt::{self, Debug};
struct ErrorHandler;
struct ErrorProducer;
#[derive(Debug)]
struct StringError(String);
impl fmt::Display for StringError {
fn
|
(&self, f: &mut fmt::Formatter) -> fmt::Result {
Debug::fmt(self, f)
}
}
impl Error for StringError {
fn description(&self) -> &str { &*self.0 }
}
impl Handler for ErrorHandler {
fn handle(&self, _: &mut Request) -> IronResult<Response> {
Ok(Response::new())
}
}
impl BeforeMiddleware for ErrorProducer {
fn before(&self, _: &mut Request) -> IronResult<()> {
Err(IronError::new(StringError("Error".to_string()), status::Ok))
}
}
fn main() {
// Handler is attached here.
let mut chain = Chain::new(ErrorHandler);
// Link our error maker.
chain.link_before(ErrorProducer);
Iron::new(chain).http("localhost:3000").unwrap();
}
|
fmt
|
identifier_name
|
mod.rs
|
/*
TODO
Add support for AOP (low priority)
*/
use token::Token as Token;
use common::ParseError as ParseError;
use type_name;
use type_name::TypeName as TypeName;
mod test;
pub struct
|
{
pub type_name: TypeName,
dependencies: Vec<TypeName>
}
impl Def {
pub fn accessor_at(&self, index: usize) -> &TypeName {
&self.dependencies[index]
}
}
pub fn get_def(tokens: &Vec<Token>, start: usize) -> (Result<Box<Def>, ParseError>, usize) {
let (type_name_result, seek) = type_name::get_type_name(&tokens, start);
return match type_name_result {
Err(e) => (Err(e), seek),
Ok(type_name) => after_type_name(&tokens, seek, type_name)
}
}
fn after_type_name(tokens: &Vec<Token>, start: usize, type_name: Box<TypeName>) -> (Result<Box<Def>, ParseError>, usize) {
let def = Box::new(Def {
type_name: *type_name,
dependencies: Vec::new()
});
if tokens[start].text == ":" {
return dependency(&tokens, start + 1, def);
}
return (Ok(def), start);
}
fn dependency(tokens: &Vec<Token>, start: usize, mut def: Box<Def>) -> (Result<Box<Def>, ParseError>, usize) {
let (type_name_result, seek) = type_name::get_type_name(&tokens, start);
return match type_name_result {
Err(e) => (Err(e), seek),
Ok(type_name) => {
def.dependencies.push(*type_name);
if tokens[seek].text == "," {
return dependency(&tokens, seek + 1, def);
} else {
(Ok(def), seek)
}
}
}
}
|
Def
|
identifier_name
|
mod.rs
|
/*
TODO
Add support for AOP (low priority)
*/
use token::Token as Token;
use common::ParseError as ParseError;
use type_name;
use type_name::TypeName as TypeName;
mod test;
pub struct Def {
pub type_name: TypeName,
dependencies: Vec<TypeName>
}
impl Def {
pub fn accessor_at(&self, index: usize) -> &TypeName {
&self.dependencies[index]
}
}
pub fn get_def(tokens: &Vec<Token>, start: usize) -> (Result<Box<Def>, ParseError>, usize) {
let (type_name_result, seek) = type_name::get_type_name(&tokens, start);
return match type_name_result {
Err(e) => (Err(e), seek),
Ok(type_name) => after_type_name(&tokens, seek, type_name)
}
}
fn after_type_name(tokens: &Vec<Token>, start: usize, type_name: Box<TypeName>) -> (Result<Box<Def>, ParseError>, usize) {
let def = Box::new(Def {
type_name: *type_name,
dependencies: Vec::new()
});
if tokens[start].text == ":" {
return dependency(&tokens, start + 1, def);
}
return (Ok(def), start);
|
fn dependency(tokens: &Vec<Token>, start: usize, mut def: Box<Def>) -> (Result<Box<Def>, ParseError>, usize) {
let (type_name_result, seek) = type_name::get_type_name(&tokens, start);
return match type_name_result {
Err(e) => (Err(e), seek),
Ok(type_name) => {
def.dependencies.push(*type_name);
if tokens[seek].text == "," {
return dependency(&tokens, seek + 1, def);
} else {
(Ok(def), seek)
}
}
}
}
|
}
|
random_line_split
|
mod.rs
|
/*
TODO
Add support for AOP (low priority)
*/
use token::Token as Token;
use common::ParseError as ParseError;
use type_name;
use type_name::TypeName as TypeName;
mod test;
pub struct Def {
pub type_name: TypeName,
dependencies: Vec<TypeName>
}
impl Def {
pub fn accessor_at(&self, index: usize) -> &TypeName {
&self.dependencies[index]
}
}
pub fn get_def(tokens: &Vec<Token>, start: usize) -> (Result<Box<Def>, ParseError>, usize) {
let (type_name_result, seek) = type_name::get_type_name(&tokens, start);
return match type_name_result {
Err(e) => (Err(e), seek),
Ok(type_name) => after_type_name(&tokens, seek, type_name)
}
}
fn after_type_name(tokens: &Vec<Token>, start: usize, type_name: Box<TypeName>) -> (Result<Box<Def>, ParseError>, usize) {
let def = Box::new(Def {
type_name: *type_name,
dependencies: Vec::new()
});
if tokens[start].text == ":" {
return dependency(&tokens, start + 1, def);
}
return (Ok(def), start);
}
fn dependency(tokens: &Vec<Token>, start: usize, mut def: Box<Def>) -> (Result<Box<Def>, ParseError>, usize) {
let (type_name_result, seek) = type_name::get_type_name(&tokens, start);
return match type_name_result {
Err(e) => (Err(e), seek),
Ok(type_name) =>
|
}
}
|
{
def.dependencies.push(*type_name);
if tokens[seek].text == "," {
return dependency(&tokens, seek + 1, def);
} else {
(Ok(def), seek)
}
}
|
conditional_block
|
mod.rs
|
/*
TODO
Add support for AOP (low priority)
*/
use token::Token as Token;
use common::ParseError as ParseError;
use type_name;
use type_name::TypeName as TypeName;
mod test;
pub struct Def {
pub type_name: TypeName,
dependencies: Vec<TypeName>
}
impl Def {
pub fn accessor_at(&self, index: usize) -> &TypeName {
&self.dependencies[index]
}
}
pub fn get_def(tokens: &Vec<Token>, start: usize) -> (Result<Box<Def>, ParseError>, usize) {
let (type_name_result, seek) = type_name::get_type_name(&tokens, start);
return match type_name_result {
Err(e) => (Err(e), seek),
Ok(type_name) => after_type_name(&tokens, seek, type_name)
}
}
fn after_type_name(tokens: &Vec<Token>, start: usize, type_name: Box<TypeName>) -> (Result<Box<Def>, ParseError>, usize) {
let def = Box::new(Def {
type_name: *type_name,
dependencies: Vec::new()
});
if tokens[start].text == ":" {
return dependency(&tokens, start + 1, def);
}
return (Ok(def), start);
}
fn dependency(tokens: &Vec<Token>, start: usize, mut def: Box<Def>) -> (Result<Box<Def>, ParseError>, usize)
|
{
let (type_name_result, seek) = type_name::get_type_name(&tokens, start);
return match type_name_result {
Err(e) => (Err(e), seek),
Ok(type_name) => {
def.dependencies.push(*type_name);
if tokens[seek].text == "," {
return dependency(&tokens, seek + 1, def);
} else {
(Ok(def), seek)
}
}
}
}
|
identifier_body
|
|
ipc_memory_mapper.rs
|
// Copyright 2022 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Provide utility to communicate with an iommu in another process
use std::ops::Deref;
use std::result;
use base::{AsRawDescriptor, AsRawDescriptors, RawDescriptor, Tube};
use serde::{Deserialize, Serialize};
use crate::virtio::memory_mapper::{Error, MemRegion, Translate};
pub type Result<T> = result::Result<T, Error>;
#[derive(Serialize, Deserialize)]
pub struct TranslateRequest {
pub endpoint_id: u32,
pub iova: u64,
pub size: u64,
}
/// Sends an addr translation request to another process using `Tube`, and
/// gets the translated addr from another `Tube`
pub struct IpcMemoryMapper {
request_tx: Tube,
response_rx: Tube,
endpoint_id: u32,
}
impl IpcMemoryMapper {
/// Returns a new `IpcMemoryMapper` instance.
///
/// # Arguments
///
/// * `request_tx` - A tube to send `TranslateRequest` to another process.
/// * `response_rx` - A tube to receive `Option<Vec<MemRegion>>`
/// * `endpoint_id` - For the remote iommu to identify the device/ipc mapper.
pub fn new(request_tx: Tube, response_rx: Tube, endpoint_id: u32) -> Self {
Self {
request_tx,
response_rx,
endpoint_id,
}
}
}
impl Translate for IpcMemoryMapper {
fn
|
(&self, iova: u64, size: u64) -> Result<Vec<MemRegion>> {
let req = TranslateRequest {
endpoint_id: self.endpoint_id,
iova,
size,
};
self.request_tx.send(&req).map_err(Error::Tube)?;
let res: Option<Vec<MemRegion>> = self.response_rx.recv().map_err(Error::Tube)?;
res.ok_or(Error::InvalidIOVA(iova, size))
}
}
impl AsRawDescriptors for IpcMemoryMapper {
fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
vec![
self.request_tx.as_raw_descriptor(),
self.response_rx.as_raw_descriptor(),
]
}
}
impl Translate for std::sync::MutexGuard<'_, IpcMemoryMapper> {
fn translate(&self, iova: u64, size: u64) -> Result<Vec<MemRegion>> {
self.deref().translate(iova, size)
}
}
pub struct CreateIpcMapperRet {
pub mapper: IpcMemoryMapper,
pub response_tx: Tube,
}
/// Returns a new `IpcMemoryMapper` instance and a response_tx for the iommu
/// to respond to `TranslateRequest`s.
///
/// # Arguments
///
/// * `endpoint_id` - For the remote iommu to identify the device/ipc mapper.
/// * `request_tx` - A tube to send `TranslateRequest` to a remote iommu. This
/// should be cloned and shared between different ipc mappers
/// with different `endpoint_id`s.
pub fn create_ipc_mapper(endpoint_id: u32, request_tx: Tube) -> CreateIpcMapperRet {
let (response_tx, response_rx) = Tube::pair().expect("failed to create tube pair");
CreateIpcMapperRet {
mapper: IpcMemoryMapper::new(request_tx, response_rx, endpoint_id),
response_tx,
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::virtio::memory_mapper::Permission;
use std::thread;
use vm_memory::GuestAddress;
#[test]
fn test() {
let (request_tx, request_rx) = Tube::pair().expect("failed to create tube pair");
let CreateIpcMapperRet {
mapper,
response_tx,
} = create_ipc_mapper(3, request_tx);
let user_handle = thread::spawn(move || {
assert!(mapper
.translate(0x555, 1)
.unwrap()
.iter()
.zip(&vec![MemRegion {
gpa: GuestAddress(0x777),
len: 1,
perm: Permission::RW,
},])
.all(|(a, b)| a == b));
});
let iommu_handle = thread::spawn(move || {
let TranslateRequest {
endpoint_id,
iova,
size,
} = request_rx.recv().unwrap();
assert_eq!(endpoint_id, 3);
assert_eq!(iova, 0x555);
assert_eq!(size, 1);
response_tx
.send(&Some(vec![MemRegion {
gpa: GuestAddress(0x777),
len: 1,
perm: Permission::RW,
}]))
.unwrap();
});
iommu_handle.join().unwrap();
user_handle.join().unwrap();
}
}
|
translate
|
identifier_name
|
ipc_memory_mapper.rs
|
// Copyright 2022 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Provide utility to communicate with an iommu in another process
use std::ops::Deref;
use std::result;
use base::{AsRawDescriptor, AsRawDescriptors, RawDescriptor, Tube};
use serde::{Deserialize, Serialize};
use crate::virtio::memory_mapper::{Error, MemRegion, Translate};
pub type Result<T> = result::Result<T, Error>;
#[derive(Serialize, Deserialize)]
pub struct TranslateRequest {
pub endpoint_id: u32,
pub iova: u64,
pub size: u64,
}
/// Sends an addr translation request to another process using `Tube`, and
/// gets the translated addr from another `Tube`
pub struct IpcMemoryMapper {
request_tx: Tube,
response_rx: Tube,
endpoint_id: u32,
}
impl IpcMemoryMapper {
/// Returns a new `IpcMemoryMapper` instance.
///
/// # Arguments
///
/// * `request_tx` - A tube to send `TranslateRequest` to another process.
/// * `response_rx` - A tube to receive `Option<Vec<MemRegion>>`
/// * `endpoint_id` - For the remote iommu to identify the device/ipc mapper.
pub fn new(request_tx: Tube, response_rx: Tube, endpoint_id: u32) -> Self {
Self {
request_tx,
response_rx,
endpoint_id,
}
}
}
impl Translate for IpcMemoryMapper {
fn translate(&self, iova: u64, size: u64) -> Result<Vec<MemRegion>> {
let req = TranslateRequest {
endpoint_id: self.endpoint_id,
iova,
size,
};
self.request_tx.send(&req).map_err(Error::Tube)?;
let res: Option<Vec<MemRegion>> = self.response_rx.recv().map_err(Error::Tube)?;
res.ok_or(Error::InvalidIOVA(iova, size))
}
}
impl AsRawDescriptors for IpcMemoryMapper {
fn as_raw_descriptors(&self) -> Vec<RawDescriptor>
|
}
impl Translate for std::sync::MutexGuard<'_, IpcMemoryMapper> {
fn translate(&self, iova: u64, size: u64) -> Result<Vec<MemRegion>> {
self.deref().translate(iova, size)
}
}
pub struct CreateIpcMapperRet {
pub mapper: IpcMemoryMapper,
pub response_tx: Tube,
}
/// Returns a new `IpcMemoryMapper` instance and a response_tx for the iommu
/// to respond to `TranslateRequest`s.
///
/// # Arguments
///
/// * `endpoint_id` - For the remote iommu to identify the device/ipc mapper.
/// * `request_tx` - A tube to send `TranslateRequest` to a remote iommu. This
/// should be cloned and shared between different ipc mappers
/// with different `endpoint_id`s.
pub fn create_ipc_mapper(endpoint_id: u32, request_tx: Tube) -> CreateIpcMapperRet {
let (response_tx, response_rx) = Tube::pair().expect("failed to create tube pair");
CreateIpcMapperRet {
mapper: IpcMemoryMapper::new(request_tx, response_rx, endpoint_id),
response_tx,
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::virtio::memory_mapper::Permission;
use std::thread;
use vm_memory::GuestAddress;
#[test]
fn test() {
let (request_tx, request_rx) = Tube::pair().expect("failed to create tube pair");
let CreateIpcMapperRet {
mapper,
response_tx,
} = create_ipc_mapper(3, request_tx);
let user_handle = thread::spawn(move || {
assert!(mapper
.translate(0x555, 1)
.unwrap()
.iter()
.zip(&vec![MemRegion {
gpa: GuestAddress(0x777),
len: 1,
perm: Permission::RW,
},])
.all(|(a, b)| a == b));
});
let iommu_handle = thread::spawn(move || {
let TranslateRequest {
endpoint_id,
iova,
size,
} = request_rx.recv().unwrap();
assert_eq!(endpoint_id, 3);
assert_eq!(iova, 0x555);
assert_eq!(size, 1);
response_tx
.send(&Some(vec![MemRegion {
gpa: GuestAddress(0x777),
len: 1,
perm: Permission::RW,
}]))
.unwrap();
});
iommu_handle.join().unwrap();
user_handle.join().unwrap();
}
}
|
{
vec![
self.request_tx.as_raw_descriptor(),
self.response_rx.as_raw_descriptor(),
]
}
|
identifier_body
|
ipc_memory_mapper.rs
|
// Copyright 2022 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Provide utility to communicate with an iommu in another process
use std::ops::Deref;
use std::result;
use base::{AsRawDescriptor, AsRawDescriptors, RawDescriptor, Tube};
use serde::{Deserialize, Serialize};
use crate::virtio::memory_mapper::{Error, MemRegion, Translate};
pub type Result<T> = result::Result<T, Error>;
#[derive(Serialize, Deserialize)]
pub struct TranslateRequest {
pub endpoint_id: u32,
pub iova: u64,
pub size: u64,
}
/// Sends an addr translation request to another process using `Tube`, and
/// gets the translated addr from another `Tube`
pub struct IpcMemoryMapper {
request_tx: Tube,
response_rx: Tube,
endpoint_id: u32,
}
impl IpcMemoryMapper {
/// Returns a new `IpcMemoryMapper` instance.
///
/// # Arguments
///
/// * `request_tx` - A tube to send `TranslateRequest` to another process.
/// * `response_rx` - A tube to receive `Option<Vec<MemRegion>>`
/// * `endpoint_id` - For the remote iommu to identify the device/ipc mapper.
pub fn new(request_tx: Tube, response_rx: Tube, endpoint_id: u32) -> Self {
Self {
request_tx,
response_rx,
endpoint_id,
}
}
}
impl Translate for IpcMemoryMapper {
fn translate(&self, iova: u64, size: u64) -> Result<Vec<MemRegion>> {
let req = TranslateRequest {
endpoint_id: self.endpoint_id,
iova,
size,
};
self.request_tx.send(&req).map_err(Error::Tube)?;
let res: Option<Vec<MemRegion>> = self.response_rx.recv().map_err(Error::Tube)?;
res.ok_or(Error::InvalidIOVA(iova, size))
}
}
impl AsRawDescriptors for IpcMemoryMapper {
fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
vec![
self.request_tx.as_raw_descriptor(),
self.response_rx.as_raw_descriptor(),
]
}
}
impl Translate for std::sync::MutexGuard<'_, IpcMemoryMapper> {
fn translate(&self, iova: u64, size: u64) -> Result<Vec<MemRegion>> {
self.deref().translate(iova, size)
}
}
pub struct CreateIpcMapperRet {
pub mapper: IpcMemoryMapper,
pub response_tx: Tube,
}
/// Returns a new `IpcMemoryMapper` instance and a response_tx for the iommu
|
/// to respond to `TranslateRequest`s.
///
/// # Arguments
///
/// * `endpoint_id` - For the remote iommu to identify the device/ipc mapper.
/// * `request_tx` - A tube to send `TranslateRequest` to a remote iommu. This
/// should be cloned and shared between different ipc mappers
/// with different `endpoint_id`s.
pub fn create_ipc_mapper(endpoint_id: u32, request_tx: Tube) -> CreateIpcMapperRet {
let (response_tx, response_rx) = Tube::pair().expect("failed to create tube pair");
CreateIpcMapperRet {
mapper: IpcMemoryMapper::new(request_tx, response_rx, endpoint_id),
response_tx,
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::virtio::memory_mapper::Permission;
use std::thread;
use vm_memory::GuestAddress;
#[test]
fn test() {
let (request_tx, request_rx) = Tube::pair().expect("failed to create tube pair");
let CreateIpcMapperRet {
mapper,
response_tx,
} = create_ipc_mapper(3, request_tx);
let user_handle = thread::spawn(move || {
assert!(mapper
.translate(0x555, 1)
.unwrap()
.iter()
.zip(&vec![MemRegion {
gpa: GuestAddress(0x777),
len: 1,
perm: Permission::RW,
},])
.all(|(a, b)| a == b));
});
let iommu_handle = thread::spawn(move || {
let TranslateRequest {
endpoint_id,
iova,
size,
} = request_rx.recv().unwrap();
assert_eq!(endpoint_id, 3);
assert_eq!(iova, 0x555);
assert_eq!(size, 1);
response_tx
.send(&Some(vec![MemRegion {
gpa: GuestAddress(0x777),
len: 1,
perm: Permission::RW,
}]))
.unwrap();
});
iommu_handle.join().unwrap();
user_handle.join().unwrap();
}
}
|
random_line_split
|
|
send_txs.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use anyhow::format_err;
use exonum::{
blockchain::{ApiSender, Blockchain},
crypto::KeyPair,
helpers::Height,
merkledb::ObjectHash,
messages::{AnyTx, Verified},
runtime::SnapshotExt,
};
use exonum_node::FlushPoolStrategy;
use exonum_rust_runtime::{
spec::{Deploy, Spec},
DefaultInstance,
};
use futures::future;
use structopt::StructOpt;
use tokio::time::delay_for;
use std::{
fmt,
sync::{Arc, Mutex},
time::{Duration, Instant},
};
use exonum_soak_tests::{
services::{MainConfig, MainService, MainServiceInterface},
NetworkBuilder, RunHandle,
};
/// Runs a network with a service and sends transactions to it, measuring how fast
/// transactions appear in the mempool and are confirmed.
#[derive(Debug, StructOpt)]
#[structopt(name = "send_txs", set_term_width = 80)]
struct Args {
/// Number of nodes in the network.
#[structopt(name = "nodes", default_value = "4")]
node_count: u16,
/// Number of transactions to send. If not specified, the test will run infinitely.
#[structopt(name = "tx-count", long, short = "T")]
tx_count: Option<u64>,
/// Pool flushing strategy serialized in a TOML-like format (e.g., `never`, `immediate`
/// or `timeout=20`).
#[structopt(name = "flush", long, short = "f", parse(try_from_str = parse_strategy))]
flush_strategy: Option<FlushPoolStrategy>,
/// Intensity of the test, in transactions per second. Sensible values are up to several
/// hundred tps.
#[structopt(name = "tps", long, short = "t", default_value = "10")]
tps: usize,
}
fn parse_strategy(s: &str) -> anyhow::Result<FlushPoolStrategy> {
match s.trim() {
"never" => Ok(FlushPoolStrategy::Never),
"immediate" => Ok(FlushPoolStrategy::Immediate),
s if s.starts_with("timeout=") => {
// 8 is the length of "timeout=".
let timeout: u64 = s[8..].parse()?;
Ok(FlushPoolStrategy::Timeout { timeout })
}
_ => Err(format_err!("Invalid pool flushing strategy")),
}
}
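// Illustrative check, not in the original test suite: it exercises only the strings
// that `parse_strategy` above already handles; the `FlushPoolStrategy` variants are
// taken verbatim from the match arms, nothing else is assumed.
#[cfg(test)]
mod parse_strategy_tests {
    use super::*;

    #[test]
    fn recognizes_all_supported_forms() {
        assert!(matches!(parse_strategy("never"), Ok(FlushPoolStrategy::Never)));
        assert!(matches!(parse_strategy("immediate"), Ok(FlushPoolStrategy::Immediate)));
        assert!(matches!(
            parse_strategy("timeout=20"),
            Ok(FlushPoolStrategy::Timeout { timeout: 20 })
        ));
        // Anything else is rejected with the "Invalid pool flushing strategy" error.
        assert!(parse_strategy("sometimes").is_err());
    }
}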
#[derive(Default)]
struct TimingStats {
total_duration: Duration,
max_duration: Duration,
samples: usize,
}
impl fmt::Display for TimingStats {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result
|
}
impl TimingStats {
fn push(&mut self, dur: Duration) {
if self.max_duration < dur {
self.max_duration = dur;
}
self.total_duration += dur;
self.samples += 1;
}
}
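// Small worked example, added for illustration only: two samples of 10 ms and 30 ms
// produce the average and maximum that the `Display` impl of `TimingStats` reports.
#[cfg(test)]
mod timing_stats_tests {
    use super::TimingStats;
    use std::time::Duration;

    #[test]
    fn averages_and_tracks_maximum() {
        let mut stats = TimingStats::default();
        stats.push(Duration::from_millis(10));
        stats.push(Duration::from_millis(30));
        // (10 ms + 30 ms) / 2 samples = 20 ms average, 30 ms maximum.
        assert_eq!(stats.to_string(), "avg: 20 ms, max: 30 ms");
    }
}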
async fn transaction_task(
transaction: Verified<AnyTx>,
sender: ApiSender,
blockchain: Blockchain,
times_to_pool: Arc<Mutex<TimingStats>>,
times_to_commit: Arc<Mutex<TimingStats>>,
) {
/// Poll delay for each transaction.
const POLL_DELAY: Duration = Duration::from_millis(5);
let tx_hash = transaction.object_hash();
sender.broadcast_transaction(transaction).await.unwrap();
let start = Instant::now();
let mut in_pool = false;
loop {
// The additional block scope is needed to not spill vars across the `await` boundary.
{
let snapshot = blockchain.snapshot();
let snapshot = snapshot.for_core();
let tx_pool = snapshot.transactions_pool();
let tx_locations = snapshot.transactions_locations();
let now = Instant::now();
if tx_locations.contains(&tx_hash) {
log::trace!("Transaction {} is committed", tx_hash);
if !in_pool {
times_to_pool.lock().unwrap().push(now - start);
}
times_to_commit.lock().unwrap().push(now - start);
break;
} else if !in_pool && tx_pool.contains(&tx_hash) {
log::trace!("Transaction {} appeared in pool", tx_hash);
times_to_pool.lock().unwrap().push(now - start);
in_pool = true;
}
}
delay_for(POLL_DELAY).await;
}
}
#[tokio::main]
async fn main() {
exonum::crypto::init();
exonum::helpers::init_logger().ok();
let args = Args::from_args();
println!("Running test with {:?}", args);
let config = MainConfig {
generate_tx_in_after_commit: false,
};
let main_service = Spec::new(MainService).with_instance(
MainService::INSTANCE_ID,
MainService::INSTANCE_NAME,
config,
);
let flush_strategy = args.flush_strategy.unwrap_or_default();
let nodes = NetworkBuilder::new(args.node_count, 2_000)
.modify_config(|node_cfg| {
node_cfg.mempool.flush_pool_strategy = flush_strategy.clone();
})
.init_node(|genesis, rt| main_service.clone().deploy(genesis, rt))
.build();
let keys = KeyPair::random();
let delay = Duration::from_secs(1).mul_f64(1.0 / args.tps as f64);
loop {
let height = nodes[0].blockchain().last_block().height;
if height > Height(0) {
break;
}
delay_for(Duration::from_millis(200)).await;
}
log::info!("Started sending transactions");
let times_to_pool = Arc::new(Mutex::new(TimingStats::default()));
let times_to_commit = Arc::new(Mutex::new(TimingStats::default()));
let mut prev_report_time = Instant::now();
for i in 0..args.tx_count.unwrap_or_else(u64::max_value) {
let tx = keys.timestamp(MainService::INSTANCE_ID, Height(i));
let sender = nodes[0].blockchain().sender().to_owned();
let blockchain = nodes.last().unwrap().blockchain().to_owned();
let tx_task = transaction_task(
tx,
sender,
blockchain,
Arc::clone(&times_to_pool),
Arc::clone(&times_to_commit),
);
tokio::spawn(tx_task);
delay_for(delay).await;
let now = Instant::now();
if now - prev_report_time >= Duration::from_secs(1) {
prev_report_time = now;
println!(
"Transactions: {} total, {} committed",
i + 1,
times_to_commit.lock().unwrap().samples
);
println!("Time to pool: {}", times_to_pool.lock().unwrap());
println!("Time to commit: {}", times_to_commit.lock().unwrap());
}
}
future::join_all(nodes.into_iter().map(RunHandle::join)).await;
println!("\nOverall results:");
println!("Time to pool: {}", times_to_pool.lock().unwrap());
println!("Time to commit: {}", times_to_commit.lock().unwrap());
}
|
{
if self.samples == 0 {
formatter.write_str("(no samples)")
} else {
let avg_duration = self.total_duration.mul_f64(1.0 / (self.samples as f64));
write!(
formatter,
"avg: {} ms, max: {} ms",
avg_duration.as_millis(),
self.max_duration.as_millis()
)
}
}
|
identifier_body
|
send_txs.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use anyhow::format_err;
use exonum::{
blockchain::{ApiSender, Blockchain},
crypto::KeyPair,
helpers::Height,
merkledb::ObjectHash,
messages::{AnyTx, Verified},
runtime::SnapshotExt,
};
use exonum_node::FlushPoolStrategy;
use exonum_rust_runtime::{
spec::{Deploy, Spec},
DefaultInstance,
};
use futures::future;
use structopt::StructOpt;
use tokio::time::delay_for;
use std::{
fmt,
sync::{Arc, Mutex},
time::{Duration, Instant},
};
use exonum_soak_tests::{
services::{MainConfig, MainService, MainServiceInterface},
NetworkBuilder, RunHandle,
};
/// Runs a network with a service and sends transactions to it, measuring how fast
/// transactions appear in the mempool and are confirmed.
#[derive(Debug, StructOpt)]
#[structopt(name = "send_txs", set_term_width = 80)]
struct Args {
/// Number of nodes in the network.
#[structopt(name = "nodes", default_value = "4")]
node_count: u16,
/// Number of transactions to send. If not specified, the test will run infinitely.
#[structopt(name = "tx-count", long, short = "T")]
tx_count: Option<u64>,
/// Pool flushing strategy serialized in a TOML-like format (e.g., `never`, `immediate`
/// or `timeout=20`).
#[structopt(name = "flush", long, short = "f", parse(try_from_str = parse_strategy))]
flush_strategy: Option<FlushPoolStrategy>,
/// Intensity of the test, in transactions per second. Sensible values are up to several
/// hundred tps.
#[structopt(name = "tps", long, short = "t", default_value = "10")]
tps: usize,
}
fn parse_strategy(s: &str) -> anyhow::Result<FlushPoolStrategy> {
match s.trim() {
"never" => Ok(FlushPoolStrategy::Never),
"immediate" => Ok(FlushPoolStrategy::Immediate),
s if s.starts_with("timeout=") =>
|
_ => Err(format_err!("Invalid pool flushing strategy")),
}
}
#[derive(Default)]
struct TimingStats {
total_duration: Duration,
max_duration: Duration,
samples: usize,
}
impl fmt::Display for TimingStats {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.samples == 0 {
formatter.write_str("(no samples)")
} else {
let avg_duration = self.total_duration.mul_f64(1.0 / (self.samples as f64));
write!(
formatter,
"avg: {} ms, max: {} ms",
avg_duration.as_millis(),
self.max_duration.as_millis()
)
}
}
}
impl TimingStats {
fn push(&mut self, dur: Duration) {
if self.max_duration < dur {
self.max_duration = dur;
}
self.total_duration += dur;
self.samples += 1;
}
}
async fn transaction_task(
transaction: Verified<AnyTx>,
sender: ApiSender,
blockchain: Blockchain,
times_to_pool: Arc<Mutex<TimingStats>>,
times_to_commit: Arc<Mutex<TimingStats>>,
) {
/// Poll delay for each transaction.
const POLL_DELAY: Duration = Duration::from_millis(5);
let tx_hash = transaction.object_hash();
sender.broadcast_transaction(transaction).await.unwrap();
let start = Instant::now();
let mut in_pool = false;
loop {
// The additional block scope is needed to not spill vars across the `await` boundary.
{
let snapshot = blockchain.snapshot();
let snapshot = snapshot.for_core();
let tx_pool = snapshot.transactions_pool();
let tx_locations = snapshot.transactions_locations();
let now = Instant::now();
if tx_locations.contains(&tx_hash) {
log::trace!("Transaction {} is committed", tx_hash);
if !in_pool {
times_to_pool.lock().unwrap().push(now - start);
}
times_to_commit.lock().unwrap().push(now - start);
break;
} else if !in_pool && tx_pool.contains(&tx_hash) {
log::trace!("Transaction {} appeared in pool", tx_hash);
times_to_pool.lock().unwrap().push(now - start);
in_pool = true;
}
}
delay_for(POLL_DELAY).await;
}
}
#[tokio::main]
async fn main() {
exonum::crypto::init();
exonum::helpers::init_logger().ok();
let args = Args::from_args();
println!("Running test with {:?}", args);
let config = MainConfig {
generate_tx_in_after_commit: false,
};
let main_service = Spec::new(MainService).with_instance(
MainService::INSTANCE_ID,
MainService::INSTANCE_NAME,
config,
);
let flush_strategy = args.flush_strategy.unwrap_or_default();
let nodes = NetworkBuilder::new(args.node_count, 2_000)
.modify_config(|node_cfg| {
node_cfg.mempool.flush_pool_strategy = flush_strategy.clone();
})
.init_node(|genesis, rt| main_service.clone().deploy(genesis, rt))
.build();
let keys = KeyPair::random();
let delay = Duration::from_secs(1).mul_f64(1.0 / args.tps as f64);
loop {
let height = nodes[0].blockchain().last_block().height;
if height > Height(0) {
break;
}
delay_for(Duration::from_millis(200)).await;
}
log::info!("Started sending transactions");
let times_to_pool = Arc::new(Mutex::new(TimingStats::default()));
let times_to_commit = Arc::new(Mutex::new(TimingStats::default()));
let mut prev_report_time = Instant::now();
for i in 0..args.tx_count.unwrap_or_else(u64::max_value) {
let tx = keys.timestamp(MainService::INSTANCE_ID, Height(i));
let sender = nodes[0].blockchain().sender().to_owned();
let blockchain = nodes.last().unwrap().blockchain().to_owned();
let tx_task = transaction_task(
tx,
sender,
blockchain,
Arc::clone(&times_to_pool),
Arc::clone(&times_to_commit),
);
tokio::spawn(tx_task);
delay_for(delay).await;
let now = Instant::now();
if now - prev_report_time >= Duration::from_secs(1) {
prev_report_time = now;
println!(
"Transactions: {} total, {} committed",
i + 1,
times_to_commit.lock().unwrap().samples
);
println!("Time to pool: {}", times_to_pool.lock().unwrap());
println!("Time to commit: {}", times_to_commit.lock().unwrap());
}
}
future::join_all(nodes.into_iter().map(RunHandle::join)).await;
println!("\nOverall results:");
println!("Time to pool: {}", times_to_pool.lock().unwrap());
println!("Time to commit: {}", times_to_commit.lock().unwrap());
}
|
{
// 8 is the length of "timeout=".
let timeout: u64 = s[8..].parse()?;
Ok(FlushPoolStrategy::Timeout { timeout })
}
|
conditional_block
|
send_txs.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use anyhow::format_err;
use exonum::{
blockchain::{ApiSender, Blockchain},
crypto::KeyPair,
helpers::Height,
merkledb::ObjectHash,
messages::{AnyTx, Verified},
runtime::SnapshotExt,
};
use exonum_node::FlushPoolStrategy;
use exonum_rust_runtime::{
spec::{Deploy, Spec},
DefaultInstance,
};
use futures::future;
use structopt::StructOpt;
use tokio::time::delay_for;
use std::{
fmt,
sync::{Arc, Mutex},
time::{Duration, Instant},
};
use exonum_soak_tests::{
services::{MainConfig, MainService, MainServiceInterface},
NetworkBuilder, RunHandle,
};
/// Runs a network with a service and sends transactions to it, measuring how fast
/// transactions appear in the mempool and are confirmed.
#[derive(Debug, StructOpt)]
#[structopt(name = "send_txs", set_term_width = 80)]
struct Args {
/// Number of nodes in the network.
#[structopt(name = "nodes", default_value = "4")]
node_count: u16,
/// Number of transactions to send. If not specified, the test will run infinitely.
#[structopt(name = "tx-count", long, short = "T")]
tx_count: Option<u64>,
/// Pool flushing strategy serialized in a TOML-like format (e.g., `never`, `immediate`
/// or `timeout=20`).
#[structopt(name = "flush", long, short = "f", parse(try_from_str = parse_strategy))]
flush_strategy: Option<FlushPoolStrategy>,
/// Intensity of the test, in transactions per second. Sensible values are up to several
/// hundred tps.
#[structopt(name = "tps", long, short = "t", default_value = "10")]
tps: usize,
}
fn parse_strategy(s: &str) -> anyhow::Result<FlushPoolStrategy> {
match s.trim() {
"never" => Ok(FlushPoolStrategy::Never),
"immediate" => Ok(FlushPoolStrategy::Immediate),
s if s.starts_with("timeout=") => {
// 8 is the length of "timeout=".
let timeout: u64 = s[8..].parse()?;
Ok(FlushPoolStrategy::Timeout { timeout })
}
_ => Err(format_err!("Invalid pool flushing strategy")),
}
}
#[derive(Default)]
struct TimingStats {
total_duration: Duration,
max_duration: Duration,
samples: usize,
}
impl fmt::Display for TimingStats {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.samples == 0 {
formatter.write_str("(no samples)")
} else {
let avg_duration = self.total_duration.mul_f64(1.0 / (self.samples as f64));
write!(
formatter,
"avg: {} ms, max: {} ms",
avg_duration.as_millis(),
self.max_duration.as_millis()
)
}
}
}
impl TimingStats {
fn push(&mut self, dur: Duration) {
if self.max_duration < dur {
self.max_duration = dur;
}
self.total_duration += dur;
self.samples += 1;
}
}
async fn transaction_task(
transaction: Verified<AnyTx>,
sender: ApiSender,
blockchain: Blockchain,
times_to_pool: Arc<Mutex<TimingStats>>,
times_to_commit: Arc<Mutex<TimingStats>>,
) {
/// Poll delay for each transaction.
const POLL_DELAY: Duration = Duration::from_millis(5);
let tx_hash = transaction.object_hash();
sender.broadcast_transaction(transaction).await.unwrap();
let start = Instant::now();
let mut in_pool = false;
loop {
// The additional block scope is needed to not spill vars across the `await` boundary.
{
let snapshot = blockchain.snapshot();
let snapshot = snapshot.for_core();
let tx_pool = snapshot.transactions_pool();
let tx_locations = snapshot.transactions_locations();
let now = Instant::now();
if tx_locations.contains(&tx_hash) {
log::trace!("Transaction {} is committed", tx_hash);
if !in_pool {
times_to_pool.lock().unwrap().push(now - start);
}
times_to_commit.lock().unwrap().push(now - start);
break;
} else if !in_pool && tx_pool.contains(&tx_hash) {
log::trace!("Transaction {} appeared in pool", tx_hash);
times_to_pool.lock().unwrap().push(now - start);
in_pool = true;
}
}
delay_for(POLL_DELAY).await;
}
}
#[tokio::main]
async fn main() {
exonum::crypto::init();
exonum::helpers::init_logger().ok();
let args = Args::from_args();
println!("Running test with {:?}", args);
let config = MainConfig {
generate_tx_in_after_commit: false,
};
let main_service = Spec::new(MainService).with_instance(
MainService::INSTANCE_ID,
MainService::INSTANCE_NAME,
config,
);
let flush_strategy = args.flush_strategy.unwrap_or_default();
let nodes = NetworkBuilder::new(args.node_count, 2_000)
.modify_config(|node_cfg| {
node_cfg.mempool.flush_pool_strategy = flush_strategy.clone();
})
.init_node(|genesis, rt| main_service.clone().deploy(genesis, rt))
.build();
let keys = KeyPair::random();
let delay = Duration::from_secs(1).mul_f64(1.0 / args.tps as f64);
loop {
let height = nodes[0].blockchain().last_block().height;
if height > Height(0) {
break;
}
delay_for(Duration::from_millis(200)).await;
}
log::info!("Started sending transactions");
let times_to_pool = Arc::new(Mutex::new(TimingStats::default()));
let times_to_commit = Arc::new(Mutex::new(TimingStats::default()));
let mut prev_report_time = Instant::now();
|
let sender = nodes[0].blockchain().sender().to_owned();
let blockchain = nodes.last().unwrap().blockchain().to_owned();
let tx_task = transaction_task(
tx,
sender,
blockchain,
Arc::clone(&times_to_pool),
Arc::clone(&times_to_commit),
);
tokio::spawn(tx_task);
delay_for(delay).await;
let now = Instant::now();
if now - prev_report_time >= Duration::from_secs(1) {
prev_report_time = now;
println!(
"Transactions: {} total, {} committed",
i + 1,
times_to_commit.lock().unwrap().samples
);
println!("Time to pool: {}", times_to_pool.lock().unwrap());
println!("Time to commit: {}", times_to_commit.lock().unwrap());
}
}
future::join_all(nodes.into_iter().map(RunHandle::join)).await;
println!("\nOverall results:");
println!("Time to pool: {}", times_to_pool.lock().unwrap());
println!("Time to commit: {}", times_to_commit.lock().unwrap());
}
|
for i in 0..args.tx_count.unwrap_or_else(u64::max_value) {
let tx = keys.timestamp(MainService::INSTANCE_ID, Height(i));
|
random_line_split
|
send_txs.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use anyhow::format_err;
use exonum::{
blockchain::{ApiSender, Blockchain},
crypto::KeyPair,
helpers::Height,
merkledb::ObjectHash,
messages::{AnyTx, Verified},
runtime::SnapshotExt,
};
use exonum_node::FlushPoolStrategy;
use exonum_rust_runtime::{
spec::{Deploy, Spec},
DefaultInstance,
};
use futures::future;
use structopt::StructOpt;
use tokio::time::delay_for;
use std::{
fmt,
sync::{Arc, Mutex},
time::{Duration, Instant},
};
use exonum_soak_tests::{
services::{MainConfig, MainService, MainServiceInterface},
NetworkBuilder, RunHandle,
};
/// Runs a network with a service and sends transactions to it, measuring how fast
/// transactions appear in the mempool and are confirmed.
#[derive(Debug, StructOpt)]
#[structopt(name = "send_txs", set_term_width = 80)]
struct Args {
/// Number of nodes in the network.
#[structopt(name = "nodes", default_value = "4")]
node_count: u16,
/// Number of transactions to send. If not specified, the test will run infinitely.
#[structopt(name = "tx-count", long, short = "T")]
tx_count: Option<u64>,
/// Pool flushing strategy serialized in a TOML-like format (e.g., `never`, `immediate`
/// or `timeout=20`).
#[structopt(name = "flush", long, short = "f", parse(try_from_str = parse_strategy))]
flush_strategy: Option<FlushPoolStrategy>,
/// Intensity of the test, in transactions per second. Sensible values are up to several
/// hundred tps.
#[structopt(name = "tps", long, short = "t", default_value = "10")]
tps: usize,
}
fn parse_strategy(s: &str) -> anyhow::Result<FlushPoolStrategy> {
match s.trim() {
"never" => Ok(FlushPoolStrategy::Never),
"immediate" => Ok(FlushPoolStrategy::Immediate),
s if s.starts_with("timeout=") => {
// 8 is the length of "timeout=".
let timeout: u64 = s[8..].parse()?;
Ok(FlushPoolStrategy::Timeout { timeout })
}
_ => Err(format_err!("Invalid pool flushing strategy")),
}
}
#[derive(Default)]
struct TimingStats {
total_duration: Duration,
max_duration: Duration,
samples: usize,
}
impl fmt::Display for TimingStats {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.samples == 0 {
formatter.write_str("(no samples)")
} else {
let avg_duration = self.total_duration.mul_f64(1.0 / (self.samples as f64));
write!(
formatter,
"avg: {} ms, max: {} ms",
avg_duration.as_millis(),
self.max_duration.as_millis()
)
}
}
}
impl TimingStats {
fn push(&mut self, dur: Duration) {
if self.max_duration < dur {
self.max_duration = dur;
}
self.total_duration += dur;
self.samples += 1;
}
}
async fn transaction_task(
transaction: Verified<AnyTx>,
sender: ApiSender,
blockchain: Blockchain,
times_to_pool: Arc<Mutex<TimingStats>>,
times_to_commit: Arc<Mutex<TimingStats>>,
) {
/// Poll delay for each transaction.
const POLL_DELAY: Duration = Duration::from_millis(5);
let tx_hash = transaction.object_hash();
sender.broadcast_transaction(transaction).await.unwrap();
let start = Instant::now();
let mut in_pool = false;
loop {
// The additional block scope is needed to not spill vars across the `await` boundary.
{
let snapshot = blockchain.snapshot();
let snapshot = snapshot.for_core();
let tx_pool = snapshot.transactions_pool();
let tx_locations = snapshot.transactions_locations();
let now = Instant::now();
if tx_locations.contains(&tx_hash) {
log::trace!("Transaction {} is committed", tx_hash);
if !in_pool {
times_to_pool.lock().unwrap().push(now - start);
}
times_to_commit.lock().unwrap().push(now - start);
break;
} else if !in_pool && tx_pool.contains(&tx_hash) {
log::trace!("Transaction {} appeared in pool", tx_hash);
times_to_pool.lock().unwrap().push(now - start);
in_pool = true;
}
}
delay_for(POLL_DELAY).await;
}
}
#[tokio::main]
async fn
|
() {
exonum::crypto::init();
exonum::helpers::init_logger().ok();
let args = Args::from_args();
println!("Running test with {:?}", args);
let config = MainConfig {
generate_tx_in_after_commit: false,
};
let main_service = Spec::new(MainService).with_instance(
MainService::INSTANCE_ID,
MainService::INSTANCE_NAME,
config,
);
let flush_strategy = args.flush_strategy.unwrap_or_default();
let nodes = NetworkBuilder::new(args.node_count, 2_000)
.modify_config(|node_cfg| {
node_cfg.mempool.flush_pool_strategy = flush_strategy.clone();
})
.init_node(|genesis, rt| main_service.clone().deploy(genesis, rt))
.build();
let keys = KeyPair::random();
let delay = Duration::from_secs(1).mul_f64(1.0 / args.tps as f64);
loop {
let height = nodes[0].blockchain().last_block().height;
if height > Height(0) {
break;
}
delay_for(Duration::from_millis(200)).await;
}
log::info!("Started sending transactions");
let times_to_pool = Arc::new(Mutex::new(TimingStats::default()));
let times_to_commit = Arc::new(Mutex::new(TimingStats::default()));
let mut prev_report_time = Instant::now();
for i in 0..args.tx_count.unwrap_or_else(u64::max_value) {
let tx = keys.timestamp(MainService::INSTANCE_ID, Height(i));
let sender = nodes[0].blockchain().sender().to_owned();
let blockchain = nodes.last().unwrap().blockchain().to_owned();
let tx_task = transaction_task(
tx,
sender,
blockchain,
Arc::clone(&times_to_pool),
Arc::clone(&times_to_commit),
);
tokio::spawn(tx_task);
delay_for(delay).await;
let now = Instant::now();
if now - prev_report_time >= Duration::from_secs(1) {
prev_report_time = now;
println!(
"Transactions: {} total, {} committed",
i + 1,
times_to_commit.lock().unwrap().samples
);
println!("Time to pool: {}", times_to_pool.lock().unwrap());
println!("Time to commit: {}", times_to_commit.lock().unwrap());
}
}
future::join_all(nodes.into_iter().map(RunHandle::join)).await;
println!("\nOverall results:");
println!("Time to pool: {}", times_to_pool.lock().unwrap());
println!("Time to commit: {}", times_to_commit.lock().unwrap());
}
|
main
|
identifier_name
|
dom.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Types and traits used to access the DOM from style calculation.
#![allow(unsafe_code)]
use data::PseudoStyles;
use element_state::ElementState;
use parking_lot::RwLock;
use properties::{ComputedValues, PropertyDeclarationBlock};
use restyle_hints::{RESTYLE_DESCENDANTS, RESTYLE_LATER_SIBLINGS, RESTYLE_SELF, RestyleHint};
use selector_impl::{ElementExt, PseudoElement};
use selector_matching::ApplicableDeclarationBlock;
use sink::Push;
use std::fmt::Debug;
use std::ops::BitOr;
use std::sync::Arc;
use string_cache::{Atom, Namespace};
/// Opaque type stored in type-unsafe work queues for parallel layout.
/// Must be transmutable to and from TNode.
pub type UnsafeNode = (usize, usize);
/// An opaque handle to a node, which, unlike UnsafeNode, cannot be transformed
/// back into a non-opaque representation. The only safe operation that can be
/// performed on this node is to compare it to another opaque handle or to another
/// OpaqueNode.
///
/// Layout and Graphics use this to safely represent nodes for comparison purposes.
/// Because the script task's GC does not trace layout, node data cannot be safely stored in layout
/// data structures. Also, layout code tends to be faster when the DOM is not being accessed, for
/// locality reasons. Using `OpaqueNode` enforces this invariant.
#[derive(Clone, PartialEq, Copy, Debug, Hash, Eq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf, Deserialize, Serialize))]
pub struct OpaqueNode(pub usize);
impl OpaqueNode {
/// Returns the address of this node, for debugging purposes.
#[inline]
pub fn
|
(&self) -> usize {
self.0
}
}
pub trait TRestyleDamage : Debug + PartialEq + BitOr<Output=Self> + Copy {
/// The source for our current computed values in the cascade. This is a
/// ComputedValues in Servo and a StyleContext in Gecko.
///
/// This is needed because Gecko has a few optimisations for the calculation
/// of the difference depending on which values have been used during
/// layout.
///
/// This should be obtained via TNode::existing_style_for_restyle_damage
type PreExistingComputedValues;
fn compute(old: &Self::PreExistingComputedValues,
new: &Arc<ComputedValues>) -> Self;
fn empty() -> Self;
fn rebuild_and_reflow() -> Self;
}
/// Simple trait to provide basic information about the type of an element.
///
/// We avoid exposing the full type id, since computing it in the general case
/// would be difficult for Gecko nodes.
pub trait NodeInfo {
fn is_element(&self) -> bool;
fn is_text_node(&self) -> bool;
// Comments, doctypes, etc are ignored by layout algorithms.
fn needs_layout(&self) -> bool { self.is_element() || self.is_text_node() }
}
pub struct LayoutIterator<T>(pub T);
impl<T, I> Iterator for LayoutIterator<T> where T: Iterator<Item=I>, I: NodeInfo {
type Item = I;
fn next(&mut self) -> Option<I> {
loop {
// Filter out nodes that layout should ignore.
let n = self.0.next();
if n.is_none() || n.as_ref().unwrap().needs_layout() {
return n
}
}
}
}
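// Illustrative sketch only: `ToyNode` is hypothetical (not a type from this crate)
// and exists purely to show how `LayoutIterator` skips nodes whose `needs_layout`
// is false, such as comments and doctypes.
#[cfg(test)]
mod layout_iterator_example {
    use super::{LayoutIterator, NodeInfo};

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum ToyNode { Element, Text, Comment }

    impl NodeInfo for ToyNode {
        fn is_element(&self) -> bool { *self == ToyNode::Element }
        fn is_text_node(&self) -> bool { *self == ToyNode::Text }
    }

    #[test]
    fn filters_out_nodes_layout_ignores() {
        let nodes = vec![ToyNode::Element, ToyNode::Comment, ToyNode::Text];
        // The comment node is dropped; elements and text nodes pass through.
        let kept: Vec<_> = LayoutIterator(nodes.into_iter()).collect();
        assert_eq!(kept, vec![ToyNode::Element, ToyNode::Text]);
    }
}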
pub trait TNode : Sized + Copy + Clone + NodeInfo {
type ConcreteElement: TElement<ConcreteNode = Self, ConcreteDocument = Self::ConcreteDocument>;
type ConcreteDocument: TDocument<ConcreteNode = Self, ConcreteElement = Self::ConcreteElement>;
type ConcreteRestyleDamage: TRestyleDamage;
type ConcreteChildrenIterator: Iterator<Item = Self>;
fn to_unsafe(&self) -> UnsafeNode;
unsafe fn from_unsafe(n: &UnsafeNode) -> Self;
fn dump(self);
fn dump_style(self);
/// Returns an iterator over this node's children.
fn children(self) -> LayoutIterator<Self::ConcreteChildrenIterator>;
/// Converts self into an `OpaqueNode`.
fn opaque(&self) -> OpaqueNode;
/// While doing a reflow, the node at the root has no parent, as far as we're
/// concerned. This method returns `None` at the reflow root.
fn layout_parent_node(self, reflow_root: OpaqueNode) -> Option<Self>;
fn debug_id(self) -> usize;
fn as_element(&self) -> Option<Self::ConcreteElement>;
fn as_document(&self) -> Option<Self::ConcreteDocument>;
fn has_changed(&self) -> bool;
unsafe fn set_changed(&self, value: bool);
fn is_dirty(&self) -> bool;
unsafe fn set_dirty(&self, value: bool);
fn has_dirty_descendants(&self) -> bool;
unsafe fn set_dirty_descendants(&self, value: bool);
fn needs_dirty_on_viewport_size_changed(&self) -> bool;
unsafe fn set_dirty_on_viewport_size_changed(&self);
fn can_be_fragmented(&self) -> bool;
unsafe fn set_can_be_fragmented(&self, value: bool);
/// Atomically stores the number of children of this node that we will
/// need to process during bottom-up traversal.
fn store_children_to_process(&self, n: isize);
/// Atomically notes that a child has been processed during bottom-up
/// traversal. Returns the number of children left to process.
fn did_process_child(&self) -> isize;
/// Returns the computed style values corresponding to the existing style
/// for this node, if any.
///
/// This returns a cloned Arc (rather than a borrow) to abstract over the
/// multitude of ways these values may be stored under the hood. By
/// returning an enum with various OwningRef/OwningHandle entries, we could
/// avoid the refcounting traffic here, but it's probably not worth the
/// complexity.
fn get_existing_style(&self) -> Option<Arc<ComputedValues>>;
/// Sets the computed style for this node.
fn set_style(&self, style: Option<Arc<ComputedValues>>);
/// Transfers ownership of the existing pseudo styles, if any, to the
/// caller. The stored pseudo styles are replaced with an empty map.
fn take_pseudo_styles(&self) -> PseudoStyles;
/// Sets the pseudo styles on the element, replacing any existing styles.
fn set_pseudo_styles(&self, styles: PseudoStyles);
/// Get the description of how to account for recent style changes.
fn restyle_damage(self) -> Self::ConcreteRestyleDamage;
/// Set the restyle damage field.
fn set_restyle_damage(self, damage: Self::ConcreteRestyleDamage);
fn parent_node(&self) -> Option<Self>;
fn first_child(&self) -> Option<Self>;
fn last_child(&self) -> Option<Self>;
fn prev_sibling(&self) -> Option<Self>;
fn next_sibling(&self) -> Option<Self>;
/// XXX: It's a bit unfortunate we need to pass the current computed values
/// as an argument here, but otherwise Servo would crash due to double
/// borrows to return it.
fn existing_style_for_restyle_damage<'a>(&'a self,
current_computed_values: Option<&'a Arc<ComputedValues>>,
pseudo: Option<&PseudoElement>)
-> Option<&'a <Self::ConcreteRestyleDamage as TRestyleDamage>::PreExistingComputedValues>;
}
pub trait TDocument : Sized + Copy + Clone {
type ConcreteNode: TNode<ConcreteElement = Self::ConcreteElement, ConcreteDocument = Self>;
type ConcreteElement: TElement<ConcreteNode = Self::ConcreteNode, ConcreteDocument = Self>;
fn as_node(&self) -> Self::ConcreteNode;
fn root_node(&self) -> Option<Self::ConcreteNode>;
fn drain_modified_elements(&self) -> Vec<(Self::ConcreteElement,
<Self::ConcreteElement as ElementExt>::Snapshot)>;
fn needs_paint_from_layout(&self);
fn will_paint(&self);
}
pub trait PresentationalHintsSynthetizer {
fn synthesize_presentational_hints_for_legacy_attributes<V>(&self, hints: &mut V)
where V: Push<ApplicableDeclarationBlock>;
}
pub trait TElement : PartialEq + Debug + Sized + Copy + Clone + ElementExt + PresentationalHintsSynthetizer {
type ConcreteNode: TNode<ConcreteElement = Self, ConcreteDocument = Self::ConcreteDocument>;
type ConcreteDocument: TDocument<ConcreteNode = Self::ConcreteNode, ConcreteElement = Self>;
fn as_node(&self) -> Self::ConcreteNode;
fn style_attribute(&self) -> Option<&Arc<RwLock<PropertyDeclarationBlock>>>;
fn get_state(&self) -> ElementState;
fn has_attr(&self, namespace: &Namespace, attr: &Atom) -> bool;
fn attr_equals(&self, namespace: &Namespace, attr: &Atom, value: &Atom) -> bool;
/// Properly marks nodes as dirty in response to restyle hints.
fn note_restyle_hint(&self, hint: RestyleHint) {
// Bail early if there's no restyling to do.
if hint.is_empty() {
return;
}
// If the restyle hint is non-empty, we need to restyle either this element
// or one of its siblings. Mark our ancestor chain as having dirty descendants.
let node = self.as_node();
let mut curr = node;
while let Some(parent) = curr.parent_node() {
if parent.has_dirty_descendants() { break }
unsafe { parent.set_dirty_descendants(true); }
curr = parent;
}
// Process hints.
if hint.contains(RESTYLE_SELF) {
unsafe { node.set_dirty(true); }
// XXX(emilio): For now, dirty implies dirty descendants if found.
} else if hint.contains(RESTYLE_DESCENDANTS) {
unsafe { node.set_dirty_descendants(true); }
let mut current = node.first_child();
while let Some(node) = current {
unsafe { node.set_dirty(true); }
current = node.next_sibling();
}
}
if hint.contains(RESTYLE_LATER_SIBLINGS) {
let mut next = ::selectors::Element::next_sibling_element(self);
while let Some(sib) = next {
let sib_node = sib.as_node();
unsafe { sib_node.set_dirty(true) };
next = ::selectors::Element::next_sibling_element(&sib);
}
}
}
}
|
id
|
identifier_name
|
dom.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Types and traits used to access the DOM from style calculation.
#![allow(unsafe_code)]
use data::PseudoStyles;
use element_state::ElementState;
use parking_lot::RwLock;
use properties::{ComputedValues, PropertyDeclarationBlock};
use restyle_hints::{RESTYLE_DESCENDANTS, RESTYLE_LATER_SIBLINGS, RESTYLE_SELF, RestyleHint};
use selector_impl::{ElementExt, PseudoElement};
use selector_matching::ApplicableDeclarationBlock;
use sink::Push;
use std::fmt::Debug;
use std::ops::BitOr;
use std::sync::Arc;
use string_cache::{Atom, Namespace};
/// Opaque type stored in type-unsafe work queues for parallel layout.
/// Must be transmutable to and from TNode.
pub type UnsafeNode = (usize, usize);
/// An opaque handle to a node, which, unlike UnsafeNode, cannot be transformed
/// back into a non-opaque representation. The only safe operation that can be
/// performed on this node is to compare it to another opaque handle or to another
/// OpaqueNode.
///
/// Layout and Graphics use this to safely represent nodes for comparison purposes.
/// Because the script task's GC does not trace layout, node data cannot be safely stored in layout
/// data structures. Also, layout code tends to be faster when the DOM is not being accessed, for
/// locality reasons. Using `OpaqueNode` enforces this invariant.
#[derive(Clone, PartialEq, Copy, Debug, Hash, Eq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf, Deserialize, Serialize))]
pub struct OpaqueNode(pub usize);
impl OpaqueNode {
/// Returns the address of this node, for debugging purposes.
#[inline]
pub fn id(&self) -> usize {
self.0
}
}
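// Tiny illustration, not from the original source: the only things you can do with
// an `OpaqueNode` are compare it and read the stored address back via `id()`; the
// addresses below are made up.
#[cfg(test)]
mod opaque_node_example {
    use super::OpaqueNode;

    #[test]
    fn compares_by_stored_address() {
        let a = OpaqueNode(0x1000);
        let b = OpaqueNode(0x1000);
        let c = OpaqueNode(0x2000);
        assert_eq!(a, b);
        assert!(a != c);
        assert_eq!(c.id(), 0x2000);
    }
}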
pub trait TRestyleDamage : Debug + PartialEq + BitOr<Output=Self> + Copy {
/// The source for our current computed values in the cascade. This is a
/// ComputedValues in Servo and a StyleContext in Gecko.
///
|
/// layout.
///
/// This should be obtained via TNode::existing_style_for_restyle_damage
type PreExistingComputedValues;
fn compute(old: &Self::PreExistingComputedValues,
new: &Arc<ComputedValues>) -> Self;
fn empty() -> Self;
fn rebuild_and_reflow() -> Self;
}
/// Simple trait to provide basic information about the type of an element.
///
/// We avoid exposing the full type id, since computing it in the general case
/// would be difficult for Gecko nodes.
pub trait NodeInfo {
fn is_element(&self) -> bool;
fn is_text_node(&self) -> bool;
// Comments, doctypes, etc are ignored by layout algorithms.
fn needs_layout(&self) -> bool { self.is_element() || self.is_text_node() }
}
pub struct LayoutIterator<T>(pub T);
impl<T, I> Iterator for LayoutIterator<T> where T: Iterator<Item=I>, I: NodeInfo {
type Item = I;
fn next(&mut self) -> Option<I> {
loop {
// Filter out nodes that layout should ignore.
let n = self.0.next();
if n.is_none() || n.as_ref().unwrap().needs_layout() {
return n
}
}
}
}
pub trait TNode : Sized + Copy + Clone + NodeInfo {
type ConcreteElement: TElement<ConcreteNode = Self, ConcreteDocument = Self::ConcreteDocument>;
type ConcreteDocument: TDocument<ConcreteNode = Self, ConcreteElement = Self::ConcreteElement>;
type ConcreteRestyleDamage: TRestyleDamage;
type ConcreteChildrenIterator: Iterator<Item = Self>;
fn to_unsafe(&self) -> UnsafeNode;
unsafe fn from_unsafe(n: &UnsafeNode) -> Self;
fn dump(self);
fn dump_style(self);
/// Returns an iterator over this node's children.
fn children(self) -> LayoutIterator<Self::ConcreteChildrenIterator>;
/// Converts self into an `OpaqueNode`.
fn opaque(&self) -> OpaqueNode;
/// While doing a reflow, the node at the root has no parent, as far as we're
/// concerned. This method returns `None` at the reflow root.
fn layout_parent_node(self, reflow_root: OpaqueNode) -> Option<Self>;
fn debug_id(self) -> usize;
fn as_element(&self) -> Option<Self::ConcreteElement>;
fn as_document(&self) -> Option<Self::ConcreteDocument>;
fn has_changed(&self) -> bool;
unsafe fn set_changed(&self, value: bool);
fn is_dirty(&self) -> bool;
unsafe fn set_dirty(&self, value: bool);
fn has_dirty_descendants(&self) -> bool;
unsafe fn set_dirty_descendants(&self, value: bool);
fn needs_dirty_on_viewport_size_changed(&self) -> bool;
unsafe fn set_dirty_on_viewport_size_changed(&self);
fn can_be_fragmented(&self) -> bool;
unsafe fn set_can_be_fragmented(&self, value: bool);
/// Atomically stores the number of children of this node that we will
/// need to process during bottom-up traversal.
fn store_children_to_process(&self, n: isize);
/// Atomically notes that a child has been processed during bottom-up
/// traversal. Returns the number of children left to process.
fn did_process_child(&self) -> isize;
/// Returns the computed style values corresponding to the existing style
/// for this node, if any.
///
/// This returns a cloned Arc (rather than a borrow) to abstract over the
/// multitude of ways these values may be stored under the hood. By
/// returning an enum with various OwningRef/OwningHandle entries, we could
/// avoid the refcounting traffic here, but it's probably not worth the
/// complexity.
fn get_existing_style(&self) -> Option<Arc<ComputedValues>>;
/// Sets the computed style for this node.
fn set_style(&self, style: Option<Arc<ComputedValues>>);
/// Transfers ownership of the existing pseudo styles, if any, to the
/// caller. The stored pseudo styles are replaced with an empty map.
fn take_pseudo_styles(&self) -> PseudoStyles;
/// Sets the pseudo styles on the element, replacing any existing styles.
fn set_pseudo_styles(&self, styles: PseudoStyles);
/// Get the description of how to account for recent style changes.
fn restyle_damage(self) -> Self::ConcreteRestyleDamage;
/// Set the restyle damage field.
fn set_restyle_damage(self, damage: Self::ConcreteRestyleDamage);
fn parent_node(&self) -> Option<Self>;
fn first_child(&self) -> Option<Self>;
fn last_child(&self) -> Option<Self>;
fn prev_sibling(&self) -> Option<Self>;
fn next_sibling(&self) -> Option<Self>;
/// XXX: It's a bit unfortunate we need to pass the current computed values
/// as an argument here, but otherwise Servo would crash due to double
/// borrows to return it.
fn existing_style_for_restyle_damage<'a>(&'a self,
current_computed_values: Option<&'a Arc<ComputedValues>>,
pseudo: Option<&PseudoElement>)
-> Option<&'a <Self::ConcreteRestyleDamage as TRestyleDamage>::PreExistingComputedValues>;
}
pub trait TDocument : Sized + Copy + Clone {
type ConcreteNode: TNode<ConcreteElement = Self::ConcreteElement, ConcreteDocument = Self>;
type ConcreteElement: TElement<ConcreteNode = Self::ConcreteNode, ConcreteDocument = Self>;
fn as_node(&self) -> Self::ConcreteNode;
fn root_node(&self) -> Option<Self::ConcreteNode>;
fn drain_modified_elements(&self) -> Vec<(Self::ConcreteElement,
<Self::ConcreteElement as ElementExt>::Snapshot)>;
fn needs_paint_from_layout(&self);
fn will_paint(&self);
}
pub trait PresentationalHintsSynthetizer {
fn synthesize_presentational_hints_for_legacy_attributes<V>(&self, hints: &mut V)
where V: Push<ApplicableDeclarationBlock>;
}
pub trait TElement : PartialEq + Debug + Sized + Copy + Clone + ElementExt + PresentationalHintsSynthetizer {
type ConcreteNode: TNode<ConcreteElement = Self, ConcreteDocument = Self::ConcreteDocument>;
type ConcreteDocument: TDocument<ConcreteNode = Self::ConcreteNode, ConcreteElement = Self>;
fn as_node(&self) -> Self::ConcreteNode;
fn style_attribute(&self) -> Option<&Arc<RwLock<PropertyDeclarationBlock>>>;
fn get_state(&self) -> ElementState;
fn has_attr(&self, namespace: &Namespace, attr: &Atom) -> bool;
fn attr_equals(&self, namespace: &Namespace, attr: &Atom, value: &Atom) -> bool;
/// Properly marks nodes as dirty in response to restyle hints.
fn note_restyle_hint(&self, hint: RestyleHint) {
// Bail early if there's no restyling to do.
if hint.is_empty() {
return;
}
// If the restyle hint is non-empty, we need to restyle either this element
// or one of its siblings. Mark our ancestor chain as having dirty descendants.
let node = self.as_node();
let mut curr = node;
while let Some(parent) = curr.parent_node() {
if parent.has_dirty_descendants() { break }
unsafe { parent.set_dirty_descendants(true); }
curr = parent;
}
// Process hints.
if hint.contains(RESTYLE_SELF) {
unsafe { node.set_dirty(true); }
// XXX(emilio): For now, dirty implies dirty descendants if found.
} else if hint.contains(RESTYLE_DESCENDANTS) {
unsafe { node.set_dirty_descendants(true); }
let mut current = node.first_child();
while let Some(node) = current {
unsafe { node.set_dirty(true); }
current = node.next_sibling();
}
}
if hint.contains(RESTYLE_LATER_SIBLINGS) {
let mut next = ::selectors::Element::next_sibling_element(self);
while let Some(sib) = next {
let sib_node = sib.as_node();
unsafe { sib_node.set_dirty(true) };
next = ::selectors::Element::next_sibling_element(&sib);
}
}
}
}
|
/// This is needed because Gecko has a few optimisations for the calculation
/// of the difference depending on which values have been used during
|
random_line_split
|
dom.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Types and traits used to access the DOM from style calculation.
#![allow(unsafe_code)]
use data::PseudoStyles;
use element_state::ElementState;
use parking_lot::RwLock;
use properties::{ComputedValues, PropertyDeclarationBlock};
use restyle_hints::{RESTYLE_DESCENDANTS, RESTYLE_LATER_SIBLINGS, RESTYLE_SELF, RestyleHint};
use selector_impl::{ElementExt, PseudoElement};
use selector_matching::ApplicableDeclarationBlock;
use sink::Push;
use std::fmt::Debug;
use std::ops::BitOr;
use std::sync::Arc;
use string_cache::{Atom, Namespace};
/// Opaque type stored in type-unsafe work queues for parallel layout.
/// Must be transmutable to and from TNode.
pub type UnsafeNode = (usize, usize);
/// An opaque handle to a node, which, unlike UnsafeNode, cannot be transformed
/// back into a non-opaque representation. The only safe operation that can be
/// performed on this node is to compare it to another opaque handle or to another
/// OpaqueNode.
///
/// Layout and Graphics use this to safely represent nodes for comparison purposes.
/// Because the script task's GC does not trace layout, node data cannot be safely stored in layout
/// data structures. Also, layout code tends to be faster when the DOM is not being accessed, for
/// locality reasons. Using `OpaqueNode` enforces this invariant.
#[derive(Clone, PartialEq, Copy, Debug, Hash, Eq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf, Deserialize, Serialize))]
pub struct OpaqueNode(pub usize);
impl OpaqueNode {
/// Returns the address of this node, for debugging purposes.
#[inline]
pub fn id(&self) -> usize {
self.0
}
}
pub trait TRestyleDamage : Debug + PartialEq + BitOr<Output=Self> + Copy {
/// The source for our current computed values in the cascade. This is a
/// ComputedValues in Servo and a StyleContext in Gecko.
///
/// This is needed because Gecko has a few optimisations for the calculation
/// of the difference depending on which values have been used during
/// layout.
///
/// This should be obtained via TNode::existing_style_for_restyle_damage
type PreExistingComputedValues;
fn compute(old: &Self::PreExistingComputedValues,
new: &Arc<ComputedValues>) -> Self;
fn empty() -> Self;
fn rebuild_and_reflow() -> Self;
}
/// Simple trait to provide basic information about the type of an element.
///
/// We avoid exposing the full type id, since computing it in the general case
/// would be difficult for Gecko nodes.
pub trait NodeInfo {
fn is_element(&self) -> bool;
fn is_text_node(&self) -> bool;
// Comments, doctypes, etc. are ignored by layout algorithms.
fn needs_layout(&self) -> bool { self.is_element() || self.is_text_node() }
}
pub struct LayoutIterator<T>(pub T);
impl<T, I> Iterator for LayoutIterator<T> where T: Iterator<Item=I>, I: NodeInfo {
type Item = I;
fn next(&mut self) -> Option<I> {
loop {
// Filter out nodes that layout should ignore.
let n = self.0.next();
if n.is_none() || n.as_ref().unwrap().needs_layout() {
return n
}
}
}
}
pub trait TNode : Sized + Copy + Clone + NodeInfo {
type ConcreteElement: TElement<ConcreteNode = Self, ConcreteDocument = Self::ConcreteDocument>;
type ConcreteDocument: TDocument<ConcreteNode = Self, ConcreteElement = Self::ConcreteElement>;
type ConcreteRestyleDamage: TRestyleDamage;
type ConcreteChildrenIterator: Iterator<Item = Self>;
fn to_unsafe(&self) -> UnsafeNode;
unsafe fn from_unsafe(n: &UnsafeNode) -> Self;
fn dump(self);
fn dump_style(self);
/// Returns an iterator over this node's children.
fn children(self) -> LayoutIterator<Self::ConcreteChildrenIterator>;
/// Converts self into an `OpaqueNode`.
fn opaque(&self) -> OpaqueNode;
/// While doing a reflow, the node at the root has no parent, as far as we're
/// concerned. This method returns `None` at the reflow root.
fn layout_parent_node(self, reflow_root: OpaqueNode) -> Option<Self>;
fn debug_id(self) -> usize;
fn as_element(&self) -> Option<Self::ConcreteElement>;
fn as_document(&self) -> Option<Self::ConcreteDocument>;
fn has_changed(&self) -> bool;
unsafe fn set_changed(&self, value: bool);
fn is_dirty(&self) -> bool;
unsafe fn set_dirty(&self, value: bool);
fn has_dirty_descendants(&self) -> bool;
unsafe fn set_dirty_descendants(&self, value: bool);
fn needs_dirty_on_viewport_size_changed(&self) -> bool;
unsafe fn set_dirty_on_viewport_size_changed(&self);
fn can_be_fragmented(&self) -> bool;
unsafe fn set_can_be_fragmented(&self, value: bool);
/// Atomically stores the number of children of this node that we will
/// need to process during bottom-up traversal.
fn store_children_to_process(&self, n: isize);
/// Atomically notes that a child has been processed during bottom-up
/// traversal. Returns the number of children left to process.
fn did_process_child(&self) -> isize;
/// Returns the computed style values corresponding to the existing style
/// for this node, if any.
///
/// This returns a cloned Arc (rather than a borrow) to abstract over the
/// multitude of ways these values may be stored under the hood. By
/// returning an enum with various OwningRef/OwningHandle entries, we could
/// avoid the refcounting traffic here, but it's probably not worth the
/// complexity.
fn get_existing_style(&self) -> Option<Arc<ComputedValues>>;
/// Sets the computed style for this node.
fn set_style(&self, style: Option<Arc<ComputedValues>>);
/// Transfers ownership of the existing pseudo styles, if any, to the
/// caller. The stored pseudo styles are replaced with an empty map.
fn take_pseudo_styles(&self) -> PseudoStyles;
/// Sets the pseudo styles on the element, replacing any existing styles.
fn set_pseudo_styles(&self, styles: PseudoStyles);
/// Get the description of how to account for recent style changes.
fn restyle_damage(self) -> Self::ConcreteRestyleDamage;
/// Set the restyle damage field.
fn set_restyle_damage(self, damage: Self::ConcreteRestyleDamage);
fn parent_node(&self) -> Option<Self>;
fn first_child(&self) -> Option<Self>;
fn last_child(&self) -> Option<Self>;
fn prev_sibling(&self) -> Option<Self>;
fn next_sibling(&self) -> Option<Self>;
/// XXX: It's a bit unfortunate we need to pass the current computed values
/// as an argument here, but otherwise Servo would crash due to double
/// borrows to return it.
fn existing_style_for_restyle_damage<'a>(&'a self,
current_computed_values: Option<&'a Arc<ComputedValues>>,
pseudo: Option<&PseudoElement>)
-> Option<&'a <Self::ConcreteRestyleDamage as TRestyleDamage>::PreExistingComputedValues>;
}
pub trait TDocument : Sized + Copy + Clone {
type ConcreteNode: TNode<ConcreteElement = Self::ConcreteElement, ConcreteDocument = Self>;
type ConcreteElement: TElement<ConcreteNode = Self::ConcreteNode, ConcreteDocument = Self>;
fn as_node(&self) -> Self::ConcreteNode;
fn root_node(&self) -> Option<Self::ConcreteNode>;
fn drain_modified_elements(&self) -> Vec<(Self::ConcreteElement,
<Self::ConcreteElement as ElementExt>::Snapshot)>;
fn needs_paint_from_layout(&self);
fn will_paint(&self);
}
pub trait PresentationalHintsSynthetizer {
fn synthesize_presentational_hints_for_legacy_attributes<V>(&self, hints: &mut V)
where V: Push<ApplicableDeclarationBlock>;
}
pub trait TElement : PartialEq + Debug + Sized + Copy + Clone + ElementExt + PresentationalHintsSynthetizer {
type ConcreteNode: TNode<ConcreteElement = Self, ConcreteDocument = Self::ConcreteDocument>;
type ConcreteDocument: TDocument<ConcreteNode = Self::ConcreteNode, ConcreteElement = Self>;
fn as_node(&self) -> Self::ConcreteNode;
fn style_attribute(&self) -> Option<&Arc<RwLock<PropertyDeclarationBlock>>>;
fn get_state(&self) -> ElementState;
fn has_attr(&self, namespace: &Namespace, attr: &Atom) -> bool;
fn attr_equals(&self, namespace: &Namespace, attr: &Atom, value: &Atom) -> bool;
/// Properly marks nodes as dirty in response to restyle hints.
fn note_restyle_hint(&self, hint: RestyleHint)
|
} else if hint.contains(RESTYLE_DESCENDANTS) {
unsafe { node.set_dirty_descendants(true); }
let mut current = node.first_child();
while let Some(node) = current {
unsafe { node.set_dirty(true); }
current = node.next_sibling();
}
}
if hint.contains(RESTYLE_LATER_SIBLINGS) {
let mut next = ::selectors::Element::next_sibling_element(self);
while let Some(sib) = next {
let sib_node = sib.as_node();
unsafe { sib_node.set_dirty(true) };
next = ::selectors::Element::next_sibling_element(&sib);
}
}
}
}
|
{
// Bail early if there's no restyling to do.
if hint.is_empty() {
return;
}
// If the restyle hint is non-empty, we need to restyle either this element
// or one of its siblings. Mark our ancestor chain as having dirty descendants.
let node = self.as_node();
let mut curr = node;
while let Some(parent) = curr.parent_node() {
if parent.has_dirty_descendants() { break }
unsafe { parent.set_dirty_descendants(true); }
curr = parent;
}
// Process hints.
if hint.contains(RESTYLE_SELF) {
unsafe { node.set_dirty(true); }
// XXX(emilio): For now, dirty implies dirty descendants if found.
|
identifier_body
|
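The note_restyle_hint default method above is the main piece of logic in dom.rs. As a hedged usage sketch (not part of the source): RestyleHint is used here like a bitflags-style type (is_empty, contains, and the imported RESTYLE_* constants), so a caller that has diffed an element's state might drive it roughly as below; the function name, the generic bound, and the choice of hints are illustrative assumptions only.
// Hypothetical caller; `element` is any type implementing the TElement trait above.
fn mark_for_restyle<E: TElement>(element: &E) {
    // Assumed scenario: the diff decided the element itself and its later
    // siblings need restyling; the constants come from restyle_hints above.
    let hint: RestyleHint = RESTYLE_SELF | RESTYLE_LATER_SIBLINGS;
    // note_restyle_hint marks the ancestor chain as having dirty descendants,
    // then sets the dirty bit on the element and on each following sibling.
    element.note_restyle_hint(hint);
}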
text_edit.rs
|
use crate::{TextUnit, TextRange, TextBuf, Text, tu};
use std::cmp::Ordering;
#[derive(Clone, Debug)]
pub struct TextEdit {
pub ops: Vec<TextEditOp>,
}
|
#[derive(Clone, Debug)]
pub enum TextEditOp {
Copy(TextRange), // TODO: check for disjoint ranges
Insert(TextBuf),
}
impl TextEdit {
pub fn apply(&self, text: Text) -> TextBuf {
let mut result = String::new();
for s in self.ops.iter() {
match *s {
TextEditOp::Copy(range) => result += &text.slice(range).to_cow(),
TextEditOp::Insert(ref i) => result += &i.as_text().to_cow(),
}
}
result.into()
}
}
pub struct TextEditBuilder {
segments: Vec<TextEditOp>,
last_offset: TextUnit,
text_len: TextUnit,
}
impl TextEditBuilder {
pub fn new(text: Text) -> TextEditBuilder {
TextEditBuilder {
segments: Vec::new(),
last_offset: tu(0),
text_len: text.len()
}
}
pub fn build(mut self) -> TextEdit {
let len = self.text_len;
self.advance_to(len);
TextEdit { ops: self.segments }
}
pub fn insert<T: Into<TextBuf>>(&mut self, offset: TextUnit, text: T) {
self.advance_to(offset);
self.insert_(text.into());
}
pub fn delete(&mut self, range: TextRange) {
self.advance_to(range.start());
self.delete_len(range.len());
}
pub fn replace<T: Into<TextBuf>>(&mut self, range: TextRange, text: T) {
self.advance_to(range.start());
self.insert_(text.into());
self.delete_len(range.len());
}
fn advance_to(&mut self, offset: TextUnit) {
match self.last_offset.cmp(&offset) {
Ordering::Less => self.copy_up_to(offset),
Ordering::Equal => (),
Ordering::Greater => panic!("Invalid edit"),
}
}
fn copy_up_to(&mut self, offset: TextUnit) {
let len = offset - self.last_offset;
self.copy_len(len)
}
fn copy_len(&mut self, len: TextUnit) {
let range = TextRange::from_len(self.last_offset, len);
self.segments.push(TextEditOp::Copy(range));
self.last_offset += len
}
fn insert_(&mut self, text: TextBuf) {
self.segments.push(TextEditOp::Insert(text))
}
fn delete_len(&mut self, len: TextUnit) {
self.last_offset += len
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_edits() {
let text: TextBuf = "Hello, World!".into();
let edit = {
let mut e = TextEditBuilder::new(text.as_text());
e.replace(TextRange::from_len(tu(0), tu(5)), "Goodbye");
e.insert(tu(7), "cruel ");
e.delete(TextRange::from_len(tu(12), tu(1)));
e.build()
};
let new_text = edit.apply(text.as_text());
assert_eq!(new_text, "Goodbye, cruel World");
}
}
|
random_line_split
|
|
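The test above fixes the observable behaviour of TextEditBuilder. As a cross-check, here is a small self-contained sketch of the same copy/insert op model using plain std types (the Op enum and apply function are mine, not the crate's); it traces the op sequence the builder would emit for the "Hello, World!" case: Insert("Goodbye"), Copy(5..7) for ", ", Insert("cruel "), Copy(7..12) for "World".
// Minimal restatement of the op model, assumed equivalent to TextEditOp above
// but with usize ranges instead of TextRange/TextUnit.
#[derive(Debug)]
enum Op<'a> {
    Copy(std::ops::Range<usize>), // copy a slice of the original text
    Insert(&'a str),              // splice in new text
}

fn apply(text: &str, ops: &[Op]) -> String {
    let mut out = String::new();
    for op in ops {
        match op {
            Op::Copy(r) => out.push_str(&text[r.clone()]),
            Op::Insert(s) => out.push_str(s),
        }
    }
    out
}

fn main() {
    // Ops corresponding to replace(0..5, "Goodbye"), insert(7, "cruel "),
    // delete(12..13) on "Hello, World!".
    let ops = [
        Op::Insert("Goodbye"),
        Op::Copy(5..7),  // ", "
        Op::Insert("cruel "),
        Op::Copy(7..12), // "World"
    ];
    assert_eq!(apply("Hello, World!", &ops), "Goodbye, cruel World");
}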
text_edit.rs
|
use crate::{TextUnit, TextRange, TextBuf, Text, tu};
use std::cmp::Ordering;
#[derive(Clone, Debug)]
pub struct TextEdit {
pub ops: Vec<TextEditOp>,
}
#[derive(Clone, Debug)]
pub enum TextEditOp {
Copy(TextRange), // TODO: check for disjoint ranges
Insert(TextBuf),
}
impl TextEdit {
pub fn apply(&self, text: Text) -> TextBuf {
let mut result = String::new();
for s in self.ops.iter() {
match *s {
TextEditOp::Copy(range) => result += &text.slice(range).to_cow(),
TextEditOp::Insert(ref i) => result += &i.as_text().to_cow(),
}
}
result.into()
}
}
pub struct TextEditBuilder {
segments: Vec<TextEditOp>,
last_offset: TextUnit,
text_len: TextUnit,
}
impl TextEditBuilder {
pub fn new(text: Text) -> TextEditBuilder {
TextEditBuilder {
segments: Vec::new(),
last_offset: tu(0),
text_len: text.len()
}
}
pub fn build(mut self) -> TextEdit {
let len = self.text_len;
self.advance_to(len);
TextEdit { ops: self.segments }
}
pub fn insert<T: Into<TextBuf>>(&mut self, offset: TextUnit, text: T) {
self.advance_to(offset);
self.insert_(text.into());
}
pub fn delete(&mut self, range: TextRange) {
self.advance_to(range.start());
self.delete_len(range.len());
}
pub fn replace<T: Into<TextBuf>>(&mut self, range: TextRange, text: T) {
self.advance_to(range.start());
self.insert_(text.into());
self.delete_len(range.len());
}
fn advance_to(&mut self, offset: TextUnit) {
match self.last_offset.cmp(&offset) {
Ordering::Less => self.copy_up_to(offset),
Ordering::Equal => (),
Ordering::Greater => panic!("Invalid edit"),
}
}
fn
|
(&mut self, offset: TextUnit) {
let len = offset - self.last_offset;
self.copy_len(len)
}
fn copy_len(&mut self, len: TextUnit) {
let range = TextRange::from_len(self.last_offset, len);
self.segments.push(TextEditOp::Copy(range));
self.last_offset += len
}
fn insert_(&mut self, text: TextBuf) {
self.segments.push(TextEditOp::Insert(text))
}
fn delete_len(&mut self, len: TextUnit) {
self.last_offset += len
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_edits() {
let text: TextBuf = "Hello, World!".into();
let edit = {
let mut e = TextEditBuilder::new(text.as_text());
e.replace(TextRange::from_len(tu(0), tu(5)), "Goodbye");
e.insert(tu(7), "cruel ");
e.delete(TextRange::from_len(tu(12), tu(1)));
e.build()
};
let new_text = edit.apply(text.as_text());
assert_eq!(new_text, "Goodbye, cruel World");
}
}
|
copy_up_to
|
identifier_name
|
text_edit.rs
|
use crate::{TextUnit, TextRange, TextBuf, Text, tu};
use std::cmp::Ordering;
#[derive(Clone, Debug)]
pub struct TextEdit {
pub ops: Vec<TextEditOp>,
}
#[derive(Clone, Debug)]
pub enum TextEditOp {
Copy(TextRange), // TODO: check for disjoint ranges
Insert(TextBuf),
}
impl TextEdit {
pub fn apply(&self, text: Text) -> TextBuf {
let mut result = String::new();
for s in self.ops.iter() {
match *s {
TextEditOp::Copy(range) => result += &text.slice(range).to_cow(),
TextEditOp::Insert(ref i) => result += &i.as_text().to_cow(),
}
}
result.into()
}
}
pub struct TextEditBuilder {
segments: Vec<TextEditOp>,
last_offset: TextUnit,
text_len: TextUnit,
}
impl TextEditBuilder {
pub fn new(text: Text) -> TextEditBuilder {
TextEditBuilder {
segments: Vec::new(),
last_offset: tu(0),
text_len: text.len()
}
}
pub fn build(mut self) -> TextEdit {
let len = self.text_len;
self.advance_to(len);
TextEdit { ops: self.segments }
}
pub fn insert<T: Into<TextBuf>>(&mut self, offset: TextUnit, text: T) {
self.advance_to(offset);
self.insert_(text.into());
}
pub fn delete(&mut self, range: TextRange) {
self.advance_to(range.start());
self.delete_len(range.len());
}
pub fn replace<T: Into<TextBuf>>(&mut self, range: TextRange, text: T) {
self.advance_to(range.start());
self.insert_(text.into());
self.delete_len(range.len());
}
fn advance_to(&mut self, offset: TextUnit) {
match self.last_offset.cmp(&offset) {
Ordering::Less => self.copy_up_to(offset),
Ordering::Equal => (),
Ordering::Greater => panic!("Invalid edit"),
}
}
fn copy_up_to(&mut self, offset: TextUnit) {
let len = offset - self.last_offset;
self.copy_len(len)
}
fn copy_len(&mut self, len: TextUnit) {
let range = TextRange::from_len(self.last_offset, len);
self.segments.push(TextEditOp::Copy(range));
self.last_offset += len
}
fn insert_(&mut self, text: TextBuf) {
self.segments.push(TextEditOp::Insert(text))
}
fn delete_len(&mut self, len: TextUnit)
|
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_edits() {
let text: TextBuf = "Hello, World!".into();
let edit = {
let mut e = TextEditBuilder::new(text.as_text());
e.replace(TextRange::from_len(tu(0), tu(5)), "Goodbye");
e.insert(tu(7), "cruel ");
e.delete(TextRange::from_len(tu(12), tu(1)));
e.build()
};
let new_text = edit.apply(text.as_text());
assert_eq!(new_text, "Goodbye, cruel World");
}
}
|
{
self.last_offset += len
}
|
identifier_body
|
bounds-lifetime.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
type A = for<'b, 'a: 'b> fn(); //~ ERROR lifetime bounds cannot be used in this context
type B = for<'b, 'a: 'b,> fn(); //~ ERROR lifetime bounds cannot be used in this context
type C = for<'b, 'a: 'b +> fn(); //~ ERROR lifetime bounds cannot be used in this context
type D = for<'a, T> fn(); //~ ERROR only lifetime parameters can be used in this context
type E = for<T> Fn(); //~ ERROR only lifetime parameters can be used in this context
fn main()
|
{}
|
identifier_body
|
|
bounds-lifetime.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
type A = for<'b, 'a: 'b> fn(); //~ ERROR lifetime bounds cannot be used in this context
type B = for<'b, 'a: 'b,> fn(); //~ ERROR lifetime bounds cannot be used in this context
type C = for<'b, 'a: 'b +> fn(); //~ ERROR lifetime bounds cannot be used in this context
type D = for<'a, T> fn(); //~ ERROR only lifetime parameters can be used in this context
type E = for<T> Fn(); //~ ERROR only lifetime parameters can be used in this context
fn
|
() {}
|
main
|
identifier_name
|
bounds-lifetime.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
type A = for<'b, 'a: 'b> fn(); //~ ERROR lifetime bounds cannot be used in this context
type B = for<'b, 'a: 'b,> fn(); //~ ERROR lifetime bounds cannot be used in this context
type C = for<'b, 'a: 'b +> fn(); //~ ERROR lifetime bounds cannot be used in this context
type D = for<'a, T> fn(); //~ ERROR only lifetime parameters can be used in this context
type E = for<T> Fn(); //~ ERROR only lifetime parameters can be used in this context
fn main() {}
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
random_line_split
|
nodeiterator.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::NodeIteratorBinding;
use dom::bindings::codegen::Bindings::NodeIteratorBinding::NodeIteratorMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflector, reflect_dom_object};
#[dom_struct]
pub struct NodeIterator {
reflector_: Reflector
}
impl NodeIterator {
fn new_inherited() -> NodeIterator {
NodeIterator {
reflector_: Reflector::new()
}
}
pub fn new(global: GlobalRef) -> Temporary<NodeIterator>
|
}
impl<'a> NodeIteratorMethods for JSRef<'a, NodeIterator> {
}
|
{
reflect_dom_object(box NodeIterator::new_inherited(), global, NodeIteratorBinding::Wrap)
}
|
identifier_body
|
nodeiterator.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::NodeIteratorBinding;
use dom::bindings::codegen::Bindings::NodeIteratorBinding::NodeIteratorMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflector, reflect_dom_object};
#[dom_struct]
pub struct
|
{
reflector_: Reflector
}
impl NodeIterator {
fn new_inherited() -> NodeIterator {
NodeIterator {
reflector_: Reflector::new()
}
}
pub fn new(global: GlobalRef) -> Temporary<NodeIterator> {
reflect_dom_object(box NodeIterator::new_inherited(), global, NodeIteratorBinding::Wrap)
}
}
impl<'a> NodeIteratorMethods for JSRef<'a, NodeIterator> {
}
|
NodeIterator
|
identifier_name
|
nodeiterator.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::NodeIteratorBinding;
use dom::bindings::codegen::Bindings::NodeIteratorBinding::NodeIteratorMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflector, reflect_dom_object};
#[dom_struct]
|
fn new_inherited() -> NodeIterator {
NodeIterator {
reflector_: Reflector::new()
}
}
pub fn new(global: GlobalRef) -> Temporary<NodeIterator> {
reflect_dom_object(box NodeIterator::new_inherited(), global, NodeIteratorBinding::Wrap)
}
}
impl<'a> NodeIteratorMethods for JSRef<'a, NodeIterator> {
}
|
pub struct NodeIterator {
reflector_: Reflector
}
impl NodeIterator {
|
random_line_split
|
encode.rs
|
/// get three u8 characters and convert to 4 indexes of ALPHABET table
fn encode_block(block: &[u8]) -> [u8; 4] {
let mut bitvec: u32 = 0xff_00_00_00;
// first char place in second octet of bitvec
// second char to third, etc.
for (i, chr) in block.iter().enumerate() {
let x: u32 = (*chr as u32) << (8 * (2 - i));
bitvec |= x;
}
// <-----> six bits
let mut mask: u32 = 0b0000_0000_1111_1100_0000_0000_0000_0000;
let mut res: [u8; 4] = [0; 4];
// divide three octets (2, 3, 4) of bitvec to four six-bits integers
for i in 0..4 {
res[i] = ((bitvec & mask) >> (6 * (3 - i))) as u8;
mask = mask >> 6;
}
res
}
/// get translation table and input data buffer, output data buffer
/// and return count of encoded bytes
pub fn
|
(table: &[u8], data: &[u8], out: &mut [u8]) -> usize {
let mut data = data.iter();
let mut block: [u8; 3] = [0; 3];
let mut done = false;
let mut count;
let mut out_size: usize = 0;
while !done {
count = 0;
// fill block with chars
// count only those symbols that were actually added
for i in 0..3 {
block[i] = match data.next() {
Some(chr) => {
count += 1;
*chr
},
None => {
done = true;
0
},
}
}
// in case of empty iterator
if count == 0 {
break
}
for idx in &encode_block(&block) {
if count + 1 != 0 {
out[out_size] = table[*idx as usize];
count -= 1;
} else {
out[out_size] = '=' as u8;
}
out_size += 1;
}
}
out_size
}
#[cfg(test)]
mod tests {
use super::{encode_block, encode};
#[test]
fn test_block() {
let examples = [
([0u8, 0, 0], [0u8, 0, 0, 0]),
([1, 0, 0], [0, 16, 0, 0]),
([0, 1, 0], [0, 0, 4, 0]),
([0, 0, 1], [0, 0, 0, 1]),
];
for &(block, res) in examples.iter() {
assert_eq!(res, encode_block(&block));
}
}
#[test]
fn test_encode() {
let c = '+' as u8;
let eq = '=' as u8;
let table = [c; 64];
let mut out = [0u8; 8];
let examples = [
("qwe", vec![c; 4]),
("qweqwe", vec![c; 8]),
("q", vec![c, c, eq, eq]),
("qw", vec![c, c, c, eq]),
];
for &(data, ref res) in examples.iter() {
let count = encode(&table, data.as_bytes(), &mut out);
assert_eq!(res.iter().collect::<Vec<_>>(),
out[..count].iter().collect::<Vec<_>>());
}
}
}
|
encode
|
identifier_name
|
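encode_block above packs three input bytes into the low 24 bits of a u32 and slices them back out as four 6-bit indexes, as in base64. A self-contained arithmetic check, independent of the crate (the byte values are the classic "Man" example, not taken from the source), reproduces the same split the mask/shift loop performs:
fn main() {
    // "Man" = [77, 97, 110]; the three bytes form the 24-bit value 0x4D616E.
    let block: [u8; 3] = [77, 97, 110];
    let bits: u32 = (block[0] as u32) << 16 | (block[1] as u32) << 8 | (block[2] as u32);
    // Four 6-bit groups, high to low, matching res[0]..res[3] above.
    let idx: [u8; 4] = [
        ((bits >> 18) & 0x3f) as u8,
        ((bits >> 12) & 0x3f) as u8,
        ((bits >> 6) & 0x3f) as u8,
        (bits & 0x3f) as u8,
    ];
    // With the standard base64 alphabet these indexes spell "TWFu".
    assert_eq!(idx, [19, 22, 5, 46]);
}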
encode.rs
|
/// get three u8 characters and convert to 4 indexes of ALPHABET table
fn encode_block(block: &[u8]) -> [u8; 4]
|
}
/// get translation table and input data buffer, output data buffer
/// and return count of encoded bytes
pub fn encode(table: &[u8], data: &[u8], out: &mut [u8]) -> usize {
let mut data = data.iter();
let mut block: [u8; 3] = [0; 3];
let mut done = false;
let mut count;
let mut out_size: usize = 0;
while !done {
count = 0;
// fill block with chars
// count only those symbols that were actually added
for i in 0..3 {
block[i] = match data.next() {
Some(chr) => {
count += 1;
*chr
},
None => {
done = true;
0
},
}
}
// in case of empty iterator
if count == 0 {
break
}
for idx in &encode_block(&block) {
if count + 1 != 0 {
out[out_size] = table[*idx as usize];
count -= 1;
} else {
out[out_size] = '=' as u8;
}
out_size += 1;
}
}
out_size
}
#[cfg(test)]
mod tests {
use super::{encode_block, encode};
#[test]
fn test_block() {
let examples = [
([0u8, 0, 0], [0u8, 0, 0, 0]),
([1, 0, 0], [0, 16, 0, 0]),
([0, 1, 0], [0, 0, 4, 0]),
([0, 0, 1], [0, 0, 0, 1]),
];
for &(block, res) in examples.iter() {
assert_eq!(res, encode_block(&block));
}
}
#[test]
fn test_encode() {
let c = '+' as u8;
let eq = '=' as u8;
let table = [c; 64];
let mut out = [0u8; 8];
let examples = [
("qwe", vec![c; 4]),
("qweqwe", vec![c; 8]),
("q", vec![c, c, eq, eq]),
("qw", vec![c, c, c, eq]),
];
for &(data, ref res) in examples.iter() {
let count = encode(&table, data.as_bytes(), &mut out);
assert_eq!(res.iter().collect::<Vec<_>>(),
out[..count].iter().collect::<Vec<_>>());
}
}
}
|
{
let mut bitvec: u32 = 0xff_00_00_00;
// first char place in second octet of bitvec
// second char to third, etc.
for (i, chr) in block.iter().enumerate() {
let x: u32 = (*chr as u32) << (8 * (2 - i));
bitvec |= x;
}
// <-----> six bits
let mut mask: u32 = 0b0000_0000_1111_1100_0000_0000_0000_0000;
let mut res: [u8; 4] = [0; 4];
// divide three octets (2, 3, 4) of bitvec to four six-bits integers
for i in 0..4 {
res[i] = ((bitvec & mask) >> (6 * (3 - i))) as u8;
mask = mask >> 6;
}
res
|
identifier_body
|
encode.rs
|
/// get three u8 characters and convert to 4 indexes of ALPHABET table
fn encode_block(block: &[u8]) -> [u8; 4] {
let mut bitvec: u32 = 0xff_00_00_00;
// first char place in second octet of bitvec
// second char to third, etc.
for (i, chr) in block.iter().enumerate() {
let x: u32 = (*chr as u32) << (8 * (2 - i));
bitvec |= x;
}
// <-----> six bits
let mut mask: u32 = 0b0000_0000_1111_1100_0000_0000_0000_0000;
let mut res: [u8; 4] = [0; 4];
// divide three octets (2, 3, 4) of bitvec to four six-bits integers
for i in 0..4 {
res[i] = ((bitvec & mask) >> (6 * (3 - i))) as u8;
mask = mask >> 6;
}
res
}
/// get translation table and input data buffer, output data buffer
/// and return count of encoded bytes
pub fn encode(table: &[u8], data: &[u8], out: &mut [u8]) -> usize {
let mut data = data.iter();
let mut block: [u8; 3] = [0; 3];
let mut done = false;
let mut count;
let mut out_size: usize = 0;
while !done {
count = 0;
// fill block with chars
// count only those symbols that were actually added
for i in 0..3 {
block[i] = match data.next() {
Some(chr) =>
|
,
None => {
done = true;
0
},
}
}
// in case of empty iterator
if count == 0 {
break
}
for idx in &encode_block(&block) {
if count + 1 != 0 {
out[out_size] = table[*idx as usize];
count -= 1;
} else {
out[out_size] = '=' as u8;
}
out_size += 1;
}
}
out_size
}
#[cfg(test)]
mod tests {
use super::{encode_block, encode};
#[test]
fn test_block() {
let examples = [
([0u8, 0, 0], [0u8, 0, 0, 0]),
([1, 0, 0], [0, 16, 0, 0]),
([0, 1, 0], [0, 0, 4, 0]),
([0, 0, 1], [0, 0, 0, 1]),
];
for &(block, res) in examples.iter() {
assert_eq!(res, encode_block(&block));
}
}
#[test]
fn test_encode() {
let c = '+' as u8;
let eq = '=' as u8;
let table = [c; 64];
let mut out = [0u8; 8];
let examples = [
("qwe", vec![c; 4]),
("qweqwe", vec![c; 8]),
("q", vec![c, c, eq, eq]),
("qw", vec![c, c, c, eq]),
];
for &(data, ref res) in examples.iter() {
let count = encode(&table, data.as_bytes(), &mut out);
assert_eq!(res.iter().collect::<Vec<_>>(),
out[..count].iter().collect::<Vec<_>>());
}
}
}
|
{
count += 1;
*chr
}
|
conditional_block
|
encode.rs
|
/// get three u8 characters and convert to 4 indexes of ALPHABET table
fn encode_block(block: &[u8]) -> [u8; 4] {
let mut bitvec: u32 = 0xff_00_00_00;
// first char place in second octet of bitvec
// second char to third, etc.
for (i, chr) in block.iter().enumerate() {
let x: u32 = (*chr as u32) << (8 * (2 - i));
bitvec |= x;
}
// <-----> six bits
let mut mask: u32 = 0b0000_0000_1111_1100_0000_0000_0000_0000;
let mut res: [u8; 4] = [0; 4];
// divide three octets (2, 3, 4) of bitvec to four six-bits integers
for i in 0..4 {
res[i] = ((bitvec & mask) >> (6 * (3 - i))) as u8;
mask = mask >> 6;
}
res
}
/// get translation table and input data buffer, output data buffer
/// and return count of encoded bytes
pub fn encode(table: &[u8], data: &[u8], out: &mut [u8]) -> usize {
let mut data = data.iter();
let mut block: [u8; 3] = [0; 3];
let mut done = false;
let mut count;
let mut out_size: usize = 0;
while !done {
count = 0;
|
Some(chr) => {
count += 1;
*chr
},
None => {
done = true;
0
},
}
}
// in case of empty iterator
if count == 0 {
break
}
for idx in &encode_block(&block) {
if count + 1 != 0 {
out[out_size] = table[*idx as usize];
count -= 1;
} else {
out[out_size] = '=' as u8;
}
out_size += 1;
}
}
out_size
}
#[cfg(test)]
mod tests {
use super::{encode_block, encode};
#[test]
fn test_block() {
let examples = [
([0u8, 0, 0], [0u8, 0, 0, 0]),
([1, 0, 0], [0, 16, 0, 0]),
([0, 1, 0], [0, 0, 4, 0]),
([0, 0, 1], [0, 0, 0, 1]),
];
for &(block, res) in examples.iter() {
assert_eq!(res, encode_block(&block));
}
}
#[test]
fn test_encode() {
let c = '+' as u8;
let eq = '=' as u8;
let table = [c; 64];
let mut out = [0u8; 8];
let examples = [
("qwe", vec![c; 4]),
("qweqwe", vec![c; 8]),
("q", vec![c, c, eq, eq]),
("qw", vec![c, c, c, eq]),
];
for &(data, ref res) in examples.iter() {
let count = encode(&table, data.as_bytes(), &mut out);
assert_eq!(res.iter().collect::<Vec<_>>(),
out[..count].iter().collect::<Vec<_>>());
}
}
}
|
// fill block with chars
// count only those symbols that were actually added
for i in 0..3 {
block[i] = match data.next() {
|
random_line_split
|
unit.rs
|
extern crate stratis_shared as shared;
extern crate uuid;
use ::uuid::Uuid;
use std::io::prelude::*;
use std::io::Cursor;
use shared::player::Player;
use shared::opcode;
use shared::chat;
/// recreate to appease type checker
fn cursor_transform (s: &mut Cursor<Vec<u8>>) -> Cursor<&mut [u8]> {
Cursor::new(&mut s.get_mut()[..])
}
fn assert_opcode(s: &mut Cursor<&mut [u8]>, op: u8) {
s.set_position(0);
let mut b = [0u8];
let r = s.read_exact(&mut b);
assert!(r.is_ok());
assert_eq!(b[0], op);
}
#[test]
fn
|
() {
let p = Player::default();
let id = Uuid::new_v4();
let mut bytes = Player::to_bytes(Some(&id),&p);
let mut s = Cursor::new(&mut bytes[..]);
assert!(s.get_ref().len() > 0);
assert_opcode(&mut s,opcode::PLAYER);
let r = Player::from_stream(&mut s, true);
assert!(r.0.is_some());
assert!(r.1.is_some());
assert_eq!(id,r.0.unwrap());
assert_eq!(p,r.1.unwrap());
}
#[test]
fn player () {
let p = Player::default();
let mut bytes = Player::to_bytes(None,&p);
let mut s = Cursor::new(&mut bytes[..]);
assert!(s.get_ref().len() > 0);
assert_opcode(&mut s,opcode::PLAYER);
let r = Player::from_stream(&mut s, false);
assert!(r.0.is_none());
assert!(r.1.is_some());
assert_eq!(p,r.1.unwrap());
}
#[test]
fn chat() {
let text = "test";
let mut s = Cursor::new(vec![]);
chat::write_text(&mut s, text);
let mut s = cursor_transform(&mut s);
assert!(s.get_ref().len() > 0);
assert_opcode(&mut s,opcode::CHAT);
let r = chat::read_text(&mut s);
assert!(r.is_some());
assert_eq!(text, r.unwrap());
}
|
player_uuid
|
identifier_name
|
unit.rs
|
extern crate stratis_shared as shared;
extern crate uuid;
use ::uuid::Uuid;
use std::io::prelude::*;
use std::io::Cursor;
use shared::player::Player;
use shared::opcode;
use shared::chat;
/// recreate to appease type checker
fn cursor_transform (s: &mut Cursor<Vec<u8>>) -> Cursor<&mut [u8]> {
Cursor::new(&mut s.get_mut()[..])
}
fn assert_opcode(s: &mut Cursor<&mut [u8]>, op: u8) {
s.set_position(0);
let mut b = [0u8];
let r = s.read_exact(&mut b);
assert!(r.is_ok());
assert_eq!(b[0], op);
}
#[test]
fn player_uuid () {
let p = Player::default();
let id = Uuid::new_v4();
let mut bytes = Player::to_bytes(Some(&id),&p);
let mut s = Cursor::new(&mut bytes[..]);
assert!(s.get_ref().len() > 0);
assert_opcode(&mut s,opcode::PLAYER);
let r = Player::from_stream(&mut s, true);
assert!(r.0.is_some());
assert!(r.1.is_some());
assert_eq!(id,r.0.unwrap());
assert_eq!(p,r.1.unwrap());
}
#[test]
fn player () {
let p = Player::default();
let mut bytes = Player::to_bytes(None,&p);
let mut s = Cursor::new(&mut bytes[..]);
assert!(s.get_ref().len() > 0);
assert_opcode(&mut s,opcode::PLAYER);
let r = Player::from_stream(&mut s, false);
assert!(r.0.is_none());
assert!(r.1.is_some());
assert_eq!(p,r.1.unwrap());
}
#[test]
fn chat()
|
{
let text = "test";
let mut s = Cursor::new(vec![]);
chat::write_text(&mut s, text);
let mut s = cursor_transform(&mut s);
assert!(s.get_ref().len() > 0);
assert_opcode(&mut s,opcode::CHAT);
let r = chat::read_text(&mut s);
assert!(r.is_some());
assert_eq!(text, r.unwrap());
}
|
identifier_body
|
|
unit.rs
|
extern crate stratis_shared as shared;
extern crate uuid;
use ::uuid::Uuid;
use std::io::prelude::*;
use std::io::Cursor;
use shared::player::Player;
use shared::opcode;
use shared::chat;
/// recreate to appease type checker
fn cursor_transform (s: &mut Cursor<Vec<u8>>) -> Cursor<&mut [u8]> {
Cursor::new(&mut s.get_mut()[..])
}
fn assert_opcode(s: &mut Cursor<&mut [u8]>, op: u8) {
|
}
#[test]
fn player_uuid () {
let p = Player::default();
let id = Uuid::new_v4();
let mut bytes = Player::to_bytes(Some(&id),&p);
let mut s = Cursor::new(&mut bytes[..]);
assert!(s.get_ref().len() > 0);
assert_opcode(&mut s,opcode::PLAYER);
let r = Player::from_stream(&mut s, true);
assert!(r.0.is_some());
assert!(r.1.is_some());
assert_eq!(id,r.0.unwrap());
assert_eq!(p,r.1.unwrap());
}
#[test]
fn player () {
let p = Player::default();
let mut bytes = Player::to_bytes(None,&p);
let mut s = Cursor::new(&mut bytes[..]);
assert!(s.get_ref().len() > 0);
assert_opcode(&mut s,opcode::PLAYER);
let r = Player::from_stream(&mut s, false);
assert!(r.0.is_none());
assert!(r.1.is_some());
assert_eq!(p,r.1.unwrap());
}
#[test]
fn chat() {
let text = "test";
let mut s = Cursor::new(vec![]);
chat::write_text(&mut s, text);
let mut s = cursor_transform(&mut s);
assert!(s.get_ref().len() > 0);
assert_opcode(&mut s,opcode::CHAT);
let r = chat::read_text(&mut s);
assert!(r.is_some());
assert_eq!(text, r.unwrap());
}
|
s.set_position(0);
let mut b = [0u8];
let r = s.read_exact(&mut b);
assert!(r.is_ok());
assert_eq!(b[0], op);
|
random_line_split
|
main.rs
|
extern crate chrono;
extern crate iron;
extern crate persistent;
extern crate r2d2;
extern crate r2d2_sqlite;
extern crate router;
extern crate rusqlite;
extern crate rustc_serialize;
extern crate sha2;
use chrono::*;
use iron::prelude::*;
use iron::Url;
use iron::modifiers::Redirect;
use iron::status::Status;
use iron::typemap::Key;
use persistent::Read;
use r2d2::Pool;
use r2d2_sqlite::SqliteConnectionManager;
use router::Router;
use rusqlite::Connection;
use rustc_serialize::hex::ToHex;
use sha2::{Digest, Sha256};
use std::env;
const HOST: &'static str = "https://yaus.pw/";
pub type SqlitePool = Pool<SqliteConnectionManager>;
pub struct YausDb;
impl Key for YausDb {
type Value = SqlitePool;
}
// A simple macro to return early if the URL can't parse.
macro_rules! try_url {
($url:expr) => {
match Url::parse($url) {
Ok(_) => { },
Err(_) => {
return Ok(Response::with((Status::BadRequest, "Malformed URL")));
}
}
}
}
fn create_shortened_url(db: &Connection, long_url: &str) -> IronResult<Response> {
let mut d = Sha256::default();
d.input(long_url.as_bytes());
let locator = d.result().as_slice().to_hex();
let timestamp = Local::now().to_rfc3339();
db.execute("INSERT INTO urls VALUES (NULL,?1,?2,?3)",
&[&timestamp, &long_url, &&locator[0..7]]).unwrap();
|
}
/// Given a long URL, see if it already exists in the table, else create an entry and return
/// it.
///
/// A 200 means that a shortened URL already exists and has been returned. A 201
/// response means that a new shortened URL has been created.
fn check_or_shorten_url(db: &Connection, long_url: &str) -> IronResult<Response> {
let mut stmt = db.prepare("SELECT locator FROM urls WHERE url = (?)").unwrap();
let mut row = stmt.query_map::<String, _>(&[&long_url], |r| r.get(0)).unwrap();
if let Some(l) = row.next() {
return Ok(Response::with((Status::Ok, [HOST, &l.unwrap()].join(""))));
}
create_shortened_url(db, long_url)
}
/// The handler to shorten a URL.
fn shorten_handler(req: &mut Request) -> IronResult<Response> {
match req.url.clone().query() {
None => { Ok(Response::with((Status::BadRequest, "URL missing in query"))) },
Some(s) => {
let (k, v) = s.split_at(4);
if k == "url=" {
try_url!(v);
let pool = req.get::<Read<YausDb>>().unwrap().clone();
let db = pool.get().unwrap();
check_or_shorten_url(&db, v)
} else {
Ok(Response::with((Status::BadRequest, "Malformed query string")))
}
}
}
}
/// The handler that redirects to the long URL.
fn redirect_handler(req: &mut Request) -> IronResult<Response> {
let pool = req.get::<Read<YausDb>>().unwrap().clone();
let db = pool.get().unwrap();
let locator = req.url.path()[0];
let mut stmt = db.prepare("SELECT url FROM urls WHERE locator = (?)").unwrap();
let mut row = stmt.query_map::<String, _>(&[&locator], |r| r.get(0)).unwrap();
if let Some(u) = row.next() {
let long_url = Url::parse(&u.unwrap()).unwrap();
Ok(Response::with((Status::MovedPermanently, Redirect(long_url))))
} else {
Ok(Response::with((Status::NotFound, "Not found")))
}
}
fn index_handler(_: &mut Request) -> IronResult<Response> {
Ok(Response::with((Status::Ok, "See https://github.com/gsquire/yaus for the API")))
}
fn main() {
let mut router = Router::new();
router.get("/shorten", shorten_handler, "shorten");
router.get("/:locator", redirect_handler, "locator");
router.get("/", index_handler, "index");
let config = r2d2::Config::default();
let db = env::var("SHORT_DB").unwrap();
let manager = SqliteConnectionManager::new(&db);
let pool = r2d2::Pool::new(config, manager).unwrap();
let mut chain = Chain::new(router);
chain.link_before(Read::<YausDb>::one(pool));
Iron::new(chain).http("localhost:3000").unwrap();
}
|
Ok(Response::with((Status::Created, [HOST, &locator[0..7]].join(""))))
|
random_line_split
|
main.rs
|
extern crate chrono;
extern crate iron;
extern crate persistent;
extern crate r2d2;
extern crate r2d2_sqlite;
extern crate router;
extern crate rusqlite;
extern crate rustc_serialize;
extern crate sha2;
use chrono::*;
use iron::prelude::*;
use iron::Url;
use iron::modifiers::Redirect;
use iron::status::Status;
use iron::typemap::Key;
use persistent::Read;
use r2d2::Pool;
use r2d2_sqlite::SqliteConnectionManager;
use router::Router;
use rusqlite::Connection;
use rustc_serialize::hex::ToHex;
use sha2::{Digest, Sha256};
use std::env;
const HOST: &'static str = "https://yaus.pw/";
pub type SqlitePool = Pool<SqliteConnectionManager>;
pub struct YausDb;
impl Key for YausDb {
type Value = SqlitePool;
}
// A simple macro to return early if the URL can't parse.
macro_rules! try_url {
($url:expr) => {
match Url::parse($url) {
Ok(_) => { },
Err(_) => {
return Ok(Response::with((Status::BadRequest, "Malformed URL")));
}
}
}
}
fn create_shortened_url(db: &Connection, long_url: &str) -> IronResult<Response>
|
/// Given a long URL, see if it already exists in the table, else create an entry and return
/// it.
///
/// A 200 means that a shortened URL already exists and has been returned. A 201
/// response means that a new shortened URL has been created.
fn check_or_shorten_url(db: &Connection, long_url: &str) -> IronResult<Response> {
let mut stmt = db.prepare("SELECT locator FROM urls WHERE url = (?)").unwrap();
let mut row = stmt.query_map::<String, _>(&[&long_url], |r| r.get(0)).unwrap();
if let Some(l) = row.next() {
return Ok(Response::with((Status::Ok, [HOST, &l.unwrap()].join(""))));
}
create_shortened_url(db, long_url)
}
/// The handler to shorten a URL.
fn shorten_handler(req: &mut Request) -> IronResult<Response> {
match req.url.clone().query() {
None => { Ok(Response::with((Status::BadRequest, "URL missing in query"))) },
Some(s) => {
let (k, v) = s.split_at(4);
if k == "url=" {
try_url!(v);
let pool = req.get::<Read<YausDb>>().unwrap().clone();
let db = pool.get().unwrap();
check_or_shorten_url(&db, v)
} else {
Ok(Response::with((Status::BadRequest, "Malformed query string")))
}
}
}
}
/// The handler that redirects to the long URL.
fn redirect_handler(req: &mut Request) -> IronResult<Response> {
let pool = req.get::<Read<YausDb>>().unwrap().clone();
let db = pool.get().unwrap();
let locator = req.url.path()[0];
let mut stmt = db.prepare("SELECT url FROM urls WHERE locator = (?)").unwrap();
let mut row = stmt.query_map::<String, _>(&[&locator], |r| r.get(0)).unwrap();
if let Some(u) = row.next() {
let long_url = Url::parse(&u.unwrap()).unwrap();
Ok(Response::with((Status::MovedPermanently, Redirect(long_url))))
} else {
Ok(Response::with((Status::NotFound, "Not found")))
}
}
fn index_handler(_: &mut Request) -> IronResult<Response> {
Ok(Response::with((Status::Ok, "See https://github.com/gsquire/yaus for the API")))
}
fn main() {
let mut router = Router::new();
router.get("/shorten", shorten_handler, "shorten");
router.get("/:locator", redirect_handler, "locator");
router.get("/", index_handler, "index");
let config = r2d2::Config::default();
let db = env::var("SHORT_DB").unwrap();
let manager = SqliteConnectionManager::new(&db);
let pool = r2d2::Pool::new(config, manager).unwrap();
let mut chain = Chain::new(router);
chain.link_before(Read::<YausDb>::one(pool));
Iron::new(chain).http("localhost:3000").unwrap();
}
|
{
let mut d = Sha256::default();
d.input(long_url.as_bytes());
let locator = d.result().as_slice().to_hex();
let timestamp = Local::now().to_rfc3339();
db.execute("INSERT INTO urls VALUES (NULL, ?1, ?2, ?3)",
&[&timestamp, &long_url, &&locator[0..7]]).unwrap();
Ok(Response::with((Status::Created, [HOST, &locator[0..7]].join(""))))
}
|
identifier_body
|
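create_shortened_url above derives the locator deterministically: the SHA-256 digest of the long URL, hex-encoded, truncated to its first 7 characters. A hedged sketch of just that step, reusing the same sha2 and rustc_serialize calls the handler uses (the locator_for name is mine):
// Assumes the crates declared above: sha2 (Digest, Sha256) and rustc_serialize's ToHex.
fn locator_for(long_url: &str) -> String {
    let mut d = Sha256::default();
    d.input(long_url.as_bytes());
    // Full 64-character hex digest; only the first 7 characters are stored
    // in the urls table and served back as HOST + locator.
    let digest_hex = d.result().as_slice().to_hex();
    digest_hex[0..7].to_string()
}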
main.rs
|
extern crate chrono;
extern crate iron;
extern crate persistent;
extern crate r2d2;
extern crate r2d2_sqlite;
extern crate router;
extern crate rusqlite;
extern crate rustc_serialize;
extern crate sha2;
use chrono::*;
use iron::prelude::*;
use iron::Url;
use iron::modifiers::Redirect;
use iron::status::Status;
use iron::typemap::Key;
use persistent::Read;
use r2d2::Pool;
use r2d2_sqlite::SqliteConnectionManager;
use router::Router;
use rusqlite::Connection;
use rustc_serialize::hex::ToHex;
use sha2::{Digest, Sha256};
use std::env;
const HOST: &'static str = "https://yaus.pw/";
pub type SqlitePool = Pool<SqliteConnectionManager>;
pub struct YausDb;
impl Key for YausDb {
type Value = SqlitePool;
}
// A simple macro to return early if the URL can't parse.
macro_rules! try_url {
($url:expr) => {
match Url::parse($url) {
Ok(_) => { },
Err(_) => {
return Ok(Response::with((Status::BadRequest, "Malformed URL")));
}
}
}
}
fn create_shortened_url(db: &Connection, long_url: &str) -> IronResult<Response> {
let mut d = Sha256::default();
d.input(long_url.as_bytes());
let locator = d.result().as_slice().to_hex();
let timestamp = Local::now().to_rfc3339();
db.execute("INSERT INTO urls VALUES (NULL,?1,?2,?3)",
&[&timestamp, &long_url, &&locator[0..7]]).unwrap();
Ok(Response::with((Status::Created, [HOST, &locator[0..7]].join(""))))
}
/// Given a long URL, see if it already exists in the table, else create an entry and return
/// it.
///
/// A 200 means that a shortened URL already exists and has been returned. A 201
/// response means that a new shortened URL has been created.
fn check_or_shorten_url(db: &Connection, long_url: &str) -> IronResult<Response> {
let mut stmt = db.prepare("SELECT locator FROM urls WHERE url = (?)").unwrap();
let mut row = stmt.query_map::<String, _>(&[&long_url], |r| r.get(0)).unwrap();
if let Some(l) = row.next() {
return Ok(Response::with((Status::Ok, [HOST, &l.unwrap()].join(""))));
}
create_shortened_url(db, long_url)
}
/// The handler to shorten a URL.
fn shorten_handler(req: &mut Request) -> IronResult<Response> {
match req.url.clone().query() {
None => { Ok(Response::with((Status::BadRequest, "URL missing in query"))) },
Some(s) => {
let (k, v) = s.split_at(4);
if k == "url=" {
try_url!(v);
let pool = req.get::<Read<YausDb>>().unwrap().clone();
let db = pool.get().unwrap();
check_or_shorten_url(&db, v)
} else {
Ok(Response::with((Status::BadRequest, "Malformed query string")))
}
}
}
}
/// The handler that redirects to the long URL.
fn redirect_handler(req: &mut Request) -> IronResult<Response> {
let pool = req.get::<Read<YausDb>>().unwrap().clone();
let db = pool.get().unwrap();
let locator = req.url.path()[0];
let mut stmt = db.prepare("SELECT url FROM urls WHERE locator = (?)").unwrap();
let mut row = stmt.query_map::<String, _>(&[&locator], |r| r.get(0)).unwrap();
if let Some(u) = row.next()
|
else {
Ok(Response::with((Status::NotFound, "Not found")))
}
}
fn index_handler(_: &mut Request) -> IronResult<Response> {
Ok(Response::with((Status::Ok, "See https://github.com/gsquire/yaus for the API")))
}
fn main() {
let mut router = Router::new();
router.get("/shorten", shorten_handler, "shorten");
router.get("/:locator", redirect_handler, "locator");
router.get("/", index_handler, "index");
let config = r2d2::Config::default();
let db = env::var("SHORT_DB").unwrap();
let manager = SqliteConnectionManager::new(&db);
let pool = r2d2::Pool::new(config, manager).unwrap();
let mut chain = Chain::new(router);
chain.link_before(Read::<YausDb>::one(pool));
Iron::new(chain).http("localhost:3000").unwrap();
}
|
{
let long_url = Url::parse(&u.unwrap()).unwrap();
Ok(Response::with((Status::MovedPermanently, Redirect(long_url))))
}
|
conditional_block
|