file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
session.rs
|
use std::io::Write;
use nickel::{Continue, FormBody, Halt, MediaType, Middleware, MiddlewareResult, Request, Response};
use redis::Commands;
use api::consumer::response::BADSESSION;
|
pub struct SessionMiddleware;
impl<D> Middleware<D> for SessionMiddleware {
fn invoke<'mw, 'conn>(&self,
req: &mut Request<'mw, 'conn, D>,
mut rep: Response<'mw, D>)
-> MiddlewareResult<'mw, D> {
// Verify the scrobble session id, to be used in all now-playing and submission requests.
if req.path_without_query()
.map(|p| p.starts_with(CONSUMER_HANDSHAKE) && p.len() > CONSUMER_HANDSHAKE.len())
.unwrap_or(false) {
if !match try_with!(rep, req.redis_conn()).get::<&'static str, String>(SESSION_KEY) {
Ok(sid) => {
try_with!(rep, req.form_body())
.get("s")
.map(|value| value == sid)
.unwrap_or(false)
}
Err(_) => false,
} {
warn!("Consumer: Invalid session ID, abort!");
rep.set(MediaType::Txt);
let mut stream = try!(rep.start());
if let Err(err) = stream.write_all(BADSESSION.as_bytes()) {
return stream.bail(format!("Failed to halt session: {}", err));
} else {
return Ok(Halt(stream));
}
}
}
Ok(Continue(rep))
}
}
|
use api::rds::RedisExtension;
use api::routes::CONSUMER_HANDSHAKE;
pub const SESSION_KEY: &'static str = "audioscrobbler:session";
|
random_line_split
|
session.rs
|
use std::io::Write;
use nickel::{Continue, FormBody, Halt, MediaType, Middleware, MiddlewareResult, Request, Response};
use redis::Commands;
use api::consumer::response::BADSESSION;
use api::rds::RedisExtension;
use api::routes::CONSUMER_HANDSHAKE;
pub const SESSION_KEY: &'static str = "audioscrobbler:session";
pub struct SessionMiddleware;
impl<D> Middleware<D> for SessionMiddleware {
fn invoke<'mw, 'conn>(&self,
req: &mut Request<'mw, 'conn, D>,
mut rep: Response<'mw, D>)
-> MiddlewareResult<'mw, D>
|
return Ok(Halt(stream));
}
}
}
Ok(Continue(rep))
}
}
|
{
// Verify the scrobble session id, to be used in all now-playing and submission requests.
if req.path_without_query()
.map(|p| p.starts_with(CONSUMER_HANDSHAKE) && p.len() > CONSUMER_HANDSHAKE.len())
.unwrap_or(false) {
if !match try_with!(rep, req.redis_conn()).get::<&'static str, String>(SESSION_KEY) {
Ok(sid) => {
try_with!(rep, req.form_body())
.get("s")
.map(|value| value == sid)
.unwrap_or(false)
}
Err(_) => false,
} {
warn!("Consumer: Invalid session ID, abort!");
rep.set(MediaType::Txt);
let mut stream = try!(rep.start());
if let Err(err) = stream.write_all(BADSESSION.as_bytes()) {
return stream.bail(format!("Failed to halt session: {}", err));
} else {
|
identifier_body
|
session.rs
|
use std::io::Write;
use nickel::{Continue, FormBody, Halt, MediaType, Middleware, MiddlewareResult, Request, Response};
use redis::Commands;
use api::consumer::response::BADSESSION;
use api::rds::RedisExtension;
use api::routes::CONSUMER_HANDSHAKE;
pub const SESSION_KEY: &'static str = "audioscrobbler:session";
pub struct SessionMiddleware;
impl<D> Middleware<D> for SessionMiddleware {
fn
|
<'mw, 'conn>(&self,
req: &mut Request<'mw, 'conn, D>,
mut rep: Response<'mw, D>)
-> MiddlewareResult<'mw, D> {
// Verify the scrobble session id, to be used in all now-playing and submission requests.
if req.path_without_query()
.map(|p| p.starts_with(CONSUMER_HANDSHAKE) && p.len() > CONSUMER_HANDSHAKE.len())
.unwrap_or(false) {
if !match try_with!(rep, req.redis_conn()).get::<&'static str, String>(SESSION_KEY) {
Ok(sid) => {
try_with!(rep, req.form_body())
.get("s")
.map(|value| value == sid)
.unwrap_or(false)
}
Err(_) => false,
} {
warn!("Consumer: Invalid session ID, abort!");
rep.set(MediaType::Txt);
let mut stream = try!(rep.start());
if let Err(err) = stream.write_all(BADSESSION.as_bytes()) {
return stream.bail(format!("Failed to halt session: {}", err));
} else {
return Ok(Halt(stream));
}
}
}
Ok(Continue(rep))
}
}
|
invoke
|
identifier_name
|
ai.rs
|
extern crate rand;
use chess::logic::{Figure, Board, Position};
use chess::player::{Player, PlayerType};
use self::rand::{thread_rng, Rng};
/// Returns a move for the AI, depending on which one it is
pub fn get_move(board: &Board, me: &Player, other: &Player) -> (Position, Position) {
// If AI is stupid
if me.ptype() != PlayerType::Smart {
get_dumb_move(&mut board.clone(), &mut me.clone(), &mut other.clone())
// If AI is smart
} else {
get_smart_move(board.clone(), me.clone(), other.clone())
}
}
/// Returns the measure of a figure's value
fn figure_value(fig: &Figure) -> i32 {
match *fig {
Figure::King => 500,
Figure::Queen => 100,
Figure::Rook => 50,
Figure::Bishop | Figure::Knight => 25,
Figure::Pawn => 10
}
}
/// Returns a random move in 'moves'
fn random_move(moves: &Vec<(Position, Position)>) -> (Position, Position) {
let mut rng = thread_rng();
let index = rng.gen_range(0, moves.len());
moves[index]
}
/// Returns a dumb move
fn
|
(board: &mut Board, me: &mut Player, other: &mut Player) -> (Position, Position) {
let my_moves = me.get_possible_moves(board, other);
let move_values: Vec<(i32, (Position, Position))> = my_moves.iter()
.map(|x| (capture_and_evade(board, x, me, other), *x))
.collect();
if let Some(at) = move_values.iter().max_by_key(|x| x.0) {
if at.0 == 0 {
return random_move(&my_moves)
} else {
return at.1
}
}
// If we got here then there is no valid move to make, which should not happen
// because then this function should not be called in the first place
unreachable!()
}
/// Return a measure that tries to capture opponent figures and evade being captured
fn capture_and_evade(board: &mut Board, pos: &(Position, Position), active: &mut Player, inactive: &mut Player) -> i32 {
let capture = {
if board.is_capture_move(pos.0, pos.1) {
figure_value(&board.get_figure(pos.1).unwrap())
} else {
0
}
};
let evade = {
if board.simulate_check(pos.0, pos.1, active, inactive, false) {
(figure_value(&board.get_figure(pos.0).unwrap()) * -1) + 1
} else {
0
}
};
capture + evade
}
/// Chooses a smart AI move
#[allow(dead_code, unused_variables)]
fn get_smart_move(board: Board, me: Player, other: Player) -> (Position, Position) {
// TODO: implement this (minimax?)
(Position::new(1, 1), Position::new(1, 2))
}
|
get_dumb_move
|
identifier_name
|
ai.rs
|
extern crate rand;
use chess::logic::{Figure, Board, Position};
use chess::player::{Player, PlayerType};
use self::rand::{thread_rng, Rng};
/// Returns a move for the AI, depending on which one it is
pub fn get_move(board: &Board, me: &Player, other: &Player) -> (Position, Position) {
// If AI is stupid
if me.ptype() != PlayerType::Smart {
get_dumb_move(&mut board.clone(), &mut me.clone(), &mut other.clone())
// If AI is smart
} else {
get_smart_move(board.clone(), me.clone(), other.clone())
}
}
/// Returns the measure of a figure's value
fn figure_value(fig: &Figure) -> i32 {
match *fig {
Figure::King => 500,
Figure::Queen => 100,
Figure::Rook => 50,
Figure::Bishop | Figure::Knight => 25,
Figure::Pawn => 10
}
}
/// Returns a random move in 'moves'
fn random_move(moves: &Vec<(Position, Position)>) -> (Position, Position) {
let mut rng = thread_rng();
let index = rng.gen_range(0, moves.len());
moves[index]
}
/// Returns a dumb move
fn get_dumb_move(board: &mut Board, me: &mut Player, other: &mut Player) -> (Position, Position) {
let my_moves = me.get_possible_moves(board, other);
|
.map(|x| (capture_and_evade(board, x, me, other), *x))
.collect();
if let Some(at) = move_values.iter().max_by_key(|x| x.0) {
if at.0 == 0 {
return random_move(&my_moves)
} else {
return at.1
}
}
// If we got here then there is no valid move to make, which should not happen
// because then this function should not be called in the first place
unreachable!()
}
/// Return a measure that tries to capture opponent figures and evade being captured
fn capture_and_evade(board: &mut Board, pos: &(Position, Position), active: &mut Player, inactive: &mut Player) -> i32 {
let capture = {
if board.is_capture_move(pos.0, pos.1) {
figure_value(&board.get_figure(pos.1).unwrap())
} else {
0
}
};
let evade = {
if board.simulate_check(pos.0, pos.1, active, inactive, false) {
(figure_value(&board.get_figure(pos.0).unwrap()) * -1) + 1
} else {
0
}
};
capture + evade
}
/// Chooses a smart AI move
#[allow(dead_code, unused_variables)]
fn get_smart_move(board: Board, me: Player, other: Player) -> (Position, Position) {
// TODO: implement this (minimax?)
(Position::new(1, 1), Position::new(1, 2))
}
|
let move_values: Vec<(i32, (Position, Position))> = my_moves.iter()
|
random_line_split
|
ai.rs
|
extern crate rand;
use chess::logic::{Figure, Board, Position};
use chess::player::{Player, PlayerType};
use self::rand::{thread_rng, Rng};
/// Returns a move for the AI, depending on which one it is
pub fn get_move(board: &Board, me: &Player, other: &Player) -> (Position, Position) {
// If AI is stupid
if me.ptype() != PlayerType::Smart {
get_dumb_move(&mut board.clone(), &mut me.clone(), &mut other.clone())
// If AI is smart
} else {
get_smart_move(board.clone(), me.clone(), other.clone())
}
}
/// Returns the measure of a figure's value
fn figure_value(fig: &Figure) -> i32 {
match *fig {
Figure::King => 500,
Figure::Queen => 100,
Figure::Rook => 50,
Figure::Bishop | Figure::Knight => 25,
Figure::Pawn => 10
}
}
/// Returns a random move in 'moves'
fn random_move(moves: &Vec<(Position, Position)>) -> (Position, Position) {
let mut rng = thread_rng();
let index = rng.gen_range(0, moves.len());
moves[index]
}
/// Returns a dumb move
fn get_dumb_move(board: &mut Board, me: &mut Player, other: &mut Player) -> (Position, Position)
|
/// Return a measure that tries to capture opponent figures and evade being captured
fn capture_and_evade(board: &mut Board, pos: &(Position, Position), active: &mut Player, inactive: &mut Player) -> i32 {
let capture = {
if board.is_capture_move(pos.0, pos.1) {
figure_value(&board.get_figure(pos.1).unwrap())
} else {
0
}
};
let evade = {
if board.simulate_check(pos.0, pos.1, active, inactive, false) {
(figure_value(&board.get_figure(pos.0).unwrap()) * -1) + 1
} else {
0
}
};
capture + evade
}
/// Chooses a smart AI move
#[allow(dead_code, unused_variables)]
fn get_smart_move(board: Board, me: Player, other: Player) -> (Position, Position) {
// TODO: implement this (minimax?)
(Position::new(1, 1), Position::new(1, 2))
}
|
{
let my_moves = me.get_possible_moves(board, other);
let move_values: Vec<(i32, (Position, Position))> = my_moves.iter()
.map(|x| (capture_and_evade(board, x, me, other), *x))
.collect();
if let Some(at) = move_values.iter().max_by_key(|x| x.0) {
if at.0 == 0 {
return random_move(&my_moves)
} else {
return at.1
}
}
// If we got here then there is no valid move to make, which should not happen
// because then this function should not be called in the first place
unreachable!()
}
|
identifier_body
|
table_caption.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! CSS table formatting contexts.
use layout::block::BlockFlow;
use layout::construct::FlowConstructor;
use layout::context::LayoutContext;
use layout::display_list_builder::{DisplayListBuilder, ExtraDisplayListData};
use layout::flow::{TableCaptionFlowClass, FlowClass, Flow};
use layout::wrapper::ThreadSafeLayoutNode;
use std::cell::RefCell;
use geom::{Point2D, Rect, Size2D};
use gfx::display_list::DisplayListCollection;
use servo_util::geometry::Au;
/// A table formatting context.
pub struct TableCaptionFlow {
block_flow: BlockFlow,
}
impl TableCaptionFlow {
pub fn from_node(constructor: &mut FlowConstructor,
node: &ThreadSafeLayoutNode)
-> TableCaptionFlow {
TableCaptionFlow {
block_flow: BlockFlow::from_node(constructor, node)
}
}
pub fn teardown(&mut self) {
self.block_flow.teardown();
}
pub fn build_display_list_table_caption<E:ExtraDisplayListData>(
&mut self,
builder: &DisplayListBuilder,
container_block_size: &Size2D<Au>,
absolute_cb_abs_position: Point2D<Au>,
dirty: &Rect<Au>,
index: uint,
lists: &RefCell<DisplayListCollection<E>>)
-> uint {
debug!("build_display_list_table_caption: same process as block flow");
self.block_flow.build_display_list_block(builder, container_block_size,
absolute_cb_abs_position,
dirty, index, lists)
}
}
impl Flow for TableCaptionFlow {
fn class(&self) -> FlowClass {
TableCaptionFlowClass
}
fn as_table_caption<'a>(&'a mut self) -> &'a mut TableCaptionFlow {
self
}
fn as_block<'a>(&'a mut self) -> &'a mut BlockFlow {
&mut self.block_flow
}
fn bubble_widths(&mut self, ctx: &mut LayoutContext) {
self.block_flow.bubble_widths(ctx);
}
fn assign_widths(&mut self, ctx: &mut LayoutContext) {
debug!("assign_widths({}): assigning width for flow", "table_caption");
self.block_flow.assign_widths(ctx);
}
/// This is called on kid flows by a parent.
///
/// Hence, we can assume that assign_height has already been called on the
/// kid (because of the bottom-up traversal).
fn
|
(&mut self, ctx: &mut LayoutContext) {
debug!("assign_height_inorder: assigning height for table_caption");
self.block_flow.assign_height_inorder(ctx);
}
fn assign_height(&mut self, ctx: &mut LayoutContext) {
debug!("assign_height: assigning height for table_caption");
self.block_flow.assign_height(ctx);
}
/// table-caption has margins but is not collapsed with a sibling(table)
/// or its parents(table-wrapper).
/// Therefore, margins to be collapsed do not exist.
fn collapse_margins(&mut self, _: bool, _: &mut bool, _: &mut Au,
_: &mut Au, _: &mut Au, _: &mut Au) {
}
fn debug_str(&self) -> ~str {
let txt = ~"TableCaptionFlow: ";
txt.append(match self.block_flow.box_ {
Some(ref rb) => rb.debug_str(),
None => ~"",
})
}
}
|
assign_height_inorder
|
identifier_name
|
table_caption.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! CSS table formatting contexts.
use layout::block::BlockFlow;
use layout::construct::FlowConstructor;
use layout::context::LayoutContext;
use layout::display_list_builder::{DisplayListBuilder, ExtraDisplayListData};
use layout::flow::{TableCaptionFlowClass, FlowClass, Flow};
use layout::wrapper::ThreadSafeLayoutNode;
use std::cell::RefCell;
use geom::{Point2D, Rect, Size2D};
use gfx::display_list::DisplayListCollection;
use servo_util::geometry::Au;
/// A table formatting context.
pub struct TableCaptionFlow {
block_flow: BlockFlow,
}
impl TableCaptionFlow {
pub fn from_node(constructor: &mut FlowConstructor,
node: &ThreadSafeLayoutNode)
-> TableCaptionFlow {
TableCaptionFlow {
block_flow: BlockFlow::from_node(constructor, node)
}
}
pub fn teardown(&mut self) {
self.block_flow.teardown();
}
pub fn build_display_list_table_caption<E:ExtraDisplayListData>(
&mut self,
builder: &DisplayListBuilder,
container_block_size: &Size2D<Au>,
absolute_cb_abs_position: Point2D<Au>,
dirty: &Rect<Au>,
index: uint,
lists: &RefCell<DisplayListCollection<E>>)
-> uint {
debug!("build_display_list_table_caption: same process as block flow");
self.block_flow.build_display_list_block(builder, container_block_size,
absolute_cb_abs_position,
dirty, index, lists)
}
}
impl Flow for TableCaptionFlow {
fn class(&self) -> FlowClass {
TableCaptionFlowClass
}
fn as_table_caption<'a>(&'a mut self) -> &'a mut TableCaptionFlow {
self
}
fn as_block<'a>(&'a mut self) -> &'a mut BlockFlow {
&mut self.block_flow
}
fn bubble_widths(&mut self, ctx: &mut LayoutContext) {
self.block_flow.bubble_widths(ctx);
}
fn assign_widths(&mut self, ctx: &mut LayoutContext) {
debug!("assign_widths({}): assigning width for flow", "table_caption");
self.block_flow.assign_widths(ctx);
}
/// This is called on kid flows by a parent.
///
/// Hence, we can assume that assign_height has already been called on the
/// kid (because of the bottom-up traversal).
fn assign_height_inorder(&mut self, ctx: &mut LayoutContext) {
debug!("assign_height_inorder: assigning height for table_caption");
self.block_flow.assign_height_inorder(ctx);
}
fn assign_height(&mut self, ctx: &mut LayoutContext) {
debug!("assign_height: assigning height for table_caption");
self.block_flow.assign_height(ctx);
}
/// table-caption has margins but is not collapsed with a sibling(table)
/// or its parents(table-wrapper).
/// Therefore, margins to be collapsed do not exist.
fn collapse_margins(&mut self, _: bool, _: &mut bool, _: &mut Au,
_: &mut Au, _: &mut Au, _: &mut Au)
|
fn debug_str(&self) -> ~str {
let txt = ~"TableCaptionFlow: ";
txt.append(match self.block_flow.box_ {
Some(ref rb) => rb.debug_str(),
None => ~"",
})
}
}
|
{
}
|
identifier_body
|
table_caption.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! CSS table formatting contexts.
use layout::block::BlockFlow;
use layout::construct::FlowConstructor;
use layout::context::LayoutContext;
use layout::display_list_builder::{DisplayListBuilder, ExtraDisplayListData};
use layout::flow::{TableCaptionFlowClass, FlowClass, Flow};
use layout::wrapper::ThreadSafeLayoutNode;
use std::cell::RefCell;
use geom::{Point2D, Rect, Size2D};
use gfx::display_list::DisplayListCollection;
use servo_util::geometry::Au;
/// A table formatting context.
pub struct TableCaptionFlow {
block_flow: BlockFlow,
}
impl TableCaptionFlow {
pub fn from_node(constructor: &mut FlowConstructor,
node: &ThreadSafeLayoutNode)
-> TableCaptionFlow {
TableCaptionFlow {
block_flow: BlockFlow::from_node(constructor, node)
}
}
pub fn teardown(&mut self) {
self.block_flow.teardown();
}
pub fn build_display_list_table_caption<E:ExtraDisplayListData>(
&mut self,
builder: &DisplayListBuilder,
container_block_size: &Size2D<Au>,
absolute_cb_abs_position: Point2D<Au>,
dirty: &Rect<Au>,
index: uint,
lists: &RefCell<DisplayListCollection<E>>)
-> uint {
debug!("build_display_list_table_caption: same process as block flow");
self.block_flow.build_display_list_block(builder, container_block_size,
absolute_cb_abs_position,
dirty, index, lists)
}
}
impl Flow for TableCaptionFlow {
fn class(&self) -> FlowClass {
TableCaptionFlowClass
}
fn as_table_caption<'a>(&'a mut self) -> &'a mut TableCaptionFlow {
self
}
fn as_block<'a>(&'a mut self) -> &'a mut BlockFlow {
&mut self.block_flow
}
fn bubble_widths(&mut self, ctx: &mut LayoutContext) {
self.block_flow.bubble_widths(ctx);
}
fn assign_widths(&mut self, ctx: &mut LayoutContext) {
debug!("assign_widths({}): assigning width for flow", "table_caption");
self.block_flow.assign_widths(ctx);
}
/// This is called on kid flows by a parent.
///
/// Hence, we can assume that assign_height has already been called on the
/// kid (because of the bottom-up traversal).
fn assign_height_inorder(&mut self, ctx: &mut LayoutContext) {
debug!("assign_height_inorder: assigning height for table_caption");
self.block_flow.assign_height_inorder(ctx);
}
|
debug!("assign_height: assigning height for table_caption");
self.block_flow.assign_height(ctx);
}
/// table-caption has margins but is not collapsed with a sibling(table)
/// or its parents(table-wrapper).
/// Therefore, margins to be collapsed do not exist.
fn collapse_margins(&mut self, _: bool, _: &mut bool, _: &mut Au,
_: &mut Au, _: &mut Au, _: &mut Au) {
}
fn debug_str(&self) -> ~str {
let txt = ~"TableCaptionFlow: ";
txt.append(match self.block_flow.box_ {
Some(ref rb) => rb.debug_str(),
None => ~"",
})
}
}
|
fn assign_height(&mut self, ctx: &mut LayoutContext) {
|
random_line_split
|
buf_writer.rs
|
use futures_core::task::{Context, Poll};
use futures_io::{AsyncSeek, AsyncWrite, IoSlice, SeekFrom};
use pin_utils::{unsafe_pinned, unsafe_unpinned};
use std::fmt;
use std::io::{self, Write};
use std::pin::Pin;
use super::DEFAULT_BUF_SIZE;
/// Wraps a writer and buffers its output.
///
/// It can be excessively inefficient to work directly with something that
/// implements [`AsyncWrite`]. A `BufWriter` keeps an in-memory buffer of data and
/// writes it to an underlying writer in large, infrequent batches.
///
/// `BufWriter` can improve the speed of programs that make *small* and
/// *repeated* write calls to the same file or network socket. It does not
/// help when writing very large amounts at once, or writing just one or a few
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a `Vec<u8>`.
///
/// When the `BufWriter` is dropped, the contents of its buffer will be
/// discarded. Creating multiple instances of a `BufWriter` on the same
/// stream can cause data loss. If you need to write out the contents of its
/// buffer, you must manually call flush before the writer is dropped.
///
/// [`AsyncWrite`]: futures_io::AsyncWrite
/// [`flush`]: super::AsyncWriteExt::flush
///
// TODO: Examples
pub struct BufWriter<W> {
inner: W,
buf: Vec<u8>,
written: usize,
}
impl<W: AsyncWrite> BufWriter<W> {
unsafe_pinned!(inner: W);
unsafe_unpinned!(buf: Vec<u8>);
/// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
pub fn new(inner: W) -> Self {
Self::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufWriter` with the specified buffer capacity.
pub fn with_capacity(cap: usize, inner: W) -> Self {
Self {
inner,
buf: Vec::with_capacity(cap),
written: 0,
}
}
fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
let Self { inner, buf, written } = unsafe { self.get_unchecked_mut() };
let mut inner = unsafe { Pin::new_unchecked(inner) };
let len = buf.len();
let mut ret = Ok(());
while *written < len {
match ready!(inner.as_mut().poll_write(cx, &buf[*written..])) {
Ok(0) => {
ret = Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write the buffered data",
));
break;
}
Ok(n) => *written += n,
Err(e) => {
ret = Err(e);
break;
}
}
}
if *written > 0 {
buf.drain(..*written);
}
*written = 0;
Poll::Ready(ret)
}
/// Gets a reference to the underlying writer.
pub fn get_ref(&self) -> &W {
&self.inner
}
/// Gets a mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
pub fn get_mut(&mut self) -> &mut W {
&mut self.inner
}
/// Gets a pinned mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> {
self.inner()
}
/// Consumes this `BufWriter`, returning the underlying writer.
///
/// Note that any leftover data in the internal buffer is lost.
pub fn into_inner(self) -> W {
self.inner
}
/// Returns a reference to the internally buffered data.
pub fn buffer(&self) -> &[u8] {
&self.buf
}
}
impl<W: AsyncWrite> AsyncWrite for BufWriter<W> {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>>
|
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[IoSlice<'_>],
) -> Poll<io::Result<usize>> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.buf.len() + total_len > self.buf.capacity() {
ready!(self.as_mut().flush_buf(cx))?;
}
if total_len >= self.buf.capacity() {
self.inner().poll_write_vectored(cx, bufs)
} else {
Poll::Ready(self.buf().write_vectored(bufs))
}
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
ready!(self.as_mut().flush_buf(cx))?;
self.inner().poll_flush(cx)
}
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
ready!(self.as_mut().flush_buf(cx))?;
self.inner().poll_close(cx)
}
}
impl<W: fmt::Debug> fmt::Debug for BufWriter<W> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BufWriter")
.field("writer", &self.inner)
.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
.field("written", &self.written)
.finish()
}
}
impl<W: AsyncWrite + AsyncSeek> AsyncSeek for BufWriter<W> {
/// Seek to the offset, in bytes, in the underlying writer.
///
/// Seeking always writes out the internal buffer before seeking.
fn poll_seek(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
pos: SeekFrom,
) -> Poll<io::Result<u64>> {
ready!(self.as_mut().flush_buf(cx))?;
self.inner().poll_seek(cx, pos)
}
}
|
{
if self.buf.len() + buf.len() > self.buf.capacity() {
ready!(self.as_mut().flush_buf(cx))?;
}
if buf.len() >= self.buf.capacity() {
self.inner().poll_write(cx, buf)
} else {
Poll::Ready(self.buf().write(buf))
}
}
|
identifier_body
|
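The `BufWriter` doc comment in the row above explains that small writes are coalesced into larger ones and that any buffered data is discarded on drop unless `flush` is called first. A minimal usage sketch of that pattern, assuming the released `futures` crate (which exports this type as `futures::io::BufWriter` together with the `AsyncWriteExt` extension trait); the helper function name is hypothetical:

```rust
use futures::io::{AsyncWrite, AsyncWriteExt, BufWriter};

// Hypothetical helper: wrap any async writer and perform several small writes.
async fn write_buffered<W: AsyncWrite + Unpin>(writer: W) -> std::io::Result<()> {
    let mut buffered = BufWriter::new(writer);
    for chunk in &["many", "small", "writes"] {
        // Each call lands in the in-memory buffer until it fills up.
        buffered.write_all(chunk.as_bytes()).await?;
    }
    // Flush explicitly: dropping a BufWriter discards whatever is still buffered.
    buffered.flush().await?;
    Ok(())
}
```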
buf_writer.rs
|
use futures_core::task::{Context, Poll};
use futures_io::{AsyncSeek, AsyncWrite, IoSlice, SeekFrom};
use pin_utils::{unsafe_pinned, unsafe_unpinned};
use std::fmt;
use std::io::{self, Write};
use std::pin::Pin;
use super::DEFAULT_BUF_SIZE;
/// Wraps a writer and buffers its output.
///
/// It can be excessively inefficient to work directly with something that
/// implements [`AsyncWrite`]. A `BufWriter` keeps an in-memory buffer of data and
/// writes it to an underlying writer in large, infrequent batches.
///
/// `BufWriter` can improve the speed of programs that make *small* and
/// *repeated* write calls to the same file or network socket. It does not
/// help when writing very large amounts at once, or writing just one or a few
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a `Vec<u8>`.
///
/// When the `BufWriter` is dropped, the contents of its buffer will be
/// discarded. Creating multiple instances of a `BufWriter` on the same
/// stream can cause data loss. If you need to write out the contents of its
/// buffer, you must manually call flush before the writer is dropped.
///
/// [`AsyncWrite`]: futures_io::AsyncWrite
/// [`flush`]: super::AsyncWriteExt::flush
///
// TODO: Examples
pub struct BufWriter<W> {
inner: W,
buf: Vec<u8>,
written: usize,
}
impl<W: AsyncWrite> BufWriter<W> {
unsafe_pinned!(inner: W);
unsafe_unpinned!(buf: Vec<u8>);
/// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
pub fn new(inner: W) -> Self {
Self::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufWriter` with the specified buffer capacity.
pub fn with_capacity(cap: usize, inner: W) -> Self {
Self {
inner,
buf: Vec::with_capacity(cap),
written: 0,
}
}
fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
let Self { inner, buf, written } = unsafe { self.get_unchecked_mut() };
let mut inner = unsafe { Pin::new_unchecked(inner) };
let len = buf.len();
let mut ret = Ok(());
while *written < len {
match ready!(inner.as_mut().poll_write(cx, &buf[*written..])) {
Ok(0) => {
ret = Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write the buffered data",
));
break;
}
Ok(n) => *written += n,
Err(e) => {
ret = Err(e);
break;
}
}
}
if *written > 0 {
buf.drain(..*written);
}
*written = 0;
Poll::Ready(ret)
}
/// Gets a reference to the underlying writer.
pub fn get_ref(&self) -> &W {
&self.inner
}
/// Gets a mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
pub fn get_mut(&mut self) -> &mut W {
&mut self.inner
}
/// Gets a pinned mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> {
self.inner()
}
/// Consumes this `BufWriter`, returning the underlying writer.
///
/// Note that any leftover data in the internal buffer is lost.
pub fn into_inner(self) -> W {
self.inner
}
/// Returns a reference to the internally buffered data.
pub fn buffer(&self) -> &[u8] {
&self.buf
}
}
impl<W: AsyncWrite> AsyncWrite for BufWriter<W> {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
if self.buf.len() + buf.len() > self.buf.capacity()
|
if buf.len() >= self.buf.capacity() {
self.inner().poll_write(cx, buf)
} else {
Poll::Ready(self.buf().write(buf))
}
}
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[IoSlice<'_>],
) -> Poll<io::Result<usize>> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.buf.len() + total_len > self.buf.capacity() {
ready!(self.as_mut().flush_buf(cx))?;
}
if total_len >= self.buf.capacity() {
self.inner().poll_write_vectored(cx, bufs)
} else {
Poll::Ready(self.buf().write_vectored(bufs))
}
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
ready!(self.as_mut().flush_buf(cx))?;
self.inner().poll_flush(cx)
}
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
ready!(self.as_mut().flush_buf(cx))?;
self.inner().poll_close(cx)
}
}
impl<W: fmt::Debug> fmt::Debug for BufWriter<W> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BufWriter")
.field("writer", &self.inner)
.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
.field("written", &self.written)
.finish()
}
}
impl<W: AsyncWrite + AsyncSeek> AsyncSeek for BufWriter<W> {
/// Seek to the offset, in bytes, in the underlying writer.
///
/// Seeking always writes out the internal buffer before seeking.
fn poll_seek(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
pos: SeekFrom,
) -> Poll<io::Result<u64>> {
ready!(self.as_mut().flush_buf(cx))?;
self.inner().poll_seek(cx, pos)
}
}
|
{
ready!(self.as_mut().flush_buf(cx))?;
}
|
conditional_block
|
buf_writer.rs
|
use futures_core::task::{Context, Poll};
use futures_io::{AsyncSeek, AsyncWrite, IoSlice, SeekFrom};
use pin_utils::{unsafe_pinned, unsafe_unpinned};
use std::fmt;
use std::io::{self, Write};
use std::pin::Pin;
use super::DEFAULT_BUF_SIZE;
/// Wraps a writer and buffers its output.
///
/// It can be excessively inefficient to work directly with something that
/// implements [`AsyncWrite`]. A `BufWriter` keeps an in-memory buffer of data and
/// writes it to an underlying writer in large, infrequent batches.
///
/// `BufWriter` can improve the speed of programs that make *small* and
/// *repeated* write calls to the same file or network socket. It does not
/// help when writing very large amounts at once, or writing just one or a few
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a `Vec<u8>`.
///
/// When the `BufWriter` is dropped, the contents of its buffer will be
/// discarded. Creating multiple instances of a `BufWriter` on the same
/// stream can cause data loss. If you need to write out the contents of its
/// buffer, you must manually call flush before the writer is dropped.
///
/// [`AsyncWrite`]: futures_io::AsyncWrite
/// [`flush`]: super::AsyncWriteExt::flush
///
// TODO: Examples
pub struct BufWriter<W> {
inner: W,
buf: Vec<u8>,
written: usize,
}
impl<W: AsyncWrite> BufWriter<W> {
unsafe_pinned!(inner: W);
unsafe_unpinned!(buf: Vec<u8>);
/// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
pub fn new(inner: W) -> Self {
Self::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufWriter` with the specified buffer capacity.
pub fn with_capacity(cap: usize, inner: W) -> Self {
Self {
inner,
buf: Vec::with_capacity(cap),
written: 0,
}
}
fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
let Self { inner, buf, written } = unsafe { self.get_unchecked_mut() };
let mut inner = unsafe { Pin::new_unchecked(inner) };
let len = buf.len();
let mut ret = Ok(());
while *written < len {
match ready!(inner.as_mut().poll_write(cx, &buf[*written..])) {
Ok(0) => {
ret = Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write the buffered data",
));
break;
}
Ok(n) => *written += n,
Err(e) => {
ret = Err(e);
break;
}
}
}
if *written > 0 {
buf.drain(..*written);
}
*written = 0;
Poll::Ready(ret)
}
/// Gets a reference to the underlying writer.
pub fn get_ref(&self) -> &W {
&self.inner
}
/// Gets a mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
pub fn get_mut(&mut self) -> &mut W {
&mut self.inner
}
/// Gets a pinned mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> {
self.inner()
}
/// Consumes this `BufWriter`, returning the underlying writer.
///
/// Note that any leftover data in the internal buffer is lost.
pub fn into_inner(self) -> W {
self.inner
}
/// Returns a reference to the internally buffered data.
pub fn buffer(&self) -> &[u8] {
&self.buf
}
}
impl<W: AsyncWrite> AsyncWrite for BufWriter<W> {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
if self.buf.len() + buf.len() > self.buf.capacity() {
ready!(self.as_mut().flush_buf(cx))?;
}
if buf.len() >= self.buf.capacity() {
self.inner().poll_write(cx, buf)
} else {
Poll::Ready(self.buf().write(buf))
}
}
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[IoSlice<'_>],
) -> Poll<io::Result<usize>> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.buf.len() + total_len > self.buf.capacity() {
ready!(self.as_mut().flush_buf(cx))?;
}
if total_len >= self.buf.capacity() {
self.inner().poll_write_vectored(cx, bufs)
} else {
Poll::Ready(self.buf().write_vectored(bufs))
}
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
ready!(self.as_mut().flush_buf(cx))?;
self.inner().poll_flush(cx)
}
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
ready!(self.as_mut().flush_buf(cx))?;
self.inner().poll_close(cx)
}
}
impl<W: fmt::Debug> fmt::Debug for BufWriter<W> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BufWriter")
.field("writer", &self.inner)
.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
.field("written", &self.written)
.finish()
}
}
impl<W: AsyncWrite + AsyncSeek> AsyncSeek for BufWriter<W> {
/// Seek to the offset, in bytes, in the underlying writer.
///
/// Seeking always writes out the internal buffer before seeking.
fn poll_seek(
|
self.inner().poll_seek(cx, pos)
}
}
|
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
pos: SeekFrom,
) -> Poll<io::Result<u64>> {
ready!(self.as_mut().flush_buf(cx))?;
|
random_line_split
|
buf_writer.rs
|
use futures_core::task::{Context, Poll};
use futures_io::{AsyncSeek, AsyncWrite, IoSlice, SeekFrom};
use pin_utils::{unsafe_pinned, unsafe_unpinned};
use std::fmt;
use std::io::{self, Write};
use std::pin::Pin;
use super::DEFAULT_BUF_SIZE;
/// Wraps a writer and buffers its output.
///
/// It can be excessively inefficient to work directly with something that
/// implements [`AsyncWrite`]. A `BufWriter` keeps an in-memory buffer of data and
/// writes it to an underlying writer in large, infrequent batches.
///
/// `BufWriter` can improve the speed of programs that make *small* and
/// *repeated* write calls to the same file or network socket. It does not
/// help when writing very large amounts at once, or writing just one or a few
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a `Vec<u8>`.
///
/// When the `BufWriter` is dropped, the contents of its buffer will be
/// discarded. Creating multiple instances of a `BufWriter` on the same
/// stream can cause data loss. If you need to write out the contents of its
/// buffer, you must manually call flush before the writer is dropped.
///
/// [`AsyncWrite`]: futures_io::AsyncWrite
/// [`flush`]: super::AsyncWriteExt::flush
///
// TODO: Examples
pub struct BufWriter<W> {
inner: W,
buf: Vec<u8>,
written: usize,
}
impl<W: AsyncWrite> BufWriter<W> {
unsafe_pinned!(inner: W);
unsafe_unpinned!(buf: Vec<u8>);
/// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB,
/// but may change in the future.
pub fn
|
(inner: W) -> Self {
Self::with_capacity(DEFAULT_BUF_SIZE, inner)
}
/// Creates a new `BufWriter` with the specified buffer capacity.
pub fn with_capacity(cap: usize, inner: W) -> Self {
Self {
inner,
buf: Vec::with_capacity(cap),
written: 0,
}
}
fn flush_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
let Self { inner, buf, written } = unsafe { self.get_unchecked_mut() };
let mut inner = unsafe { Pin::new_unchecked(inner) };
let len = buf.len();
let mut ret = Ok(());
while *written < len {
match ready!(inner.as_mut().poll_write(cx, &buf[*written..])) {
Ok(0) => {
ret = Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write the buffered data",
));
break;
}
Ok(n) => *written += n,
Err(e) => {
ret = Err(e);
break;
}
}
}
if *written > 0 {
buf.drain(..*written);
}
*written = 0;
Poll::Ready(ret)
}
/// Gets a reference to the underlying writer.
pub fn get_ref(&self) -> &W {
&self.inner
}
/// Gets a mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
pub fn get_mut(&mut self) -> &mut W {
&mut self.inner
}
/// Gets a pinned mutable reference to the underlying writer.
///
/// It is inadvisable to directly write to the underlying writer.
pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut W> {
self.inner()
}
/// Consumes this `BufWriter`, returning the underlying writer.
///
/// Note that any leftover data in the internal buffer is lost.
pub fn into_inner(self) -> W {
self.inner
}
/// Returns a reference to the internally buffered data.
pub fn buffer(&self) -> &[u8] {
&self.buf
}
}
impl<W: AsyncWrite> AsyncWrite for BufWriter<W> {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
if self.buf.len() + buf.len() > self.buf.capacity() {
ready!(self.as_mut().flush_buf(cx))?;
}
if buf.len() >= self.buf.capacity() {
self.inner().poll_write(cx, buf)
} else {
Poll::Ready(self.buf().write(buf))
}
}
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[IoSlice<'_>],
) -> Poll<io::Result<usize>> {
let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
if self.buf.len() + total_len > self.buf.capacity() {
ready!(self.as_mut().flush_buf(cx))?;
}
if total_len >= self.buf.capacity() {
self.inner().poll_write_vectored(cx, bufs)
} else {
Poll::Ready(self.buf().write_vectored(bufs))
}
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
ready!(self.as_mut().flush_buf(cx))?;
self.inner().poll_flush(cx)
}
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
ready!(self.as_mut().flush_buf(cx))?;
self.inner().poll_close(cx)
}
}
impl<W: fmt::Debug> fmt::Debug for BufWriter<W> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BufWriter")
.field("writer", &self.inner)
.field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity()))
.field("written", &self.written)
.finish()
}
}
impl<W: AsyncWrite + AsyncSeek> AsyncSeek for BufWriter<W> {
/// Seek to the offset, in bytes, in the underlying writer.
///
/// Seeking always writes out the internal buffer before seeking.
fn poll_seek(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
pos: SeekFrom,
) -> Poll<io::Result<u64>> {
ready!(self.as_mut().flush_buf(cx))?;
self.inner().poll_seek(cx, pos)
}
}
|
new
|
identifier_name
|
edenapi.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::sync::Arc;
use std::time::Duration;
use futures::StreamExt;
use futures_batch::ChunksTimeoutStreamExt;
use edenapi::EdenApi;
use edenapi_types::{FileEntry, TreeAttributes, TreeEntry};
use types::Key;
use crate::{
localstore::ExtStoredPolicy,
scmstore::{fetch_error, FetchError, FetchStream, KeyStream, ReadStore},
};
// TODO(meyer): These should be configurable
// EdenApi's API is batch-based and async, and it will split a large batch into multiple requests to send in parallel
// but it won't join separate batches into larger ones. Because the input stream may not terminate in a timely fashion,
// we group the stream into batches with a timeout so that EdenApi will actually be sent batches, rather than constructing
// a batch of one for each item in the stream. This is worth investigating in the future, though - we could be sending
// "batches of one" to EdenApi, or we could change the EdenApi client to batch across requests, not just within them.
// I believe Arun has determined that even with HTTP2, some level of batching within requests is advantageous instead
// of individually streaming a separate request for each key, but it's still worth making sure we're doing the right thing.
// We might also want to just grab all ready items from the stream in a batch, with no timeout, if the cost of small batches
// is smaller than the cost of the timeout waiting to collect larger ones.
const BATCH_SIZE: usize = 100;
const BATCH_TIMEOUT: Duration = Duration::from_millis(100);
pub struct EdenApiAdapter<C> {
pub client: C,
pub repo: String,
pub extstored_policy: ExtStoredPolicy,
}
impl<C> ReadStore<Key, TreeEntry> for EdenApiAdapter<C>
where
C: EdenApi,
{
fn fetch_stream(self: Arc<Self>, keys: KeyStream<Key>) -> FetchStream<Key, TreeEntry> {
Box::pin(
keys.chunks_timeout(BATCH_SIZE, BATCH_TIMEOUT)
.then(move |keys| {
let self_ = self.clone();
async move {
self_
.client
.trees(self_.repo.clone(), keys, Some(TreeAttributes::all()), None)
.await
.map_or_else(fetch_error, |s| {
Box::pin(s.entries.map(|v| match v {
Ok(Ok(v)) => Ok(v),
// TODO: Separate out NotFound errors from EdenApi
// TODO: We could eliminate this redundant key clone with a trait, I think.
Ok(Err(e)) => Err(FetchError::maybe_with_key(e.key.clone(), e)),
// TODO: What should happen when an entire batch fails?
Err(e) => Err(FetchError::from(e)),
})) as FetchStream<Key, TreeEntry>
})
}
})
.flatten(),
)
}
}
impl<C> ReadStore<Key, FileEntry> for EdenApiAdapter<C>
where
C: EdenApi,
{
fn fetch_stream(self: Arc<Self>, keys: KeyStream<Key>) -> FetchStream<Key, FileEntry> {
Box::pin(
keys.chunks_timeout(BATCH_SIZE, BATCH_TIMEOUT)
.then(move |keys| {
let self_ = self.clone();
async move {
self_
.client
.files(self_.repo.clone(), keys, None)
.await
.map_or_else(fetch_error, {
let self_ = self_.clone();
move |fetch| {
// TODO: Add per-item errors to EdenApi `files`
Box::pin(fetch.entries.map(move |res| {
res.map_err(FetchError::from).and_then(|entry| {
if self_.extstored_policy == ExtStoredPolicy::Ignore
&& entry.metadata().is_lfs()
{
Err(FetchError::not_found(entry.key().clone()))
} else {
Ok(entry)
}
})
}))
as
FetchStream<Key, FileEntry>
}
})
}
})
.flatten(),
)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
use futures::stream;
use maplit::hashmap;
use async_runtime::stream_to_iter as block_on_stream;
use minibytes::Bytes;
use types::{testutil::*, Parents};
use crate::{
datastore::Metadata,
edenapi::{EdenApiRemoteStore, File},
localstore::ExtStoredPolicy,
scmstore::FetchError,
testutil::*,
};
#[test]
fn test_files_extstore_use() -> Result<(), ()>
|
};
let trees = HashMap::new();
let client = FakeEdenApi::new()
.files_with_flags(files)
.trees(trees)
.into_arc();
let remote_files = EdenApiRemoteStore::<File>::new("repo", client, None);
let files_adapter = Arc::new(remote_files.get_scmstore_adapter(ExtStoredPolicy::Use));
let fetched: Vec<_> =
block_on_stream(files_adapter.fetch_stream(Box::pin(stream::iter(vec![
lfs_key.clone(),
nonlfs_key.clone(),
]))))
.collect();
let lfs_entry = FileEntry::new(
lfs_key,
lfs_bytes.to_vec().into(),
Parents::default(),
lfs_metadata,
);
let nonlfs_entry = FileEntry::new(
nonlfs_key,
nonlfs_bytes.to_vec().into(),
Parents::default(),
nonlfs_metadata,
);
let expected = vec![Ok(lfs_entry), Ok(nonlfs_entry)];
assert_eq!(fetched.into_iter().collect::<Vec<_>>(), expected);
Ok(())
}
#[test]
fn test_files_extstore_ignore() -> Result<(), ()> {
// Set up mocked EdenAPI file and tree stores.
let lfs_metadata = Metadata {
size: Some(4),
flags: Some(Metadata::LFS_FLAG),
};
let nonlfs_metadata = Metadata {
size: Some(4),
flags: None,
};
let lfs_key = key("a", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let nonlfs_key = key("b", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let lfs_bytes = Bytes::from("1234");
let nonlfs_bytes = Bytes::from("2345");
let files = hashmap! {
lfs_key.clone() => (lfs_bytes.clone(), lfs_metadata.flags),
nonlfs_key.clone() => (nonlfs_bytes.clone(), nonlfs_metadata.flags)
};
let trees = HashMap::new();
let client = FakeEdenApi::new()
.files_with_flags(files)
.trees(trees)
.into_arc();
let remote_files = EdenApiRemoteStore::<File>::new("repo", client, None);
let files_adapter = Arc::new(remote_files.get_scmstore_adapter(ExtStoredPolicy::Ignore));
let fetched: Vec<_> =
block_on_stream(files_adapter.fetch_stream(Box::pin(stream::iter(vec![
lfs_key.clone(),
nonlfs_key.clone(),
]))))
.collect();
let _lfs_entry = FileEntry::new(
lfs_key.clone(),
lfs_bytes.to_vec().into(),
Parents::default(),
lfs_metadata,
);
let nonlfs_entry = FileEntry::new(
nonlfs_key,
nonlfs_bytes.to_vec().into(),
Parents::default(),
nonlfs_metadata,
);
let expected = vec![Err(FetchError::not_found(lfs_key)), Ok(nonlfs_entry)];
assert_eq!(fetched.into_iter().collect::<Vec<_>>(), expected);
Ok(())
}
}
|
{
// Set up mocked EdenAPI file and tree stores.
let lfs_metadata = Metadata {
size: Some(4),
flags: Some(Metadata::LFS_FLAG),
};
let nonlfs_metadata = Metadata {
size: Some(4),
flags: None,
};
let lfs_key = key("a", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let nonlfs_key = key("b", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let lfs_bytes = Bytes::from("1234");
let nonlfs_bytes = Bytes::from("2345");
let files = hashmap! {
lfs_key.clone() => (lfs_bytes.clone(), lfs_metadata.flags),
nonlfs_key.clone() => (nonlfs_bytes.clone(), nonlfs_metadata.flags)
|
identifier_body
|
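The long comment at the top of `edenapi.rs` describes grouping the incoming key stream into timed batches so that EdenApi is sent real batches rather than batches of one. A sketch of that batching step in isolation, assuming the `futures` and `futures-batch` crates and reusing the same constants; the demo function and the numeric stand-in for `Key` are hypothetical:

```rust
use std::time::Duration;

use futures::{pin_mut, stream, StreamExt};
use futures_batch::ChunksTimeoutStreamExt;

const BATCH_SIZE: usize = 100;
const BATCH_TIMEOUT: Duration = Duration::from_millis(100);

// Hypothetical demo: batch a stream of numeric "keys" instead of real `Key`s.
async fn demo_batches() {
    let keys = stream::iter(0..250u32);
    // Emit Vec<u32> batches of at most BATCH_SIZE items; a partial batch is
    // flushed once BATCH_TIMEOUT elapses without the batch filling up.
    let batches = keys.chunks_timeout(BATCH_SIZE, BATCH_TIMEOUT);
    pin_mut!(batches);
    while let Some(batch) = batches.next().await {
        println!("sending a batch of {} keys", batch.len());
    }
}
```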
edenapi.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::sync::Arc;
use std::time::Duration;
use futures::StreamExt;
use futures_batch::ChunksTimeoutStreamExt;
use edenapi::EdenApi;
use edenapi_types::{FileEntry, TreeAttributes, TreeEntry};
use types::Key;
use crate::{
localstore::ExtStoredPolicy,
scmstore::{fetch_error, FetchError, FetchStream, KeyStream, ReadStore},
};
// TODO(meyer): These should be configurable
// EdenApi's API is batch-based and async, and it will split a large batch into multiple requests to send in parallel
// but it won't join separate batches into larger ones. Because the input stream may not terminate in a timely fashion,
// we group the stream into batches with a timeout so that EdenApi will actually be sent batches, rather than constructing
// a batch of one for each item in the stream. This is worth investigating in the future, though - we could be sending
// "batches of one" to EdenApi, or we could change the EdenApi client to batch across requests, not just within them.
// I believe Arun has determined that even with HTTP2, some level of batching within requests is advantageous instead
// of individually streaming a separate request for each key, but it's still worth making sure we're doing the right thing.
// We might also want to just grab all ready items from the stream in a batch, with no timeout, if the cost of small batches
// is smaller than the cost of the timeout waiting to collect larger ones.
const BATCH_SIZE: usize = 100;
const BATCH_TIMEOUT: Duration = Duration::from_millis(100);
pub struct EdenApiAdapter<C> {
pub client: C,
pub repo: String,
pub extstored_policy: ExtStoredPolicy,
}
impl<C> ReadStore<Key, TreeEntry> for EdenApiAdapter<C>
where
C: EdenApi,
{
fn fetch_stream(self: Arc<Self>, keys: KeyStream<Key>) -> FetchStream<Key, TreeEntry> {
Box::pin(
keys.chunks_timeout(BATCH_SIZE, BATCH_TIMEOUT)
.then(move |keys| {
let self_ = self.clone();
async move {
self_
.client
.trees(self_.repo.clone(), keys, Some(TreeAttributes::all()), None)
.await
.map_or_else(fetch_error, |s| {
Box::pin(s.entries.map(|v| match v {
Ok(Ok(v)) => Ok(v),
// TODO: Separate out NotFound errors from EdenApi
// TODO: We could eliminate this redundant key clone with a trait, I think.
Ok(Err(e)) => Err(FetchError::maybe_with_key(e.key.clone(), e)),
// TODO: What should happen when an entire batch fails?
Err(e) => Err(FetchError::from(e)),
})) as FetchStream<Key, TreeEntry>
})
}
})
.flatten(),
)
}
}
impl<C> ReadStore<Key, FileEntry> for EdenApiAdapter<C>
where
C: EdenApi,
{
fn fetch_stream(self: Arc<Self>, keys: KeyStream<Key>) -> FetchStream<Key, FileEntry> {
Box::pin(
keys.chunks_timeout(BATCH_SIZE, BATCH_TIMEOUT)
.then(move |keys| {
let self_ = self.clone();
async move {
self_
.client
.files(self_.repo.clone(), keys, None)
.await
.map_or_else(fetch_error, {
let self_ = self_.clone();
move |fetch| {
// TODO: Add per-item errors to EdenApi `files`
Box::pin(fetch.entries.map(move |res| {
res.map_err(FetchError::from).and_then(|entry| {
if self_.extstored_policy == ExtStoredPolicy::Ignore
&& entry.metadata().is_lfs()
{
Err(FetchError::not_found(entry.key().clone()))
} else {
Ok(entry)
}
})
}))
as
FetchStream<Key, FileEntry>
}
})
}
})
.flatten(),
)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
use futures::stream;
use maplit::hashmap;
use async_runtime::stream_to_iter as block_on_stream;
use minibytes::Bytes;
use types::{testutil::*, Parents};
use crate::{
datastore::Metadata,
edenapi::{EdenApiRemoteStore, File},
localstore::ExtStoredPolicy,
scmstore::FetchError,
testutil::*,
};
#[test]
fn
|
() -> Result<(), ()> {
// Set up mocked EdenAPI file and tree stores.
let lfs_metadata = Metadata {
size: Some(4),
flags: Some(Metadata::LFS_FLAG),
};
let nonlfs_metadata = Metadata {
size: Some(4),
flags: None,
};
let lfs_key = key("a", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let nonlfs_key = key("b", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let lfs_bytes = Bytes::from("1234");
let nonlfs_bytes = Bytes::from("2345");
let files = hashmap! {
lfs_key.clone() => (lfs_bytes.clone(), lfs_metadata.flags),
nonlfs_key.clone() => (nonlfs_bytes.clone(), nonlfs_metadata.flags)
};
let trees = HashMap::new();
let client = FakeEdenApi::new()
.files_with_flags(files)
.trees(trees)
.into_arc();
let remote_files = EdenApiRemoteStore::<File>::new("repo", client, None);
let files_adapter = Arc::new(remote_files.get_scmstore_adapter(ExtStoredPolicy::Use));
let fetched: Vec<_> =
block_on_stream(files_adapter.fetch_stream(Box::pin(stream::iter(vec![
lfs_key.clone(),
nonlfs_key.clone(),
]))))
.collect();
let lfs_entry = FileEntry::new(
lfs_key,
lfs_bytes.to_vec().into(),
Parents::default(),
lfs_metadata,
);
let nonlfs_entry = FileEntry::new(
nonlfs_key,
nonlfs_bytes.to_vec().into(),
Parents::default(),
nonlfs_metadata,
);
let expected = vec![Ok(lfs_entry), Ok(nonlfs_entry)];
assert_eq!(fetched.into_iter().collect::<Vec<_>>(), expected);
Ok(())
}
#[test]
fn test_files_extstore_ignore() -> Result<(), ()> {
// Set up mocked EdenAPI file and tree stores.
let lfs_metadata = Metadata {
size: Some(4),
flags: Some(Metadata::LFS_FLAG),
};
let nonlfs_metadata = Metadata {
size: Some(4),
flags: None,
};
let lfs_key = key("a", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let nonlfs_key = key("b", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let lfs_bytes = Bytes::from("1234");
let nonlfs_bytes = Bytes::from("2345");
let files = hashmap! {
lfs_key.clone() => (lfs_bytes.clone(), lfs_metadata.flags),
nonlfs_key.clone() => (nonlfs_bytes.clone(), nonlfs_metadata.flags)
};
let trees = HashMap::new();
let client = FakeEdenApi::new()
.files_with_flags(files)
.trees(trees)
.into_arc();
let remote_files = EdenApiRemoteStore::<File>::new("repo", client, None);
let files_adapter = Arc::new(remote_files.get_scmstore_adapter(ExtStoredPolicy::Ignore));
let fetched: Vec<_> =
block_on_stream(files_adapter.fetch_stream(Box::pin(stream::iter(vec![
lfs_key.clone(),
nonlfs_key.clone(),
]))))
.collect();
let _lfs_entry = FileEntry::new(
lfs_key.clone(),
lfs_bytes.to_vec().into(),
Parents::default(),
lfs_metadata,
);
let nonlfs_entry = FileEntry::new(
nonlfs_key,
nonlfs_bytes.to_vec().into(),
Parents::default(),
nonlfs_metadata,
);
let expected = vec![Err(FetchError::not_found(lfs_key)), Ok(nonlfs_entry)];
assert_eq!(fetched.into_iter().collect::<Vec<_>>(), expected);
Ok(())
}
}
|
test_files_extstore_use
|
identifier_name
|
edenapi.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::sync::Arc;
use std::time::Duration;
use futures::StreamExt;
use futures_batch::ChunksTimeoutStreamExt;
use edenapi::EdenApi;
use edenapi_types::{FileEntry, TreeAttributes, TreeEntry};
use types::Key;
use crate::{
localstore::ExtStoredPolicy,
scmstore::{fetch_error, FetchError, FetchStream, KeyStream, ReadStore},
};
// TODO(meyer): These should be configurable
// EdenApi's API is batch-based and async, and it will split a large batch into multiple requests to send in parallel
// but it won't join separate batches into larger ones. Because the input stream may not terminate in a timely fashion,
// we group the stream into batches with a timeout so that EdenApi will actually be sent batches, rather than constructing
// a batch of one for each item in the stream. This is worth investigating in the future, though - we could be sending
// "batches of one" to EdenApi, or we could change the EdenApi client to batch across requests, not just within them.
// I believe Arun has determined that even with HTTP2, some level of batching within requests is advantageous instead
// of individually streaming a separate request for each key, but it's still worth making sure we're doing the right thing.
// We might also want to just grab all ready items from the stream in a batch, with no timeout, if the cost of small batches
// is smaller than the cost of the timeout waiting to collect larger ones.
const BATCH_SIZE: usize = 100;
const BATCH_TIMEOUT: Duration = Duration::from_millis(100);
pub struct EdenApiAdapter<C> {
pub client: C,
pub repo: String,
pub extstored_policy: ExtStoredPolicy,
}
impl<C> ReadStore<Key, TreeEntry> for EdenApiAdapter<C>
where
C: EdenApi,
{
fn fetch_stream(self: Arc<Self>, keys: KeyStream<Key>) -> FetchStream<Key, TreeEntry> {
Box::pin(
keys.chunks_timeout(BATCH_SIZE, BATCH_TIMEOUT)
.then(move |keys| {
let self_ = self.clone();
async move {
self_
.client
.trees(self_.repo.clone(), keys, Some(TreeAttributes::all()), None)
.await
.map_or_else(fetch_error, |s| {
Box::pin(s.entries.map(|v| match v {
Ok(Ok(v)) => Ok(v),
// TODO: Separate out NotFound errors from EdenApi
// TODO: We could eliminate this redundant key clone with a trait, I think.
Ok(Err(e)) => Err(FetchError::maybe_with_key(e.key.clone(), e)),
// TODO: What should happen when an entire batch fails?
Err(e) => Err(FetchError::from(e)),
})) as FetchStream<Key, TreeEntry>
})
}
})
.flatten(),
)
}
}
impl<C> ReadStore<Key, FileEntry> for EdenApiAdapter<C>
where
C: EdenApi,
{
fn fetch_stream(self: Arc<Self>, keys: KeyStream<Key>) -> FetchStream<Key, FileEntry> {
Box::pin(
keys.chunks_timeout(BATCH_SIZE, BATCH_TIMEOUT)
.then(move |keys| {
let self_ = self.clone();
async move {
self_
.client
.files(self_.repo.clone(), keys, None)
.await
.map_or_else(fetch_error, {
let self_ = self_.clone();
move |fetch| {
// TODO: Add per-item errors to EdenApi `files`
Box::pin(fetch.entries.map(move |res| {
res.map_err(FetchError::from).and_then(|entry| {
if self_.extstored_policy == ExtStoredPolicy::Ignore
&& entry.metadata().is_lfs()
{
Err(FetchError::not_found(entry.key().clone()))
} else {
Ok(entry)
}
})
}))
as
FetchStream<Key, FileEntry>
}
})
}
})
.flatten(),
)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
use futures::stream;
use maplit::hashmap;
use async_runtime::stream_to_iter as block_on_stream;
use minibytes::Bytes;
use types::{testutil::*, Parents};
use crate::{
datastore::Metadata,
edenapi::{EdenApiRemoteStore, File},
localstore::ExtStoredPolicy,
scmstore::FetchError,
testutil::*,
};
#[test]
fn test_files_extstore_use() -> Result<(), ()> {
// Set up mocked EdenAPI file and tree stores.
let lfs_metadata = Metadata {
size: Some(4),
flags: Some(Metadata::LFS_FLAG),
};
let nonlfs_metadata = Metadata {
size: Some(4),
flags: None,
};
let lfs_key = key("a", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let nonlfs_key = key("b", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let lfs_bytes = Bytes::from("1234");
let nonlfs_bytes = Bytes::from("2345");
let files = hashmap! {
lfs_key.clone() => (lfs_bytes.clone(), lfs_metadata.flags),
nonlfs_key.clone() => (nonlfs_bytes.clone(), nonlfs_metadata.flags)
};
let trees = HashMap::new();
let client = FakeEdenApi::new()
.files_with_flags(files)
.trees(trees)
.into_arc();
let remote_files = EdenApiRemoteStore::<File>::new("repo", client, None);
let files_adapter = Arc::new(remote_files.get_scmstore_adapter(ExtStoredPolicy::Use));
let fetched: Vec<_> =
block_on_stream(files_adapter.fetch_stream(Box::pin(stream::iter(vec![
lfs_key.clone(),
nonlfs_key.clone(),
]))))
.collect();
let lfs_entry = FileEntry::new(
lfs_key,
lfs_bytes.to_vec().into(),
Parents::default(),
lfs_metadata,
);
let nonlfs_entry = FileEntry::new(
nonlfs_key,
nonlfs_bytes.to_vec().into(),
Parents::default(),
nonlfs_metadata,
);
    let expected = vec![Ok(lfs_entry), Ok(nonlfs_entry)];
    assert_eq!(fetched.into_iter().collect::<Vec<_>>(), expected);
Ok(())
}
#[test]
fn test_files_extstore_ignore() -> Result<(), ()> {
// Set up mocked EdenAPI file and tree stores.
let lfs_metadata = Metadata {
size: Some(4),
flags: Some(Metadata::LFS_FLAG),
};
let nonlfs_metadata = Metadata {
size: Some(4),
flags: None,
};
let lfs_key = key("a", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let nonlfs_key = key("b", "def6f29d7b61f9cb70b2f14f79cd5c43c38e21b2");
let lfs_bytes = Bytes::from("1234");
let nonlfs_bytes = Bytes::from("2345");
let files = hashmap! {
lfs_key.clone() => (lfs_bytes.clone(), lfs_metadata.flags),
nonlfs_key.clone() => (nonlfs_bytes.clone(), nonlfs_metadata.flags)
};
let trees = HashMap::new();
let client = FakeEdenApi::new()
.files_with_flags(files)
.trees(trees)
.into_arc();
let remote_files = EdenApiRemoteStore::<File>::new("repo", client, None);
let files_adapter = Arc::new(remote_files.get_scmstore_adapter(ExtStoredPolicy::Ignore));
let fetched: Vec<_> =
block_on_stream(files_adapter.fetch_stream(Box::pin(stream::iter(vec![
lfs_key.clone(),
nonlfs_key.clone(),
]))))
.collect();
let _lfs_entry = FileEntry::new(
|
lfs_metadata,
);
let nonlfs_entry = FileEntry::new(
nonlfs_key,
nonlfs_bytes.to_vec().into(),
Parents::default(),
nonlfs_metadata,
);
    let expected = vec![Err(FetchError::not_found(lfs_key)), Ok(nonlfs_entry)];
    assert_eq!(fetched.into_iter().collect::<Vec<_>>(), expected);
Ok(())
}
}
|
lfs_key.clone(),
lfs_bytes.to_vec().into(),
Parents::default(),
|
random_line_split
|
raytrace.rs
|
use rand;
use std;
use prelude::*;
use scene;
pub struct Output {
data : Vec<RGB>,
w : u32,
h : u32,
}
impl Output {
pub fn new(w: u32, h: u32) -> Self {
Output {
data : std::iter::repeat(RGB { r: 0.0, g: 0.0, b: 0.0 }).take((w * h) as usize).collect(),
w : w,
h : h,
}
}
pub fn pixel_mut(&mut self, x: u32, y: u32) -> &mut RGB {
self.data.get_mut((y * self.w + x) as usize).unwrap()
}
pub fn to_vec(self) -> Vec<RGB> {
self.data
}
}
/// A unit of work to be done.
/// Consists of a ray to trace, an attenuation, and a pixel location to draw to.
struct Work {
pub ray : Ray,
pub pixel_x : u32,
pub pixel_y : u32,
pub attenuation : RGB,
}
fn cast<'a>(s: &'a scene::T, ray: &Ray) -> Option<scene::Collision<'a>> {
let mut first_collision: Option<scene::Collision<'a>> = None;
for object in &s.objects {
if let Some(collision) = object.intersect_ray(ray) {
if let Some(first_collision) = first_collision.as_ref() {
if first_collision.toi < collision.toi {
continue
}
}
first_collision = Some(collision);
}
}
first_collision
}
fn perturb<Rng: rand::Rng>(unperturbed: Vector, normal: Vector, shininess: f32, rng: &mut Rng) -> Vector {
let rotation = {
let y = unperturbed;
let x =
if unperturbed.x <= 0.5 {
// specialized cross-product for crossing with x axis
Vector::new(0.0, unperturbed.z, -unperturbed.y)
} else {
// specialized cross-product for crossing with y axis
Vector::new(-unperturbed.z, 0.0, unperturbed.x)
};
let x = normalize(x);
let z = cross(y, x);
Matrix::from_cols(x, y, z)
};
for _ in 0..4 {
let altitude = rng.next_f32().asin();
let altitude = std::f32::consts::FRAC_PI_2 * (altitude / std::f32::consts::FRAC_PI_2).powf(shininess.exp());
let altitude = std::f32::consts::FRAC_PI_2 - altitude;
let azimuth = rng.next_f32() * 2.0 * std::f32::consts::PI;
let xz = altitude.cos();
let direction = rotation * Vector::new(azimuth.cos() * xz, altitude.sin(), azimuth.sin() * xz);
if dot(direction, normal) >= 0.0 {
return direction
}
}
// If we failed this many times, we're probably hitting some corner case (e.g. divide-by-zero).
unperturbed
}
fn do_work<Rng: rand::Rng, AddWork: FnMut(Work)> (
s: &scene::T,
work: &Work,
rng: &mut Rng,
add_work: &mut AddWork,
output: &mut Output,
) {
let min_attenuation = 0.01;
if work.attenuation.r < min_attenuation &&
work.attenuation.g < min_attenuation &&
work.attenuation.b < min_attenuation {
return
}
let collision =
match cast(s, &work.ray) {
None => return,
Some(c) => c,
};
|
scene::Texture::SolidColor(color) => color,
};
let color = work.attenuation * color;
*output.pixel_mut(work.pixel_x, work.pixel_y) += color * collision.object.emittance;
let make_ray = {
let location = collision.location;
move |direction| {
Ray {
direction : direction,
origin : location + 0.01 * direction,
}
}
};
let make_work = {
let pixel_x = work.pixel_x;
let pixel_y = work.pixel_y;
move |ray, attenuation| {
Work {
ray : ray,
attenuation : attenuation,
pixel_x : pixel_x,
pixel_y : pixel_y,
}
}
};
let reflected = work.ray.direction - 2.0 * dot(work.ray.direction, collision.normal) * collision.normal;
let reflected = perturb(reflected, collision.normal, collision.object.shininess, rng);
add_work(make_work(make_ray(reflected), color * collision.object.reflectance));
let transmitted = work.ray.direction;
let transmitted = perturb(transmitted, -collision.normal, collision.object.shininess, rng);
add_work(make_work(make_ray(transmitted), color * collision.object.transmittance));
}
pub fn scene<Rng: rand::Rng>(s: &scene::T, width: u32, height: u32, rng: &mut Rng) -> Output {
let mut output = Output::new(width, height);
let mut work_items = std::collections::VecDeque::new();
let aspect = width as f32 / height as f32;
let max_y = (s.fovy / 2.0).tan();
let scale = 2.0 * max_y / height as f32;
let shift = -max_y;
let view_to_world = Matrix::from_cols(s.x(), s.y(), s.z());
for y in 0.. height {
for x in 0.. width {
// in view coordinates
let ray =
Vector::new(
scale * x as f32 + shift * aspect,
scale * y as f32 + shift,
1.0,
);
work_items.push_back(
Work {
ray :
Ray {
origin : s.eye,
direction : normalize(view_to_world * ray),
},
pixel_x : x,
pixel_y : y,
attenuation : RGB { r: 1.0, g: 1.0, b: 1.0 },
}
);
}
}
while let Some(work) = work_items.pop_front() {
let mut add_work = |work| work_items.push_back(work);
do_work(s, &work, rng, &mut add_work, &mut output);
}
output
}
|
let color =
match collision.object.texture {
|
random_line_split
|
raytrace.rs
|
use rand;
use std;
use prelude::*;
use scene;
pub struct Output {
data : Vec<RGB>,
w : u32,
h : u32,
}
impl Output {
pub fn new(w: u32, h: u32) -> Self {
Output {
data : std::iter::repeat(RGB { r: 0.0, g: 0.0, b: 0.0 }).take((w * h) as usize).collect(),
w : w,
h : h,
}
}
pub fn pixel_mut(&mut self, x: u32, y: u32) -> &mut RGB {
self.data.get_mut((y * self.w + x) as usize).unwrap()
}
pub fn to_vec(self) -> Vec<RGB> {
self.data
}
}
/// A unit of work to be done.
/// Consists of a ray to trace, an attenuation, and a pixel location to draw to.
struct
|
{
pub ray : Ray,
pub pixel_x : u32,
pub pixel_y : u32,
pub attenuation : RGB,
}
fn cast<'a>(s: &'a scene::T, ray: &Ray) -> Option<scene::Collision<'a>> {
let mut first_collision: Option<scene::Collision<'a>> = None;
for object in &s.objects {
if let Some(collision) = object.intersect_ray(ray) {
if let Some(first_collision) = first_collision.as_ref() {
if first_collision.toi < collision.toi {
continue
}
}
first_collision = Some(collision);
}
}
first_collision
}
fn perturb<Rng: rand::Rng>(unperturbed: Vector, normal: Vector, shininess: f32, rng: &mut Rng) -> Vector {
let rotation = {
let y = unperturbed;
let x =
if unperturbed.x <= 0.5 {
// specialized cross-product for crossing with x axis
Vector::new(0.0, unperturbed.z, -unperturbed.y)
} else {
// specialized cross-product for crossing with y axis
Vector::new(-unperturbed.z, 0.0, unperturbed.x)
};
let x = normalize(x);
let z = cross(y, x);
Matrix::from_cols(x, y, z)
};
for _ in 0..4 {
let altitude = rng.next_f32().asin();
let altitude = std::f32::consts::FRAC_PI_2 * (altitude / std::f32::consts::FRAC_PI_2).powf(shininess.exp());
let altitude = std::f32::consts::FRAC_PI_2 - altitude;
let azimuth = rng.next_f32() * 2.0 * std::f32::consts::PI;
let xz = altitude.cos();
let direction = rotation * Vector::new(azimuth.cos() * xz, altitude.sin(), azimuth.sin() * xz);
if dot(direction, normal) >= 0.0 {
return direction
}
}
// If we failed this many times, we're probably hitting some corner case (e.g. divide-by-zero).
unperturbed
}
fn do_work<Rng: rand::Rng, AddWork: FnMut(Work)> (
s: &scene::T,
work: &Work,
rng: &mut Rng,
add_work: &mut AddWork,
output: &mut Output,
) {
let min_attenuation = 0.01;
if work.attenuation.r < min_attenuation &&
work.attenuation.g < min_attenuation &&
work.attenuation.b < min_attenuation {
return
}
let collision =
match cast(s, &work.ray) {
None => return,
Some(c) => c,
};
let color =
match collision.object.texture {
scene::Texture::SolidColor(color) => color,
};
let color = work.attenuation * color;
*output.pixel_mut(work.pixel_x, work.pixel_y) += color * collision.object.emittance;
let make_ray = {
let location = collision.location;
move |direction| {
Ray {
direction : direction,
origin : location + 0.01 * direction,
}
}
};
let make_work = {
let pixel_x = work.pixel_x;
let pixel_y = work.pixel_y;
move |ray, attenuation| {
Work {
ray : ray,
attenuation : attenuation,
pixel_x : pixel_x,
pixel_y : pixel_y,
}
}
};
let reflected = work.ray.direction - 2.0 * dot(work.ray.direction, collision.normal) * collision.normal;
let reflected = perturb(reflected, collision.normal, collision.object.shininess, rng);
add_work(make_work(make_ray(reflected), color * collision.object.reflectance));
let transmitted = work.ray.direction;
let transmitted = perturb(transmitted, -collision.normal, collision.object.shininess, rng);
add_work(make_work(make_ray(transmitted), color * collision.object.transmittance));
}
pub fn scene<Rng: rand::Rng>(s: &scene::T, width: u32, height: u32, rng: &mut Rng) -> Output {
let mut output = Output::new(width, height);
let mut work_items = std::collections::VecDeque::new();
let aspect = width as f32 / height as f32;
let max_y = (s.fovy / 2.0).tan();
let scale = 2.0 * max_y / height as f32;
let shift = -max_y;
let view_to_world = Matrix::from_cols(s.x(), s.y(), s.z());
for y in 0.. height {
for x in 0.. width {
// in view coordinates
let ray =
Vector::new(
scale * x as f32 + shift * aspect,
scale * y as f32 + shift,
1.0,
);
work_items.push_back(
Work {
ray :
Ray {
origin : s.eye,
direction : normalize(view_to_world * ray),
},
pixel_x : x,
pixel_y : y,
attenuation : RGB { r: 1.0, g: 1.0, b: 1.0 },
}
);
}
}
while let Some(work) = work_items.pop_front() {
let mut add_work = |work| work_items.push_back(work);
do_work(s, &work, rng, &mut add_work, &mut output);
}
output
}
|
Work
|
identifier_name
|
raytrace.rs
|
use rand;
use std;
use prelude::*;
use scene;
pub struct Output {
data : Vec<RGB>,
w : u32,
h : u32,
}
impl Output {
pub fn new(w: u32, h: u32) -> Self {
Output {
data : std::iter::repeat(RGB { r: 0.0, g: 0.0, b: 0.0 }).take((w * h) as usize).collect(),
w : w,
h : h,
}
}
pub fn pixel_mut(&mut self, x: u32, y: u32) -> &mut RGB {
self.data.get_mut((y * self.w + x) as usize).unwrap()
}
pub fn to_vec(self) -> Vec<RGB> {
self.data
}
}
/// A unit of work to be done.
/// Consists of a ray to trace, an attenuation, and a pixel location to draw to.
struct Work {
pub ray : Ray,
pub pixel_x : u32,
pub pixel_y : u32,
pub attenuation : RGB,
}
fn cast<'a>(s: &'a scene::T, ray: &Ray) -> Option<scene::Collision<'a>> {
let mut first_collision: Option<scene::Collision<'a>> = None;
for object in &s.objects {
if let Some(collision) = object.intersect_ray(ray) {
if let Some(first_collision) = first_collision.as_ref()
|
first_collision = Some(collision);
}
}
first_collision
}
fn perturb<Rng: rand::Rng>(unperturbed: Vector, normal: Vector, shininess: f32, rng: &mut Rng) -> Vector {
let rotation = {
let y = unperturbed;
let x =
if unperturbed.x <= 0.5 {
// specialized cross-product for crossing with x axis
Vector::new(0.0, unperturbed.z, -unperturbed.y)
} else {
// specialized cross-product for crossing with y axis
Vector::new(-unperturbed.z, 0.0, unperturbed.x)
};
let x = normalize(x);
let z = cross(y, x);
Matrix::from_cols(x, y, z)
};
for _ in 0..4 {
let altitude = rng.next_f32().asin();
let altitude = std::f32::consts::FRAC_PI_2 * (altitude / std::f32::consts::FRAC_PI_2).powf(shininess.exp());
let altitude = std::f32::consts::FRAC_PI_2 - altitude;
let azimuth = rng.next_f32() * 2.0 * std::f32::consts::PI;
let xz = altitude.cos();
let direction = rotation * Vector::new(azimuth.cos() * xz, altitude.sin(), azimuth.sin() * xz);
if dot(direction, normal) >= 0.0 {
return direction
}
}
// If we failed this many times, we're probably hitting some corner case (e.g. divide-by-zero).
unperturbed
}
fn do_work<Rng: rand::Rng, AddWork: FnMut(Work)> (
s: &scene::T,
work: &Work,
rng: &mut Rng,
add_work: &mut AddWork,
output: &mut Output,
) {
let min_attenuation = 0.01;
if work.attenuation.r < min_attenuation &&
work.attenuation.g < min_attenuation &&
work.attenuation.b < min_attenuation {
return
}
let collision =
match cast(s, &work.ray) {
None => return,
Some(c) => c,
};
let color =
match collision.object.texture {
scene::Texture::SolidColor(color) => color,
};
let color = work.attenuation * color;
*output.pixel_mut(work.pixel_x, work.pixel_y) += color * collision.object.emittance;
let make_ray = {
let location = collision.location;
move |direction| {
Ray {
direction : direction,
origin : location + 0.01 * direction,
}
}
};
let make_work = {
let pixel_x = work.pixel_x;
let pixel_y = work.pixel_y;
move |ray, attenuation| {
Work {
ray : ray,
attenuation : attenuation,
pixel_x : pixel_x,
pixel_y : pixel_y,
}
}
};
let reflected = work.ray.direction - 2.0 * dot(work.ray.direction, collision.normal) * collision.normal;
let reflected = perturb(reflected, collision.normal, collision.object.shininess, rng);
add_work(make_work(make_ray(reflected), color * collision.object.reflectance));
let transmitted = work.ray.direction;
let transmitted = perturb(transmitted, -collision.normal, collision.object.shininess, rng);
add_work(make_work(make_ray(transmitted), color * collision.object.transmittance));
}
pub fn scene<Rng: rand::Rng>(s: &scene::T, width: u32, height: u32, rng: &mut Rng) -> Output {
let mut output = Output::new(width, height);
let mut work_items = std::collections::VecDeque::new();
let aspect = width as f32 / height as f32;
let max_y = (s.fovy / 2.0).tan();
let scale = 2.0 * max_y / height as f32;
let shift = -max_y;
let view_to_world = Matrix::from_cols(s.x(), s.y(), s.z());
for y in 0.. height {
for x in 0.. width {
// in view coordinates
let ray =
Vector::new(
scale * x as f32 + shift * aspect,
scale * y as f32 + shift,
1.0,
);
work_items.push_back(
Work {
ray :
Ray {
origin : s.eye,
direction : normalize(view_to_world * ray),
},
pixel_x : x,
pixel_y : y,
attenuation : RGB { r: 1.0, g: 1.0, b: 1.0 },
}
);
}
}
while let Some(work) = work_items.pop_front() {
let mut add_work = |work| work_items.push_back(work);
do_work(s, &work, rng, &mut add_work, &mut output);
}
output
}
|
{
if first_collision.toi < collision.toi {
continue
}
}
|
conditional_block
|
clone.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The `Clone` trait for types that cannot be 'implicitly copied'
//!
//! In Rust, some simple types are "implicitly copyable" and when you
//! assign them or pass them as arguments, the receiver will get a copy,
//! leaving the original value in place. These types do not require
//! allocation to copy and do not have finalizers (i.e. they do not
//! contain owned boxes or implement `Drop`), so the compiler considers
//! them cheap and safe to copy. For other types copies must be made
//! explicitly, by convention implementing the `Clone` trait and calling
//! the `clone` method.
#![stable]
use marker::Sized;
/// A common trait for cloning an object.
#[stable]
pub trait Clone : Sized {
/// Returns a copy of the value.
#[stable]
fn clone(&self) -> Self;
/// Perform copy-assignment from `source`.
///
/// `a.clone_from(&b)` is equivalent to `a = b.clone()` in functionality,
/// but can be overridden to reuse the resources of `a` to avoid unnecessary
/// allocations.
#[inline(always)]
#[unstable = "this function is rarely used"]
fn
|
(&mut self, source: &Self) {
*self = source.clone()
}
}
#[stable]
impl<'a, T: ?Sized> Clone for &'a T {
/// Return a shallow copy of the reference.
#[inline]
fn clone(&self) -> &'a T { *self }
}
macro_rules! clone_impl {
($t:ty) => {
#[stable]
impl Clone for $t {
/// Return a deep copy of the value.
#[inline]
fn clone(&self) -> $t { *self }
}
}
}
clone_impl! { int }
clone_impl! { i8 }
clone_impl! { i16 }
clone_impl! { i32 }
clone_impl! { i64 }
clone_impl! { uint }
clone_impl! { u8 }
clone_impl! { u16 }
clone_impl! { u32 }
clone_impl! { u64 }
clone_impl! { f32 }
clone_impl! { f64 }
clone_impl! { () }
clone_impl! { bool }
clone_impl! { char }
macro_rules! extern_fn_clone {
($($A:ident),*) => (
#[experimental = "this may not be sufficient for fns with region parameters"]
impl<$($A,)* ReturnType> Clone for extern "Rust" fn($($A),*) -> ReturnType {
/// Return a copy of a function pointer
#[inline]
fn clone(&self) -> extern "Rust" fn($($A),*) -> ReturnType { *self }
}
)
}
extern_fn_clone! {}
extern_fn_clone! { A }
extern_fn_clone! { A, B }
extern_fn_clone! { A, B, C }
extern_fn_clone! { A, B, C, D }
extern_fn_clone! { A, B, C, D, E }
extern_fn_clone! { A, B, C, D, E, F }
extern_fn_clone! { A, B, C, D, E, F, G }
extern_fn_clone! { A, B, C, D, E, F, G, H }
|
clone_from
|
identifier_name
|
clone.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The `Clone` trait for types that cannot be 'implicitly copied'
//!
//! In Rust, some simple types are "implicitly copyable" and when you
//! assign them or pass them as arguments, the receiver will get a copy,
//! leaving the original value in place. These types do not require
//! allocation to copy and do not have finalizers (i.e. they do not
//! contain owned boxes or implement `Drop`), so the compiler considers
//! them cheap and safe to copy. For other types copies must be made
//! explicitly, by convention implementing the `Clone` trait and calling
//! the `clone` method.
#![stable]
use marker::Sized;
/// A common trait for cloning an object.
#[stable]
pub trait Clone : Sized {
/// Returns a copy of the value.
#[stable]
fn clone(&self) -> Self;
/// Perform copy-assignment from `source`.
///
/// `a.clone_from(&b)` is equivalent to `a = b.clone()` in functionality,
/// but can be overridden to reuse the resources of `a` to avoid unnecessary
/// allocations.
#[inline(always)]
#[unstable = "this function is rarely used"]
fn clone_from(&mut self, source: &Self) {
*self = source.clone()
}
}
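// Illustrative sketch only (not part of this module): a minimal wrapper type showing how
// `clone_from` can be overridden to reuse the resources already owned by `self` instead of
// allocating afresh, by delegating to the inner value's own `clone_from`.
struct Wrapper<T> {
    value: T,
}

impl<T: Clone> Clone for Wrapper<T> {
    fn clone(&self) -> Wrapper<T> {
        Wrapper { value: self.value.clone() }
    }

    fn clone_from(&mut self, source: &Wrapper<T>) {
        // May reuse `self.value`'s existing storage, depending on `T`'s implementation.
        self.value.clone_from(&source.value);
    }
}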
#[stable]
impl<'a, T: ?Sized> Clone for &'a T {
/// Return a shallow copy of the reference.
#[inline]
fn clone(&self) -> &'a T
|
}
macro_rules! clone_impl {
($t:ty) => {
#[stable]
impl Clone for $t {
/// Return a deep copy of the value.
#[inline]
fn clone(&self) -> $t { *self }
}
}
}
clone_impl! { int }
clone_impl! { i8 }
clone_impl! { i16 }
clone_impl! { i32 }
clone_impl! { i64 }
clone_impl! { uint }
clone_impl! { u8 }
clone_impl! { u16 }
clone_impl! { u32 }
clone_impl! { u64 }
clone_impl! { f32 }
clone_impl! { f64 }
clone_impl! { () }
clone_impl! { bool }
clone_impl! { char }
macro_rules! extern_fn_clone {
($($A:ident),*) => (
#[experimental = "this may not be sufficient for fns with region parameters"]
impl<$($A,)* ReturnType> Clone for extern "Rust" fn($($A),*) -> ReturnType {
/// Return a copy of a function pointer
#[inline]
fn clone(&self) -> extern "Rust" fn($($A),*) -> ReturnType { *self }
}
)
}
extern_fn_clone! {}
extern_fn_clone! { A }
extern_fn_clone! { A, B }
extern_fn_clone! { A, B, C }
extern_fn_clone! { A, B, C, D }
extern_fn_clone! { A, B, C, D, E }
extern_fn_clone! { A, B, C, D, E, F }
extern_fn_clone! { A, B, C, D, E, F, G }
extern_fn_clone! { A, B, C, D, E, F, G, H }
|
{ *self }
|
identifier_body
|
clone.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The `Clone` trait for types that cannot be 'implicitly copied'
//!
//! In Rust, some simple types are "implicitly copyable" and when you
//! assign them or pass them as arguments, the receiver will get a copy,
//! leaving the original value in place. These types do not require
//! allocation to copy and do not have finalizers (i.e. they do not
//! contain owned boxes or implement `Drop`), so the compiler considers
//! them cheap and safe to copy. For other types copies must be made
//! explicitly, by convention implementing the `Clone` trait and calling
//! the `clone` method.
#![stable]
use marker::Sized;
/// A common trait for cloning an object.
#[stable]
pub trait Clone : Sized {
/// Returns a copy of the value.
#[stable]
fn clone(&self) -> Self;
/// Perform copy-assignment from `source`.
///
/// `a.clone_from(&b)` is equivalent to `a = b.clone()` in functionality,
/// but can be overridden to reuse the resources of `a` to avoid unnecessary
/// allocations.
#[inline(always)]
#[unstable = "this function is rarely used"]
fn clone_from(&mut self, source: &Self) {
*self = source.clone()
}
}
#[stable]
impl<'a, T: ?Sized> Clone for &'a T {
/// Return a shallow copy of the reference.
#[inline]
fn clone(&self) -> &'a T { *self }
}
macro_rules! clone_impl {
($t:ty) => {
#[stable]
impl Clone for $t {
/// Return a deep copy of the value.
#[inline]
fn clone(&self) -> $t { *self }
}
}
}
clone_impl! { int }
clone_impl! { i8 }
clone_impl! { i16 }
clone_impl! { i32 }
clone_impl! { i64 }
clone_impl! { uint }
clone_impl! { u8 }
clone_impl! { u16 }
clone_impl! { u32 }
clone_impl! { u64 }
clone_impl! { f32 }
clone_impl! { f64 }
clone_impl! { () }
clone_impl! { bool }
clone_impl! { char }
macro_rules! extern_fn_clone {
($($A:ident),*) => (
|
/// Return a copy of a function pointer
#[inline]
fn clone(&self) -> extern "Rust" fn($($A),*) -> ReturnType { *self }
}
)
}
extern_fn_clone! {}
extern_fn_clone! { A }
extern_fn_clone! { A, B }
extern_fn_clone! { A, B, C }
extern_fn_clone! { A, B, C, D }
extern_fn_clone! { A, B, C, D, E }
extern_fn_clone! { A, B, C, D, E, F }
extern_fn_clone! { A, B, C, D, E, F, G }
extern_fn_clone! { A, B, C, D, E, F, G, H }
|
#[experimental = "this may not be sufficient for fns with region parameters"]
impl<$($A,)* ReturnType> Clone for extern "Rust" fn($($A),*) -> ReturnType {
|
random_line_split
|
main.rs
|
extern crate piston;
extern crate piston_window;
extern crate graphics;
extern crate opengl_graphics;
extern crate rand;
#[cfg(test)]
#[macro_use(expect)]
extern crate expectest;
use piston_window::WindowSettings;
use opengl_graphics::{GlGraphics, OpenGL};
use std::cell::RefCell;
use std::rc::Rc;
mod actors;
mod intersect;
mod point;
mod scene;
mod config;
use scene::{MainScene, Scene};
use config::Config;
fn main()
|
{
let opengl = OpenGL::V3_2;
let config = Config::new();
let dims = [config.width() as u32, config.height() as u32];
let window_settings = WindowSettings::new("vs-game", dims).exit_on_esc(true);
let window = Rc::new(RefCell::new(window_settings.build().unwrap()));
let mut gl = GlGraphics::new(opengl);
let mut rng = rand::thread_rng();
let mut scene: Box<Scene> = Box::new(MainScene::new(1, &config, &mut rng));
while let Some(new_scene) = scene.events(&mut rng, window.clone(), &mut gl, &config) {
scene = new_scene;
}
}
|
identifier_body
|
|
main.rs
|
extern crate piston;
extern crate piston_window;
extern crate graphics;
extern crate opengl_graphics;
extern crate rand;
#[cfg(test)]
#[macro_use(expect)]
extern crate expectest;
use piston_window::WindowSettings;
use opengl_graphics::{GlGraphics, OpenGL};
use std::cell::RefCell;
use std::rc::Rc;
mod actors;
mod intersect;
mod point;
mod scene;
mod config;
use scene::{MainScene, Scene};
use config::Config;
fn main() {
|
let window_settings = WindowSettings::new("vs-game", dims).exit_on_esc(true);
let window = Rc::new(RefCell::new(window_settings.build().unwrap()));
let mut gl = GlGraphics::new(opengl);
let mut rng = rand::thread_rng();
let mut scene: Box<Scene> = Box::new(MainScene::new(1, &config, &mut rng));
while let Some(new_scene) = scene.events(&mut rng, window.clone(), &mut gl, &config) {
scene = new_scene;
}
}
|
let opengl = OpenGL::V3_2;
let config = Config::new();
let dims = [config.width() as u32, config.height() as u32];
|
random_line_split
|
main.rs
|
extern crate piston;
extern crate piston_window;
extern crate graphics;
extern crate opengl_graphics;
extern crate rand;
#[cfg(test)]
#[macro_use(expect)]
extern crate expectest;
use piston_window::WindowSettings;
use opengl_graphics::{GlGraphics, OpenGL};
use std::cell::RefCell;
use std::rc::Rc;
mod actors;
mod intersect;
mod point;
mod scene;
mod config;
use scene::{MainScene, Scene};
use config::Config;
fn
|
() {
let opengl = OpenGL::V3_2;
let config = Config::new();
let dims = [config.width() as u32, config.height() as u32];
let window_settings = WindowSettings::new("vs-game", dims).exit_on_esc(true);
let window = Rc::new(RefCell::new(window_settings.build().unwrap()));
let mut gl = GlGraphics::new(opengl);
let mut rng = rand::thread_rng();
let mut scene: Box<Scene> = Box::new(MainScene::new(1, &config, &mut rng));
while let Some(new_scene) = scene.events(&mut rng, window.clone(), &mut gl, &config) {
scene = new_scene;
}
}
|
main
|
identifier_name
|
htmlheadelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::DocumentBinding::DocumentMethods;
use crate::dom::bindings::codegen::Bindings::HTMLHeadElementBinding;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::{determine_policy_for_token, Document};
use crate::dom::element::Element;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::htmlmetaelement::HTMLMetaElement;
use crate::dom::node::{document_from_node, Node};
use crate::dom::userscripts::load_script;
use crate::dom::virtualmethods::VirtualMethods;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLHeadElement {
htmlelement: HTMLElement,
}
impl HTMLHeadElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLHeadElement {
HTMLHeadElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLHeadElement> {
Node::reflect_node(
Box::new(HTMLHeadElement::new_inherited(local_name, prefix, document)),
document,
HTMLHeadElementBinding::Wrap,
)
}
/// <https://html.spec.whatwg.org/multipage/#meta-referrer>
pub fn set_document_referrer(&self) {
let doc = document_from_node(self);
        if doc.GetHead().deref() != Some(self) {
return;
}
let node = self.upcast::<Node>();
let candidates = node
.traverse_preorder()
.filter_map(DomRoot::downcast::<Element>)
.filter(|elem| elem.is::<HTMLMetaElement>())
.filter(|elem| elem.get_string_attribute(&local_name!("name")) == "referrer")
.filter(|elem| {
elem.get_attribute(&ns!(), &local_name!("content"))
.is_some()
});
for meta in candidates {
if let Some(ref content) = meta.get_attribute(&ns!(), &local_name!("content"))
|
}
}
}
impl VirtualMethods for HTMLHeadElement {
fn super_type(&self) -> Option<&dyn VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &dyn VirtualMethods)
}
fn bind_to_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.bind_to_tree(tree_in_doc);
}
load_script(self);
}
}
|
{
let content = content.value();
let content_val = content.trim();
if !content_val.is_empty() {
doc.set_referrer_policy(determine_policy_for_token(content_val));
return;
}
}
|
conditional_block
|
htmlheadelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::DocumentBinding::DocumentMethods;
use crate::dom::bindings::codegen::Bindings::HTMLHeadElementBinding;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::{determine_policy_for_token, Document};
use crate::dom::element::Element;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::htmlmetaelement::HTMLMetaElement;
use crate::dom::node::{document_from_node, Node};
use crate::dom::userscripts::load_script;
use crate::dom::virtualmethods::VirtualMethods;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLHeadElement {
htmlelement: HTMLElement,
}
impl HTMLHeadElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLHeadElement {
HTMLHeadElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLHeadElement> {
Node::reflect_node(
Box::new(HTMLHeadElement::new_inherited(local_name, prefix, document)),
document,
HTMLHeadElementBinding::Wrap,
)
}
/// <https://html.spec.whatwg.org/multipage/#meta-referrer>
pub fn set_document_referrer(&self) {
let doc = document_from_node(self);
        if doc.GetHead().deref() != Some(self) {
return;
}
let node = self.upcast::<Node>();
let candidates = node
.traverse_preorder()
.filter_map(DomRoot::downcast::<Element>)
.filter(|elem| elem.is::<HTMLMetaElement>())
.filter(|elem| elem.get_string_attribute(&local_name!("name")) == "referrer")
.filter(|elem| {
elem.get_attribute(&ns!(), &local_name!("content"))
.is_some()
});
for meta in candidates {
if let Some(ref content) = meta.get_attribute(&ns!(), &local_name!("content")) {
let content = content.value();
let content_val = content.trim();
                if !content_val.is_empty() {
doc.set_referrer_policy(determine_policy_for_token(content_val));
return;
}
}
}
}
}
impl VirtualMethods for HTMLHeadElement {
fn super_type(&self) -> Option<&dyn VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &dyn VirtualMethods)
}
fn bind_to_tree(&self, tree_in_doc: bool)
|
}
|
{
if let Some(ref s) = self.super_type() {
s.bind_to_tree(tree_in_doc);
}
load_script(self);
}
|
identifier_body
|
htmlheadelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::DocumentBinding::DocumentMethods;
use crate::dom::bindings::codegen::Bindings::HTMLHeadElementBinding;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::{determine_policy_for_token, Document};
use crate::dom::element::Element;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::htmlmetaelement::HTMLMetaElement;
use crate::dom::node::{document_from_node, Node};
use crate::dom::userscripts::load_script;
use crate::dom::virtualmethods::VirtualMethods;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLHeadElement {
htmlelement: HTMLElement,
}
impl HTMLHeadElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLHeadElement {
HTMLHeadElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLHeadElement> {
Node::reflect_node(
Box::new(HTMLHeadElement::new_inherited(local_name, prefix, document)),
document,
HTMLHeadElementBinding::Wrap,
)
}
/// <https://html.spec.whatwg.org/multipage/#meta-referrer>
pub fn set_document_referrer(&self) {
let doc = document_from_node(self);
        if doc.GetHead().deref() != Some(self) {
return;
}
let node = self.upcast::<Node>();
let candidates = node
.traverse_preorder()
.filter_map(DomRoot::downcast::<Element>)
.filter(|elem| elem.is::<HTMLMetaElement>())
.filter(|elem| elem.get_string_attribute(&local_name!("name")) == "referrer")
.filter(|elem| {
elem.get_attribute(&ns!(), &local_name!("content"))
.is_some()
});
for meta in candidates {
if let Some(ref content) = meta.get_attribute(&ns!(), &local_name!("content")) {
let content = content.value();
let content_val = content.trim();
                if !content_val.is_empty() {
doc.set_referrer_policy(determine_policy_for_token(content_val));
return;
}
}
}
}
}
impl VirtualMethods for HTMLHeadElement {
fn super_type(&self) -> Option<&dyn VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &dyn VirtualMethods)
}
fn
|
(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.bind_to_tree(tree_in_doc);
}
load_script(self);
}
}
|
bind_to_tree
|
identifier_name
|
htmlheadelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::DocumentBinding::DocumentMethods;
use crate::dom::bindings::codegen::Bindings::HTMLHeadElementBinding;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::{determine_policy_for_token, Document};
use crate::dom::element::Element;
use crate::dom::htmlelement::HTMLElement;
use crate::dom::htmlmetaelement::HTMLMetaElement;
use crate::dom::node::{document_from_node, Node};
use crate::dom::userscripts::load_script;
use crate::dom::virtualmethods::VirtualMethods;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLHeadElement {
htmlelement: HTMLElement,
}
impl HTMLHeadElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLHeadElement {
HTMLHeadElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLHeadElement> {
Node::reflect_node(
Box::new(HTMLHeadElement::new_inherited(local_name, prefix, document)),
document,
HTMLHeadElementBinding::Wrap,
)
}
/// <https://html.spec.whatwg.org/multipage/#meta-referrer>
pub fn set_document_referrer(&self) {
let doc = document_from_node(self);
        if doc.GetHead().deref() != Some(self) {
return;
}
let node = self.upcast::<Node>();
let candidates = node
.traverse_preorder()
.filter_map(DomRoot::downcast::<Element>)
.filter(|elem| elem.is::<HTMLMetaElement>())
.filter(|elem| elem.get_string_attribute(&local_name!("name")) == "referrer")
.filter(|elem| {
elem.get_attribute(&ns!(), &local_name!("content"))
.is_some()
});
for meta in candidates {
if let Some(ref content) = meta.get_attribute(&ns!(), &local_name!("content")) {
let content = content.value();
|
return;
}
}
}
}
}
impl VirtualMethods for HTMLHeadElement {
fn super_type(&self) -> Option<&dyn VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &dyn VirtualMethods)
}
fn bind_to_tree(&self, tree_in_doc: bool) {
if let Some(ref s) = self.super_type() {
s.bind_to_tree(tree_in_doc);
}
load_script(self);
}
}
|
let content_val = content.trim();
if !content_val.is_empty() {
doc.set_referrer_policy(determine_policy_for_token(content_val));
|
random_line_split
|
area_frame_allocator.rs
|
use memory::{Frame, FrameAllocator};
use memory::areas::{Area, AreaIter, memory_areas};
use memory::paging::{PhysicalAddress};
#[derive(Debug)]
pub struct AreaFrameAllocator {
next_free_frame: Frame,
current_area: Option<&'static Area>,
areas: AreaIter,
kernel_start: Frame,
kernel_end: Frame,
}
impl AreaFrameAllocator {
pub fn new(kernel_start: PhysicalAddress, kernel_end: PhysicalAddress) -> AreaFrameAllocator {
let mut allocator = AreaFrameAllocator {
next_free_frame: Frame::containing_address(PhysicalAddress::new(0)),
current_area: None,
areas: memory_areas(),
kernel_start: Frame::containing_address(kernel_start),
kernel_end: Frame::containing_address(kernel_end),
};
allocator.choose_next_area();
allocator
}
fn choose_next_area(&mut self) {
self.current_area = self.areas.clone().filter(|area| {
let address = area.base + area.length - 1;
Frame::containing_address(PhysicalAddress::new(address as usize)) >= self.next_free_frame
}).min_by_key(|area| area.base);
if let Some(area) = self.current_area {
let start_frame = Frame::containing_address(PhysicalAddress::new(area.base as usize));
if self.next_free_frame < start_frame {
self.next_free_frame = start_frame;
}
}
}
}
impl FrameAllocator for AreaFrameAllocator {
fn allocate_frame(&mut self) -> Option<Frame> {
if let Some(area) = self.current_area
|
else {
None // no free frames left
}
}
fn deallocate_frame(&mut self, _frame: Frame) {
unimplemented!()
}
}
|
{
let frame = Frame{ number: self.next_free_frame.number };
let current_area_last_frame = {
let address = area.base + area.length - 1;
Frame::containing_address(PhysicalAddress::new(address as usize))
};
if frame > current_area_last_frame {
self.choose_next_area();
} else if frame >= self.kernel_start && frame <= self.kernel_end {
self.next_free_frame = Frame { number: self.kernel_end.number + 1 };
} else {
self.next_free_frame.number += 1;
return Some(frame);
}
self.allocate_frame() // Try again
}
|
conditional_block
|
area_frame_allocator.rs
|
use memory::{Frame, FrameAllocator};
use memory::areas::{Area, AreaIter, memory_areas};
use memory::paging::{PhysicalAddress};
#[derive(Debug)]
pub struct AreaFrameAllocator {
next_free_frame: Frame,
current_area: Option<&'static Area>,
areas: AreaIter,
kernel_start: Frame,
kernel_end: Frame,
}
impl AreaFrameAllocator {
pub fn new(kernel_start: PhysicalAddress, kernel_end: PhysicalAddress) -> AreaFrameAllocator {
let mut allocator = AreaFrameAllocator {
next_free_frame: Frame::containing_address(PhysicalAddress::new(0)),
current_area: None,
areas: memory_areas(),
kernel_start: Frame::containing_address(kernel_start),
kernel_end: Frame::containing_address(kernel_end),
};
allocator.choose_next_area();
allocator
}
fn
|
(&mut self) {
self.current_area = self.areas.clone().filter(|area| {
let address = area.base + area.length - 1;
Frame::containing_address(PhysicalAddress::new(address as usize)) >= self.next_free_frame
}).min_by_key(|area| area.base);
if let Some(area) = self.current_area {
let start_frame = Frame::containing_address(PhysicalAddress::new(area.base as usize));
if self.next_free_frame < start_frame {
self.next_free_frame = start_frame;
}
}
}
}
impl FrameAllocator for AreaFrameAllocator {
fn allocate_frame(&mut self) -> Option<Frame> {
if let Some(area) = self.current_area {
let frame = Frame{ number: self.next_free_frame.number };
let current_area_last_frame = {
let address = area.base + area.length - 1;
Frame::containing_address(PhysicalAddress::new(address as usize))
};
if frame > current_area_last_frame {
self.choose_next_area();
} else if frame >= self.kernel_start && frame <= self.kernel_end {
self.next_free_frame = Frame { number: self.kernel_end.number + 1 };
} else {
self.next_free_frame.number += 1;
return Some(frame);
}
self.allocate_frame() // Try again
} else {
None // no free frames left
}
}
fn deallocate_frame(&mut self, _frame: Frame) {
unimplemented!()
}
}
|
choose_next_area
|
identifier_name
|
area_frame_allocator.rs
|
use memory::{Frame, FrameAllocator};
use memory::areas::{Area, AreaIter, memory_areas};
use memory::paging::{PhysicalAddress};
#[derive(Debug)]
pub struct AreaFrameAllocator {
next_free_frame: Frame,
current_area: Option<&'static Area>,
areas: AreaIter,
kernel_start: Frame,
kernel_end: Frame,
}
impl AreaFrameAllocator {
pub fn new(kernel_start: PhysicalAddress, kernel_end: PhysicalAddress) -> AreaFrameAllocator {
let mut allocator = AreaFrameAllocator {
next_free_frame: Frame::containing_address(PhysicalAddress::new(0)),
current_area: None,
areas: memory_areas(),
kernel_start: Frame::containing_address(kernel_start),
kernel_end: Frame::containing_address(kernel_end),
};
allocator.choose_next_area();
allocator
}
fn choose_next_area(&mut self) {
self.current_area = self.areas.clone().filter(|area| {
let address = area.base + area.length - 1;
Frame::containing_address(PhysicalAddress::new(address as usize)) >= self.next_free_frame
}).min_by_key(|area| area.base);
if let Some(area) = self.current_area {
let start_frame = Frame::containing_address(PhysicalAddress::new(area.base as usize));
if self.next_free_frame < start_frame {
self.next_free_frame = start_frame;
}
}
}
}
impl FrameAllocator for AreaFrameAllocator {
fn allocate_frame(&mut self) -> Option<Frame> {
if let Some(area) = self.current_area {
let frame = Frame{ number: self.next_free_frame.number };
let current_area_last_frame = {
let address = area.base + area.length - 1;
Frame::containing_address(PhysicalAddress::new(address as usize))
};
if frame > current_area_last_frame {
self.choose_next_area();
} else if frame >= self.kernel_start && frame <= self.kernel_end {
self.next_free_frame = Frame { number: self.kernel_end.number + 1 };
} else {
self.next_free_frame.number += 1;
return Some(frame);
}
self.allocate_frame() // Try again
} else {
None // no free frames left
}
}
fn deallocate_frame(&mut self, _frame: Frame)
|
}
|
{
unimplemented!()
}
|
identifier_body
|
area_frame_allocator.rs
|
use memory::{Frame, FrameAllocator};
use memory::areas::{Area, AreaIter, memory_areas};
|
current_area: Option<&'static Area>,
areas: AreaIter,
kernel_start: Frame,
kernel_end: Frame,
}
impl AreaFrameAllocator {
pub fn new(kernel_start: PhysicalAddress, kernel_end: PhysicalAddress) -> AreaFrameAllocator {
let mut allocator = AreaFrameAllocator {
next_free_frame: Frame::containing_address(PhysicalAddress::new(0)),
current_area: None,
areas: memory_areas(),
kernel_start: Frame::containing_address(kernel_start),
kernel_end: Frame::containing_address(kernel_end),
};
allocator.choose_next_area();
allocator
}
fn choose_next_area(&mut self) {
self.current_area = self.areas.clone().filter(|area| {
let address = area.base + area.length - 1;
Frame::containing_address(PhysicalAddress::new(address as usize)) >= self.next_free_frame
}).min_by_key(|area| area.base);
if let Some(area) = self.current_area {
let start_frame = Frame::containing_address(PhysicalAddress::new(area.base as usize));
if self.next_free_frame < start_frame {
self.next_free_frame = start_frame;
}
}
}
}
impl FrameAllocator for AreaFrameAllocator {
fn allocate_frame(&mut self) -> Option<Frame> {
if let Some(area) = self.current_area {
let frame = Frame{ number: self.next_free_frame.number };
let current_area_last_frame = {
let address = area.base + area.length - 1;
Frame::containing_address(PhysicalAddress::new(address as usize))
};
if frame > current_area_last_frame {
self.choose_next_area();
} else if frame >= self.kernel_start && frame <= self.kernel_end {
self.next_free_frame = Frame { number: self.kernel_end.number + 1 };
} else {
self.next_free_frame.number += 1;
return Some(frame);
}
self.allocate_frame() // Try again
} else {
None // no free frames left
}
}
fn deallocate_frame(&mut self, _frame: Frame) {
unimplemented!()
}
}
|
use memory::paging::{PhysicalAddress};
#[derive(Debug)]
pub struct AreaFrameAllocator {
next_free_frame: Frame,
|
random_line_split
|
k_means.rs
|
//! K-means Classification
//!
//! Provides implementation of K-Means classification.
//!
//! # Usage
//!
//! ```
//! use rusty_machine::linalg::Matrix;
//! use rusty_machine::learning::k_means::KMeansClassifier;
//! use rusty_machine::learning::UnSupModel;
//!
//! let inputs = Matrix::new(3, 2, vec![1.0, 2.0, 1.0, 3.0, 1.0, 4.0]);
//! let test_inputs = Matrix::new(1, 2, vec![1.0, 3.5]);
//!
//! // Create model with k(=2) classes.
//! let mut model = KMeansClassifier::new(2);
//!
//! // Where inputs is a Matrix with features in columns.
//! model.train(&inputs);
//!
//! // Where test_inputs is a Matrix with features in columns.
//! let a = model.predict(&test_inputs);
//! ```
//!
//! Additionally you can control the initialization
//! algorithm and max number of iterations.
//!
//! # Initializations
//!
//! Three initialization algorithms are supported.
//!
//! ## Forgy initialization
//!
//! Choose initial centroids randomly from the data.
//!
//! ## Random Partition initialization
//!
//! Randomly assign each data point to one of k clusters.
//! The initial centroids are the mean of the data in their class.
//!
//! ## K-means++ initialization
//!
//! The [k-means++](https://en.wikipedia.org/wiki/K-means%2B%2B) scheme.
use linalg::BaseSlice;
use linalg::{Matrix, MatrixSlice, Axes};
use linalg::Vector;
use learning::UnSupModel;
use learning::error::{Error, ErrorKind};
use rand::{Rng, thread_rng};
use libnum::abs;
use std::fmt::Debug;
/// K-Means Classification model.
///
/// Contains option for centroids.
/// Specifies iterations and number of classes.
///
/// # Usage
///
/// This model is used through the `UnSupModel` trait. The model is
/// trained via the `train` function with a matrix containing rows of
/// feature vectors.
///
/// The model will not check to ensure the data coming in is all valid.
/// This responsibility lies with the user (for now).
#[derive(Debug)]
pub struct KMeansClassifier<InitAlg: Initializer> {
/// Max iterations of algorithm to run.
iters: usize,
/// The number of classes.
k: usize,
/// The fitted centroids.
centroids: Option<Matrix<f64>>,
/// The initial algorithm to use.
init_algorithm: InitAlg,
}
impl<InitAlg: Initializer> UnSupModel<Matrix<f64>, Vector<usize>> for KMeansClassifier<InitAlg> {
/// Predict classes from data.
///
/// Model must be trained.
fn predict(&self, inputs: &Matrix<f64>) -> Vector<usize> {
if let Some(ref centroids) = self.centroids {
return KMeansClassifier::<InitAlg>::find_closest_centroids(centroids.as_slice(),
inputs)
.0;
} else
|
}
/// Train the classifier using input data.
fn train(&mut self, inputs: &Matrix<f64>) {
self.init_centroids(inputs).expect("Could not initialize centroids.");
let mut cost = 0.0;
let eps = 1e-14;
for _i in 0..self.iters {
let (idx, distances) = self.get_closest_centroids(inputs);
self.update_centroids(inputs, idx);
let cost_i = distances.sum();
if abs(cost - cost_i) < eps {
break;
}
cost = cost_i;
}
}
}
impl KMeansClassifier<KPlusPlus> {
/// Constructs untrained k-means classifier model.
///
/// Requires number of classes to be specified.
/// Defaults to 100 iterations and kmeans++ initialization.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::k_means::KMeansClassifier;
///
/// let model = KMeansClassifier::new(5);
/// ```
pub fn new(k: usize) -> KMeansClassifier<KPlusPlus> {
KMeansClassifier {
iters: 100,
k: k,
centroids: None,
init_algorithm: KPlusPlus,
}
}
}
impl<InitAlg: Initializer> KMeansClassifier<InitAlg> {
/// Constructs untrained k-means classifier model.
///
/// Requires number of classes, number of iterations, and
/// the initialization algorithm to use.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::k_means::{KMeansClassifier, Forgy};
///
/// let model = KMeansClassifier::new_specified(5, 42, Forgy);
/// ```
pub fn new_specified(k: usize, iters: usize, algo: InitAlg) -> KMeansClassifier<InitAlg> {
KMeansClassifier {
iters: iters,
k: k,
centroids: None,
init_algorithm: algo,
}
}
/// Get the number of classes.
pub fn k(&self) -> usize {
self.k
}
/// Get the number of iterations.
pub fn iters(&self) -> usize {
self.iters
}
/// Get the initialization algorithm.
pub fn init_algorithm(&self) -> &InitAlg {
&self.init_algorithm
}
/// Get the centroids `Option<Matrix<f64>>`.
pub fn centroids(&self) -> &Option<Matrix<f64>> {
&self.centroids
}
/// Set the number of iterations.
pub fn set_iters(&mut self, iters: usize) {
self.iters = iters;
}
/// Initialize the centroids.
///
/// Used internally within model.
fn init_centroids(&mut self, inputs: &Matrix<f64>) -> Result<(), Error> {
if self.k > inputs.rows() {
Err(Error::new(ErrorKind::InvalidData,
format!("Number of clusters ({0}) exceeds number of data points \
({1}).",
self.k,
inputs.rows())))
} else {
let centroids = try!(self.init_algorithm.init_centroids(self.k, inputs));
assert!(centroids.rows() == self.k,
"Initial centroids must have exactly k rows.");
assert!(centroids.cols() == inputs.cols(),
"Initial centroids must have the same column count as inputs.");
self.centroids = Some(centroids);
Ok(())
}
}
    /// Update the centroids by computing the means of the assigned classes.
///
/// Used internally within model.
fn update_centroids(&mut self, inputs: &Matrix<f64>, classes: Vector<usize>) {
let mut new_centroids = Vec::with_capacity(self.k * inputs.cols());
let mut row_indexes = vec![Vec::new(); self.k];
for (i, c) in classes.into_vec().into_iter().enumerate() {
row_indexes.get_mut(c as usize).map(|v| v.push(i));
}
for vec_i in row_indexes {
let mat_i = inputs.select_rows(&vec_i);
new_centroids.extend(mat_i.mean(Axes::Row).into_vec());
}
self.centroids = Some(Matrix::new(self.k, inputs.cols(), new_centroids));
}
fn get_closest_centroids(&self, inputs: &Matrix<f64>) -> (Vector<usize>, Vector<f64>) {
if let Some(ref c) = self.centroids {
return KMeansClassifier::<InitAlg>::find_closest_centroids(c.as_slice(), inputs);
} else {
panic!("Centroids not correctly initialized.");
}
}
/// Find the centroid closest to each data point.
///
/// Used internally within model.
/// Returns the index of the closest centroid and the distance to it.
fn find_closest_centroids(centroids: MatrixSlice<f64>,
inputs: &Matrix<f64>)
-> (Vector<usize>, Vector<f64>) {
let mut idx = Vec::with_capacity(inputs.rows());
let mut distances = Vec::with_capacity(inputs.rows());
for i in 0..inputs.rows() {
// This works like repmat pulling out row i repeatedly.
let centroid_diff = centroids - inputs.select_rows(&vec![i; centroids.rows()]);
            let dist = &centroid_diff.elemul(&centroid_diff).sum_cols();
// Now take argmin and this is the centroid.
let (min_idx, min_dist) = dist.argmin();
idx.push(min_idx);
distances.push(min_dist);
}
(Vector::new(idx), Vector::new(distances))
}
}
/// Trait for algorithms initializing the K-means centroids.
pub trait Initializer: Debug {
/// Initialize the centroids for the initial state of the K-Means model.
///
/// The `Matrix` returned must have `k` rows and the same column count as `inputs`.
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error>;
}
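// Illustrative sketch only: a hypothetical initializer (not part of this crate) that returns
// caller-supplied centroids verbatim, demonstrating the contract above - the returned matrix
// must have exactly `k` rows and the same number of columns as `inputs`.
#[derive(Debug)]
pub struct FixedCentroids {
    /// The centroids to use as-is.
    pub centroids: Matrix<f64>,
}

impl Initializer for FixedCentroids {
    fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error> {
        if self.centroids.rows() != k || self.centroids.cols() != inputs.cols() {
            return Err(Error::new(ErrorKind::InvalidData,
                                  "Fixed centroids do not match k or the input column count."));
        }
        Ok(self.centroids.clone())
    }
}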
/// The Forgy initialization scheme.
#[derive(Debug)]
pub struct Forgy;
impl Initializer for Forgy {
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error> {
let mut random_choices = Vec::with_capacity(k);
let mut rng = thread_rng();
while random_choices.len() < k {
let r = rng.gen_range(0, inputs.rows());
            if !random_choices.contains(&r) {
random_choices.push(r);
}
}
Ok(inputs.select_rows(&random_choices))
}
}
/// The Random Partition initialization scheme.
#[derive(Debug)]
pub struct RandomPartition;
impl Initializer for RandomPartition {
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error> {
// Populate so we have something in each class.
let mut random_assignments = (0..k).map(|i| vec![i]).collect::<Vec<Vec<usize>>>();
let mut rng = thread_rng();
for i in k..inputs.rows() {
let idx = rng.gen_range(0, k);
unsafe { random_assignments.get_unchecked_mut(idx).push(i); }
}
let mut init_centroids = Vec::with_capacity(k * inputs.cols());
for vec_i in random_assignments {
let mat_i = inputs.select_rows(&vec_i);
init_centroids.extend_from_slice(&*mat_i.mean(Axes::Row).into_vec());
}
Ok(Matrix::new(k, inputs.cols(), init_centroids))
}
}
/// The K-means ++ initialization scheme.
#[derive(Debug)]
pub struct KPlusPlus;
impl Initializer for KPlusPlus {
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error> {
let mut rng = thread_rng();
let mut init_centroids = Vec::with_capacity(k * inputs.cols());
let first_cen = rng.gen_range(0usize, inputs.rows());
unsafe {
init_centroids.extend_from_slice(inputs.get_row_unchecked(first_cen));
}
for i in 1..k {
unsafe {
let temp_centroids = MatrixSlice::from_raw_parts(init_centroids.as_ptr(),
i,
inputs.cols(),
inputs.cols());
let (_, dist) =
KMeansClassifier::<KPlusPlus>::find_closest_centroids(temp_centroids, &inputs);
// A relatively cheap way to validate our input data
                if !dist.data().iter().all(|x| x.is_finite()) {
return Err(Error::new(ErrorKind::InvalidData,
"Input data led to invalid centroid distances during \
initialization."));
}
let next_cen = sample_discretely(dist);
init_centroids.extend_from_slice(inputs.get_row_unchecked(next_cen));
}
}
Ok(Matrix::new(k, inputs.cols(), init_centroids))
}
}
/// Sample from an unnormalized distribution.
///
/// The input to this function is assumed to have all positive entries.
fn sample_discretely(unnorm_dist: Vector<f64>) -> usize {
assert!(unnorm_dist.size() > 0, "No entries in distribution vector.");
let sum = unnorm_dist.sum();
let rand = thread_rng().gen_range(0.0f64, sum);
let mut tempsum = 0.0;
for (i, p) in unnorm_dist.data().iter().enumerate() {
tempsum += *p;
if rand < tempsum {
return i;
}
}
panic!("No random value was sampled! There may be more clusters than unique data points.");
}
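#[cfg(test)]
mod sample_discretely_tests {
    // Illustrative sketch only: exercises `sample_discretely` on a small hand-built weight
    // vector. With weights [1.0, 3.0, 6.0] the returned index is always in range, and index 2
    // is chosen most often - the same squared-distance weighting the k-means++ seeding relies on.
    use super::sample_discretely;
    use linalg::Vector;

    #[test]
    fn returns_an_index_within_bounds() {
        let weights = Vector::new(vec![1.0, 3.0, 6.0]);
        let idx = sample_discretely(weights);
        assert!(idx < 3);
    }
}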
|
{
panic!("Model has not been trained.");
}
|
conditional_block
|
k_means.rs
|
//! K-means Classification
//!
//! Provides implementation of K-Means classification.
//!
//! # Usage
//!
//! ```
//! use rusty_machine::linalg::Matrix;
//! use rusty_machine::learning::k_means::KMeansClassifier;
//! use rusty_machine::learning::UnSupModel;
//!
//! let inputs = Matrix::new(3, 2, vec![1.0, 2.0, 1.0, 3.0, 1.0, 4.0]);
//! let test_inputs = Matrix::new(1, 2, vec![1.0, 3.5]);
//!
//! // Create model with k(=2) classes.
//! let mut model = KMeansClassifier::new(2);
//!
//! // Where inputs is a Matrix with features in columns.
//! model.train(&inputs);
//!
//! // Where test_inputs is a Matrix with features in columns.
//! let a = model.predict(&test_inputs);
//! ```
//!
//! Additionally you can control the initialization
//! algorithm and max number of iterations.
//!
//! # Initializations
//!
//! Three initialization algorithms are supported.
//!
//! ## Forgy initialization
//!
//! Choose initial centroids randomly from the data.
//!
//! ## Random Partition initialization
//!
//! Randomly assign each data point to one of k clusters.
//! The initial centroids are the mean of the data in their class.
//!
//! ## K-means++ initialization
//!
//! The [k-means++](https://en.wikipedia.org/wiki/K-means%2B%2B) scheme.
use linalg::BaseSlice;
use linalg::{Matrix, MatrixSlice, Axes};
use linalg::Vector;
use learning::UnSupModel;
use learning::error::{Error, ErrorKind};
use rand::{Rng, thread_rng};
use libnum::abs;
use std::fmt::Debug;
/// K-Means Classification model.
///
/// Contains option for centroids.
/// Specifies iterations and number of classes.
///
/// # Usage
///
/// This model is used through the `UnSupModel` trait. The model is
/// trained via the `train` function with a matrix containing rows of
/// feature vectors.
///
/// The model will not check to ensure the data coming in is all valid.
/// This responsibility lies with the user (for now).
#[derive(Debug)]
pub struct KMeansClassifier<InitAlg: Initializer> {
/// Max iterations of algorithm to run.
iters: usize,
/// The number of classes.
k: usize,
/// The fitted centroids.
centroids: Option<Matrix<f64>>,
/// The initial algorithm to use.
init_algorithm: InitAlg,
}
impl<InitAlg: Initializer> UnSupModel<Matrix<f64>, Vector<usize>> for KMeansClassifier<InitAlg> {
/// Predict classes from data.
///
/// Model must be trained.
fn predict(&self, inputs: &Matrix<f64>) -> Vector<usize> {
if let Some(ref centroids) = self.centroids {
return KMeansClassifier::<InitAlg>::find_closest_centroids(centroids.as_slice(),
inputs)
.0;
} else {
panic!("Model has not been trained.");
}
}
/// Train the classifier using input data.
fn train(&mut self, inputs: &Matrix<f64>) {
self.init_centroids(inputs).expect("Could not initialize centroids.");
let mut cost = 0.0;
let eps = 1e-14;
for _i in 0..self.iters {
let (idx, distances) = self.get_closest_centroids(inputs);
self.update_centroids(inputs, idx);
let cost_i = distances.sum();
if abs(cost - cost_i) < eps {
break;
}
cost = cost_i;
}
}
}
impl KMeansClassifier<KPlusPlus> {
/// Constructs untrained k-means classifier model.
///
/// Requires number of classes to be specified.
/// Defaults to 100 iterations and kmeans++ initialization.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::k_means::KMeansClassifier;
///
/// let model = KMeansClassifier::new(5);
/// ```
pub fn new(k: usize) -> KMeansClassifier<KPlusPlus> {
KMeansClassifier {
iters: 100,
k: k,
centroids: None,
init_algorithm: KPlusPlus,
}
}
}
impl<InitAlg: Initializer> KMeansClassifier<InitAlg> {
/// Constructs untrained k-means classifier model.
///
/// Requires number of classes, number of iterations, and
/// the initialization algorithm to use.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::k_means::{KMeansClassifier, Forgy};
///
/// let model = KMeansClassifier::new_specified(5, 42, Forgy);
/// ```
pub fn new_specified(k: usize, iters: usize, algo: InitAlg) -> KMeansClassifier<InitAlg> {
KMeansClassifier {
iters: iters,
k: k,
centroids: None,
init_algorithm: algo,
}
}
/// Get the number of classes.
pub fn k(&self) -> usize {
self.k
}
/// Get the number of iterations.
pub fn iters(&self) -> usize {
self.iters
}
/// Get the initialization algorithm.
pub fn init_algorithm(&self) -> &InitAlg {
&self.init_algorithm
}
/// Get the centroids `Option<Matrix<f64>>`.
pub fn centroids(&self) -> &Option<Matrix<f64>> {
&self.centroids
}
/// Set the number of iterations.
pub fn set_iters(&mut self, iters: usize) {
self.iters = iters;
}
/// Initialize the centroids.
///
/// Used internally within model.
fn init_centroids(&mut self, inputs: &Matrix<f64>) -> Result<(), Error> {
if self.k > inputs.rows() {
Err(Error::new(ErrorKind::InvalidData,
format!("Number of clusters ({0}) exceeds number of data points \
({1}).",
self.k,
inputs.rows())))
} else {
let centroids = try!(self.init_algorithm.init_centroids(self.k, inputs));
assert!(centroids.rows() == self.k,
"Initial centroids must have exactly k rows.");
assert!(centroids.cols() == inputs.cols(),
"Initial centroids must have the same column count as inputs.");
self.centroids = Some(centroids);
Ok(())
}
}
    /// Update the centroids by computing means of assigned classes.
///
/// Used internally within model.
fn update_centroids(&mut self, inputs: &Matrix<f64>, classes: Vector<usize>) {
let mut new_centroids = Vec::with_capacity(self.k * inputs.cols());
let mut row_indexes = vec![Vec::new(); self.k];
for (i, c) in classes.into_vec().into_iter().enumerate() {
row_indexes.get_mut(c as usize).map(|v| v.push(i));
}
for vec_i in row_indexes {
let mat_i = inputs.select_rows(&vec_i);
new_centroids.extend(mat_i.mean(Axes::Row).into_vec());
}
self.centroids = Some(Matrix::new(self.k, inputs.cols(), new_centroids));
}
fn get_closest_centroids(&self, inputs: &Matrix<f64>) -> (Vector<usize>, Vector<f64>) {
if let Some(ref c) = self.centroids {
return KMeansClassifier::<InitAlg>::find_closest_centroids(c.as_slice(), inputs);
} else {
panic!("Centroids not correctly initialized.");
}
}
/// Find the centroid closest to each data point.
///
/// Used internally within model.
/// Returns the index of the closest centroid and the distance to it.
fn find_closest_centroids(centroids: MatrixSlice<f64>,
inputs: &Matrix<f64>)
-> (Vector<usize>, Vector<f64>) {
let mut idx = Vec::with_capacity(inputs.rows());
let mut distances = Vec::with_capacity(inputs.rows());
for i in 0..inputs.rows() {
// This works like repmat pulling out row i repeatedly.
let centroid_diff = centroids - inputs.select_rows(&vec![i; centroids.rows()]);
            let dist = &centroid_diff.elemul(&centroid_diff).sum_cols();
// Now take argmin and this is the centroid.
let (min_idx, min_dist) = dist.argmin();
idx.push(min_idx);
distances.push(min_dist);
}
(Vector::new(idx), Vector::new(distances))
}
}
/// Trait for algorithms initializing the K-means centroids.
pub trait Initializer: Debug {
/// Initialize the centroids for the initial state of the K-Means model.
///
/// The `Matrix` returned must have `k` rows and the same column count as `inputs`.
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error>;
}
/// The Forgy initialization scheme.
#[derive(Debug)]
pub struct Forgy;
impl Initializer for Forgy {
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error> {
let mut random_choices = Vec::with_capacity(k);
let mut rng = thread_rng();
while random_choices.len() < k {
let r = rng.gen_range(0, inputs.rows());
            if !random_choices.contains(&r) {
random_choices.push(r);
}
}
Ok(inputs.select_rows(&random_choices))
}
}
/// The Random Partition initialization scheme.
#[derive(Debug)]
pub struct RandomPartition;
impl Initializer for RandomPartition {
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error>
|
}
/// The K-means ++ initialization scheme.
#[derive(Debug)]
pub struct KPlusPlus;
impl Initializer for KPlusPlus {
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error> {
let mut rng = thread_rng();
let mut init_centroids = Vec::with_capacity(k * inputs.cols());
let first_cen = rng.gen_range(0usize, inputs.rows());
unsafe {
init_centroids.extend_from_slice(inputs.get_row_unchecked(first_cen));
}
for i in 1..k {
unsafe {
let temp_centroids = MatrixSlice::from_raw_parts(init_centroids.as_ptr(),
i,
inputs.cols(),
inputs.cols());
let (_, dist) =
KMeansClassifier::<KPlusPlus>::find_closest_centroids(temp_centroids, &inputs);
// A relatively cheap way to validate our input data
                if !dist.data().iter().all(|x| x.is_finite()) {
return Err(Error::new(ErrorKind::InvalidData,
"Input data led to invalid centroid distances during \
initialization."));
}
let next_cen = sample_discretely(dist);
init_centroids.extend_from_slice(inputs.get_row_unchecked(next_cen));
}
}
Ok(Matrix::new(k, inputs.cols(), init_centroids))
}
}
/// Sample from an unnormalized distribution.
///
/// The input to this function is assumed to have all positive entries.
fn sample_discretely(unnorm_dist: Vector<f64>) -> usize {
assert!(unnorm_dist.size() > 0, "No entries in distribution vector.");
let sum = unnorm_dist.sum();
let rand = thread_rng().gen_range(0.0f64, sum);
let mut tempsum = 0.0;
for (i, p) in unnorm_dist.data().iter().enumerate() {
tempsum += *p;
if rand < tempsum {
return i;
}
}
panic!("No random value was sampled! There may be more clusters than unique data points.");
}
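// Worked illustration (not part of the source above) of the cumulative-sum
// sampling performed by `sample_discretely`: with unnormalized weights
// [1.0, 3.0, 6.0] the running totals are 1.0, 4.0 and 10.0, so a uniform
// draw r in [0, 10) returns index 0 with probability 0.1, index 1 with 0.3
// and index 2 with 0.6.
#[allow(dead_code)]
fn example_sample_discretely() -> usize {
    let weights = Vector::new(vec![1.0, 3.0, 6.0]);
    sample_discretely(weights)
}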
|
{
// Populate so we have something in each class.
let mut random_assignments = (0..k).map(|i| vec![i]).collect::<Vec<Vec<usize>>>();
let mut rng = thread_rng();
for i in k..inputs.rows() {
let idx = rng.gen_range(0, k);
unsafe { random_assignments.get_unchecked_mut(idx).push(i); }
}
let mut init_centroids = Vec::with_capacity(k * inputs.cols());
for vec_i in random_assignments {
let mat_i = inputs.select_rows(&vec_i);
init_centroids.extend_from_slice(&*mat_i.mean(Axes::Row).into_vec());
}
Ok(Matrix::new(k, inputs.cols(), init_centroids))
}
|
identifier_body
|
k_means.rs
|
//! K-means Classification
//!
//! Provides implementation of K-Means classification.
//!
//! # Usage
//!
//! ```
//! use rusty_machine::linalg::Matrix;
//! use rusty_machine::learning::k_means::KMeansClassifier;
//! use rusty_machine::learning::UnSupModel;
//!
//! let inputs = Matrix::new(3, 2, vec![1.0, 2.0, 1.0, 3.0, 1.0, 4.0]);
//! let test_inputs = Matrix::new(1, 2, vec![1.0, 3.5]);
//!
//! // Create model with k(=2) classes.
//! let mut model = KMeansClassifier::new(2);
//!
//! // Where inputs is a Matrix with features in columns.
//! model.train(&inputs);
//!
//! // Where test_inputs is a Matrix with features in columns.
//! let a = model.predict(&test_inputs);
//! ```
//!
//! Additionally you can control the initialization
//! algorithm and max number of iterations.
//!
//! # Initializations
//!
//! Three initialization algorithms are supported.
//!
//! ## Forgy initialization
//!
//! Choose initial centroids randomly from the data.
//!
//! ## Random Partition initialization
//!
//! Randomly assign each data point to one of k clusters.
//! The initial centroids are the mean of the data in their class.
//!
//! ## K-means++ initialization
//!
//! The [k-means++](https://en.wikipedia.org/wiki/K-means%2B%2B) scheme.
use linalg::BaseSlice;
use linalg::{Matrix, MatrixSlice, Axes};
use linalg::Vector;
use learning::UnSupModel;
use learning::error::{Error, ErrorKind};
use rand::{Rng, thread_rng};
use libnum::abs;
use std::fmt::Debug;
/// K-Means Classification model.
///
/// Contains option for centroids.
/// Specifies iterations and number of classes.
///
/// # Usage
///
/// This model is used through the `UnSupModel` trait. The model is
/// trained via the `train` function with a matrix containing rows of
/// feature vectors.
///
/// The model will not check to ensure the data coming in is all valid.
/// This responsibility lies with the user (for now).
#[derive(Debug)]
pub struct KMeansClassifier<InitAlg: Initializer> {
/// Max iterations of algorithm to run.
iters: usize,
/// The number of classes.
k: usize,
/// The fitted centroids.
centroids: Option<Matrix<f64>>,
/// The initial algorithm to use.
init_algorithm: InitAlg,
}
impl<InitAlg: Initializer> UnSupModel<Matrix<f64>, Vector<usize>> for KMeansClassifier<InitAlg> {
/// Predict classes from data.
///
/// Model must be trained.
fn predict(&self, inputs: &Matrix<f64>) -> Vector<usize> {
if let Some(ref centroids) = self.centroids {
return KMeansClassifier::<InitAlg>::find_closest_centroids(centroids.as_slice(),
inputs)
.0;
} else {
panic!("Model has not been trained.");
}
}
/// Train the classifier using input data.
fn train(&mut self, inputs: &Matrix<f64>) {
self.init_centroids(inputs).expect("Could not initialize centroids.");
let mut cost = 0.0;
let eps = 1e-14;
for _i in 0..self.iters {
let (idx, distances) = self.get_closest_centroids(inputs);
self.update_centroids(inputs, idx);
let cost_i = distances.sum();
if abs(cost - cost_i) < eps {
break;
}
cost = cost_i;
}
}
}
impl KMeansClassifier<KPlusPlus> {
/// Constructs untrained k-means classifier model.
///
/// Requires number of classes to be specified.
/// Defaults to 100 iterations and kmeans++ initialization.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::k_means::KMeansClassifier;
///
/// let model = KMeansClassifier::new(5);
/// ```
pub fn new(k: usize) -> KMeansClassifier<KPlusPlus> {
KMeansClassifier {
iters: 100,
k: k,
centroids: None,
init_algorithm: KPlusPlus,
}
}
}
impl<InitAlg: Initializer> KMeansClassifier<InitAlg> {
/// Constructs untrained k-means classifier model.
///
/// Requires number of classes, number of iterations, and
/// the initialization algorithm to use.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::k_means::{KMeansClassifier, Forgy};
///
/// let model = KMeansClassifier::new_specified(5, 42, Forgy);
/// ```
pub fn new_specified(k: usize, iters: usize, algo: InitAlg) -> KMeansClassifier<InitAlg> {
KMeansClassifier {
iters: iters,
k: k,
centroids: None,
init_algorithm: algo,
}
}
/// Get the number of classes.
pub fn k(&self) -> usize {
self.k
}
/// Get the number of iterations.
pub fn iters(&self) -> usize {
self.iters
}
/// Get the initialization algorithm.
pub fn init_algorithm(&self) -> &InitAlg {
&self.init_algorithm
}
/// Get the centroids `Option<Matrix<f64>>`.
pub fn centroids(&self) -> &Option<Matrix<f64>> {
&self.centroids
}
/// Set the number of iterations.
pub fn set_iters(&mut self, iters: usize) {
self.iters = iters;
}
/// Initialize the centroids.
///
/// Used internally within model.
fn init_centroids(&mut self, inputs: &Matrix<f64>) -> Result<(), Error> {
if self.k > inputs.rows() {
Err(Error::new(ErrorKind::InvalidData,
format!("Number of clusters ({0}) exceeds number of data points \
({1}).",
self.k,
inputs.rows())))
} else {
let centroids = try!(self.init_algorithm.init_centroids(self.k, inputs));
assert!(centroids.rows() == self.k,
"Initial centroids must have exactly k rows.");
assert!(centroids.cols() == inputs.cols(),
"Initial centroids must have the same column count as inputs.");
self.centroids = Some(centroids);
Ok(())
}
}
    /// Update the centroids by computing means of assigned classes.
///
/// Used internally within model.
fn update_centroids(&mut self, inputs: &Matrix<f64>, classes: Vector<usize>) {
let mut new_centroids = Vec::with_capacity(self.k * inputs.cols());
let mut row_indexes = vec![Vec::new(); self.k];
for (i, c) in classes.into_vec().into_iter().enumerate() {
row_indexes.get_mut(c as usize).map(|v| v.push(i));
}
for vec_i in row_indexes {
let mat_i = inputs.select_rows(&vec_i);
new_centroids.extend(mat_i.mean(Axes::Row).into_vec());
}
self.centroids = Some(Matrix::new(self.k, inputs.cols(), new_centroids));
}
fn get_closest_centroids(&self, inputs: &Matrix<f64>) -> (Vector<usize>, Vector<f64>) {
if let Some(ref c) = self.centroids {
return KMeansClassifier::<InitAlg>::find_closest_centroids(c.as_slice(), inputs);
} else {
panic!("Centroids not correctly initialized.");
}
}
/// Find the centroid closest to each data point.
///
/// Used internally within model.
/// Returns the index of the closest centroid and the distance to it.
fn find_closest_centroids(centroids: MatrixSlice<f64>,
inputs: &Matrix<f64>)
-> (Vector<usize>, Vector<f64>) {
let mut idx = Vec::with_capacity(inputs.rows());
let mut distances = Vec::with_capacity(inputs.rows());
for i in 0..inputs.rows() {
// This works like repmat pulling out row i repeatedly.
let centroid_diff = centroids - inputs.select_rows(&vec![i; centroids.rows()]);
            let dist = &centroid_diff.elemul(&centroid_diff).sum_cols();
// Now take argmin and this is the centroid.
let (min_idx, min_dist) = dist.argmin();
idx.push(min_idx);
distances.push(min_dist);
}
(Vector::new(idx), Vector::new(distances))
}
}
/// Trait for algorithms initializing the K-means centroids.
pub trait Initializer: Debug {
/// Initialize the centroids for the initial state of the K-Means model.
///
/// The `Matrix` returned must have `k` rows and the same column count as `inputs`.
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error>;
}
/// The Forgy initialization scheme.
#[derive(Debug)]
pub struct Forgy;
impl Initializer for Forgy {
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error> {
let mut random_choices = Vec::with_capacity(k);
let mut rng = thread_rng();
while random_choices.len() < k {
let r = rng.gen_range(0, inputs.rows());
            if !random_choices.contains(&r) {
random_choices.push(r);
}
}
Ok(inputs.select_rows(&random_choices))
}
}
/// The Random Partition initialization scheme.
#[derive(Debug)]
pub struct
|
;
impl Initializer for RandomPartition {
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error> {
// Populate so we have something in each class.
let mut random_assignments = (0..k).map(|i| vec![i]).collect::<Vec<Vec<usize>>>();
let mut rng = thread_rng();
for i in k..inputs.rows() {
let idx = rng.gen_range(0, k);
unsafe { random_assignments.get_unchecked_mut(idx).push(i); }
}
let mut init_centroids = Vec::with_capacity(k * inputs.cols());
for vec_i in random_assignments {
let mat_i = inputs.select_rows(&vec_i);
init_centroids.extend_from_slice(&*mat_i.mean(Axes::Row).into_vec());
}
Ok(Matrix::new(k, inputs.cols(), init_centroids))
}
}
/// The K-means ++ initialization scheme.
#[derive(Debug)]
pub struct KPlusPlus;
impl Initializer for KPlusPlus {
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error> {
let mut rng = thread_rng();
let mut init_centroids = Vec::with_capacity(k * inputs.cols());
let first_cen = rng.gen_range(0usize, inputs.rows());
unsafe {
init_centroids.extend_from_slice(inputs.get_row_unchecked(first_cen));
}
for i in 1..k {
unsafe {
let temp_centroids = MatrixSlice::from_raw_parts(init_centroids.as_ptr(),
i,
inputs.cols(),
inputs.cols());
let (_, dist) =
KMeansClassifier::<KPlusPlus>::find_closest_centroids(temp_centroids, &inputs);
// A relatively cheap way to validate our input data
                if !dist.data().iter().all(|x| x.is_finite()) {
return Err(Error::new(ErrorKind::InvalidData,
"Input data led to invalid centroid distances during \
initialization."));
}
let next_cen = sample_discretely(dist);
init_centroids.extend_from_slice(inputs.get_row_unchecked(next_cen));
}
}
Ok(Matrix::new(k, inputs.cols(), init_centroids))
}
}
/// Sample from an unnormalized distribution.
///
/// The input to this function is assumed to have all positive entries.
fn sample_discretely(unnorm_dist: Vector<f64>) -> usize {
assert!(unnorm_dist.size() > 0, "No entries in distribution vector.");
let sum = unnorm_dist.sum();
let rand = thread_rng().gen_range(0.0f64, sum);
let mut tempsum = 0.0;
for (i, p) in unnorm_dist.data().iter().enumerate() {
tempsum += *p;
if rand < tempsum {
return i;
}
}
panic!("No random value was sampled! There may be more clusters than unique data points.");
}
|
RandomPartition
|
identifier_name
|
k_means.rs
|
//! K-means Classification
//!
//! Provides implementation of K-Means classification.
//!
//! # Usage
//!
//! ```
//! use rusty_machine::linalg::Matrix;
//! use rusty_machine::learning::k_means::KMeansClassifier;
//! use rusty_machine::learning::UnSupModel;
//!
//! let inputs = Matrix::new(3, 2, vec![1.0, 2.0, 1.0, 3.0, 1.0, 4.0]);
//! let test_inputs = Matrix::new(1, 2, vec![1.0, 3.5]);
//!
|
//!
//! // Where test_inputs is a Matrix with features in columns.
//! let a = model.predict(&test_inputs);
//! ```
//!
//! Additionally you can control the initialization
//! algorithm and max number of iterations.
//!
//! # Initializations
//!
//! Three initialization algorithms are supported.
//!
//! ## Forgy initialization
//!
//! Choose initial centroids randomly from the data.
//!
//! ## Random Partition initialization
//!
//! Randomly assign each data point to one of k clusters.
//! The initial centroids are the mean of the data in their class.
//!
//! ## K-means++ initialization
//!
//! The [k-means++](https://en.wikipedia.org/wiki/K-means%2B%2B) scheme.
use linalg::BaseSlice;
use linalg::{Matrix, MatrixSlice, Axes};
use linalg::Vector;
use learning::UnSupModel;
use learning::error::{Error, ErrorKind};
use rand::{Rng, thread_rng};
use libnum::abs;
use std::fmt::Debug;
/// K-Means Classification model.
///
/// Contains option for centroids.
/// Specifies iterations and number of classes.
///
/// # Usage
///
/// This model is used through the `UnSupModel` trait. The model is
/// trained via the `train` function with a matrix containing rows of
/// feature vectors.
///
/// The model will not check to ensure the data coming in is all valid.
/// This responsibility lies with the user (for now).
#[derive(Debug)]
pub struct KMeansClassifier<InitAlg: Initializer> {
/// Max iterations of algorithm to run.
iters: usize,
/// The number of classes.
k: usize,
/// The fitted centroids.
centroids: Option<Matrix<f64>>,
/// The initial algorithm to use.
init_algorithm: InitAlg,
}
impl<InitAlg: Initializer> UnSupModel<Matrix<f64>, Vector<usize>> for KMeansClassifier<InitAlg> {
/// Predict classes from data.
///
/// Model must be trained.
fn predict(&self, inputs: &Matrix<f64>) -> Vector<usize> {
if let Some(ref centroids) = self.centroids {
return KMeansClassifier::<InitAlg>::find_closest_centroids(centroids.as_slice(),
inputs)
.0;
} else {
panic!("Model has not been trained.");
}
}
/// Train the classifier using input data.
fn train(&mut self, inputs: &Matrix<f64>) {
self.init_centroids(inputs).expect("Could not initialize centroids.");
let mut cost = 0.0;
let eps = 1e-14;
for _i in 0..self.iters {
let (idx, distances) = self.get_closest_centroids(inputs);
self.update_centroids(inputs, idx);
let cost_i = distances.sum();
if abs(cost - cost_i) < eps {
break;
}
cost = cost_i;
}
}
}
impl KMeansClassifier<KPlusPlus> {
/// Constructs untrained k-means classifier model.
///
/// Requires number of classes to be specified.
/// Defaults to 100 iterations and kmeans++ initialization.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::k_means::KMeansClassifier;
///
/// let model = KMeansClassifier::new(5);
/// ```
pub fn new(k: usize) -> KMeansClassifier<KPlusPlus> {
KMeansClassifier {
iters: 100,
k: k,
centroids: None,
init_algorithm: KPlusPlus,
}
}
}
impl<InitAlg: Initializer> KMeansClassifier<InitAlg> {
/// Constructs untrained k-means classifier model.
///
/// Requires number of classes, number of iterations, and
/// the initialization algorithm to use.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::k_means::{KMeansClassifier, Forgy};
///
/// let model = KMeansClassifier::new_specified(5, 42, Forgy);
/// ```
pub fn new_specified(k: usize, iters: usize, algo: InitAlg) -> KMeansClassifier<InitAlg> {
KMeansClassifier {
iters: iters,
k: k,
centroids: None,
init_algorithm: algo,
}
}
/// Get the number of classes.
pub fn k(&self) -> usize {
self.k
}
/// Get the number of iterations.
pub fn iters(&self) -> usize {
self.iters
}
/// Get the initialization algorithm.
pub fn init_algorithm(&self) -> &InitAlg {
&self.init_algorithm
}
/// Get the centroids `Option<Matrix<f64>>`.
pub fn centroids(&self) -> &Option<Matrix<f64>> {
&self.centroids
}
/// Set the number of iterations.
pub fn set_iters(&mut self, iters: usize) {
self.iters = iters;
}
/// Initialize the centroids.
///
/// Used internally within model.
fn init_centroids(&mut self, inputs: &Matrix<f64>) -> Result<(), Error> {
if self.k > inputs.rows() {
Err(Error::new(ErrorKind::InvalidData,
format!("Number of clusters ({0}) exceeds number of data points \
({1}).",
self.k,
inputs.rows())))
} else {
let centroids = try!(self.init_algorithm.init_centroids(self.k, inputs));
assert!(centroids.rows() == self.k,
"Initial centroids must have exactly k rows.");
assert!(centroids.cols() == inputs.cols(),
"Initial centroids must have the same column count as inputs.");
self.centroids = Some(centroids);
Ok(())
}
}
    /// Update the centroids by computing means of assigned classes.
///
/// Used internally within model.
fn update_centroids(&mut self, inputs: &Matrix<f64>, classes: Vector<usize>) {
let mut new_centroids = Vec::with_capacity(self.k * inputs.cols());
let mut row_indexes = vec![Vec::new(); self.k];
for (i, c) in classes.into_vec().into_iter().enumerate() {
row_indexes.get_mut(c as usize).map(|v| v.push(i));
}
for vec_i in row_indexes {
let mat_i = inputs.select_rows(&vec_i);
new_centroids.extend(mat_i.mean(Axes::Row).into_vec());
}
self.centroids = Some(Matrix::new(self.k, inputs.cols(), new_centroids));
}
fn get_closest_centroids(&self, inputs: &Matrix<f64>) -> (Vector<usize>, Vector<f64>) {
if let Some(ref c) = self.centroids {
return KMeansClassifier::<InitAlg>::find_closest_centroids(c.as_slice(), inputs);
} else {
panic!("Centroids not correctly initialized.");
}
}
/// Find the centroid closest to each data point.
///
/// Used internally within model.
/// Returns the index of the closest centroid and the distance to it.
fn find_closest_centroids(centroids: MatrixSlice<f64>,
inputs: &Matrix<f64>)
-> (Vector<usize>, Vector<f64>) {
let mut idx = Vec::with_capacity(inputs.rows());
let mut distances = Vec::with_capacity(inputs.rows());
for i in 0..inputs.rows() {
// This works like repmat pulling out row i repeatedly.
let centroid_diff = centroids - inputs.select_rows(&vec![i; centroids.rows()]);
            let dist = &centroid_diff.elemul(&centroid_diff).sum_cols();
// Now take argmin and this is the centroid.
let (min_idx, min_dist) = dist.argmin();
idx.push(min_idx);
distances.push(min_dist);
}
(Vector::new(idx), Vector::new(distances))
}
}
/// Trait for algorithms initializing the K-means centroids.
pub trait Initializer: Debug {
/// Initialize the centroids for the initial state of the K-Means model.
///
/// The `Matrix` returned must have `k` rows and the same column count as `inputs`.
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error>;
}
/// The Forgy initialization scheme.
#[derive(Debug)]
pub struct Forgy;
impl Initializer for Forgy {
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error> {
let mut random_choices = Vec::with_capacity(k);
let mut rng = thread_rng();
while random_choices.len() < k {
let r = rng.gen_range(0, inputs.rows());
            if !random_choices.contains(&r) {
random_choices.push(r);
}
}
Ok(inputs.select_rows(&random_choices))
}
}
/// The Random Partition initialization scheme.
#[derive(Debug)]
pub struct RandomPartition;
impl Initializer for RandomPartition {
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error> {
// Populate so we have something in each class.
let mut random_assignments = (0..k).map(|i| vec![i]).collect::<Vec<Vec<usize>>>();
let mut rng = thread_rng();
for i in k..inputs.rows() {
let idx = rng.gen_range(0, k);
unsafe { random_assignments.get_unchecked_mut(idx).push(i); }
}
let mut init_centroids = Vec::with_capacity(k * inputs.cols());
for vec_i in random_assignments {
let mat_i = inputs.select_rows(&vec_i);
init_centroids.extend_from_slice(&*mat_i.mean(Axes::Row).into_vec());
}
Ok(Matrix::new(k, inputs.cols(), init_centroids))
}
}
/// The K-means ++ initialization scheme.
#[derive(Debug)]
pub struct KPlusPlus;
impl Initializer for KPlusPlus {
fn init_centroids(&self, k: usize, inputs: &Matrix<f64>) -> Result<Matrix<f64>, Error> {
let mut rng = thread_rng();
let mut init_centroids = Vec::with_capacity(k * inputs.cols());
let first_cen = rng.gen_range(0usize, inputs.rows());
unsafe {
init_centroids.extend_from_slice(inputs.get_row_unchecked(first_cen));
}
for i in 1..k {
unsafe {
let temp_centroids = MatrixSlice::from_raw_parts(init_centroids.as_ptr(),
i,
inputs.cols(),
inputs.cols());
let (_, dist) =
KMeansClassifier::<KPlusPlus>::find_closest_centroids(temp_centroids, &inputs);
// A relatively cheap way to validate our input data
                if !dist.data().iter().all(|x| x.is_finite()) {
return Err(Error::new(ErrorKind::InvalidData,
"Input data led to invalid centroid distances during \
initialization."));
}
let next_cen = sample_discretely(dist);
init_centroids.extend_from_slice(inputs.get_row_unchecked(next_cen));
}
}
Ok(Matrix::new(k, inputs.cols(), init_centroids))
}
}
/// Sample from an unnormalized distribution.
///
/// The input to this function is assumed to have all positive entries.
fn sample_discretely(unnorm_dist: Vector<f64>) -> usize {
assert!(unnorm_dist.size() > 0, "No entries in distribution vector.");
let sum = unnorm_dist.sum();
let rand = thread_rng().gen_range(0.0f64, sum);
let mut tempsum = 0.0;
for (i, p) in unnorm_dist.data().iter().enumerate() {
tempsum += *p;
if rand < tempsum {
return i;
}
}
panic!("No random value was sampled! There may be more clusters than unique data points.");
}
|
//! // Create model with k(=2) classes.
//! let mut model = KMeansClassifier::new(2);
//!
//! // Where inputs is a Matrix with features in columns.
//! model.train(&inputs);
|
random_line_split
|
eve2pcap.rs
|
// Copyright (C) 2020 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use axum::extract::{Extension, Form};
use axum::http::header::HeaderName;
use axum::http::HeaderValue;
use axum::response::{Headers, IntoResponse};
use std::sync::Arc;
use crate::prelude::*;
use serde::Deserialize;
use crate::eve::eve::EveJson;
use crate::eve::Eve;
use crate::pcap;
use crate::server::api::ApiError;
use crate::server::main::SessionExtractor;
use crate::server::ServerContext;
#[derive(Deserialize, Debug)]
pub struct PcapForm {
pub what: String,
pub event: String,
}
pub(crate) async fn handler(
Extension(_context): Extension<Arc<ServerContext>>,
_session: SessionExtractor,
Form(form): Form<PcapForm>,
) -> Result<impl IntoResponse, ApiError> {
let headers = Headers(vec![
(
HeaderName::from_static("content-type"),
HeaderValue::from_static("application/vnc.tcpdump.pcap"),
),
(
HeaderName::from_static("content-disposition"),
HeaderValue::from_static("attachment; filename=event.pcap"),
),
]);
let event: EveJson = serde_json::from_str(&form.event)
.map_err(|err| ApiError::BadRequest(format!("failed to decode event: {}", err)))?;
match form.what.as_ref() {
"packet" => {
let linktype = if let Some(linktype) = &event["xpacket_info"]["linktype"].as_u64() {
*linktype as u32
} else {
warn!("No usable link-type in event, will use ethernet");
pcap::LinkType::Ethernet as u32
};
let packet = &event["packet"]
.as_str()
.map(base64::decode)
.ok_or_else(|| ApiError::BadRequest("no packet in event".to_string()))?
.map_err(|err| {
ApiError::BadRequest(format!("failed to base64 decode packet: {}", err))
})?;
|
}
"payload" => {
let ts = event.timestamp().ok_or_else(|| {
ApiError::BadRequest("bad or missing timestamp field".to_string())
})?;
let packet = pcap::packet_from_payload(&event).map_err(|err| {
let msg = format!("Failed to create packet from payload: {:?}", err);
warn!("{}", msg);
ApiError::BadRequest(msg)
})?;
let pcap_buffer = pcap::create(pcap::LinkType::Raw as u32, ts, &packet);
return Ok((headers, pcap_buffer));
}
_ => {
return Err(ApiError::BadRequest("invalid value for what".to_string()));
}
}
}
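// Hypothetical illustration (not part of the original handler above): the
// shape of the form this endpoint consumes. The `what` and `event` field
// names come from `PcapForm`; the embedded EVE JSON is a made-up minimal
// record, and the exact field names expected inside it ("timestamp",
// "packet") are assumptions based on how the handler reads the event.
#[allow(dead_code)]
fn example_pcap_form() -> PcapForm {
    PcapForm {
        what: "packet".to_string(),
        event: r#"{"timestamp":"2020-01-01T00:00:00.000000+0000","packet":"AAAAAA=="}"#.to_string(),
    }
}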
|
let ts = event.timestamp().ok_or_else(|| {
ApiError::BadRequest("bad or missing timestamp field".to_string())
})?;
let pcap_buffer = pcap::create(linktype, ts, packet);
return Ok((headers, pcap_buffer));
|
random_line_split
|
eve2pcap.rs
|
// Copyright (C) 2020 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use axum::extract::{Extension, Form};
use axum::http::header::HeaderName;
use axum::http::HeaderValue;
use axum::response::{Headers, IntoResponse};
use std::sync::Arc;
use crate::prelude::*;
use serde::Deserialize;
use crate::eve::eve::EveJson;
use crate::eve::Eve;
use crate::pcap;
use crate::server::api::ApiError;
use crate::server::main::SessionExtractor;
use crate::server::ServerContext;
#[derive(Deserialize, Debug)]
pub struct PcapForm {
pub what: String,
pub event: String,
}
pub(crate) async fn handler(
Extension(_context): Extension<Arc<ServerContext>>,
_session: SessionExtractor,
Form(form): Form<PcapForm>,
) -> Result<impl IntoResponse, ApiError> {
let headers = Headers(vec![
(
HeaderName::from_static("content-type"),
HeaderValue::from_static("application/vnc.tcpdump.pcap"),
),
(
HeaderName::from_static("content-disposition"),
HeaderValue::from_static("attachment; filename=event.pcap"),
),
]);
let event: EveJson = serde_json::from_str(&form.event)
.map_err(|err| ApiError::BadRequest(format!("failed to decode event: {}", err)))?;
match form.what.as_ref() {
"packet" => {
let linktype = if let Some(linktype) = &event["xpacket_info"]["linktype"].as_u64() {
*linktype as u32
} else {
warn!("No usable link-type in event, will use ethernet");
pcap::LinkType::Ethernet as u32
};
let packet = &event["packet"]
.as_str()
.map(base64::decode)
.ok_or_else(|| ApiError::BadRequest("no packet in event".to_string()))?
.map_err(|err| {
ApiError::BadRequest(format!("failed to base64 decode packet: {}", err))
})?;
let ts = event.timestamp().ok_or_else(|| {
ApiError::BadRequest("bad or missing timestamp field".to_string())
})?;
let pcap_buffer = pcap::create(linktype, ts, packet);
return Ok((headers, pcap_buffer));
}
"payload" =>
|
_ => {
return Err(ApiError::BadRequest("invalid value for what".to_string()));
}
}
}
|
{
let ts = event.timestamp().ok_or_else(|| {
ApiError::BadRequest("bad or missing timestamp field".to_string())
})?;
let packet = pcap::packet_from_payload(&event).map_err(|err| {
let msg = format!("Failed to create packet from payload: {:?}", err);
warn!("{}", msg);
ApiError::BadRequest(msg)
})?;
let pcap_buffer = pcap::create(pcap::LinkType::Raw as u32, ts, &packet);
return Ok((headers, pcap_buffer));
}
|
conditional_block
|
eve2pcap.rs
|
// Copyright (C) 2020 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use axum::extract::{Extension, Form};
use axum::http::header::HeaderName;
use axum::http::HeaderValue;
use axum::response::{Headers, IntoResponse};
use std::sync::Arc;
use crate::prelude::*;
use serde::Deserialize;
use crate::eve::eve::EveJson;
use crate::eve::Eve;
use crate::pcap;
use crate::server::api::ApiError;
use crate::server::main::SessionExtractor;
use crate::server::ServerContext;
#[derive(Deserialize, Debug)]
pub struct PcapForm {
pub what: String,
pub event: String,
}
pub(crate) async fn handler(
Extension(_context): Extension<Arc<ServerContext>>,
_session: SessionExtractor,
Form(form): Form<PcapForm>,
) -> Result<impl IntoResponse, ApiError>
|
pcap::LinkType::Ethernet as u32
};
let packet = &event["packet"]
.as_str()
.map(base64::decode)
.ok_or_else(|| ApiError::BadRequest("no packet in event".to_string()))?
.map_err(|err| {
ApiError::BadRequest(format!("failed to base64 decode packet: {}", err))
})?;
let ts = event.timestamp().ok_or_else(|| {
ApiError::BadRequest("bad or missing timestamp field".to_string())
})?;
let pcap_buffer = pcap::create(linktype, ts, packet);
return Ok((headers, pcap_buffer));
}
"payload" => {
let ts = event.timestamp().ok_or_else(|| {
ApiError::BadRequest("bad or missing timestamp field".to_string())
})?;
let packet = pcap::packet_from_payload(&event).map_err(|err| {
let msg = format!("Failed to create packet from payload: {:?}", err);
warn!("{}", msg);
ApiError::BadRequest(msg)
})?;
let pcap_buffer = pcap::create(pcap::LinkType::Raw as u32, ts, &packet);
return Ok((headers, pcap_buffer));
}
_ => {
return Err(ApiError::BadRequest("invalid value for what".to_string()));
}
}
}
|
{
let headers = Headers(vec![
(
HeaderName::from_static("content-type"),
HeaderValue::from_static("application/vnc.tcpdump.pcap"),
),
(
HeaderName::from_static("content-disposition"),
HeaderValue::from_static("attachment; filename=event.pcap"),
),
]);
let event: EveJson = serde_json::from_str(&form.event)
.map_err(|err| ApiError::BadRequest(format!("failed to decode event: {}", err)))?;
match form.what.as_ref() {
"packet" => {
let linktype = if let Some(linktype) = &event["xpacket_info"]["linktype"].as_u64() {
*linktype as u32
} else {
warn!("No usable link-type in event, will use ethernet");
|
identifier_body
|
eve2pcap.rs
|
// Copyright (C) 2020 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use axum::extract::{Extension, Form};
use axum::http::header::HeaderName;
use axum::http::HeaderValue;
use axum::response::{Headers, IntoResponse};
use std::sync::Arc;
use crate::prelude::*;
use serde::Deserialize;
use crate::eve::eve::EveJson;
use crate::eve::Eve;
use crate::pcap;
use crate::server::api::ApiError;
use crate::server::main::SessionExtractor;
use crate::server::ServerContext;
#[derive(Deserialize, Debug)]
pub struct
|
{
pub what: String,
pub event: String,
}
pub(crate) async fn handler(
Extension(_context): Extension<Arc<ServerContext>>,
_session: SessionExtractor,
Form(form): Form<PcapForm>,
) -> Result<impl IntoResponse, ApiError> {
let headers = Headers(vec![
(
HeaderName::from_static("content-type"),
HeaderValue::from_static("application/vnc.tcpdump.pcap"),
),
(
HeaderName::from_static("content-disposition"),
HeaderValue::from_static("attachment; filename=event.pcap"),
),
]);
let event: EveJson = serde_json::from_str(&form.event)
.map_err(|err| ApiError::BadRequest(format!("failed to decode event: {}", err)))?;
match form.what.as_ref() {
"packet" => {
let linktype = if let Some(linktype) = &event["xpacket_info"]["linktype"].as_u64() {
*linktype as u32
} else {
warn!("No usable link-type in event, will use ethernet");
pcap::LinkType::Ethernet as u32
};
let packet = &event["packet"]
.as_str()
.map(base64::decode)
.ok_or_else(|| ApiError::BadRequest("no packet in event".to_string()))?
.map_err(|err| {
ApiError::BadRequest(format!("failed to base64 decode packet: {}", err))
})?;
let ts = event.timestamp().ok_or_else(|| {
ApiError::BadRequest("bad or missing timestamp field".to_string())
})?;
let pcap_buffer = pcap::create(linktype, ts, packet);
return Ok((headers, pcap_buffer));
}
"payload" => {
let ts = event.timestamp().ok_or_else(|| {
ApiError::BadRequest("bad or missing timestamp field".to_string())
})?;
let packet = pcap::packet_from_payload(&event).map_err(|err| {
let msg = format!("Failed to create packet from payload: {:?}", err);
warn!("{}", msg);
ApiError::BadRequest(msg)
})?;
let pcap_buffer = pcap::create(pcap::LinkType::Raw as u32, ts, &packet);
return Ok((headers, pcap_buffer));
}
_ => {
return Err(ApiError::BadRequest("invalid value for what".to_string()));
}
}
}
|
PcapForm
|
identifier_name
|
regions-in-enums.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that lifetimes must be declared for use on enums.
// See also regions-undeclared.rs
enum yes0<'lt> {
X3(&'lt usize)
}
|
enum yes1<'a> {
X4(&'a usize)
}
enum no0 {
X5(&'foo usize) //~ ERROR use of undeclared lifetime name `'foo`
}
enum no1 {
X6(&'a usize) //~ ERROR use of undeclared lifetime name `'a`
}
fn main() {}
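// Illustrative fix (not part of the original compile-fail test above): the
// errors on `no0` and `no1` go away once the lifetime is declared on the
// enum itself, exactly as `yes0` and `yes1` do.
enum fixed0<'foo> {
    X7(&'foo usize)
}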
|
random_line_split
|
|
regions-in-enums.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that lifetimes must be declared for use on enums.
// See also regions-undeclared.rs
enum
|
<'lt> {
X3(&'lt usize)
}
enum yes1<'a> {
X4(&'a usize)
}
enum no0 {
X5(&'foo usize) //~ ERROR use of undeclared lifetime name `'foo`
}
enum no1 {
X6(&'a usize) //~ ERROR use of undeclared lifetime name `'a`
}
fn main() {}
|
yes0
|
identifier_name
|
test_cksum.rs
|
// spell-checker:ignore (words) asdf
use crate::common::util::*;
#[test]
fn test_single_file() {
new_ucmd!()
.arg("lorem_ipsum.txt")
.succeeds()
.stdout_is_fixture("single_file.expected");
}
#[test]
fn test_multiple_files() {
new_ucmd!()
.arg("lorem_ipsum.txt")
.arg("alice_in_wonderland.txt")
.succeeds()
.stdout_is_fixture("multiple_files.expected");
}
#[test]
fn test_stdin() {
new_ucmd!()
.pipe_in_fixture("lorem_ipsum.txt")
.succeeds()
.stdout_is_fixture("stdin.expected");
}
#[test]
fn test_empty() {
let (at, mut ucmd) = at_and_ucmd!();
at.touch("a");
ucmd.arg("a")
.succeeds()
.no_stderr()
.normalized_newlines_stdout_is("4294967295 0 a\n");
}
#[test]
fn test_arg_overrides_stdin() {
let (at, mut ucmd) = at_and_ucmd!();
let input = "foobarfoobar"; // spell-checker:disable-line
at.touch("a");
ucmd.arg("a")
.pipe_in(input.as_bytes())
// the command might have exited before all bytes have been pipe in.
// in that case, we don't care about the error (broken pipe)
.ignore_stdin_write_error()
.succeeds()
.no_stderr()
.normalized_newlines_stdout_is("4294967295 0 a\n");
}
#[test]
fn test_invalid_file() {
let ts = TestScenario::new(util_name!());
let at = ts.fixtures.clone();
let folder_name = "asdf";
// First check when file doesn't exist
ts.ucmd()
.arg(folder_name)
.fails()
.no_stdout()
.stderr_contains("cksum: asdf: No such file or directory");
// Then check when the file is of an invalid type
at.mkdir(folder_name);
ts.ucmd()
.arg(folder_name)
.succeeds()
.stdout_only("4294967295 0 asdf\n");
}
// Make sure crc is correct for files larger than 32 bytes
// but <128 bytes (1 fold pclmul) // spell-checker:disable-line
#[test]
fn test_crc_for_bigger_than_32_bytes() {
let (_, mut ucmd) = at_and_ucmd!();
let result = ucmd.arg("chars.txt").succeeds();
let mut stdout_split = result.stdout_str().split(' ');
let cksum: i64 = stdout_split.next().unwrap().parse().unwrap();
let bytes_cnt: i64 = stdout_split.next().unwrap().parse().unwrap();
assert_eq!(cksum, 586_047_089);
assert_eq!(bytes_cnt, 16);
}
#[test]
fn
|
() {
let (_, mut ucmd) = at_and_ucmd!();
let result = ucmd.arg("larger_than_2056_bytes.txt").succeeds();
let mut stdout_split = result.stdout_str().split(' ');
let cksum: i64 = stdout_split.next().unwrap().parse().unwrap();
let bytes_cnt: i64 = stdout_split.next().unwrap().parse().unwrap();
assert_eq!(cksum, 945_881_979);
assert_eq!(bytes_cnt, 2058);
}
|
test_stdin_larger_than_128_bytes
|
identifier_name
|
test_cksum.rs
|
// spell-checker:ignore (words) asdf
use crate::common::util::*;
#[test]
fn test_single_file() {
new_ucmd!()
.arg("lorem_ipsum.txt")
.succeeds()
.stdout_is_fixture("single_file.expected");
}
#[test]
fn test_multiple_files() {
new_ucmd!()
.arg("lorem_ipsum.txt")
.arg("alice_in_wonderland.txt")
.succeeds()
.stdout_is_fixture("multiple_files.expected");
}
#[test]
fn test_stdin() {
new_ucmd!()
.pipe_in_fixture("lorem_ipsum.txt")
.succeeds()
.stdout_is_fixture("stdin.expected");
}
#[test]
fn test_empty() {
let (at, mut ucmd) = at_and_ucmd!();
at.touch("a");
ucmd.arg("a")
.succeeds()
.no_stderr()
.normalized_newlines_stdout_is("4294967295 0 a\n");
}
#[test]
fn test_arg_overrides_stdin()
|
#[test]
fn test_invalid_file() {
let ts = TestScenario::new(util_name!());
let at = ts.fixtures.clone();
let folder_name = "asdf";
// First check when file doesn't exist
ts.ucmd()
.arg(folder_name)
.fails()
.no_stdout()
.stderr_contains("cksum: asdf: No such file or directory");
// Then check when the file is of an invalid type
at.mkdir(folder_name);
ts.ucmd()
.arg(folder_name)
.succeeds()
.stdout_only("4294967295 0 asdf\n");
}
// Make sure crc is correct for files larger than 32 bytes
// but <128 bytes (1 fold pclmul) // spell-checker:disable-line
#[test]
fn test_crc_for_bigger_than_32_bytes() {
let (_, mut ucmd) = at_and_ucmd!();
let result = ucmd.arg("chars.txt").succeeds();
let mut stdout_split = result.stdout_str().split(' ');
let cksum: i64 = stdout_split.next().unwrap().parse().unwrap();
let bytes_cnt: i64 = stdout_split.next().unwrap().parse().unwrap();
assert_eq!(cksum, 586_047_089);
assert_eq!(bytes_cnt, 16);
}
#[test]
fn test_stdin_larger_than_128_bytes() {
let (_, mut ucmd) = at_and_ucmd!();
let result = ucmd.arg("larger_than_2056_bytes.txt").succeeds();
let mut stdout_split = result.stdout_str().split(' ');
let cksum: i64 = stdout_split.next().unwrap().parse().unwrap();
let bytes_cnt: i64 = stdout_split.next().unwrap().parse().unwrap();
assert_eq!(cksum, 945_881_979);
assert_eq!(bytes_cnt, 2058);
}
|
{
let (at, mut ucmd) = at_and_ucmd!();
let input = "foobarfoobar"; // spell-checker:disable-line
at.touch("a");
ucmd.arg("a")
.pipe_in(input.as_bytes())
// the command might have exited before all bytes have been pipe in.
// in that case, we don't care about the error (broken pipe)
.ignore_stdin_write_error()
.succeeds()
.no_stderr()
.normalized_newlines_stdout_is("4294967295 0 a\n");
}
|
identifier_body
|
test_cksum.rs
|
// spell-checker:ignore (words) asdf
use crate::common::util::*;
#[test]
fn test_single_file() {
new_ucmd!()
.arg("lorem_ipsum.txt")
.succeeds()
.stdout_is_fixture("single_file.expected");
}
#[test]
fn test_multiple_files() {
new_ucmd!()
.arg("lorem_ipsum.txt")
.arg("alice_in_wonderland.txt")
.succeeds()
.stdout_is_fixture("multiple_files.expected");
}
#[test]
fn test_stdin() {
new_ucmd!()
.pipe_in_fixture("lorem_ipsum.txt")
.succeeds()
.stdout_is_fixture("stdin.expected");
}
|
fn test_empty() {
let (at, mut ucmd) = at_and_ucmd!();
at.touch("a");
ucmd.arg("a")
.succeeds()
.no_stderr()
.normalized_newlines_stdout_is("4294967295 0 a\n");
}
#[test]
fn test_arg_overrides_stdin() {
let (at, mut ucmd) = at_and_ucmd!();
let input = "foobarfoobar"; // spell-checker:disable-line
at.touch("a");
ucmd.arg("a")
.pipe_in(input.as_bytes())
// the command might have exited before all bytes have been pipe in.
// in that case, we don't care about the error (broken pipe)
.ignore_stdin_write_error()
.succeeds()
.no_stderr()
.normalized_newlines_stdout_is("4294967295 0 a\n");
}
#[test]
fn test_invalid_file() {
let ts = TestScenario::new(util_name!());
let at = ts.fixtures.clone();
let folder_name = "asdf";
// First check when file doesn't exist
ts.ucmd()
.arg(folder_name)
.fails()
.no_stdout()
.stderr_contains("cksum: asdf: No such file or directory");
// Then check when the file is of an invalid type
at.mkdir(folder_name);
ts.ucmd()
.arg(folder_name)
.succeeds()
.stdout_only("4294967295 0 asdf\n");
}
// Make sure crc is correct for files larger than 32 bytes
// but <128 bytes (1 fold pclmul) // spell-checker:disable-line
#[test]
fn test_crc_for_bigger_than_32_bytes() {
let (_, mut ucmd) = at_and_ucmd!();
let result = ucmd.arg("chars.txt").succeeds();
let mut stdout_split = result.stdout_str().split(' ');
let cksum: i64 = stdout_split.next().unwrap().parse().unwrap();
let bytes_cnt: i64 = stdout_split.next().unwrap().parse().unwrap();
assert_eq!(cksum, 586_047_089);
assert_eq!(bytes_cnt, 16);
}
#[test]
fn test_stdin_larger_than_128_bytes() {
let (_, mut ucmd) = at_and_ucmd!();
let result = ucmd.arg("larger_than_2056_bytes.txt").succeeds();
let mut stdout_split = result.stdout_str().split(' ');
let cksum: i64 = stdout_split.next().unwrap().parse().unwrap();
let bytes_cnt: i64 = stdout_split.next().unwrap().parse().unwrap();
assert_eq!(cksum, 945_881_979);
assert_eq!(bytes_cnt, 2058);
}
|
#[test]
|
random_line_split
|
screen.rs
|
extern crate screenshot;
|
extern crate image;
use screenshot::get_screenshot;
use bmp::{Image, Pixel};
fn main() {
let s = get_screenshot(0).unwrap();
println!("{} x {} x {} = {} bytes", s.height(), s.width(), s.pixel_width(), s.raw_len());
let origin = s.get_pixel(0, 0);
println!("(0,0): R: {}, G: {}, B: {}", origin.r, origin.g, origin.b);
let end_col = s.get_pixel(0, s.width()-1);
println!("(0,end): R: {}, G: {}, B: {}", end_col.r, end_col.g, end_col.b);
let opp = s.get_pixel(s.height()-1, s.width()-1);
println!("(end,end): R: {}, G: {}, B: {}", opp.r, opp.g, opp.b);
// WARNING rust-bmp params are (width, height)
let mut img = Image::new(s.width() as u32, s.height() as u32);
    for row in 0..s.height() {
        for col in 0..s.width() {
let p = s.get_pixel(row, col);
// WARNING rust-bmp params are (x, y)
img.set_pixel(col as u32, row as u32, Pixel {r: p.r, g: p.g, b: p.b});
}
}
img.save("test.bmp").unwrap();
image::save_buffer("test.png",
s.as_ref(), s.width() as u32, s.height() as u32, image::RGBA(8))
.unwrap();
}
|
extern crate bmp;
|
random_line_split
|
screen.rs
|
extern crate screenshot;
extern crate bmp;
extern crate image;
use screenshot::get_screenshot;
use bmp::{Image, Pixel};
fn
|
() {
let s = get_screenshot(0).unwrap();
println!("{} x {} x {} = {} bytes", s.height(), s.width(), s.pixel_width(), s.raw_len());
let origin = s.get_pixel(0, 0);
println!("(0,0): R: {}, G: {}, B: {}", origin.r, origin.g, origin.b);
let end_col = s.get_pixel(0, s.width()-1);
println!("(0,end): R: {}, G: {}, B: {}", end_col.r, end_col.g, end_col.b);
let opp = s.get_pixel(s.height()-1, s.width()-1);
println!("(end,end): R: {}, G: {}, B: {}", opp.r, opp.g, opp.b);
// WARNING rust-bmp params are (width, height)
let mut img = Image::new(s.width() as u32, s.height() as u32);
    for row in 0..s.height() {
        for col in 0..s.width() {
let p = s.get_pixel(row, col);
// WARNING rust-bmp params are (x, y)
img.set_pixel(col as u32, row as u32, Pixel {r: p.r, g: p.g, b: p.b});
}
}
img.save("test.bmp").unwrap();
image::save_buffer("test.png",
s.as_ref(), s.width() as u32, s.height() as u32, image::RGBA(8))
.unwrap();
}
|
main
|
identifier_name
|
screen.rs
|
extern crate screenshot;
extern crate bmp;
extern crate image;
use screenshot::get_screenshot;
use bmp::{Image, Pixel};
fn main()
|
img.set_pixel(col as u32, row as u32, Pixel {r: p.r, g: p.g, b: p.b});
}
}
img.save("test.bmp").unwrap();
image::save_buffer("test.png",
s.as_ref(), s.width() as u32, s.height() as u32, image::RGBA(8))
.unwrap();
}
|
{
let s = get_screenshot(0).unwrap();
println!("{} x {} x {} = {} bytes", s.height(), s.width(), s.pixel_width(), s.raw_len());
let origin = s.get_pixel(0, 0);
println!("(0,0): R: {}, G: {}, B: {}", origin.r, origin.g, origin.b);
let end_col = s.get_pixel(0, s.width()-1);
println!("(0,end): R: {}, G: {}, B: {}", end_col.r, end_col.g, end_col.b);
let opp = s.get_pixel(s.height()-1, s.width()-1);
println!("(end,end): R: {}, G: {}, B: {}", opp.r, opp.g, opp.b);
// WARNING rust-bmp params are (width, height)
let mut img = Image::new(s.width() as u32, s.height() as u32);
    for row in 0..s.height() {
        for col in 0..s.width() {
let p = s.get_pixel(row, col);
// WARNING rust-bmp params are (x, y)
|
identifier_body
|
geometry.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use app_units::{Au, MAX_AU};
use euclid::point::Point2D;
use euclid::rect::Rect;
use euclid::size::Size2D;
use std::i32;
|
/// A normalized "pixel" at the default resolution for the display.
///
/// Like the CSS "px" unit, the exact physical size of this unit may vary between devices, but it
/// should approximate a device-independent reference length. This unit corresponds to Android's
/// "density-independent pixel" (dip), Mac OS X's "point", and Windows "device-independent pixel."
///
/// The relationship between DevicePixel and ScreenPx is defined by the OS. On most low-dpi
/// screens, one ScreenPx is equal to one DevicePixel. But on high-density screens it can be
/// some larger number. For example, by default on Apple "retina" displays, one ScreenPx equals
/// two DevicePixels. On Android "MDPI" displays, one ScreenPx equals 1.5 device pixels.
///
/// The ratio between ScreenPx and DevicePixel for a given display can be found by calling
/// `servo::windowing::WindowMethods::hidpi_factor`.
#[derive(Clone, Copy, Debug)]
pub enum ScreenPx {}
known_heap_size!(0, ScreenPx);
// An Au is an "App Unit" and represents 1/60th of a CSS pixel. It was
// originally proposed in 2002 as a standard unit of measure in Gecko.
// See https://bugzilla.mozilla.org/show_bug.cgi?id=177805 for more info.
#[inline(always)]
pub fn max_rect() -> Rect<Au> {
Rect::new(Point2D::new(Au(i32::MIN / 2), Au(i32::MIN / 2)), Size2D::new(MAX_AU, MAX_AU))
}
/// A helper function to convert a rect of `f32` pixels to a rect of app units.
pub fn f32_rect_to_au_rect(rect: Rect<f32>) -> Rect<Au> {
Rect::new(Point2D::new(Au::from_f32_px(rect.origin.x), Au::from_f32_px(rect.origin.y)),
Size2D::new(Au::from_f32_px(rect.size.width), Au::from_f32_px(rect.size.height)))
}
/// A helper function to convert a rect of `Au` pixels to a rect of f32 units.
pub fn au_rect_to_f32_rect(rect: Rect<Au>) -> Rect<f32> {
Rect::new(Point2D::new(rect.origin.x.to_f32_px(), rect.origin.y.to_f32_px()),
Size2D::new(rect.size.width.to_f32_px(), rect.size.height.to_f32_px()))
}
pub trait ExpandToPixelBoundaries {
fn expand_to_px_boundaries(&self) -> Self;
}
impl ExpandToPixelBoundaries for Rect<Au> {
fn expand_to_px_boundaries(&self) -> Rect<Au> {
let bottom_right = self.bottom_right();
let bottom_right = Point2D::new(Au::from_px(bottom_right.x.ceil_to_px()),
Au::from_px(bottom_right.y.ceil_to_px()));
let new_origin = Point2D::new(Au::from_px(self.origin.x.to_px()),
Au::from_px(self.origin.y.to_px()));
Rect::new(new_origin,
Size2D::new(bottom_right.x - new_origin.x,
bottom_right.y - new_origin.y))
}
}
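// Hedged sketch (not part of the original Servo module above): round-tripping
// an f32 rect through app units with the two helpers defined in this file.
// The numeric values are arbitrary.
#[allow(dead_code)]
fn example_rect_round_trip() {
    let px_rect = Rect::new(Point2D::new(1.5f32, 2.5), Size2D::new(10.0, 20.0));
    let au_rect = f32_rect_to_au_rect(px_rect);
    // Converting back yields approximately the original values (an Au is 1/60 px).
    let _back = au_rect_to_f32_rect(au_rect);
}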
|
// Units for use with euclid::length and euclid::scale_factor.
|
random_line_split
|
geometry.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use app_units::{Au, MAX_AU};
use euclid::point::Point2D;
use euclid::rect::Rect;
use euclid::size::Size2D;
use std::i32;
// Units for use with euclid::length and euclid::scale_factor.
/// A normalized "pixel" at the default resolution for the display.
///
/// Like the CSS "px" unit, the exact physical size of this unit may vary between devices, but it
/// should approximate a device-independent reference length. This unit corresponds to Android's
/// "density-independent pixel" (dip), Mac OS X's "point", and Windows "device-independent pixel."
///
/// The relationship between DevicePixel and ScreenPx is defined by the OS. On most low-dpi
/// screens, one ScreenPx is equal to one DevicePixel. But on high-density screens it can be
/// some larger number. For example, by default on Apple "retina" displays, one ScreenPx equals
/// two DevicePixels. On Android "MDPI" displays, one ScreenPx equals 1.5 device pixels.
///
/// The ratio between ScreenPx and DevicePixel for a given display can be found by calling
/// `servo::windowing::WindowMethods::hidpi_factor`.
#[derive(Clone, Copy, Debug)]
pub enum ScreenPx {}
known_heap_size!(0, ScreenPx);
// An Au is an "App Unit" and represents 1/60th of a CSS pixel. It was
// originally proposed in 2002 as a standard unit of measure in Gecko.
// See https://bugzilla.mozilla.org/show_bug.cgi?id=177805 for more info.
#[inline(always)]
pub fn max_rect() -> Rect<Au> {
Rect::new(Point2D::new(Au(i32::MIN / 2), Au(i32::MIN / 2)), Size2D::new(MAX_AU, MAX_AU))
}
/// A helper function to convert a rect of `f32` pixels to a rect of app units.
pub fn
|
(rect: Rect<f32>) -> Rect<Au> {
Rect::new(Point2D::new(Au::from_f32_px(rect.origin.x), Au::from_f32_px(rect.origin.y)),
Size2D::new(Au::from_f32_px(rect.size.width), Au::from_f32_px(rect.size.height)))
}
/// A helper function to convert a rect of `Au` pixels to a rect of f32 units.
pub fn au_rect_to_f32_rect(rect: Rect<Au>) -> Rect<f32> {
Rect::new(Point2D::new(rect.origin.x.to_f32_px(), rect.origin.y.to_f32_px()),
Size2D::new(rect.size.width.to_f32_px(), rect.size.height.to_f32_px()))
}
pub trait ExpandToPixelBoundaries {
fn expand_to_px_boundaries(&self) -> Self;
}
impl ExpandToPixelBoundaries for Rect<Au> {
fn expand_to_px_boundaries(&self) -> Rect<Au> {
let bottom_right = self.bottom_right();
let bottom_right = Point2D::new(Au::from_px(bottom_right.x.ceil_to_px()),
Au::from_px(bottom_right.y.ceil_to_px()));
let new_origin = Point2D::new(Au::from_px(self.origin.x.to_px()),
Au::from_px(self.origin.y.to_px()));
Rect::new(new_origin,
Size2D::new(bottom_right.x - new_origin.x,
bottom_right.y - new_origin.y))
}
}
|
f32_rect_to_au_rect
|
identifier_name
|
circle.rs
|
use {Color, Dimensions, LineStyle, Scalar};
use super::oval::Oval;
use super::Style as Style;
/// A tiny wrapper around the **Oval** widget type.
#[derive(Copy, Clone, Debug)]
pub struct Circle;
fn rad_to_dim(radius: Scalar) -> Dimensions {
let side = radius * 2.0;
[side, side]
}
impl Circle {
/// Build a circular **Oval** with the given dimensions and style.
pub fn styled(radius: Scalar, style: Style) -> Oval {
Oval::styled(rad_to_dim(radius), style)
}
/// Build a new **Fill**ed circular **Oval**.
pub fn fill(radius: Scalar) -> Oval {
Oval::fill(rad_to_dim(radius))
}
/// Build a new circular **Oval** **Fill**ed with the given color.
pub fn
|
(radius: Scalar, color: Color) -> Oval {
Oval::fill_with(rad_to_dim(radius), color)
}
/// Build a new circular **Outline**d **Oval** widget.
pub fn outline(radius: Scalar) -> Oval {
Oval::outline(rad_to_dim(radius))
}
/// Build a new circular **Oval** **Outline**d with the given style.
pub fn outline_styled(radius: Scalar, line_style: LineStyle) -> Oval {
Oval::outline_styled(rad_to_dim(radius), line_style)
}
}
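
// Minimal usage sketch (not part of the original file): every constructor on
// `Circle` forwards to the matching `Oval` constructor with a square bounding
// box of side `2 * radius`, as computed by `rad_to_dim` above. The `color` and
// `line_style` values are taken as parameters because their construction lives
// elsewhere in the crate.
#[allow(dead_code)]
fn circle_usage_sketch(color: Color, line_style: LineStyle) -> (Oval, Oval, Oval) {
    let solid = Circle::fill(25.0);                      // filled with the default style
    let tinted = Circle::fill_with(25.0, color);         // filled with an explicit color
    let ring = Circle::outline_styled(25.0, line_style); // outlined with a custom line style
    (solid, tinted, ring)
}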
|
fill_with
|
identifier_name
|
circle.rs
|
use {Color, Dimensions, LineStyle, Scalar};
use super::oval::Oval;
use super::Style as Style;
/// A tiny wrapper around the **Oval** widget type.
#[derive(Copy, Clone, Debug)]
pub struct Circle;
fn rad_to_dim(radius: Scalar) -> Dimensions {
let side = radius * 2.0;
[side, side]
}
impl Circle {
/// Build a circular **Oval** with the given dimensions and style.
pub fn styled(radius: Scalar, style: Style) -> Oval {
Oval::styled(rad_to_dim(radius), style)
}
/// Build a new **Fill**ed circular **Oval**.
pub fn fill(radius: Scalar) -> Oval {
Oval::fill(rad_to_dim(radius))
}
/// Build a new circular **Oval** **Fill**ed with the given color.
pub fn fill_with(radius: Scalar, color: Color) -> Oval {
Oval::fill_with(rad_to_dim(radius), color)
}
/// Build a new circular **Outline**d **Oval** widget.
pub fn outline(radius: Scalar) -> Oval
|
/// Build a new circular **Oval** **Outline**d with the given style.
pub fn outline_styled(radius: Scalar, line_style: LineStyle) -> Oval {
Oval::outline_styled(rad_to_dim(radius), line_style)
}
}
|
{
Oval::outline(rad_to_dim(radius))
}
|
identifier_body
|
circle.rs
|
use {Color, Dimensions, LineStyle, Scalar};
use super::oval::Oval;
use super::Style as Style;
/// A tiny wrapper around the **Oval** widget type.
#[derive(Copy, Clone, Debug)]
pub struct Circle;
fn rad_to_dim(radius: Scalar) -> Dimensions {
let side = radius * 2.0;
[side, side]
}
impl Circle {
/// Build a circular **Oval** with the given dimensions and style.
pub fn styled(radius: Scalar, style: Style) -> Oval {
Oval::styled(rad_to_dim(radius), style)
}
/// Build a new **Fill**ed circular **Oval**.
pub fn fill(radius: Scalar) -> Oval {
Oval::fill(rad_to_dim(radius))
}
/// Build a new circular **Oval** **Fill**ed with the given color.
pub fn fill_with(radius: Scalar, color: Color) -> Oval {
Oval::fill_with(rad_to_dim(radius), color)
}
/// Build a new circular **Outline**d **Oval** widget.
|
/// Build a new circular **Oval** **Outline**d with the given style.
pub fn outline_styled(radius: Scalar, line_style: LineStyle) -> Oval {
Oval::outline_styled(rad_to_dim(radius), line_style)
}
}
|
pub fn outline(radius: Scalar) -> Oval {
Oval::outline(rad_to_dim(radius))
}
|
random_line_split
|
day5.rs
|
extern crate hyper;
extern crate rustc_serialize;
extern crate url;
use std::io::Read;
use hyper::Client;
use rustc_serialize::{Encodable, json};
use url::form_urlencoded;
fn get_content(url: &str) -> hyper::Result<String> {
let client = Client::new();
let mut response = client.get(url).send()?;
let mut buf = String::new();
response.read_to_string(&mut buf)?;
Ok(buf)
}
type Query<'a> = Vec<(&'a str, &'a str)>;
fn post_query(url: &str, query: Query) -> hyper::Result<String> {
let client = Client::new();
let body = form_urlencoded::Serializer::new(String::new())
.extend_pairs(query.iter())
.finish();
let mut response = client.post(url).body(&body[..]).send()?;
let mut buf = String::new();
response.read_to_string(&mut buf)?;
Ok(buf)
}
fn post_json<T>(url: &str, payload: &T) -> hyper::Result<String>
where
T: Encodable,
{
let client = Client::new();
let body = json::encode(payload).unwrap();
let mut response = client.post(url).body(&body[..]).send()?;
let mut buf = String::new();
response.read_to_string(&mut buf)?;
Ok(buf)
}
#[derive(RustcDecodable, RustcEncodable)]
struct Movie {
title: String,
bad_guy: String,
pub_year: usize,
}
fn
|
() {
println!("24 days of Rust - hyper (day 5)");
println!("{:?}", get_content("http://httpbin.org/status/200"));
let query = vec![("key", "value"), ("foo", "bar")];
println!("{}", post_query("http://httpbin.org/post", query).unwrap());
let movie = Movie {
title: "You Only Live Twice".to_string(),
bad_guy: "Blofeld".to_string(),
pub_year: 1967,
};
println!("{}", post_json("http://httpbin.org/post", &movie).unwrap());
}
|
main
|
identifier_name
|
day5.rs
|
extern crate hyper;
extern crate rustc_serialize;
extern crate url;
use std::io::Read;
use hyper::Client;
use rustc_serialize::{Encodable, json};
use url::form_urlencoded;
fn get_content(url: &str) -> hyper::Result<String> {
|
response.read_to_string(&mut buf)?;
Ok(buf)
}
type Query<'a> = Vec<(&'a str, &'a str)>;
fn post_query(url: &str, query: Query) -> hyper::Result<String> {
let client = Client::new();
let body = form_urlencoded::Serializer::new(String::new())
.extend_pairs(query.iter())
.finish();
let mut response = client.post(url).body(&body[..]).send()?;
let mut buf = String::new();
response.read_to_string(&mut buf)?;
Ok(buf)
}
fn post_json<T>(url: &str, payload: &T) -> hyper::Result<String>
where
T: Encodable,
{
let client = Client::new();
let body = json::encode(payload).unwrap();
let mut response = client.post(url).body(&body[..]).send()?;
let mut buf = String::new();
response.read_to_string(&mut buf)?;
Ok(buf)
}
#[derive(RustcDecodable, RustcEncodable)]
struct Movie {
title: String,
bad_guy: String,
pub_year: usize,
}
fn main() {
println!("24 days of Rust - hyper (day 5)");
println!("{:?}", get_content("http://httpbin.org/status/200"));
let query = vec![("key", "value"), ("foo", "bar")];
println!("{}", post_query("http://httpbin.org/post", query).unwrap());
let movie = Movie {
title: "You Only Live Twice".to_string(),
bad_guy: "Blofeld".to_string(),
pub_year: 1967,
};
println!("{}", post_json("http://httpbin.org/post", &movie).unwrap());
}
|
let client = Client::new();
let mut response = client.get(url).send()?;
let mut buf = String::new();
|
random_line_split
|
slice.rs
|
use super::{Parse, ParseElem, ParseLiteral, ParseSlice, RuleResult};
impl<T> Parse for [T] {
type PositionRepr = usize;
fn start(&self) -> usize {
0
}
fn is_eof(&self, pos: usize) -> bool {
pos >= self.len()
}
fn position_repr(&self, pos: usize) -> usize {
pos
}
}
impl<'input, T: 'input + Copy> ParseElem<'input> for [T] {
type Element = T;
fn parse_elem(&'input self, pos: usize) -> RuleResult<T> {
match self[pos..].first() {
Some(c) => RuleResult::Matched(pos + 1, *c),
None => RuleResult::Failed,
}
}
}
impl ParseLiteral for [u8] {
fn parse_string_literal(&self, pos: usize, literal: &str) -> RuleResult<()> {
let l = literal.len();
if self.len() >= pos + l && &self[pos..pos + l] == literal.as_bytes() {
RuleResult::Matched(pos + l, ())
} else
|
}
}
impl<'input, T: 'input> ParseSlice<'input> for [T] {
type Slice = &'input [T];
fn parse_slice(&'input self, p1: usize, p2: usize) -> &'input [T] {
&self[p1..p2]
}
}
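
// Behavioural sketch (not part of the original file): what the impls above
// return for a small byte-slice input. Matching on `RuleResult` directly
// avoids assuming it implements `PartialEq`.
#[allow(dead_code)]
fn slice_parse_sketch() {
    let input: &[u8] = b"peg";

    // ParseLiteral: "pe" matches at position 0 and the cursor advances past it.
    match input.parse_string_literal(0, "pe") {
        RuleResult::Matched(next, ()) => assert_eq!(next, 2),
        RuleResult::Failed => unreachable!("literal should match"),
    }

    // ParseElem: one element is consumed by value at the given position.
    match input.parse_elem(2) {
        RuleResult::Matched(next, byte) => {
            assert_eq!(next, 3);
            assert_eq!(byte, b'g');
        }
        RuleResult::Failed => unreachable!("position 2 is in bounds"),
    }
}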
|
{
RuleResult::Failed
}
|
conditional_block
|
slice.rs
|
use super::{Parse, ParseElem, ParseLiteral, ParseSlice, RuleResult};
impl<T> Parse for [T] {
type PositionRepr = usize;
fn start(&self) -> usize {
0
}
fn is_eof(&self, pos: usize) -> bool {
pos >= self.len()
}
fn position_repr(&self, pos: usize) -> usize {
pos
}
}
impl<'input, T: 'input + Copy> ParseElem<'input> for [T] {
type Element = T;
fn parse_elem(&'input self, pos: usize) -> RuleResult<T> {
match self[pos..].first() {
Some(c) => RuleResult::Matched(pos + 1, *c),
None => RuleResult::Failed,
}
}
}
impl ParseLiteral for [u8] {
fn parse_string_literal(&self, pos: usize, literal: &str) -> RuleResult<()> {
let l = literal.len();
if self.len() >= pos + l && &self[pos..pos + l] == literal.as_bytes() {
RuleResult::Matched(pos + l, ())
} else {
RuleResult::Failed
}
}
}
impl<'input, T: 'input> ParseSlice<'input> for [T] {
type Slice = &'input [T];
fn parse_slice(&'input self, p1: usize, p2: usize) -> &'input [T]
|
}
|
{
&self[p1..p2]
}
|
identifier_body
|
slice.rs
|
use super::{Parse, ParseElem, ParseLiteral, ParseSlice, RuleResult};
impl<T> Parse for [T] {
type PositionRepr = usize;
fn start(&self) -> usize {
0
}
fn is_eof(&self, pos: usize) -> bool {
pos >= self.len()
}
fn position_repr(&self, pos: usize) -> usize {
pos
}
}
impl<'input, T: 'input + Copy> ParseElem<'input> for [T] {
type Element = T;
fn
|
(&'input self, pos: usize) -> RuleResult<T> {
match self[pos..].first() {
Some(c) => RuleResult::Matched(pos + 1, *c),
None => RuleResult::Failed,
}
}
}
impl ParseLiteral for [u8] {
fn parse_string_literal(&self, pos: usize, literal: &str) -> RuleResult<()> {
let l = literal.len();
if self.len() >= pos + l && &self[pos..pos + l] == literal.as_bytes() {
RuleResult::Matched(pos + l, ())
} else {
RuleResult::Failed
}
}
}
impl<'input, T: 'input> ParseSlice<'input> for [T] {
type Slice = &'input [T];
fn parse_slice(&'input self, p1: usize, p2: usize) -> &'input [T] {
&self[p1..p2]
}
}
|
parse_elem
|
identifier_name
|
slice.rs
|
use super::{Parse, ParseElem, ParseLiteral, ParseSlice, RuleResult};
impl<T> Parse for [T] {
type PositionRepr = usize;
fn start(&self) -> usize {
0
}
fn is_eof(&self, pos: usize) -> bool {
pos >= self.len()
}
fn position_repr(&self, pos: usize) -> usize {
pos
}
}
impl<'input, T: 'input + Copy> ParseElem<'input> for [T] {
type Element = T;
fn parse_elem(&'input self, pos: usize) -> RuleResult<T> {
match self[pos..].first() {
Some(c) => RuleResult::Matched(pos + 1, *c),
None => RuleResult::Failed,
}
}
}
impl ParseLiteral for [u8] {
fn parse_string_literal(&self, pos: usize, literal: &str) -> RuleResult<()> {
let l = literal.len();
if self.len() >= pos + l && &self[pos..pos + l] == literal.as_bytes() {
RuleResult::Matched(pos + l, ())
} else {
RuleResult::Failed
}
}
}
impl<'input, T: 'input> ParseSlice<'input> for [T] {
type Slice = &'input [T];
fn parse_slice(&'input self, p1: usize, p2: usize) -> &'input [T] {
&self[p1..p2]
|
}
}
|
random_line_split
|
|
pointer.rs
|
use std::{
fmt,
hash::{Hash, Hasher},
marker::PhantomData,
};
use crate::{Pending, PendingRef, PointerData};
/// The error type which is returned from upgrading
/// [`WeakPointer`](struct.WeakPointer.html).
#[derive(Debug, PartialEq)]
pub struct DeadComponentError;
/// A pointer to a component of type `T`.
/// The component is guaranteed to be accessible for as long as this pointer is alive.
/// You'd need a storage to access the data.
/// # Examples
/// ```rust
/// # let mut storage = froggy::Storage::new();
/// // you can create pointer by creating component in storage
/// let ptr1 = storage.create(1i32);
/// // later you can change component in storage
/// storage[&ptr1] = 2i32;
/// ```
/// Also you can use [`Storage::pin`](struct.Storage.html#method.pin) to pin component with `Pointer`
///
/// ```rust
/// # let mut storage = froggy::Storage::new();
/// # let ptr1 = storage.create(1i32);
/// let item = storage.iter().next().unwrap();
/// let ptr2 = storage.pin(&item);
/// // Pointers to the same component are equal
/// assert_eq!(ptr1, ptr2);
/// ```
pub struct Pointer<T> {
pub(crate) data: PointerData,
pub(crate) pending: PendingRef,
pub(crate) marker: PhantomData<T>,
}
impl<T> fmt::Debug for Pointer<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
/// Debug output type for `Self`.
#[derive(Debug)]
pub struct Pointer<'a> {
/// All integer entries are `usize` for future-proofing.
index: usize,
epoch: usize,
storage_id: usize,
pending: &'a Pending,
}
fmt::Debug::fmt(
&Pointer {
index: self.data.get_index() as usize,
epoch: self.data.get_epoch() as usize,
storage_id: self.data.get_storage_id() as usize,
pending: &self.pending.lock(),
},
f,
)
}
}
impl<T> Pointer<T> {
/// Creates a new `WeakPointer` to this component.
/// See [`WeakPointer`](pointer/struct.WeakPointer.html)
#[inline]
pub fn downgrade(&self) -> WeakPointer<T> {
WeakPointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
}
}
}
impl<T> PartialOrd for Pointer<T> {
fn partial_cmp(&self, other: &Pointer<T>) -> Option<std::cmp::Ordering> {
if self.data.get_storage_id() == other.data.get_storage_id() {
debug_assert!(
                self.data.get_index() != other.data.get_index()
|| self.data.get_epoch() == self.data.get_epoch()
);
self.data.get_index().partial_cmp(&other.data.get_index())
} else {
None
}
}
}
impl<T> Clone for Pointer<T> {
#[inline]
fn clone(&self) -> Pointer<T> {
self.pending.lock().add_ref.push(self.data.get_index());
Pointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
}
}
}
impl<T> PartialEq for Pointer<T> {
#[inline]
fn eq(&self, other: &Pointer<T>) -> bool {
self.data == other.data
}
}
impl<T> Eq for Pointer<T> {}
impl<T> Hash for Pointer<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.data.hash(state);
}
|
#[inline]
fn drop(&mut self) {
self.pending.lock().sub_ref.push(self.data.get_index());
}
}
/// Weak variant of `Pointer`.
/// `WeakPointer`s are used to avoid deadlocking when dropping structures with cycled references to each other.
/// In the following example `Storage` will stand in memory even after going out of scope, because there is cyclic referencing between `Node`s
///
/// ```rust
/// # use froggy::{Pointer, Storage};
/// struct Node {
/// next: Option<Pointer<Node>>,
/// }
/// # let mut storage = Storage::new();
/// let ptr1 = storage.create(Node { next: None });
/// let ptr2 = storage.create(Node { next: Some(ptr1.clone()) });
/// storage[&ptr1].next = Some(ptr2.clone());
/// ```
///
/// To avoid such situations, just replace `Option<Pointer<Node>>` with `Option<WeakPointer<Node>>`
/// # Example
///
/// ```rust
/// # let mut storage = froggy::Storage::new();
/// let pointer = storage.create(1i32);
/// // create WeakPointer to this component
/// let weak = pointer.downgrade();
/// ```
///
/// You will need to [`upgrade`](struct.WeakPointer.html#method.upgrade) `WeakPointer` to access component in storage
///
/// ```rust
/// # fn try_main() -> Result<(), froggy::DeadComponentError> {
/// # let mut storage = froggy::Storage::new();
/// # let _pointer = storage.create(1i32);
/// # let weak = _pointer.downgrade();
/// let pointer = weak.upgrade()?;
/// storage[&pointer] = 20;
/// # Ok(()) }
/// # fn main() { try_main().unwrap(); }
/// ```
#[derive(Debug)]
pub struct WeakPointer<T> {
data: PointerData,
pending: PendingRef,
marker: PhantomData<T>,
}
impl<T> WeakPointer<T> {
/// Upgrades the `WeakPointer` to a `Pointer`, if possible.
/// # Errors
/// Returns [`DeadComponentError`](struct.DeadComponentError.html) if the related component in storage was destroyed.
pub fn upgrade(&self) -> Result<Pointer<T>, DeadComponentError> {
let mut pending = self.pending.lock();
        if pending.get_epoch(self.data.get_index()) != self.data.get_epoch() {
return Err(DeadComponentError);
}
pending.add_ref.push(self.data.get_index());
Ok(Pointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
})
}
}
impl<T> Clone for WeakPointer<T> {
#[inline]
fn clone(&self) -> WeakPointer<T> {
WeakPointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
}
}
}
impl<T> PartialEq for WeakPointer<T> {
#[inline]
fn eq(&self, other: &WeakPointer<T>) -> bool {
self.data == other.data
}
}
impl<T> Eq for WeakPointer<T> {}
|
}
impl<T> Drop for Pointer<T> {
|
random_line_split
|
pointer.rs
|
use std::{
fmt,
hash::{Hash, Hasher},
marker::PhantomData,
};
use crate::{Pending, PendingRef, PointerData};
/// The error type which is returned from upgrading
/// [`WeakPointer`](struct.WeakPointer.html).
#[derive(Debug, PartialEq)]
pub struct DeadComponentError;
/// A pointer to a component of type `T`.
/// The component is guaranteed to be accessible for as long as this pointer is alive.
/// You'd need a storage to access the data.
/// # Examples
/// ```rust
/// # let mut storage = froggy::Storage::new();
/// // you can create pointer by creating component in storage
/// let ptr1 = storage.create(1i32);
/// // later you can change component in storage
/// storage[&ptr1] = 2i32;
/// ```
/// Also you can use [`Storage::pin`](struct.Storage.html#method.pin) to pin component with `Pointer`
///
/// ```rust
/// # let mut storage = froggy::Storage::new();
/// # let ptr1 = storage.create(1i32);
/// let item = storage.iter().next().unwrap();
/// let ptr2 = storage.pin(&item);
/// // Pointers to the same component are equal
/// assert_eq!(ptr1, ptr2);
/// ```
pub struct Pointer<T> {
pub(crate) data: PointerData,
pub(crate) pending: PendingRef,
pub(crate) marker: PhantomData<T>,
}
impl<T> fmt::Debug for Pointer<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
/// Debug output type for `Self`.
#[derive(Debug)]
pub struct Pointer<'a> {
/// All integer entries are `usize` for future-proofing.
index: usize,
epoch: usize,
storage_id: usize,
pending: &'a Pending,
}
fmt::Debug::fmt(
&Pointer {
index: self.data.get_index() as usize,
epoch: self.data.get_epoch() as usize,
storage_id: self.data.get_storage_id() as usize,
pending: &self.pending.lock(),
},
f,
)
}
}
impl<T> Pointer<T> {
/// Creates a new `WeakPointer` to this component.
/// See [`WeakPointer`](pointer/struct.WeakPointer.html)
#[inline]
pub fn downgrade(&self) -> WeakPointer<T> {
WeakPointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
}
}
}
impl<T> PartialOrd for Pointer<T> {
fn partial_cmp(&self, other: &Pointer<T>) -> Option<std::cmp::Ordering> {
if self.data.get_storage_id() == other.data.get_storage_id() {
debug_assert!(
                self.data.get_index() != other.data.get_index()
|| self.data.get_epoch() == self.data.get_epoch()
);
self.data.get_index().partial_cmp(&other.data.get_index())
} else {
None
}
}
}
impl<T> Clone for Pointer<T> {
#[inline]
fn clone(&self) -> Pointer<T> {
self.pending.lock().add_ref.push(self.data.get_index());
Pointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
}
}
}
impl<T> PartialEq for Pointer<T> {
#[inline]
fn eq(&self, other: &Pointer<T>) -> bool {
self.data == other.data
}
}
impl<T> Eq for Pointer<T> {}
impl<T> Hash for Pointer<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.data.hash(state);
}
}
impl<T> Drop for Pointer<T> {
#[inline]
fn drop(&mut self) {
self.pending.lock().sub_ref.push(self.data.get_index());
}
}
/// Weak variant of `Pointer`.
/// `WeakPointer`s are used to avoid deadlocking when dropping structures with cycled references to each other.
/// In the following example `Storage` will stand in memory even after going out of scope, because there is cyclic referencing between `Node`s
///
/// ```rust
/// # use froggy::{Pointer, Storage};
/// struct Node {
/// next: Option<Pointer<Node>>,
/// }
/// # let mut storage = Storage::new();
/// let ptr1 = storage.create(Node { next: None });
/// let ptr2 = storage.create(Node { next: Some(ptr1.clone()) });
/// storage[&ptr1].next = Some(ptr2.clone());
/// ```
///
/// To avoid such situations, just replace `Option<Pointer<Node>>` with `Option<WeakPointer<Node>>`
/// # Example
///
/// ```rust
/// # let mut storage = froggy::Storage::new();
/// let pointer = storage.create(1i32);
/// // create WeakPointer to this component
/// let weak = pointer.downgrade();
/// ```
///
/// You will need to [`upgrade`](struct.WeakPointer.html#method.upgrade) `WeakPointer` to access component in storage
///
/// ```rust
/// # fn try_main() -> Result<(), froggy::DeadComponentError> {
/// # let mut storage = froggy::Storage::new();
/// # let _pointer = storage.create(1i32);
/// # let weak = _pointer.downgrade();
/// let pointer = weak.upgrade()?;
/// storage[&pointer] = 20;
/// # Ok(()) }
/// # fn main() { try_main().unwrap(); }
/// ```
#[derive(Debug)]
pub struct WeakPointer<T> {
data: PointerData,
pending: PendingRef,
marker: PhantomData<T>,
}
impl<T> WeakPointer<T> {
/// Upgrades the `WeakPointer` to a `Pointer`, if possible.
/// # Errors
/// Returns [`DeadComponentError`](struct.DeadComponentError.html) if the related component in storage was destroyed.
pub fn upgrade(&self) -> Result<Pointer<T>, DeadComponentError> {
let mut pending = self.pending.lock();
        if pending.get_epoch(self.data.get_index()) != self.data.get_epoch() {
return Err(DeadComponentError);
}
pending.add_ref.push(self.data.get_index());
Ok(Pointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
})
}
}
impl<T> Clone for WeakPointer<T> {
#[inline]
fn clone(&self) -> WeakPointer<T> {
WeakPointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
}
}
}
impl<T> PartialEq for WeakPointer<T> {
#[inline]
fn
|
(&self, other: &WeakPointer<T>) -> bool {
self.data == other.data
}
}
impl<T> Eq for WeakPointer<T> {}
|
eq
|
identifier_name
|
pointer.rs
|
use std::{
fmt,
hash::{Hash, Hasher},
marker::PhantomData,
};
use crate::{Pending, PendingRef, PointerData};
/// The error type which is returned from upgrading
/// [`WeakPointer`](struct.WeakPointer.html).
#[derive(Debug, PartialEq)]
pub struct DeadComponentError;
/// A pointer to a component of type `T`.
/// The component is guaranteed to be accessible for as long as this pointer is alive.
/// You'd need a storage to access the data.
/// # Examples
/// ```rust
/// # let mut storage = froggy::Storage::new();
/// // you can create pointer by creating component in storage
/// let ptr1 = storage.create(1i32);
/// // later you can change component in storage
/// storage[&ptr1] = 2i32;
/// ```
/// Also you can use [`Storage::pin`](struct.Storage.html#method.pin) to pin component with `Pointer`
///
/// ```rust
/// # let mut storage = froggy::Storage::new();
/// # let ptr1 = storage.create(1i32);
/// let item = storage.iter().next().unwrap();
/// let ptr2 = storage.pin(&item);
/// // Pointers to the same component are equal
/// assert_eq!(ptr1, ptr2);
/// ```
pub struct Pointer<T> {
pub(crate) data: PointerData,
pub(crate) pending: PendingRef,
pub(crate) marker: PhantomData<T>,
}
impl<T> fmt::Debug for Pointer<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
/// Debug output type for `Self`.
#[derive(Debug)]
pub struct Pointer<'a> {
/// All integer entries are `usize` for future-proofing.
index: usize,
epoch: usize,
storage_id: usize,
pending: &'a Pending,
}
fmt::Debug::fmt(
&Pointer {
index: self.data.get_index() as usize,
epoch: self.data.get_epoch() as usize,
storage_id: self.data.get_storage_id() as usize,
pending: &self.pending.lock(),
},
f,
)
}
}
impl<T> Pointer<T> {
/// Creates a new `WeakPointer` to this component.
/// See [`WeakPointer`](pointer/struct.WeakPointer.html)
#[inline]
pub fn downgrade(&self) -> WeakPointer<T>
|
}
impl<T> PartialOrd for Pointer<T> {
fn partial_cmp(&self, other: &Pointer<T>) -> Option<std::cmp::Ordering> {
if self.data.get_storage_id() == other.data.get_storage_id() {
debug_assert!(
                self.data.get_index() != other.data.get_index()
|| self.data.get_epoch() == self.data.get_epoch()
);
self.data.get_index().partial_cmp(&other.data.get_index())
} else {
None
}
}
}
impl<T> Clone for Pointer<T> {
#[inline]
fn clone(&self) -> Pointer<T> {
self.pending.lock().add_ref.push(self.data.get_index());
Pointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
}
}
}
impl<T> PartialEq for Pointer<T> {
#[inline]
fn eq(&self, other: &Pointer<T>) -> bool {
self.data == other.data
}
}
impl<T> Eq for Pointer<T> {}
impl<T> Hash for Pointer<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.data.hash(state);
}
}
impl<T> Drop for Pointer<T> {
#[inline]
fn drop(&mut self) {
self.pending.lock().sub_ref.push(self.data.get_index());
}
}
/// Weak variant of `Pointer`.
/// `WeakPointer`s are used to avoid deadlocking when dropping structures with cycled references to each other.
/// In the following example `Storage` will stand in memory even after going out of scope, because there is cyclic referencing between `Node`s
///
/// ```rust
/// # use froggy::{Pointer, Storage};
/// struct Node {
/// next: Option<Pointer<Node>>,
/// }
/// # let mut storage = Storage::new();
/// let ptr1 = storage.create(Node { next: None });
/// let ptr2 = storage.create(Node { next: Some(ptr1.clone()) });
/// storage[&ptr1].next = Some(ptr2.clone());
/// ```
///
/// To avoid such situations, just replace `Option<Pointer<Node>>` with `Option<WeakPointer<Node>>`
/// # Example
///
/// ```rust
/// # let mut storage = froggy::Storage::new();
/// let pointer = storage.create(1i32);
/// // create WeakPointer to this component
/// let weak = pointer.downgrade();
/// ```
///
/// You will need to [`upgrade`](struct.WeakPointer.html#method.upgrade) `WeakPointer` to access component in storage
///
/// ```rust
/// # fn try_main() -> Result<(), froggy::DeadComponentError> {
/// # let mut storage = froggy::Storage::new();
/// # let _pointer = storage.create(1i32);
/// # let weak = _pointer.downgrade();
/// let pointer = weak.upgrade()?;
/// storage[&pointer] = 20;
/// # Ok(()) }
/// # fn main() { try_main().unwrap(); }
/// ```
#[derive(Debug)]
pub struct WeakPointer<T> {
data: PointerData,
pending: PendingRef,
marker: PhantomData<T>,
}
impl<T> WeakPointer<T> {
/// Upgrades the `WeakPointer` to a `Pointer`, if possible.
/// # Errors
/// Returns [`DeadComponentError`](struct.DeadComponentError.html) if the related component in storage was destroyed.
pub fn upgrade(&self) -> Result<Pointer<T>, DeadComponentError> {
let mut pending = self.pending.lock();
        if pending.get_epoch(self.data.get_index()) != self.data.get_epoch() {
return Err(DeadComponentError);
}
pending.add_ref.push(self.data.get_index());
Ok(Pointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
})
}
}
impl<T> Clone for WeakPointer<T> {
#[inline]
fn clone(&self) -> WeakPointer<T> {
WeakPointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
}
}
}
impl<T> PartialEq for WeakPointer<T> {
#[inline]
fn eq(&self, other: &WeakPointer<T>) -> bool {
self.data == other.data
}
}
impl<T> Eq for WeakPointer<T> {}
|
{
WeakPointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
}
}
|
identifier_body
|
pointer.rs
|
use std::{
fmt,
hash::{Hash, Hasher},
marker::PhantomData,
};
use crate::{Pending, PendingRef, PointerData};
/// The error type which is returned from upgrading
/// [`WeakPointer`](struct.WeakPointer.html).
#[derive(Debug, PartialEq)]
pub struct DeadComponentError;
/// A pointer to a component of type `T`.
/// The component is guaranteed to be accessible for as long as this pointer is alive.
/// You'd need a storage to access the data.
/// # Examples
/// ```rust
/// # let mut storage = froggy::Storage::new();
/// // you can create pointer by creating component in storage
/// let ptr1 = storage.create(1i32);
/// // later you can change component in storage
/// storage[&ptr1] = 2i32;
/// ```
/// Also you can use [`Storage::pin`](struct.Storage.html#method.pin) to pin component with `Pointer`
///
/// ```rust
/// # let mut storage = froggy::Storage::new();
/// # let ptr1 = storage.create(1i32);
/// let item = storage.iter().next().unwrap();
/// let ptr2 = storage.pin(&item);
/// // Pointers to the same component are equal
/// assert_eq!(ptr1, ptr2);
/// ```
pub struct Pointer<T> {
pub(crate) data: PointerData,
pub(crate) pending: PendingRef,
pub(crate) marker: PhantomData<T>,
}
impl<T> fmt::Debug for Pointer<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
/// Debug output type for `Self`.
#[derive(Debug)]
pub struct Pointer<'a> {
/// All integer entries are `usize` for future-proofing.
index: usize,
epoch: usize,
storage_id: usize,
pending: &'a Pending,
}
fmt::Debug::fmt(
&Pointer {
index: self.data.get_index() as usize,
epoch: self.data.get_epoch() as usize,
storage_id: self.data.get_storage_id() as usize,
pending: &self.pending.lock(),
},
f,
)
}
}
impl<T> Pointer<T> {
/// Creates a new `WeakPointer` to this component.
/// See [`WeakPointer`](pointer/struct.WeakPointer.html)
#[inline]
pub fn downgrade(&self) -> WeakPointer<T> {
WeakPointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
}
}
}
impl<T> PartialOrd for Pointer<T> {
fn partial_cmp(&self, other: &Pointer<T>) -> Option<std::cmp::Ordering> {
if self.data.get_storage_id() == other.data.get_storage_id() {
debug_assert!(
                self.data.get_index() != other.data.get_index()
|| self.data.get_epoch() == self.data.get_epoch()
);
self.data.get_index().partial_cmp(&other.data.get_index())
} else {
None
}
}
}
impl<T> Clone for Pointer<T> {
#[inline]
fn clone(&self) -> Pointer<T> {
self.pending.lock().add_ref.push(self.data.get_index());
Pointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
}
}
}
impl<T> PartialEq for Pointer<T> {
#[inline]
fn eq(&self, other: &Pointer<T>) -> bool {
self.data == other.data
}
}
impl<T> Eq for Pointer<T> {}
impl<T> Hash for Pointer<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.data.hash(state);
}
}
impl<T> Drop for Pointer<T> {
#[inline]
fn drop(&mut self) {
self.pending.lock().sub_ref.push(self.data.get_index());
}
}
/// Weak variant of `Pointer`.
/// `WeakPointer`s are used to avoid deadlocking when dropping structures with cycled references to each other.
/// In the following example `Storage` will stand in memory even after going out of scope, because there is cyclic referencing between `Node`s
///
/// ```rust
/// # use froggy::{Pointer, Storage};
/// struct Node {
/// next: Option<Pointer<Node>>,
/// }
/// # let mut storage = Storage::new();
/// let ptr1 = storage.create(Node { next: None });
/// let ptr2 = storage.create(Node { next: Some(ptr1.clone()) });
/// storage[&ptr1].next = Some(ptr2.clone());
/// ```
///
/// To avoid such situations, just replace `Option<Pointer<Node>>` with `Option<WeakPointer<Node>>`
/// # Example
///
/// ```rust
/// # let mut storage = froggy::Storage::new();
/// let pointer = storage.create(1i32);
/// // create WeakPointer to this component
/// let weak = pointer.downgrade();
/// ```
///
/// You will need to [`upgrade`](struct.WeakPointer.html#method.upgrade) `WeakPointer` to access component in storage
///
/// ```rust
/// # fn try_main() -> Result<(), froggy::DeadComponentError> {
/// # let mut storage = froggy::Storage::new();
/// # let _pointer = storage.create(1i32);
/// # let weak = _pointer.downgrade();
/// let pointer = weak.upgrade()?;
/// storage[&pointer] = 20;
/// # Ok(()) }
/// # fn main() { try_main().unwrap(); }
/// ```
#[derive(Debug)]
pub struct WeakPointer<T> {
data: PointerData,
pending: PendingRef,
marker: PhantomData<T>,
}
impl<T> WeakPointer<T> {
/// Upgrades the `WeakPointer` to a `Pointer`, if possible.
/// # Errors
/// Returns [`DeadComponentError`](struct.DeadComponentError.html) if the related component in storage was destroyed.
pub fn upgrade(&self) -> Result<Pointer<T>, DeadComponentError> {
let mut pending = self.pending.lock();
        if pending.get_epoch(self.data.get_index()) != self.data.get_epoch()
|
pending.add_ref.push(self.data.get_index());
Ok(Pointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
})
}
}
impl<T> Clone for WeakPointer<T> {
#[inline]
fn clone(&self) -> WeakPointer<T> {
WeakPointer {
data: self.data,
pending: self.pending.clone(),
marker: PhantomData,
}
}
}
impl<T> PartialEq for WeakPointer<T> {
#[inline]
fn eq(&self, other: &WeakPointer<T>) -> bool {
self.data == other.data
}
}
impl<T> Eq for WeakPointer<T> {}
|
{
return Err(DeadComponentError);
}
|
conditional_block
|
lib.rs
|
// Copyright 2015-2017 Intecture Developers.
//
// Licensed under the Mozilla Public License 2.0 <LICENSE or
// https://www.tldrlegal.com/l/mpl-2.0>. This file may not be copied,
// modified, or distributed except according to those terms.
//! Intecture is an API for managing your servers. You can think of it as a
//! DevOps tool, but without the complicated ecosystem and proprietary nonsense.
//!
//! The core API is, well, the core of Intecture. It contains all the endpoints
//! used to configure a host, as well as the underlying OS abstractions that
//! they are built on. Generally you'll consume this API via
//! [intecture_proj](../intecture_proj/), which reexports `intecture_api`,
//! though for projects that do not need a formal structure (e.g. an installer
//! program), this API will suffice.
//!
//! ## Project structure
//!
//! The core API is organised into a series of modules (known as “endpoints”,
//! e.g. `command`, `package` etc.), which represent basic configuration tasks
//! that you’d normally perform by hand. Within each endpoint is a `providers`
//! module, which houses the OS-specific abstractions that do the heavy lifting
//! on behalf of the endpoint.
//!
//! For example, the [`package`](package/) endpoint has a struct called
//! `Package`. This is a cross-platform abstraction for managing a package on
//! your server. Behind this abstraction is a concrete implementation of a
//! specific package [_provider_](package/providers), e.g. Yum or Apt. If you
//! instantiate a new `Package` instance through the
//! [`Package::new()`](package/struct.Package.html#method.new) function, the
//! best available provider for your server is chosen automatically. This is
//! true of all endpoints.
//!
//! ## Hosts
//!
//! So far we’ve talked about using endpoints to automate configuration tasks,
//! but how does Intecture know which server we want to talk to? This is where
//! we need the [`host`](host/) endpoint. All things start with a host! Side
//! note - if we were ever to do ‘merch’, that’d probably be on a t-shirt.
//! Anyway, poor marketing decisions aside, you’ll need to create a host in
//! order to do anything.
//!
//! Hosts come in both the [`Local`](host/local/struct.Local.html) and
//! [`Plain`](host/remote/struct.Plain.html) varieties. The `Local` type points
//! to your local machine, and the `Plain` type is a remote host type that
//! connects to a remote machine over the network. Whichever type you choose,
//! simply pass it in to your endpoints as required and Intecture will do the
//! rest.
//!
//!>“Why `Plain`?” I hear you ask. Well, it’s because the `Plain` host type is
//! a remote host that uses TCP to send/receive _plaintext_ data.
//!
//! ## Example
//!
//! Here’s a reproduction of the
//! [basic example](https://github.com/intecture/api/blob/master/core/examples/basic.rs)
//! from the `examples/` folder:
//!
//!```rust
//!extern crate futures;
//!extern crate intecture_api;
//!extern crate tokio_core;
//!
//!use futures::{Future, Stream};
//!use intecture_api::prelude::*;
//!use tokio_core::reactor::Core;
//!
//!fn main() {
//! // These two lines are part of `tokio-core` and can be safely ignored. So
//! // long as they appear at the top of your code, all is fine with the world.
//! let mut core = Core::new().unwrap();
//! let handle = core.handle();
//!
//! // Here's the meat of your project. In this example we're talking to our
//! // local machine, so we use the `Local` host type.
//! let host = Local::new(&handle).and_then(|host| {
//! // Ok, we're in! Now we can pass our `host` handle to other endpoints,
//! // which informs them of the server we mean to talk to.
//!
//! // Let's start with something basic - a shell command.
//! let cmd = Command::new(&host, "whoami", None);
//! cmd.exec().and_then(|mut status| {
//! // At this point, our command is running. As the API is
//! // asynchronous, we don't have to wait for it to finish before
//! // inspecting its output. This is called "streaming".
//!
//! // First let's grab the stream from `CommandStatus`. This stream is
//! // a stream of strings, each of which represents a line of command
//! // output. We can use the `for_each` combinator to print these
//! // lines to stdout.
//! //
//! // If printing isn't your thing, you are also free to lick them or
//! // whatever you're into. I'm not here to judge.
//! let stream = status.take_stream()
//! .unwrap() // Unwrap is fine here as we haven't called it before
//! .for_each(|line| { println!("{}", line); Ok(()) });
//!
//! // Next, let's check on the result of our command.
//! // `CommandStatus` is a `Future` that represents the command's
//! // exit status. We can use the `map` combinator to print it out.*
//! //
//! // * Same caveat as above RE: printing. This is a safe
//! // place.
//! let status = status.map(|s| println!("This command {} {}",
//! if s.success { "succeeded" } else { "failed" },
//! if let Some(e) = s.code { format!("with code {}", e) } else { String::new() }));
//!
//! // Finally, we need to return these two `Future`s (stream and
//! // status) so that they will be executed by the event loop. Sadly
//! // we can't return them both as a tuple, so we use the join
//! // combinator instead to turn them into a single `Future`. Easy!
//! stream.join(status)
//! })
//! });
//!
//! // This line is part of `tokio-core` and is used to execute the
//! // chain of futures you've created above. You'll need to call
//! // `core.run()` for each host you interact with, otherwise your
//! // project will not run at all!
//! core.run(host).unwrap();
//!}
|
#![recursion_limit = "1024"]
extern crate bytes;
extern crate erased_serde;
#[macro_use] extern crate error_chain;
extern crate futures;
extern crate hostname;
#[macro_use] extern crate intecture_core_derive;
extern crate ipnetwork;
#[macro_use] extern crate log;
extern crate pnet;
extern crate regex;
extern crate serde;
#[macro_use] extern crate serde_derive;
extern crate serde_json;
extern crate tokio_core;
extern crate tokio_io;
extern crate tokio_process;
extern crate tokio_proto;
extern crate tokio_service;
extern crate users;
pub mod command;
pub mod errors;
pub mod host;
mod message;
pub mod prelude {
//! The API prelude.
pub use command::{self, Command};
pub use host::Host;
pub use host::remote::{self, Plain};
pub use host::local::{self, Local};
pub use package::{self, Package};
pub use service::{self, Service};
pub use telemetry::{self, Cpu, FsMount, LinuxDistro, Os, OsFamily, OsPlatform, Telemetry};
}
pub mod package;
mod request;
pub mod service;
mod target;
pub mod telemetry;
#[doc(hidden)]
pub use message::{FromMessage, InMessage};
#[doc(hidden)]
pub use request::Request;
|
//!```
|
random_line_split
|
issue-14456.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::env;
use std::io::prelude::*;
use std::io;
use std::process::{Command, Stdio};
fn main() {
let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "child"
|
test();
}
fn child() {
writeln!(&mut io::stdout(), "foo").unwrap();
writeln!(&mut io::stderr(), "bar").unwrap();
let mut stdin = io::stdin();
let mut s = String::new();
stdin.lock().read_line(&mut s).unwrap();
assert_eq!(s.len(), 0);
}
fn test() {
let args: Vec<String> = env::args().collect();
let mut p = Command::new(&args[0]).arg("child")
.stdin(Stdio::capture())
.stdout(Stdio::capture())
.stderr(Stdio::capture())
.spawn().unwrap();
assert!(p.wait().unwrap().success());
}
|
{
return child()
}
|
conditional_block
|
issue-14456.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::env;
use std::io::prelude::*;
use std::io;
use std::process::{Command, Stdio};
fn main() {
let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "child" {
return child()
}
test();
}
fn child() {
|
writeln!(&mut io::stderr(), "bar").unwrap();
let mut stdin = io::stdin();
let mut s = String::new();
stdin.lock().read_line(&mut s).unwrap();
assert_eq!(s.len(), 0);
}
fn test() {
let args: Vec<String> = env::args().collect();
let mut p = Command::new(&args[0]).arg("child")
.stdin(Stdio::capture())
.stdout(Stdio::capture())
.stderr(Stdio::capture())
.spawn().unwrap();
assert!(p.wait().unwrap().success());
}
|
writeln!(&mut io::stdout(), "foo").unwrap();
|
random_line_split
|
issue-14456.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::env;
use std::io::prelude::*;
use std::io;
use std::process::{Command, Stdio};
fn
|
() {
let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "child" {
return child()
}
test();
}
fn child() {
writeln!(&mut io::stdout(), "foo").unwrap();
writeln!(&mut io::stderr(), "bar").unwrap();
let mut stdin = io::stdin();
let mut s = String::new();
stdin.lock().read_line(&mut s).unwrap();
assert_eq!(s.len(), 0);
}
fn test() {
let args: Vec<String> = env::args().collect();
let mut p = Command::new(&args[0]).arg("child")
.stdin(Stdio::capture())
.stdout(Stdio::capture())
.stderr(Stdio::capture())
.spawn().unwrap();
assert!(p.wait().unwrap().success());
}
|
main
|
identifier_name
|
issue-14456.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::env;
use std::io::prelude::*;
use std::io;
use std::process::{Command, Stdio};
fn main() {
let args: Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "child" {
return child()
}
test();
}
fn child()
|
fn test() {
let args: Vec<String> = env::args().collect();
let mut p = Command::new(&args[0]).arg("child")
.stdin(Stdio::capture())
.stdout(Stdio::capture())
.stderr(Stdio::capture())
.spawn().unwrap();
assert!(p.wait().unwrap().success());
}
|
{
writeln!(&mut io::stdout(), "foo").unwrap();
writeln!(&mut io::stderr(), "bar").unwrap();
let mut stdin = io::stdin();
let mut s = String::new();
stdin.lock().read_line(&mut s).unwrap();
assert_eq!(s.len(), 0);
}
|
identifier_body
|
classify.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*
Predicates on exprs and stmts that the pretty-printer and parser use
*/
use ast;
use codemap;
// does this expression require a semicolon to be treated
// as a statement? The negation of this: 'can this expression
// be used as a statement without a semicolon' -- is used
// as an early-bail-out in the parser so that, for instance,
// 'if true {...} else {...}
// |x| 5 '
// isn't parsed as (if true {...} else {...} | x) | 5
pub fn expr_requires_semi_to_be_stmt(e: @ast::expr) -> bool {
match e.node {
ast::expr_if(*)
| ast::expr_match(*)
| ast::expr_block(_)
| ast::expr_while(*)
| ast::expr_loop(*)
| ast::expr_call(_, _, ast::DoSugar)
| ast::expr_call(_, _, ast::ForSugar)
| ast::expr_method_call(_, _, _, _, ast::DoSugar)
| ast::expr_method_call(_, _, _, _, ast::ForSugar) => false,
_ => true
}
}
pub fn expr_is_simple_block(e: @ast::expr) -> bool {
match e.node {
ast::expr_block(
codemap::spanned {
node: ast::blk_ { rules: ast::default_blk, _ }, _ }
) => true,
_ => false
}
}
// this statement requires a semicolon after it.
// note that in one case (stmt_semi), we've already
// seen the semicolon, and thus don't need another.
pub fn
|
(stmt: &ast::stmt) -> bool {
return match stmt.node {
ast::stmt_decl(d, _) => {
match d.node {
ast::decl_local(_) => true,
ast::decl_item(_) => false
}
}
ast::stmt_expr(e, _) => { expr_requires_semi_to_be_stmt(e) }
ast::stmt_semi(*) => { false }
ast::stmt_mac(*) => { false }
}
}
|
stmt_ends_with_semi
|
identifier_name
|
classify.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*
Predicates on exprs and stmts that the pretty-printer and parser use
*/
use ast;
use codemap;
// does this expression require a semicolon to be treated
// as a statement? The negation of this: 'can this expression
// be used as a statement without a semicolon' -- is used
// as an early-bail-out in the parser so that, for instance,
// 'if true {...} else {...}
// |x| 5 '
// isn't parsed as (if true {...} else {...} | x) | 5
pub fn expr_requires_semi_to_be_stmt(e: @ast::expr) -> bool
|
pub fn expr_is_simple_block(e: @ast::expr) -> bool {
match e.node {
ast::expr_block(
codemap::spanned {
node: ast::blk_ { rules: ast::default_blk, _ }, _ }
) => true,
_ => false
}
}
// this statement requires a semicolon after it.
// note that in one case (stmt_semi), we've already
// seen the semicolon, and thus don't need another.
pub fn stmt_ends_with_semi(stmt: &ast::stmt) -> bool {
return match stmt.node {
ast::stmt_decl(d, _) => {
match d.node {
ast::decl_local(_) => true,
ast::decl_item(_) => false
}
}
ast::stmt_expr(e, _) => { expr_requires_semi_to_be_stmt(e) }
ast::stmt_semi(*) => { false }
ast::stmt_mac(*) => { false }
}
}
|
{
match e.node {
ast::expr_if(*)
| ast::expr_match(*)
| ast::expr_block(_)
| ast::expr_while(*)
| ast::expr_loop(*)
| ast::expr_call(_, _, ast::DoSugar)
| ast::expr_call(_, _, ast::ForSugar)
| ast::expr_method_call(_, _, _, _, ast::DoSugar)
| ast::expr_method_call(_, _, _, _, ast::ForSugar) => false,
_ => true
}
}
|
identifier_body
|
classify.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*
Predicates on exprs and stmts that the pretty-printer and parser use
*/
use ast;
use codemap;
// does this expression require a semicolon to be treated
// as a statement? The negation of this: 'can this expression
// be used as a statement without a semicolon' -- is used
// as an early-bail-out in the parser so that, for instance,
// 'if true {...} else {...}
// |x| 5 '
// isn't parsed as (if true {...} else {...} | x) | 5
pub fn expr_requires_semi_to_be_stmt(e: @ast::expr) -> bool {
match e.node {
ast::expr_if(*)
| ast::expr_match(*)
| ast::expr_block(_)
| ast::expr_while(*)
| ast::expr_loop(*)
| ast::expr_call(_, _, ast::DoSugar)
| ast::expr_call(_, _, ast::ForSugar)
|
| ast::expr_method_call(_, _, _, _, ast::ForSugar) => false,
_ => true
}
}
pub fn expr_is_simple_block(e: @ast::expr) -> bool {
match e.node {
ast::expr_block(
codemap::spanned {
node: ast::blk_ { rules: ast::default_blk, _ }, _ }
) => true,
_ => false
}
}
// this statement requires a semicolon after it.
// note that in one case (stmt_semi), we've already
// seen the semicolon, and thus don't need another.
pub fn stmt_ends_with_semi(stmt: &ast::stmt) -> bool {
return match stmt.node {
ast::stmt_decl(d, _) => {
match d.node {
ast::decl_local(_) => true,
ast::decl_item(_) => false
}
}
ast::stmt_expr(e, _) => { expr_requires_semi_to_be_stmt(e) }
ast::stmt_semi(*) => { false }
ast::stmt_mac(*) => { false }
}
}
|
| ast::expr_method_call(_, _, _, _, ast::DoSugar)
|
random_line_split
|
token.rs
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use super::util::encode_u32;
use core::convert::From;
use core::ops::Deref;
/// Type for holding the value of a CoAP message token.
#[derive(Debug, Eq, PartialEq, Hash, Copy, Clone, Ord, PartialOrd)]
pub struct MsgToken {
len: u8,
bytes: [u8; 8],
}
impl MsgToken {
/// Constant representing an empty token.
pub const EMPTY: MsgToken = MsgToken {
len: 0u8,
bytes: [0; 8],
};
/// Creates a new token from the given byte slice.
pub fn new(x: &[u8]) -> MsgToken {
MsgToken::from(x)
|
}
/// Returns true if the length of this token is zero.
pub fn is_empty(&self) -> bool {
self.len == 0
}
/// Returns a byte slice containing this token.
pub fn as_bytes(&self) -> &[u8] {
&self.bytes[..self.len as usize]
}
}
impl std::fmt::Display for MsgToken {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for b in self.as_bytes() {
write!(f, "{:02X}", b)?;
}
Ok(())
}
}
impl Default for MsgToken {
fn default() -> Self {
MsgToken::EMPTY
}
}
impl Deref for MsgToken {
type Target = [u8];
fn deref(&self) -> &Self::Target {
self.as_bytes()
}
}
impl core::cmp::PartialEq<[u8]> for MsgToken {
fn eq(&self, other: &[u8]) -> bool {
self.as_bytes() == other
}
}
impl core::convert::From<u32> for MsgToken {
fn from(x: u32) -> Self {
let mut bytes = [0u8; 8];
let len = encode_u32(x, &mut bytes).len();
MsgToken {
len: len as u8,
bytes,
}
}
}
impl core::convert::From<i32> for MsgToken {
fn from(x: i32) -> Self {
core::convert::Into::into(x as u32)
}
}
impl core::convert::From<u16> for MsgToken {
fn from(x: u16) -> Self {
core::convert::Into::into(x as u32)
}
}
impl core::convert::From<&[u8]> for MsgToken {
// Note: this will panic if x is too big.
fn from(x: &[u8]) -> Self {
let mut bytes = [0u8; 8];
let len = x.len();
bytes[..len].copy_from_slice(x);
MsgToken {
len: len as u8,
bytes,
}
}
}
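
// Usage sketch (not part of the original file): exercising only the
// constructors and accessors defined above.
#[allow(dead_code)]
fn msg_token_sketch() {
    // The empty token has zero length.
    assert!(MsgToken::EMPTY.is_empty());
    assert_eq!(MsgToken::EMPTY.len(), 0);

    // Tokens built from raw bytes preserve length and contents.
    let token = MsgToken::new(&[0xDEu8, 0xAD]);
    assert_eq!(token.len(), 2);
    assert_eq!(token.as_bytes(), &[0xDEu8, 0xAD][..]);

    // `Display` renders each byte as two upper-case hex digits.
    assert_eq!(format!("{}", token), "DEAD");
}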
|
}
/// Returns the length of this token.
pub fn len(&self) -> usize {
self.len as usize
|
random_line_split
|
token.rs
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use super::util::encode_u32;
use core::convert::From;
use core::ops::Deref;
/// Type for holding the value of a CoAP message token.
#[derive(Debug, Eq, PartialEq, Hash, Copy, Clone, Ord, PartialOrd)]
pub struct MsgToken {
len: u8,
bytes: [u8; 8],
}
impl MsgToken {
/// Constant representing an empty token.
pub const EMPTY: MsgToken = MsgToken {
len: 0u8,
bytes: [0; 8],
};
/// Creates a new token from the given byte slice.
pub fn
|
(x: &[u8]) -> MsgToken {
MsgToken::from(x)
}
/// Returns the length of this token.
pub fn len(&self) -> usize {
self.len as usize
}
/// Returns true if the length of this token is zero.
pub fn is_empty(&self) -> bool {
self.len == 0
}
/// Returns a byte slice containing this token.
pub fn as_bytes(&self) -> &[u8] {
&self.bytes[..self.len as usize]
}
}
impl std::fmt::Display for MsgToken {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for b in self.as_bytes() {
write!(f, "{:02X}", b)?;
}
Ok(())
}
}
impl Default for MsgToken {
fn default() -> Self {
MsgToken::EMPTY
}
}
impl Deref for MsgToken {
type Target = [u8];
fn deref(&self) -> &Self::Target {
self.as_bytes()
}
}
impl core::cmp::PartialEq<[u8]> for MsgToken {
fn eq(&self, other: &[u8]) -> bool {
self.as_bytes() == other
}
}
impl core::convert::From<u32> for MsgToken {
fn from(x: u32) -> Self {
let mut bytes = [0u8; 8];
let len = encode_u32(x, &mut bytes).len();
MsgToken {
len: len as u8,
bytes,
}
}
}
impl core::convert::From<i32> for MsgToken {
fn from(x: i32) -> Self {
core::convert::Into::into(x as u32)
}
}
impl core::convert::From<u16> for MsgToken {
fn from(x: u16) -> Self {
core::convert::Into::into(x as u32)
}
}
impl core::convert::From<&[u8]> for MsgToken {
// Note: this will panic if x is too big.
fn from(x: &[u8]) -> Self {
let mut bytes = [0u8; 8];
let len = x.len();
bytes[..len].copy_from_slice(x);
MsgToken {
len: len as u8,
bytes,
}
}
}
|
new
|
identifier_name
|
token.rs
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use super::util::encode_u32;
use core::convert::From;
use core::ops::Deref;
/// Type for holding the value of a CoAP message token.
#[derive(Debug, Eq, PartialEq, Hash, Copy, Clone, Ord, PartialOrd)]
pub struct MsgToken {
len: u8,
bytes: [u8; 8],
}
impl MsgToken {
/// Constant representing an empty token.
pub const EMPTY: MsgToken = MsgToken {
len: 0u8,
bytes: [0; 8],
};
/// Creates a new token from the given byte slice.
pub fn new(x: &[u8]) -> MsgToken {
MsgToken::from(x)
}
/// Returns the length of this token.
pub fn len(&self) -> usize {
self.len as usize
}
/// Returns true if the length of this token is zero.
pub fn is_empty(&self) -> bool {
self.len == 0
}
/// Returns a byte slice containing this token.
pub fn as_bytes(&self) -> &[u8] {
&self.bytes[..self.len as usize]
}
}
impl std::fmt::Display for MsgToken {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for b in self.as_bytes() {
write!(f, "{:02X}", b)?;
}
Ok(())
}
}
impl Default for MsgToken {
fn default() -> Self {
MsgToken::EMPTY
}
}
impl Deref for MsgToken {
type Target = [u8];
fn deref(&self) -> &Self::Target {
self.as_bytes()
}
}
impl core::cmp::PartialEq<[u8]> for MsgToken {
fn eq(&self, other: &[u8]) -> bool {
self.as_bytes() == other
}
}
impl core::convert::From<u32> for MsgToken {
fn from(x: u32) -> Self
|
}
impl core::convert::From<i32> for MsgToken {
fn from(x: i32) -> Self {
core::convert::Into::into(x as u32)
}
}
impl core::convert::From<u16> for MsgToken {
fn from(x: u16) -> Self {
core::convert::Into::into(x as u32)
}
}
impl core::convert::From<&[u8]> for MsgToken {
// Note: this will panic if x is too big.
fn from(x: &[u8]) -> Self {
let mut bytes = [0u8; 8];
let len = x.len();
bytes[..len].copy_from_slice(x);
MsgToken {
len: len as u8,
bytes,
}
}
}
|
{
let mut bytes = [0u8; 8];
let len = encode_u32(x, &mut bytes).len();
MsgToken {
len: len as u8,
bytes,
}
}
|
identifier_body
|
main.rs
|
// Copyright (c) 2015 by Stacy Prowell. All rights reserved.
//
// Licensed under the BSD 2-Clause license. See the file LICENSE
// that is part of this distribution. This file may not be copied,
// modified, or distributed except according to those terms.extern
extern crate linenoise;
extern crate getopts;
extern crate num;
extern crate relision;
use getopts::Options;
/// The REPL.
fn repl() {
let history_filename = relision::get_config_dir() + ("/repl.history");
linenoise::history_load(&history_filename);
loop {
let val = linenoise::input("e> ");
match val {
None => {
linenoise::history_save(&history_filename);
break;
}
Some(input) => {
println!("{}", input);
linenoise::history_add(&input);
if input == "clear" {
linenoise::clear_screen();
}
}
} // Match.
} // REPL loop.
}
/// Print the command line help. First print the prototype for using the
/// command, and then print help about using the switches.
/// progname: The program name.
/// switches: The allowed command line switch data structure.
fn print_usage(progname: &str, switches: Options) {
let prototype = format!("Usage: {} [switches...] [elision files...]", progname);
print!("{}", switches.usage(&prototype));
}
/// Entry point when run from the prompt.
fn main() {
println!("Running on {}.", relision::get_platform());
println!("Configuration stored at: {}.", relision::get_config_dir());
// Get the command line arguments.
let args = std::env::args().collect::<Vec<String>>();
let me = args[0].clone();
// Specify the switches this wrapper takes.
let mut switches = getopts::Options::new();
switches.optflag("h", "help", "Print this command line help.");
// Now process all command line switches. The "tail" removes the program
// name.
let matches = match switches.parse(args) {
Ok(mat) => mat,
Err(fail) => {
println!("ERROR parsing command line arguments:");
println!(" {}", fail.to_string());
return;
}
};
if matches.opt_present("h") {
print_usage(&me, switches);
return;
}
|
// Now run the REPL.
repl();
}
|
random_line_split
|
|
main.rs
|
// Copyright (c) 2015 by Stacy Prowell. All rights reserved.
//
// Licensed under the BSD 2-Clause license. See the file LICENSE
// that is part of this distribution. This file may not be copied,
// modified, or distributed except according to those terms.extern
extern crate linenoise;
extern crate getopts;
extern crate num;
extern crate relision;
use getopts::Options;
/// The REPL.
fn repl()
|
/// Print the command line help. First print the prototype for using the
/// command, and then print help about using the switches.
/// progname: The program name.
/// switches: The allowed command line switch data structure.
fn print_usage(progname: &str, switches: Options) {
let prototype = format!("Usage: {} [switches...] [elision files...]", progname);
print!("{}", switches.usage(&prototype));
}
/// Entry point when run from the prompt.
fn main() {
println!("Running on {}.", relision::get_platform());
println!("Configuration stored at: {}.", relision::get_config_dir());
// Get the command line arguments.
let args = std::env::args().collect::<Vec<String>>();
let me = args[0].clone();
// Specify the switches this wrapper takes.
let mut switches = getopts::Options::new();
switches.optflag("h", "help", "Print this command line help.");
// Now process all command line switches. The "tail" removes the program
// name.
let matches = match switches.parse(args) {
Ok(mat) => mat,
Err(fail) => {
println!("ERROR parsing command line arguments:");
println!(" {}", fail.to_string());
return;
}
};
if matches.opt_present("h") {
print_usage(&me, switches);
return;
}
// Now run the REPL.
repl();
}
|
{
let history_filename = relision::get_config_dir() + ("/repl.history");
linenoise::history_load(&history_filename);
loop {
let val = linenoise::input("e> ");
match val {
None => {
linenoise::history_save(&history_filename);
break;
}
Some(input) => {
println!("{}", input);
linenoise::history_add(&input);
if input == "clear" {
linenoise::clear_screen();
}
}
} // Match.
} // REPL loop.
}
|
identifier_body
|
main.rs
|
// Copyright (c) 2015 by Stacy Prowell. All rights reserved.
//
// Licensed under the BSD 2-Clause license. See the file LICENSE
// that is part of this distribution. This file may not be copied,
// modified, or distributed except according to those terms.extern
extern crate linenoise;
extern crate getopts;
extern crate num;
extern crate relision;
use getopts::Options;
/// The REPL.
fn repl() {
let history_filename = relision::get_config_dir() + ("/repl.history");
linenoise::history_load(&history_filename);
loop {
let val = linenoise::input("e> ");
match val {
None => {
linenoise::history_save(&history_filename);
break;
}
Some(input) => {
println!("{}", input);
linenoise::history_add(&input);
if input == "clear" {
linenoise::clear_screen();
}
}
} // Match.
} // REPL loop.
}
/// Print the command line help. First print the prototype for using the
/// command, and then print help about using the switches.
/// progname: The program name.
/// switches: The allowed command line switch data structure.
fn
|
(progname: &str, switches: Options) {
let prototype = format!("Usage: {} [switches...] [elision files...]", progname);
print!("{}", switches.usage(&prototype));
}
/// Entry point when run from the prompt.
fn main() {
println!("Running on {}.", relision::get_platform());
println!("Configuration stored at: {}.", relision::get_config_dir());
// Get the command line arguments.
let args = std::env::args().collect::<Vec<String>>();
let me = args[0].clone();
// Specify the switches this wrapper takes.
let mut switches = getopts::Options::new();
switches.optflag("h", "help", "Print this command line help.");
// Now process all command line switches. The "tail" removes the program
// name.
let matches = match switches.parse(args) {
Ok(mat) => mat,
Err(fail) => {
println!("ERROR parsing command line arguments:");
println!(" {}", fail.to_string());
return;
}
};
if matches.opt_present("h") {
print_usage(&me, switches);
return;
}
// Now run the REPL.
repl();
}
|
print_usage
|
identifier_name
|
mod.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::HashMap;
use std::io::{Read, Write};
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
use std::ops::Deref;
use std::fmt::Debug;
use std::fmt::Error as FmtError;
|
use super::FileAbstractionInstance;
use super::Drain;
use super::InMemoryFileAbstraction;
use store::Entry;
use file_abstraction::iter::PathIterator;
pub mod mapper;
pub mod out;
use self::mapper::Mapper;
use self::out::StdoutFileAbstraction;
// Because this is not exported in super::inmemory;
type Backend = Arc<Mutex<RefCell<HashMap<PathBuf, Entry>>>>;
pub struct StdIoFileAbstraction<W: Write, M: Mapper>(StdoutFileAbstraction<W, M>);
impl<W, M> StdIoFileAbstraction<W, M>
where M: Mapper,
W: Write
{
pub fn new<R: Read>(in_stream: &mut R, out_stream: Rc<RefCell<W>>, mapper: M) -> Result<StdIoFileAbstraction<W, M>, SE> {
StdoutFileAbstraction::new(out_stream, mapper)
.and_then(|out| {
let _ = out.backend()
.lock()
.map_err(|_| SE::from_kind(SEK::LockError))
.map(|mut mtx| out.mapper().read_to_fs(in_stream, mtx.get_mut()))?;
Ok(StdIoFileAbstraction(out))
})
}
pub fn backend(&self) -> &Backend {
self.0.backend()
}
}
impl<W, M> Debug for StdIoFileAbstraction<W, M>
where M: Mapper,
W: Write
{
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
write!(f, "StdIoFileAbstraction({:?}", self.0)
}
}
impl<W, M> Deref for StdIoFileAbstraction<W, M>
where M: Mapper,
W: Write
{
type Target = StdoutFileAbstraction<W, M>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
// basically #[derive(FileAbstraction)]
impl<W: Write, M: Mapper> FileAbstraction for StdIoFileAbstraction<W, M> {
fn remove_file(&self, path: &PathBuf) -> Result<(), SE> {
self.0.remove_file(path)
}
fn copy(&self, from: &PathBuf, to: &PathBuf) -> Result<(), SE> {
self.0.copy(from, to)
}
fn rename(&self, from: &PathBuf, to: &PathBuf) -> Result<(), SE> {
self.0.rename(from, to)
}
fn create_dir_all(&self, pb: &PathBuf) -> Result<(), SE> {
self.0.create_dir_all(pb)
}
fn new_instance(&self, p: PathBuf) -> Box<FileAbstractionInstance> {
self.0.new_instance(p)
}
fn exists(&self, p: &PathBuf) -> Result<bool, SE> {
self.0.exists(p)
}
fn is_file(&self, p: &PathBuf) -> Result<bool, SE> {
self.0.is_file(p)
}
fn drain(&self) -> Result<Drain, SE> {
self.0.drain()
}
fn fill(&mut self, d: Drain) -> Result<(), SE> {
self.0.fill(d)
}
fn pathes_recursively(&self, basepath: PathBuf) -> Result<PathIterator, SE> {
self.0.pathes_recursively(basepath)
}
}
|
use std::fmt::Formatter;
use error::StoreErrorKind as SEK;
use error::StoreError as SE;
use super::FileAbstraction;
|
random_line_split
|
mod.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::HashMap;
use std::io::{Read, Write};
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
use std::ops::Deref;
use std::fmt::Debug;
use std::fmt::Error as FmtError;
use std::fmt::Formatter;
use error::StoreErrorKind as SEK;
use error::StoreError as SE;
use super::FileAbstraction;
use super::FileAbstractionInstance;
use super::Drain;
use super::InMemoryFileAbstraction;
use store::Entry;
use file_abstraction::iter::PathIterator;
pub mod mapper;
pub mod out;
use self::mapper::Mapper;
use self::out::StdoutFileAbstraction;
// Because this is not exported in super::inmemory;
type Backend = Arc<Mutex<RefCell<HashMap<PathBuf, Entry>>>>;
pub struct StdIoFileAbstraction<W: Write, M: Mapper>(StdoutFileAbstraction<W, M>);
impl<W, M> StdIoFileAbstraction<W, M>
where M: Mapper,
W: Write
{
pub fn new<R: Read>(in_stream: &mut R, out_stream: Rc<RefCell<W>>, mapper: M) -> Result<StdIoFileAbstraction<W, M>, SE> {
StdoutFileAbstraction::new(out_stream, mapper)
.and_then(|out| {
let _ = out.backend()
.lock()
.map_err(|_| SE::from_kind(SEK::LockError))
.map(|mut mtx| out.mapper().read_to_fs(in_stream, mtx.get_mut()))?;
Ok(StdIoFileAbstraction(out))
})
}
pub fn backend(&self) -> &Backend
|
}
impl<W, M> Debug for StdIoFileAbstraction<W, M>
where M: Mapper,
W: Write
{
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
write!(f, "StdIoFileAbstraction({:?}", self.0)
}
}
impl<W, M> Deref for StdIoFileAbstraction<W, M>
where M: Mapper,
W: Write
{
type Target = StdoutFileAbstraction<W, M>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
// basically #[derive(FileAbstraction)]
impl<W: Write, M: Mapper> FileAbstraction for StdIoFileAbstraction<W, M> {
fn remove_file(&self, path: &PathBuf) -> Result<(), SE> {
self.0.remove_file(path)
}
fn copy(&self, from: &PathBuf, to: &PathBuf) -> Result<(), SE> {
self.0.copy(from, to)
}
fn rename(&self, from: &PathBuf, to: &PathBuf) -> Result<(), SE> {
self.0.rename(from, to)
}
fn create_dir_all(&self, pb: &PathBuf) -> Result<(), SE> {
self.0.create_dir_all(pb)
}
fn new_instance(&self, p: PathBuf) -> Box<FileAbstractionInstance> {
self.0.new_instance(p)
}
fn exists(&self, p: &PathBuf) -> Result<bool, SE> {
self.0.exists(p)
}
fn is_file(&self, p: &PathBuf) -> Result<bool, SE> {
self.0.is_file(p)
}
fn drain(&self) -> Result<Drain, SE> {
self.0.drain()
}
fn fill(&mut self, d: Drain) -> Result<(), SE> {
self.0.fill(d)
}
fn pathes_recursively(&self, basepath: PathBuf) -> Result<PathIterator, SE> {
self.0.pathes_recursively(basepath)
}
}
|
{
self.0.backend()
}
|
identifier_body
|
mod.rs
|
//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::HashMap;
use std::io::{Read, Write};
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
use std::ops::Deref;
use std::fmt::Debug;
use std::fmt::Error as FmtError;
use std::fmt::Formatter;
use error::StoreErrorKind as SEK;
use error::StoreError as SE;
use super::FileAbstraction;
use super::FileAbstractionInstance;
use super::Drain;
use super::InMemoryFileAbstraction;
use store::Entry;
use file_abstraction::iter::PathIterator;
pub mod mapper;
pub mod out;
use self::mapper::Mapper;
use self::out::StdoutFileAbstraction;
// Because this is not exported in super::inmemory;
type Backend = Arc<Mutex<RefCell<HashMap<PathBuf, Entry>>>>;
pub struct StdIoFileAbstraction<W: Write, M: Mapper>(StdoutFileAbstraction<W, M>);
impl<W, M> StdIoFileAbstraction<W, M>
where M: Mapper,
W: Write
{
pub fn new<R: Read>(in_stream: &mut R, out_stream: Rc<RefCell<W>>, mapper: M) -> Result<StdIoFileAbstraction<W, M>, SE> {
StdoutFileAbstraction::new(out_stream, mapper)
.and_then(|out| {
let _ = out.backend()
.lock()
.map_err(|_| SE::from_kind(SEK::LockError))
.map(|mut mtx| out.mapper().read_to_fs(in_stream, mtx.get_mut()))?;
Ok(StdIoFileAbstraction(out))
})
}
pub fn backend(&self) -> &Backend {
self.0.backend()
}
}
impl<W, M> Debug for StdIoFileAbstraction<W, M>
where M: Mapper,
W: Write
{
fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> {
write!(f, "StdIoFileAbstraction({:?}", self.0)
}
}
impl<W, M> Deref for StdIoFileAbstraction<W, M>
where M: Mapper,
W: Write
{
type Target = StdoutFileAbstraction<W, M>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
// basically #[derive(FileAbstraction)]
impl<W: Write, M: Mapper> FileAbstraction for StdIoFileAbstraction<W, M> {
fn remove_file(&self, path: &PathBuf) -> Result<(), SE> {
self.0.remove_file(path)
}
fn copy(&self, from: &PathBuf, to: &PathBuf) -> Result<(), SE> {
self.0.copy(from, to)
}
fn
|
(&self, from: &PathBuf, to: &PathBuf) -> Result<(), SE> {
self.0.rename(from, to)
}
fn create_dir_all(&self, pb: &PathBuf) -> Result<(), SE> {
self.0.create_dir_all(pb)
}
fn new_instance(&self, p: PathBuf) -> Box<FileAbstractionInstance> {
self.0.new_instance(p)
}
fn exists(&self, p: &PathBuf) -> Result<bool, SE> {
self.0.exists(p)
}
fn is_file(&self, p: &PathBuf) -> Result<bool, SE> {
self.0.is_file(p)
}
fn drain(&self) -> Result<Drain, SE> {
self.0.drain()
}
fn fill(&mut self, d: Drain) -> Result<(), SE> {
self.0.fill(d)
}
fn pathes_recursively(&self, basepath: PathBuf) -> Result<PathIterator, SE> {
self.0.pathes_recursively(basepath)
}
}
|
rename
|
identifier_name
|
helpers.rs
|
/* #![deny(warnings)] */
extern crate clap;
extern crate fern;
extern crate futures;
extern crate futures_cpupool;
extern crate git2;
extern crate josh;
extern crate lazy_static;
extern crate log;
extern crate regex;
extern crate tempdir;
extern crate tokio_core;
use josh::Shell;
use tempdir::TempDir;
pub struct TestRepo {
pub repo: git2::Repository,
pub shell: Shell,
td: TempDir,
}
impl TestRepo {
pub fn new() -> Self {
let td = TempDir::new("cgh_test").expect("folder cgh_test should be created");
let repo = git2::Repository::init(td.path()).expect("init should succeed");
let tr = TestRepo {
repo: repo,
shell: Shell {
cwd: td.path().to_path_buf(),
},
td: td,
};
tr.shell.command("git config user.name test");
tr.shell.command("git config user.email [email protected]");
return tr;
}
pub fn commit(&self, message: &str) -> String {
self.shell
.command(&format!("git commit -m \"{}\"", message));
let (stdout, _) = self.shell.command("git rev-parse HEAD");
stdout
}
pub fn add_file(&self, filename: &str) {
self.shell
.command(&format!("mkdir -p $(dirname {})", filename));
self.shell
.command(&format!("echo test_content > {}", filename));
self.shell.command(&format!("git add {}", filename));
}
pub fn
|
(&self, r: &str) -> String {
let (stdout, _) = self.shell.command(&format!("git rev-parse {}", r));
stdout
}
}
|
rev
|
identifier_name
|
helpers.rs
|
/* #![deny(warnings)] */
extern crate clap;
extern crate fern;
extern crate futures;
extern crate futures_cpupool;
extern crate git2;
extern crate josh;
extern crate lazy_static;
extern crate log;
extern crate regex;
extern crate tempdir;
extern crate tokio_core;
use josh::Shell;
use tempdir::TempDir;
pub struct TestRepo {
pub repo: git2::Repository,
pub shell: Shell,
td: TempDir,
}
impl TestRepo {
pub fn new() -> Self {
let td = TempDir::new("cgh_test").expect("folder cgh_test should be created");
let repo = git2::Repository::init(td.path()).expect("init should succeed");
let tr = TestRepo {
repo: repo,
shell: Shell {
cwd: td.path().to_path_buf(),
},
td: td,
};
tr.shell.command("git config user.name test");
tr.shell.command("git config user.email [email protected]");
|
return tr;
}
pub fn commit(&self, message: &str) -> String {
self.shell
.command(&format!("git commit -m \"{}\"", message));
let (stdout, _) = self.shell.command("git rev-parse HEAD");
stdout
}
pub fn add_file(&self, filename: &str) {
self.shell
.command(&format!("mkdir -p $(dirname {})", filename));
self.shell
.command(&format!("echo test_content > {}", filename));
self.shell.command(&format!("git add {}", filename));
}
pub fn rev(&self, r: &str) -> String {
let (stdout, _) = self.shell.command(&format!("git rev-parse {}", r));
stdout
}
}
|
random_line_split
|
|
helpers.rs
|
/* #![deny(warnings)] */
extern crate clap;
extern crate fern;
extern crate futures;
extern crate futures_cpupool;
extern crate git2;
extern crate josh;
extern crate lazy_static;
extern crate log;
extern crate regex;
extern crate tempdir;
extern crate tokio_core;
use josh::Shell;
use tempdir::TempDir;
pub struct TestRepo {
pub repo: git2::Repository,
pub shell: Shell,
td: TempDir,
}
impl TestRepo {
pub fn new() -> Self {
let td = TempDir::new("cgh_test").expect("folder cgh_test should be created");
let repo = git2::Repository::init(td.path()).expect("init should succeed");
let tr = TestRepo {
repo: repo,
shell: Shell {
cwd: td.path().to_path_buf(),
},
td: td,
};
tr.shell.command("git config user.name test");
tr.shell.command("git config user.email [email protected]");
return tr;
}
pub fn commit(&self, message: &str) -> String
|
pub fn add_file(&self, filename: &str) {
self.shell
.command(&format!("mkdir -p $(dirname {})", filename));
self.shell
.command(&format!("echo test_content > {}", filename));
self.shell.command(&format!("git add {}", filename));
}
pub fn rev(&self, r: &str) -> String {
let (stdout, _) = self.shell.command(&format!("git rev-parse {}", r));
stdout
}
}
|
{
self.shell
.command(&format!("git commit -m \"{}\"", message));
let (stdout, _) = self.shell.command("git rev-parse HEAD");
stdout
}
|
identifier_body
|
wrap.rs
|
// Copyright 2018 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(test)]
extern crate test;
extern crate xi_core_lib as xi_core;
extern crate xi_rope;
use crate::xi_core::line_offset::LineOffset;
use crate::xi_core::tabs::BufferId;
use crate::xi_core::view::View;
use test::Bencher;
use xi_rope::Rope;
fn build_short_lines(n: usize) -> String {
let line =
"See it, the beautiful ball Poised in the toyshop window, Rounder than sun or moon.\n";
let mut s = String::new();
for _ in 0..n {
s += line;
}
s
}
#[bench]
fn line_of_offset_no_breaks(b: &mut Bencher) {
let text = Rope::from(build_short_lines(10_000));
let view = View::new(1.into(), BufferId::new(2));
let total_bytes = text.len();
b.iter(|| {
for i in 0..total_bytes {
|
#[bench]
fn line_of_offset_col_breaks(b: &mut Bencher) {
let text = Rope::from(build_short_lines(10_000));
let mut view = View::new(1.into(), BufferId::new(2));
view.debug_force_rewrap_cols(&text, 20);
let total_bytes = text.len();
b.iter(|| {
for i in 0..total_bytes {
let _line = view.line_of_offset(&text, i);
}
})
}
#[bench]
fn offset_of_line_no_breaks(b: &mut Bencher) {
let text = Rope::from(build_short_lines(10_000));
let view = View::new(1.into(), BufferId::new(2));
b.iter(|| {
for i in 0..10_000 {
let _line = view.offset_of_line(&text, i);
}
})
}
#[bench]
fn offset_of_line_col_breaks(b: &mut Bencher) {
let text = Rope::from(build_short_lines(10_000));
let mut view = View::new(1.into(), BufferId::new(2));
view.debug_force_rewrap_cols(&text, 20);
b.iter(|| {
for i in 0..10_000 {
let _line = view.offset_of_line(&text, i);
}
})
}
|
let _line = view.line_of_offset(&text, i);
}
})
}
|
random_line_split
|
wrap.rs
|
// Copyright 2018 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(test)]
extern crate test;
extern crate xi_core_lib as xi_core;
extern crate xi_rope;
use crate::xi_core::line_offset::LineOffset;
use crate::xi_core::tabs::BufferId;
use crate::xi_core::view::View;
use test::Bencher;
use xi_rope::Rope;
fn build_short_lines(n: usize) -> String {
let line =
"See it, the beautiful ball Poised in the toyshop window, Rounder than sun or moon.\n";
let mut s = String::new();
for _ in 0..n {
s += line;
}
s
}
#[bench]
fn
|
(b: &mut Bencher) {
let text = Rope::from(build_short_lines(10_000));
let view = View::new(1.into(), BufferId::new(2));
let total_bytes = text.len();
b.iter(|| {
for i in 0..total_bytes {
let _line = view.line_of_offset(&text, i);
}
})
}
#[bench]
fn line_of_offset_col_breaks(b: &mut Bencher) {
let text = Rope::from(build_short_lines(10_000));
let mut view = View::new(1.into(), BufferId::new(2));
view.debug_force_rewrap_cols(&text, 20);
let total_bytes = text.len();
b.iter(|| {
for i in 0..total_bytes {
let _line = view.line_of_offset(&text, i);
}
})
}
#[bench]
fn offset_of_line_no_breaks(b: &mut Bencher) {
let text = Rope::from(build_short_lines(10_000));
let view = View::new(1.into(), BufferId::new(2));
b.iter(|| {
for i in 0..10_000 {
let _line = view.offset_of_line(&text, i);
}
})
}
#[bench]
fn offset_of_line_col_breaks(b: &mut Bencher) {
let text = Rope::from(build_short_lines(10_000));
let mut view = View::new(1.into(), BufferId::new(2));
view.debug_force_rewrap_cols(&text, 20);
b.iter(|| {
for i in 0..10_000 {
let _line = view.offset_of_line(&text, i);
}
})
}
|
line_of_offset_no_breaks
|
identifier_name
|
wrap.rs
|
// Copyright 2018 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(test)]
extern crate test;
extern crate xi_core_lib as xi_core;
extern crate xi_rope;
use crate::xi_core::line_offset::LineOffset;
use crate::xi_core::tabs::BufferId;
use crate::xi_core::view::View;
use test::Bencher;
use xi_rope::Rope;
fn build_short_lines(n: usize) -> String
|
#[bench]
fn line_of_offset_no_breaks(b: &mut Bencher) {
let text = Rope::from(build_short_lines(10_000));
let view = View::new(1.into(), BufferId::new(2));
let total_bytes = text.len();
b.iter(|| {
for i in 0..total_bytes {
let _line = view.line_of_offset(&text, i);
}
})
}
#[bench]
fn line_of_offset_col_breaks(b: &mut Bencher) {
let text = Rope::from(build_short_lines(10_000));
let mut view = View::new(1.into(), BufferId::new(2));
view.debug_force_rewrap_cols(&text, 20);
let total_bytes = text.len();
b.iter(|| {
for i in 0..total_bytes {
let _line = view.line_of_offset(&text, i);
}
})
}
#[bench]
fn offset_of_line_no_breaks(b: &mut Bencher) {
let text = Rope::from(build_short_lines(10_000));
let view = View::new(1.into(), BufferId::new(2));
b.iter(|| {
for i in 0..10_000 {
let _line = view.offset_of_line(&text, i);
}
})
}
#[bench]
fn offset_of_line_col_breaks(b: &mut Bencher) {
let text = Rope::from(build_short_lines(10_000));
let mut view = View::new(1.into(), BufferId::new(2));
view.debug_force_rewrap_cols(&text, 20);
b.iter(|| {
for i in 0..10_000 {
let _line = view.offset_of_line(&text, i);
}
})
}
|
{
let line =
"See it, the beautiful ball Poised in the toyshop window, Rounder than sun or moon.\n";
let mut s = String::new();
for _ in 0..n {
s += line;
}
s
}
|
identifier_body
|
regions-variance-contravariant-use-contravariant.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
#![allow(unused_variables)]
// Test that a type which is contravariant with respect to its region
// parameter compiles successfully when used in a contravariant way.
//
// Note: see compile-fail/variance-regions-*.rs for the tests that check that the
// variance inference works in the first place.
// pretty-expanded FIXME #23616
struct Contravariant<'a> {
f: &'a isize
}
fn use_<'a>(c: Contravariant<'a>) {
let x = 3;
// 'b winds up being inferred to this call.
// Contravariant<'a> <: Contravariant<'call> is true
// if 'call <= 'a, which is true, so no error.
collapse(&x, c);
fn collapse<'b>(x: &'b isize, c: Contravariant<'b>) { }
|
}
pub fn main() {}
|
random_line_split
|
|
regions-variance-contravariant-use-contravariant.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
#![allow(unused_variables)]
// Test that a type which is contravariant with respect to its region
// parameter compiles successfully when used in a contravariant way.
//
// Note: see compile-fail/variance-regions-*.rs for the tests that check that the
// variance inference works in the first place.
// pretty-expanded FIXME #23616
struct
|
<'a> {
f: &'a isize
}
fn use_<'a>(c: Contravariant<'a>) {
let x = 3;
// 'b winds up being inferred to this call.
// Contravariant<'a> <: Contravariant<'call> is true
// if 'call <= 'a, which is true, so no error.
collapse(&x, c);
fn collapse<'b>(x: &'b isize, c: Contravariant<'b>) { }
}
pub fn main() {}
|
Contravariant
|
identifier_name
|
regions-variance-contravariant-use-contravariant.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
#![allow(unused_variables)]
// Test that a type which is contravariant with respect to its region
// parameter compiles successfully when used in a contravariant way.
//
// Note: see compile-fail/variance-regions-*.rs for the tests that check that the
// variance inference works in the first place.
// pretty-expanded FIXME #23616
struct Contravariant<'a> {
f: &'a isize
}
fn use_<'a>(c: Contravariant<'a>) {
let x = 3;
// 'b winds up being inferred to this call.
// Contravariant<'a> <: Contravariant<'call> is true
// if 'call <= 'a, which is true, so no error.
collapse(&x, c);
fn collapse<'b>(x: &'b isize, c: Contravariant<'b>)
|
}
pub fn main() {}
|
{ }
|
identifier_body
|
to_num.rs
|
//! Types convertable to integers
use common::get_slice::GetSlice;
/// Parse the string to a integer using a given radix
pub trait ToNum {
fn to_num_radix(&self, radix: usize) -> usize;
fn to_num_radix_signed(&self, radix: usize) -> isize;
fn to_num(&self) -> usize;
fn to_num_signed(&self) -> isize;
}
impl ToNum for str {
fn to_num_radix(&self, radix: usize) -> usize {
if radix == 0 {
return 0;
}
let mut num = 0;
for c in self.chars() {
let digit;
if c >= '0' && c <= '9' {
digit = c as usize - '0' as usize
} else if c >= 'A' && c <= 'Z' {
digit = c as usize - 'A' as usize + 10
} else if c >= 'a' && c <= 'z' {
digit = c as usize - 'a' as usize + 10
} else {
break;
}
if digit >= radix {
break;
}
num *= radix;
num += digit;
}
num
}
/// Parse the string as a signed integer using a given radix
fn to_num_radix_signed(&self, radix: usize) -> isize {
if self.starts_with('-') {
-(self.get_slice(1..).to_num_radix(radix) as isize)
} else {
self.to_num_radix(radix) as isize
}
}
/// Parse it as a unsigned integer in base 10
fn to_num(&self) -> usize {
self.to_num_radix(10)
}
/// Parse it as a signed integer in base 10
fn to_num_signed(&self) -> isize {
self.to_num_radix_signed(10)
}
|
}
|
random_line_split
|
|
to_num.rs
|
//! Types convertable to integers
use common::get_slice::GetSlice;
/// Parse the string to a integer using a given radix
pub trait ToNum {
fn to_num_radix(&self, radix: usize) -> usize;
fn to_num_radix_signed(&self, radix: usize) -> isize;
fn to_num(&self) -> usize;
fn to_num_signed(&self) -> isize;
}
impl ToNum for str {
fn to_num_radix(&self, radix: usize) -> usize {
if radix == 0 {
return 0;
}
let mut num = 0;
for c in self.chars() {
let digit;
if c >= '0' && c <= '9' {
digit = c as usize - '0' as usize
} else if c >= 'A' && c <= 'Z' {
digit = c as usize - 'A' as usize + 10
} else if c >= 'a' && c <= 'z' {
digit = c as usize - 'a' as usize + 10
} else {
break;
}
if digit >= radix {
break;
}
num *= radix;
num += digit;
}
num
}
/// Parse the string as a signed integer using a given radix
fn to_num_radix_signed(&self, radix: usize) -> isize {
if self.starts_with('-') {
-(self.get_slice(1..).to_num_radix(radix) as isize)
} else {
self.to_num_radix(radix) as isize
}
}
/// Parse it as a unsigned integer in base 10
fn
|
(&self) -> usize {
self.to_num_radix(10)
}
/// Parse it as a signed integer in base 10
fn to_num_signed(&self) -> isize {
self.to_num_radix_signed(10)
}
}
|
to_num
|
identifier_name
|
to_num.rs
|
//! Types convertable to integers
use common::get_slice::GetSlice;
/// Parse the string to a integer using a given radix
pub trait ToNum {
fn to_num_radix(&self, radix: usize) -> usize;
fn to_num_radix_signed(&self, radix: usize) -> isize;
fn to_num(&self) -> usize;
fn to_num_signed(&self) -> isize;
}
impl ToNum for str {
fn to_num_radix(&self, radix: usize) -> usize {
if radix == 0 {
return 0;
}
let mut num = 0;
for c in self.chars() {
let digit;
if c >= '0' && c <= '9' {
digit = c as usize - '0' as usize
} else if c >= 'A' && c <= 'Z' {
digit = c as usize - 'A' as usize + 10
} else if c >= 'a' && c <= 'z' {
digit = c as usize - 'a' as usize + 10
} else {
break;
}
if digit >= radix {
break;
}
num *= radix;
num += digit;
}
num
}
/// Parse the string as a signed integer using a given radix
fn to_num_radix_signed(&self, radix: usize) -> isize {
if self.starts_with('-') {
-(self.get_slice(1..).to_num_radix(radix) as isize)
} else {
self.to_num_radix(radix) as isize
}
}
/// Parse it as a unsigned integer in base 10
fn to_num(&self) -> usize {
self.to_num_radix(10)
}
/// Parse it as a signed integer in base 10
fn to_num_signed(&self) -> isize
|
}
|
{
self.to_num_radix_signed(10)
}
|
identifier_body
|
to_num.rs
|
//! Types convertable to integers
use common::get_slice::GetSlice;
/// Parse the string to a integer using a given radix
pub trait ToNum {
fn to_num_radix(&self, radix: usize) -> usize;
fn to_num_radix_signed(&self, radix: usize) -> isize;
fn to_num(&self) -> usize;
fn to_num_signed(&self) -> isize;
}
impl ToNum for str {
fn to_num_radix(&self, radix: usize) -> usize {
if radix == 0 {
return 0;
}
let mut num = 0;
for c in self.chars() {
let digit;
if c >= '0' && c <= '9' {
digit = c as usize - '0' as usize
} else if c >= 'A' && c <= 'Z' {
digit = c as usize - 'A' as usize + 10
} else if c >= 'a' && c <= 'z'
|
else {
break;
}
if digit >= radix {
break;
}
num *= radix;
num += digit;
}
num
}
/// Parse the string as a signed integer using a given radix
fn to_num_radix_signed(&self, radix: usize) -> isize {
if self.starts_with('-') {
-(self.get_slice(1..).to_num_radix(radix) as isize)
} else {
self.to_num_radix(radix) as isize
}
}
/// Parse it as a unsigned integer in base 10
fn to_num(&self) -> usize {
self.to_num_radix(10)
}
/// Parse it as a signed integer in base 10
fn to_num_signed(&self) -> isize {
self.to_num_radix_signed(10)
}
}
|
{
digit = c as usize - 'a' as usize + 10
}
|
conditional_block
|
traits-issue-22019.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test an issue where global caching was causing free regions from
// distinct scopes to be compared (`'g` and `'h`). The only important
// thing is that compilation succeeds here.
#![allow(missing_copy_implementations)]
#![allow(unused_variables)]
use std::borrow::ToOwned;
pub struct CFGNode;
pub type Node<'a> = &'a CFGNode;
pub trait GraphWalk<'c, N> {
/// Returns all the nodes in this graph.
fn nodes(&'c self) where [N]:ToOwned<Vec<N>>;
}
impl<'g> GraphWalk<'g, Node<'g>> for u32
{
fn
|
(&'g self) where [Node<'g>]:ToOwned<Vec<Node<'g>>>
{ loop { } }
}
impl<'h> GraphWalk<'h, Node<'h>> for u64
{
fn nodes(&'h self) where [Node<'h>]:ToOwned<Vec<Node<'h>>>
{ loop { } }
}
fn main() { }
|
nodes
|
identifier_name
|
traits-issue-22019.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test an issue where global caching was causing free regions from
// distinct scopes to be compared (`'g` and `'h`). The only important
// thing is that compilation succeeds here.
|
#![allow(unused_variables)]
use std::borrow::ToOwned;
pub struct CFGNode;
pub type Node<'a> = &'a CFGNode;
pub trait GraphWalk<'c, N> {
/// Returns all the nodes in this graph.
fn nodes(&'c self) where [N]:ToOwned<Vec<N>>;
}
impl<'g> GraphWalk<'g, Node<'g>> for u32
{
fn nodes(&'g self) where [Node<'g>]:ToOwned<Vec<Node<'g>>>
{ loop { } }
}
impl<'h> GraphWalk<'h, Node<'h>> for u64
{
fn nodes(&'h self) where [Node<'h>]:ToOwned<Vec<Node<'h>>>
{ loop { } }
}
fn main() { }
|
#![allow(missing_copy_implementations)]
|
random_line_split
|
traits-issue-22019.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test an issue where global caching was causing free regions from
// distinct scopes to be compared (`'g` and `'h`). The only important
// thing is that compilation succeeds here.
#![allow(missing_copy_implementations)]
#![allow(unused_variables)]
use std::borrow::ToOwned;
pub struct CFGNode;
pub type Node<'a> = &'a CFGNode;
pub trait GraphWalk<'c, N> {
/// Returns all the nodes in this graph.
fn nodes(&'c self) where [N]:ToOwned<Vec<N>>;
}
impl<'g> GraphWalk<'g, Node<'g>> for u32
{
fn nodes(&'g self) where [Node<'g>]:ToOwned<Vec<Node<'g>>>
{ loop { } }
}
impl<'h> GraphWalk<'h, Node<'h>> for u64
{
fn nodes(&'h self) where [Node<'h>]:ToOwned<Vec<Node<'h>>>
|
}
fn main() { }
|
{ loop { } }
|
identifier_body
|
state.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::async_client::JsonRpcResponse;
#[derive(Debug, Clone, PartialEq, PartialOrd)]
pub struct State {
pub chain_id: u8,
pub version: u64,
pub timestamp_usecs: u64,
}
impl State {
pub fn from_response(resp: &JsonRpcResponse) -> Self {
Self {
chain_id: resp.diem_chain_id,
version: resp.diem_ledger_version,
timestamp_usecs: resp.diem_ledger_timestampusec,
}
}
}
pub struct StateManager {
last_known_state: std::sync::RwLock<Option<State>>,
}
impl Default for StateManager {
fn default() -> Self {
Self {
last_known_state: std::sync::RwLock::new(None),
}
}
}
impl StateManager {
pub fn new() -> Self {
Self::default()
}
pub fn last_known_state(&self) -> Option<State> {
let data = self.last_known_state.read().unwrap();
data.clone()
|
if let Some(state) = &*state_writer {
if &resp_state < state {
return false;
}
}
*state_writer = Some(resp_state);
true
}
}
|
}
pub fn update_state(&self, resp_state: State) -> bool {
let mut state_writer = self.last_known_state.write().unwrap();
|
random_line_split
|
state.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::async_client::JsonRpcResponse;
#[derive(Debug, Clone, PartialEq, PartialOrd)]
pub struct State {
pub chain_id: u8,
pub version: u64,
pub timestamp_usecs: u64,
}
impl State {
pub fn from_response(resp: &JsonRpcResponse) -> Self {
Self {
chain_id: resp.diem_chain_id,
version: resp.diem_ledger_version,
timestamp_usecs: resp.diem_ledger_timestampusec,
}
}
}
pub struct StateManager {
last_known_state: std::sync::RwLock<Option<State>>,
}
impl Default for StateManager {
fn default() -> Self {
Self {
last_known_state: std::sync::RwLock::new(None),
}
}
}
impl StateManager {
pub fn new() -> Self {
Self::default()
}
pub fn last_known_state(&self) -> Option<State> {
let data = self.last_known_state.read().unwrap();
data.clone()
}
pub fn update_state(&self, resp_state: State) -> bool {
let mut state_writer = self.last_known_state.write().unwrap();
if let Some(state) = &*state_writer
|
*state_writer = Some(resp_state);
true
}
}
|
{
if &resp_state < state {
return false;
}
}
|
conditional_block
|
state.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::async_client::JsonRpcResponse;
#[derive(Debug, Clone, PartialEq, PartialOrd)]
pub struct State {
pub chain_id: u8,
pub version: u64,
pub timestamp_usecs: u64,
}
impl State {
pub fn from_response(resp: &JsonRpcResponse) -> Self {
Self {
chain_id: resp.diem_chain_id,
version: resp.diem_ledger_version,
timestamp_usecs: resp.diem_ledger_timestampusec,
}
}
}
pub struct StateManager {
last_known_state: std::sync::RwLock<Option<State>>,
}
impl Default for StateManager {
fn default() -> Self {
Self {
last_known_state: std::sync::RwLock::new(None),
}
}
}
impl StateManager {
pub fn new() -> Self
|
pub fn last_known_state(&self) -> Option<State> {
let data = self.last_known_state.read().unwrap();
data.clone()
}
pub fn update_state(&self, resp_state: State) -> bool {
let mut state_writer = self.last_known_state.write().unwrap();
if let Some(state) = &*state_writer {
if &resp_state < state {
return false;
}
}
*state_writer = Some(resp_state);
true
}
}
|
{
Self::default()
}
|
identifier_body
|