file_name (string, 4–69 chars) | prefix (string, 0–26.7k chars) | suffix (string, 0–24.8k chars) | middle (string, 0–2.12k chars) | fim_type (string, 4 classes)
---|---|---|---|---|
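Each row below is a fill-in-the-middle (FIM) example: a source file name, the prefix and suffix surrounding a held-out span, the middle span itself, and a fim_type label (the four classes seen in the rows are identifier_body, identifier_name, conditional_block, and random_line_split). As a minimal sketch of how a row is meant to be read, assuming the three spans are plain strings as described in the header, concatenating prefix, middle, and suffix recovers the original file text; the reassemble helper below is illustrative only, not part of the dataset:
// Minimal sketch, not part of the dataset: a FIM row's source file is
// recovered by concatenating its three string columns in order.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
let mut out = String::with_capacity(prefix.len() + middle.len() + suffix.len());
out.push_str(prefix);
out.push_str(middle); // the held-out span a FIM model is trained to predict
out.push_str(suffix);
out
}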
mod.rs
|
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
impl super::TXCRCR {
#[doc = r" Reads the contents of the register"]
#[inline(always)]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
}
#[doc = r" Value of the field"]
pub struct TXCRCR {
bits: u16,
}
impl TXCRCR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u16 {
self.bits
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u32
|
#[doc = "Bits 0:15 - Tx CRC register"]
#[inline(always)]
pub fn tx_crc(&self) -> TXCRCR {
let bits = {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
TXCRCR { bits }
}
}
|
{
self.bits
}
|
identifier_body
|
mod.rs
|
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
impl super::TXCRCR {
#[doc = r" Reads the contents of the register"]
#[inline(always)]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
}
#[doc = r" Value of the field"]
pub struct TXCRCR {
bits: u16,
}
impl TXCRCR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
|
self.bits
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:15 - Tx CRC register"]
#[inline(always)]
pub fn tx_crc(&self) -> TXCRCR {
let bits = {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
TXCRCR { bits }
}
}
|
pub fn bits(&self) -> u16 {
|
random_line_split
|
mod.rs
|
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
impl super::TXCRCR {
#[doc = r" Reads the contents of the register"]
#[inline(always)]
pub fn
|
(&self) -> R {
R {
bits: self.register.get(),
}
}
}
#[doc = r" Value of the field"]
pub struct TXCRCR {
bits: u16,
}
impl TXCRCR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u16 {
self.bits
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:15 - Tx CRC register"]
#[inline(always)]
pub fn tx_crc(&self) -> TXCRCR {
let bits = {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
TXCRCR { bits }
}
}
|
read
|
identifier_name
|
card.rs
|
// Copyright 2016-2018 Matthew D. Michelotti
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::{self, Debug, Formatter};
use std::ops::{Index, IndexMut};
/// Represents the four cardinal directions in 2D space.
#[derive(PartialEq, Eq, Copy, Clone, Debug, Hash)]
pub enum Card {
/// Negative X direction.
MinusX,
/// Negative Y direction.
MinusY,
/// Positive X direction.
PlusX,
/// Positive Y direction.
PlusY,
}
impl Card {
/// Returns the negative of the current direction.
pub fn flip(self) -> Card {
match self {
Card::MinusX => Card::PlusX,
Card::PlusX => Card::MinusX,
Card::MinusY => Card::PlusY,
Card::PlusY => Card::MinusY,
}
}
/// Returns all cardinal directions.
#[inline]
pub fn values() -> [Card; 4] {
[Card::MinusX, Card::MinusY, Card::PlusX, Card::PlusY]
}
}
/// A map from `Card` to `bool`, typically used to specify allowed normal vector
/// directions.
#[derive(PartialEq, Eq, Copy, Clone, Hash)]
pub struct CardMask {
flags: [bool; 4],
}
|
pub fn empty() -> CardMask {
CardMask { flags: [false; 4] }
}
/// Creates a `CardMask` with all values set to `true`.
#[inline]
pub fn full() -> CardMask {
CardMask { flags: [true; 4] }
}
pub(crate) fn flip(self) -> CardMask {
let mut result = CardMask::empty();
result[Card::PlusX] = self[Card::MinusX];
result[Card::MinusX] = self[Card::PlusX];
result[Card::PlusY] = self[Card::MinusY];
result[Card::MinusY] = self[Card::PlusY];
result
}
}
impl From<Card> for CardMask {
fn from(card: Card) -> CardMask {
let mut result = CardMask::empty();
result[card] = true;
result
}
}
impl Index<Card> for CardMask {
type Output = bool;
#[inline]
fn index(&self, index: Card) -> &bool {
&self.flags[index as usize]
}
}
impl IndexMut<Card> for CardMask {
#[inline]
fn index_mut(&mut self, index: Card) -> &mut bool {
&mut self.flags[index as usize]
}
}
impl Debug for CardMask {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(
f,
"CardMask {{ MinusX: {}, MinusY: {}, PlusX: {}, PlusY: {} }}",
self[Card::MinusX],
self[Card::MinusY],
self[Card::PlusX],
self[Card::PlusY]
)
}
}
|
impl CardMask {
/// Creates a `CardMask` with all values set to `false`.
#[inline]
|
random_line_split
|
card.rs
|
// Copyright 2016-2018 Matthew D. Michelotti
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::{self, Debug, Formatter};
use std::ops::{Index, IndexMut};
/// Represents the four cardinal directions in 2D space.
#[derive(PartialEq, Eq, Copy, Clone, Debug, Hash)]
pub enum Card {
/// Negative X direction.
MinusX,
/// Negative Y direction.
MinusY,
/// Positive X direction.
PlusX,
/// Positive Y direction.
PlusY,
}
impl Card {
/// Returns the negative of the current direction.
pub fn flip(self) -> Card {
match self {
Card::MinusX => Card::PlusX,
Card::PlusX => Card::MinusX,
Card::MinusY => Card::PlusY,
Card::PlusY => Card::MinusY,
}
}
/// Returns all cardinal directions.
#[inline]
pub fn values() -> [Card; 4] {
[Card::MinusX, Card::MinusY, Card::PlusX, Card::PlusY]
}
}
/// A map from `Card` to `bool`, typically used to specify allowed normal vector
/// directions.
#[derive(PartialEq, Eq, Copy, Clone, Hash)]
pub struct CardMask {
flags: [bool; 4],
}
impl CardMask {
/// Creates a `CardMask` with all values set to `false`.
#[inline]
pub fn empty() -> CardMask {
CardMask { flags: [false; 4] }
}
/// Creates a `CardMask` with all values set to `true`.
#[inline]
pub fn
|
() -> CardMask {
CardMask { flags: [true; 4] }
}
pub(crate) fn flip(self) -> CardMask {
let mut result = CardMask::empty();
result[Card::PlusX] = self[Card::MinusX];
result[Card::MinusX] = self[Card::PlusX];
result[Card::PlusY] = self[Card::MinusY];
result[Card::MinusY] = self[Card::PlusY];
result
}
}
impl From<Card> for CardMask {
fn from(card: Card) -> CardMask {
let mut result = CardMask::empty();
result[card] = true;
result
}
}
impl Index<Card> for CardMask {
type Output = bool;
#[inline]
fn index(&self, index: Card) -> &bool {
&self.flags[index as usize]
}
}
impl IndexMut<Card> for CardMask {
#[inline]
fn index_mut(&mut self, index: Card) -> &mut bool {
&mut self.flags[index as usize]
}
}
impl Debug for CardMask {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(
f,
"CardMask {{ MinusX: {}, MinusY: {}, PlusX: {}, PlusY: {} }}",
self[Card::MinusX],
self[Card::MinusY],
self[Card::PlusX],
self[Card::PlusY]
)
}
}
|
full
|
identifier_name
|
arith-unsigned.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Unsigned integer operations
pub fn main()
|
}
|
{
assert!((0u8 < 255u8));
assert!((0u8 <= 255u8));
assert!((255u8 > 0u8));
assert!((255u8 >= 0u8));
assert!((250u8 / 10u8 == 25u8));
assert!((255u8 % 10u8 == 5u8));
assert!((0u16 < 60000u16));
assert!((0u16 <= 60000u16));
assert!((60000u16 > 0u16));
assert!((60000u16 >= 0u16));
assert!((60000u16 / 10u16 == 6000u16));
assert!((60005u16 % 10u16 == 5u16));
assert!((0u32 < 4000000000u32));
assert!((0u32 <= 4000000000u32));
assert!((4000000000u32 > 0u32));
assert!((4000000000u32 >= 0u32));
assert!((4000000000u32 / 10u32 == 400000000u32));
assert!((4000000005u32 % 10u32 == 5u32));
// 64-bit numbers have some flakiness yet. Not tested
|
identifier_body
|
arith-unsigned.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Unsigned integer operations
pub fn
|
() {
assert!((0u8 < 255u8));
assert!((0u8 <= 255u8));
assert!((255u8 > 0u8));
assert!((255u8 >= 0u8));
assert!((250u8 / 10u8 == 25u8));
assert!((255u8 % 10u8 == 5u8));
assert!((0u16 < 60000u16));
assert!((0u16 <= 60000u16));
assert!((60000u16 > 0u16));
assert!((60000u16 >= 0u16));
assert!((60000u16 / 10u16 == 6000u16));
assert!((60005u16 % 10u16 == 5u16));
assert!((0u32 < 4000000000u32));
assert!((0u32 <= 4000000000u32));
assert!((4000000000u32 > 0u32));
assert!((4000000000u32 >= 0u32));
assert!((4000000000u32 / 10u32 == 400000000u32));
assert!((4000000005u32 % 10u32 == 5u32));
// 64-bit numbers have some flakiness yet. Not tested
}
|
main
|
identifier_name
|
arith-unsigned.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Unsigned integer operations
pub fn main() {
assert!((0u8 < 255u8));
assert!((0u8 <= 255u8));
assert!((255u8 > 0u8));
assert!((255u8 >= 0u8));
|
assert!((250u8 / 10u8 == 25u8));
assert!((255u8 % 10u8 == 5u8));
assert!((0u16 < 60000u16));
assert!((0u16 <= 60000u16));
assert!((60000u16 > 0u16));
assert!((60000u16 >= 0u16));
assert!((60000u16 / 10u16 == 6000u16));
assert!((60005u16 % 10u16 == 5u16));
assert!((0u32 < 4000000000u32));
assert!((0u32 <= 4000000000u32));
assert!((4000000000u32 > 0u32));
assert!((4000000000u32 >= 0u32));
assert!((4000000000u32 / 10u32 == 400000000u32));
assert!((4000000005u32 % 10u32 == 5u32));
// 64-bit numbers have some flakiness yet. Not tested
}
|
random_line_split
|
|
ffi.rs
|
// The MIT License (MIT)
//
// Copyright (c) 2013 Jeremy Letang ([email protected])
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#![allow(dead_code, non_camel_case_types)]
use super::*;
use libc::{c_char, c_void};
pub type SF_MODE = i32;
pub const SFM_READ : SF_MODE = 0x10;
pub const SFM_WRITE : SF_MODE = 0x20;
pub const SFM_RDWR : SF_MODE = 0x30;
pub type SF_ERR = i32;
pub const SF_ERR_NO_ERROR : SF_ERR = 0;
pub const SF_ERR_UNRECOGNISED_FORMAT : SF_ERR = 1;
pub const SF_ERR_SYSTEM : SF_ERR = 2;
pub const SF_ERR_MALFORMED_FILE : SF_ERR = 3;
pub const SF_ERR_UNSUPPORTED_ENCODING : SF_ERR = 4;
pub type SF_STR = i32;
pub const SF_STR_TITLE : SF_STR = 0x01;
pub const SF_STR_COPYRIGHT : SF_STR = 0x02;
pub const SF_STR_SOFTWARE : SF_STR = 0x03;
pub const SF_STR_ARTIST : SF_STR = 0x04;
pub const SF_STR_COMMENT : SF_STR = 0x05;
pub const SF_STR_DATE : SF_STR = 0x06;
pub const SF_STR_ALBUM : SF_STR = 0x07;
pub const SF_STR_LICENSE : SF_STR = 0x08;
pub const SF_STR_TRACKNUMBER : SF_STR = 0x09;
pub const SF_STR_GENRE : SF_STR = 0x10;
pub type SF_BOOL = i32;
pub const SF_FALSE : SF_BOOL = 0;
pub const SF_TRUE : SF_BOOL = 1;
pub type SEEK_MODE = i32;
pub const SEEK_SET : SEEK_MODE = 0;
pub const SEEK_CUR : SEEK_MODE = 1;
pub const SEEK_END : SEEK_MODE = 2;
pub type FORMAT_TYPE = i32;
pub const SF_FORMAT_WAV : FORMAT_TYPE = 0x010000; // Microsoft WAV format (little endian)
pub const SF_FORMAT_AIFF : FORMAT_TYPE = 0x020000; // Apple/SGI AIFF format (big endian)
pub const SF_FORMAT_AU : FORMAT_TYPE = 0x030000; // Sun/NeXT AU format (big endian)
pub const SF_FORMAT_RAW : FORMAT_TYPE = 0x040000; // RAW PCM data
pub const SF_FORMAT_PAF : FORMAT_TYPE = 0x050000; // Ensoniq PARIS file format
pub const SF_FORMAT_SVX : FORMAT_TYPE = 0x060000; // Amiga IFF / SVX8 / SV16 format
pub const SF_FORMAT_NIST : FORMAT_TYPE = 0x070000; // Sphere NIST format
pub const SF_FORMAT_VOC : FORMAT_TYPE = 0x080000; // VOC files
pub const SF_FORMAT_IRCAM : FORMAT_TYPE = 0x0A0000; // Berkeley/IRCAM/CARL
pub const SF_FORMAT_W64 : FORMAT_TYPE = 0x0B0000; // Sonic Foundry's 64 bit RIFF/WAV
pub const SF_FORMAT_MAT4 : FORMAT_TYPE = 0x0C0000; // Matlab (tm) V4.2 / GNU Octave 2.0
pub const SF_FORMAT_MAT5 : FORMAT_TYPE = 0x0D0000; // Matlab (tm) V5.0 / GNU Octave 2.1
pub const SF_FORMAT_PVF : FORMAT_TYPE = 0x0E0000; // Portable Voice Format
pub const SF_FORMAT_XI : FORMAT_TYPE = 0x0F0000; // Fasttracker 2 Extended Instrument
pub const SF_FORMAT_HTK : FORMAT_TYPE = 0x100000; // HMM Tool Kit format
pub const SF_FORMAT_SDS : FORMAT_TYPE = 0x110000; // Midi Sample Dump Standard
pub const SF_FORMAT_AVR : FORMAT_TYPE = 0x120000; // Audio Visual Research
pub const SF_FORMAT_WAVEX : FORMAT_TYPE = 0x130000; // MS WAVE with WAVEFORMATEX
pub const SF_FORMAT_SD2 : FORMAT_TYPE = 0x160000; // Sound Designer 2
pub const SF_FORMAT_FLAC : FORMAT_TYPE = 0x170000; // FLAC lossless file format
pub const SF_FORMAT_CAF : FORMAT_TYPE = 0x180000; // Core Audio File format
pub const SF_FORMAT_WVE : FORMAT_TYPE = 0x190000; // Psion WVE format
pub const SF_FORMAT_OGG : FORMAT_TYPE = 0x200000; // Xiph OGG container
pub const SF_FORMAT_MPC2K : FORMAT_TYPE = 0x210000; // Akai MPC 2000 sampler
pub const SF_FORMAT_RF64 : FORMAT_TYPE = 0x220000; // RF64 WAV file
/* Subtypes from here on. */
pub const SF_FORMAT_PCM_S8 : FORMAT_TYPE = 0x0001; // Signed 8 bit data
pub const SF_FORMAT_PCM_16 : FORMAT_TYPE = 0x0002; // Signed 16 bit data
pub const SF_FORMAT_PCM_24 : FORMAT_TYPE = 0x0003; // Signed 24 bit data
pub const SF_FORMAT_PCM_32 : FORMAT_TYPE = 0x0004; // Signed 32 bit data
pub const SF_FORMAT_PCM_U8 : FORMAT_TYPE = 0x0005; // Unsigned 8 bit data (WAV and RAW only)
pub const SF_FORMAT_FLOAT : FORMAT_TYPE = 0x0006; // 32 bit float data
pub const SF_FORMAT_DOUBLE : FORMAT_TYPE = 0x0007; // 64 bit float data
pub const SF_FORMAT_ULAW : FORMAT_TYPE = 0x0010; // U-Law encoded
pub const SF_FORMAT_ALAW : FORMAT_TYPE = 0x0011; // A-Law encoded
pub const SF_FORMAT_IMA_ADPCM : FORMAT_TYPE = 0x0012; // IMA ADPCM
pub const SF_FORMAT_MS_ADPCM : FORMAT_TYPE = 0x0013; // Microsoft ADPCM
pub const SF_FORMAT_GSM610 : FORMAT_TYPE = 0x0020; // GSM 6.10 encoding
pub const SF_FORMAT_VOX_ADPCM : FORMAT_TYPE = 0x0021; // Oki Dialogic ADPCM encoding
pub const SF_FORMAT_G721_32 : FORMAT_TYPE = 0x0030; // 32kbs G721 ADPCM encoding
pub const SF_FORMAT_G723_24 : FORMAT_TYPE = 0x0031; // 24kbs G723 ADPCM encoding
pub const SF_FORMAT_G723_40 : FORMAT_TYPE = 0x0032; // 40kbs G723 ADPCM encoding
pub const SF_FORMAT_DWVW_12 : FORMAT_TYPE = 0x0040; // 12 bit Delta Width Variable Word encoding
pub const SF_FORMAT_DWVW_16 : FORMAT_TYPE = 0x0041; // 16 bit Delta Width Variable Word encoding
pub const SF_FORMAT_DWVW_24 : FORMAT_TYPE = 0x0042; // 24 bit Delta Width Variable Word encoding
pub const SF_FORMAT_DWVW_N : FORMAT_TYPE = 0x0043; // N bit Delta Width Variable Word encoding
pub const SF_FORMAT_DPCM_8 : FORMAT_TYPE = 0x0050; // 8 bit differential PCM (XI only)
pub const SF_FORMAT_DPCM_16 : FORMAT_TYPE = 0x0051; // 16 bit differential PCM (XI only)
pub const SF_FORMAT_VORBIS : FORMAT_TYPE = 0x0060; // Xiph Vorbis encoding
/* Endian-ness options. */
pub const SF_ENDIAN_FILE : FORMAT_TYPE = 0x00000000; // Default file endian-ness
pub const SF_ENDIAN_LITTLE : FORMAT_TYPE = 0x10000000; // Force little endian-ness
pub const SF_ENDIAN_BIG : FORMAT_TYPE = 0x20000000; // Force big endian-ness
pub const SF_ENDIAN_CPU : FORMAT_TYPE = 0x30000000; // Force CPU endian-ness
pub const SF_FORMAT_SUBMASK : FORMAT_TYPE = 0x0000FFFF;
pub const SF_FORMAT_TYPEMASK : FORMAT_TYPE = 0x0FFF0000;
pub const SF_FORMAT_ENDMASK : FORMAT_TYPE = 0x30000000;
pub type SNDFILE = c_void;
pub struct
|
{
pub format : i32,
pub name : *const c_char,
pub extension : *const c_char
}
extern "C" {
pub fn sf_open(path : *const c_char, mode : SF_MODE, info : *const SndInfo) -> *mut SNDFILE;
pub fn sf_open_fd(fd : i32, mode : SF_MODE, info : *const SndInfo, close_desc : SF_BOOL) -> *mut SNDFILE;
pub fn sf_format_check(info : *const SndInfo) -> SF_BOOL;
pub fn sf_seek(sndfile : *mut SNDFILE, frames : i64, whence : i32) -> i64;
pub fn sf_command(sndfile : *mut SNDFILE, cmd : i32, data : *mut c_void, datasize : i32) -> SF_ERR;
pub fn sf_error(sndfile : *mut SNDFILE) -> SF_ERR;
pub fn sf_strerror(sndfile : *mut SNDFILE) -> *const c_char;
pub fn sf_error_number(errnum : i32) -> *const c_char;
pub fn sf_perror(sndfile : *mut SNDFILE) -> SF_ERR;
pub fn sf_error_str(sndfile : *mut SNDFILE, string : *const c_char, len : i64) ;
pub fn sf_close(sndfile : *mut SNDFILE) -> SF_ERR;
pub fn sf_write_sync(sndfile : *mut SNDFILE) -> ();
pub fn sf_read_short(sndfile : *mut SNDFILE, ptr : *mut i16, items : i64) -> i64;
pub fn sf_read_int(sndfile : *mut SNDFILE, ptr : *mut i32, items : i64) -> i64;
pub fn sf_read_float(sndfile : *mut SNDFILE, ptr : *mut f32, items : i64) -> i64;
pub fn sf_read_double(sndfile : *mut SNDFILE, ptr : *mut f64, items : i64) -> i64;
pub fn sf_readf_short(sndfile : *mut SNDFILE, ptr : *mut i16, frames : i64) -> i64;
pub fn sf_readf_int(sndfile : *mut SNDFILE, ptr : *mut i32, frames : i64) -> i64;
pub fn sf_readf_float(sndfile : *mut SNDFILE, ptr : *mut f32, frames : i64) -> i64;
pub fn sf_readf_double(sndfile : *mut SNDFILE, ptr : *mut f64, frames : i64) -> i64;
pub fn sf_write_short(sndfile : *mut SNDFILE, ptr : *mut i16, items : i64) -> i64;
pub fn sf_write_int(sndfile : *mut SNDFILE, ptr : *mut i32, items : i64) -> i64;
pub fn sf_write_float(sndfile : *mut SNDFILE, ptr : *mut f32, items : i64) -> i64;
pub fn sf_write_double(sndfile : *mut SNDFILE, ptr : *mut f64, items : i64) -> i64;
pub fn sf_writef_short(sndfile : *mut SNDFILE, ptr : *mut i16, frames : i64) -> i64;
pub fn sf_writef_int(sndfile : *mut SNDFILE, ptr : *mut i32, frames : i64) -> i64;
pub fn sf_writef_float(sndfile : *mut SNDFILE, ptr : *mut f32, frames : i64) -> i64;
pub fn sf_writef_double(sndfile : *mut SNDFILE, ptr : *mut f64, frames : i64) -> i64;
pub fn sf_read_raw(sndfile : *mut SNDFILE, ptr : *mut c_void, bytes : i64) -> i64;
pub fn sf_write_raw(sndfile : *mut SNDFILE, ptr : *mut c_void, bytes : i64) -> i64;
pub fn sf_get_string(sndfile : *mut SNDFILE, str_type : i32) -> *const c_char;
pub fn sf_set_string(sndfile : *mut SNDFILE, str_type : i32, string : *const c_char) -> SF_ERR;
}
|
FormatInfo
|
identifier_name
|
ffi.rs
|
// The MIT License (MIT)
//
// Copyright (c) 2013 Jeremy Letang ([email protected])
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#![allow(dead_code, non_camel_case_types)]
use super::*;
use libc::{c_char, c_void};
pub type SF_MODE = i32;
pub const SFM_READ : SF_MODE = 0x10;
pub const SFM_WRITE : SF_MODE = 0x20;
pub const SFM_RDWR : SF_MODE = 0x30;
pub type SF_ERR = i32;
pub const SF_ERR_NO_ERROR : SF_ERR = 0;
pub const SF_ERR_UNRECOGNISED_FORMAT : SF_ERR = 1;
pub const SF_ERR_SYSTEM : SF_ERR = 2;
pub const SF_ERR_MALFORMED_FILE : SF_ERR = 3;
pub const SF_ERR_UNSUPPORTED_ENCODING : SF_ERR = 4;
pub type SF_STR = i32;
pub const SF_STR_TITLE : SF_STR = 0x01;
pub const SF_STR_COPYRIGHT : SF_STR = 0x02;
pub const SF_STR_SOFTWARE : SF_STR = 0x03;
pub const SF_STR_ARTIST : SF_STR = 0x04;
pub const SF_STR_COMMENT : SF_STR = 0x05;
pub const SF_STR_DATE : SF_STR = 0x06;
pub const SF_STR_ALBUM : SF_STR = 0x07;
pub const SF_STR_LICENSE : SF_STR = 0x08;
pub const SF_STR_TRACKNUMBER : SF_STR = 0x09;
pub const SF_STR_GENRE : SF_STR = 0x10;
pub type SF_BOOL = i32;
pub const SF_FALSE : SF_BOOL = 0;
pub const SF_TRUE : SF_BOOL = 1;
pub type SEEK_MODE = i32;
pub const SEEK_SET : SEEK_MODE = 0;
pub const SEEK_CUR : SEEK_MODE = 1;
pub const SEEK_END : SEEK_MODE = 2;
pub type FORMAT_TYPE = i32;
pub const SF_FORMAT_WAV : FORMAT_TYPE = 0x010000; // Microsoft WAV format (little endian)
pub const SF_FORMAT_AIFF : FORMAT_TYPE = 0x020000; // Apple/SGI AIFF format (big endian)
pub const SF_FORMAT_AU : FORMAT_TYPE = 0x030000; // Sun/NeXT AU format (big endian)
pub const SF_FORMAT_RAW : FORMAT_TYPE = 0x040000; // RAW PCM data
pub const SF_FORMAT_PAF : FORMAT_TYPE = 0x050000; // Ensoniq PARIS file format
pub const SF_FORMAT_SVX : FORMAT_TYPE = 0x060000; // Amiga IFF / SVX8 / SV16 format
pub const SF_FORMAT_NIST : FORMAT_TYPE = 0x070000; // Sphere NIST format
pub const SF_FORMAT_VOC : FORMAT_TYPE = 0x080000; // VOC files
pub const SF_FORMAT_IRCAM : FORMAT_TYPE = 0x0A0000; // Berkeley/IRCAM/CARL
pub const SF_FORMAT_W64 : FORMAT_TYPE = 0x0B0000; // Sonic Foundry's 64 bit RIFF/WAV
pub const SF_FORMAT_MAT4 : FORMAT_TYPE = 0x0C0000; // Matlab (tm) V4.2 / GNU Octave 2.0
pub const SF_FORMAT_MAT5 : FORMAT_TYPE = 0x0D0000; // Matlab (tm) V5.0 / GNU Octave 2.1
pub const SF_FORMAT_PVF : FORMAT_TYPE = 0x0E0000; // Portable Voice Format
pub const SF_FORMAT_XI : FORMAT_TYPE = 0x0F0000; // Fasttracker 2 Extended Instrument
pub const SF_FORMAT_HTK : FORMAT_TYPE = 0x100000; // HMM Tool Kit format
pub const SF_FORMAT_SDS : FORMAT_TYPE = 0x110000; // Midi Sample Dump Standard
pub const SF_FORMAT_AVR : FORMAT_TYPE = 0x120000; // Audio Visual Research
pub const SF_FORMAT_WAVEX : FORMAT_TYPE = 0x130000; // MS WAVE with WAVEFORMATEX
pub const SF_FORMAT_SD2 : FORMAT_TYPE = 0x160000; // Sound Designer 2
pub const SF_FORMAT_FLAC : FORMAT_TYPE = 0x170000; // FLAC lossless file format
pub const SF_FORMAT_CAF : FORMAT_TYPE = 0x180000; // Core Audio File format
pub const SF_FORMAT_WVE : FORMAT_TYPE = 0x190000; // Psion WVE format
pub const SF_FORMAT_OGG : FORMAT_TYPE = 0x200000; // Xiph OGG container
pub const SF_FORMAT_MPC2K : FORMAT_TYPE = 0x210000; // Akai MPC 2000 sampler
pub const SF_FORMAT_RF64 : FORMAT_TYPE = 0x220000; // RF64 WAV file
/* Subtypes from here on. */
pub const SF_FORMAT_PCM_S8 : FORMAT_TYPE = 0x0001; // Signed 8 bit data
pub const SF_FORMAT_PCM_16 : FORMAT_TYPE = 0x0002; // Signed 16 bit data
pub const SF_FORMAT_PCM_24 : FORMAT_TYPE = 0x0003; // Signed 24 bit data
pub const SF_FORMAT_PCM_32 : FORMAT_TYPE = 0x0004; // Signed 32 bit data
pub const SF_FORMAT_PCM_U8 : FORMAT_TYPE = 0x0005; // Unsigned 8 bit data (WAV and RAW only)
pub const SF_FORMAT_FLOAT : FORMAT_TYPE = 0x0006; // 32 bit float data
pub const SF_FORMAT_DOUBLE : FORMAT_TYPE = 0x0007; // 64 bit float data
pub const SF_FORMAT_ULAW : FORMAT_TYPE = 0x0010; // U-Law encoded
pub const SF_FORMAT_ALAW : FORMAT_TYPE = 0x0011; // A-Law encoded
pub const SF_FORMAT_IMA_ADPCM : FORMAT_TYPE = 0x0012; // IMA ADPCM
pub const SF_FORMAT_MS_ADPCM : FORMAT_TYPE = 0x0013; // Microsoft ADPCM
pub const SF_FORMAT_GSM610 : FORMAT_TYPE = 0x0020; // GSM 6.10 encoding
pub const SF_FORMAT_VOX_ADPCM : FORMAT_TYPE = 0x0021; // Oki Dialogic ADPCM encoding
pub const SF_FORMAT_G721_32 : FORMAT_TYPE = 0x0030; // 32kbs G721 ADPCM encoding
pub const SF_FORMAT_G723_24 : FORMAT_TYPE = 0x0031; // 24kbs G723 ADPCM encoding
pub const SF_FORMAT_G723_40 : FORMAT_TYPE = 0x0032; // 40kbs G723 ADPCM encoding
pub const SF_FORMAT_DWVW_12 : FORMAT_TYPE = 0x0040; // 12 bit Delta Width Variable Word encoding
pub const SF_FORMAT_DWVW_16 : FORMAT_TYPE = 0x0041; // 16 bit Delta Width Variable Word encoding
pub const SF_FORMAT_DWVW_24 : FORMAT_TYPE = 0x0042; // 24 bit Delta Width Variable Word encoding
pub const SF_FORMAT_DWVW_N : FORMAT_TYPE = 0x0043; // N bit Delta Width Variable Word encoding
pub const SF_FORMAT_DPCM_8 : FORMAT_TYPE = 0x0050; // 8 bit differential PCM (XI only)
pub const SF_FORMAT_DPCM_16 : FORMAT_TYPE = 0x0051; // 16 bit differential PCM (XI only)
pub const SF_FORMAT_VORBIS : FORMAT_TYPE = 0x0060; // Xiph Vorbis encoding
/* Endian-ness options. */
pub const SF_ENDIAN_FILE : FORMAT_TYPE = 0x00000000; // Default file endian-ness
pub const SF_ENDIAN_LITTLE : FORMAT_TYPE = 0x10000000; // Force little endian-ness
pub const SF_ENDIAN_BIG : FORMAT_TYPE = 0x20000000; // Force big endian-ness
pub const SF_ENDIAN_CPU : FORMAT_TYPE = 0x30000000; // Force CPU endian-ness
pub const SF_FORMAT_SUBMASK : FORMAT_TYPE = 0x0000FFFF;
pub const SF_FORMAT_TYPEMASK : FORMAT_TYPE = 0x0FFF0000;
pub const SF_FORMAT_ENDMASK : FORMAT_TYPE = 0x30000000;
pub type SNDFILE = c_void;
pub struct FormatInfo {
pub format : i32,
pub name : *const c_char,
pub extension : *const c_char
}
extern "C" {
pub fn sf_open(path : *const c_char, mode : SF_MODE, info : *const SndInfo) -> *mut SNDFILE;
pub fn sf_open_fd(fd : i32, mode : SF_MODE, info : *const SndInfo, close_desc : SF_BOOL) -> *mut SNDFILE;
pub fn sf_format_check(info : *const SndInfo) -> SF_BOOL;
pub fn sf_seek(sndfile : *mut SNDFILE, frames : i64, whence : i32) -> i64;
pub fn sf_command(sndfile : *mut SNDFILE, cmd : i32, data : *mut c_void, datasize : i32) -> SF_ERR;
pub fn sf_error(sndfile : *mut SNDFILE) -> SF_ERR;
pub fn sf_strerror(sndfile : *mut SNDFILE) -> *const c_char;
pub fn sf_error_number(errnum : i32) -> *const c_char;
pub fn sf_perror(sndfile : *mut SNDFILE) -> SF_ERR;
pub fn sf_error_str(sndfile : *mut SNDFILE, string : *const c_char, len : i64) ;
pub fn sf_close(sndfile : *mut SNDFILE) -> SF_ERR;
pub fn sf_write_sync(sndfile : *mut SNDFILE) -> ();
pub fn sf_read_short(sndfile : *mut SNDFILE, ptr : *mut i16, items : i64) -> i64;
pub fn sf_read_int(sndfile : *mut SNDFILE, ptr : *mut i32, items : i64) -> i64;
pub fn sf_read_float(sndfile : *mut SNDFILE, ptr : *mut f32, items : i64) -> i64;
pub fn sf_read_double(sndfile : *mut SNDFILE, ptr : *mut f64, items : i64) -> i64;
pub fn sf_readf_short(sndfile : *mut SNDFILE, ptr : *mut i16, frames : i64) -> i64;
pub fn sf_readf_int(sndfile : *mut SNDFILE, ptr : *mut i32, frames : i64) -> i64;
pub fn sf_readf_float(sndfile : *mut SNDFILE, ptr : *mut f32, frames : i64) -> i64;
|
pub fn sf_write_double(sndfile : *mut SNDFILE, ptr : *mut f64, items : i64) -> i64;
pub fn sf_writef_short(sndfile : *mut SNDFILE, ptr : *mut i16, frames : i64) -> i64;
pub fn sf_writef_int(sndfile : *mut SNDFILE, ptr : *mut i32, frames : i64) -> i64;
pub fn sf_writef_float(sndfile : *mut SNDFILE, ptr : *mut f32, frames : i64) -> i64;
pub fn sf_writef_double(sndfile : *mut SNDFILE, ptr : *mut f64, frames : i64) -> i64;
pub fn sf_read_raw(sndfile : *mut SNDFILE, ptr : *mut c_void, bytes : i64) -> i64;
pub fn sf_write_raw(sndfile : *mut SNDFILE, ptr : *mut c_void, bytes : i64) -> i64;
pub fn sf_get_string(sndfile : *mut SNDFILE, str_type : i32) -> *const c_char;
pub fn sf_set_string(sndfile : *mut SNDFILE, str_type : i32, string : *const c_char) -> SF_ERR;
}
|
pub fn sf_readf_double(sndfile : *mut SNDFILE, ptr : *mut f64, frames : i64) -> i64;
pub fn sf_write_short(sndfile : *mut SNDFILE, ptr : *mut i16, items : i64) -> i64;
pub fn sf_write_int(sndfile : *mut SNDFILE, ptr : *mut i32, items : i64) -> i64;
pub fn sf_write_float(sndfile : *mut SNDFILE, ptr : *mut f32, items : i64) -> i64;
|
random_line_split
|
peers_api.rs
|
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::utils::w;
use crate::p2p::types::{PeerAddr, PeerInfoDisplay, ReasonForBan};
use crate::p2p::{self, PeerData};
use crate::rest::*;
use crate::router::{Handler, ResponseFuture};
use crate::web::*;
use hyper::{Body, Request, StatusCode};
use std::net::SocketAddr;
use std::sync::Weak;
pub struct PeersAllHandler {
pub peers: Weak<p2p::Peers>,
}
impl Handler for PeersAllHandler {
fn get(&self, _req: Request<Body>) -> ResponseFuture {
let peers = &w_fut!(&self.peers).all_peer_data();
json_response_pretty(&peers)
}
}
pub struct PeersConnectedHandler {
pub peers: Weak<p2p::Peers>,
}
|
let peers = w(&self.peers)?
.iter()
.connected()
.into_iter()
.map(|p| p.info.clone().into())
.collect::<Vec<PeerInfoDisplay>>();
Ok(peers)
}
}
impl Handler for PeersConnectedHandler {
fn get(&self, _req: Request<Body>) -> ResponseFuture {
let peers: Vec<PeerInfoDisplay> = w_fut!(&self.peers)
.iter()
.connected()
.into_iter()
.map(|p| p.info.clone().into())
.collect();
json_response(&peers)
}
}
/// Peer operations
/// GET /v1/peers/10.12.12.13
/// POST /v1/peers/10.12.12.13/ban
/// POST /v1/peers/10.12.12.13/unban
pub struct PeerHandler {
pub peers: Weak<p2p::Peers>,
}
impl PeerHandler {
pub fn get_peers(&self, addr: Option<SocketAddr>) -> Result<Vec<PeerData>, Error> {
if let Some(addr) = addr {
let peer_addr = PeerAddr(addr);
let peer_data: PeerData = w(&self.peers)?.get_peer(peer_addr).map_err(|e| {
let e: Error = ErrorKind::Internal(format!("get peer error: {:?}", e)).into();
e
})?;
return Ok(vec![peer_data]);
}
let peers = w(&self.peers)?.all_peer_data();
Ok(peers)
}
pub fn ban_peer(&self, addr: SocketAddr) -> Result<(), Error> {
let peer_addr = PeerAddr(addr);
w(&self.peers)?
.ban_peer(peer_addr, ReasonForBan::ManualBan)
.map_err(|e| ErrorKind::Internal(format!("ban peer error: {:?}", e)).into())
}
pub fn unban_peer(&self, addr: SocketAddr) -> Result<(), Error> {
let peer_addr = PeerAddr(addr);
w(&self.peers)?
.unban_peer(peer_addr)
.map_err(|e| ErrorKind::Internal(format!("unban peer error: {:?}", e)).into())
}
}
impl Handler for PeerHandler {
fn get(&self, req: Request<Body>) -> ResponseFuture {
let command = right_path_element!(req);
// We support both "ip" and "ip:port" here for peer_addr.
// "ip:port" is only really useful for local usernet testing on loopback address.
// Normally we map peers to ip and only allow a single peer per ip address.
let peer_addr;
if let Ok(ip_addr) = command.parse() {
peer_addr = PeerAddr::from_ip(ip_addr);
} else if let Ok(addr) = command.parse() {
peer_addr = PeerAddr(addr);
} else {
return response(
StatusCode::BAD_REQUEST,
format!("peer address unrecognized: {}", req.uri().path()),
);
}
match w_fut!(&self.peers).get_peer(peer_addr) {
Ok(peer) => json_response(&peer),
Err(_) => response(StatusCode::NOT_FOUND, "peer not found"),
}
}
fn post(&self, req: Request<Body>) -> ResponseFuture {
let mut path_elems = req.uri().path().trim_end_matches('/').rsplit('/');
let command = match path_elems.next() {
None => return response(StatusCode::BAD_REQUEST, "invalid url"),
Some(c) => c,
};
let addr = match path_elems.next() {
None => return response(StatusCode::BAD_REQUEST, "invalid url"),
Some(a) => {
if let Ok(ip_addr) = a.parse() {
PeerAddr::from_ip(ip_addr)
} else if let Ok(addr) = a.parse() {
PeerAddr(addr)
} else {
return response(
StatusCode::BAD_REQUEST,
format!("invalid peer address: {}", req.uri().path()),
);
}
}
};
match command {
"ban" => match w_fut!(&self.peers).ban_peer(addr, ReasonForBan::ManualBan) {
Ok(_) => response(StatusCode::OK, "{}"),
Err(e) => response(
StatusCode::INTERNAL_SERVER_ERROR,
format!("ban failed: {:?}", e),
),
},
"unban" => match w_fut!(&self.peers).unban_peer(addr) {
Ok(_) => response(StatusCode::OK, "{}"),
Err(e) => response(
StatusCode::INTERNAL_SERVER_ERROR,
format!("unban failed: {:?}", e),
),
},
_ => response(StatusCode::BAD_REQUEST, "invalid command"),
}
}
}
|
impl PeersConnectedHandler {
pub fn get_connected_peers(&self) -> Result<Vec<PeerInfoDisplay>, Error> {
|
random_line_split
|
peers_api.rs
|
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::utils::w;
use crate::p2p::types::{PeerAddr, PeerInfoDisplay, ReasonForBan};
use crate::p2p::{self, PeerData};
use crate::rest::*;
use crate::router::{Handler, ResponseFuture};
use crate::web::*;
use hyper::{Body, Request, StatusCode};
use std::net::SocketAddr;
use std::sync::Weak;
pub struct PeersAllHandler {
pub peers: Weak<p2p::Peers>,
}
impl Handler for PeersAllHandler {
fn get(&self, _req: Request<Body>) -> ResponseFuture {
let peers = &w_fut!(&self.peers).all_peer_data();
json_response_pretty(&peers)
}
}
pub struct PeersConnectedHandler {
pub peers: Weak<p2p::Peers>,
}
impl PeersConnectedHandler {
pub fn get_connected_peers(&self) -> Result<Vec<PeerInfoDisplay>, Error> {
let peers = w(&self.peers)?
.iter()
.connected()
.into_iter()
.map(|p| p.info.clone().into())
.collect::<Vec<PeerInfoDisplay>>();
Ok(peers)
}
}
impl Handler for PeersConnectedHandler {
fn get(&self, _req: Request<Body>) -> ResponseFuture {
let peers: Vec<PeerInfoDisplay> = w_fut!(&self.peers)
.iter()
.connected()
.into_iter()
.map(|p| p.info.clone().into())
.collect();
json_response(&peers)
}
}
/// Peer operations
/// GET /v1/peers/10.12.12.13
/// POST /v1/peers/10.12.12.13/ban
/// POST /v1/peers/10.12.12.13/unban
pub struct PeerHandler {
pub peers: Weak<p2p::Peers>,
}
impl PeerHandler {
pub fn
|
(&self, addr: Option<SocketAddr>) -> Result<Vec<PeerData>, Error> {
if let Some(addr) = addr {
let peer_addr = PeerAddr(addr);
let peer_data: PeerData = w(&self.peers)?.get_peer(peer_addr).map_err(|e| {
let e: Error = ErrorKind::Internal(format!("get peer error: {:?}", e)).into();
e
})?;
return Ok(vec![peer_data]);
}
let peers = w(&self.peers)?.all_peer_data();
Ok(peers)
}
pub fn ban_peer(&self, addr: SocketAddr) -> Result<(), Error> {
let peer_addr = PeerAddr(addr);
w(&self.peers)?
.ban_peer(peer_addr, ReasonForBan::ManualBan)
.map_err(|e| ErrorKind::Internal(format!("ban peer error: {:?}", e)).into())
}
pub fn unban_peer(&self, addr: SocketAddr) -> Result<(), Error> {
let peer_addr = PeerAddr(addr);
w(&self.peers)?
.unban_peer(peer_addr)
.map_err(|e| ErrorKind::Internal(format!("unban peer error: {:?}", e)).into())
}
}
impl Handler for PeerHandler {
fn get(&self, req: Request<Body>) -> ResponseFuture {
let command = right_path_element!(req);
// We support both "ip" and "ip:port" here for peer_addr.
// "ip:port" is only really useful for local usernet testing on loopback address.
// Normally we map peers to ip and only allow a single peer per ip address.
let peer_addr;
if let Ok(ip_addr) = command.parse() {
peer_addr = PeerAddr::from_ip(ip_addr);
} else if let Ok(addr) = command.parse() {
peer_addr = PeerAddr(addr);
} else {
return response(
StatusCode::BAD_REQUEST,
format!("peer address unrecognized: {}", req.uri().path()),
);
}
match w_fut!(&self.peers).get_peer(peer_addr) {
Ok(peer) => json_response(&peer),
Err(_) => response(StatusCode::NOT_FOUND, "peer not found"),
}
}
fn post(&self, req: Request<Body>) -> ResponseFuture {
let mut path_elems = req.uri().path().trim_end_matches('/').rsplit('/');
let command = match path_elems.next() {
None => return response(StatusCode::BAD_REQUEST, "invalid url"),
Some(c) => c,
};
let addr = match path_elems.next() {
None => return response(StatusCode::BAD_REQUEST, "invalid url"),
Some(a) => {
if let Ok(ip_addr) = a.parse() {
PeerAddr::from_ip(ip_addr)
} else if let Ok(addr) = a.parse() {
PeerAddr(addr)
} else {
return response(
StatusCode::BAD_REQUEST,
format!("invalid peer address: {}", req.uri().path()),
);
}
}
};
match command {
"ban" => match w_fut!(&self.peers).ban_peer(addr, ReasonForBan::ManualBan) {
Ok(_) => response(StatusCode::OK, "{}"),
Err(e) => response(
StatusCode::INTERNAL_SERVER_ERROR,
format!("ban failed: {:?}", e),
),
},
"unban" => match w_fut!(&self.peers).unban_peer(addr) {
Ok(_) => response(StatusCode::OK, "{}"),
Err(e) => response(
StatusCode::INTERNAL_SERVER_ERROR,
format!("unban failed: {:?}", e),
),
},
_ => response(StatusCode::BAD_REQUEST, "invalid command"),
}
}
}
|
get_peers
|
identifier_name
|
peers_api.rs
|
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::utils::w;
use crate::p2p::types::{PeerAddr, PeerInfoDisplay, ReasonForBan};
use crate::p2p::{self, PeerData};
use crate::rest::*;
use crate::router::{Handler, ResponseFuture};
use crate::web::*;
use hyper::{Body, Request, StatusCode};
use std::net::SocketAddr;
use std::sync::Weak;
pub struct PeersAllHandler {
pub peers: Weak<p2p::Peers>,
}
impl Handler for PeersAllHandler {
fn get(&self, _req: Request<Body>) -> ResponseFuture {
let peers = &w_fut!(&self.peers).all_peer_data();
json_response_pretty(&peers)
}
}
pub struct PeersConnectedHandler {
pub peers: Weak<p2p::Peers>,
}
impl PeersConnectedHandler {
pub fn get_connected_peers(&self) -> Result<Vec<PeerInfoDisplay>, Error> {
let peers = w(&self.peers)?
.iter()
.connected()
.into_iter()
.map(|p| p.info.clone().into())
.collect::<Vec<PeerInfoDisplay>>();
Ok(peers)
}
}
impl Handler for PeersConnectedHandler {
fn get(&self, _req: Request<Body>) -> ResponseFuture {
let peers: Vec<PeerInfoDisplay> = w_fut!(&self.peers)
.iter()
.connected()
.into_iter()
.map(|p| p.info.clone().into())
.collect();
json_response(&peers)
}
}
/// Peer operations
/// GET /v1/peers/10.12.12.13
/// POST /v1/peers/10.12.12.13/ban
/// POST /v1/peers/10.12.12.13/unban
pub struct PeerHandler {
pub peers: Weak<p2p::Peers>,
}
impl PeerHandler {
pub fn get_peers(&self, addr: Option<SocketAddr>) -> Result<Vec<PeerData>, Error> {
if let Some(addr) = addr {
let peer_addr = PeerAddr(addr);
let peer_data: PeerData = w(&self.peers)?.get_peer(peer_addr).map_err(|e| {
let e: Error = ErrorKind::Internal(format!("get peer error: {:?}", e)).into();
e
})?;
return Ok(vec![peer_data]);
}
let peers = w(&self.peers)?.all_peer_data();
Ok(peers)
}
pub fn ban_peer(&self, addr: SocketAddr) -> Result<(), Error> {
let peer_addr = PeerAddr(addr);
w(&self.peers)?
.ban_peer(peer_addr, ReasonForBan::ManualBan)
.map_err(|e| ErrorKind::Internal(format!("ban peer error: {:?}", e)).into())
}
pub fn unban_peer(&self, addr: SocketAddr) -> Result<(), Error> {
let peer_addr = PeerAddr(addr);
w(&self.peers)?
.unban_peer(peer_addr)
.map_err(|e| ErrorKind::Internal(format!("unban peer error: {:?}", e)).into())
}
}
impl Handler for PeerHandler {
fn get(&self, req: Request<Body>) -> ResponseFuture {
let command = right_path_element!(req);
// We support both "ip" and "ip:port" here for peer_addr.
// "ip:port" is only really useful for local usernet testing on loopback address.
// Normally we map peers to ip and only allow a single peer per ip address.
let peer_addr;
if let Ok(ip_addr) = command.parse() {
peer_addr = PeerAddr::from_ip(ip_addr);
} else if let Ok(addr) = command.parse() {
peer_addr = PeerAddr(addr);
} else {
return response(
StatusCode::BAD_REQUEST,
format!("peer address unrecognized: {}", req.uri().path()),
);
}
match w_fut!(&self.peers).get_peer(peer_addr) {
Ok(peer) => json_response(&peer),
Err(_) => response(StatusCode::NOT_FOUND, "peer not found"),
}
}
fn post(&self, req: Request<Body>) -> ResponseFuture {
let mut path_elems = req.uri().path().trim_end_matches('/').rsplit('/');
let command = match path_elems.next() {
None => return response(StatusCode::BAD_REQUEST, "invalid url"),
Some(c) => c,
};
let addr = match path_elems.next() {
None => return response(StatusCode::BAD_REQUEST, "invalid url"),
Some(a) =>
|
};
match command {
"ban" => match w_fut!(&self.peers).ban_peer(addr, ReasonForBan::ManualBan) {
Ok(_) => response(StatusCode::OK, "{}"),
Err(e) => response(
StatusCode::INTERNAL_SERVER_ERROR,
format!("ban failed: {:?}", e),
),
},
"unban" => match w_fut!(&self.peers).unban_peer(addr) {
Ok(_) => response(StatusCode::OK, "{}"),
Err(e) => response(
StatusCode::INTERNAL_SERVER_ERROR,
format!("unban failed: {:?}", e),
),
},
_ => response(StatusCode::BAD_REQUEST, "invalid command"),
}
}
}
|
{
if let Ok(ip_addr) = a.parse() {
PeerAddr::from_ip(ip_addr)
} else if let Ok(addr) = a.parse() {
PeerAddr(addr)
} else {
return response(
StatusCode::BAD_REQUEST,
format!("invalid peer address: {}", req.uri().path()),
);
}
}
|
conditional_block
|
peers_api.rs
|
// Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::utils::w;
use crate::p2p::types::{PeerAddr, PeerInfoDisplay, ReasonForBan};
use crate::p2p::{self, PeerData};
use crate::rest::*;
use crate::router::{Handler, ResponseFuture};
use crate::web::*;
use hyper::{Body, Request, StatusCode};
use std::net::SocketAddr;
use std::sync::Weak;
pub struct PeersAllHandler {
pub peers: Weak<p2p::Peers>,
}
impl Handler for PeersAllHandler {
fn get(&self, _req: Request<Body>) -> ResponseFuture {
let peers = &w_fut!(&self.peers).all_peer_data();
json_response_pretty(&peers)
}
}
pub struct PeersConnectedHandler {
pub peers: Weak<p2p::Peers>,
}
impl PeersConnectedHandler {
pub fn get_connected_peers(&self) -> Result<Vec<PeerInfoDisplay>, Error> {
let peers = w(&self.peers)?
.iter()
.connected()
.into_iter()
.map(|p| p.info.clone().into())
.collect::<Vec<PeerInfoDisplay>>();
Ok(peers)
}
}
impl Handler for PeersConnectedHandler {
fn get(&self, _req: Request<Body>) -> ResponseFuture {
let peers: Vec<PeerInfoDisplay> = w_fut!(&self.peers)
.iter()
.connected()
.into_iter()
.map(|p| p.info.clone().into())
.collect();
json_response(&peers)
}
}
/// Peer operations
/// GET /v1/peers/10.12.12.13
/// POST /v1/peers/10.12.12.13/ban
/// POST /v1/peers/10.12.12.13/unban
pub struct PeerHandler {
pub peers: Weak<p2p::Peers>,
}
impl PeerHandler {
pub fn get_peers(&self, addr: Option<SocketAddr>) -> Result<Vec<PeerData>, Error>
|
pub fn ban_peer(&self, addr: SocketAddr) -> Result<(), Error> {
let peer_addr = PeerAddr(addr);
w(&self.peers)?
.ban_peer(peer_addr, ReasonForBan::ManualBan)
.map_err(|e| ErrorKind::Internal(format!("ban peer error: {:?}", e)).into())
}
pub fn unban_peer(&self, addr: SocketAddr) -> Result<(), Error> {
let peer_addr = PeerAddr(addr);
w(&self.peers)?
.unban_peer(peer_addr)
.map_err(|e| ErrorKind::Internal(format!("unban peer error: {:?}", e)).into())
}
}
impl Handler for PeerHandler {
fn get(&self, req: Request<Body>) -> ResponseFuture {
let command = right_path_element!(req);
// We support both "ip" and "ip:port" here for peer_addr.
// "ip:port" is only really useful for local usernet testing on loopback address.
// Normally we map peers to ip and only allow a single peer per ip address.
let peer_addr;
if let Ok(ip_addr) = command.parse() {
peer_addr = PeerAddr::from_ip(ip_addr);
} else if let Ok(addr) = command.parse() {
peer_addr = PeerAddr(addr);
} else {
return response(
StatusCode::BAD_REQUEST,
format!("peer address unrecognized: {}", req.uri().path()),
);
}
match w_fut!(&self.peers).get_peer(peer_addr) {
Ok(peer) => json_response(&peer),
Err(_) => response(StatusCode::NOT_FOUND, "peer not found"),
}
}
fn post(&self, req: Request<Body>) -> ResponseFuture {
let mut path_elems = req.uri().path().trim_end_matches('/').rsplit('/');
let command = match path_elems.next() {
None => return response(StatusCode::BAD_REQUEST, "invalid url"),
Some(c) => c,
};
let addr = match path_elems.next() {
None => return response(StatusCode::BAD_REQUEST, "invalid url"),
Some(a) => {
if let Ok(ip_addr) = a.parse() {
PeerAddr::from_ip(ip_addr)
} else if let Ok(addr) = a.parse() {
PeerAddr(addr)
} else {
return response(
StatusCode::BAD_REQUEST,
format!("invalid peer address: {}", req.uri().path()),
);
}
}
};
match command {
"ban" => match w_fut!(&self.peers).ban_peer(addr, ReasonForBan::ManualBan) {
Ok(_) => response(StatusCode::OK, "{}"),
Err(e) => response(
StatusCode::INTERNAL_SERVER_ERROR,
format!("ban failed: {:?}", e),
),
},
"unban" => match w_fut!(&self.peers).unban_peer(addr) {
Ok(_) => response(StatusCode::OK, "{}"),
Err(e) => response(
StatusCode::INTERNAL_SERVER_ERROR,
format!("unban failed: {:?}", e),
),
},
_ => response(StatusCode::BAD_REQUEST, "invalid command"),
}
}
}
|
{
if let Some(addr) = addr {
let peer_addr = PeerAddr(addr);
let peer_data: PeerData = w(&self.peers)?.get_peer(peer_addr).map_err(|e| {
let e: Error = ErrorKind::Internal(format!("get peer error: {:?}", e)).into();
e
})?;
return Ok(vec![peer_data]);
}
let peers = w(&self.peers)?.all_peer_data();
Ok(peers)
}
|
identifier_body
|
lib.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
#![crate_id = "arena#0.10"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![license = "MIT/ASL2"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://static.rust-lang.org/doc/master")]
#![allow(missing_doc)]
#![feature(managed_boxes)]
extern crate collections;
use std::cast::{transmute, transmute_mut, transmute_mut_region};
use std::cast;
use std::cell::{Cell, RefCell};
use std::mem;
use std::ptr::read;
use std::cmp;
use std::num;
use std::rc::Rc;
use std::rt::global_heap;
use std::intrinsics::{TyDesc, get_tydesc};
use std::intrinsics;
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone, Eq)]
struct Chunk {
data: Rc<RefCell<Vec<u8> >>,
fill: Cell<uint>,
is_copy: Cell<bool>,
}
impl Chunk {
fn capacity(&self) -> uint {
self.data.borrow().capacity()
}
unsafe fn as_ptr(&self) -> *u8 {
self.data.borrow().as_ptr()
}
}
// Arenas are used to quickly allocate objects that share a
// lifetime. The arena uses ~[u8] vectors as a backing store to
// allocate objects from. For each allocated object, the arena stores
// a pointer to the type descriptor followed by the
// object. (Potentially with alignment padding after each of them.)
// When the arena is destroyed, it iterates through all of its chunks,
// and uses the tydesc information to trace through the objects,
// calling the destructors on them.
// One subtle point that needs to be addressed is how to handle
// failures while running the user provided initializer function. It
// is important to not run the destructor on uninitialized objects, but
// how to detect them is somewhat subtle. Since alloc() can be invoked
// recursively, it is not sufficient to simply exclude the most recent
// object. To solve this without requiring extra space, we use the low
// order bit of the tydesc pointer to encode whether the object it
// describes has been fully initialized.
// As an optimization, objects with destructors are stored in
// different chunks than objects without destructors. This reduces
// overhead when initializing plain-old-data and means we don't need
// to waste time running the destructors of POD.
pub struct Arena {
// The head is separated out from the list as an unbenchmarked
// microoptimization, to avoid needing to case on the list to
// access the head.
priv head: Chunk,
priv copy_head: Chunk,
priv chunks: RefCell<Vec<Chunk>>,
}
impl Arena {
pub fn new() -> Arena {
Arena::new_with_size(32u)
}
pub fn new_with_size(initial_size: uint) -> Arena {
Arena {
head: chunk(initial_size, false),
copy_head: chunk(initial_size, true),
chunks: RefCell::new(Vec::new()),
}
}
}
fn chunk(size: uint, is_copy: bool) -> Chunk {
Chunk {
data: Rc::new(RefCell::new(Vec::with_capacity(size))),
fill: Cell::new(0u),
is_copy: Cell::new(is_copy),
}
}
#[unsafe_destructor]
impl Drop for Arena {
fn drop(&mut self) {
unsafe {
destroy_chunk(&self.head);
for chunk in self.chunks.borrow().iter() {
if !chunk.is_copy.get() {
destroy_chunk(chunk);
}
}
}
}
}
#[inline]
fn round_up(base: uint, align: uint) -> uint {
(base.checked_add(&(align - 1))).unwrap() & !(align - 1)
}
// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
let mut idx = 0;
let buf = chunk.as_ptr();
let fill = chunk.fill.get();
while idx < fill {
let tydesc_data: *uint = transmute(buf.offset(idx as int));
let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
let (size, align) = ((*tydesc).size, (*tydesc).align);
let after_tydesc = idx + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
//debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
// start, size, align, is_done);
if is_done {
((*tydesc).drop_glue)(buf.offset(start as int) as *i8);
}
// Find where the next tydesc lives
idx = round_up(start + size, mem::pref_align_of::<*TyDesc>());
}
}
// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a failure occurs
// during an initializer.
#[inline]
fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
p as uint | (is_done as uint)
}
#[inline]
fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
((p & !1) as *TyDesc, p & 1 == 1)
}
impl Arena {
fn chunk_size(&self) -> uint {
self.copy_head.capacity()
}
// Functions for the POD part of the arena
fn alloc_copy_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
// Allocate a new chunk.
let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
self.chunks.borrow_mut().push(self.copy_head.clone());
self.copy_head =
chunk(num::next_power_of_two(new_min_chunk_size + 1u), true);
return self.alloc_copy_inner(n_bytes, align);
}
#[inline]
fn alloc_copy_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
unsafe {
let this = transmute_mut_region(self);
let start = round_up(this.copy_head.fill.get(), align);
let end = start + n_bytes;
if end > self.chunk_size() {
return this.alloc_copy_grow(n_bytes, align);
}
this.copy_head.fill.set(end);
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill.get());
this.copy_head.as_ptr().offset(start as int)
}
}
#[inline]
fn alloc_copy<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let ptr = self.alloc_copy_inner(mem::size_of::<T>(), mem::min_align_of::<T>());
let ptr: *mut T = transmute(ptr);
mem::move_val_init(&mut (*ptr), op());
return transmute(ptr);
}
}
// Functions for the non-POD part of the arena
fn alloc_noncopy_grow(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
// Allocate a new chunk.
let new_min_chunk_size = cmp::max(n_bytes, self.chunk_size());
self.chunks.borrow_mut().push(self.head.clone());
self.head =
chunk(num::next_power_of_two(new_min_chunk_size + 1u), false);
return self.alloc_noncopy_inner(n_bytes, align);
}
#[inline]
fn alloc_noncopy_inner(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
unsafe {
let start;
let end;
let tydesc_start;
let after_tydesc;
{
let head = transmute_mut_region(&mut self.head);
tydesc_start = head.fill.get();
after_tydesc = head.fill.get() + mem::size_of::<*TyDesc>();
start = round_up(after_tydesc, align);
end = start + n_bytes;
}
if end > self.head.capacity() {
return self.alloc_noncopy_grow(n_bytes, align);
}
let head = transmute_mut_region(&mut self.head);
head.fill.set(round_up(end, mem::pref_align_of::<*TyDesc>()));
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill);
let buf = self.head.as_ptr();
return (buf.offset(tydesc_start as int), buf.offset(start as int));
}
}
#[inline]
fn alloc_noncopy<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let (ty_ptr, ptr) =
self.alloc_noncopy_inner(mem::size_of::<T>(), mem::min_align_of::<T>());
let ty_ptr: *mut uint = transmute(ty_ptr);
let ptr: *mut T = transmute(ptr);
// Write in our tydesc along with a bit indicating that it
// has *not* been initialized yet.
*ty_ptr = transmute(tydesc);
// Actually initialize it
mem::move_val_init(&mut(*ptr), op());
// Now that we are done, update the tydesc to indicate that
// the object is there.
*ty_ptr = bitpack_tydesc_ptr(tydesc, true);
return transmute(ptr);
}
}
// The external interface
#[inline]
pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
unsafe {
// FIXME: Borrow check
let this = transmute_mut(self);
if intrinsics::needs_drop::<T>() {
this.alloc_noncopy(op)
} else {
this.alloc_copy(op)
}
}
}
}
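// Usage sketch for the closure-based interface above (illustrative only):
//     let arena = Arena::new();
//     let n: &int = arena.alloc(|| 10);        // no drop glue: copy chunk
//     let s: &~str = arena.alloc(|| ~"hello"); // drop glue: noncopy chunk
// Both references stay valid until the arena itself is dropped.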
#[test]
fn test_arena_destructors() {
let arena = Arena::new();
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| @i);
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
#[test]
#[should_fail]
fn test_arena_destructors_fail() {
let arena = Arena::new();
// Put some stuff in the arena.
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| { @i });
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| { [0u8, 1u8, 2u8] });
}
// Now, fail while allocating
arena.alloc::<@int>(|| {
// Now fail.
fail!();
});
}
/// An arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
priv ptr: *T,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
priv end: *T,
/// A pointer to the first arena segment.
priv first: Option<~TypedArenaChunk<T>>,
}
struct TypedArenaChunk<T> {
/// Pointer to the next arena segment.
next: Option<~TypedArenaChunk<T>>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
impl<T> TypedArenaChunk<T> {
#[inline]
fn new(next: Option<~TypedArenaChunk<T>>, capacity: uint) -> ~TypedArenaChunk<T> {
let mut size = mem::size_of::<TypedArenaChunk<T>>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
let chunk = global_heap::exchange_malloc(size);
let mut chunk: ~TypedArenaChunk<T> = cast::transmute(chunk);
mem::move_val_init(&mut chunk.next, next);
chunk
};
chunk.capacity = capacity;
chunk
}
    /// Destroys this arena chunk, running the drop glue for the first `len`
    /// objects if `T` has a destructor, and then destroying the next chunk.
#[inline]
unsafe fn destroy(&mut self, len: uint) {
// Destroy all the allocated objects.
if intrinsics::needs_drop::<T>() {
let mut start = self.start();
for _ in range(0, len) {
read(start as *T); // run the destructor on the pointer
start = start.offset(mem::size_of::<T>() as int)
}
}
// Destroy the next chunk.
let next_opt = mem::replace(&mut self.next, None);
match next_opt {
None => {}
Some(mut next) => {
// We assume that the next chunk is completely filled.
next.destroy(next.capacity)
}
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self) -> *u8 {
let this: *TypedArenaChunk<T> = self;
unsafe {
cast::transmute(round_up(this.offset(1) as uint, mem::min_align_of::<T>()))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self) -> *u8 {
unsafe {
let size = mem::size_of::<T>().checked_mul(&self.capacity).unwrap();
self.start().offset(size as int)
}
}
}
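// Layout sketch for a chunk as built by new() above: one heap allocation
// holding the header (the `next` pointer and `capacity`), then alignment
// padding, then `capacity` slots of T. start() and end() bound that slot
// region, which is all the state the TypedArena allocator needs.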
impl<T> TypedArena<T> {
/// Creates a new arena with preallocated space for 8 objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new arena with preallocated space for the given number of
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
let chunk = TypedArenaChunk::<T>::new(None, capacity);
TypedArena {
ptr: chunk.start() as *T,
end: chunk.end() as *T,
first: Some(chunk),
}
}
/// Allocates an object into this arena.
#[inline]
pub fn alloc<'a>(&'a self, object: T) -> &'a T {
unsafe {
let this = cast::transmute_mut(self);
if this.ptr == this.end {
this.grow()
}
let ptr: &'a mut T = cast::transmute(this.ptr);
mem::move_val_init(ptr, object);
this.ptr = this.ptr.offset(1);
let ptr: &'a T = ptr;
ptr
}
}
/// Grows the arena.
#[inline(never)]
fn grow(&mut self) {
let chunk = self.first.take_unwrap();
let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::<T>::new(Some(chunk), new_capacity);
self.ptr = chunk.start() as *T;
self.end = chunk.end() as *T;
self.first = Some(chunk)
}
}
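// Usage sketch (illustrative only): each allocation is a pointer bump, with
// grow() doubling the chunk capacity whenever the current chunk fills up.
//     let arena: TypedArena<int> = TypedArena::new();
//     let a = arena.alloc(1);
//     let b = arena.alloc(2);
//     assert_eq!(*a + *b, 3);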
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
// Determine how much was filled.
let start = self.first.get_ref().start() as uint;
let end = self.ptr as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
unsafe {
self.first.get_mut_ref().destroy(diff)
}
}
}
#[cfg(test)]
mod tests {
extern crate test;
use self::test::BenchHarness;
use super::{Arena, TypedArena};
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn test_copy() {
let arena = TypedArena::new();
for _ in range(0, 100000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_copy(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
})
})
}
#[bench]
    pub fn bench_copy_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
~Point {
x: 1,
y: 2,
z: 3,
}
})
}
#[bench]
pub fn bench_copy_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
})
})
}
struct Noncopy {
string: ~str,
array: Vec<int>,
}
#[test]
pub fn test_noncopy() {
let arena = TypedArena::new();
for _ in range(0, 100000) {
arena.alloc(Noncopy {
string: ~"hello world",
array: vec!( 1, 2, 3, 4, 5 ),
});
}
}
#[bench]
pub fn bench_noncopy(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Noncopy {
string: ~"hello world",
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
#[bench]
pub fn bench_noncopy_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
~Noncopy {
string: ~"hello world",
array: vec!( 1, 2, 3, 4, 5 ),
}
})
}
#[bench]
pub fn bench_noncopy_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
arena.alloc(|| Noncopy {
string: ~"hello world",
array: vec!( 1, 2, 3, 4, 5 ),
})
})
}
}
79.rs
/* Problem 79: Passcode derivation
*
* A common security method used for online banking is to ask the user for three random characters
* from a passcode. For example, if the passcode was 531278, they may ask for the 2nd, 3rd, and 5th
* characters; the expected reply would be: 317.
*
* The text file, keylog.txt, contains fifty successful login attempts.
*
* Given that the three characters are always asked for in order, analyse the file so as to
* determine the shortest possible secret passcode of unknown length.
**/
use memchr::memchr;
use std::collections::HashSet;
const KEYLOG: &'static str = include_str!("../../data/79-keylog.txt");
fn main() {
let seen: HashSet<Vec<u8>> = KEYLOG
.lines()
.map(|line| {
line.chars()
.map(|ch| ch.to_digit(10).unwrap() as u8)
.collect()
})
.collect();
let different_digits = seen
.iter()
.flat_map(|entry| entry.iter())
.collect::<HashSet<_>>()
.len();
let hyp = find_hypothesis(different_digits as u8, |hyp| {
seen.iter().all(|followers| fits_followers(hyp, followers))
});
for num in hyp {
print!("{}", num);
}
println!("");
}
fn find_hypothesis(min_size: u8, f: impl Fn(&[u8]) -> bool) -> Vec<u8> {
let mut current = vec![0; min_size as usize];
loop {
let maxed_count = current.iter().rev().take_while(|i| **i == 9).count();
let first_to_turn = current.len() - maxed_count;
for num in &mut current[first_to_turn..] {
*num = 0;
}
if first_to_turn >= 1 {
current[first_to_turn - 1] += 1;
} else {
current.push(0);
}
        if f(&current) {
return current;
}
}
}
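// Worked example of the increment step above: [1, 9, 9] has two trailing
// 9s, which are zeroed before the preceding digit is bumped, giving
// [2, 0, 0]. When every digit is 9 (e.g. [9, 9]), the whole vector is
// zeroed and extended by one digit, yielding [0, 0, 0].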
fn fits_followers(hyp: &[u8], followers: &[u8]) -> bool {
let mut remaining = hyp;
for &follower in followers {
let index = memchr(follower, remaining);
match index {
            // Advance within the remaining slice (indexing `hyp` here would
            // rescan from the start and accept out-of-order followers).
            Some(index) => remaining = &remaining[index + 1..],
None => return false,
}
}
return true;
}
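// Example with hypothetical digits: fits_followers(&[3, 1, 7], &[3, 7]) is
// true, since a 3 occurs before a 7; fits_followers(&[3, 1, 7], &[7, 3]) is
// false, since no 3 remains after the 7.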
sha2.rs
// (Reconstructed: this row is truncated here; the call to `bytes.to_bits()`
// below implies a small conversion trait along these lines.)
trait ToBits {
    fn to_bits(self) -> (u64, u64);
}
impl ToBits for u64 {
    fn to_bits(self) -> (u64, u64) {
return (self >> 61, self << 3);
}
}
/// Adds the specified number of bytes to the bit count. panic!() if this would cause numeric
/// overflow.
fn add_bytes_to_bits(bits: u64, bytes: u64) -> u64 {
let (new_high_bits, new_low_bits) = bytes.to_bits();
if new_high_bits > 0 {
panic!("numeric overflow occurred.")
}
match bits.checked_add(new_low_bits) {
Some(x) => return x,
None => panic!("numeric overflow occurred.")
}
}
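// Worked example: add_bytes_to_bits(100, 10) == 180, because 10 bytes
// contribute (0, 80) via to_bits() and 100 + 80 does not overflow;
// add_bytes_to_bits(u64::MAX, 1) panics on the checked_add.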
/// A FixedBuffer, as its name implies, is a fixed size buffer. When the buffer becomes full, it
/// must be processed. The input() method takes care of processing and then clearing the buffer
/// automatically. However, other methods do not and require the caller to process the buffer. Any
/// method that modifies the buffer directly or provides the caller with bytes that can be modified
/// results in those bytes being marked as used by the buffer.
trait FixedBuffer {
/// Input a vector of bytes. If the buffer becomes full, process it with the provided
/// function and then clear the buffer.
fn input<F>(&mut self, input: &[u8], func: F) where
F: FnMut(&[u8]);
/// Reset the buffer.
fn reset(&mut self);
/// Zero the buffer up until the specified index. The buffer position currently must not be
/// greater than that index.
fn zero_until(&mut self, idx: usize);
/// Get a slice of the buffer of the specified size. There must be at least that many bytes
/// remaining in the buffer.
fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8];
/// Get the current buffer. The buffer must already be full. This clears the buffer as well.
fn full_buffer<'s>(&'s mut self) -> &'s [u8];
/// Get the current position of the buffer.
fn position(&self) -> usize;
/// Get the number of bytes remaining in the buffer until it is full.
fn remaining(&self) -> usize;
/// Get the size of the buffer
fn size(&self) -> usize;
}
/// A FixedBuffer of 64 bytes useful for implementing Sha256 which has a 64 byte blocksize.
struct FixedBuffer64 {
buffer: [u8; 64],
buffer_idx: usize,
}
impl FixedBuffer64 {
/// Create a new FixedBuffer64
fn new() -> FixedBuffer64 {
return FixedBuffer64 {
buffer: [0; 64],
buffer_idx: 0
};
}
}
impl FixedBuffer for FixedBuffer64 {
fn input<F>(&mut self, input: &[u8], mut func: F) where
F: FnMut(&[u8]),
{
let mut i = 0;
let size = self.size();
// If there is already data in the buffer, copy as much as we can into it and process
// the data if the buffer becomes full.
        if self.buffer_idx != 0
|
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(&input[i..i + size]);
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
&input[i..],
&mut self.buffer[..input_remaining]);
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: usize) {
assert!(idx >= self.buffer_idx);
self.buffer[self.buffer_idx..idx].set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8] {
self.buffer_idx += len;
return &mut self.buffer[self.buffer_idx - len..self.buffer_idx];
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return &self.buffer[..64];
}
fn position(&self) -> usize { self.buffer_idx }
fn remaining(&self) -> usize { 64 - self.buffer_idx }
fn size(&self) -> usize { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding<F>(&mut self, rem: usize, func: F) where F: FnMut(&[u8]);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding<F>(&mut self, rem: usize, mut func: F) where F: FnMut(&[u8]) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
pub trait Digest {
/// Provide message data.
///
/// # Arguments
///
/// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> usize;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
let mut buf: Vec<u8> = repeat(0).take((self.output_bits()+7)/8).collect();
self.result(&mut buf);
buf
}
/// Convenience function that retrieves the result of a digest as a
/// String in hexadecimal format.
fn result_str(&mut self) -> String {
self.result_bytes().to_hex().to_string()
}
}
// A structure that represents the state of a digest computation for the SHA-2 256 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32; 8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32; 8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
}
fn sigma1(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0; 64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round { ($t:expr) => (
w[$t] = sigma1(w[$t - 2]).wrapping_add(w[$t - 7])
.wrapping_add(sigma0(w[$t - 15])).wrapping_add(w[$t - 16]);
)
}
macro_rules! sha2_round {
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H = $H.wrapping_add(sum1($E)).wrapping_add(ch($E, $F, $G))
.wrapping_add($K[$t]).wrapping_add(w[$t]);
$D = $D.wrapping_add($H);
$H = $H.wrapping_add(sum0($A)).wrapping_add(maj($A, $B, $C));
}
)
}
read_u32v_be(&mut w[0..16], data);
        // Putting the message schedule inside the same loop as the round calculations allows
// the compiler to generate better code.
for t in (0..48).step_by(8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in (48..64).step_by(8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 = self.h0.wrapping_add(a);
self.h1 = self.h1.wrapping_add(b);
self.h2 = self.h2.wrapping_add(c);
self.h3 = self.h3.wrapping_add(d);
self.h4 = self.h4.wrapping_add(e);
self.h5 = self.h5.wrapping_add(f);
self.h6 = self.h6.wrapping_add(g);
self.h7 = self.h7.wrapping_add(h);
}
}
static K32: [u32; 64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32; 8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32; 8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
assert!(!self.finished);
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
}
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(&mut out[0..4], self.engine.state.h0);
write_u32_be(&mut out[4..8], self.engine.state.h1);
write_u32_be(&mut out[8..12], self.engine.state.h2);
write_u32_be(&mut out[12..16], self.engine.state.h3);
write_u32_be(&mut out[16..20], self.engine.state.h4);
write_u32_be(&mut out[20..24], self.engine.state.h5);
write_u32_be(&mut out[24..28], self.engine.state.h6);
write_u32_be(&mut out[28..32], self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> usize { 256 }
}
static H256: [u32; 8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
#![allow(deprecated)]
extern crate rand;
use self::rand::Rng;
use self::rand::isaac::IsaacRng;
use serialize::hex::FromHex;
use std::iter::repeat;
use std::u64;
use super::{Digest, Sha256, FixedBuffer};
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_panic]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits(u64::MAX, 1);
}
struct Test {
input: String,
output_str: String,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests {
sh.reset();
sh.input_str(&t.input);
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0 {
let take = (left + 1) / 2;
sh.input_str(&t.input[len - left..take + len - left]);
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_string(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to
|
{
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
&input[..buffer_remaining],
&mut self.buffer[self.buffer_idx..size]);
self.buffer_idx = 0;
func(&self.buffer);
i += buffer_remaining;
} else {
copy_memory(
input,
&mut self.buffer[self.buffer_idx..self.buffer_idx + input.len()]);
self.buffer_idx += input.len();
return;
}
}
|
conditional_block
|
sha2.rs
|
_bits(self) -> (u64, u64) {
return (self >> 61, self << 3);
}
}
/// Adds the specified number of bytes to the bit count. panic!() if this would cause numeric
/// overflow.
fn add_bytes_to_bits(bits: u64, bytes: u64) -> u64 {
let (new_high_bits, new_low_bits) = bytes.to_bits();
if new_high_bits > 0 {
panic!("numeric overflow occurred.")
}
match bits.checked_add(new_low_bits) {
Some(x) => return x,
None => panic!("numeric overflow occurred.")
}
}
/// A FixedBuffer, as its name implies, is a fixed-size buffer. When the buffer becomes full, it
/// must be processed. The input() method takes care of processing and then clearing the buffer
/// automatically. However, other methods do not and require the caller to process the buffer. Any
/// method that modifies the buffer directly or provides the caller with bytes that can be modified
/// results in those bytes being marked as used by the buffer.
trait FixedBuffer {
/// Input a vector of bytes. If the buffer becomes full, process it with the provided
/// function and then clear the buffer.
fn input<F>(&mut self, input: &[u8], func: F) where
F: FnMut(&[u8]);
/// Reset the buffer.
fn reset(&mut self);
/// Zero the buffer up until the specified index. The buffer position currently must not be
/// greater than that index.
fn zero_until(&mut self, idx: usize);
/// Get a slice of the buffer of the specified size. There must be at least that many bytes
/// remaining in the buffer.
fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8];
/// Get the current buffer. The buffer must already be full. This clears the buffer as well.
fn full_buffer<'s>(&'s mut self) -> &'s [u8];
/// Get the current position of the buffer.
fn position(&self) -> usize;
/// Get the number of bytes remaining in the buffer until it is full.
fn remaining(&self) -> usize;
/// Get the size of the buffer
fn size(&self) -> usize;
}
/// A FixedBuffer of 64 bytes useful for implementing Sha256 which has a 64 byte blocksize.
struct FixedBuffer64 {
buffer: [u8; 64],
buffer_idx: usize,
}
impl FixedBuffer64 {
/// Create a new FixedBuffer64
fn new() -> FixedBuffer64 {
return FixedBuffer64 {
buffer: [0; 64],
buffer_idx: 0
};
}
}
impl FixedBuffer for FixedBuffer64 {
fn input<F>(&mut self, input: &[u8], mut func: F) where
F: FnMut(&[u8]),
{
let mut i = 0;
let size = self.size();
// If there is already data in the buffer, copy as much as we can into it and process
// the data if the buffer becomes full.
        if self.buffer_idx != 0 {
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
&input[..buffer_remaining],
|
&mut self.buffer[self.buffer_idx..size]);
self.buffer_idx = 0;
func(&self.buffer);
i += buffer_remaining;
} else {
copy_memory(
input,
&mut self.buffer[self.buffer_idx..self.buffer_idx + input.len()]);
self.buffer_idx += input.len();
return;
}
}
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(&input[i..i + size]);
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
&input[i..],
&mut self.buffer[..input_remaining]);
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: usize) {
assert!(idx >= self.buffer_idx);
self.buffer[self.buffer_idx..idx].set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8] {
self.buffer_idx += len;
return &mut self.buffer[self.buffer_idx - len..self.buffer_idx];
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return &self.buffer[..64];
}
fn position(&self) -> usize { self.buffer_idx }
fn remaining(&self) -> usize { 64 - self.buffer_idx }
fn size(&self) -> usize { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding<F>(&mut self, rem: usize, func: F) where F: FnMut(&[u8]);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding<F>(&mut self, rem: usize, mut func: F) where F: FnMut(&[u8]) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
pub trait Digest {
/// Provide message data.
///
/// # Arguments
///
/// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> usize;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
let mut buf: Vec<u8> = repeat(0).take((self.output_bits()+7)/8).collect();
self.result(&mut buf);
buf
}
/// Convenience function that retrieves the result of a digest as a
/// String in hexadecimal format.
fn result_str(&mut self) -> String {
self.result_bytes().to_hex().to_string()
}
}
// A structure that represents the state of a digest computation for the SHA-2 256 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32; 8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32; 8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
}
fn sigma1(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0; 64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round { ($t:expr) => (
w[$t] = sigma1(w[$t - 2]).wrapping_add(w[$t - 7])
.wrapping_add(sigma0(w[$t - 15])).wrapping_add(w[$t - 16]);
)
}
macro_rules! sha2_round {
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H = $H.wrapping_add(sum1($E)).wrapping_add(ch($E, $F, $G))
.wrapping_add($K[$t]).wrapping_add(w[$t]);
$D = $D.wrapping_add($H);
$H = $H.wrapping_add(sum0($A)).wrapping_add(maj($A, $B, $C));
}
)
}
read_u32v_be(&mut w[0..16], data);
        // Putting the message schedule inside the same loop as the round calculations allows
// the compiler to generate better code.
for t in (0..48).step_by(8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in (48..64).step_by(8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 = self.h0.wrapping_add(a);
self.h1 = self.h1.wrapping_add(b);
self.h2 = self.h2.wrapping_add(c);
self.h3 = self.h3.wrapping_add(d);
self.h4 = self.h4.wrapping_add(e);
self.h5 = self.h5.wrapping_add(f);
self.h6 = self.h6.wrapping_add(g);
self.h7 = self.h7.wrapping_add(h);
}
}
static K32: [u32; 64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32; 8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32; 8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
assert!(!self.finished);
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
}
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(&mut out[0..4], self.engine.state.h0);
write_u32_be(&mut out[4..8], self.engine.state.h1);
write_u32_be(&mut out[8..12], self.engine.state.h2);
write_u32_be(&mut out[12..16], self.engine.state.h3);
write_u32_be(&mut out[16..20], self.engine.state.h4);
write_u32_be(&mut out[20..24], self.engine.state.h5);
write_u32_be(&mut out[24..28], self.engine.state.h6);
write_u32_be(&mut out[28..32], self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> usize { 256 }
}
static H256: [u32; 8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
#![allow(deprecated)]
extern crate rand;
use self::rand::Rng;
use self::rand::isaac::IsaacRng;
use serialize::hex::FromHex;
use std::iter::repeat;
use std::u64;
use super::{Digest, Sha256, FixedBuffer};
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_panic]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits(u64::MAX, 1);
}
struct Test {
input: String,
output_str: String,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests {
sh.reset();
sh.input_str(&t.input);
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0 {
let take = (left + 1) / 2;
sh.input_str(&t.input[len - left..take + len - left]);
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_string(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to_
|
random_line_split
|
|
sha2.rs
|
};
}
}
impl FixedBuffer for FixedBuffer64 {
fn input<F>(&mut self, input: &[u8], mut func: F) where
F: FnMut(&[u8]),
{
let mut i = 0;
let size = self.size();
// If there is already data in the buffer, copy as much as we can into it and process
// the data if the buffer becomes full.
        if self.buffer_idx != 0 {
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
&input[..buffer_remaining],
&mut self.buffer[self.buffer_idx..size]);
self.buffer_idx = 0;
func(&self.buffer);
i += buffer_remaining;
} else {
copy_memory(
input,
&mut self.buffer[self.buffer_idx..self.buffer_idx + input.len()]);
self.buffer_idx += input.len();
return;
}
}
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(&input[i..i + size]);
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
&input[i..],
&mut self.buffer[..input_remaining]);
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: usize) {
assert!(idx >= self.buffer_idx);
self.buffer[self.buffer_idx..idx].set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8] {
self.buffer_idx += len;
return &mut self.buffer[self.buffer_idx - len..self.buffer_idx];
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return &self.buffer[..64];
}
fn position(&self) -> usize { self.buffer_idx }
fn remaining(&self) -> usize { 64 - self.buffer_idx }
fn size(&self) -> usize { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding<F>(&mut self, rem: usize, func: F) where F: FnMut(&[u8]);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding<F>(&mut self, rem: usize, mut func: F) where F: FnMut(&[u8]) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
pub trait Digest {
/// Provide message data.
///
/// # Arguments
///
/// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> usize;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
let mut buf: Vec<u8> = repeat(0).take((self.output_bits()+7)/8).collect();
self.result(&mut buf);
buf
}
/// Convenience function that retrieves the result of a digest as a
/// String in hexadecimal format.
fn result_str(&mut self) -> String {
self.result_bytes().to_hex().to_string()
}
}
// A structure that represents the state of a digest computation for the SHA-2 256 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32; 8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32; 8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
}
fn sigma1(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0; 64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round { ($t:expr) => (
w[$t] = sigma1(w[$t - 2]).wrapping_add(w[$t - 7])
.wrapping_add(sigma0(w[$t - 15])).wrapping_add(w[$t - 16]);
)
}
macro_rules! sha2_round {
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H = $H.wrapping_add(sum1($E)).wrapping_add(ch($E, $F, $G))
.wrapping_add($K[$t]).wrapping_add(w[$t]);
$D = $D.wrapping_add($H);
$H = $H.wrapping_add(sum0($A)).wrapping_add(maj($A, $B, $C));
}
)
}
read_u32v_be(&mut w[0..16], data);
        // Putting the message schedule inside the same loop as the round calculations allows
// the compiler to generate better code.
for t in (0..48).step_by(8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in (48..64).step_by(8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 = self.h0.wrapping_add(a);
self.h1 = self.h1.wrapping_add(b);
self.h2 = self.h2.wrapping_add(c);
self.h3 = self.h3.wrapping_add(d);
self.h4 = self.h4.wrapping_add(e);
self.h5 = self.h5.wrapping_add(f);
self.h6 = self.h6.wrapping_add(g);
self.h7 = self.h7.wrapping_add(h);
}
}
static K32: [u32; 64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32; 8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32; 8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
assert!(!self.finished);
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
}
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(&mut out[0..4], self.engine.state.h0);
write_u32_be(&mut out[4..8], self.engine.state.h1);
write_u32_be(&mut out[8..12], self.engine.state.h2);
write_u32_be(&mut out[12..16], self.engine.state.h3);
write_u32_be(&mut out[16..20], self.engine.state.h4);
write_u32_be(&mut out[20..24], self.engine.state.h5);
write_u32_be(&mut out[24..28], self.engine.state.h6);
write_u32_be(&mut out[28..32], self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> usize { 256 }
}
static H256: [u32; 8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
#![allow(deprecated)]
extern crate rand;
use self::rand::Rng;
use self::rand::isaac::IsaacRng;
use serialize::hex::FromHex;
use std::iter::repeat;
use std::u64;
use super::{Digest, Sha256, FixedBuffer};
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_panic]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits(u64::MAX, 1);
}
struct Test {
input: String,
output_str: String,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests {
sh.reset();
sh.input_str(&t.input);
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0 {
let take = (left + 1) / 2;
sh.input_str(&t.input[len - left..take + len - left]);
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_string(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to_string()
},
Test {
input: "The quick brown fox jumps over the lazy \
dog".to_string(),
output_str: "d7a8fbb307d7809469ca\
9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592".to_string()
},
Test {
input: "The quick brown fox jumps over the lazy \
dog.".to_string(),
output_str: "ef537f25c895bfa78252\
6529a9b63d97aa631564d5d789c2b765448c8635fb6c".to_string()
});
let tests = wikipedia_tests;
let mut sh: Box<_> = box Sha256::new();
test_hash(&mut *sh, &tests);
}
/// Feed 1,000,000 'a's into the digest with varying input sizes and check that the result is
/// correct.
fn test_digest_1million_random<D: Digest>(digest: &mut D, blocksize: usize, expected: &str)
|
{
let total_size = 1000000;
let buffer: Vec<u8> = repeat('a' as u8).take(blocksize * 2).collect();
let mut rng = IsaacRng::new_unseeded();
let mut count = 0;
digest.reset();
while count < total_size {
let next: usize = rng.gen_range(0, 2 * blocksize + 1);
let remaining = total_size - count;
let size = if next > remaining { remaining } else { next };
digest.input(&buffer[..size]);
count += size;
}
let result_str = digest.result_str();
let result_bytes = digest.result_bytes();
assert_eq!(expected, result_str);
|
identifier_body
|
|
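A minimal usage sketch of the Digest interface defined in the sha2.rs rows, assuming the Sha256 and Digest items from those rows are in scope; the expected hex digest is one of the Wikipedia test vectors quoted above.

fn demo() {
    let mut sh = Sha256::new();
    sh.input_str("The quick brown fox jumps over the lazy dog");
    assert_eq!(
        sh.result_str(),
        "d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592"
    );
    sh.reset(); // reset() is required before feeding more data after result()
}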
transport.rs
|
// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use kvproto::raft_serverpb::RaftMessage;
use crate::server::raft_client::RaftClient;
use crate::server::resolve::StoreAddrResolver;
use engine_traits::KvEngine;
use raftstore::router::RaftStoreRouter;
use raftstore::store::Transport;
use raftstore::Result as RaftStoreResult;
use std::marker::PhantomData;
pub struct ServerTransport<T, S, E>
where
    T: RaftStoreRouter<E> + 'static,
    S: StoreAddrResolver + 'static,
E: KvEngine,
{
raft_client: RaftClient<S, T, E>,
engine: PhantomData<E>,
}
impl<T, S, E> Clone for ServerTransport<T, S, E>
where
    T: RaftStoreRouter<E> + 'static,
    S: StoreAddrResolver + 'static,
E: KvEngine,
{
fn clone(&self) -> Self {
ServerTransport {
raft_client: self.raft_client.clone(),
engine: PhantomData,
}
}
}
impl<T, S, E> ServerTransport<T, S, E>
where
E: KvEngine,
    T: RaftStoreRouter<E> + 'static,
    S: StoreAddrResolver + 'static,
{
pub fn new(raft_client: RaftClient<S, T, E>) -> ServerTransport<T, S, E> {
ServerTransport {
raft_client,
engine: PhantomData,
}
}
}
impl<T, S, E> Transport for ServerTransport<T, S, E>
where
    T: RaftStoreRouter<E> + Unpin + 'static,
    S: StoreAddrResolver + Unpin + 'static,
E: KvEngine,
{
fn send(&mut self, msg: RaftMessage) -> RaftStoreResult<()> {
match self.raft_client.send(msg) {
Ok(()) => Ok(()),
Err(reason) => Err(raftstore::Error::Transport(reason)),
}
}
fn need_flush(&self) -> bool {
self.raft_client.need_flush()
}
fn
|
(&mut self) {
self.raft_client.flush();
}
}
|
flush
|
identifier_name
|
transport.rs
|
// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use kvproto::raft_serverpb::RaftMessage;
use crate::server::raft_client::RaftClient;
use crate::server::resolve::StoreAddrResolver;
use engine_traits::KvEngine;
use raftstore::router::RaftStoreRouter;
use raftstore::store::Transport;
use raftstore::Result as RaftStoreResult;
use std::marker::PhantomData;
pub struct ServerTransport<T, S, E>
where
    T: RaftStoreRouter<E> + 'static,
    S: StoreAddrResolver + 'static,
E: KvEngine,
{
raft_client: RaftClient<S, T, E>,
engine: PhantomData<E>,
}
impl<T, S, E> Clone for ServerTransport<T, S, E>
where
    T: RaftStoreRouter<E> + 'static,
    S: StoreAddrResolver + 'static,
E: KvEngine,
|
{
fn clone(&self) -> Self {
ServerTransport {
raft_client: self.raft_client.clone(),
engine: PhantomData,
}
}
}
impl<T, S, E> ServerTransport<T, S, E>
where
E: KvEngine,
    T: RaftStoreRouter<E> + 'static,
    S: StoreAddrResolver + 'static,
{
pub fn new(raft_client: RaftClient<S, T, E>) -> ServerTransport<T, S, E> {
ServerTransport {
raft_client,
engine: PhantomData,
}
}
}
impl<T, S, E> Transport for ServerTransport<T, S, E>
where
    T: RaftStoreRouter<E> + Unpin + 'static,
    S: StoreAddrResolver + Unpin + 'static,
E: KvEngine,
{
fn send(&mut self, msg: RaftMessage) -> RaftStoreResult<()> {
match self.raft_client.send(msg) {
Ok(()) => Ok(()),
Err(reason) => Err(raftstore::Error::Transport(reason)),
}
}
fn need_flush(&self) -> bool {
self.raft_client.need_flush()
}
fn flush(&mut self) {
self.raft_client.flush();
}
}
|
random_line_split
|
|
transport.rs
|
// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use kvproto::raft_serverpb::RaftMessage;
use crate::server::raft_client::RaftClient;
use crate::server::resolve::StoreAddrResolver;
use engine_traits::KvEngine;
use raftstore::router::RaftStoreRouter;
use raftstore::store::Transport;
use raftstore::Result as RaftStoreResult;
use std::marker::PhantomData;
pub struct ServerTransport<T, S, E>
where
    T: RaftStoreRouter<E> + 'static,
    S: StoreAddrResolver + 'static,
E: KvEngine,
{
raft_client: RaftClient<S, T, E>,
engine: PhantomData<E>,
}
impl<T, S, E> Clone for ServerTransport<T, S, E>
where
    T: RaftStoreRouter<E> + 'static,
    S: StoreAddrResolver + 'static,
E: KvEngine,
{
fn clone(&self) -> Self {
ServerTransport {
raft_client: self.raft_client.clone(),
engine: PhantomData,
}
}
}
impl<T, S, E> ServerTransport<T, S, E>
where
E: KvEngine,
    T: RaftStoreRouter<E> + 'static,
    S: StoreAddrResolver + 'static,
{
pub fn new(raft_client: RaftClient<S, T, E>) -> ServerTransport<T, S, E> {
ServerTransport {
raft_client,
engine: PhantomData,
}
}
}
impl<T, S, E> Transport for ServerTransport<T, S, E>
where
    T: RaftStoreRouter<E> + Unpin + 'static,
    S: StoreAddrResolver + Unpin + 'static,
E: KvEngine,
{
fn send(&mut self, msg: RaftMessage) -> RaftStoreResult<()> {
match self.raft_client.send(msg) {
Ok(()) => Ok(()),
Err(reason) => Err(raftstore::Error::Transport(reason)),
}
}
fn need_flush(&self) -> bool
|
fn flush(&mut self) {
self.raft_client.flush();
}
}
|
{
self.raft_client.need_flush()
}
|
identifier_body
|
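A simplified, hypothetical stand-in for the Transport pattern shown in the transport.rs rows; the real RaftMessage, RaftClient, and error types live in kvproto/raftstore, so toy types are used here.

struct Msg(u64);

trait Transport {
    fn send(&mut self, msg: Msg) -> Result<(), String>;
    fn need_flush(&self) -> bool;
    fn flush(&mut self);
}

struct BufferedTransport {
    queue: Vec<Msg>,
}

impl Transport for BufferedTransport {
    fn send(&mut self, msg: Msg) -> Result<(), String> {
        self.queue.push(msg); // buffer locally, as RaftClient::send does
        Ok(())
    }
    fn need_flush(&self) -> bool {
        !self.queue.is_empty()
    }
    fn flush(&mut self) {
        self.queue.clear(); // the real client pushes buffered messages out here
    }
}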
vocab.rs
|
use std::collections::hashmap::{Occupied, Vacant, HashMap};
use std::io::BufferedReader;
use std::io::File;
use std::os;
use std::str::StrSlice;
fn
|
()
{
let args = os::args();
let path = Path::new(args[1].as_slice());
let mut file = BufferedReader::new(File::open(&path));
let mut counter: HashMap<String, uint> = HashMap::new();
for line_opt in file.lines()
{
let line = line_opt.ok().expect("Could not read line");
for word in line.as_slice().split(' ')
{
let key = word.to_string();
// Update count
match counter.entry(key) {
Vacant(entry) => { let _ = entry.set(1u); },
Occupied(mut entry) => {
*entry.get_mut() += 1;
}
};
}
}
println!("{}", counter.len());
}
|
main
|
identifier_name
|
vocab.rs
|
use std::collections::hashmap::{Occupied, Vacant, HashMap};
use std::io::BufferedReader;
use std::io::File;
use std::os;
use std::str::StrSlice;
fn main()
|
}
println!("{}", counter.len());
}
|
{
let args = os::args();
let path = Path::new(args[1].as_slice());
let mut file = BufferedReader::new(File::open(&path));
let mut counter: HashMap<String, uint> = HashMap::new();
for line_opt in file.lines()
{
let line = line_opt.ok().expect("Could not read line");
for word in line.as_slice().split(' ')
{
let key = word.to_string();
// Update count
match counter.entry(key) {
Vacant(entry) => { let _ = entry.set(1u); },
Occupied(mut entry) => {
*entry.get_mut() += 1;
}
};
}
|
identifier_body
|
vocab.rs
|
use std::collections::hashmap::{Occupied, Vacant, HashMap};
use std::io::BufferedReader;
use std::io::File;
use std::os;
use std::str::StrSlice;
fn main()
{
let args = os::args();
let path = Path::new(args[1].as_slice());
let mut file = BufferedReader::new(File::open(&path));
let mut counter: HashMap<String, uint> = HashMap::new();
for line_opt in file.lines()
{
let line = line_opt.ok().expect("Could not read line");
for word in line.as_slice().split(' ')
{
let key = word.to_string();
// Update count
match counter.entry(key) {
Vacant(entry) => { let _ = entry.set(1u); },
Occupied(mut entry) => {
*entry.get_mut() += 1;
|
}
};
}
}
println!("{}", counter.len());
}
|
random_line_split
|
|
vocab.rs
|
use std::collections::hashmap::{Occupied, Vacant, HashMap};
use std::io::BufferedReader;
use std::io::File;
use std::os;
use std::str::StrSlice;
fn main()
{
let args = os::args();
let path = Path::new(args[1].as_slice());
let mut file = BufferedReader::new(File::open(&path));
let mut counter: HashMap<String, uint> = HashMap::new();
for line_opt in file.lines()
{
let line = line_opt.ok().expect("Could not read line");
for word in line.as_slice().split(' ')
{
let key = word.to_string();
// Update count
match counter.entry(key) {
Vacant(entry) =>
|
,
Occupied(mut entry) => {
*entry.get_mut() += 1;
}
};
}
}
println!("{}", counter.len());
}
|
{ let _ = entry.set(1u); }
|
conditional_block
|
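The vocab.rs rows are pre-1.0 Rust (BufferedReader, uint, ~str, and the Vacant/Occupied entry API of the time). A sketch of the same word count in current Rust, assuming the file path is the first CLI argument:

use std::collections::HashMap;
use std::env;
use std::fs::File;
use std::io::{BufRead, BufReader};

fn main() {
    let path = env::args().nth(1).expect("usage: vocab <file>");
    let file = BufReader::new(File::open(&path).expect("could not open file"));
    let mut counter: HashMap<String, usize> = HashMap::new();
    for line in file.lines() {
        let line = line.expect("Could not read line");
        for word in line.split(' ') {
            // entry() replaces the old Vacant/Occupied match
            *counter.entry(word.to_string()).or_insert(0) += 1;
        }
    }
    println!("{}", counter.len());
}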
def_id_forest.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::mem;
use smallvec::SmallVec;
use syntax::ast::CRATE_NODE_ID;
use ty::context::TyCtxt;
use ty::{DefId, DefIdTree};
/// Represents a forest of DefIds closed under the ancestor relation. That is,
/// if a DefId representing a module is contained in the forest then all
/// DefIds defined in that module or submodules are also implicitly contained
/// in the forest.
///
/// This is used to represent a set of modules in which a type is visibly
/// uninhabited.
#[derive(Clone)]
pub struct DefIdForest {
/// The minimal set of DefIds required to represent the whole set.
/// If A and B are DefIds in the DefIdForest, and A is a descendant
/// of B, then only B will be in root_ids.
/// We use a SmallVec here because (for its use for caching inhabitedness)
    /// it's rare that this will contain even two ids.
root_ids: SmallVec<[DefId; 1]>,
}
impl<'a, 'gcx, 'tcx> DefIdForest {
/// Create an empty forest.
pub fn empty() -> DefIdForest {
DefIdForest {
root_ids: SmallVec::new(),
}
}
/// Create a forest consisting of a single tree representing the entire
/// crate.
#[inline]
pub fn full(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> DefIdForest
|
/// Create a forest containing a DefId and all its descendants.
pub fn from_id(id: DefId) -> DefIdForest {
let mut root_ids = SmallVec::new();
root_ids.push(id);
DefIdForest {
root_ids,
}
}
/// Test whether the forest is empty.
pub fn is_empty(&self) -> bool {
self.root_ids.is_empty()
}
/// Test whether the forest contains a given DefId.
pub fn contains(&self,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
id: DefId) -> bool
{
self.root_ids.iter().any(|root_id| tcx.is_descendant_of(id, *root_id))
}
/// Calculate the intersection of a collection of forests.
pub fn intersection<I>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
iter: I) -> DefIdForest
where I: IntoIterator<Item=DefIdForest>
{
let mut ret = DefIdForest::full(tcx);
let mut next_ret = SmallVec::new();
let mut old_ret: SmallVec<[DefId; 1]> = SmallVec::new();
for next_forest in iter {
for id in ret.root_ids.drain() {
if next_forest.contains(tcx, id) {
next_ret.push(id);
} else {
old_ret.push(id);
}
}
ret.root_ids.extend(old_ret.drain());
next_ret.extend(next_forest.root_ids.into_iter().filter(|&id| ret.contains(tcx, id)));
mem::swap(&mut next_ret, &mut ret.root_ids);
next_ret.drain();
}
ret
}
/// Calculate the union of a collection of forests.
pub fn union<I>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
iter: I) -> DefIdForest
where I: IntoIterator<Item=DefIdForest>
{
let mut ret = DefIdForest::empty();
let mut next_ret = SmallVec::new();
for next_forest in iter {
            next_ret.extend(ret.root_ids.drain().filter(|&id| !next_forest.contains(tcx, id)));
            for id in next_forest.root_ids {
                if !next_ret.contains(&id) {
next_ret.push(id);
}
}
mem::swap(&mut next_ret, &mut ret.root_ids);
next_ret.drain();
}
ret
}
}
|
{
let crate_id = tcx.hir().local_def_id(CRATE_NODE_ID);
DefIdForest::from_id(crate_id)
}
|
identifier_body
|
def_id_forest.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::mem;
use smallvec::SmallVec;
use syntax::ast::CRATE_NODE_ID;
use ty::context::TyCtxt;
use ty::{DefId, DefIdTree};
/// Represents a forest of DefIds closed under the ancestor relation. That is,
/// if a DefId representing a module is contained in the forest then all
/// DefIds defined in that module or submodules are also implicitly contained
/// in the forest.
///
/// This is used to represent a set of modules in which a type is visibly
/// uninhabited.
#[derive(Clone)]
pub struct DefIdForest {
/// The minimal set of DefIds required to represent the whole set.
/// If A and B are DefIds in the DefIdForest, and A is a descendant
/// of B, then only B will be in root_ids.
/// We use a SmallVec here because (for its use for caching inhabitedness)
    /// it's rare that this will contain even two ids.
root_ids: SmallVec<[DefId; 1]>,
}
impl<'a, 'gcx, 'tcx> DefIdForest {
/// Create an empty forest.
pub fn empty() -> DefIdForest {
DefIdForest {
root_ids: SmallVec::new(),
}
}
/// Create a forest consisting of a single tree representing the entire
/// crate.
#[inline]
pub fn full(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> DefIdForest {
let crate_id = tcx.hir().local_def_id(CRATE_NODE_ID);
DefIdForest::from_id(crate_id)
}
/// Create a forest containing a DefId and all its descendants.
pub fn from_id(id: DefId) -> DefIdForest {
let mut root_ids = SmallVec::new();
root_ids.push(id);
DefIdForest {
root_ids,
}
}
/// Test whether the forest is empty.
pub fn is_empty(&self) -> bool {
self.root_ids.is_empty()
}
/// Test whether the forest contains a given DefId.
pub fn contains(&self,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
id: DefId) -> bool
{
self.root_ids.iter().any(|root_id| tcx.is_descendant_of(id, *root_id))
}
/// Calculate the intersection of a collection of forests.
pub fn intersection<I>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
iter: I) -> DefIdForest
where I: IntoIterator<Item=DefIdForest>
{
let mut ret = DefIdForest::full(tcx);
let mut next_ret = SmallVec::new();
let mut old_ret: SmallVec<[DefId; 1]> = SmallVec::new();
for next_forest in iter {
for id in ret.root_ids.drain() {
if next_forest.contains(tcx, id) {
next_ret.push(id);
} else
|
}
ret.root_ids.extend(old_ret.drain());
next_ret.extend(next_forest.root_ids.into_iter().filter(|&id| ret.contains(tcx, id)));
mem::swap(&mut next_ret, &mut ret.root_ids);
next_ret.drain();
}
ret
}
/// Calculate the union of a collection of forests.
pub fn union<I>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
iter: I) -> DefIdForest
where I: IntoIterator<Item=DefIdForest>
{
let mut ret = DefIdForest::empty();
let mut next_ret = SmallVec::new();
for next_forest in iter {
            next_ret.extend(ret.root_ids.drain().filter(|&id| !next_forest.contains(tcx, id)));
            for id in next_forest.root_ids {
                if !next_ret.contains(&id) {
next_ret.push(id);
}
}
mem::swap(&mut next_ret, &mut ret.root_ids);
next_ret.drain();
}
ret
}
}
|
{
old_ret.push(id);
}
|
conditional_block
|
def_id_forest.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::mem;
use smallvec::SmallVec;
use syntax::ast::CRATE_NODE_ID;
use ty::context::TyCtxt;
use ty::{DefId, DefIdTree};
/// Represents a forest of DefIds closed under the ancestor relation. That is,
/// if a DefId representing a module is contained in the forest then all
/// DefIds defined in that module or submodules are also implicitly contained
/// in the forest.
///
/// This is used to represent a set of modules in which a type is visibly
/// uninhabited.
#[derive(Clone)]
pub struct DefIdForest {
/// The minimal set of DefIds required to represent the whole set.
/// If A and B are DefIds in the DefIdForest, and A is a descendant
/// of B, then only B will be in root_ids.
/// We use a SmallVec here because (for its use for caching inhabitedness)
    /// it's rare that this will contain even two ids.
root_ids: SmallVec<[DefId; 1]>,
}
impl<'a, 'gcx, 'tcx> DefIdForest {
/// Create an empty forest.
pub fn empty() -> DefIdForest {
DefIdForest {
root_ids: SmallVec::new(),
}
}
/// Create a forest consisting of a single tree representing the entire
/// crate.
#[inline]
pub fn full(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> DefIdForest {
let crate_id = tcx.hir().local_def_id(CRATE_NODE_ID);
DefIdForest::from_id(crate_id)
}
/// Create a forest containing a DefId and all its descendants.
pub fn from_id(id: DefId) -> DefIdForest {
let mut root_ids = SmallVec::new();
root_ids.push(id);
DefIdForest {
root_ids,
}
}
/// Test whether the forest is empty.
|
pub fn contains(&self,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
id: DefId) -> bool
{
self.root_ids.iter().any(|root_id| tcx.is_descendant_of(id, *root_id))
}
/// Calculate the intersection of a collection of forests.
pub fn intersection<I>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
iter: I) -> DefIdForest
where I: IntoIterator<Item=DefIdForest>
{
let mut ret = DefIdForest::full(tcx);
let mut next_ret = SmallVec::new();
let mut old_ret: SmallVec<[DefId; 1]> = SmallVec::new();
for next_forest in iter {
for id in ret.root_ids.drain() {
if next_forest.contains(tcx, id) {
next_ret.push(id);
} else {
old_ret.push(id);
}
}
ret.root_ids.extend(old_ret.drain());
next_ret.extend(next_forest.root_ids.into_iter().filter(|&id| ret.contains(tcx, id)));
mem::swap(&mut next_ret, &mut ret.root_ids);
next_ret.drain();
}
ret
}
/// Calculate the union of a collection of forests.
pub fn union<I>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
iter: I) -> DefIdForest
where I: IntoIterator<Item=DefIdForest>
{
let mut ret = DefIdForest::empty();
let mut next_ret = SmallVec::new();
for next_forest in iter {
            next_ret.extend(ret.root_ids.drain().filter(|&id| !next_forest.contains(tcx, id)));
            for id in next_forest.root_ids {
                if !next_ret.contains(&id) {
next_ret.push(id);
}
}
mem::swap(&mut next_ret, &mut ret.root_ids);
next_ret.drain();
}
ret
}
}
|
pub fn is_empty(&self) -> bool {
self.root_ids.is_empty()
}
/// Test whether the forest contains a given DefId.
|
random_line_split
|
def_id_forest.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::mem;
use smallvec::SmallVec;
use syntax::ast::CRATE_NODE_ID;
use ty::context::TyCtxt;
use ty::{DefId, DefIdTree};
/// Represents a forest of DefIds closed under the ancestor relation. That is,
/// if a DefId representing a module is contained in the forest then all
/// DefIds defined in that module or submodules are also implicitly contained
/// in the forest.
///
/// This is used to represent a set of modules in which a type is visibly
/// uninhabited.
#[derive(Clone)]
pub struct DefIdForest {
/// The minimal set of DefIds required to represent the whole set.
/// If A and B are DefIds in the DefIdForest, and A is a descendant
/// of B, then only B will be in root_ids.
    /// We use a SmallVec here because (for its use for caching inhabitedness)
    /// it's rare that this will contain even two ids.
root_ids: SmallVec<[DefId; 1]>,
}
impl<'a, 'gcx, 'tcx> DefIdForest {
/// Create an empty forest.
pub fn empty() -> DefIdForest {
DefIdForest {
root_ids: SmallVec::new(),
}
}
/// Create a forest consisting of a single tree representing the entire
/// crate.
#[inline]
pub fn full(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> DefIdForest {
let crate_id = tcx.hir().local_def_id(CRATE_NODE_ID);
DefIdForest::from_id(crate_id)
}
/// Create a forest containing a DefId and all its descendants.
pub fn
|
(id: DefId) -> DefIdForest {
let mut root_ids = SmallVec::new();
root_ids.push(id);
DefIdForest {
root_ids,
}
}
/// Test whether the forest is empty.
pub fn is_empty(&self) -> bool {
self.root_ids.is_empty()
}
/// Test whether the forest contains a given DefId.
pub fn contains(&self,
tcx: TyCtxt<'a, 'gcx, 'tcx>,
id: DefId) -> bool
{
self.root_ids.iter().any(|root_id| tcx.is_descendant_of(id, *root_id))
}
/// Calculate the intersection of a collection of forests.
pub fn intersection<I>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
iter: I) -> DefIdForest
where I: IntoIterator<Item=DefIdForest>
{
let mut ret = DefIdForest::full(tcx);
let mut next_ret = SmallVec::new();
let mut old_ret: SmallVec<[DefId; 1]> = SmallVec::new();
for next_forest in iter {
for id in ret.root_ids.drain() {
if next_forest.contains(tcx, id) {
next_ret.push(id);
} else {
old_ret.push(id);
}
}
ret.root_ids.extend(old_ret.drain());
next_ret.extend(next_forest.root_ids.into_iter().filter(|&id| ret.contains(tcx, id)));
mem::swap(&mut next_ret, &mut ret.root_ids);
next_ret.drain();
}
ret
}
/// Calculate the union of a collection of forests.
pub fn union<I>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
iter: I) -> DefIdForest
where I: IntoIterator<Item=DefIdForest>
{
let mut ret = DefIdForest::empty();
let mut next_ret = SmallVec::new();
for next_forest in iter {
            next_ret.extend(ret.root_ids.drain().filter(|&id| !next_forest.contains(tcx, id)));
for id in next_forest.root_ids {
                if !next_ret.contains(&id) {
next_ret.push(id);
}
}
mem::swap(&mut next_ret, &mut ret.root_ids);
next_ret.drain();
}
ret
}
}
|
from_id
|
identifier_name
|
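The def_id_forest.rs records above hinge on one invariant: a forest is stored as its minimal set of root ids, and membership means being a descendant of some root. Below is a self-contained sketch of that idea, with a toy parent map standing in for rustc's TyCtxt; ToyTree and ToyForest are illustrative names, not rustc APIs.

use std::collections::HashMap;

// Toy stand-in for rustc's DefId tree: nodes are u32 and `parent`
// maps each node to its parent; the crate root maps to itself.
struct ToyTree {
    parent: HashMap<u32, u32>,
}

impl ToyTree {
    // Walk parent links upward, mirroring tcx.is_descendant_of.
    fn is_descendant_of(&self, mut id: u32, root: u32) -> bool {
        loop {
            if id == root {
                return true;
            }
            let p = self.parent[&id];
            if p == id {
                return false; // reached the crate root without hitting `root`
            }
            id = p;
        }
    }
}

// A forest is stored as its minimal set of roots, as in DefIdForest.
struct ToyForest {
    root_ids: Vec<u32>,
}

impl ToyForest {
    fn contains(&self, tree: &ToyTree, id: u32) -> bool {
        self.root_ids.iter().any(|&r| tree.is_descendant_of(id, r))
    }
}

fn main() {
    // 0 is the crate root; 1 and 2 are sibling modules; 3 lives inside 1.
    let tree = ToyTree {
        parent: [(0, 0), (1, 0), (2, 0), (3, 1)].into_iter().collect(),
    };
    let forest = ToyForest { root_ids: vec![1] };
    assert!(forest.contains(&tree, 3)); // descendants are implicitly included
    assert!(!forest.contains(&tree, 2)); // the sibling module is not
    println!("forest membership ok");
}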
session.rs
|
use elasticsearch::Client;
use model::{Model, Error};
pub struct Session {
model: Model,
}
impl Session {
fn id(&self) -> &self/~str { &self.model._id }
fn user_id(&self) -> ~str { self.model.get_str(&~"user_id") }
fn set_user_id(&mut self, user_id: ~str) -> bool {
self.model.set_str(~"user_id", user_id)
}
fn create(&self) -> Result<(~str, uint), Error>
|
fn save(&self) -> Result<(~str, uint), Error> {
self.model.save()
}
fn delete(&self) {
self.model.delete()
}
}
pub fn Session(es: Client, index: ~str, user_id: ~str) -> Session {
let id = crypto::rand::rand_bytes(32u).to_base64();
let mut session = Session { model: Model(es, index, ~"session", id) };
session.set_user_id(user_id);
session
}
pub fn find(es: Client, index: ~str, id: ~str) -> Option<Session> {
do model::find(es, index, ~"session", id).map |model| {
Session { model: *model }
}
}
|
{
self.model.create()
}
|
identifier_body
|
session.rs
|
use elasticsearch::Client;
use model::{Model, Error};
pub struct Session {
model: Model,
}
impl Session {
fn id(&self) -> &self/~str { &self.model._id }
fn
|
(&self) -> ~str { self.model.get_str(&~"user_id") }
fn set_user_id(&mut self, user_id: ~str) -> bool {
self.model.set_str(~"user_id", user_id)
}
fn create(&self) -> Result<(~str, uint), Error> {
self.model.create()
}
fn save(&self) -> Result<(~str, uint), Error> {
self.model.save()
}
fn delete(&self) {
self.model.delete()
}
}
pub fn Session(es: Client, index: ~str, user_id: ~str) -> Session {
let id = crypto::rand::rand_bytes(32u).to_base64();
let mut session = Session { model: Model(es, index, ~"session", id) };
session.set_user_id(user_id);
session
}
pub fn find(es: Client, index: ~str, id: ~str) -> Option<Session> {
do model::find(es, index, ~"session", id).map |model| {
Session { model: *model }
}
}
|
user_id
|
identifier_name
|
session.rs
|
use elasticsearch::Client;
|
use model::{Model, Error};
pub struct Session {
model: Model,
}
impl Session {
fn id(&self) -> &self/~str { &self.model._id }
fn user_id(&self) -> ~str { self.model.get_str(&~"user_id") }
fn set_user_id(&mut self, user_id: ~str) -> bool {
self.model.set_str(~"user_id", user_id)
}
fn create(&self) -> Result<(~str, uint), Error> {
self.model.create()
}
fn save(&self) -> Result<(~str, uint), Error> {
self.model.save()
}
fn delete(&self) {
self.model.delete()
}
}
pub fn Session(es: Client, index: ~str, user_id: ~str) -> Session {
let id = crypto::rand::rand_bytes(32u).to_base64();
let mut session = Session { model: Model(es, index, ~"session", id) };
session.set_user_id(user_id);
session
}
pub fn find(es: Client, index: ~str, id: ~str) -> Option<Session> {
do model::find(es, index, ~"session", id).map |model| {
Session { model: *model }
}
}
|
random_line_split
|
|
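The session.rs records use pre-1.0 Rust (~str, do blocks, region syntax like &self/~str). A rough modern equivalent of the same thin-wrapper pattern might look like the sketch below, with a plain HashMap standing in for the Elasticsearch-backed Model; this Model is a local stand-in, not the crate's type.

use std::collections::HashMap;

// Stand-in for the Elasticsearch-backed Model in the records above.
struct Model {
    id: String,
    fields: HashMap<String, String>,
}

struct Session {
    model: Model,
}

impl Session {
    fn id(&self) -> &str {
        &self.model.id
    }
    fn user_id(&self) -> Option<&str> {
        self.model.fields.get("user_id").map(String::as_str)
    }
    fn set_user_id(&mut self, user_id: String) {
        self.model.fields.insert("user_id".to_string(), user_id);
    }
}

fn main() {
    let mut session = Session {
        model: Model { id: "abc123".to_string(), fields: HashMap::new() },
    };
    session.set_user_id("user-1".to_string());
    assert_eq!(session.user_id(), Some("user-1"));
    println!("session {} ok", session.id());
}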
wait_queue.rs
|
use alloc::boxed::Box;
use intrusive::queue::Queue;
use hal::local_irq_disable;
use sync::spin::{InterruptSpinLock, InterruptSpinGuard};
use super::{Thread, ThreadImpl, Scheduler};
pub type InternalQueue = Queue<Box<ThreadImpl>, ThreadImpl>;
pub struct WaitQueue {
queue: InterruptSpinLock<InternalQueue>,
}
impl WaitQueue {
/// Create a new WaitQueue
pub fn new() -> Self {
WaitQueue {
queue: InterruptSpinLock::new(Queue::new()),
}
}
#[doc(hidden)]
pub fn lock(&self) -> InterruptSpinGuard<InternalQueue> {
self.queue.lock()
}
#[inline]
/// Block the current thread
pub fn block(&self) {
local_irq_disable();
Scheduler::block(self.queue.lock());
}
#[inline]
fn unblock_thread(imp: Box<ThreadImpl>) {
let thread = Thread {
t_impl: imp,
};
Scheduler::ready(thread);
}
/// Unblock the first thread in the queue.
/// Returns true if a thread was unblocked, false otherwise
pub fn unblock(&self) {
if let Some(t) = self.queue.lock().dequeue() {
WaitQueue::unblock_thread(t);
}
}
/// Unblock all threads in the queue
pub fn unblock_all(&self)
|
}
unsafe impl Send for WaitQueue {}
unsafe impl Sync for WaitQueue {}
|
{
let mut queue = self.queue.lock();
while let Some(t) = queue.dequeue() {
WaitQueue::unblock_thread(t);
}
}
|
identifier_body
|
wait_queue.rs
|
use alloc::boxed::Box;
use intrusive::queue::Queue;
use hal::local_irq_disable;
use sync::spin::{InterruptSpinLock, InterruptSpinGuard};
use super::{Thread, ThreadImpl, Scheduler};
pub type InternalQueue = Queue<Box<ThreadImpl>, ThreadImpl>;
pub struct WaitQueue {
queue: InterruptSpinLock<InternalQueue>,
}
impl WaitQueue {
/// Create a new WaitQueue
pub fn
|
() -> Self {
WaitQueue {
queue: InterruptSpinLock::new(Queue::new()),
}
}
#[doc(hidden)]
pub fn lock(&self) -> InterruptSpinGuard<InternalQueue> {
self.queue.lock()
}
#[inline]
/// Block the current thread
pub fn block(&self) {
local_irq_disable();
Scheduler::block(self.queue.lock());
}
#[inline]
fn unblock_thread(imp: Box<ThreadImpl>) {
let thread = Thread {
t_impl: imp,
};
Scheduler::ready(thread);
}
/// Unblock the first thread in the queue.
/// Returns true if a thread was unblocked, false otherwise
pub fn unblock(&self) {
if let Some(t) = self.queue.lock().dequeue() {
WaitQueue::unblock_thread(t);
}
}
/// Unblock all threads in the queue
pub fn unblock_all(&self) {
let mut queue = self.queue.lock();
while let Some(t) = queue.dequeue() {
WaitQueue::unblock_thread(t);
}
}
}
unsafe impl Send for WaitQueue {}
unsafe impl Sync for WaitQueue {}
|
new
|
identifier_name
|
wait_queue.rs
|
use alloc::boxed::Box;
use intrusive::queue::Queue;
use hal::local_irq_disable;
use sync::spin::{InterruptSpinLock, InterruptSpinGuard};
use super::{Thread, ThreadImpl, Scheduler};
pub type InternalQueue = Queue<Box<ThreadImpl>, ThreadImpl>;
pub struct WaitQueue {
queue: InterruptSpinLock<InternalQueue>,
}
impl WaitQueue {
/// Create a new WaitQueue
pub fn new() -> Self {
WaitQueue {
queue: InterruptSpinLock::new(Queue::new()),
}
}
#[doc(hidden)]
pub fn lock(&self) -> InterruptSpinGuard<InternalQueue> {
self.queue.lock()
}
#[inline]
/// Block the current thread
pub fn block(&self) {
local_irq_disable();
Scheduler::block(self.queue.lock());
}
#[inline]
fn unblock_thread(imp: Box<ThreadImpl>) {
let thread = Thread {
t_impl: imp,
};
Scheduler::ready(thread);
}
/// Unblock the first thread in the queue.
/// Returns true if a thread was unblocked, false otherwise
pub fn unblock(&self) {
if let Some(t) = self.queue.lock().dequeue()
|
}
/// Unblock all threads in the queue
pub fn unblock_all(&self) {
let mut queue = self.queue.lock();
while let Some(t) = queue.dequeue() {
WaitQueue::unblock_thread(t);
}
}
}
unsafe impl Send for WaitQueue {}
unsafe impl Sync for WaitQueue {}
|
{
WaitQueue::unblock_thread(t);
}
|
conditional_block
|
wait_queue.rs
|
use alloc::boxed::Box;
use intrusive::queue::Queue;
use hal::local_irq_disable;
use sync::spin::{InterruptSpinLock, InterruptSpinGuard};
use super::{Thread, ThreadImpl, Scheduler};
pub type InternalQueue = Queue<Box<ThreadImpl>, ThreadImpl>;
pub struct WaitQueue {
queue: InterruptSpinLock<InternalQueue>,
}
impl WaitQueue {
/// Create a new WaitQueue
pub fn new() -> Self {
WaitQueue {
queue: InterruptSpinLock::new(Queue::new()),
}
}
#[doc(hidden)]
pub fn lock(&self) -> InterruptSpinGuard<InternalQueue> {
self.queue.lock()
}
#[inline]
/// Block the current thread
pub fn block(&self) {
local_irq_disable();
Scheduler::block(self.queue.lock());
}
#[inline]
fn unblock_thread(imp: Box<ThreadImpl>) {
let thread = Thread {
t_impl: imp,
};
Scheduler::ready(thread);
}
/// Unblock the first thread in the queue.
|
/// Returns true if a thread was unblocked, false otherwise
pub fn unblock(&self) {
if let Some(t) = self.queue.lock().dequeue() {
WaitQueue::unblock_thread(t);
}
}
/// Unblock all threads in the queue
pub fn unblock_all(&self) {
let mut queue = self.queue.lock();
while let Some(t) = queue.dequeue() {
WaitQueue::unblock_thread(t);
}
}
}
unsafe impl Send for WaitQueue {}
unsafe impl Sync for WaitQueue {}
|
random_line_split
|
|
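The wait_queue.rs records describe a kernel-side block/unblock protocol. A userspace approximation of the same protocol is sketched below, with a Condvar standing in for the scheduler; the real code parks threads via Scheduler::block rather than waiting on a condition variable.

use std::collections::VecDeque;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

// Userspace approximation: blocked thread ids sit in the queue until
// unblock_all drains it and wakes everyone.
struct WaitQueue {
    queue: Mutex<VecDeque<u32>>,
    cond: Condvar,
}

impl WaitQueue {
    fn block(&self, id: u32) {
        let mut q = self.queue.lock().unwrap();
        q.push_back(id);
        // Sleep until someone has removed us from the queue.
        while q.contains(&id) {
            q = self.cond.wait(q).unwrap();
        }
    }

    fn unblock_all(&self) {
        self.queue.lock().unwrap().clear();
        self.cond.notify_all();
    }
}

fn main() {
    let wq = Arc::new(WaitQueue {
        queue: Mutex::new(VecDeque::new()),
        cond: Condvar::new(),
    });
    let handles: Vec<_> = (0..3)
        .map(|i| {
            let wq = Arc::clone(&wq);
            thread::spawn(move || wq.block(i))
        })
        .collect();
    // Wait until all three threads have parked themselves, then wake them.
    while wq.queue.lock().unwrap().len() < 3 {
        thread::yield_now();
    }
    wq.unblock_all();
    for h in handles {
        h.join().unwrap();
    }
    println!("all threads unblocked");
}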
shader.rs
|
use std::ffi::CStr;
use std::fmt;
use crate::gl;
use crate::gl::types::*;
/// A wrapper for a shader program id, with automatic lifetime management.
#[derive(Debug)]
pub struct ShaderProgram(GLuint);
#[derive(Copy, Clone, Debug)]
pub enum ShaderVersion {
/// OpenGL 3.3 core shaders.
Glsl3,
/// OpenGL ES 2.0 shaders.
Gles2,
}
impl ShaderVersion {
// Header to which we concatenate the entire shader. The newlines are required.
fn shader_header(&self) -> &'static str {
match self {
Self::Glsl3 => "#version 330 core\n",
Self::Gles2 => "#version 100\n#define GLES2_RENDERER\n",
}
}
}
impl ShaderProgram {
pub fn new(
shader_version: ShaderVersion,
vertex_shader: &'static str,
fragment_shader: &'static str,
) -> Result<Self, ShaderError> {
let vertex_shader = Shader::new(shader_version, gl::VERTEX_SHADER, vertex_shader)?;
let fragment_shader = Shader::new(shader_version, gl::FRAGMENT_SHADER, fragment_shader)?;
let program = unsafe { Self(gl::CreateProgram()) };
let mut success: GLint = 0;
unsafe {
gl::AttachShader(program.id(), vertex_shader.id());
gl::AttachShader(program.id(), fragment_shader.id());
gl::LinkProgram(program.id());
gl::GetProgramiv(program.id(), gl::LINK_STATUS, &mut success);
}
        if success != i32::from(gl::TRUE) {
return Err(ShaderError::Link(get_program_info_log(program.id())));
}
Ok(program)
}
/// Get uniform location by name. Panic if failed.
pub fn get_uniform_location(&self, name: &'static CStr) -> Result<GLint, ShaderError> {
// This call doesn't require `UseProgram`.
let ret = unsafe { gl::GetUniformLocation(self.id(), name.as_ptr()) };
if ret == -1 {
return Err(ShaderError::Uniform(name));
}
Ok(ret)
}
/// Get the shader program id.
pub fn id(&self) -> GLuint {
self.0
}
}
impl Drop for ShaderProgram {
fn drop(&mut self) {
unsafe { gl::DeleteProgram(self.0) }
}
}
/// A wrapper for a shader id, with automatic lifetime management.
#[derive(Debug)]
struct Shader(GLuint);
|
fn new(
shader_version: ShaderVersion,
kind: GLenum,
source: &'static str,
) -> Result<Self, ShaderError> {
let header = shader_version.shader_header();
let len: [GLint; 2] = [header.len() as GLint, source.len() as GLint];
let source = [header.as_ptr() as *const _, source.as_ptr() as *const _];
let shader = unsafe { Self(gl::CreateShader(kind)) };
let mut success: GLint = 0;
unsafe {
gl::ShaderSource(
shader.id(),
len.len() as GLint,
source.as_ptr() as *const _,
len.as_ptr(),
);
gl::CompileShader(shader.id());
gl::GetShaderiv(shader.id(), gl::COMPILE_STATUS, &mut success);
}
if success == GLint::from(gl::TRUE) {
Ok(shader)
} else {
Err(ShaderError::Compile(get_shader_info_log(shader.id())))
}
}
fn id(&self) -> GLuint {
self.0
}
}
impl Drop for Shader {
fn drop(&mut self) {
unsafe { gl::DeleteShader(self.0) }
}
}
fn get_program_info_log(program: GLuint) -> String {
// Get expected log length.
let mut max_length: GLint = 0;
unsafe {
gl::GetProgramiv(program, gl::INFO_LOG_LENGTH, &mut max_length);
}
// Read the info log.
let mut actual_length: GLint = 0;
let mut buf: Vec<u8> = Vec::with_capacity(max_length as usize);
unsafe {
gl::GetProgramInfoLog(program, max_length, &mut actual_length, buf.as_mut_ptr() as *mut _);
}
// Build a string.
unsafe {
buf.set_len(actual_length as usize);
}
String::from_utf8_lossy(&buf).to_string()
}
fn get_shader_info_log(shader: GLuint) -> String {
// Get expected log length.
let mut max_length: GLint = 0;
unsafe {
gl::GetShaderiv(shader, gl::INFO_LOG_LENGTH, &mut max_length);
}
// Read the info log.
let mut actual_length: GLint = 0;
let mut buf: Vec<u8> = Vec::with_capacity(max_length as usize);
unsafe {
gl::GetShaderInfoLog(shader, max_length, &mut actual_length, buf.as_mut_ptr() as *mut _);
}
// Build a string.
unsafe {
buf.set_len(actual_length as usize);
}
String::from_utf8_lossy(&buf).to_string()
}
#[derive(Debug)]
pub enum ShaderError {
/// Error compiling shader.
Compile(String),
/// Error linking shader.
Link(String),
/// Error getting uniform location.
Uniform(&'static CStr),
}
impl std::error::Error for ShaderError {}
impl fmt::Display for ShaderError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Compile(reason) => write!(f, "Failed compiling shader: {}", reason),
Self::Link(reason) => write!(f, "Failed linking shader: {}", reason),
Self::Uniform(name) => write!(f, "Failed to get uniform location of {:?}", name),
}
}
}
|
impl Shader {
|
random_line_split
|
shader.rs
|
use std::ffi::CStr;
use std::fmt;
use crate::gl;
use crate::gl::types::*;
/// A wrapper for a shader program id, with automatic lifetime management.
#[derive(Debug)]
pub struct ShaderProgram(GLuint);
#[derive(Copy, Clone, Debug)]
pub enum ShaderVersion {
/// OpenGL 3.3 core shaders.
Glsl3,
/// OpenGL ES 2.0 shaders.
Gles2,
}
impl ShaderVersion {
// Header to which we concatenate the entire shader. The newlines are required.
fn shader_header(&self) -> &'static str {
match self {
Self::Glsl3 => "#version 330 core\n",
Self::Gles2 => "#version 100\n#define GLES2_RENDERER\n",
}
}
}
impl ShaderProgram {
pub fn new(
shader_version: ShaderVersion,
vertex_shader: &'static str,
fragment_shader: &'static str,
) -> Result<Self, ShaderError> {
let vertex_shader = Shader::new(shader_version, gl::VERTEX_SHADER, vertex_shader)?;
let fragment_shader = Shader::new(shader_version, gl::FRAGMENT_SHADER, fragment_shader)?;
let program = unsafe { Self(gl::CreateProgram()) };
let mut success: GLint = 0;
unsafe {
gl::AttachShader(program.id(), vertex_shader.id());
gl::AttachShader(program.id(), fragment_shader.id());
gl::LinkProgram(program.id());
gl::GetProgramiv(program.id(), gl::LINK_STATUS, &mut success);
}
        if success != i32::from(gl::TRUE) {
return Err(ShaderError::Link(get_program_info_log(program.id())));
}
Ok(program)
}
/// Get uniform location by name. Panic if failed.
pub fn get_uniform_location(&self, name: &'static CStr) -> Result<GLint, ShaderError> {
// This call doesn't require `UseProgram`.
let ret = unsafe { gl::GetUniformLocation(self.id(), name.as_ptr()) };
if ret == -1 {
return Err(ShaderError::Uniform(name));
}
Ok(ret)
}
/// Get the shader program id.
pub fn id(&self) -> GLuint {
self.0
}
}
impl Drop for ShaderProgram {
fn drop(&mut self)
|
}
/// A wrapper for a shader id, with automatic lifetime management.
#[derive(Debug)]
struct Shader(GLuint);
impl Shader {
fn new(
shader_version: ShaderVersion,
kind: GLenum,
source: &'static str,
) -> Result<Self, ShaderError> {
let header = shader_version.shader_header();
let len: [GLint; 2] = [header.len() as GLint, source.len() as GLint];
let source = [header.as_ptr() as *const _, source.as_ptr() as *const _];
let shader = unsafe { Self(gl::CreateShader(kind)) };
let mut success: GLint = 0;
unsafe {
gl::ShaderSource(
shader.id(),
len.len() as GLint,
source.as_ptr() as *const _,
len.as_ptr(),
);
gl::CompileShader(shader.id());
gl::GetShaderiv(shader.id(), gl::COMPILE_STATUS, &mut success);
}
if success == GLint::from(gl::TRUE) {
Ok(shader)
} else {
Err(ShaderError::Compile(get_shader_info_log(shader.id())))
}
}
fn id(&self) -> GLuint {
self.0
}
}
impl Drop for Shader {
fn drop(&mut self) {
unsafe { gl::DeleteShader(self.0) }
}
}
fn get_program_info_log(program: GLuint) -> String {
// Get expected log length.
let mut max_length: GLint = 0;
unsafe {
gl::GetProgramiv(program, gl::INFO_LOG_LENGTH, &mut max_length);
}
// Read the info log.
let mut actual_length: GLint = 0;
let mut buf: Vec<u8> = Vec::with_capacity(max_length as usize);
unsafe {
gl::GetProgramInfoLog(program, max_length, &mut actual_length, buf.as_mut_ptr() as *mut _);
}
// Build a string.
unsafe {
buf.set_len(actual_length as usize);
}
String::from_utf8_lossy(&buf).to_string()
}
fn get_shader_info_log(shader: GLuint) -> String {
// Get expected log length.
let mut max_length: GLint = 0;
unsafe {
gl::GetShaderiv(shader, gl::INFO_LOG_LENGTH, &mut max_length);
}
// Read the info log.
let mut actual_length: GLint = 0;
let mut buf: Vec<u8> = Vec::with_capacity(max_length as usize);
unsafe {
gl::GetShaderInfoLog(shader, max_length, &mut actual_length, buf.as_mut_ptr() as *mut _);
}
// Build a string.
unsafe {
buf.set_len(actual_length as usize);
}
String::from_utf8_lossy(&buf).to_string()
}
#[derive(Debug)]
pub enum ShaderError {
/// Error compiling shader.
Compile(String),
/// Error linking shader.
Link(String),
/// Error getting uniform location.
Uniform(&'static CStr),
}
impl std::error::Error for ShaderError {}
impl fmt::Display for ShaderError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Compile(reason) => write!(f, "Failed compiling shader: {}", reason),
Self::Link(reason) => write!(f, "Failed linking shader: {}", reason),
Self::Uniform(name) => write!(f, "Failed to get uniform location of {:?}", name),
}
}
}
|
{
unsafe { gl::DeleteProgram(self.0) }
}
|
identifier_body
|
shader.rs
|
use std::ffi::CStr;
use std::fmt;
use crate::gl;
use crate::gl::types::*;
/// A wrapper for a shader program id, with automatic lifetime management.
#[derive(Debug)]
pub struct ShaderProgram(GLuint);
#[derive(Copy, Clone, Debug)]
pub enum ShaderVersion {
/// OpenGL 3.3 core shaders.
Glsl3,
/// OpenGL ES 2.0 shaders.
Gles2,
}
impl ShaderVersion {
// Header to which we concatenate the entire shader. The newlines are required.
fn shader_header(&self) -> &'static str {
match self {
Self::Glsl3 => "#version 330 core\n",
Self::Gles2 => "#version 100\n#define GLES2_RENDERER\n",
}
}
}
impl ShaderProgram {
pub fn new(
shader_version: ShaderVersion,
vertex_shader: &'static str,
fragment_shader: &'static str,
) -> Result<Self, ShaderError> {
let vertex_shader = Shader::new(shader_version, gl::VERTEX_SHADER, vertex_shader)?;
let fragment_shader = Shader::new(shader_version, gl::FRAGMENT_SHADER, fragment_shader)?;
let program = unsafe { Self(gl::CreateProgram()) };
let mut success: GLint = 0;
unsafe {
gl::AttachShader(program.id(), vertex_shader.id());
gl::AttachShader(program.id(), fragment_shader.id());
gl::LinkProgram(program.id());
gl::GetProgramiv(program.id(), gl::LINK_STATUS, &mut success);
}
        if success != i32::from(gl::TRUE) {
return Err(ShaderError::Link(get_program_info_log(program.id())));
}
Ok(program)
}
/// Get uniform location by name. Panic if failed.
pub fn get_uniform_location(&self, name: &'static CStr) -> Result<GLint, ShaderError> {
// This call doesn't require `UseProgram`.
let ret = unsafe { gl::GetUniformLocation(self.id(), name.as_ptr()) };
if ret == -1 {
return Err(ShaderError::Uniform(name));
}
Ok(ret)
}
/// Get the shader program id.
pub fn id(&self) -> GLuint {
self.0
}
}
impl Drop for ShaderProgram {
fn drop(&mut self) {
unsafe { gl::DeleteProgram(self.0) }
}
}
/// A wrapper for a shader id, with automatic lifetime management.
#[derive(Debug)]
struct Shader(GLuint);
impl Shader {
fn new(
shader_version: ShaderVersion,
kind: GLenum,
source: &'static str,
) -> Result<Self, ShaderError> {
let header = shader_version.shader_header();
let len: [GLint; 2] = [header.len() as GLint, source.len() as GLint];
let source = [header.as_ptr() as *const _, source.as_ptr() as *const _];
let shader = unsafe { Self(gl::CreateShader(kind)) };
let mut success: GLint = 0;
unsafe {
gl::ShaderSource(
shader.id(),
len.len() as GLint,
source.as_ptr() as *const _,
len.as_ptr(),
);
gl::CompileShader(shader.id());
gl::GetShaderiv(shader.id(), gl::COMPILE_STATUS, &mut success);
}
if success == GLint::from(gl::TRUE) {
Ok(shader)
} else {
Err(ShaderError::Compile(get_shader_info_log(shader.id())))
}
}
fn id(&self) -> GLuint {
self.0
}
}
impl Drop for Shader {
fn drop(&mut self) {
unsafe { gl::DeleteShader(self.0) }
}
}
fn get_program_info_log(program: GLuint) -> String {
// Get expected log length.
let mut max_length: GLint = 0;
unsafe {
gl::GetProgramiv(program, gl::INFO_LOG_LENGTH, &mut max_length);
}
// Read the info log.
let mut actual_length: GLint = 0;
let mut buf: Vec<u8> = Vec::with_capacity(max_length as usize);
unsafe {
gl::GetProgramInfoLog(program, max_length, &mut actual_length, buf.as_mut_ptr() as *mut _);
}
// Build a string.
unsafe {
buf.set_len(actual_length as usize);
}
String::from_utf8_lossy(&buf).to_string()
}
fn get_shader_info_log(shader: GLuint) -> String {
// Get expected log length.
let mut max_length: GLint = 0;
unsafe {
gl::GetShaderiv(shader, gl::INFO_LOG_LENGTH, &mut max_length);
}
// Read the info log.
let mut actual_length: GLint = 0;
let mut buf: Vec<u8> = Vec::with_capacity(max_length as usize);
unsafe {
gl::GetShaderInfoLog(shader, max_length, &mut actual_length, buf.as_mut_ptr() as *mut _);
}
// Build a string.
unsafe {
buf.set_len(actual_length as usize);
}
String::from_utf8_lossy(&buf).to_string()
}
#[derive(Debug)]
pub enum ShaderError {
/// Error compiling shader.
Compile(String),
/// Error linking shader.
Link(String),
/// Error getting uniform location.
Uniform(&'static CStr),
}
impl std::error::Error for ShaderError {}
impl fmt::Display for ShaderError {
fn
|
(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Compile(reason) => write!(f, "Failed compiling shader: {}", reason),
Self::Link(reason) => write!(f, "Failed linking shader: {}", reason),
Self::Uniform(name) => write!(f, "Failed to get uniform location of {:?}", name),
}
}
}
|
fmt
|
identifier_name
|
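The shader.rs records prepend a per-version header by handing gl::ShaderSource a two-element array of pointers and lengths. The same concatenation can be sketched in pure Rust, which also makes the header logic testable without a GL context; full_source below is an illustrative helper, not part of the original renderer.

// Pure-Rust version of the header/source concatenation; the records
// above achieve the same effect by passing a two-element array of
// pointers and lengths to gl::ShaderSource.
#[derive(Copy, Clone)]
enum ShaderVersion {
    Glsl3,
    Gles2,
}

impl ShaderVersion {
    fn shader_header(&self) -> &'static str {
        match self {
            Self::Glsl3 => "#version 330 core\n",
            Self::Gles2 => "#version 100\n#define GLES2_RENDERER\n",
        }
    }
}

fn full_source(version: ShaderVersion, body: &str) -> String {
    let header = version.shader_header();
    let mut source = String::with_capacity(header.len() + body.len());
    source.push_str(header);
    source.push_str(body);
    source
}

fn main() {
    let src = full_source(
        ShaderVersion::Gles2,
        "void main() { gl_FragColor = vec4(1.0); }\n",
    );
    assert!(src.starts_with("#version 100\n#define GLES2_RENDERER\n"));
    print!("{}", src);
}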
string.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::iter::repeat;
use test::Bencher;
#[bench]
fn bench_with_capacity(b: &mut Bencher) {
b.iter(|| String::with_capacity(100));
}
#[bench]
fn bench_push_str(b: &mut Bencher) {
let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
b.iter(|| {
let mut r = String::new();
r.push_str(s);
});
}
const REPETITIONS: u64 = 10_000;
#[bench]
fn bench_push_str_one_byte(b: &mut Bencher) {
b.bytes = REPETITIONS;
b.iter(|| {
let mut r = String::new();
for _ in 0..REPETITIONS {
r.push_str("a")
}
});
}
#[bench]
fn bench_push_char_one_byte(b: &mut Bencher) {
b.bytes = REPETITIONS;
b.iter(|| {
let mut r = String::new();
for _ in 0..REPETITIONS {
r.push('a')
}
});
}
#[bench]
fn bench_push_char_two_bytes(b: &mut Bencher) {
b.bytes = REPETITIONS * 2;
b.iter(|| {
let mut r = String::new();
for _ in 0..REPETITIONS {
r.push('â')
}
});
}
#[bench]
fn from_utf8_lossy_100_ascii(b: &mut Bencher) {
let s = b"Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
assert_eq!(100, s.len());
b.iter(|| {
let _ = String::from_utf8_lossy(s);
});
}
#[bench]
fn from_utf8_lossy_100_multibyte(b: &mut Bencher) {
let s = "𐌀𐌖𐌋𐌄𐌑𐌉ปรدولة الكويتทศไทย中华𐍅𐌿𐌻𐍆𐌹𐌻𐌰".as_bytes();
assert_eq!(100, s.len());
b.iter(|| {
let _ = String::from_utf8_lossy(s);
});
}
#[bench]
fn from_utf8_lossy_invalid(b: &mut Bencher) {
let s = b"Hello\xC0\x80 There\xE6\x83 Goodbye";
b.iter(|| {
let _ = String::from_utf8_lossy(s);
});
}
#[bench]
fn from_utf8_lossy_100_invalid(b: &mut Bencher) {
let s = repeat(0xf5).take(100).collect::<Vec<_>>();
b.iter(|| {
|
there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
// ensure our operation produces an exact-size string before we benchmark it
let mut r = String::with_capacity(s.len());
r.push_str(s);
assert_eq!(r.len(), r.capacity());
b.iter(|| {
let mut r = String::with_capacity(s.len());
r.push_str(s);
r.shrink_to_fit();
r
});
}
#[bench]
fn bench_from_str(b: &mut Bencher) {
let s = "Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
b.iter(|| String::from(s))
}
#[bench]
fn bench_from(b: &mut Bencher) {
let s = "Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
b.iter(|| String::from(s))
}
#[bench]
fn bench_to_string(b: &mut Bencher) {
let s = "Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
b.iter(|| s.to_string())
}
|
let _ = String::from_utf8_lossy(&s);
});
}
#[bench]
fn bench_exact_size_shrink_to_fit(b: &mut Bencher) {
let s = "Hello
|
identifier_body
|
string.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::iter::repeat;
use test::Bencher;
#[bench]
fn bench_with_capacity(b: &mut Bencher) {
b.iter(|| String::with_capacity(100));
}
#[bench]
fn
|
(b: &mut Bencher) {
let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
b.iter(|| {
let mut r = String::new();
r.push_str(s);
});
}
const REPETITIONS: u64 = 10_000;
#[bench]
fn bench_push_str_one_byte(b: &mut Bencher) {
b.bytes = REPETITIONS;
b.iter(|| {
let mut r = String::new();
for _ in 0..REPETITIONS {
r.push_str("a")
}
});
}
#[bench]
fn bench_push_char_one_byte(b: &mut Bencher) {
b.bytes = REPETITIONS;
b.iter(|| {
let mut r = String::new();
for _ in 0..REPETITIONS {
r.push('a')
}
});
}
#[bench]
fn bench_push_char_two_bytes(b: &mut Bencher) {
b.bytes = REPETITIONS * 2;
b.iter(|| {
let mut r = String::new();
for _ in 0..REPETITIONS {
r.push('â')
}
});
}
#[bench]
fn from_utf8_lossy_100_ascii(b: &mut Bencher) {
let s = b"Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
assert_eq!(100, s.len());
b.iter(|| {
let _ = String::from_utf8_lossy(s);
});
}
#[bench]
fn from_utf8_lossy_100_multibyte(b: &mut Bencher) {
let s = "𐌀𐌖𐌋𐌄𐌑𐌉ปรدولة الكويتทศไทย中华𐍅𐌿𐌻𐍆𐌹𐌻𐌰".as_bytes();
assert_eq!(100, s.len());
b.iter(|| {
let _ = String::from_utf8_lossy(s);
});
}
#[bench]
fn from_utf8_lossy_invalid(b: &mut Bencher) {
let s = b"Hello\xC0\x80 There\xE6\x83 Goodbye";
b.iter(|| {
let _ = String::from_utf8_lossy(s);
});
}
#[bench]
fn from_utf8_lossy_100_invalid(b: &mut Bencher) {
let s = repeat(0xf5).take(100).collect::<Vec<_>>();
b.iter(|| {
let _ = String::from_utf8_lossy(&s);
});
}
#[bench]
fn bench_exact_size_shrink_to_fit(b: &mut Bencher) {
let s = "Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
// ensure our operation produces an exact-size string before we benchmark it
let mut r = String::with_capacity(s.len());
r.push_str(s);
assert_eq!(r.len(), r.capacity());
b.iter(|| {
let mut r = String::with_capacity(s.len());
r.push_str(s);
r.shrink_to_fit();
r
});
}
#[bench]
fn bench_from_str(b: &mut Bencher) {
let s = "Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
b.iter(|| String::from(s))
}
#[bench]
fn bench_from(b: &mut Bencher) {
let s = "Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
b.iter(|| String::from(s))
}
#[bench]
fn bench_to_string(b: &mut Bencher) {
let s = "Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
b.iter(|| s.to_string())
}
|
bench_push_str
|
identifier_name
|
string.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::iter::repeat;
use test::Bencher;
#[bench]
fn bench_with_capacity(b: &mut Bencher) {
b.iter(|| String::with_capacity(100));
}
#[bench]
fn bench_push_str(b: &mut Bencher) {
let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
b.iter(|| {
let mut r = String::new();
r.push_str(s);
});
}
const REPETITIONS: u64 = 10_000;
#[bench]
fn bench_push_str_one_byte(b: &mut Bencher) {
b.bytes = REPETITIONS;
b.iter(|| {
let mut r = String::new();
for _ in 0..REPETITIONS {
r.push_str("a")
}
});
}
#[bench]
fn bench_push_char_one_byte(b: &mut Bencher) {
b.bytes = REPETITIONS;
b.iter(|| {
let mut r = String::new();
for _ in 0..REPETITIONS {
r.push('a')
}
});
}
#[bench]
fn bench_push_char_two_bytes(b: &mut Bencher) {
b.bytes = REPETITIONS * 2;
|
for _ in 0..REPETITIONS {
r.push('â')
}
});
}
#[bench]
fn from_utf8_lossy_100_ascii(b: &mut Bencher) {
let s = b"Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
assert_eq!(100, s.len());
b.iter(|| {
let _ = String::from_utf8_lossy(s);
});
}
#[bench]
fn from_utf8_lossy_100_multibyte(b: &mut Bencher) {
let s = "𐌀𐌖𐌋𐌄𐌑𐌉ปรدولة الكويتทศไทย中华𐍅𐌿𐌻𐍆𐌹𐌻𐌰".as_bytes();
assert_eq!(100, s.len());
b.iter(|| {
let _ = String::from_utf8_lossy(s);
});
}
#[bench]
fn from_utf8_lossy_invalid(b: &mut Bencher) {
let s = b"Hello\xC0\x80 There\xE6\x83 Goodbye";
b.iter(|| {
let _ = String::from_utf8_lossy(s);
});
}
#[bench]
fn from_utf8_lossy_100_invalid(b: &mut Bencher) {
let s = repeat(0xf5).take(100).collect::<Vec<_>>();
b.iter(|| {
let _ = String::from_utf8_lossy(&s);
});
}
#[bench]
fn bench_exact_size_shrink_to_fit(b: &mut Bencher) {
let s = "Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
// ensure our operation produces an exact-size string before we benchmark it
let mut r = String::with_capacity(s.len());
r.push_str(s);
assert_eq!(r.len(), r.capacity());
b.iter(|| {
let mut r = String::with_capacity(s.len());
r.push_str(s);
r.shrink_to_fit();
r
});
}
#[bench]
fn bench_from_str(b: &mut Bencher) {
let s = "Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
b.iter(|| String::from(s))
}
#[bench]
fn bench_from(b: &mut Bencher) {
let s = "Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
b.iter(|| String::from(s))
}
#[bench]
fn bench_to_string(b: &mut Bencher) {
let s = "Hello there, the quick brown fox jumped over the lazy dog! \
Lorem ipsum dolor sit amet, consectetur. ";
b.iter(|| s.to_string())
}
|
b.iter(|| {
let mut r = String::new();
|
random_line_split
|
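The string.rs records rely on the nightly-only test::Bencher harness. On stable Rust the same push_str measurement can be approximated by hand with std::time::Instant, as in the sketch below; the timing is illustrative only and is not comparable to the harness's per-iteration averages.

use std::time::Instant;

// Hand-rolled timing loop; the nightly harness additionally runs many
// iterations and reports a per-iteration average, which this does not.
fn main() {
    const REPETITIONS: u64 = 10_000;
    let start = Instant::now();
    let mut r = String::new();
    for _ in 0..REPETITIONS {
        r.push_str("a");
    }
    let elapsed = start.elapsed();
    assert_eq!(r.len(), REPETITIONS as usize);
    println!("pushed {} bytes in {:?}", r.len(), elapsed);
}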
disk.rs
|
use alloc::arc::Arc;
use alloc::boxed::Box;
use collections::borrow::ToOwned;
use collections::String;
use core::cell::UnsafeCell;
use core::cmp;
use disk::Disk;
use fs::{KScheme, Resource, ResourceSeek, VecResource};
use syscall::{MODE_DIR, MODE_FILE, Stat};
use system::error::{Error, Result, ENOENT};
/// A disk resource
pub struct DiskResource {
pub path: String,
pub disk: Arc<UnsafeCell<Box<Disk>>>,
pub seek: u64,
}
impl Resource for DiskResource {
fn dup(&self) -> Result<Box<Resource>> {
Ok(box DiskResource {
path: self.path.clone(),
disk: self.disk.clone(),
seek: self.seek,
})
}
fn path(&self, buf: &mut [u8]) -> Result<usize> {
let path = self.path.as_bytes();
for (b, p) in buf.iter_mut().zip(path.iter()) {
*b = *p;
}
Ok(cmp::min(buf.len(), path.len()))
}
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
let count = try!(unsafe { &mut *self.disk.get() }.read(self.seek/512, buf));
self.seek += count as u64;
Ok(count)
}
fn write(&mut self, buf: &[u8]) -> Result<usize> {
let count = try!(unsafe { &mut *self.disk.get() }.write(self.seek/512, buf));
self.seek += count as u64;
Ok(count)
}
fn seek(&mut self, pos: ResourceSeek) -> Result<usize> {
let size = unsafe { & *self.disk.get() }.size();
match pos {
ResourceSeek::Start(offset) => self.seek = cmp::min(size, offset as u64),
ResourceSeek::Current(offset) => self.seek = cmp::min(size, cmp::max(0, self.seek as i64 + offset as i64) as u64),
ResourceSeek::End(offset) => self.seek = cmp::min(size, cmp::max(0, size as i64 + offset as i64) as u64),
}
Ok(self.seek as usize)
}
fn stat(&self, stat: &mut Stat) -> Result<()> {
stat.st_size = unsafe { & *self.disk.get() }.size() as u32;
stat.st_mode = MODE_FILE;
Ok(())
}
fn sync(&mut self) -> Result<()> {
Ok(())
}
|
}
impl Drop for DiskResource {
fn drop(&mut self) {
let _ = self.sync();
}
}
/// A disk scheme
pub struct DiskScheme;
impl KScheme for DiskScheme {
fn scheme(&self) -> &str {
"disk"
}
fn on_irq(&mut self, irq: u8) {
for disk in unsafe { &mut *::env().disks.get() }.iter_mut() {
unsafe { &mut *disk.get() }.on_irq(irq);
}
}
fn open(&mut self, url: &str, _flags: usize) -> Result<Box<Resource>> {
let path = url.splitn(2, ":").nth(1).unwrap_or("").trim_matches('/');
if path.is_empty() {
let mut list = String::new();
for i in 0..unsafe { & *::env().disks.get() }.len() {
                if ! list.is_empty() {
list.push('\n');
}
list.push_str(&format!("{}", i));
}
return Ok(box VecResource::new("disk:/".to_owned(), list.into_bytes(), MODE_DIR));
} else {
if let Ok(number) = path.parse::<usize>() {
if let Some(disk) = unsafe { & *::env().disks.get() }.get(number) {
return Ok(box DiskResource {
path: format!("disk:/{}", number),
disk: disk.clone(),
seek: 0
});
}
}
}
Err(Error::new(ENOENT))
}
}
|
random_line_split
|
|
disk.rs
|
use alloc::arc::Arc;
use alloc::boxed::Box;
use collections::borrow::ToOwned;
use collections::String;
use core::cell::UnsafeCell;
use core::cmp;
use disk::Disk;
use fs::{KScheme, Resource, ResourceSeek, VecResource};
use syscall::{MODE_DIR, MODE_FILE, Stat};
use system::error::{Error, Result, ENOENT};
/// A disk resource
pub struct DiskResource {
pub path: String,
pub disk: Arc<UnsafeCell<Box<Disk>>>,
pub seek: u64,
}
impl Resource for DiskResource {
fn dup(&self) -> Result<Box<Resource>> {
Ok(box DiskResource {
path: self.path.clone(),
disk: self.disk.clone(),
seek: self.seek,
})
}
fn path(&self, buf: &mut [u8]) -> Result<usize> {
let path = self.path.as_bytes();
for (b, p) in buf.iter_mut().zip(path.iter()) {
*b = *p;
}
Ok(cmp::min(buf.len(), path.len()))
}
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
let count = try!(unsafe { &mut *self.disk.get() }.read(self.seek/512, buf));
self.seek += count as u64;
Ok(count)
}
fn write(&mut self, buf: &[u8]) -> Result<usize> {
let count = try!(unsafe { &mut *self.disk.get() }.write(self.seek/512, buf));
self.seek += count as u64;
Ok(count)
}
fn seek(&mut self, pos: ResourceSeek) -> Result<usize> {
let size = unsafe { & *self.disk.get() }.size();
match pos {
ResourceSeek::Start(offset) => self.seek = cmp::min(size, offset as u64),
ResourceSeek::Current(offset) => self.seek = cmp::min(size, cmp::max(0, self.seek as i64 + offset as i64) as u64),
ResourceSeek::End(offset) => self.seek = cmp::min(size, cmp::max(0, size as i64 + offset as i64) as u64),
}
Ok(self.seek as usize)
}
fn stat(&self, stat: &mut Stat) -> Result<()> {
stat.st_size = unsafe { & *self.disk.get() }.size() as u32;
stat.st_mode = MODE_FILE;
Ok(())
}
fn sync(&mut self) -> Result<()> {
Ok(())
}
}
impl Drop for DiskResource {
fn drop(&mut self) {
let _ = self.sync();
}
}
/// A disk scheme
pub struct DiskScheme;
impl KScheme for DiskScheme {
fn scheme(&self) -> &str {
"disk"
}
fn on_irq(&mut self, irq: u8) {
for disk in unsafe { &mut *::env().disks.get() }.iter_mut() {
unsafe { &mut *disk.get() }.on_irq(irq);
}
}
fn open(&mut self, url: &str, _flags: usize) -> Result<Box<Resource>>
|
});
}
}
}
Err(Error::new(ENOENT))
}
}
|
{
let path = url.splitn(2, ":").nth(1).unwrap_or("").trim_matches('/');
if path.is_empty() {
let mut list = String::new();
for i in 0..unsafe { & *::env().disks.get() }.len() {
if ! list.is_empty() {
list.push('\n');
}
list.push_str(&format!("{}", i));
}
return Ok(box VecResource::new("disk:/".to_owned(), list.into_bytes(), MODE_DIR));
} else {
if let Ok(number) = path.parse::<usize>() {
if let Some(disk) = unsafe { & *::env().disks.get() }.get(number) {
return Ok(box DiskResource {
path: format!("disk:/{}", number),
disk: disk.clone(),
seek: 0
|
identifier_body
|
disk.rs
|
use alloc::arc::Arc;
use alloc::boxed::Box;
use collections::borrow::ToOwned;
use collections::String;
use core::cell::UnsafeCell;
use core::cmp;
use disk::Disk;
use fs::{KScheme, Resource, ResourceSeek, VecResource};
use syscall::{MODE_DIR, MODE_FILE, Stat};
use system::error::{Error, Result, ENOENT};
/// A disk resource
pub struct DiskResource {
pub path: String,
pub disk: Arc<UnsafeCell<Box<Disk>>>,
pub seek: u64,
}
impl Resource for DiskResource {
fn dup(&self) -> Result<Box<Resource>> {
Ok(box DiskResource {
path: self.path.clone(),
disk: self.disk.clone(),
seek: self.seek,
})
}
fn path(&self, buf: &mut [u8]) -> Result<usize> {
let path = self.path.as_bytes();
for (b, p) in buf.iter_mut().zip(path.iter()) {
*b = *p;
}
Ok(cmp::min(buf.len(), path.len()))
}
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
let count = try!(unsafe { &mut *self.disk.get() }.read(self.seek/512, buf));
self.seek += count as u64;
Ok(count)
}
fn
|
(&mut self, buf: &[u8]) -> Result<usize> {
let count = try!(unsafe { &mut *self.disk.get() }.write(self.seek/512, buf));
self.seek += count as u64;
Ok(count)
}
fn seek(&mut self, pos: ResourceSeek) -> Result<usize> {
let size = unsafe { & *self.disk.get() }.size();
match pos {
ResourceSeek::Start(offset) => self.seek = cmp::min(size, offset as u64),
ResourceSeek::Current(offset) => self.seek = cmp::min(size, cmp::max(0, self.seek as i64 + offset as i64) as u64),
ResourceSeek::End(offset) => self.seek = cmp::min(size, cmp::max(0, size as i64 + offset as i64) as u64),
}
Ok(self.seek as usize)
}
fn stat(&self, stat: &mut Stat) -> Result<()> {
stat.st_size = unsafe { & *self.disk.get() }.size() as u32;
stat.st_mode = MODE_FILE;
Ok(())
}
fn sync(&mut self) -> Result<()> {
Ok(())
}
}
impl Drop for DiskResource {
fn drop(&mut self) {
let _ = self.sync();
}
}
/// A disk scheme
pub struct DiskScheme;
impl KScheme for DiskScheme {
fn scheme(&self) -> &str {
"disk"
}
fn on_irq(&mut self, irq: u8) {
for disk in unsafe { &mut *::env().disks.get() }.iter_mut() {
unsafe { &mut *disk.get() }.on_irq(irq);
}
}
fn open(&mut self, url: &str, _flags: usize) -> Result<Box<Resource>> {
let path = url.splitn(2, ":").nth(1).unwrap_or("").trim_matches('/');
if path.is_empty() {
let mut list = String::new();
for i in 0..unsafe { & *::env().disks.get() }.len() {
                if ! list.is_empty() {
list.push('\n');
}
list.push_str(&format!("{}", i));
}
return Ok(box VecResource::new("disk:/".to_owned(), list.into_bytes(), MODE_DIR));
} else {
if let Ok(number) = path.parse::<usize>() {
if let Some(disk) = unsafe { & *::env().disks.get() }.get(number) {
return Ok(box DiskResource {
path: format!("disk:/{}", number),
disk: disk.clone(),
seek: 0
});
}
}
}
Err(Error::new(ENOENT))
}
}
|
write
|
identifier_name
|
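The seek logic in the disk.rs records clamps Start, Current, and End offsets into [0, size]. That clamping is easy to isolate and test on its own; below is a standalone version of the same match, where ResourceSeek is a local stand-in for the kernel enum.

use std::cmp;

// Local stand-in for the kernel's ResourceSeek enum.
enum ResourceSeek {
    Start(usize),
    Current(isize),
    End(isize),
}

// Clamp the new position into [0, size], exactly as the record's match does.
fn seek(pos: &mut u64, size: u64, req: ResourceSeek) -> u64 {
    match req {
        ResourceSeek::Start(offset) => *pos = cmp::min(size, offset as u64),
        ResourceSeek::Current(offset) => {
            *pos = cmp::min(size, cmp::max(0, *pos as i64 + offset as i64) as u64)
        }
        ResourceSeek::End(offset) => {
            *pos = cmp::min(size, cmp::max(0, size as i64 + offset as i64) as u64)
        }
    }
    *pos
}

fn main() {
    let mut pos = 0u64;
    assert_eq!(seek(&mut pos, 1024, ResourceSeek::Start(4096)), 1024); // clamped to size
    assert_eq!(seek(&mut pos, 1024, ResourceSeek::Current(-2048)), 0); // clamped to zero
    assert_eq!(seek(&mut pos, 1024, ResourceSeek::End(-24)), 1000);
    println!("seek clamping ok");
}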
clone_on_copy.rs
|
use clippy_utils::diagnostics::{span_lint_and_sugg, span_lint_and_then};
use clippy_utils::get_parent_node;
use clippy_utils::source::snippet_with_context;
use clippy_utils::sugg;
use clippy_utils::ty::is_copy;
use rustc_errors::Applicability;
use rustc_hir::{BindingAnnotation, Expr, ExprKind, MatchSource, Node, PatKind};
use rustc_lint::LateContext;
use rustc_middle::ty::{self, adjustment::Adjust};
use rustc_span::symbol::{sym, Symbol};
use super::CLONE_DOUBLE_REF;
use super::CLONE_ON_COPY;
/// Checks for the `CLONE_ON_COPY` lint.
#[allow(clippy::too_many_lines)]
pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, method_name: Symbol, args: &[Expr<'_>]) {
let arg = match args {
[arg] if method_name == sym::clone => arg,
_ => return,
};
if cx
.typeck_results()
.type_dependent_def_id(expr.hir_id)
.and_then(|id| cx.tcx.trait_of_item(id))
.zip(cx.tcx.lang_items().clone_trait())
        .map_or(true, |(x, y)| x != y)
{
return;
}
let arg_adjustments = cx.typeck_results().expr_adjustments(arg);
let arg_ty = arg_adjustments
.last()
.map_or_else(|| cx.typeck_results().expr_ty(arg), |a| a.target);
let ty = cx.typeck_results().expr_ty(expr);
if let ty::Ref(_, inner, _) = arg_ty.kind() {
if let ty::Ref(_, innermost, _) = inner.kind() {
span_lint_and_then(
cx,
CLONE_DOUBLE_REF,
expr.span,
&format!(
"using `clone` on a double-reference; \
this will copy the reference of type `{}` instead of cloning the inner type",
ty
),
|diag| {
if let Some(snip) = sugg::Sugg::hir_opt(cx, arg) {
let mut ty = innermost;
let mut n = 0;
while let ty::Ref(_, inner, _) = ty.kind() {
ty = inner;
n += 1;
}
let refs = "&".repeat(n + 1);
let derefs = "*".repeat(n);
let explicit = format!("<{}{}>::clone({})", refs, ty, snip);
diag.span_suggestion(
expr.span,
"try dereferencing it",
format!("{}({}{}).clone()", refs, derefs, snip.deref()),
Applicability::MaybeIncorrect,
);
diag.span_suggestion(
expr.span,
"or try being explicit if you are sure, that you want to clone a reference",
explicit,
Applicability::MaybeIncorrect,
);
}
},
);
return; // don't report clone_on_copy
}
}
|
if is_copy(cx, ty) {
let parent_is_suffix_expr = match get_parent_node(cx.tcx, expr.hir_id) {
Some(Node::Expr(parent)) => match parent.kind {
// &*x is a nop, &x.clone() is not
ExprKind::AddrOf(..) => return,
// (*x).func() is useless, x.clone().func() can work in case func borrows self
                ExprKind::MethodCall(_, _, [self_arg, ..], _)
                    if expr.hir_id == self_arg.hir_id && ty != cx.typeck_results().expr_ty_adjusted(expr) =>
{
return;
},
                ExprKind::MethodCall(_, _, [self_arg, ..], _) if expr.hir_id == self_arg.hir_id => true,
ExprKind::Match(_, _, MatchSource::TryDesugar | MatchSource::AwaitDesugar)
| ExprKind::Field(..)
| ExprKind::Index(..) => true,
_ => false,
},
// local binding capturing a reference
Some(Node::Local(l))
if matches!(
l.pat.kind,
                    PatKind::Binding(BindingAnnotation::Ref | BindingAnnotation::RefMut, ..)
) =>
{
return;
},
_ => false,
};
let mut app = Applicability::MachineApplicable;
let snip = snippet_with_context(cx, arg.span, expr.span.ctxt(), "_", &mut app).0;
let deref_count = arg_adjustments
.iter()
.take_while(|adj| matches!(adj.kind, Adjust::Deref(_)))
.count();
let (help, sugg) = if deref_count == 0 {
("try removing the `clone` call", snip.into())
} else if parent_is_suffix_expr {
("try dereferencing it", format!("({}{})", "*".repeat(deref_count), snip))
} else {
("try dereferencing it", format!("{}{}", "*".repeat(deref_count), snip))
};
span_lint_and_sugg(
cx,
CLONE_ON_COPY,
expr.span,
&format!("using `clone` on type `{}` which implements the `Copy` trait", ty),
help,
sugg,
app,
);
}
}
|
random_line_split
|
|
clone_on_copy.rs
|
use clippy_utils::diagnostics::{span_lint_and_sugg, span_lint_and_then};
use clippy_utils::get_parent_node;
use clippy_utils::source::snippet_with_context;
use clippy_utils::sugg;
use clippy_utils::ty::is_copy;
use rustc_errors::Applicability;
use rustc_hir::{BindingAnnotation, Expr, ExprKind, MatchSource, Node, PatKind};
use rustc_lint::LateContext;
use rustc_middle::ty::{self, adjustment::Adjust};
use rustc_span::symbol::{sym, Symbol};
use super::CLONE_DOUBLE_REF;
use super::CLONE_ON_COPY;
/// Checks for the `CLONE_ON_COPY` lint.
#[allow(clippy::too_many_lines)]
pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, method_name: Symbol, args: &[Expr<'_>])
|
if let ty::Ref(_, inner, _) = arg_ty.kind() {
if let ty::Ref(_, innermost, _) = inner.kind() {
span_lint_and_then(
cx,
CLONE_DOUBLE_REF,
expr.span,
&format!(
"using `clone` on a double-reference; \
this will copy the reference of type `{}` instead of cloning the inner type",
ty
),
|diag| {
if let Some(snip) = sugg::Sugg::hir_opt(cx, arg) {
let mut ty = innermost;
let mut n = 0;
while let ty::Ref(_, inner, _) = ty.kind() {
ty = inner;
n += 1;
}
let refs = "&".repeat(n + 1);
let derefs = "*".repeat(n);
let explicit = format!("<{}{}>::clone({})", refs, ty, snip);
diag.span_suggestion(
expr.span,
"try dereferencing it",
format!("{}({}{}).clone()", refs, derefs, snip.deref()),
Applicability::MaybeIncorrect,
);
diag.span_suggestion(
expr.span,
"or try being explicit if you are sure, that you want to clone a reference",
explicit,
Applicability::MaybeIncorrect,
);
}
},
);
return; // don't report clone_on_copy
}
}
if is_copy(cx, ty) {
let parent_is_suffix_expr = match get_parent_node(cx.tcx, expr.hir_id) {
Some(Node::Expr(parent)) => match parent.kind {
// &*x is a nop, &x.clone() is not
ExprKind::AddrOf(..) => return,
// (*x).func() is useless, x.clone().func() can work in case func borrows self
                ExprKind::MethodCall(_, _, [self_arg, ..], _)
                    if expr.hir_id == self_arg.hir_id && ty != cx.typeck_results().expr_ty_adjusted(expr) =>
{
return;
},
                ExprKind::MethodCall(_, _, [self_arg, ..], _) if expr.hir_id == self_arg.hir_id => true,
ExprKind::Match(_, _, MatchSource::TryDesugar | MatchSource::AwaitDesugar)
| ExprKind::Field(..)
| ExprKind::Index(..) => true,
_ => false,
},
// local binding capturing a reference
Some(Node::Local(l))
if matches!(
l.pat.kind,
                    PatKind::Binding(BindingAnnotation::Ref | BindingAnnotation::RefMut, ..)
) =>
{
return;
},
_ => false,
};
let mut app = Applicability::MachineApplicable;
let snip = snippet_with_context(cx, arg.span, expr.span.ctxt(), "_", &mut app).0;
let deref_count = arg_adjustments
.iter()
.take_while(|adj| matches!(adj.kind, Adjust::Deref(_)))
.count();
let (help, sugg) = if deref_count == 0 {
("try removing the `clone` call", snip.into())
} else if parent_is_suffix_expr {
("try dereferencing it", format!("({}{})", "*".repeat(deref_count), snip))
} else {
("try dereferencing it", format!("{}{}", "*".repeat(deref_count), snip))
};
span_lint_and_sugg(
cx,
CLONE_ON_COPY,
expr.span,
&format!("using `clone` on type `{}` which implements the `Copy` trait", ty),
help,
sugg,
app,
);
}
}
|
{
let arg = match args {
[arg] if method_name == sym::clone => arg,
_ => return,
};
if cx
.typeck_results()
.type_dependent_def_id(expr.hir_id)
.and_then(|id| cx.tcx.trait_of_item(id))
.zip(cx.tcx.lang_items().clone_trait())
.map_or(true, |(x, y)| x != y)
{
return;
}
let arg_adjustments = cx.typeck_results().expr_adjustments(arg);
let arg_ty = arg_adjustments
.last()
.map_or_else(|| cx.typeck_results().expr_ty(arg), |a| a.target);
let ty = cx.typeck_results().expr_ty(expr);
|
identifier_body
|
clone_on_copy.rs
|
use clippy_utils::diagnostics::{span_lint_and_sugg, span_lint_and_then};
use clippy_utils::get_parent_node;
use clippy_utils::source::snippet_with_context;
use clippy_utils::sugg;
use clippy_utils::ty::is_copy;
use rustc_errors::Applicability;
use rustc_hir::{BindingAnnotation, Expr, ExprKind, MatchSource, Node, PatKind};
use rustc_lint::LateContext;
use rustc_middle::ty::{self, adjustment::Adjust};
use rustc_span::symbol::{sym, Symbol};
use super::CLONE_DOUBLE_REF;
use super::CLONE_ON_COPY;
/// Checks for the `CLONE_ON_COPY` lint.
#[allow(clippy::too_many_lines)]
pub(super) fn
|
(cx: &LateContext<'_>, expr: &Expr<'_>, method_name: Symbol, args: &[Expr<'_>]) {
let arg = match args {
[arg] if method_name == sym::clone => arg,
_ => return,
};
if cx
.typeck_results()
.type_dependent_def_id(expr.hir_id)
.and_then(|id| cx.tcx.trait_of_item(id))
.zip(cx.tcx.lang_items().clone_trait())
        .map_or(true, |(x, y)| x != y)
{
return;
}
let arg_adjustments = cx.typeck_results().expr_adjustments(arg);
let arg_ty = arg_adjustments
.last()
.map_or_else(|| cx.typeck_results().expr_ty(arg), |a| a.target);
let ty = cx.typeck_results().expr_ty(expr);
if let ty::Ref(_, inner, _) = arg_ty.kind() {
if let ty::Ref(_, innermost, _) = inner.kind() {
span_lint_and_then(
cx,
CLONE_DOUBLE_REF,
expr.span,
&format!(
"using `clone` on a double-reference; \
this will copy the reference of type `{}` instead of cloning the inner type",
ty
),
|diag| {
if let Some(snip) = sugg::Sugg::hir_opt(cx, arg) {
let mut ty = innermost;
let mut n = 0;
while let ty::Ref(_, inner, _) = ty.kind() {
ty = inner;
n += 1;
}
let refs = "&".repeat(n + 1);
let derefs = "*".repeat(n);
let explicit = format!("<{}{}>::clone({})", refs, ty, snip);
diag.span_suggestion(
expr.span,
"try dereferencing it",
format!("{}({}{}).clone()", refs, derefs, snip.deref()),
Applicability::MaybeIncorrect,
);
diag.span_suggestion(
expr.span,
"or try being explicit if you are sure, that you want to clone a reference",
explicit,
Applicability::MaybeIncorrect,
);
}
},
);
return; // don't report clone_on_copy
}
}
if is_copy(cx, ty) {
let parent_is_suffix_expr = match get_parent_node(cx.tcx, expr.hir_id) {
Some(Node::Expr(parent)) => match parent.kind {
// &*x is a nop, &x.clone() is not
ExprKind::AddrOf(..) => return,
// (*x).func() is useless, x.clone().func() can work in case func borrows self
                ExprKind::MethodCall(_, _, [self_arg, ..], _)
                    if expr.hir_id == self_arg.hir_id && ty != cx.typeck_results().expr_ty_adjusted(expr) =>
{
return;
},
                ExprKind::MethodCall(_, _, [self_arg, ..], _) if expr.hir_id == self_arg.hir_id => true,
ExprKind::Match(_, _, MatchSource::TryDesugar | MatchSource::AwaitDesugar)
| ExprKind::Field(..)
| ExprKind::Index(..) => true,
_ => false,
},
// local binding capturing a reference
Some(Node::Local(l))
if matches!(
l.pat.kind,
                    PatKind::Binding(BindingAnnotation::Ref | BindingAnnotation::RefMut, ..)
) =>
{
return;
},
_ => false,
};
let mut app = Applicability::MachineApplicable;
let snip = snippet_with_context(cx, arg.span, expr.span.ctxt(), "_", &mut app).0;
let deref_count = arg_adjustments
.iter()
.take_while(|adj| matches!(adj.kind, Adjust::Deref(_)))
.count();
let (help, sugg) = if deref_count == 0 {
("try removing the `clone` call", snip.into())
} else if parent_is_suffix_expr {
("try dereferencing it", format!("({}{})", "*".repeat(deref_count), snip))
} else {
("try dereferencing it", format!("{}{}", "*".repeat(deref_count), snip))
};
span_lint_and_sugg(
cx,
CLONE_ON_COPY,
expr.span,
&format!("using `clone` on type `{}` which implements the `Copy` trait", ty),
help,
sugg,
app,
);
}
}
|
check
|
identifier_name
|
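The clone_on_copy.rs records implement clippy's CLONE_ON_COPY and CLONE_DOUBLE_REF checks. For reference, the sketch below shows the kind of code the lint flags and the fixes it suggests; Point is an illustrative type, and the comments paraphrase the lint messages seen above.

// A Copy type; calling .clone() on it is what CLONE_ON_COPY flags.
#[derive(Copy, Clone, Debug, PartialEq)]
struct Point {
    x: i32,
    y: i32,
}

#[allow(clippy::clone_on_copy)]
fn main() {
    let p = Point { x: 1, y: 2 };
    let a = p.clone(); // lint: using `clone` on type `Point` which implements the `Copy` trait
    let b = p; // suggested fix: a plain copy
    let r = &p;
    let c = r.clone(); // through a reference this clones the `Point` via auto-deref...
    let d = *r; // ...and the suggested fix is to dereference instead
    assert_eq!(a, b);
    assert_eq!(c, d);
}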
mod.rs
|
extern crate regex;
pub struct Prefix {
pub nick: String,
pub user: String,
pub host: String
}
impl Prefix {
    /// Attempts to parse a prefix, e.g. ":nick!user@host"
pub fn parse(prefix: &str) -> Option<Prefix> {
let re = match regex::Regex::new(r"^:(.+)!(.+)@(.+)$") {
Ok(re) => re,
Err(err) => panic!("{}", err),
};
match re.captures(prefix) {
Some(caps) => Some(Prefix{
nick: caps.at(1).to_string(),
user: caps.at(2).to_string(),
host: caps.at(3).to_string()
}),
None => None
}
}
}
pub struct Message {
pub prefix: Option<Prefix>,
pub command: String,
pub trailing: String
}
impl Message {
// Returns a single token from a line
pub fn parse_token(line: &str) -> (&str, &str) {
        let tokens: Vec<&str> = line.splitn(1, ' ').collect();
return (tokens[0], tokens[1]);
}
/// Parses a line and returns a Message instance
pub fn parse(line: &str) -> Message {
let (prefix, line) = Message::parse_token(line);
let (command, line) = Message::parse_token(line);
Message::new(prefix, command, line)
}
pub fn new(prefix: &str, command: &str, trailing: &str) -> Message
|
}
|
{
Message{
prefix: Prefix::parse(prefix),
command: command.to_string(),
trailing: trailing.to_string()
}
}
|
identifier_body
|
mod.rs
|
extern crate regex;
pub struct Prefix {
pub nick: String,
pub user: String,
pub host: String
}
impl Prefix {
    /// Attempts to parse a prefix, e.g. ":nick!user@host"
pub fn parse(prefix: &str) -> Option<Prefix> {
let re = match regex::Regex::new(r"^:(.+)!(.+)@(.+)$") {
Ok(re) => re,
Err(err) => panic!("{}", err),
};
match re.captures(prefix) {
Some(caps) => Some(Prefix{
nick: caps.at(1).to_string(),
user: caps.at(2).to_string(),
host: caps.at(3).to_string()
}),
None => None
}
}
}
pub struct
|
{
pub prefix: Option<Prefix>,
pub command: String,
pub trailing: String
}
impl Message {
// Returns a single token from a line
pub fn parse_token(line: &str) -> (&str, &str) {
        let tokens: Vec<&str> = line.splitn(1, ' ').collect();
return (tokens[0], tokens[1]);
}
/// Parses a line and returns a Message instance
pub fn parse(line: &str) -> Message {
let (prefix, line) = Message::parse_token(line);
let (command, line) = Message::parse_token(line);
Message::new(prefix, command, line)
}
pub fn new(prefix: &str, command: &str, trailing: &str) -> Message {
Message{
prefix: Prefix::parse(prefix),
command: command.to_string(),
trailing: trailing.to_string()
}
}
}
|
Message
|
identifier_name
|
mod.rs
|
extern crate regex;
pub struct Prefix {
pub nick: String,
|
}
impl Prefix {
    /// Attempts to parse a prefix, e.g. ":nick!user@host"
pub fn parse(prefix: &str) -> Option<Prefix> {
let re = match regex::Regex::new(r"^:(.+)!(.+)@(.+)$") {
Ok(re) => re,
Err(err) => panic!("{}", err),
};
match re.captures(prefix) {
Some(caps) => Some(Prefix{
nick: caps.at(1).to_string(),
user: caps.at(2).to_string(),
host: caps.at(3).to_string()
}),
None => None
}
}
}
pub struct Message {
pub prefix: Option<Prefix>,
pub command: String,
pub trailing: String
}
impl Message {
// Returns a single token from a line
pub fn parse_token(line: &str) -> (&str, &str) {
        let tokens: Vec<&str> = line.splitn(1, ' ').collect();
return (tokens[0], tokens[1]);
}
/// Parses a line and returns a Message instance
pub fn parse(line: &str) -> Message {
let (prefix, line) = Message::parse_token(line);
let (command, line) = Message::parse_token(line);
Message::new(prefix, command, line)
}
pub fn new(prefix: &str, command: &str, trailing: &str) -> Message {
Message{
prefix: Prefix::parse(prefix),
command: command.to_string(),
trailing: trailing.to_string()
}
}
}
|
pub user: String,
pub host: String
|
random_line_split
|
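The mod.rs records parse IRC prefixes with the pre-1.0 regex API (caps.at(i)). A std-only modern sketch of the same ":nick!user@host" parse is shown below, using split_once in place of the regex; note the semantics differ slightly on pathological inputs, since the regex's groups are greedy while split_once splits at the first separator.

struct Prefix {
    nick: String,
    user: String,
    host: String,
}

// Rough equivalent of the record's regex ^:(.+)!(.+)@(.+)$ using split_once.
fn parse_prefix(prefix: &str) -> Option<Prefix> {
    let rest = prefix.strip_prefix(':')?;
    let (nick, rest) = rest.split_once('!')?;
    let (user, host) = rest.split_once('@')?;
    if nick.is_empty() || user.is_empty() || host.is_empty() {
        return None;
    }
    Some(Prefix {
        nick: nick.to_string(),
        user: user.to_string(),
        host: host.to_string(),
    })
}

fn main() {
    let p = parse_prefix(":nick!user@host.example").unwrap();
    assert_eq!(
        (p.nick.as_str(), p.user.as_str(), p.host.as_str()),
        ("nick", "user", "host.example")
    );
    assert!(parse_prefix("missing-colon").is_none());
    println!("parsed {}!{}@{}", p.nick, p.user, p.host);
}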
event.rs
|
use std::ops::Deref;
use std::any::Any;
use glium::Display;
use timer::Ms;
use render::Renderer;
pub use glium::glutin::Event as WindowEvent;
pub enum Event {
Window (WindowEvent),
Message (String),
Something (String, Box<Any>),
}
pub struct EventStream (Vec<Event>);
impl EventStream {
pub fn new(display: &Display) -> EventStream {
use glium::glutin::Event::MouseMoved;
let f = display.get_window().unwrap().hidpi_factor();
let (w, h) = display.get_framebuffer_dimensions();
let (w, h) = (w as f32, h as f32);
let events: Vec<_> = display.poll_events().map(|event| match event {
MouseMoved ((x, y)) => {
let (x, y) = (x as f32, y as f32);
MouseMoved((((x - w/2.0)/f) as i32, (-(y - h/2.0)/f) as i32))
}
x => x
}).map(|e| Event::Window(e)).collect();
EventStream(events)
}
}
impl Deref for EventStream {
type Target = Vec<Event>;
fn deref<'a>(&'a self) -> &'a Vec<Event> {
let &EventStream (ref x) = self;
return x;
}
}
pub trait Update {
fn update(&mut self, renderer: &Renderer, delta: Ms, stream: EventStream) -> EventStream;
}
impl<'a> Update for Vec<&'a mut Update> {
|
for item in self {
stream = item.update(renderer, delta, stream);
}
return stream;
}
}
|
fn update(&mut self, renderer: &Renderer, delta: Ms, mut stream: EventStream)
-> EventStream
{
|
random_line_split
|
event.rs
|
use std::ops::Deref;
use std::any::Any;
use glium::Display;
use timer::Ms;
use render::Renderer;
pub use glium::glutin::Event as WindowEvent;
pub enum Event {
Window (WindowEvent),
Message (String),
Something (String, Box<Any>),
}
pub struct EventStream (Vec<Event>);
impl EventStream {
pub fn new(display: &Display) -> EventStream {
use glium::glutin::Event::MouseMoved;
let f = display.get_window().unwrap().hidpi_factor();
let (w, h) = display.get_framebuffer_dimensions();
let (w, h) = (w as f32, h as f32);
let events: Vec<_> = display.poll_events().map(|event| match event {
MouseMoved ((x, y)) => {
let (x, y) = (x as f32, y as f32);
MouseMoved((((x - w/2.0)/f) as i32, (-(y - h/2.0)/f) as i32))
}
x => x
}).map(|e| Event::Window(e)).collect();
EventStream(events)
}
}
impl Deref for EventStream {
type Target = Vec<Event>;
fn deref<'a>(&'a self) -> &'a Vec<Event>
|
}
pub trait Update {
fn update(&mut self, renderer: &Renderer, delta: Ms, stream: EventStream) -> EventStream;
}
impl<'a> Update for Vec<&'a mut Update> {
fn update(&mut self, renderer: &Renderer, delta: Ms, mut stream: EventStream)
-> EventStream
{
for item in self {
stream = item.update(renderer, delta, stream);
}
return stream;
}
}
|
{
let &EventStream (ref x) = self;
return x;
}
|
identifier_body
|
event.rs
|
use std::ops::Deref;
use std::any::Any;
use glium::Display;
use timer::Ms;
use render::Renderer;
pub use glium::glutin::Event as WindowEvent;
pub enum
|
{
Window (WindowEvent),
Message (String),
Something (String, Box<Any>),
}
pub struct EventStream (Vec<Event>);
impl EventStream {
pub fn new(display: &Display) -> EventStream {
use glium::glutin::Event::MouseMoved;
let f = display.get_window().unwrap().hidpi_factor();
let (w, h) = display.get_framebuffer_dimensions();
let (w, h) = (w as f32, h as f32);
let events: Vec<_> = display.poll_events().map(|event| match event {
MouseMoved ((x, y)) => {
let (x, y) = (x as f32, y as f32);
MouseMoved((((x - w/2.0)/f) as i32, (-(y - h/2.0)/f) as i32))
}
x => x
}).map(|e| Event::Window(e)).collect();
EventStream(events)
}
}
impl Deref for EventStream {
type Target = Vec<Event>;
fn deref<'a>(&'a self) -> &'a Vec<Event> {
let &EventStream (ref x) = self;
return x;
}
}
pub trait Update {
fn update(&mut self, renderer: &Renderer, delta: Ms, stream: EventStream) -> EventStream;
}
impl<'a> Update for Vec<&'a mut Update> {
fn update(&mut self, renderer: &Renderer, delta: Ms, mut stream: EventStream)
-> EventStream
{
for item in self {
stream = item.update(renderer, delta, stream);
}
return stream;
}
}
|
Event
|
identifier_name
|
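The Update trait in the event.rs records above threads a single EventStream through every registered object in order; a hedged sketch of one conforming component follows (EventCounter is hypothetical and not part of the original file):
// Hypothetical component: counts the events it sees and passes the stream on unchanged.
struct EventCounter { seen: usize }
impl Update for EventCounter {
    fn update(&mut self, _renderer: &Renderer, _delta: Ms, stream: EventStream) -> EventStream {
        self.seen += stream.len(); // Deref exposes Vec<Event>, so len() works here
        stream
    }
}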
nodemap.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use crate::errors::corruption;
use crate::Result;
use crate::RevlogEntry;
use radixbuf::key::KeyId;
use radixbuf::radix::{
radix_insert, radix_lookup, radix_lookup_unchecked, radix_prefix_lookup, RADIX_NCHILDREN,
};
use std::sync::Arc;
use std::u32;
/// An index for node to rev lookups.
///
/// The index depends entirely on an append-only changelog.i source
/// of truth. It does not support in-memory overrides, which could be
/// implemented at a higher level.
///
/// ```text
///
/// changelogi
/// +------------+
/// |... | node | < rev 0 \
/// +------------+ |
/// |... | node | < rev 1 |> included in the main (on-disk) index
/// +------------+ |
/// |.......... |... /
/// +------------+
/// |... | node | < next_index_rev \
/// +------------+ |
/// |... | node | < next_index_rev + 1 | will be built on-demand
/// +------------+ |> in the side (in-memory)
/// |.......... |... | index
/// +------------+ |
/// |... | node | < next_changelog_rev - 1 /
/// +------------+
/// < next_changelog_rev
/// ```
///
/// The main index is an immutable, periodically-rebuilt, on-disk radix buffer
/// with extra metadata recording the next revision unknown to the index.
/// The side index covers remaining revisions in changelogi, built on-demand and
/// is in-memory only. The side index is usually much smaller than the main index
/// so it can be built quickly.
///
/// ```text
/// main index side index
/// +---------------------+ +----------------------+
/// | next_index_rev: u32 | | (small radix buffer) |
/// +---------------------+ +----------------------+
/// | | (in-memory only)
/// |(large radix buffer) |
/// | |
/// +---------------------+
/// (backed by filesystem)
/// ```
///
/// Having the side index allows us to make the main index immutable for most
/// of the time even if the source of truth has changed. It's possible to update
/// the main index in-place. But that requires extra efforts to deal with possible
/// filesystem issues like locking, or unexpected poweroff.
#[derive(Clone)]
pub struct NodeRevMap<C, I> {
pub(crate) changelogi: C,
main_index: I, // Immutable main index
side_index: Arc<Vec<u32>>, // Mutable side index
}
// Offsets in the main radix and key buffers
const RADIX_NEXT_REV_OFFSET: usize = 0;
const RADIX_HEADER_LEN: usize = RADIX_NEXT_REV_OFFSET + 1;
// Offsets of root nodes in radix buffers
const MAIN_RADIX_OFFSET: u32 = 1;
const SIDE_RADIX_OFFSET: u32 = 0;
impl<C: AsRef<[RevlogEntry]>, I: AsRef<[u32]>> NodeRevMap<C, I> {
/// Initialize NodeMap from a non-inlined version of changelog.i and an incomplete index.
pub fn new(changelogi: C, main_index: I) -> Result<Self> {
// Sanity check if the index is corrupted or not.
// The index must contain at least 17 elements. index[0] tracks the last rev the index has.
// index[1..17] is the root radix node.
if main_index.as_ref().len() < RADIX_HEADER_LEN + RADIX_NCHILDREN {
return corruption("revlog radix index corrupted (main index too small)");
}
// Check if the index is behind and build incrementally
let next_rev = u32::from_be(main_index.as_ref()[RADIX_NEXT_REV_OFFSET]);
let end_rev = changelog_end_rev(&changelogi);
if next_rev > end_rev {
// next_rev cannot be larger than what changelogi has.
return corruption("revlog radix index corrupted (next_rev > end_rev)");
} else if next_rev > 0 {
// Sanity check: if the last node stored in the index does not match the changelogi,
// the index is broken and needs to be rebuilt. That can happen after a strip.
let rev: KeyId = (next_rev - 1).into();
let node = rev_to_node(&changelogi, rev)?;
if let Ok(Some(id)) = radix_lookup_unchecked(&main_index, MAIN_RADIX_OFFSET, &node) {
if id != rev {
return corruption("revlog radix index corrupted (revlog out-of-sync)");
}
} else {
return corruption("revlog radix index corrupted (revlog out-of-sync)");
}
}
// Build side_index for the revisions not in the main index
let mut side_index = vec![0u32; RADIX_NCHILDREN];
build(
&changelogi,
&mut side_index,
SIDE_RADIX_OFFSET,
next_rev,
end_rev,
)?;
let side_index = Arc::new(side_index);
Ok(NodeRevMap {
changelogi,
main_index,
side_index,
})
}
/// Convert hex prefix to node.
pub fn hex_prefix_to_node<T: AsRef<[u8]>>(&self, hex_prefix: T) -> Result<Option<&[u8]>> {
let bin_prefix = match hex_to_bin_base16(hex_prefix) {
Some(v) => v,
None => return Ok(None),
};
let iter = bin_prefix.iter().cloned();
let cl = &self.changelogi;
let main_res = radix_prefix_lookup(
&self.main_index,
MAIN_RADIX_OFFSET,
iter.clone(),
rev_to_node,
cl,
)?;
let side_res = radix_prefix_lookup(
self.side_index.as_ref(),
SIDE_RADIX_OFFSET,
iter,
rev_to_node,
cl,
)?;
match (main_res, side_res) {
(Some(_), Some(_)) => Err(crate::Error::AmbiguousPrefix),
(Some(rev), None) | (None, Some(rev)) => Ok(Some(rev_to_node(&self.changelogi, rev)?)),
_ => Ok(None),
}
}
/// Convert node to rev.
pub fn node_to_rev<T: AsRef<[u8]>>(&self, node: T) -> Result<Option<u32>> {
let cl = &self.changelogi;
if let Some(rev) = radix_lookup(&self.main_index, 1, &node, rev_to_node, cl)? {
Ok(Some(rev.into()))
} else if let Some(rev) = radix_lookup(self.side_index.as_ref(), 0, &node, rev_to_node, cl)?
{
Ok(Some(rev.into()))
} else {
Ok(None)
}
}
/// How many revisions the side index has.
pub fn lag(&self) -> u32 {
let next_rev = u32::from_be(self.main_index.as_ref()[0]);
let end_rev = changelog_end_rev(&self.changelogi);
end_rev - next_rev
}
/// Incrementally build the main index based on the existing one.
/// Note: this will memcpy the immutable main index so the new buffer
/// could be written and resized.
pub fn build_incrementally(&self) -> Result<Vec<u32>> {
// Copy the main index since we need to modify it.
let mut index = self.main_index.as_ref().to_vec();
let end_rev = changelog_end_rev(&self.changelogi);
let next_rev = u32::from_be(index[0]);
build(
&self.changelogi,
&mut index,
MAIN_RADIX_OFFSET,
next_rev,
end_rev,
)?;
index[0] = end_rev.to_be();
Ok(index)
}
}
/// Return the minimal revision number the changelog.i does not have.
fn changelog_end_rev<T: AsRef<[RevlogEntry]>>(changelogi: &T) -> u32 {
let changelogi = changelogi.as_ref();
let rev = changelogi.len();
if rev as u64 > u32::MAX as u64 {
panic!("rev exceeds 32 bit integers")
}
rev as u32
}
/// Read the given range of revisions (from `start_rev` (inclusive) to
/// `end_rev` (exclusive)) from changelogi. Insert them to the radix
/// index.
fn build<T>(
changelogi: &T,
index: &mut Vec<u32>,
radix_offset: u32,
start_rev: u32,
end_rev: u32,
) -> Result<()>
where
T: AsRef<[RevlogEntry]>,
{
// Reserve the approximate size needed for the index: 28 bytes (7 u32 words) per revision.
// See D1291 for a table of number of revisions and index sizes.
|
index.reserve(7 * (end_rev - start_rev) as usize);
for i in start_rev..end_rev {
let _ = radix_insert(index, radix_offset, i.into(), rev_to_node, changelogi)?;
}
Ok(())
}
/// Helper method similar to `radixbuf::key::FixedKey::read`, but takes a revision number instead.
fn rev_to_node<K: AsRef<[RevlogEntry]>>(changelogi: &K, rev: KeyId) -> radixbuf::Result<&[u8]> {
let buf = changelogi.as_ref();
let rev_usize: usize = rev.into();
let entry = &buf[rev_usize];
Ok(&entry.node[..])
}
/// Convert hex base16 sequence to binary base16 sequence.
fn hex_to_bin_base16<T: AsRef<[u8]>>(base16: T) -> Option<Vec<u8>> {
let base16 = base16.as_ref();
let len = base16.len();
let mut result = vec![0u8; len];
for (i, &ch) in base16.iter().enumerate() {
result[i] = match ch {
b'a'..=b'f' => ch - b'a' + 10,
b'A'..=b'F' => ch - b'A' + 10,
b'0'..=b'9' => ch - b'0',
_ => return None,
}
}
Some(result)
}
/// Return an empty index that can be used as "main_index" when passed to `new`.
pub fn empty_index_buffer() -> Vec<u8> {
return vec![0u8; 4 * (RADIX_HEADER_LEN + RADIX_NCHILDREN)];
}
|
random_line_split
|
|
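The doc comment in the record above divides lookups between the immutable on-disk main index and the small in-memory side index; a hedged arithmetic sketch of that split (revision counts are illustrative):
// Illustrative only: the main index covers revs 0..next_index_rev, the side index the rest.
let next_index_rev: u32 = 90_000; // stored big-endian at index[0]
let next_changelog_rev: u32 = 90_120; // what changelog_end_rev() would report
assert_eq!(next_changelog_rev - next_index_rev, 120); // == lag(), the side index's size in revs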
nodemap.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use crate::errors::corruption;
use crate::Result;
use crate::RevlogEntry;
use radixbuf::key::KeyId;
use radixbuf::radix::{
radix_insert, radix_lookup, radix_lookup_unchecked, radix_prefix_lookup, RADIX_NCHILDREN,
};
use std::sync::Arc;
use std::u32;
/// An index for node to rev lookups.
///
/// The index depends entirely on an append-only changelog.i source
/// of truth. It does not support in-memory overrides, which could be
/// implemented at a higher level.
///
/// ```text
///
/// changelogi
/// +------------+
/// |... | node | < rev 0 \
/// +------------+ |
/// |... | node | < rev 1 |> included in the main (on-disk) index
/// +------------+ |
/// |.......... |... /
/// +------------+
/// |... | node | < next_index_rev \
/// +------------+ |
/// |... | node | < next_index_rev + 1 | will be built on-demand
/// +------------+ |> in the side (in-memory)
/// |.......... |... | index
/// +------------+ |
/// |... | node | < next_changelog_rev - 1 /
/// +------------+
/// < next_changelog_rev
/// ```
///
/// The main index is an immutable, periodically-rebuilt, on-disk radix buffer
/// with extra metadata recording the next revision unknown to the index.
/// The side index covers remaining revisions in changelogi, built on-demand and
/// is in-memory only. The side index is usually much smaller than the main index
/// so it can be built quickly.
///
/// ```text
/// main index side index
/// +---------------------+ +----------------------+
/// | next_index_rev: u32 | | (small radix buffer) |
/// +---------------------+ +----------------------+
/// | | (in-memory only)
/// |(large radix buffer) |
/// | |
/// +---------------------+
/// (backed by filesystem)
/// ```
///
/// Having the side index allows us to make the main index immutable for most
/// of the time even if the source of truth has changed. It's possible to update
/// the main index in-place. But that requires extra efforts to deal with possible
/// filesystem issues like locking, or unexpected poweroff.
#[derive(Clone)]
pub struct NodeRevMap<C, I> {
pub(crate) changelogi: C,
main_index: I, // Immutable main index
side_index: Arc<Vec<u32>>, // Mutable side index
}
// Offsets in the main radix and key buffers
const RADIX_NEXT_REV_OFFSET: usize = 0;
const RADIX_HEADER_LEN: usize = RADIX_NEXT_REV_OFFSET + 1;
// Offsets of root nodes in radix buffers
const MAIN_RADIX_OFFSET: u32 = 1;
const SIDE_RADIX_OFFSET: u32 = 0;
impl<C: AsRef<[RevlogEntry]>, I: AsRef<[u32]>> NodeRevMap<C, I> {
/// Initialize NodeMap from a non-inlined version of changelog.i and an incomplete index.
pub fn new(changelogi: C, main_index: I) -> Result<Self> {
// Sanity check if the index is corrupted or not.
// The index must contain at least 17 elements. index[0] tracks the last rev the index has.
// index[1..17] is the root radix node.
if main_index.as_ref().len() < RADIX_HEADER_LEN + RADIX_NCHILDREN {
return corruption("revlog radix index corrupted (main index too small)");
}
// Check if the index is behind and build incrementally
let next_rev = u32::from_be(main_index.as_ref()[RADIX_NEXT_REV_OFFSET]);
let end_rev = changelog_end_rev(&changelogi);
if next_rev > end_rev {
// next_rev cannot be larger than what changelogi has.
return corruption("revlog radix index corrupted (next_rev > end_rev)");
} else if next_rev > 0 {
// Sanity check: if the last node stored in the index does not match the changelogi,
// the index is broken and needs to be rebuilt. That can happen after a strip.
let rev: KeyId = (next_rev - 1).into();
let node = rev_to_node(&changelogi, rev)?;
if let Ok(Some(id)) = radix_lookup_unchecked(&main_index, MAIN_RADIX_OFFSET, &node) {
if id != rev {
return corruption("revlog radix index corrupted (revlog out-of-sync)");
}
} else {
return corruption("revlog radix index corrupted (revlog out-of-sync)");
}
}
// Build side_index for the revisions not in the main index
let mut side_index = vec![0u32; RADIX_NCHILDREN];
build(
&changelogi,
&mut side_index,
SIDE_RADIX_OFFSET,
next_rev,
end_rev,
)?;
let side_index = Arc::new(side_index);
Ok(NodeRevMap {
changelogi,
main_index,
side_index,
})
}
/// Convert hex prefix to node.
pub fn hex_prefix_to_node<T: AsRef<[u8]>>(&self, hex_prefix: T) -> Result<Option<&[u8]>> {
let bin_prefix = match hex_to_bin_base16(hex_prefix) {
Some(v) => v,
None => return Ok(None),
};
let iter = bin_prefix.iter().cloned();
let cl = &self.changelogi;
let main_res = radix_prefix_lookup(
&self.main_index,
MAIN_RADIX_OFFSET,
iter.clone(),
rev_to_node,
cl,
)?;
let side_res = radix_prefix_lookup(
self.side_index.as_ref(),
SIDE_RADIX_OFFSET,
iter,
rev_to_node,
cl,
)?;
match (main_res, side_res) {
(Some(_), Some(_)) => Err(crate::Error::AmbiguousPrefix),
(Some(rev), None) | (None, Some(rev)) => Ok(Some(rev_to_node(&self.changelogi, rev)?)),
_ => Ok(None),
}
}
/// Convert node to rev.
pub fn node_to_rev<T: AsRef<[u8]>>(&self, node: T) -> Result<Option<u32>> {
let cl = &self.changelogi;
if let Some(rev) = radix_lookup(&self.main_index, 1, &node, rev_to_node, cl)? {
Ok(Some(rev.into()))
} else if let Some(rev) = radix_lookup(self.side_index.as_ref(), 0, &node, rev_to_node, cl)?
{
Ok(Some(rev.into()))
} else {
Ok(None)
}
}
/// How many revisions the side index has.
pub fn lag(&self) -> u32 {
let next_rev = u32::from_be(self.main_index.as_ref()[0]);
let end_rev = changelog_end_rev(&self.changelogi);
end_rev - next_rev
}
/// Incrementally build the main index based on the existing one.
/// Note: this will memcpy the immutable main index so the new buffer
/// could be written and resized.
pub fn build_incrementally(&self) -> Result<Vec<u32>> {
// Copy the main index since we need to modify it.
let mut index = self.main_index.as_ref().to_vec();
let end_rev = changelog_end_rev(&self.changelogi);
let next_rev = u32::from_be(index[0]);
build(
&self.changelogi,
&mut index,
MAIN_RADIX_OFFSET,
next_rev,
end_rev,
)?;
index[0] = end_rev.to_be();
Ok(index)
}
}
/// Return the minimal revision number the changelog.i does not have.
fn changelog_end_rev<T: AsRef<[RevlogEntry]>>(changelogi: &T) -> u32 {
let changelogi = changelogi.as_ref();
let rev = changelogi.len();
if rev as u64 > u32::MAX as u64 {
panic!("rev exceeds 32 bit integers")
}
rev as u32
}
/// Read the given range of revisions (from `start_rev` (inclusive) to
/// `end_rev` (exclusive)) from changelogi. Insert them to the radix
/// index.
fn build<T>(
changelogi: &T,
index: &mut Vec<u32>,
radix_offset: u32,
start_rev: u32,
end_rev: u32,
) -> Result<()>
where
T: AsRef<[RevlogEntry]>,
{
// Reserve the approximate size needed for the index: 28 bytes (7 u32 words) per revision.
// See D1291 for a table of number of revisions and index sizes.
index.reserve(7 * (end_rev - start_rev) as usize);
for i in start_rev..end_rev {
let _ = radix_insert(index, radix_offset, i.into(), rev_to_node, changelogi)?;
}
Ok(())
}
/// Helper method similar to `radixbuf::key::FixedKey::read`, but takes a revision number instead.
fn
|
<K: AsRef<[RevlogEntry]>>(changelogi: &K, rev: KeyId) -> radixbuf::Result<&[u8]> {
let buf = changelogi.as_ref();
let rev_usize: usize = rev.into();
let entry = &buf[rev_usize];
Ok(&entry.node[..])
}
/// Convert hex base16 sequence to binary base16 sequence.
fn hex_to_bin_base16<T: AsRef<[u8]>>(base16: T) -> Option<Vec<u8>> {
let base16 = base16.as_ref();
let len = base16.len();
let mut result = vec![0u8; len];
for (i, &ch) in base16.iter().enumerate() {
result[i] = match ch {
b'a'..=b'f' => ch - b'a' + 10,
b'A'..=b'F' => ch - b'A' + 10,
b'0'..=b'9' => ch - b'0',
_ => return None,
}
}
Some(result)
}
/// Return an empty index that can be used as "main_index" when passed to `new`.
pub fn empty_index_buffer() -> Vec<u8> {
return vec![0u8; 4 * (RADIX_HEADER_LEN + RADIX_NCHILDREN)];
}
|
rev_to_node
|
identifier_name
|
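hex_prefix_to_node in the records above first widens a hex string into one nibble per byte through hex_to_bin_base16, which is what lets odd-length prefixes resolve; a short worked sketch of that conversion:
// One nibble per hex digit, so a 3-character prefix is representable as [3, 10, 15].
assert_eq!(hex_to_bin_base16("3af"), Some(vec![3u8, 10, 15]));
assert_eq!(hex_to_bin_base16("3g"), None); // any non-hex character rejects the whole prefix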
nodemap.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use crate::errors::corruption;
use crate::Result;
use crate::RevlogEntry;
use radixbuf::key::KeyId;
use radixbuf::radix::{
radix_insert, radix_lookup, radix_lookup_unchecked, radix_prefix_lookup, RADIX_NCHILDREN,
};
use std::sync::Arc;
use std::u32;
/// An index for node to rev lookups.
///
/// The index depends entirely on an append-only changelog.i source
/// of truth. It does not support in-memory overrides, which could be
/// implemented at a higher level.
///
/// ```text
///
/// changelogi
/// +------------+
/// |... | node | < rev 0 \
/// +------------+ |
/// |... | node | < rev 1 |> included in the main (on-disk) index
/// +------------+ |
/// |.......... |... /
/// +------------+
/// |... | node | < next_index_rev \
/// +------------+ |
/// |... | node | < next_index_rev + 1 | will be built on-demand
/// +------------+ |> in the side (in-memory)
/// |.......... |... | index
/// +------------+ |
/// |... | node | < next_changelog_rev - 1 /
/// +------------+
/// < next_changelog_rev
/// ```
///
/// The main index is an immutable, periodically-rebuilt, on-disk radix buffer
/// with extra metadata recording the next revision unknown to the index.
/// The side index covers remaining revisions in changelogi, built on-demand and
/// is in-memory only. The side index is usually much smaller than the main index
/// so it can be built quickly.
///
/// ```text
/// main index side index
/// +---------------------+ +----------------------+
/// | next_index_rev: u32 | | (small radix buffer) |
/// +---------------------+ +----------------------+
/// | | (in-memory only)
/// |(large radix buffer) |
/// | |
/// +---------------------+
/// (backed by filesystem)
/// ```
///
/// Having the side index allows us to make the main index immutable for most
/// of the time even if the source of truth has changed. It's possible to update
/// the main index in-place. But that requires extra efforts to deal with possible
/// filesystem issues like locking, or unexpected poweroff.
#[derive(Clone)]
pub struct NodeRevMap<C, I> {
pub(crate) changelogi: C,
main_index: I, // Immutable main index
side_index: Arc<Vec<u32>>, // Mutable side index
}
// Offsets in the main radix and key buffers
const RADIX_NEXT_REV_OFFSET: usize = 0;
const RADIX_HEADER_LEN: usize = RADIX_NEXT_REV_OFFSET + 1;
// Offsets of root nodes in radix buffers
const MAIN_RADIX_OFFSET: u32 = 1;
const SIDE_RADIX_OFFSET: u32 = 0;
impl<C: AsRef<[RevlogEntry]>, I: AsRef<[u32]>> NodeRevMap<C, I> {
/// Initialize NodeMap from a non-inlined version of changelog.i and an incomplete index.
pub fn new(changelogi: C, main_index: I) -> Result<Self> {
// Sanity check if the index is corrupted or not.
// The index must contain at least 17 elements. index[0] tracks the last rev the index has.
// index[1..17] is the root radix node.
if main_index.as_ref().len() < RADIX_HEADER_LEN + RADIX_NCHILDREN
|
// Check if the index is behind and build incrementally
let next_rev = u32::from_be(main_index.as_ref()[RADIX_NEXT_REV_OFFSET]);
let end_rev = changelog_end_rev(&changelogi);
if next_rev > end_rev {
// next_rev cannot be larger than what changelogi has.
return corruption("revlog radix index corrupted (next_rev > end_rev)");
} else if next_rev > 0 {
// Sanity check: if the last node stored in the index does not match the changelogi,
// the index is broken and needs to be rebuilt. That can happen after a strip.
let rev: KeyId = (next_rev - 1).into();
let node = rev_to_node(&changelogi, rev)?;
if let Ok(Some(id)) = radix_lookup_unchecked(&main_index, MAIN_RADIX_OFFSET, &node) {
if id != rev {
return corruption("revlog radix index corrupted (revlog out-of-sync)");
}
} else {
return corruption("revlog radix index corrupted (revlog out-of-sync)");
}
}
// Build side_index for the revisions not in the main index
let mut side_index = vec![0u32; RADIX_NCHILDREN];
build(
&changelogi,
&mut side_index,
SIDE_RADIX_OFFSET,
next_rev,
end_rev,
)?;
let side_index = Arc::new(side_index);
Ok(NodeRevMap {
changelogi,
main_index,
side_index,
})
}
/// Convert hex prefix to node.
pub fn hex_prefix_to_node<T: AsRef<[u8]>>(&self, hex_prefix: T) -> Result<Option<&[u8]>> {
let bin_prefix = match hex_to_bin_base16(hex_prefix) {
Some(v) => v,
None => return Ok(None),
};
let iter = bin_prefix.iter().cloned();
let cl = &self.changelogi;
let main_res = radix_prefix_lookup(
&self.main_index,
MAIN_RADIX_OFFSET,
iter.clone(),
rev_to_node,
cl,
)?;
let side_res = radix_prefix_lookup(
self.side_index.as_ref(),
SIDE_RADIX_OFFSET,
iter,
rev_to_node,
cl,
)?;
match (main_res, side_res) {
(Some(_), Some(_)) => Err(crate::Error::AmbiguousPrefix),
(Some(rev), None) | (None, Some(rev)) => Ok(Some(rev_to_node(&self.changelogi, rev)?)),
_ => Ok(None),
}
}
/// Convert node to rev.
pub fn node_to_rev<T: AsRef<[u8]>>(&self, node: T) -> Result<Option<u32>> {
let cl = &self.changelogi;
if let Some(rev) = radix_lookup(&self.main_index, 1, &node, rev_to_node, cl)? {
Ok(Some(rev.into()))
} else if let Some(rev) = radix_lookup(self.side_index.as_ref(), 0, &node, rev_to_node, cl)?
{
Ok(Some(rev.into()))
} else {
Ok(None)
}
}
/// How many revisions the side index has.
pub fn lag(&self) -> u32 {
let next_rev = u32::from_be(self.main_index.as_ref()[0]);
let end_rev = changelog_end_rev(&self.changelogi);
end_rev - next_rev
}
/// Incrementally build the main index based on the existing one.
/// Note: this will memcpy the immutable main index so the new buffer
/// could be written and resized.
pub fn build_incrementally(&self) -> Result<Vec<u32>> {
// Copy the main index since we need to modify it.
let mut index = self.main_index.as_ref().to_vec();
let end_rev = changelog_end_rev(&self.changelogi);
let next_rev = u32::from_be(index[0]);
build(
&self.changelogi,
&mut index,
MAIN_RADIX_OFFSET,
next_rev,
end_rev,
)?;
index[0] = end_rev.to_be();
Ok(index)
}
}
/// Return the minimal revision number the changelog.i does not have.
fn changelog_end_rev<T: AsRef<[RevlogEntry]>>(changelogi: &T) -> u32 {
let changelogi = changelogi.as_ref();
let rev = changelogi.len();
if rev as u64 > u32::MAX as u64 {
panic!("rev exceeds 32 bit integers")
}
rev as u32
}
/// Read the given range of revisions (from `start_rev` (inclusive) to
/// `end_rev` (exclusive)) from changelogi. Insert them to the radix
/// index.
fn build<T>(
changelogi: &T,
index: &mut Vec<u32>,
radix_offset: u32,
start_rev: u32,
end_rev: u32,
) -> Result<()>
where
T: AsRef<[RevlogEntry]>,
{
// Reserve the approximate size needed for the index: 28 bytes (7 u32 words) per revision.
// See D1291 for a table of number of revisions and index sizes.
index.reserve(7 * (end_rev - start_rev) as usize);
for i in start_rev..end_rev {
let _ = radix_insert(index, radix_offset, i.into(), rev_to_node, changelogi)?;
}
Ok(())
}
/// Helper method similar to `radixbuf::key::FixedKey::read`, but takes a revision number instead.
fn rev_to_node<K: AsRef<[RevlogEntry]>>(changelogi: &K, rev: KeyId) -> radixbuf::Result<&[u8]> {
let buf = changelogi.as_ref();
let rev_usize: usize = rev.into();
let entry = &buf[rev_usize];
Ok(&entry.node[..])
}
/// Convert hex base16 sequence to binary base16 sequence.
fn hex_to_bin_base16<T: AsRef<[u8]>>(base16: T) -> Option<Vec<u8>> {
let base16 = base16.as_ref();
let len = base16.len();
let mut result = vec![0u8; len];
for (i, &ch) in base16.iter().enumerate() {
result[i] = match ch {
b'a'..=b'f' => ch - b'a' + 10,
b'A'..=b'F' => ch - b'A' + 10,
b'0'..=b'9' => ch - b'0',
_ => return None,
}
}
Some(result)
}
/// Return an empty index that can be used as "main_index" when passed to `new`.
pub fn empty_index_buffer() -> Vec<u8> {
return vec![0u8; 4 * (RADIX_HEADER_LEN + RADIX_NCHILDREN)];
}
|
{
return corruption("revlog radix index corrupted (main index too small)");
}
|
conditional_block
|
zero.rs
|
// Copyright (c) 2006-2009 Graydon Hoare
// Copyright (c) 2009-2013 Mozilla Foundation
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// zero.rs
//
// Minimal definitions of the core primitives in Rust. Include this file with
// your project to create a freestanding Rust program that can run on bare
// metal.
//
#[allow(ctypes)];
// Built-in traits
#[lang="copy"]
pub trait Copy {}
#[lang="owned"]
pub trait Owned {}
#[lang="freeze"]
pub trait Freeze {}
#[lang="opaque"]
pub enum Opaque {}
#[lang="ty_desc"]
pub struct TyDesc;
#[lang="ty_visitor"]
pub trait TyVisitor {}
#[lang="closure_exchange_malloc"]
pub trait ClosureExchangeMalloc {}
#[lang="send"]
pub trait Send {}
#[lang="sized"]
pub trait Sized {}
#[lang="drop"]
pub trait Drop {
fn finalize(&self);
}
// Operator overloading
#[lang="eq"]
pub trait Eq {
fn eq(&self, other: &Self) -> bool;
fn ne(&self, other: &Self) -> bool;
}
#[lang="ord"]
pub trait Ord {
fn lt(&self, other: &Self) -> bool;
fn le(&self, other: &Self) -> bool;
fn ge(&self, other: &Self) -> bool;
fn gt(&self, other: &Self) -> bool;
}
#[lang="add"]
pub trait Add<Rhs,Result> {
fn add(&self, rhs: &Rhs) -> Result;
}
#[lang="sub"]
pub trait Sub<Rhs,Result> {
fn sub(&self, rhs: &Rhs) -> Result;
}
#[lang="mul"]
pub trait Mul<Rhs,Result> {
fn mul(&self, rhs: &Rhs) -> Result;
}
#[lang="div"]
pub trait Div<Rhs,Result> {
fn div(&self, rhs: &Rhs) -> Result;
}
#[lang="rem"]
pub trait Rem<Rhs,Result> {
fn rem(&self, rhs: &Rhs) -> Result;
}
#[lang="neg"]
pub trait Neg<Rhs,Result> {
fn neg(&self) -> Result;
}
#[lang="not"]
pub trait Not<Rhs,Result> {
fn not(&self) -> Result;
}
#[lang="bitand"]
pub trait BitAnd<Rhs,Result> {
fn bitand(&self, rhs: &Rhs) -> Result;
}
#[lang="bitor"]
pub trait BitOr<Rhs,Result> {
fn bitor(&self, rhs: &Rhs) -> Result;
}
#[lang="bitxor"]
pub trait BitXor<Rhs,Result> {
fn bitxor(&self, rhs: &Rhs) -> Result;
}
#[lang="shl"]
pub trait Shl<Rhs,Result> {
fn shl(&self, rhs: &Rhs) -> Result;
}
#[lang="shr"]
pub trait Shr<Rhs,Result> {
fn shr(&self, rhs: &Rhs) -> Result;
}
#[lang="index"]
pub trait Index<Index,Result> {
fn index(&self, rhs: &Index) -> Result;
}
// String utilities
#[lang="str_eq"]
#[fixed_stack_segment]
pub fn str_eq(a: &str, b: &str) -> bool {
unsafe {
let (aptr, alen): (*u8, uint) = transmute(a);
let (bptr, blen): (*u8, uint) = transmute(b);
if alen != blen {
return false
}
memcmp(aptr, bptr, alen - 1) == 0
}
}
// FIXME(pcwalton): This function is legacy junk.
#[lang="uniq_str_eq"]
pub fn uniq_str_eq(a: &~str, b: &~str) -> bool {
str_eq(*a, *b)
}
struct StringRepr {
fill: uint,
alloc: uint,
}
// FIXME(pcwalton): This function should not be necessary, I don't think.
#[lang="strdup_uniq"]
#[fixed_stack_segment]
pub unsafe fn strdup_uniq(ptr: *u8, len: uint) -> ~str {
let size = size_of::<StringRepr>() + len + 1;
let string: *mut StringRepr = transmute(exchange_malloc(transmute(0),
size));
(*string).fill = len + 1;
(*string).alloc = len + 1;
let mut data_ptr: uint = transmute(string);
data_ptr += size_of::<StringRepr>();
let data_ptr: *mut u8 = transmute(data_ptr);
memcpy(data_ptr, ptr, len + 1);
transmute(string)
}
// Legacy junk
#[lang="log_type"]
pub fn log_type<T>(_: u32, _: &T) {
// FIXME: This function should not be a lang item.
}
#[lang="annihilate"]
pub unsafe fn annihilate() {}
// Failure
#[lang="fail_"]
#[fixed_stack_segment]
pub fn fail(_: *i8, _: *i8, _: uint) -> ! {
unsafe {
abort()
}
}
#[lang="fail_bounds_check"]
#[fixed_stack_segment]
pub fn fail_bounds_check(_: *i8, _: uint, _: uint, _: uint) {
unsafe {
abort()
}
}
// Memory allocation
// FIXME: So grotesquely inefficient.
struct Header {
minus_one: uint, // Must be -1.
type_desc: *i8,
null_0: uint, // Must be null.
null_1: uint, // Must be null.
}
|
let alloc: *mut Header = transmute(malloc(size_of::<Header>() + size));
(*alloc).minus_one = -1;
(*alloc).type_desc = type_desc;
(*alloc).null_0 = 0;
(*alloc).null_1 = 0;
transmute(alloc)
}
#[lang="exchange_free"]
#[fixed_stack_segment]
pub unsafe fn exchange_free(alloc: *i8) {
free(transmute(alloc))
}
// Entry point
// TODO(pcwalton): Stash argc and argv somewhere. Probably needs to wait on
// global variables.
#[lang="start"]
pub fn start(main: *u8, _: int, _: **i8, _: *u8) -> int {
unsafe {
let main: extern "Rust" fn() = transmute(main);
main();
0
}
}
// The nonexistent garbage collector
#[lang="malloc"]
#[fixed_stack_segment]
pub unsafe fn gc_malloc(_: *i8, _: uint) -> *i8 {
abort()
}
#[lang="free"]
#[fixed_stack_segment]
pub unsafe fn gc_free(_: *i8) {
abort()
}
#[lang="borrow_as_imm"]
#[fixed_stack_segment]
pub unsafe fn borrow_as_imm(_: *u8, _: *i8, _: uint) -> uint {
abort()
}
#[lang="borrow_as_mut"]
#[fixed_stack_segment]
pub unsafe fn borrow_as_mut(_: *u8, _: *i8, _: uint) -> uint {
abort()
}
#[lang="record_borrow"]
#[fixed_stack_segment]
pub unsafe fn record_borrow(_: *u8, _: uint, _: *i8, _: uint) {
abort()
}
#[lang="unrecord_borrow"]
#[fixed_stack_segment]
pub unsafe fn unrecord_borrow(_: *u8, _: uint, _: *i8, _: uint) {
abort()
}
#[lang="return_to_mut"]
#[fixed_stack_segment]
pub unsafe fn return_to_mut(_: *u8, _: uint, _: *i8, _: uint) {
abort()
}
#[lang="check_not_borrowed"]
#[fixed_stack_segment]
pub unsafe fn check_not_borrowed(_: *u8, _: *i8, _: uint) {
abort()
}
// libc dependencies
extern {
#[fast_ffi]
pub fn malloc(size: uint) -> *u8;
#[fast_ffi]
pub fn free(ptr: *u8);
#[fast_ffi]
pub fn abort() -> !;
#[fast_ffi]
pub fn memcpy(dest: *mut u8, src: *u8, size: uint) -> *u8;
#[fast_ffi]
pub fn memcmp(a: *u8, b: *u8, size: uint) -> i32;
}
// Rust intrinsic dependencies
extern "rust-intrinsic" {
pub fn transmute<T,U>(val: T) -> U;
pub fn size_of<T>() -> uint;
}
|
// FIXME: This is horrendously inefficient.
#[lang="exchange_malloc"]
#[fixed_stack_segment]
pub unsafe fn exchange_malloc(type_desc: *i8, size: uint) -> *i8 {
|
random_line_split
|
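exchange_malloc in the record above prepends a four-field Header to every box it hands out; a hedged round-trip sketch mirroring how strdup_uniq drives it (null type descriptor via transmute(0), exactly as in the original; the size is illustrative):
unsafe {
    // Allocate 16 user bytes behind a Header, then return the same pointer to the allocator.
    let alloc: *i8 = exchange_malloc(transmute(0), 16);
    exchange_free(alloc);
}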
zero.rs
|
// Copyright (c) 2006-2009 Graydon Hoare
// Copyright (c) 2009-2013 Mozilla Foundation
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// zero.rs
//
// Minimal definitions of the core primitives in Rust. Include this file with
// your project to create a freestanding Rust program that can run on bare
// metal.
//
#[allow(ctypes)];
// Built-in traits
#[lang="copy"]
pub trait Copy {}
#[lang="owned"]
pub trait Owned {}
#[lang="freeze"]
pub trait Freeze {}
#[lang="opaque"]
pub enum Opaque {}
#[lang="ty_desc"]
pub struct TyDesc;
#[lang="ty_visitor"]
pub trait TyVisitor {}
#[lang="closure_exchange_malloc"]
pub trait ClosureExchangeMalloc {}
#[lang="send"]
pub trait Send {}
#[lang="sized"]
pub trait Sized {}
#[lang="drop"]
pub trait Drop {
fn finalize(&self);
}
// Operator overloading
#[lang="eq"]
pub trait Eq {
fn eq(&self, other: &Self) -> bool;
fn ne(&self, other: &Self) -> bool;
}
#[lang="ord"]
pub trait Ord {
fn lt(&self, other: &Self) -> bool;
fn le(&self, other: &Self) -> bool;
fn ge(&self, other: &Self) -> bool;
fn gt(&self, other: &Self) -> bool;
}
#[lang="add"]
pub trait Add<Rhs,Result> {
fn add(&self, rhs: &Rhs) -> Result;
}
#[lang="sub"]
pub trait Sub<Rhs,Result> {
fn sub(&self, rhs: &Rhs) -> Result;
}
#[lang="mul"]
pub trait Mul<Rhs,Result> {
fn mul(&self, rhs: &Rhs) -> Result;
}
#[lang="div"]
pub trait Div<Rhs,Result> {
fn div(&self, rhs: &Rhs) -> Result;
}
#[lang="rem"]
pub trait Rem<Rhs,Result> {
fn rem(&self, rhs: &Rhs) -> Result;
}
#[lang="neg"]
pub trait Neg<Rhs,Result> {
fn neg(&self) -> Result;
}
#[lang="not"]
pub trait Not<Rhs,Result> {
fn not(&self) -> Result;
}
#[lang="bitand"]
pub trait BitAnd<Rhs,Result> {
fn bitand(&self, rhs: &Rhs) -> Result;
}
#[lang="bitor"]
pub trait BitOr<Rhs,Result> {
fn bitor(&self, rhs: &Rhs) -> Result;
}
#[lang="bitxor"]
pub trait BitXor<Rhs,Result> {
fn bitxor(&self, rhs: &Rhs) -> Result;
}
#[lang="shl"]
pub trait Shl<Rhs,Result> {
fn shl(&self, rhs: &Rhs) -> Result;
}
#[lang="shr"]
pub trait Shr<Rhs,Result> {
fn shr(&self, rhs: &Rhs) -> Result;
}
#[lang="index"]
pub trait Index<Index,Result> {
fn index(&self, rhs: &Index) -> Result;
}
// String utilities
#[lang="str_eq"]
#[fixed_stack_segment]
pub fn str_eq(a: &str, b: &str) -> bool {
unsafe {
let (aptr, alen): (*u8, uint) = transmute(a);
let (bptr, blen): (*u8, uint) = transmute(b);
if alen != blen
|
memcmp(aptr, bptr, alen - 1) == 0
}
}
// FIXME(pcwalton): This function is legacy junk.
#[lang="uniq_str_eq"]
pub fn uniq_str_eq(a: &~str, b: &~str) -> bool {
str_eq(*a, *b)
}
struct StringRepr {
fill: uint,
alloc: uint,
}
// FIXME(pcwalton): This function should not be necessary, I don't think.
#[lang="strdup_uniq"]
#[fixed_stack_segment]
pub unsafe fn strdup_uniq(ptr: *u8, len: uint) -> ~str {
let size = size_of::<StringRepr>() + len + 1;
let string: *mut StringRepr = transmute(exchange_malloc(transmute(0),
size));
(*string).fill = len + 1;
(*string).alloc = len + 1;
let mut data_ptr: uint = transmute(string);
data_ptr += size_of::<StringRepr>();
let data_ptr: *mut u8 = transmute(data_ptr);
memcpy(data_ptr, ptr, len + 1);
transmute(string)
}
// Legacy junk
#[lang="log_type"]
pub fn log_type<T>(_: u32, _: &T) {
// FIXME: This function should not be a lang item.
}
#[lang="annihilate"]
pub unsafe fn annihilate() {}
// Failure
#[lang="fail_"]
#[fixed_stack_segment]
pub fn fail(_: *i8, _: *i8, _: uint) -> ! {
unsafe {
abort()
}
}
#[lang="fail_bounds_check"]
#[fixed_stack_segment]
pub fn fail_bounds_check(_: *i8, _: uint, _: uint, _: uint) {
unsafe {
abort()
}
}
// Memory allocation
// FIXME: So grotesquely inefficient.
struct Header {
minus_one: uint, // Must be -1.
type_desc: *i8,
null_0: uint, // Must be null.
null_1: uint, // Must be null.
}
// FIXME: This is horrendously inefficient.
#[lang="exchange_malloc"]
#[fixed_stack_segment]
pub unsafe fn exchange_malloc(type_desc: *i8, size: uint) -> *i8 {
let alloc: *mut Header = transmute(malloc(size_of::<Header>() + size));
(*alloc).minus_one = -1;
(*alloc).type_desc = type_desc;
(*alloc).null_0 = 0;
(*alloc).null_1 = 0;
transmute(alloc)
}
#[lang="exchange_free"]
#[fixed_stack_segment]
pub unsafe fn exchange_free(alloc: *i8) {
free(transmute(alloc))
}
// Entry point
// TODO(pcwalton): Stash argc and argv somewhere. Probably needs to wait on
// global variables.
#[lang="start"]
pub fn start(main: *u8, _: int, _: **i8, _: *u8) -> int {
unsafe {
let main: extern "Rust" fn() = transmute(main);
main();
0
}
}
// The nonexistent garbage collector
#[lang="malloc"]
#[fixed_stack_segment]
pub unsafe fn gc_malloc(_: *i8, _: uint) -> *i8 {
abort()
}
#[lang="free"]
#[fixed_stack_segment]
pub unsafe fn gc_free(_: *i8) {
abort()
}
#[lang="borrow_as_imm"]
#[fixed_stack_segment]
pub unsafe fn borrow_as_imm(_: *u8, _: *i8, _: uint) -> uint {
abort()
}
#[lang="borrow_as_mut"]
#[fixed_stack_segment]
pub unsafe fn borrow_as_mut(_: *u8, _: *i8, _: uint) -> uint {
abort()
}
#[lang="record_borrow"]
#[fixed_stack_segment]
pub unsafe fn record_borrow(_: *u8, _: uint, _: *i8, _: uint) {
abort()
}
#[lang="unrecord_borrow"]
#[fixed_stack_segment]
pub unsafe fn unrecord_borrow(_: *u8, _: uint, _: *i8, _: uint) {
abort()
}
#[lang="return_to_mut"]
#[fixed_stack_segment]
pub unsafe fn return_to_mut(_: *u8, _: uint, _: *i8, _: uint) {
abort()
}
#[lang="check_not_borrowed"]
#[fixed_stack_segment]
pub unsafe fn check_not_borrowed(_: *u8, _: *i8, _: uint) {
abort()
}
// libc dependencies
extern {
#[fast_ffi]
pub fn malloc(size: uint) -> *u8;
#[fast_ffi]
pub fn free(ptr: *u8);
#[fast_ffi]
pub fn abort() -> !;
#[fast_ffi]
pub fn memcpy(dest: *mut u8, src: *u8, size: uint) -> *u8;
#[fast_ffi]
pub fn memcmp(a: *u8, b: *u8, size: uint) -> i32;
}
// Rust intrinsic dependencies
extern "rust-intrinsic" {
pub fn transmute<T,U>(val: T) -> U;
pub fn size_of<T>() -> uint;
}
|
{
return false
}
|
conditional_block
|
zero.rs
|
// Copyright (c) 2006-2009 Graydon Hoare
// Copyright (c) 2009-2013 Mozilla Foundation
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// zero.rs
//
// Minimal definitions of the core primitives in Rust. Include this file with
// your project to create a freestanding Rust program that can run on bare
// metal.
//
#[allow(ctypes)];
// Built-in traits
#[lang="copy"]
pub trait Copy {}
#[lang="owned"]
pub trait Owned {}
#[lang="freeze"]
pub trait Freeze {}
#[lang="opaque"]
pub enum Opaque {}
#[lang="ty_desc"]
pub struct TyDesc;
#[lang="ty_visitor"]
pub trait TyVisitor {}
#[lang="closure_exchange_malloc"]
pub trait ClosureExchangeMalloc {}
#[lang="send"]
pub trait Send {}
#[lang="sized"]
pub trait Sized {}
#[lang="drop"]
pub trait Drop {
fn finalize(&self);
}
// Operator overloading
#[lang="eq"]
pub trait Eq {
fn eq(&self, other: &Self) -> bool;
fn ne(&self, other: &Self) -> bool;
}
#[lang="ord"]
pub trait Ord {
fn lt(&self, other: &Self) -> bool;
fn le(&self, other: &Self) -> bool;
fn ge(&self, other: &Self) -> bool;
fn gt(&self, other: &Self) -> bool;
}
#[lang="add"]
pub trait Add<Rhs,Result> {
fn add(&self, rhs: &Rhs) -> Result;
}
#[lang="sub"]
pub trait Sub<Rhs,Result> {
fn sub(&self, rhs: &Rhs) -> Result;
}
#[lang="mul"]
pub trait Mul<Rhs,Result> {
fn mul(&self, rhs: &Rhs) -> Result;
}
#[lang="div"]
pub trait Div<Rhs,Result> {
fn div(&self, rhs: &Rhs) -> Result;
}
#[lang="rem"]
pub trait Rem<Rhs,Result> {
fn rem(&self, rhs: &Rhs) -> Result;
}
#[lang="neg"]
pub trait Neg<Rhs,Result> {
fn neg(&self) -> Result;
}
#[lang="not"]
pub trait Not<Rhs,Result> {
fn not(&self) -> Result;
}
#[lang="bitand"]
pub trait BitAnd<Rhs,Result> {
fn bitand(&self, rhs: &Rhs) -> Result;
}
#[lang="bitor"]
pub trait BitOr<Rhs,Result> {
fn bitor(&self, rhs: &Rhs) -> Result;
}
#[lang="bitxor"]
pub trait BitXor<Rhs,Result> {
fn bitxor(&self, rhs: &Rhs) -> Result;
}
#[lang="shl"]
pub trait Shl<Rhs,Result> {
fn shl(&self, rhs: &Rhs) -> Result;
}
#[lang="shr"]
pub trait Shr<Rhs,Result> {
fn shr(&self, rhs: &Rhs) -> Result;
}
#[lang="index"]
pub trait Index<Index,Result> {
fn index(&self, rhs: &Index) -> Result;
}
// String utilities
#[lang="str_eq"]
#[fixed_stack_segment]
pub fn str_eq(a: &str, b: &str) -> bool {
unsafe {
let (aptr, alen): (*u8, uint) = transmute(a);
let (bptr, blen): (*u8, uint) = transmute(b);
if alen != blen {
return false
}
memcmp(aptr, bptr, alen - 1) == 0
}
}
// FIXME(pcwalton): This function is legacy junk.
#[lang="uniq_str_eq"]
pub fn uniq_str_eq(a: &~str, b: &~str) -> bool {
str_eq(*a, *b)
}
struct StringRepr {
fill: uint,
alloc: uint,
}
// FIXME(pcwalton): This function should not be necessary, I don't think.
#[lang="strdup_uniq"]
#[fixed_stack_segment]
pub unsafe fn strdup_uniq(ptr: *u8, len: uint) -> ~str
|
// Legacy junk
#[lang="log_type"]
pub fn log_type<T>(_: u32, _: &T) {
// FIXME: This function should not be a lang item.
}
#[lang="annihilate"]
pub unsafe fn annihilate() {}
// Failure
#[lang="fail_"]
#[fixed_stack_segment]
pub fn fail(_: *i8, _: *i8, _: uint) -> ! {
unsafe {
abort()
}
}
#[lang="fail_bounds_check"]
#[fixed_stack_segment]
pub fn fail_bounds_check(_: *i8, _: uint, _: uint, _: uint) {
unsafe {
abort()
}
}
// Memory allocation
// FIXME: So grotesquely inefficient.
struct Header {
minus_one: uint, // Must be -1.
type_desc: *i8,
null_0: uint, // Must be null.
null_1: uint, // Must be null.
}
// FIXME: This is horrendously inefficient.
#[lang="exchange_malloc"]
#[fixed_stack_segment]
pub unsafe fn exchange_malloc(type_desc: *i8, size: uint) -> *i8 {
let alloc: *mut Header = transmute(malloc(size_of::<Header>() + size));
(*alloc).minus_one = -1;
(*alloc).type_desc = type_desc;
(*alloc).null_0 = 0;
(*alloc).null_1 = 0;
transmute(alloc)
}
#[lang="exchange_free"]
#[fixed_stack_segment]
pub unsafe fn exchange_free(alloc: *i8) {
free(transmute(alloc))
}
// Entry point
// TODO(pcwalton): Stash argc and argv somewhere. Probably needs to wait on
// global variables.
#[lang="start"]
pub fn start(main: *u8, _: int, _: **i8, _: *u8) -> int {
unsafe {
let main: extern "Rust" fn() = transmute(main);
main();
0
}
}
// The nonexistent garbage collector
#[lang="malloc"]
#[fixed_stack_segment]
pub unsafe fn gc_malloc(_: *i8, _: uint) -> *i8 {
abort()
}
#[lang="free"]
#[fixed_stack_segment]
pub unsafe fn gc_free(_: *i8) {
abort()
}
#[lang="borrow_as_imm"]
#[fixed_stack_segment]
pub unsafe fn borrow_as_imm(_: *u8, _: *i8, _: uint) -> uint {
abort()
}
#[lang="borrow_as_mut"]
#[fixed_stack_segment]
pub unsafe fn borrow_as_mut(_: *u8, _: *i8, _: uint) -> uint {
abort()
}
#[lang="record_borrow"]
#[fixed_stack_segment]
pub unsafe fn record_borrow(_: *u8, _: uint, _: *i8, _: uint) {
abort()
}
#[lang="unrecord_borrow"]
#[fixed_stack_segment]
pub unsafe fn unrecord_borrow(_: *u8, _: uint, _: *i8, _: uint) {
abort()
}
#[lang="return_to_mut"]
#[fixed_stack_segment]
pub unsafe fn return_to_mut(_: *u8, _: uint, _: *i8, _: uint) {
abort()
}
#[lang="check_not_borrowed"]
#[fixed_stack_segment]
pub unsafe fn check_not_borrowed(_: *u8, _: *i8, _: uint) {
abort()
}
// libc dependencies
extern {
#[fast_ffi]
pub fn malloc(size: uint) -> *u8;
#[fast_ffi]
pub fn free(ptr: *u8);
#[fast_ffi]
pub fn abort() -> !;
#[fast_ffi]
pub fn memcpy(dest: *mut u8, src: *u8, size: uint) -> *u8;
#[fast_ffi]
pub fn memcmp(a: *u8, b: *u8, size: uint) -> i32;
}
// Rust intrinsic dependencies
extern "rust-intrinsic" {
pub fn transmute<T,U>(val: T) -> U;
pub fn size_of<T>() -> uint;
}
|
{
let size = size_of::<StringRepr>() + len + 1;
let string: *mut StringRepr = transmute(exchange_malloc(transmute(0),
size));
(*string).fill = len + 1;
(*string).alloc = len + 1;
let mut data_ptr: uint = transmute(string);
data_ptr += size_of::<StringRepr>();
let data_ptr: *mut u8 = transmute(data_ptr);
memcpy(data_ptr, ptr, len + 1);
transmute(string)
}
|
identifier_body
|
zero.rs
|
// Copyright (c) 2006-2009 Graydon Hoare
// Copyright (c) 2009-2013 Mozilla Foundation
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// zero.rs
//
// Minimal definitions of the core primitives in Rust. Include this file with
// your project to create a freestanding Rust program that can run on bare
// metal.
//
#[allow(ctypes)];
// Built-in traits
#[lang="copy"]
pub trait Copy {}
#[lang="owned"]
pub trait Owned {}
#[lang="freeze"]
pub trait Freeze {}
#[lang="opaque"]
pub enum Opaque {}
#[lang="ty_desc"]
pub struct TyDesc;
#[lang="ty_visitor"]
pub trait TyVisitor {}
#[lang="closure_exchange_malloc"]
pub trait ClosureExchangeMalloc {}
#[lang="send"]
pub trait Send {}
#[lang="sized"]
pub trait Sized {}
#[lang="drop"]
pub trait Drop {
fn finalize(&self);
}
// Operator overloading
#[lang="eq"]
pub trait Eq {
fn eq(&self, other: &Self) -> bool;
fn ne(&self, other: &Self) -> bool;
}
#[lang="ord"]
pub trait Ord {
fn lt(&self, other: &Self) -> bool;
fn le(&self, other: &Self) -> bool;
fn ge(&self, other: &Self) -> bool;
fn gt(&self, other: &Self) -> bool;
}
#[lang="add"]
pub trait Add<Rhs,Result> {
fn add(&self, rhs: &Rhs) -> Result;
}
#[lang="sub"]
pub trait Sub<Rhs,Result> {
fn sub(&self, rhs: &Rhs) -> Result;
}
#[lang="mul"]
pub trait Mul<Rhs,Result> {
fn mul(&self, rhs: &Rhs) -> Result;
}
#[lang="div"]
pub trait Div<Rhs,Result> {
fn div(&self, rhs: &Rhs) -> Result;
}
#[lang="rem"]
pub trait Rem<Rhs,Result> {
fn rem(&self, rhs: &Rhs) -> Result;
}
#[lang="neg"]
pub trait Neg<Rhs,Result> {
fn neg(&self) -> Result;
}
#[lang="not"]
pub trait Not<Rhs,Result> {
fn not(&self) -> Result;
}
#[lang="bitand"]
pub trait BitAnd<Rhs,Result> {
fn bitand(&self, rhs: &Rhs) -> Result;
}
#[lang="bitor"]
pub trait BitOr<Rhs,Result> {
fn bitor(&self, rhs: &Rhs) -> Result;
}
#[lang="bitxor"]
pub trait BitXor<Rhs,Result> {
fn bitxor(&self, rhs: &Rhs) -> Result;
}
#[lang="shl"]
pub trait Shl<Rhs,Result> {
fn shl(&self, rhs: &Rhs) -> Result;
}
#[lang="shr"]
pub trait Shr<Rhs,Result> {
fn shr(&self, rhs: &Rhs) -> Result;
}
#[lang="index"]
pub trait Index<Index,Result> {
fn index(&self, rhs: &Index) -> Result;
}
// String utilities
#[lang="str_eq"]
#[fixed_stack_segment]
pub fn str_eq(a: &str, b: &str) -> bool {
unsafe {
let (aptr, alen): (*u8, uint) = transmute(a);
let (bptr, blen): (*u8, uint) = transmute(b);
if alen != blen {
return false
}
memcmp(aptr, bptr, alen - 1) == 0
}
}
// FIXME(pcwalton): This function is legacy junk.
#[lang="uniq_str_eq"]
pub fn uniq_str_eq(a: &~str, b: &~str) -> bool {
str_eq(*a, *b)
}
struct StringRepr {
fill: uint,
alloc: uint,
}
// FIXME(pcwalton): This function should not be necessary, I don't think.
#[lang="strdup_uniq"]
#[fixed_stack_segment]
pub unsafe fn strdup_uniq(ptr: *u8, len: uint) -> ~str {
let size = size_of::<StringRepr>() + len + 1;
let string: *mut StringRepr = transmute(exchange_malloc(transmute(0),
size));
(*string).fill = len + 1;
(*string).alloc = len + 1;
let mut data_ptr: uint = transmute(string);
data_ptr += size_of::<StringRepr>();
let data_ptr: *mut u8 = transmute(data_ptr);
memcpy(data_ptr, ptr, len + 1);
transmute(string)
}
// Legacy junk
#[lang="log_type"]
pub fn log_type<T>(_: u32, _: &T) {
// FIXME: This function should not be a lang item.
}
#[lang="annihilate"]
pub unsafe fn annihilate() {}
// Failure
#[lang="fail_"]
#[fixed_stack_segment]
pub fn fail(_: *i8, _: *i8, _: uint) -> ! {
unsafe {
abort()
}
}
#[lang="fail_bounds_check"]
#[fixed_stack_segment]
pub fn fail_bounds_check(_: *i8, _: uint, _: uint, _: uint) {
unsafe {
abort()
}
}
// Memory allocation
// FIXME: So grotesquely inefficient.
struct Header {
minus_one: uint, // Must be -1.
type_desc: *i8,
null_0: uint, // Must be null.
null_1: uint, // Must be null.
}
// FIXME: This is horrendously inefficient.
#[lang="exchange_malloc"]
#[fixed_stack_segment]
pub unsafe fn
|
(type_desc: *i8, size: uint) -> *i8 {
let alloc: *mut Header = transmute(malloc(size_of::<Header>() + size));
(*alloc).minus_one = -1;
(*alloc).type_desc = type_desc;
(*alloc).null_0 = 0;
(*alloc).null_1 = 0;
transmute(alloc)
}
#[lang="exchange_free"]
#[fixed_stack_segment]
pub unsafe fn exchange_free(alloc: *i8) {
free(transmute(alloc))
}
// Entry point
// TODO(pcwalton): Stash argc and argv somewhere. Probably needs to wait on
// global variables.
#[lang="start"]
pub fn start(main: *u8, _: int, _: **i8, _: *u8) -> int {
unsafe {
let main: extern "Rust" fn() = transmute(main);
main();
0
}
}
// The nonexistent garbage collector
#[lang="malloc"]
#[fixed_stack_segment]
pub unsafe fn gc_malloc(_: *i8, _: uint) -> *i8 {
abort()
}
#[lang="free"]
#[fixed_stack_segment]
pub unsafe fn gc_free(_: *i8) {
abort()
}
#[lang="borrow_as_imm"]
#[fixed_stack_segment]
pub unsafe fn borrow_as_imm(_: *u8, _: *i8, _: uint) -> uint {
abort()
}
#[lang="borrow_as_mut"]
#[fixed_stack_segment]
pub unsafe fn borrow_as_mut(_: *u8, _: *i8, _: uint) -> uint {
abort()
}
#[lang="record_borrow"]
#[fixed_stack_segment]
pub unsafe fn record_borrow(_: *u8, _: uint, _: *i8, _: uint) {
abort()
}
#[lang="unrecord_borrow"]
#[fixed_stack_segment]
pub unsafe fn unrecord_borrow(_: *u8, _: uint, _: *i8, _: uint) {
abort()
}
#[lang="return_to_mut"]
#[fixed_stack_segment]
pub unsafe fn return_to_mut(_: *u8, _: uint, _: *i8, _: uint) {
abort()
}
#[lang="check_not_borrowed"]
#[fixed_stack_segment]
pub unsafe fn check_not_borrowed(_: *u8, _: *i8, _: uint) {
abort()
}
// libc dependencies
extern {
#[fast_ffi]
pub fn malloc(size: uint) -> *u8;
#[fast_ffi]
pub fn free(ptr: *u8);
#[fast_ffi]
pub fn abort() -> !;
#[fast_ffi]
pub fn memcpy(dest: *mut u8, src: *u8, size: uint) -> *u8;
#[fast_ffi]
pub fn memcmp(a: *u8, b: *u8, size: uint) -> i32;
}
// Rust intrinsic dependencies
extern "rust-intrinsic" {
pub fn transmute<T,U>(val: T) -> U;
pub fn size_of<T>() -> uint;
}
|
exchange_malloc
|
identifier_name
|
check_button.rs
|
// This file was generated by gir (17af302) from gir-files (11e0e6d)
// DO NOT EDIT
use Actionable;
use Bin;
use Buildable;
use Button;
use Container;
use ToggleButton;
use Widget;
use ffi;
use glib::object::Downcast;
use glib::object::IsA;
use glib::translate::*;
glib_wrapper! {
pub struct CheckButton(Object<ffi::GtkCheckButton>): Widget, Container, Bin, Button, ToggleButton, Actionable, Buildable;
match fn {
get_type => || ffi::gtk_check_button_get_type(),
}
}
impl CheckButton {
pub fn new() -> CheckButton {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_check_button_new()).downcast_unchecked()
}
}
pub fn new_with_label(label: &str) -> CheckButton {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_check_button_new_with_label(label.to_glib_none().0)).downcast_unchecked()
}
}
pub fn
|
(label: &str) -> CheckButton {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_check_button_new_with_mnemonic(label.to_glib_none().0)).downcast_unchecked()
}
}
}
pub trait CheckButtonExt {}
impl<O: IsA<CheckButton>> CheckButtonExt for O {}
|
new_with_mnemonic
|
identifier_name
|
check_button.rs
|
// This file was generated by gir (17af302) from gir-files (11e0e6d)
// DO NOT EDIT
use Actionable;
use Bin;
use Buildable;
use Button;
use Container;
use ToggleButton;
use Widget;
use ffi;
use glib::object::Downcast;
use glib::object::IsA;
use glib::translate::*;
glib_wrapper! {
pub struct CheckButton(Object<ffi::GtkCheckButton>): Widget, Container, Bin, Button, ToggleButton, Actionable, Buildable;
match fn {
get_type => || ffi::gtk_check_button_get_type(),
}
}
impl CheckButton {
pub fn new() -> CheckButton {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_check_button_new()).downcast_unchecked()
}
}
pub fn new_with_label(label: &str) -> CheckButton {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_check_button_new_with_label(label.to_glib_none().0)).downcast_unchecked()
}
|
pub fn new_with_mnemonic(label: &str) -> CheckButton {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_check_button_new_with_mnemonic(label.to_glib_none().0)).downcast_unchecked()
}
}
}
pub trait CheckButtonExt {}
impl<O: IsA<CheckButton>> CheckButtonExt for O {}
|
}
|
random_line_split
|
check_button.rs
|
// This file was generated by gir (17af302) from gir-files (11e0e6d)
// DO NOT EDIT
use Actionable;
use Bin;
use Buildable;
use Button;
use Container;
use ToggleButton;
use Widget;
use ffi;
use glib::object::Downcast;
use glib::object::IsA;
use glib::translate::*;
glib_wrapper! {
pub struct CheckButton(Object<ffi::GtkCheckButton>): Widget, Container, Bin, Button, ToggleButton, Actionable, Buildable;
match fn {
get_type => || ffi::gtk_check_button_get_type(),
}
}
impl CheckButton {
pub fn new() -> CheckButton
|
pub fn new_with_label(label: &str) -> CheckButton {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_check_button_new_with_label(label.to_glib_none().0)).downcast_unchecked()
}
}
pub fn new_with_mnemonic(label: &str) -> CheckButton {
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_check_button_new_with_mnemonic(label.to_glib_none().0)).downcast_unchecked()
}
}
}
pub trait CheckButtonExt {}
impl<O: IsA<CheckButton>> CheckButtonExt for O {}
|
{
assert_initialized_main_thread!();
unsafe {
Widget::from_glib_none(ffi::gtk_check_button_new()).downcast_unchecked()
}
}
|
identifier_body
|
lib.rs
|
//! Linear Algebra eXtension (LAX)
//! ===============================
//!
//! ndarray-free safe Rust wrapper for LAPACK FFI
//!
//! Linear equation, Inverse matrix, Condition number
//! --------------------------------------------------
//!
//! Depending on the properties of $A$, several types of triangular factorization are used:
//!
//! - LU-decomposition for general matrix
//! - $PA = LU$, where $L$ is lower matrix, $U$ is upper matrix, and $P$ is permutation matrix
//! - Bunch-Kaufman diagonal pivoting method for nonpositive-definite Hermitian matrix
//! - $A = U D U^\dagger$, where $U$ is upper matrix,
//! $D$ is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks.
//!
//! | matrix type                     | Triangular factorization (TRF) | Solve (TRS) | Inverse matrix (TRI) | Reciprocal condition number (CON) |
//! |:--------------------------------|:------------------------------|:------------|:---------------------|:----------------------------------|
//! | General (GE) | [lu] | [solve] | [inv] | [rcond] |
//! | Symmetric (SY) / Hermitian (HE) | [bk] | [solveh] | [invh] | - |
//!
//! [lu]: solve/trait.Solve_.html#tymethod.lu
//! [solve]: solve/trait.Solve_.html#tymethod.solve
//! [inv]: solve/trait.Solve_.html#tymethod.inv
//! [rcond]: solve/trait.Solve_.html#tymethod.rcond
//!
//! [bk]: solveh/trait.Solveh_.html#tymethod.bk
//! [solveh]: solveh/trait.Solveh_.html#tymethod.solveh
//! [invh]: solveh/trait.Solveh_.html#tymethod.invh
//!
//! Eigenvalue Problem
//! -------------------
//!
//! Solve eigenvalue problem for a matrix $A$
//!
//! $$ Av_i = \lambda_i v_i $$
//!
//! or generalized eigenvalue problem
//!
//! $$ Av_i = \lambda_i B v_i $$
//!
//! | matrix type | Eigenvalue (EV) | Generalized Eigenvalue Problem (EG) |
//! |:--------------------------------|:----------------|:------------------------------------|
//! | General (GE) |[eig] | - |
//! | Symmetric (SY) / Hermitian (HE) |[eigh] |[eigh_generalized] |
//!
//! [eig]: eig/trait.Eig_.html#tymethod.eig
//! [eigh]: eigh/trait.Eigh_.html#tymethod.eigh
//! [eigh_generalized]: eigh/trait.Eigh_.html#tymethod.eigh_generalized
//!
//! Singular Value Decomposition (SVD), Least squares problem
//! ----------------------------------------------------------
//!
//! | matrix type  | Singular Value Decomposition (SVD) | SVD with divide-and-conquer (SDD) | Least squares problem (LSD) |
//! |:-------------|:-----------------------------------|:-----------------------------------|:---------------------------|
//! | General (GE) | [svd] | [svddc] | [least_squares] |
//!
//! [svd]: svd/trait.SVD_.html#tymethod.svd
//! [svddc]: svddc/trait.SVDDC_.html#tymethod.svddc
//! [least_squares]: least_squares/trait.LeastSquaresSvdDivideConquer_.html#tymethod.least_squares
#[cfg(any(feature = "intel-mkl-system", feature = "intel-mkl-static"))]
extern crate intel_mkl_src as _src;
#[cfg(any(feature = "openblas-system", feature = "openblas-static"))]
extern crate openblas_src as _src;
#[cfg(any(feature = "netlib-system", feature = "netlib-static"))]
extern crate netlib_src as _src;
pub mod error;
pub mod layout;
mod cholesky;
mod eig;
mod eigh;
mod least_squares;
mod opnorm;
mod qr;
mod rcond;
mod solve;
mod solveh;
mod svd;
mod svddc;
mod triangular;
mod tridiagonal;
pub use self::cholesky::*;
pub use self::eig::*;
pub use self::eigh::*;
pub use self::least_squares::*;
pub use self::opnorm::*;
pub use self::qr::*;
pub use self::rcond::*;
pub use self::solve::*;
pub use self::solveh::*;
pub use self::svd::*;
pub use self::svddc::*;
pub use self::triangular::*;
pub use self::tridiagonal::*;
use cauchy::*;
pub type Pivot = Vec<i32>;
/// Trait for primitive types which implement LAPACK subroutines
pub trait Lapack:
OperatorNorm_
+ QR_
+ SVD_
+ SVDDC_
+ Solve_
+ Solveh_
+ Cholesky_
+ Eig_
+ Eigh_
+ Triangular_
+ Tridiagonal_
+ Rcond_
+ LeastSquaresSvdDivideConquer_
{
}
impl Lapack for f32 {}
impl Lapack for f64 {}
impl Lapack for c32 {}
impl Lapack for c64 {}
/// Upper/Lower specification for several usages
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum UPLO {
Upper = b'U',
Lower = b'L',
}
impl UPLO {
pub fn t(self) -> Self {
match self {
UPLO::Upper => UPLO::Lower,
UPLO::Lower => UPLO::Upper,
}
}
}
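// Note: `t()` returns the UPLO flag of the transposed matrix (the upper
// triangle of $A$ is the lower triangle of $A^T$), so the mapping is an
// involution: `uplo.t().t() == uplo`.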
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum Transpose {
No = b'N',
Transpose = b'T',
Hermite = b'C',
}
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum NormType {
One = b'O',
Infinity = b'I',
Frobenius = b'F',
}
impl NormType {
pub fn
|
(self) -> Self {
match self {
NormType::One => NormType::Infinity,
NormType::Infinity => NormType::One,
NormType::Frobenius => NormType::Frobenius,
}
}
}
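// Why `transpose` swaps One and Infinity: the 1-norm of a matrix (maximum
// absolute column sum) equals the infinity-norm of its transpose (maximum
// absolute row sum), while the Frobenius norm is invariant under
// transposition. This lets condition-number estimators for one norm reuse
// the machinery of the other.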
/// Create a vector without initialization
///
/// Safety
/// ------
/// - Memory is not initialized. Do not read the memory before writing to it.
///
unsafe fn vec_uninit<T: Sized>(n: usize) -> Vec<T> {
let mut v = Vec::with_capacity(n);
v.set_len(n);
v
}
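// Illustrative sketch (hypothetical helpers, not part of this crate's API):
// numerical code that should work for every supported scalar can take a
// single `Lapack` bound instead of naming each type.
#[allow(dead_code)]
fn assert_lapack_scalar<T: Lapack>() {}

#[allow(dead_code)]
fn supported_scalars() {
    // All four scalar types implement the blanket `Lapack` trait above.
    assert_lapack_scalar::<f32>();
    assert_lapack_scalar::<f64>();
    assert_lapack_scalar::<c32>();
    assert_lapack_scalar::<c64>();
}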
|
transpose
|
identifier_name
|
lib.rs
|
//! Linear Algebra eXtension (LAX)
//! ===============================
//!
//! ndarray-free safe Rust wrapper for LAPACK FFI
//!
//! Linear equation, Inverse matrix, Condition number
//! --------------------------------------------------
//!
//! Depending on the properties of $A$, several types of triangular factorization are used:
//!
//! - LU-decomposition for general matrix
//! - $PA = LU$, where $L$ is lower matrix, $U$ is upper matrix, and $P$ is permutation matrix
//! - Bunch-Kaufman diagonal pivoting method for nonpositive-definite Hermitian matrix
//! - $A = U D U^\dagger$, where $U$ is upper matrix,
//! $D$ is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks.
//!
//! | matrix type                     | Triangular factorization (TRF) | Solve (TRS) | Inverse matrix (TRI) | Reciprocal condition number (CON) |
//! |:--------------------------------|:------------------------------|:------------|:---------------------|:----------------------------------|
//! | General (GE) | [lu] | [solve] | [inv] | [rcond] |
//! | Symmetric (SY) / Hermitian (HE) | [bk] | [solveh] | [invh] | - |
//!
//! [lu]: solve/trait.Solve_.html#tymethod.lu
//! [solve]: solve/trait.Solve_.html#tymethod.solve
//! [inv]: solve/trait.Solve_.html#tymethod.inv
//! [rcond]: solve/trait.Solve_.html#tymethod.rcond
//!
//! [bk]: solveh/trait.Solveh_.html#tymethod.bk
//! [solveh]: solveh/trait.Solveh_.html#tymethod.solveh
//! [invh]: solveh/trait.Solveh_.html#tymethod.invh
//!
//! Eigenvalue Problem
//! -------------------
//!
//! Solve eigenvalue problem for a matrix $A$
//!
//! $$ Av_i = \lambda_i v_i $$
//!
//! or generalized eigenvalue problem
//!
//! $$ Av_i = \lambda_i B v_i $$
//!
//! | matrix type | Eigenvalue (EV) | Generalized Eigenvalue Problem (EG) |
//! |:--------------------------------|:----------------|:------------------------------------|
//! | General (GE) |[eig] | - |
//! | Symmetric (SY) / Hermitian (HE) |[eigh] |[eigh_generalized] |
//!
//! [eig]: eig/trait.Eig_.html#tymethod.eig
//! [eigh]: eigh/trait.Eigh_.html#tymethod.eigh
//! [eigh_generalized]: eigh/trait.Eigh_.html#tymethod.eigh_generalized
//!
//! Singular Value Decomposition (SVD), Least squares problem
//! ----------------------------------------------------------
//!
//! | matrix type  | Singular Value Decomposition (SVD) | SVD with divide-and-conquer (SDD) | Least squares problem (LSD) |
//! |:-------------|:-----------------------------------|:-----------------------------------|:---------------------------|
//! | General (GE) | [svd] | [svddc] | [least_squares] |
//!
//! [svd]: svd/trait.SVD_.html#tymethod.svd
//! [svddc]: svddc/trait.SVDDC_.html#tymethod.svddc
//! [least_squares]: least_squares/trait.LeastSquaresSvdDivideConquer_.html#tymethod.least_squares
#[cfg(any(feature = "intel-mkl-system", feature = "intel-mkl-static"))]
extern crate intel_mkl_src as _src;
#[cfg(any(feature = "openblas-system", feature = "openblas-static"))]
extern crate openblas_src as _src;
#[cfg(any(feature = "netlib-system", feature = "netlib-static"))]
extern crate netlib_src as _src;
pub mod error;
pub mod layout;
mod cholesky;
mod eig;
mod eigh;
mod least_squares;
mod opnorm;
mod qr;
mod rcond;
mod solve;
mod solveh;
mod svd;
mod svddc;
mod triangular;
mod tridiagonal;
pub use self::cholesky::*;
pub use self::eig::*;
pub use self::eigh::*;
pub use self::least_squares::*;
pub use self::opnorm::*;
pub use self::qr::*;
pub use self::rcond::*;
pub use self::solve::*;
pub use self::solveh::*;
pub use self::svd::*;
pub use self::svddc::*;
pub use self::triangular::*;
pub use self::tridiagonal::*;
use cauchy::*;
pub type Pivot = Vec<i32>;
/// Trait for primitive types which implement LAPACK subroutines
pub trait Lapack:
OperatorNorm_
+ QR_
+ SVD_
+ SVDDC_
+ Solve_
+ Solveh_
+ Cholesky_
+ Eig_
+ Eigh_
+ Triangular_
+ Tridiagonal_
+ Rcond_
+ LeastSquaresSvdDivideConquer_
{
}
impl Lapack for f32 {}
impl Lapack for f64 {}
impl Lapack for c32 {}
impl Lapack for c64 {}
/// Upper/Lower specification for several usages
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum UPLO {
Upper = b'U',
Lower = b'L',
}
impl UPLO {
pub fn t(self) -> Self {
match self {
UPLO::Upper => UPLO::Lower,
UPLO::Lower => UPLO::Upper,
}
}
}
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum Transpose {
No = b'N',
Transpose = b'T',
Hermite = b'C',
}
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum NormType {
One = b'O',
Infinity = b'I',
Frobenius = b'F',
}
impl NormType {
pub fn transpose(self) -> Self {
match self {
NormType::One => NormType::Infinity,
NormType::Infinity => NormType::One,
NormType::Frobenius => NormType::Frobenius,
}
}
}
/// Create a vector without initialization
///
/// Safety
/// ------
/// - Memory is not initialized. Do not read the memory before writing to it.
///
unsafe fn vec_uninit<T: Sized>(n: usize) -> Vec<T>
|
{
let mut v = Vec::with_capacity(n);
v.set_len(n);
v
}
|
identifier_body
|
|
lib.rs
|
//! Linear Algebra eXtension (LAX)
//! ===============================
//!
//! ndarray-free safe Rust wrapper for LAPACK FFI
//!
//! Linear equation, Inverse matrix, Condition number
//! --------------------------------------------------
//!
//! Depending on the properties of $A$, several types of triangular factorization are used:
//!
//! - LU-decomposition for general matrix
//! - $PA = LU$, where $L$ is lower matrix, $U$ is upper matrix, and $P$ is permutation matrix
//! - Bunch-Kaufman diagonal pivoting method for nonpositive-definite Hermitian matrix
//! - $A = U D U^\dagger$, where $U$ is upper matrix,
//! $D$ is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks.
//!
//! | matrix type                     | Triangular factorization (TRF) | Solve (TRS) | Inverse matrix (TRI) | Reciprocal condition number (CON) |
//! |:--------------------------------|:------------------------------|:------------|:---------------------|:----------------------------------|
//! | General (GE) | [lu] | [solve] | [inv] | [rcond] |
//! | Symmetric (SY) / Hermitian (HE) | [bk] | [solveh] | [invh] | - |
//!
//! [lu]: solve/trait.Solve_.html#tymethod.lu
//! [solve]: solve/trait.Solve_.html#tymethod.solve
//! [inv]: solve/trait.Solve_.html#tymethod.inv
//! [rcond]: solve/trait.Solve_.html#tymethod.rcond
//!
//! [bk]: solveh/trait.Solveh_.html#tymethod.bk
//! [solveh]: solveh/trait.Solveh_.html#tymethod.solveh
//! [invh]: solveh/trait.Solveh_.html#tymethod.invh
//!
//! Eigenvalue Problem
//! -------------------
//!
//! Solve eigenvalue problem for a matrix $A$
//!
//! $$ Av_i = \lambda_i v_i $$
//!
//! or generalized eigenvalue problem
//!
//! $$ Av_i = \lambda_i B v_i $$
//!
//! | matrix type | Eigenvalue (EV) | Generalized Eigenvalue Problem (EG) |
//! |:--------------------------------|:----------------|:------------------------------------|
//! | General (GE) |[eig] | - |
//! | Symmetric (SY) / Hermitian (HE) |[eigh] |[eigh_generalized] |
//!
//! [eig]: eig/trait.Eig_.html#tymethod.eig
//! [eigh]: eigh/trait.Eigh_.html#tymethod.eigh
//! [eigh_generalized]: eigh/trait.Eigh_.html#tymethod.eigh_generalized
//!
//! Singular Value Decomposition (SVD), Least squares problem
//! ----------------------------------------------------------
//!
//! | matrix type  | Singular Value Decomposition (SVD) | SVD with divide-and-conquer (SDD) | Least squares problem (LSD) |
//! |:-------------|:-----------------------------------|:-----------------------------------|:---------------------------|
//! | General (GE) | [svd] | [svddc] | [least_squares] |
//!
//! [svd]: svd/trait.SVD_.html#tymethod.svd
//! [svddc]: svddc/trait.SVDDC_.html#tymethod.svddc
//! [least_squares]: least_squares/trait.LeastSquaresSvdDivideConquer_.html#tymethod.least_squares
#[cfg(any(feature = "intel-mkl-system", feature = "intel-mkl-static"))]
extern crate intel_mkl_src as _src;
#[cfg(any(feature = "openblas-system", feature = "openblas-static"))]
extern crate openblas_src as _src;
#[cfg(any(feature = "netlib-system", feature = "netlib-static"))]
extern crate netlib_src as _src;
pub mod error;
pub mod layout;
mod cholesky;
mod eig;
mod eigh;
mod least_squares;
mod opnorm;
mod qr;
mod rcond;
mod solve;
mod solveh;
mod svd;
mod svddc;
mod triangular;
mod tridiagonal;
pub use self::cholesky::*;
pub use self::eig::*;
pub use self::eigh::*;
pub use self::least_squares::*;
pub use self::opnorm::*;
pub use self::qr::*;
pub use self::rcond::*;
pub use self::solve::*;
pub use self::solveh::*;
pub use self::svd::*;
pub use self::svddc::*;
pub use self::triangular::*;
pub use self::tridiagonal::*;
use cauchy::*;
pub type Pivot = Vec<i32>;
/// Trait for primitive types which implement LAPACK subroutines
pub trait Lapack:
OperatorNorm_
+ QR_
+ SVD_
+ SVDDC_
+ Solve_
+ Solveh_
+ Cholesky_
+ Eig_
+ Eigh_
+ Triangular_
+ Tridiagonal_
+ Rcond_
+ LeastSquaresSvdDivideConquer_
{
}
impl Lapack for f32 {}
impl Lapack for f64 {}
impl Lapack for c32 {}
impl Lapack for c64 {}
/// Upper/Lower specification for several usages
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum UPLO {
Upper = b'U',
Lower = b'L',
}
impl UPLO {
pub fn t(self) -> Self {
match self {
UPLO::Upper => UPLO::Lower,
UPLO::Lower => UPLO::Upper,
}
}
}
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum Transpose {
No = b'N',
Transpose = b'T',
Hermite = b'C',
}
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum NormType {
One = b'O',
|
impl NormType {
pub fn transpose(self) -> Self {
match self {
NormType::One => NormType::Infinity,
NormType::Infinity => NormType::One,
NormType::Frobenius => NormType::Frobenius,
}
}
}
/// Create a vector without initialization
///
/// Safety
/// ------
/// - Memory is not initialized. Do not read the memory before writing to it.
///
unsafe fn vec_uninit<T: Sized>(n: usize) -> Vec<T> {
let mut v = Vec::with_capacity(n);
v.set_len(n);
v
}
|
Infinity = b'I',
Frobenius = b'F',
}
|
random_line_split
|
mem.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Basic functions for dealing with memory
//!
//! This module contains functions for querying the size and alignment of
//! types, initializing and manipulating memory.
#![stable(feature = "rust1", since = "1.0.0")]
use marker::Sized;
use intrinsics;
use ptr;
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::transmute;
/// Leaks a value into the void, consuming ownership and never running its
/// destructor.
///
/// This function will take ownership of its argument, but is distinct from the
/// `mem::drop` function in that it **does not run the destructor**, leaking the
/// value and any resources that it owns.
///
/// There are only a few reasons to use this function. They mainly come
/// up in unsafe code or FFI code.
///
/// * You have an uninitialized value, perhaps for performance reasons, and
/// need to prevent the destructor from running on it.
/// * You have two copies of a value (like when writing something like
/// [`mem::swap`][swap]), but need the destructor to only run once to
/// prevent a double `free`.
/// * Transferring resources across [FFI][ffi] boundaries.
///
/// [swap]: fn.swap.html
/// [ffi]: ../../book/ffi.html
///
/// # Safety
///
/// This function is not marked as `unsafe` as Rust does not guarantee that the
/// `Drop` implementation for a value will always run. Note, however, that
/// leaking resources such as memory or I/O objects is likely not desired, so
/// this function is only recommended for specialized use cases.
///
/// The safety of this function implies that when writing `unsafe` code
/// yourself, care must be taken when leveraging a destructor that is required to
/// run to preserve memory safety. There are known situations where the
/// destructor may not run (such as if ownership of the object with the
/// destructor is returned) which must be taken into account.
///
/// # Other forms of Leakage
///
/// It's important to point out that this function is not the only method by
/// which a value can be leaked in safe Rust code. Other known sources of
/// leakage are:
///
/// * `Rc` and `Arc` cycles
/// * `mpsc::{Sender, Receiver}` cycles (they use `Arc` internally)
/// * Panicking destructors are likely to leak local resources
///
/// # Example
///
/// Leak some heap memory by never deallocating it:
///
/// ```rust
/// use std::mem;
///
/// let heap_memory = Box::new(3);
/// mem::forget(heap_memory);
/// ```
///
/// Leak an I/O object, never closing the file:
///
/// ```rust,no_run
/// use std::mem;
/// use std::fs::File;
///
/// let file = File::open("foo.txt").unwrap();
/// mem::forget(file);
/// ```
///
/// The `mem::swap` function uses `mem::forget` to good effect:
///
/// ```rust
/// use std::mem;
/// use std::ptr;
///
/// fn swap<T>(x: &mut T, y: &mut T) {
/// unsafe {
/// // Give ourselves some scratch space to work with
/// let mut t: T = mem::uninitialized();
///
/// // Perform the swap, `&mut` pointers never alias
/// ptr::copy_nonoverlapping(&*x, &mut t, 1);
/// ptr::copy_nonoverlapping(&*y, x, 1);
/// ptr::copy_nonoverlapping(&t, y, 1);
///
/// // y and t now point to the same thing, but we need to completely
/// // forget `t` because we do not want to run the destructor for `T`
/// // on its value, which is still owned somewhere outside this function.
/// mem::forget(t);
/// }
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn forget<T>(t: T) {
unsafe { intrinsics::forget(t) }
}
/// Returns the size of a type in bytes.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::size_of::<i32>());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn size_of<T>() -> usize {
unsafe { intrinsics::size_of::<T>() }
}
/// Returns the size of the type that `val` points to in bytes.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::size_of_val(&5i32));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn size_of_val<T: ?Sized>(val: &T) -> usize {
unsafe { intrinsics::size_of_val(val) }
}
/// Returns the ABI-required minimum alignment of a type
///
/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::min_align_of::<i32>());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(reason = "use `align_of` instead", since = "1.2.0")]
pub fn min_align_of<T>() -> usize {
unsafe { intrinsics::min_align_of::<T>() }
}
/// Returns the ABI-required minimum alignment of the type of the value that `val` points to
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::min_align_of_val(&5i32));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(reason = "use `align_of_val` instead", since = "1.2.0")]
pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize {
unsafe { intrinsics::min_align_of_val(val) }
}
/// Returns the alignment in memory for a type.
///
/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::align_of::<i32>());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn align_of<T>() -> usize {
unsafe { intrinsics::min_align_of::<T>() }
}
/// Returns the ABI-required minimum alignment of the type of the value that `val` points to
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::align_of_val(&5i32));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn align_of_val<T: ?Sized>(val: &T) -> usize {
unsafe { intrinsics::min_align_of_val(val) }
}
/// Creates a value initialized to zero.
///
/// This function is similar to allocating space for a local variable and zeroing it out (an unsafe
/// operation).
///
/// Care must be taken when using this function, if the type `T` has a destructor and the value
/// falls out of scope (due to unwinding or returning) before being initialized, then the
/// destructor will run on zeroed data, likely leading to crashes.
///
/// This is useful for FFI functions sometimes, but should generally be avoided.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// let x: i32 = unsafe { mem::zeroed() };
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn zeroed<T>() -> T {
intrinsics::init()
}
/// Creates a value initialized to an unspecified series of bytes.
///
/// The byte sequence usually indicates that the value at the memory
/// in question has been dropped. Thus, *if* T carries a drop flag,
/// any associated destructor will not be run when the value falls out
/// of scope.
///
/// Some code at one time used the `zeroed` function above to
/// accomplish this goal.
///
/// This function is expected to be deprecated with the transition
/// to non-zeroing drop.
#[inline]
#[unstable(feature = "filling_drop", issue = "5016")]
pub unsafe fn dropped<T>() -> T {
#[inline(always)]
unsafe fn dropped_impl<T>() -> T { intrinsics::init_dropped() }
dropped_impl()
}
/// Bypasses Rust's normal memory-initialization checks by pretending to
/// produce a value of type T, while doing nothing at all.
///
/// **This is incredibly dangerous, and should not be done lightly. Deeply
/// consider initializing your memory with a default value instead.**
///
/// This is useful for FFI functions and initializing arrays sometimes,
/// but should generally be avoided.
///
/// # Undefined Behavior
///
/// It is Undefined Behavior to read uninitialized memory. Even just an
/// uninitialized boolean. For instance, if you branch on the value of such
/// a boolean your program may take one, both, or neither of the branches.
///
/// Note that this often also includes *writing* to the uninitialized value.
/// Rust believes the value is initialized, and will therefore try to Drop
/// the uninitialized value and its fields if you try to overwrite the memory
/// in a normal manner. The only way to safely initialize an arbitrary
/// uninitialized value is with one of the `ptr` functions: `write`, `copy`, or
/// `copy_nonoverlapping`. This isn't necessary if `T` is a primitive
/// or otherwise only contains types that don't implement Drop.
///
/// If this value *does* need some kind of Drop, it must be initialized before
/// it goes out of scope (and therefore would be dropped). Note that this
/// includes a `panic` occurring and unwinding the stack suddenly.
///
/// # Examples
///
/// Here's how to safely initialize an array of `Vec`s.
///
/// ```
/// use std::mem;
/// use std::ptr;
///
/// // Only declare the array. This safely leaves it
/// // uninitialized in a way that Rust will track for us.
/// // However we can't initialize it element-by-element
/// // safely, and we can't use the `[value; 1000]`
/// // constructor because it only works with `Copy` data.
/// let mut data: [Vec<u32>; 1000];
///
/// unsafe {
/// // So we need to do this to initialize it.
/// data = mem::uninitialized();
///
/// // DANGER ZONE: if anything panics or otherwise
/// // incorrectly reads the array here, we will have
/// // Undefined Behavior.
///
/// // It's ok to mutably iterate the data, since this
/// // doesn't involve reading it at all.
/// // (ptr and len are statically known for arrays)
/// for elem in &mut data[..] {
/// // *elem = Vec::new() would try to drop the
/// // uninitialized memory at `elem` -- bad!
/// //
/// // Vec::new doesn't allocate or do really
/// // anything. It's only safe to call here
/// // because we know it won't panic.
/// ptr::write(elem, Vec::new());
/// }
///
/// // SAFE ZONE: everything is initialized.
/// }
///
/// println!("{:?}", &data[0]);
/// ```
///
/// This example emphasizes exactly how delicate and dangerous doing this is.
/// Note that the `vec!` macro *does* let you initialize every element with a
/// value that is only `Clone`, so the following is semantically equivalent and
/// vastly less dangerous, as long as you can live with an extra heap
|
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn uninitialized<T>() -> T {
intrinsics::uninit()
}
/// Swap the values at two mutable locations of the same type, without deinitializing or copying
/// either one.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// let x = &mut 5;
/// let y = &mut 42;
///
/// mem::swap(x, y);
///
/// assert_eq!(42, *x);
/// assert_eq!(5, *y);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap<T>(x: &mut T, y: &mut T) {
unsafe {
// Give ourselves some scratch space to work with
let mut t: T = uninitialized();
// Perform the swap, `&mut` pointers never alias
ptr::copy_nonoverlapping(&*x, &mut t, 1);
ptr::copy_nonoverlapping(&*y, x, 1);
ptr::copy_nonoverlapping(&t, y, 1);
// y and t now point to the same thing, but we need to completely
// forget `t` because we do not want to run the destructor for `T`
// on its value, which is still owned somewhere outside this function.
forget(t);
}
}
/// Replaces the value at a mutable location with a new one, returning the old value, without
/// deinitializing or copying either one.
///
/// This is primarily used for transferring and swapping ownership of a value in a mutable
/// location.
///
/// # Examples
///
/// A simple example:
///
/// ```
/// use std::mem;
///
/// let mut v: Vec<i32> = Vec::new();
///
/// mem::replace(&mut v, Vec::new());
/// ```
///
/// This function allows consumption of one field of a struct by replacing it with another value.
/// The normal approach doesn't always work:
///
/// ```rust,ignore
/// struct Buffer<T> { buf: Vec<T> }
///
/// impl<T> Buffer<T> {
/// fn get_and_reset(&mut self) -> Vec<T> {
/// // error: cannot move out of dereference of `&mut`-pointer
/// let buf = self.buf;
/// self.buf = Vec::new();
/// buf
/// }
/// }
/// ```
///
/// Note that `T` does not necessarily implement `Clone`, so it can't even clone and reset
/// `self.buf`. But `replace` can be used to disassociate the original value of `self.buf` from
/// `self`, allowing it to be returned:
///
/// ```
/// use std::mem;
/// # struct Buffer<T> { buf: Vec<T> }
/// impl<T> Buffer<T> {
/// fn get_and_reset(&mut self) -> Vec<T> {
/// mem::replace(&mut self.buf, Vec::new())
/// }
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn replace<T>(dest: &mut T, mut src: T) -> T {
swap(dest, &mut src);
src
}
/// Disposes of a value.
///
/// While this does call the argument's implementation of `Drop`, it will not
/// release any borrows, as borrows are based on lexical scope.
///
/// This effectively does nothing for
/// [types which implement `Copy`](../../book/ownership.html#copy-types),
/// e.g. integers. Such values are copied and _then_ moved into the function,
/// so the value persists after this function call.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let v = vec![1, 2, 3];
///
/// drop(v); // explicitly drop the vector
/// ```
///
/// Borrows are based on lexical scope, so this produces an error:
///
/// ```ignore
/// let mut v = vec![1, 2, 3];
/// let x = &v[0];
///
/// drop(x); // explicitly drop the reference, but the borrow still exists
///
/// v.push(4); // error: cannot borrow `v` as mutable because it is also
/// // borrowed as immutable
/// ```
///
/// An inner scope is needed to fix this:
///
/// ```
/// let mut v = vec![1, 2, 3];
///
/// {
/// let x = &v[0];
///
/// drop(x); // this is now redundant, as `x` is going out of scope anyway
/// }
///
/// v.push(4); // no problems
/// ```
///
/// Since `RefCell` enforces the borrow rules at runtime, `drop()` can
/// seemingly release a borrow of one:
///
/// ```
/// use std::cell::RefCell;
///
/// let x = RefCell::new(1);
///
/// let mut mutable_borrow = x.borrow_mut();
/// *mutable_borrow = 1;
///
/// drop(mutable_borrow); // relinquish the mutable borrow on this slot
///
/// let borrow = x.borrow();
/// println!("{}", *borrow);
/// ```
///
/// Integers and other types implementing `Copy` are unaffected by `drop()`
///
/// ```
/// #[derive(Copy, Clone)]
/// struct Foo(u8);
///
/// let x = 1;
/// let y = Foo(2);
/// drop(x); // a copy of `x` is moved and dropped
/// drop(y); // a copy of `y` is moved and dropped
///
/// println!("x: {}, y: {}", x, y.0); // still available
/// ```
///
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn drop<T>(_x: T) { }
macro_rules! repeat_u8_as_u32 {
($name:expr) => { (($name as u32) << 24 |
($name as u32) << 16 |
($name as u32) << 8 |
($name as u32)) }
}
macro_rules! repeat_u8_as_u64 {
($name:expr) => { ((repeat_u8_as_u32!($name) as u64) << 32 |
(repeat_u8_as_u32!($name) as u64)) }
}
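// For example (illustrative): repeat_u8_as_u32!(0x1d) evaluates to 0x1d1d1d1d
// and repeat_u8_as_u64!(0x1d) to 0x1d1d1d1d1d1d1d1d, i.e. the byte pattern is
// replicated into every byte of the wider integer.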
// NOTE: Keep synchronized with values used in librustc_trans::trans::adt.
//
// In particular, the POST_DROP_U8 marker must never equal the
// DTOR_NEEDED_U8 marker.
//
// For a while pnkfelix was using 0xc1 here.
// But having the sign bit set is a pain, so 0x1d is probably better.
//
// And of course, 0x00 brings back the old world of zero'ing on drop.
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_U8: u8 = 0x1d;
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_U32: u32 = repeat_u8_as_u32!(POST_DROP_U8);
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_U64: u64 = repeat_u8_as_u64!(POST_DROP_U8);
#[cfg(target_pointer_width = "32")]
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_USIZE: usize = POST_DROP_U32 as usize;
#[cfg(target_pointer_width = "64")]
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_USIZE: usize = POST_DROP_U64 as usize;
/// Interprets `src` as `&U`, and then reads `src` without moving the contained
/// value.
///
/// This function will unsafely assume the pointer `src` is valid for
/// `sizeof(U)` bytes by transmuting `&T` to `&U` and then reading the `&U`. It
/// will also unsafely create a copy of the contained value instead of moving
/// out of `src`.
///
/// It is not a compile-time error if `T` and `U` have different sizes, but it
/// is highly encouraged to only invoke this function where `T` and `U` have the
/// same size. This function triggers undefined behavior if `U` is larger than
/// `T`.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// let one = unsafe { mem::transmute_copy(&1) };
///
/// assert_eq!(1, one);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn transmute_copy<T, U>(src: &T) -> U {
// FIXME(#23542) Replace with type ascription.
#![allow(trivial_casts)]
ptr::read(src as *const T as *const U)
}
|
/// allocation:
///
/// ```
/// let data: Vec<Vec<u32>> = vec![Vec::new(); 1000];
/// println!("{:?}", &data[0]);
|
random_line_split
|
mem.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Basic functions for dealing with memory
//!
//! This module contains functions for querying the size and alignment of
//! types, initializing and manipulating memory.
#![stable(feature = "rust1", since = "1.0.0")]
use marker::Sized;
use intrinsics;
use ptr;
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::transmute;
/// Leaks a value into the void, consuming ownership and never running its
/// destructor.
///
/// This function will take ownership of its argument, but is distinct from the
/// `mem::drop` function in that it **does not run the destructor**, leaking the
/// value and any resources that it owns.
///
/// There are only a few reasons to use this function. They mainly come
/// up in unsafe code or FFI code.
///
/// * You have an uninitialized value, perhaps for performance reasons, and
/// need to prevent the destructor from running on it.
/// * You have two copies of a value (like when writing something like
/// [`mem::swap`][swap]), but need the destructor to only run once to
/// prevent a double `free`.
/// * Transferring resources across [FFI][ffi] boundaries.
///
/// [swap]: fn.swap.html
/// [ffi]: ../../book/ffi.html
///
/// # Safety
///
/// This function is not marked as `unsafe` as Rust does not guarantee that the
/// `Drop` implementation for a value will always run. Note, however, that
/// leaking resources such as memory or I/O objects is likely not desired, so
/// this function is only recommended for specialized use cases.
///
/// The safety of this function implies that when writing `unsafe` code
/// yourself, care must be taken when leveraging a destructor that is required to
/// run to preserve memory safety. There are known situations where the
/// destructor may not run (such as if ownership of the object with the
/// destructor is returned) which must be taken into account.
///
/// # Other forms of Leakage
///
/// It's important to point out that this function is not the only method by
/// which a value can be leaked in safe Rust code. Other known sources of
/// leakage are:
///
/// * `Rc` and `Arc` cycles
/// * `mpsc::{Sender, Receiver}` cycles (they use `Arc` internally)
/// * Panicking destructors are likely to leak local resources
///
/// # Example
///
/// Leak some heap memory by never deallocating it:
///
/// ```rust
/// use std::mem;
///
/// let heap_memory = Box::new(3);
/// mem::forget(heap_memory);
/// ```
///
/// Leak an I/O object, never closing the file:
///
/// ```rust,no_run
/// use std::mem;
/// use std::fs::File;
///
/// let file = File::open("foo.txt").unwrap();
/// mem::forget(file);
/// ```
///
/// The `mem::swap` function uses `mem::forget` to good effect:
///
/// ```rust
/// use std::mem;
/// use std::ptr;
///
/// fn swap<T>(x: &mut T, y: &mut T) {
/// unsafe {
/// // Give ourselves some scratch space to work with
/// let mut t: T = mem::uninitialized();
///
/// // Perform the swap, `&mut` pointers never alias
/// ptr::copy_nonoverlapping(&*x, &mut t, 1);
/// ptr::copy_nonoverlapping(&*y, x, 1);
/// ptr::copy_nonoverlapping(&t, y, 1);
///
/// // y and t now point to the same thing, but we need to completely
/// // forget `t` because we do not want to run the destructor for `T`
/// // on its value, which is still owned somewhere outside this function.
/// mem::forget(t);
/// }
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn forget<T>(t: T) {
unsafe { intrinsics::forget(t) }
}
/// Returns the size of a type in bytes.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::size_of::<i32>());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn size_of<T>() -> usize {
unsafe { intrinsics::size_of::<T>() }
}
/// Returns the size of the type that `val` points to in bytes.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::size_of_val(&5i32));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn size_of_val<T: ?Sized>(val: &T) -> usize {
unsafe { intrinsics::size_of_val(val) }
}
/// Returns the ABI-required minimum alignment of a type
///
/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::min_align_of::<i32>());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(reason = "use `align_of` instead", since = "1.2.0")]
pub fn min_align_of<T>() -> usize {
unsafe { intrinsics::min_align_of::<T>() }
}
/// Returns the ABI-required minimum alignment of the type of the value that `val` points to
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::min_align_of_val(&5i32));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(reason = "use `align_of_val` instead", since = "1.2.0")]
pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize {
unsafe { intrinsics::min_align_of_val(val) }
}
/// Returns the alignment in memory for a type.
///
/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::align_of::<i32>());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn
|
<T>() -> usize {
unsafe { intrinsics::min_align_of::<T>() }
}
/// Returns the ABI-required minimum alignment of the type of the value that `val` points to
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::align_of_val(&5i32));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn align_of_val<T: ?Sized>(val: &T) -> usize {
unsafe { intrinsics::min_align_of_val(val) }
}
/// Creates a value initialized to zero.
///
/// This function is similar to allocating space for a local variable and zeroing it out (an unsafe
/// operation).
///
/// Care must be taken when using this function, if the type `T` has a destructor and the value
/// falls out of scope (due to unwinding or returning) before being initialized, then the
/// destructor will run on zeroed data, likely leading to crashes.
///
/// This is useful for FFI functions sometimes, but should generally be avoided.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// let x: i32 = unsafe { mem::zeroed() };
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn zeroed<T>() -> T {
intrinsics::init()
}
/// Creates a value initialized to an unspecified series of bytes.
///
/// The byte sequence usually indicates that the value at the memory
/// in question has been dropped. Thus, *if* T carries a drop flag,
/// any associated destructor will not be run when the value falls out
/// of scope.
///
/// Some code at one time used the `zeroed` function above to
/// accomplish this goal.
///
/// This function is expected to be deprecated with the transition
/// to non-zeroing drop.
#[inline]
#[unstable(feature = "filling_drop", issue = "5016")]
pub unsafe fn dropped<T>() -> T {
#[inline(always)]
unsafe fn dropped_impl<T>() -> T { intrinsics::init_dropped() }
dropped_impl()
}
/// Bypasses Rust's normal memory-initialization checks by pretending to
/// produce a value of type T, while doing nothing at all.
///
/// **This is incredibly dangerous, and should not be done lightly. Deeply
/// consider initializing your memory with a default value instead.**
///
/// This is useful for FFI functions and initializing arrays sometimes,
/// but should generally be avoided.
///
/// # Undefined Behavior
///
/// It is Undefined Behavior to read uninitialized memory. Even just an
/// uninitialized boolean. For instance, if you branch on the value of such
/// a boolean your program may take one, both, or neither of the branches.
///
/// Note that this often also includes *writing* to the uninitialized value.
/// Rust believes the value is initialized, and will therefore try to Drop
/// the uninitialized value and its fields if you try to overwrite the memory
/// in a normal manner. The only way to safely initialize an arbitrary
/// uninitialized value is with one of the `ptr` functions: `write`, `copy`, or
/// `copy_nonoverlapping`. This isn't necessary if `T` is a primitive
/// or otherwise only contains types that don't implement Drop.
///
/// If this value *does* need some kind of Drop, it must be initialized before
/// it goes out of scope (and therefore would be dropped). Note that this
/// includes a `panic` occurring and unwinding the stack suddenly.
///
/// # Examples
///
/// Here's how to safely initialize an array of `Vec`s.
///
/// ```
/// use std::mem;
/// use std::ptr;
///
/// // Only declare the array. This safely leaves it
/// // uninitialized in a way that Rust will track for us.
/// // However we can't initialize it element-by-element
/// // safely, and we can't use the `[value; 1000]`
/// // constructor because it only works with `Copy` data.
/// let mut data: [Vec<u32>; 1000];
///
/// unsafe {
/// // So we need to do this to initialize it.
/// data = mem::uninitialized();
///
/// // DANGER ZONE: if anything panics or otherwise
/// // incorrectly reads the array here, we will have
/// // Undefined Behavior.
///
/// // It's ok to mutably iterate the data, since this
/// // doesn't involve reading it at all.
/// // (ptr and len are statically known for arrays)
/// for elem in &mut data[..] {
/// // *elem = Vec::new() would try to drop the
/// // uninitialized memory at `elem` -- bad!
/// //
/// // Vec::new doesn't allocate or do really
/// // anything. It's only safe to call here
/// // because we know it won't panic.
/// ptr::write(elem, Vec::new());
/// }
///
/// // SAFE ZONE: everything is initialized.
/// }
///
/// println!("{:?}", &data[0]);
/// ```
///
/// This example emphasizes exactly how delicate and dangerous doing this is.
/// Note that the `vec!` macro *does* let you initialize every element with a
/// value that is only `Clone`, so the following is semantically equivalent and
/// vastly less dangerous, as long as you can live with an extra heap
/// allocation:
///
/// ```
/// let data: Vec<Vec<u32>> = vec![Vec::new(); 1000];
/// println!("{:?}", &data[0]);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn uninitialized<T>() -> T {
intrinsics::uninit()
}
/// Swap the values at two mutable locations of the same type, without deinitializing or copying
/// either one.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// let x = &mut 5;
/// let y = &mut 42;
///
/// mem::swap(x, y);
///
/// assert_eq!(42, *x);
/// assert_eq!(5, *y);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap<T>(x: &mut T, y: &mut T) {
unsafe {
// Give ourselves some scratch space to work with
let mut t: T = uninitialized();
// Perform the swap, `&mut` pointers never alias
ptr::copy_nonoverlapping(&*x, &mut t, 1);
ptr::copy_nonoverlapping(&*y, x, 1);
ptr::copy_nonoverlapping(&t, y, 1);
// y and t now point to the same thing, but we need to completely
// forget `t` because we do not want to run the destructor for `T`
// on its value, which is still owned somewhere outside this function.
forget(t);
}
}
/// Replaces the value at a mutable location with a new one, returning the old value, without
/// deinitializing or copying either one.
///
/// This is primarily used for transferring and swapping ownership of a value in a mutable
/// location.
///
/// # Examples
///
/// A simple example:
///
/// ```
/// use std::mem;
///
/// let mut v: Vec<i32> = Vec::new();
///
/// mem::replace(&mut v, Vec::new());
/// ```
///
/// This function allows consumption of one field of a struct by replacing it with another value.
/// The normal approach doesn't always work:
///
/// ```rust,ignore
/// struct Buffer<T> { buf: Vec<T> }
///
/// impl<T> Buffer<T> {
/// fn get_and_reset(&mut self) -> Vec<T> {
/// // error: cannot move out of dereference of `&mut`-pointer
/// let buf = self.buf;
/// self.buf = Vec::new();
/// buf
/// }
/// }
/// ```
///
/// Note that `T` does not necessarily implement `Clone`, so it can't even clone and reset
/// `self.buf`. But `replace` can be used to disassociate the original value of `self.buf` from
/// `self`, allowing it to be returned:
///
/// ```
/// use std::mem;
/// # struct Buffer<T> { buf: Vec<T> }
/// impl<T> Buffer<T> {
/// fn get_and_reset(&mut self) -> Vec<T> {
/// mem::replace(&mut self.buf, Vec::new())
/// }
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn replace<T>(dest: &mut T, mut src: T) -> T {
swap(dest, &mut src);
src
}
/// Disposes of a value.
///
/// While this does call the argument's implementation of `Drop`, it will not
/// release any borrows, as borrows are based on lexical scope.
///
/// This effectively does nothing for
/// [types which implement `Copy`](../../book/ownership.html#copy-types),
/// e.g. integers. Such values are copied and _then_ moved into the function,
/// so the value persists after this function call.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let v = vec![1, 2, 3];
///
/// drop(v); // explicitly drop the vector
/// ```
///
/// Borrows are based on lexical scope, so this produces an error:
///
/// ```ignore
/// let mut v = vec![1, 2, 3];
/// let x = &v[0];
///
/// drop(x); // explicitly drop the reference, but the borrow still exists
///
/// v.push(4); // error: cannot borrow `v` as mutable because it is also
/// // borrowed as immutable
/// ```
///
/// An inner scope is needed to fix this:
///
/// ```
/// let mut v = vec![1, 2, 3];
///
/// {
/// let x = &v[0];
///
/// drop(x); // this is now redundant, as `x` is going out of scope anyway
/// }
///
/// v.push(4); // no problems
/// ```
///
/// Since `RefCell` enforces the borrow rules at runtime, `drop()` can
/// seemingly release a borrow of one:
///
/// ```
/// use std::cell::RefCell;
///
/// let x = RefCell::new(1);
///
/// let mut mutable_borrow = x.borrow_mut();
/// *mutable_borrow = 1;
///
/// drop(mutable_borrow); // relinquish the mutable borrow on this slot
///
/// let borrow = x.borrow();
/// println!("{}", *borrow);
/// ```
///
/// Integers and other types implementing `Copy` are unaffected by `drop()`
///
/// ```
/// #[derive(Copy, Clone)]
/// struct Foo(u8);
///
/// let x = 1;
/// let y = Foo(2);
/// drop(x); // a copy of `x` is moved and dropped
/// drop(y); // a copy of `y` is moved and dropped
///
/// println!("x: {}, y: {}", x, y.0); // still available
/// ```
///
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn drop<T>(_x: T) { }
macro_rules! repeat_u8_as_u32 {
($name:expr) => { (($name as u32) << 24 |
($name as u32) << 16 |
($name as u32) << 8 |
($name as u32)) }
}
macro_rules! repeat_u8_as_u64 {
($name:expr) => { ((repeat_u8_as_u32!($name) as u64) << 32 |
(repeat_u8_as_u32!($name) as u64)) }
}
// NOTE: Keep synchronized with values used in librustc_trans::trans::adt.
//
// In particular, the POST_DROP_U8 marker must never equal the
// DTOR_NEEDED_U8 marker.
//
// For a while pnkfelix was using 0xc1 here.
// But having the sign bit set is a pain, so 0x1d is probably better.
//
// And of course, 0x00 brings back the old world of zero'ing on drop.
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_U8: u8 = 0x1d;
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_U32: u32 = repeat_u8_as_u32!(POST_DROP_U8);
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_U64: u64 = repeat_u8_as_u64!(POST_DROP_U8);
#[cfg(target_pointer_width = "32")]
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_USIZE: usize = POST_DROP_U32 as usize;
#[cfg(target_pointer_width = "64")]
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_USIZE: usize = POST_DROP_U64 as usize;
/// Interprets `src` as `&U`, and then reads `src` without moving the contained
/// value.
///
/// This function will unsafely assume the pointer `src` is valid for
/// `sizeof(U)` bytes by transmuting `&T` to `&U` and then reading the `&U`. It
/// will also unsafely create a copy of the contained value instead of moving
/// out of `src`.
///
/// It is not a compile-time error if `T` and `U` have different sizes, but it
/// is highly encouraged to only invoke this function where `T` and `U` have the
/// same size. This function triggers undefined behavior if `U` is larger than
/// `T`.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// let one = unsafe { mem::transmute_copy(&1) };
///
/// assert_eq!(1, one);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn transmute_copy<T, U>(src: &T) -> U {
// FIXME(#23542) Replace with type ascription.
#![allow(trivial_casts)]
ptr::read(src as *const T as *const U)
}
|
align_of
|
identifier_name
|
mem.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Basic functions for dealing with memory
//!
//! This module contains functions for querying the size and alignment of
//! types, initializing and manipulating memory.
#![stable(feature = "rust1", since = "1.0.0")]
use marker::Sized;
use intrinsics;
use ptr;
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::transmute;
/// Leaks a value into the void, consuming ownership and never running its
/// destructor.
///
/// This function will take ownership of its argument, but is distinct from the
/// `mem::drop` function in that it **does not run the destructor**, leaking the
/// value and any resources that it owns.
///
/// There are only a few reasons to use this function. They mainly come
/// up in unsafe code or FFI code.
///
/// * You have an uninitialized value, perhaps for performance reasons, and
/// need to prevent the destructor from running on it.
/// * You have two copies of a value (like when writing something like
/// [`mem::swap`][swap]), but need the destructor to only run once to
/// prevent a double `free`.
/// * Transferring resources across [FFI][ffi] boundaries.
///
/// [swap]: fn.swap.html
/// [ffi]: ../../book/ffi.html
///
/// # Safety
///
/// This function is not marked as `unsafe` as Rust does not guarantee that the
/// `Drop` implementation for a value will always run. Note, however, that
/// leaking resources such as memory or I/O objects is likely not desired, so
/// this function is only recommended for specialized use cases.
///
/// The safety of this function implies that when writing `unsafe` code
/// yourself, care must be taken when leveraging a destructor that is required to
/// run to preserve memory safety. There are known situations where the
/// destructor may not run (such as if ownership of the object with the
/// destructor is returned) which must be taken into account.
///
/// # Other forms of Leakage
///
/// It's important to point out that this function is not the only method by
/// which a value can be leaked in safe Rust code. Other known sources of
/// leakage are:
///
/// * `Rc` and `Arc` cycles
/// * `mpsc::{Sender, Receiver}` cycles (they use `Arc` internally)
/// * Panicking destructors are likely to leak local resources
///
/// # Example
///
/// Leak some heap memory by never deallocating it:
///
/// ```rust
/// use std::mem;
///
/// let heap_memory = Box::new(3);
/// mem::forget(heap_memory);
/// ```
///
/// Leak an I/O object, never closing the file:
///
/// ```rust,no_run
/// use std::mem;
/// use std::fs::File;
///
/// let file = File::open("foo.txt").unwrap();
/// mem::forget(file);
/// ```
///
/// The `mem::swap` function uses `mem::forget` to good effect:
///
/// ```rust
/// use std::mem;
/// use std::ptr;
///
/// fn swap<T>(x: &mut T, y: &mut T) {
/// unsafe {
/// // Give ourselves some scratch space to work with
/// let mut t: T = mem::uninitialized();
///
/// // Perform the swap, `&mut` pointers never alias
/// ptr::copy_nonoverlapping(&*x, &mut t, 1);
/// ptr::copy_nonoverlapping(&*y, x, 1);
/// ptr::copy_nonoverlapping(&t, y, 1);
///
/// // y and t now point to the same thing, but we need to completely
/// // forget `t` because we do not want to run the destructor for `T`
/// // on its value, which is still owned somewhere outside this function.
/// mem::forget(t);
/// }
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn forget<T>(t: T) {
unsafe { intrinsics::forget(t) }
}
/// Returns the size of a type in bytes.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::size_of::<i32>());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn size_of<T>() -> usize
|
/// Returns the size of the type that `val` points to in bytes.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::size_of_val(&5i32));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn size_of_val<T: ?Sized>(val: &T) -> usize {
unsafe { intrinsics::size_of_val(val) }
}
/// Returns the ABI-required minimum alignment of a type
///
/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::min_align_of::<i32>());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(reason = "use `align_of` instead", since = "1.2.0")]
pub fn min_align_of<T>() -> usize {
unsafe { intrinsics::min_align_of::<T>() }
}
/// Returns the ABI-required minimum alignment of the type of the value that `val` points to
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::min_align_of_val(&5i32));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(reason = "use `align_of_val` instead", since = "1.2.0")]
pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize {
unsafe { intrinsics::min_align_of_val(val) }
}
/// Returns the alignment in memory for a type.
///
/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::align_of::<i32>());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn align_of<T>() -> usize {
unsafe { intrinsics::min_align_of::<T>() }
}
/// Returns the ABI-required minimum alignment of the type of the value that `val` points to
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// assert_eq!(4, mem::align_of_val(&5i32));
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn align_of_val<T: ?Sized>(val: &T) -> usize {
unsafe { intrinsics::min_align_of_val(val) }
}
/// Creates a value initialized to zero.
///
/// This function is similar to allocating space for a local variable and zeroing it out (an unsafe
/// operation).
///
/// Care must be taken when using this function, if the type `T` has a destructor and the value
/// falls out of scope (due to unwinding or returning) before being initialized, then the
/// destructor will run on zeroed data, likely leading to crashes.
///
/// This is useful for FFI functions sometimes, but should generally be avoided.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// let x: i32 = unsafe { mem::zeroed() };
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn zeroed<T>() -> T {
intrinsics::init()
}
/// Creates a value initialized to an unspecified series of bytes.
///
/// The byte sequence usually indicates that the value at the memory
/// in question has been dropped. Thus, *if* T carries a drop flag,
/// any associated destructor will not be run when the value falls out
/// of scope.
///
/// Some code at one time used the `zeroed` function above to
/// accomplish this goal.
///
/// This function is expected to be deprecated with the transition
/// to non-zeroing drop.
#[inline]
#[unstable(feature = "filling_drop", issue = "5016")]
pub unsafe fn dropped<T>() -> T {
#[inline(always)]
unsafe fn dropped_impl<T>() -> T { intrinsics::init_dropped() }
dropped_impl()
}
/// Bypasses Rust's normal memory-initialization checks by pretending to
/// produce a value of type T, while doing nothing at all.
///
/// **This is incredibly dangerous, and should not be done lightly. Deeply
/// consider initializing your memory with a default value instead.**
///
/// This is useful for FFI functions and initializing arrays sometimes,
/// but should generally be avoided.
///
/// # Undefined Behavior
///
/// It is Undefined Behavior to read uninitialized memory. Even just an
/// uninitialized boolean. For instance, if you branch on the value of such
/// a boolean, your program may take one, both, or neither of the branches.
///
/// Note that this often also includes *writing* to the uninitialized value.
/// Rust believes the value is initialized, and will therefore try to Drop
/// the uninitialized value and its fields if you try to overwrite the memory
/// in a normal manner. The only way to safely initialize an arbitrary
/// uninitialized value is with one of the `ptr` functions: `write`, `copy`, or
/// `copy_nonoverlapping`. This isn't necessary if `T` is a primitive
/// or otherwise only contains types that don't implement Drop.
///
/// If this value *does* need some kind of Drop, it must be initialized before
/// it goes out of scope (and therefore would be dropped). Note that this
/// includes a `panic` occurring and unwinding the stack suddenly.
///
/// # Examples
///
/// Here's how to safely initialize an array of `Vec`s.
///
/// ```
/// use std::mem;
/// use std::ptr;
///
/// // Only declare the array. This safely leaves it
/// // uninitialized in a way that Rust will track for us.
/// // However we can't initialize it element-by-element
/// // safely, and we can't use the `[value; 1000]`
/// // constructor because it only works with `Copy` data.
/// let mut data: [Vec<u32>; 1000];
///
/// unsafe {
/// // So we need to do this to initialize it.
/// data = mem::uninitialized();
///
/// // DANGER ZONE: if anything panics or otherwise
/// // incorrectly reads the array here, we will have
/// // Undefined Behavior.
///
/// // It's ok to mutably iterate the data, since this
/// // doesn't involve reading it at all.
/// // (ptr and len are statically known for arrays)
/// for elem in &mut data[..] {
/// // *elem = Vec::new() would try to drop the
/// // uninitialized memory at `elem` -- bad!
/// //
/// // Vec::new doesn't allocate or do really
/// // anything. It's only safe to call here
/// // because we know it won't panic.
/// ptr::write(elem, Vec::new());
/// }
///
/// // SAFE ZONE: everything is initialized.
/// }
///
/// println!("{:?}", &data[0]);
/// ```
///
/// This example emphasizes exactly how delicate and dangerous doing this is.
/// Note that the `vec!` macro *does* let you initialize every element with a
/// value that is only `Clone`, so the following is semantically equivalent and
/// vastly less dangerous, as long as you can live with an extra heap
/// allocation:
///
/// ```
/// let data: Vec<Vec<u32>> = vec![Vec::new(); 1000];
/// println!("{:?}", &data[0]);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn uninitialized<T>() -> T {
intrinsics::uninit()
}
/// Swap the values at two mutable locations of the same type, without deinitializing or copying
/// either one.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// let x = &mut 5;
/// let y = &mut 42;
///
/// mem::swap(x, y);
///
/// assert_eq!(42, *x);
/// assert_eq!(5, *y);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap<T>(x: &mut T, y: &mut T) {
unsafe {
// Give ourselves some scratch space to work with
let mut t: T = uninitialized();
// Perform the swap, `&mut` pointers never alias
ptr::copy_nonoverlapping(&*x, &mut t, 1);
ptr::copy_nonoverlapping(&*y, x, 1);
ptr::copy_nonoverlapping(&t, y, 1);
// y and t now point to the same thing, but we need to completely
// forget `t` because we do not want to run the destructor for `T`
// on its value, which is still owned somewhere outside this function.
forget(t);
}
}
/// Replaces the value at a mutable location with a new one, returning the old value, without
/// deinitializing or copying either one.
///
/// This is primarily used for transferring and swapping ownership of a value in a mutable
/// location.
///
/// # Examples
///
/// A simple example:
///
/// ```
/// use std::mem;
///
/// let mut v: Vec<i32> = Vec::new();
///
/// mem::replace(&mut v, Vec::new());
/// ```
///
/// This function allows consumption of one field of a struct by replacing it with another value.
/// The normal approach doesn't always work:
///
/// ```rust,ignore
/// struct Buffer<T> { buf: Vec<T> }
///
/// impl<T> Buffer<T> {
/// fn get_and_reset(&mut self) -> Vec<T> {
/// // error: cannot move out of dereference of `&mut`-pointer
/// let buf = self.buf;
/// self.buf = Vec::new();
/// buf
/// }
/// }
/// ```
///
/// Note that `T` does not necessarily implement `Clone`, so it can't even clone and reset
/// `self.buf`. But `replace` can be used to disassociate the original value of `self.buf` from
/// `self`, allowing it to be returned:
///
/// ```
/// use std::mem;
/// # struct Buffer<T> { buf: Vec<T> }
/// impl<T> Buffer<T> {
/// fn get_and_reset(&mut self) -> Vec<T> {
/// mem::replace(&mut self.buf, Vec::new())
/// }
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn replace<T>(dest: &mut T, mut src: T) -> T {
swap(dest, &mut src);
src
}
/// Disposes of a value.
///
/// While this does call the argument's implementation of `Drop`, it will not
/// release any borrows, as borrows are based on lexical scope.
///
/// This effectively does nothing for
/// [types which implement `Copy`](../../book/ownership.html#copy-types),
/// e.g. integers. Such values are copied and _then_ moved into the function,
/// so the value persists after this function call.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let v = vec![1, 2, 3];
///
/// drop(v); // explicitly drop the vector
/// ```
///
/// Borrows are based on lexical scope, so this produces an error:
///
/// ```ignore
/// let mut v = vec![1, 2, 3];
/// let x = &v[0];
///
/// drop(x); // explicitly drop the reference, but the borrow still exists
///
/// v.push(4); // error: cannot borrow `v` as mutable because it is also
/// // borrowed as immutable
/// ```
///
/// An inner scope is needed to fix this:
///
/// ```
/// let mut v = vec![1, 2, 3];
///
/// {
/// let x = &v[0];
///
/// drop(x); // this is now redundant, as `x` is going out of scope anyway
/// }
///
/// v.push(4); // no problems
/// ```
///
/// Since `RefCell` enforces the borrow rules at runtime, `drop()` can
/// seemingly release a borrow of one:
///
/// ```
/// use std::cell::RefCell;
///
/// let x = RefCell::new(1);
///
/// let mut mutable_borrow = x.borrow_mut();
/// *mutable_borrow = 1;
///
/// drop(mutable_borrow); // relinquish the mutable borrow on this slot
///
/// let borrow = x.borrow();
/// println!("{}", *borrow);
/// ```
///
/// Integers and other types implementing `Copy` are unaffected by `drop()`
///
/// ```
/// #[derive(Copy, Clone)]
/// struct Foo(u8);
///
/// let x = 1;
/// let y = Foo(2);
/// drop(x); // a copy of `x` is moved and dropped
/// drop(y); // a copy of `y` is moved and dropped
///
/// println!("x: {}, y: {}", x, y.0); // still available
/// ```
///
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn drop<T>(_x: T) { }
macro_rules! repeat_u8_as_u32 {
($name:expr) => { (($name as u32) << 24 |
($name as u32) << 16 |
($name as u32) << 8 |
($name as u32)) }
}
macro_rules! repeat_u8_as_u64 {
($name:expr) => { ((repeat_u8_as_u32!($name) as u64) << 32 |
(repeat_u8_as_u32!($name) as u64)) }
}
// NOTE: Keep synchronized with values used in librustc_trans::trans::adt.
//
// In particular, the POST_DROP_U8 marker must never equal the
// DTOR_NEEDED_U8 marker.
//
// For a while pnkfelix was using 0xc1 here.
// But having the sign bit set is a pain, so 0x1d is probably better.
//
// And of course, 0x00 brings back the old world of zero'ing on drop.
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_U8: u8 = 0x1d;
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_U32: u32 = repeat_u8_as_u32!(POST_DROP_U8);
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_U64: u64 = repeat_u8_as_u64!(POST_DROP_U8);
#[cfg(target_pointer_width = "32")]
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_USIZE: usize = POST_DROP_U32 as usize;
#[cfg(target_pointer_width = "64")]
#[unstable(feature = "filling_drop", issue = "5016")]
#[allow(missing_docs)]
pub const POST_DROP_USIZE: usize = POST_DROP_U64 as usize;
/// Interprets `src` as `&U`, and then reads `src` without moving the contained
/// value.
///
/// This function will unsafely assume the pointer `src` is valid for
/// `sizeof(U)` bytes by transmuting `&T` to `&U` and then reading the `&U`. It
/// will also unsafely create a copy of the contained value instead of moving
/// out of `src`.
///
/// It is not a compile-time error if `T` and `U` have different sizes, but it
/// is highly encouraged to only invoke this function where `T` and `U` have the
/// same size. This function triggers undefined behavior if `U` is larger than
/// `T`.
///
/// # Examples
///
/// ```
/// use std::mem;
///
/// let one = unsafe { mem::transmute_copy(&1) };
///
/// assert_eq!(1, one);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn transmute_copy<T, U>(src: &T) -> U {
// FIXME(#23542) Replace with type ascription.
#![allow(trivial_casts)]
ptr::read(src as *const T as *const U)
}
|
{
unsafe { intrinsics::size_of::<T>() }
}
|
identifier_body
|
function.rs
|
use syntax::ast::*;
use syntax::codemap::{Span, Spanned};
use syntax::ext::base::Annotatable;
use utils::{ArgExt, span};
#[derive(Debug)]
pub struct Function(Spanned<(Ident, FnDecl)>);
impl Function {
pub fn from(annotated: &Annotatable) -> Result<Function, Span> {
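        // Only free-standing `fn` items are accepted; trait and impl items
        // are rejected, returning their span for error reporting.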
let inner = match *annotated {
Annotatable::Item(ref item) => match item.node {
ItemKind::Fn(ref decl,..) =>
|
_ => return Err(item.span)
},
Annotatable::TraitItem(ref item) => return Err(item.span),
Annotatable::ImplItem(ref item) => return Err(item.span),
};
Ok(Function(inner))
}
pub fn ident(&self) -> &Ident {
&self.0.node.0
}
pub fn decl(&self) -> &FnDecl {
&self.0.node.1
}
pub fn span(&self) -> Span {
self.0.span
}
pub fn find_input<'a>(&'a self, name: &Name) -> Option<&'a Arg> {
self.decl().inputs.iter().find(|arg| arg.named(name))
}
}
|
{
span((item.ident, decl.clone().unwrap()), item.span)
}
|
conditional_block
|
function.rs
|
use syntax::ast::*;
use syntax::codemap::{Span, Spanned};
use syntax::ext::base::Annotatable;
use utils::{ArgExt, span};
#[derive(Debug)]
pub struct
|
(Spanned<(Ident, FnDecl)>);
impl Function {
pub fn from(annotated: &Annotatable) -> Result<Function, Span> {
let inner = match *annotated {
Annotatable::Item(ref item) => match item.node {
ItemKind::Fn(ref decl,..) => {
span((item.ident, decl.clone().unwrap()), item.span)
}
_ => return Err(item.span)
},
Annotatable::TraitItem(ref item) => return Err(item.span),
Annotatable::ImplItem(ref item) => return Err(item.span),
};
Ok(Function(inner))
}
pub fn ident(&self) -> &Ident {
&self.0.node.0
}
pub fn decl(&self) -> &FnDecl {
&self.0.node.1
}
pub fn span(&self) -> Span {
self.0.span
}
pub fn find_input<'a>(&'a self, name: &Name) -> Option<&'a Arg> {
self.decl().inputs.iter().find(|arg| arg.named(name))
}
}
|
Function
|
identifier_name
|
function.rs
|
use syntax::codemap::{Span, Spanned};
use syntax::ext::base::Annotatable;
use utils::{ArgExt, span};
#[derive(Debug)]
pub struct Function(Spanned<(Ident, FnDecl)>);
impl Function {
pub fn from(annotated: &Annotatable) -> Result<Function, Span> {
let inner = match *annotated {
Annotatable::Item(ref item) => match item.node {
ItemKind::Fn(ref decl,..) => {
span((item.ident, decl.clone().unwrap()), item.span)
}
_ => return Err(item.span)
},
Annotatable::TraitItem(ref item) => return Err(item.span),
Annotatable::ImplItem(ref item) => return Err(item.span),
};
Ok(Function(inner))
}
pub fn ident(&self) -> &Ident {
&self.0.node.0
}
pub fn decl(&self) -> &FnDecl {
&self.0.node.1
}
pub fn span(&self) -> Span {
self.0.span
}
pub fn find_input<'a>(&'a self, name: &Name) -> Option<&'a Arg> {
self.decl().inputs.iter().find(|arg| arg.named(name))
}
}
|
use syntax::ast::*;
|
random_line_split
|
|
function.rs
|
use syntax::ast::*;
use syntax::codemap::{Span, Spanned};
use syntax::ext::base::Annotatable;
use utils::{ArgExt, span};
#[derive(Debug)]
pub struct Function(Spanned<(Ident, FnDecl)>);
impl Function {
pub fn from(annotated: &Annotatable) -> Result<Function, Span> {
let inner = match *annotated {
Annotatable::Item(ref item) => match item.node {
ItemKind::Fn(ref decl,..) => {
span((item.ident, decl.clone().unwrap()), item.span)
}
_ => return Err(item.span)
},
Annotatable::TraitItem(ref item) => return Err(item.span),
Annotatable::ImplItem(ref item) => return Err(item.span),
};
Ok(Function(inner))
}
pub fn ident(&self) -> &Ident {
&self.0.node.0
}
pub fn decl(&self) -> &FnDecl {
&self.0.node.1
}
pub fn span(&self) -> Span
|
pub fn find_input<'a>(&'a self, name: &Name) -> Option<&'a Arg> {
self.decl().inputs.iter().find(|arg| arg.named(name))
}
}
|
{
self.0.span
}
|
identifier_body
|
lz4.rs
|
extern crate lz4;
use std::env;
|
use std::io::Write;
use std::iter::FromIterator;
use std::path::Path;
fn main() {
println!("LZ4 version: {}", lz4::version());
let suffix = ".lz4";
for arg in Vec::from_iter(env::args())[1..].iter() {
if arg.ends_with(suffix) {
decompress(
&Path::new(arg),
&Path::new(&arg[0..arg.len() - suffix.len()]),
).unwrap();
} else {
compress(&Path::new(arg), &Path::new(&(arg.to_string() + suffix))).unwrap();
}
}
}
fn compress(src: &Path, dst: &Path) -> Result<()> {
println!("Compressing: {:?} -> {:?}", src, dst);
let mut fi = try!(File::open(src));
let mut fo = try!(lz4::EncoderBuilder::new().build(try!(File::create(dst))));
try!(copy(&mut fi, &mut fo));
match fo.finish() {
(_, result) => result,
}
}
fn decompress(src: &Path, dst: &Path) -> Result<()> {
println!("Decompressing: {:?} -> {:?}", src, dst);
let mut fi = try!(lz4::Decoder::new(try!(File::open(src))));
let mut fo = try!(File::create(dst));
copy(&mut fi, &mut fo)
}
fn copy(src: &mut Read, dst: &mut Write) -> Result<()> {
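    // Stream through a fixed 1 KiB buffer until EOF (a zero-length read).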
let mut buffer: [u8; 1024] = [0; 1024];
loop {
let len = try!(src.read(&mut buffer));
if len == 0 {
break;
}
try!(dst.write_all(&buffer[0..len]));
}
Ok(())
}
|
use std::fs::File;
use std::io::Read;
use std::io::Result;
|
random_line_split
|
lz4.rs
|
extern crate lz4;
use std::env;
use std::fs::File;
use std::io::Read;
use std::io::Result;
use std::io::Write;
use std::iter::FromIterator;
use std::path::Path;
fn main() {
println!("LZ4 version: {}", lz4::version());
let suffix = ".lz4";
for arg in Vec::from_iter(env::args())[1..].iter() {
if arg.ends_with(suffix) {
decompress(
&Path::new(arg),
&Path::new(&arg[0..arg.len() - suffix.len()]),
).unwrap();
} else {
compress(&Path::new(arg), &Path::new(&(arg.to_string() + suffix))).unwrap();
}
}
}
fn compress(src: &Path, dst: &Path) -> Result<()> {
println!("Compressing: {:?} -> {:?}", src, dst);
let mut fi = try!(File::open(src));
let mut fo = try!(lz4::EncoderBuilder::new().build(try!(File::create(dst))));
try!(copy(&mut fi, &mut fo));
match fo.finish() {
(_, result) => result,
}
}
fn
|
(src: &Path, dst: &Path) -> Result<()> {
println!("Decompressing: {:?} -> {:?}", src, dst);
let mut fi = try!(lz4::Decoder::new(try!(File::open(src))));
let mut fo = try!(File::create(dst));
copy(&mut fi, &mut fo)
}
fn copy(src: &mut Read, dst: &mut Write) -> Result<()> {
let mut buffer: [u8; 1024] = [0; 1024];
loop {
let len = try!(src.read(&mut buffer));
if len == 0 {
break;
}
try!(dst.write_all(&buffer[0..len]));
}
Ok(())
}
|
decompress
|
identifier_name
|
lz4.rs
|
extern crate lz4;
use std::env;
use std::fs::File;
use std::io::Read;
use std::io::Result;
use std::io::Write;
use std::iter::FromIterator;
use std::path::Path;
fn main() {
println!("LZ4 version: {}", lz4::version());
let suffix = ".lz4";
for arg in Vec::from_iter(env::args())[1..].iter() {
if arg.ends_with(suffix) {
decompress(
&Path::new(arg),
&Path::new(&arg[0..arg.len() - suffix.len()]),
).unwrap();
} else {
compress(&Path::new(arg), &Path::new(&(arg.to_string() + suffix))).unwrap();
}
}
}
fn compress(src: &Path, dst: &Path) -> Result<()> {
println!("Compressing: {:?} -> {:?}", src, dst);
let mut fi = try!(File::open(src));
let mut fo = try!(lz4::EncoderBuilder::new().build(try!(File::create(dst))));
try!(copy(&mut fi, &mut fo));
match fo.finish() {
(_, result) => result,
}
}
fn decompress(src: &Path, dst: &Path) -> Result<()>
|
fn copy(src: &mut Read, dst: &mut Write) -> Result<()> {
let mut buffer: [u8; 1024] = [0; 1024];
loop {
let len = try!(src.read(&mut buffer));
if len == 0 {
break;
}
try!(dst.write_all(&buffer[0..len]));
}
Ok(())
}
|
{
println!("Decompressing: {:?} -> {:?}", src, dst);
let mut fi = try!(lz4::Decoder::new(try!(File::open(src))));
let mut fo = try!(File::create(dst));
copy(&mut fi, &mut fo)
}
|
identifier_body
|
lz4.rs
|
extern crate lz4;
use std::env;
use std::fs::File;
use std::io::Read;
use std::io::Result;
use std::io::Write;
use std::iter::FromIterator;
use std::path::Path;
fn main() {
println!("LZ4 version: {}", lz4::version());
let suffix = ".lz4";
for arg in Vec::from_iter(env::args())[1..].iter() {
if arg.ends_with(suffix) {
decompress(
&Path::new(arg),
&Path::new(&arg[0..arg.len() - suffix.len()]),
).unwrap();
} else
|
}
}
fn compress(src: &Path, dst: &Path) -> Result<()> {
println!("Compressing: {:?} -> {:?}", src, dst);
let mut fi = try!(File::open(src));
let mut fo = try!(lz4::EncoderBuilder::new().build(try!(File::create(dst))));
try!(copy(&mut fi, &mut fo));
match fo.finish() {
(_, result) => result,
}
}
fn decompress(src: &Path, dst: &Path) -> Result<()> {
println!("Decompressing: {:?} -> {:?}", src, dst);
let mut fi = try!(lz4::Decoder::new(try!(File::open(src))));
let mut fo = try!(File::create(dst));
copy(&mut fi, &mut fo)
}
fn copy(src: &mut Read, dst: &mut Write) -> Result<()> {
let mut buffer: [u8; 1024] = [0; 1024];
loop {
let len = try!(src.read(&mut buffer));
if len == 0 {
break;
}
try!(dst.write_all(&buffer[0..len]));
}
Ok(())
}
|
{
compress(&Path::new(arg), &Path::new(&(arg.to_string() + suffix))).unwrap();
}
|
conditional_block
|
dbvt_broad_phase3d.rs
|
extern crate nalgebra as na;
use na::{Isometry3, Vector3};
use ncollide3d::bounding_volume;
use ncollide3d::broad_phase::{BroadPhase, BroadPhaseInterferenceHandler, DBVTBroadPhase};
use ncollide3d::shape::Ball;
struct InterferenceHandler;
impl BroadPhaseInterferenceHandler<i32> for InterferenceHandler {
fn is_interference_allowed(&mut self, a: &i32, b: &i32) -> bool
|
fn interference_started(&mut self, _: &i32, _: &i32) {}
fn interference_stopped(&mut self, _: &i32, _: &i32) {}
}
fn main() {
/*
* Create the objects.
*/
let poss = [
Isometry3::new(Vector3::new(0.0, 0.0, 0.0), na::zero()),
Isometry3::new(Vector3::new(0.0, 0.5, 0.0), na::zero()),
Isometry3::new(Vector3::new(0.5, 0.0, 0.0), na::zero()),
Isometry3::new(Vector3::new(0.5, 0.5, 0.0), na::zero()),
];
// We will use the same shape for the four objects.
let ball = Ball::new(0.5);
/*
* Create the broad phase.
*/
let mut bf = DBVTBroadPhase::new(0.2);
// First parameter: the object bounding box.
    // Second parameter: some data (here, the id that identifies each object).
let proxy1 = bf.create_proxy(bounding_volume::aabb(&ball, &poss[0]), 0);
let proxy2 = bf.create_proxy(bounding_volume::aabb(&ball, &poss[1]), 1);
let _ = bf.create_proxy(bounding_volume::aabb(&ball, &poss[2]), 2);
let _ = bf.create_proxy(bounding_volume::aabb(&ball, &poss[3]), 3);
// Update the broad phase.
// The collision filter (first closure) prevents self-collision.
bf.update(&mut InterferenceHandler);
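    // All four AABBs overlap pairwise, so we expect C(4, 2) = 6 pairs.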
assert!(bf.num_interferences() == 6);
// Remove two objects.
bf.remove(&[proxy1, proxy2], &mut |_, _| {});
// Update the broad phase.
// The collision filter (first closure) prevents self-collision.
bf.update(&mut InterferenceHandler);
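    // Only objects 2 and 3 remain, leaving a single interference pair.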
assert!(bf.num_interferences() == 1)
}
|
{
// Prevent self-collision.
*a != *b
}
|
identifier_body
|
dbvt_broad_phase3d.rs
|
extern crate nalgebra as na;
use na::{Isometry3, Vector3};
use ncollide3d::bounding_volume;
use ncollide3d::broad_phase::{BroadPhase, BroadPhaseInterferenceHandler, DBVTBroadPhase};
use ncollide3d::shape::Ball;
struct
|
;
impl BroadPhaseInterferenceHandler<i32> for InterferenceHandler {
fn is_interference_allowed(&mut self, a: &i32, b: &i32) -> bool {
// Prevent self-collision.
        *a != *b
}
fn interference_started(&mut self, _: &i32, _: &i32) {}
fn interference_stopped(&mut self, _: &i32, _: &i32) {}
}
fn main() {
/*
* Create the objects.
*/
let poss = [
Isometry3::new(Vector3::new(0.0, 0.0, 0.0), na::zero()),
Isometry3::new(Vector3::new(0.0, 0.5, 0.0), na::zero()),
Isometry3::new(Vector3::new(0.5, 0.0, 0.0), na::zero()),
Isometry3::new(Vector3::new(0.5, 0.5, 0.0), na::zero()),
];
// We will use the same shape for the four objects.
let ball = Ball::new(0.5);
/*
* Create the broad phase.
*/
let mut bf = DBVTBroadPhase::new(0.2);
// First parameter: the object bounding box.
    // Second parameter: some data (here, the id that identifies each object).
let proxy1 = bf.create_proxy(bounding_volume::aabb(&ball, &poss[0]), 0);
let proxy2 = bf.create_proxy(bounding_volume::aabb(&ball, &poss[1]), 1);
let _ = bf.create_proxy(bounding_volume::aabb(&ball, &poss[2]), 2);
let _ = bf.create_proxy(bounding_volume::aabb(&ball, &poss[3]), 3);
// Update the broad phase.
// The collision filter (first closure) prevents self-collision.
bf.update(&mut InterferenceHandler);
assert!(bf.num_interferences() == 6);
// Remove two objects.
bf.remove(&[proxy1, proxy2], &mut |_, _| {});
// Update the broad phase.
// The collision filter (first closure) prevents self-collision.
bf.update(&mut InterferenceHandler);
assert!(bf.num_interferences() == 1)
}
|
InterferenceHandler
|
identifier_name
|
dbvt_broad_phase3d.rs
|
extern crate nalgebra as na;
use na::{Isometry3, Vector3};
use ncollide3d::bounding_volume;
use ncollide3d::broad_phase::{BroadPhase, BroadPhaseInterferenceHandler, DBVTBroadPhase};
use ncollide3d::shape::Ball;
struct InterferenceHandler;
impl BroadPhaseInterferenceHandler<i32> for InterferenceHandler {
fn is_interference_allowed(&mut self, a: &i32, b: &i32) -> bool {
|
fn interference_stopped(&mut self, _: &i32, _: &i32) {}
}
fn main() {
/*
* Create the objects.
*/
let poss = [
Isometry3::new(Vector3::new(0.0, 0.0, 0.0), na::zero()),
Isometry3::new(Vector3::new(0.0, 0.5, 0.0), na::zero()),
Isometry3::new(Vector3::new(0.5, 0.0, 0.0), na::zero()),
Isometry3::new(Vector3::new(0.5, 0.5, 0.0), na::zero()),
];
// We will use the same shape for the four objects.
let ball = Ball::new(0.5);
/*
* Create the broad phase.
*/
let mut bf = DBVTBroadPhase::new(0.2);
// First parameter: the object bounding box.
    // Second parameter: some data (here, the id that identifies each object).
let proxy1 = bf.create_proxy(bounding_volume::aabb(&ball, &poss[0]), 0);
let proxy2 = bf.create_proxy(bounding_volume::aabb(&ball, &poss[1]), 1);
let _ = bf.create_proxy(bounding_volume::aabb(&ball, &poss[2]), 2);
let _ = bf.create_proxy(bounding_volume::aabb(&ball, &poss[3]), 3);
// Update the broad phase.
// The collision filter (first closure) prevents self-collision.
bf.update(&mut InterferenceHandler);
assert!(bf.num_interferences() == 6);
// Remove two objects.
bf.remove(&[proxy1, proxy2], &mut |_, _| {});
// Update the broad phase.
// The collision filter (first closure) prevents self-collision.
bf.update(&mut InterferenceHandler);
assert!(bf.num_interferences() == 1)
}
|
// Prevent self-collision.
*a != *b
}
fn interference_started(&mut self, _: &i32, _: &i32) {}
|
random_line_split
|
knm_kernel.rs
|
use matrix::{Scalar,Mat,RoCM,Matrix};
use core::{ptr, marker::PhantomData};
use composables::{GemmNode,AlgorithmStep};
use thread_comm::{ThreadInfo};
use typenum::Unsigned;
use super::knm_kernel_wrapper::{KnmKernelWrapper,GenericKnmKernelWrapper};
pub struct KnmKernel<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, Mr: Unsigned, Nr: Unsigned>{
tmp: Matrix<T>,
_at: PhantomData<At>,
_bt: PhantomData<Bt>,
_ct: PhantomData<Ct>,
_mrt: PhantomData<Mr>,
_nrt: PhantomData<Nr>,
}
impl<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, Mr: Unsigned, Nr: Unsigned>
GemmNode<T, At, Bt, Ct> for KnmKernel<T, At, Bt, Ct, Mr, Nr> {
#[inline(always)]
default unsafe fn run(&mut self, a: &mut At, b: &mut Bt, c: &mut Ct, _thr: &ThreadInfo<T>) -> () {
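        // Portable fallback: a naive triple loop computing C += A * B one
        // element at a time; the specialized `RoCM` impl below overrides it.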
for z in 0..a.width() {
for y in 0..c.height() {
for x in 0..c.width() {
let t = a.get(y,z) * b.get(z,x) + c.get(y,x);
c.set(y, x, t);
}
}
}
}
fn new() -> Self {
KnmKernel{ tmp: <Matrix<T>>::new(Mr::to_usize(), Nr::to_usize()),
_at: PhantomData, _bt: PhantomData, _ct: PhantomData, _mrt: PhantomData, _nrt: PhantomData }
}
fn hierarchy_description() -> Vec<AlgorithmStep> {
let mut desc = Vec::new();
desc.push(AlgorithmStep::K{bsz: 4});
desc
}
}
impl<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, Mr: Unsigned, Nr: Unsigned>
GemmNode<T, At, Bt, Ct> for KnmKernel<T, At, Bt, Ct, Mr, Nr>
where At: RoCM<T>, Bt: RoCM<T>, Ct: RoCM<T>
{
#[inline(always)]
unsafe fn run(&mut self, a: &mut At, b: &mut Bt, c: &mut Ct, _thr: &ThreadInfo<T>) -> () {
debug_assert!(c.height() <= Mr::to_usize());
debug_assert!(c.width() <= Nr::to_usize());
let ap = a.get_mut_buffer();
let bp = b.get_mut_buffer();
let cp = c.get_mut_buffer();
let c_leaf_rs = c.get_leaf_rs() as isize;
let c_leaf_cs = c.get_leaf_cs() as isize;
let mut alpha = a.get_scalar() * b.get_scalar();
let mut beta = c.get_scalar();
let k = a.width() as isize;
if Ct::full_leaves() || (c.height() == Mr::to_usize() && c.width() == Nr::to_usize()) {
<KnmKernelWrapper<Mr,Nr,T>>::run(k, &mut alpha, ap, bp, &mut beta, cp, c_leaf_rs, c_leaf_cs);
}
else {
let tp = self.tmp.get_mut_buffer();
let t_rs = self.tmp.get_row_stride() as isize;
let t_cs = self.tmp.get_column_stride() as isize;
let mut zero = T::zero();
<KnmKernelWrapper<Mr,Nr,T>>::run(k, &mut alpha, ap, bp, &mut zero, tp, t_rs, t_cs);
//Add t to c
for ii in 0..c.height() as isize {
|
ptr::write(cp.offset(ii * c_leaf_rs + jj * c_leaf_cs), tau+beta*chi);
}
}
}
}
}
|
for jj in 0..c.width() as isize {
let tau = ptr::read(tp.offset(ii * t_rs + jj * t_cs));
let chi = ptr::read(cp.offset(ii * c_leaf_rs + jj * c_leaf_cs));
|
random_line_split
|
knm_kernel.rs
|
use matrix::{Scalar,Mat,RoCM,Matrix};
use core::{ptr, marker::PhantomData};
use composables::{GemmNode,AlgorithmStep};
use thread_comm::{ThreadInfo};
use typenum::Unsigned;
use super::knm_kernel_wrapper::{KnmKernelWrapper,GenericKnmKernelWrapper};
pub struct KnmKernel<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, Mr: Unsigned, Nr: Unsigned>{
tmp: Matrix<T>,
_at: PhantomData<At>,
_bt: PhantomData<Bt>,
_ct: PhantomData<Ct>,
_mrt: PhantomData<Mr>,
_nrt: PhantomData<Nr>,
}
impl<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, Mr: Unsigned, Nr: Unsigned>
GemmNode<T, At, Bt, Ct> for KnmKernel<T, At, Bt, Ct, Mr, Nr> {
#[inline(always)]
default unsafe fn run(&mut self, a: &mut At, b: &mut Bt, c: &mut Ct, _thr: &ThreadInfo<T>) -> () {
for z in 0..a.width() {
for y in 0..c.height() {
for x in 0..c.width() {
let t = a.get(y,z) * b.get(z,x) + c.get(y,x);
c.set(y, x, t);
}
}
}
}
fn new() -> Self {
KnmKernel{ tmp: <Matrix<T>>::new(Mr::to_usize(), Nr::to_usize()),
_at: PhantomData, _bt: PhantomData, _ct: PhantomData, _mrt: PhantomData, _nrt: PhantomData }
}
fn hierarchy_description() -> Vec<AlgorithmStep> {
let mut desc = Vec::new();
desc.push(AlgorithmStep::K{bsz: 4});
desc
}
}
impl<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, Mr: Unsigned, Nr: Unsigned>
GemmNode<T, At, Bt, Ct> for KnmKernel<T, At, Bt, Ct, Mr, Nr>
where At: RoCM<T>, Bt: RoCM<T>, Ct: RoCM<T>
{
#[inline(always)]
unsafe fn run(&mut self, a: &mut At, b: &mut Bt, c: &mut Ct, _thr: &ThreadInfo<T>) -> () {
debug_assert!(c.height() <= Mr::to_usize());
debug_assert!(c.width() <= Nr::to_usize());
let ap = a.get_mut_buffer();
let bp = b.get_mut_buffer();
let cp = c.get_mut_buffer();
let c_leaf_rs = c.get_leaf_rs() as isize;
let c_leaf_cs = c.get_leaf_cs() as isize;
let mut alpha = a.get_scalar() * b.get_scalar();
let mut beta = c.get_scalar();
let k = a.width() as isize;
if Ct::full_leaves() || (c.height() == Mr::to_usize() && c.width() == Nr::to_usize()) {
<KnmKernelWrapper<Mr,Nr,T>>::run(k, &mut alpha, ap, bp, &mut beta, cp, c_leaf_rs, c_leaf_cs);
}
else
|
}
}
|
{
let tp = self.tmp.get_mut_buffer();
let t_rs = self.tmp.get_row_stride() as isize;
let t_cs = self.tmp.get_column_stride() as isize;
let mut zero = T::zero();
<KnmKernelWrapper<Mr,Nr,T>>::run(k, &mut alpha, ap, bp, &mut zero, tp, t_rs, t_cs);
//Add t to c
for ii in 0..c.height() as isize {
for jj in 0..c.width() as isize {
let tau = ptr::read(tp.offset(ii * t_rs + jj * t_cs));
let chi = ptr::read(cp.offset(ii * c_leaf_rs + jj * c_leaf_cs));
ptr::write(cp.offset(ii * c_leaf_rs + jj * c_leaf_cs), tau+beta*chi);
}
}
}
|
conditional_block
|
knm_kernel.rs
|
use matrix::{Scalar,Mat,RoCM,Matrix};
use core::{ptr, marker::PhantomData};
use composables::{GemmNode,AlgorithmStep};
use thread_comm::{ThreadInfo};
use typenum::Unsigned;
use super::knm_kernel_wrapper::{KnmKernelWrapper,GenericKnmKernelWrapper};
pub struct KnmKernel<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, Mr: Unsigned, Nr: Unsigned>{
tmp: Matrix<T>,
_at: PhantomData<At>,
_bt: PhantomData<Bt>,
_ct: PhantomData<Ct>,
_mrt: PhantomData<Mr>,
_nrt: PhantomData<Nr>,
}
impl<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, Mr: Unsigned, Nr: Unsigned>
GemmNode<T, At, Bt, Ct> for KnmKernel<T, At, Bt, Ct, Mr, Nr> {
#[inline(always)]
default unsafe fn
|
(&mut self, a: &mut At, b: &mut Bt, c: &mut Ct, _thr: &ThreadInfo<T>) -> () {
for z in 0..a.width() {
for y in 0..c.height() {
for x in 0..c.width() {
let t = a.get(y,z) * b.get(z,x) + c.get(y,x);
c.set(y, x, t);
}
}
}
}
fn new() -> Self {
KnmKernel{ tmp: <Matrix<T>>::new(Mr::to_usize(), Nr::to_usize()),
_at: PhantomData, _bt: PhantomData, _ct: PhantomData, _mrt: PhantomData, _nrt: PhantomData }
}
fn hierarchy_description() -> Vec<AlgorithmStep> {
let mut desc = Vec::new();
desc.push(AlgorithmStep::K{bsz: 4});
desc
}
}
impl<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, Mr: Unsigned, Nr: Unsigned>
GemmNode<T, At, Bt, Ct> for KnmKernel<T, At, Bt, Ct, Mr, Nr>
where At: RoCM<T>, Bt: RoCM<T>, Ct: RoCM<T>
{
#[inline(always)]
unsafe fn run(&mut self, a: &mut At, b: &mut Bt, c: &mut Ct, _thr: &ThreadInfo<T>) -> () {
debug_assert!(c.height() <= Mr::to_usize());
debug_assert!(c.width() <= Nr::to_usize());
let ap = a.get_mut_buffer();
let bp = b.get_mut_buffer();
let cp = c.get_mut_buffer();
let c_leaf_rs = c.get_leaf_rs() as isize;
let c_leaf_cs = c.get_leaf_cs() as isize;
let mut alpha = a.get_scalar() * b.get_scalar();
let mut beta = c.get_scalar();
let k = a.width() as isize;
if Ct::full_leaves() || (c.height() == Mr::to_usize() && c.width() == Nr::to_usize()) {
<KnmKernelWrapper<Mr,Nr,T>>::run(k, &mut alpha, ap, bp, &mut beta, cp, c_leaf_rs, c_leaf_cs);
}
else {
let tp = self.tmp.get_mut_buffer();
let t_rs = self.tmp.get_row_stride() as isize;
let t_cs = self.tmp.get_column_stride() as isize;
let mut zero = T::zero();
<KnmKernelWrapper<Mr,Nr,T>>::run(k, &mut alpha, ap, bp, &mut zero, tp, t_rs, t_cs);
//Add t to c
for ii in 0..c.height() as isize {
for jj in 0..c.width() as isize {
let tau = ptr::read(tp.offset(ii * t_rs + jj * t_cs));
let chi = ptr::read(cp.offset(ii * c_leaf_rs + jj * c_leaf_cs));
ptr::write(cp.offset(ii * c_leaf_rs + jj * c_leaf_cs), tau+beta*chi);
}
}
}
}
}
|
run
|
identifier_name
|
knm_kernel.rs
|
use matrix::{Scalar,Mat,RoCM,Matrix};
use core::{ptr, marker::PhantomData};
use composables::{GemmNode,AlgorithmStep};
use thread_comm::{ThreadInfo};
use typenum::Unsigned;
use super::knm_kernel_wrapper::{KnmKernelWrapper,GenericKnmKernelWrapper};
pub struct KnmKernel<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, Mr: Unsigned, Nr: Unsigned>{
tmp: Matrix<T>,
_at: PhantomData<At>,
_bt: PhantomData<Bt>,
_ct: PhantomData<Ct>,
_mrt: PhantomData<Mr>,
_nrt: PhantomData<Nr>,
}
impl<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, Mr: Unsigned, Nr: Unsigned>
GemmNode<T, At, Bt, Ct> for KnmKernel<T, At, Bt, Ct, Mr, Nr> {
#[inline(always)]
default unsafe fn run(&mut self, a: &mut At, b: &mut Bt, c: &mut Ct, _thr: &ThreadInfo<T>) -> () {
for z in 0..a.width() {
for y in 0..c.height() {
for x in 0..c.width() {
let t = a.get(y,z) * b.get(z,x) + c.get(y,x);
c.set(y, x, t);
}
}
}
}
fn new() -> Self {
KnmKernel{ tmp: <Matrix<T>>::new(Mr::to_usize(), Nr::to_usize()),
_at: PhantomData, _bt: PhantomData, _ct: PhantomData, _mrt: PhantomData, _nrt: PhantomData }
}
fn hierarchy_description() -> Vec<AlgorithmStep>
|
}
impl<T: Scalar, At: Mat<T>, Bt: Mat<T>, Ct: Mat<T>, Mr: Unsigned, Nr: Unsigned>
GemmNode<T, At, Bt, Ct> for KnmKernel<T, At, Bt, Ct, Mr, Nr>
where At: RoCM<T>, Bt: RoCM<T>, Ct: RoCM<T>
{
#[inline(always)]
unsafe fn run(&mut self, a: &mut At, b: &mut Bt, c: &mut Ct, _thr: &ThreadInfo<T>) -> () {
debug_assert!(c.height() <= Mr::to_usize());
debug_assert!(c.width() <= Nr::to_usize());
let ap = a.get_mut_buffer();
let bp = b.get_mut_buffer();
let cp = c.get_mut_buffer();
let c_leaf_rs = c.get_leaf_rs() as isize;
let c_leaf_cs = c.get_leaf_cs() as isize;
let mut alpha = a.get_scalar() * b.get_scalar();
let mut beta = c.get_scalar();
let k = a.width() as isize;
if Ct::full_leaves() || (c.height() == Mr::to_usize() && c.width() == Nr::to_usize()) {
<KnmKernelWrapper<Mr,Nr,T>>::run(k, &mut alpha, ap, bp, &mut beta, cp, c_leaf_rs, c_leaf_cs);
}
else {
let tp = self.tmp.get_mut_buffer();
let t_rs = self.tmp.get_row_stride() as isize;
let t_cs = self.tmp.get_column_stride() as isize;
let mut zero = T::zero();
<KnmKernelWrapper<Mr,Nr,T>>::run(k, &mut alpha, ap, bp, &mut zero, tp, t_rs, t_cs);
//Add t to c
for ii in 0..c.height() as isize {
for jj in 0..c.width() as isize {
let tau = ptr::read(tp.offset(ii * t_rs + jj * t_cs));
let chi = ptr::read(cp.offset(ii * c_leaf_rs + jj * c_leaf_cs));
ptr::write(cp.offset(ii * c_leaf_rs + jj * c_leaf_cs), tau+beta*chi);
}
}
}
}
}
|
{
let mut desc = Vec::new();
desc.push(AlgorithmStep::K{bsz: 4});
desc
}
|
identifier_body
|
sha2.rs
|
/// A FixedBuffer of 64 bytes useful for implementing Sha256, which has a 64-byte blocksize.
struct FixedBuffer64 {
buffer: [u8,..64],
buffer_idx: uint,
}
impl FixedBuffer64 {
/// Create a new FixedBuffer64
fn new() -> FixedBuffer64 {
return FixedBuffer64 {
buffer: [0u8,..64],
buffer_idx: 0
};
}
}
impl FixedBuffer for FixedBuffer64 {
fn input(&mut self, input: &[u8], func: |&[u8]|) {
let mut i = 0;
let size = self.size();
// If there is already data in the buffer, copy as much as we can into it and process
// the data if the buffer becomes full.
        if self.buffer_idx != 0 {
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
self.buffer[mut self.buffer_idx..size],
input[..buffer_remaining]);
self.buffer_idx = 0;
func(&self.buffer);
i += buffer_remaining;
} else {
copy_memory(
self.buffer[mut self.buffer_idx..self.buffer_idx + input.len()],
input);
self.buffer_idx += input.len();
return;
}
}
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(input[i..i + size]);
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
self.buffer[mut..input_remaining],
input[i..]);
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: uint) {
assert!(idx >= self.buffer_idx);
self.buffer[mut self.buffer_idx..idx].set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8] {
self.buffer_idx += len;
return self.buffer[mut self.buffer_idx - len..self.buffer_idx];
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return self.buffer[..64];
}
fn position(&self) -> uint { self.buffer_idx }
fn remaining(&self) -> uint { 64 - self.buffer_idx }
fn size(&self) -> uint { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding(&mut self, rem: uint, func: |&[u8]|);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding(&mut self, rem: uint, func: |&[u8]|) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
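///
/// # Examples
///
/// A small sketch of typical use, in terms of the `Sha256` type defined
/// later in this module:
///
/// ```ignore
/// let mut sh = Sha256::new();
/// sh.input_str("The quick brown fox jumps over the lazy dog");
/// let hex = sh.result_str();
/// sh.reset(); // required before supplying more data
/// ```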
pub trait Digest {
/// Provide message data.
///
/// # Arguments
///
/// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> uint;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
let mut buf = Vec::from_elem((self.output_bits()+7)/8, 0u8);
self.result(buf.as_mut_slice());
buf
}
/// Convenience function that retrieves the result of a digest as a
/// String in hexadecimal format.
fn result_str(&mut self) -> String {
self.result_bytes().to_hex().to_string()
}
}
// A structure that represents the state of a digest computation for the SHA-2 256 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32,..8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32,..8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
}
fn sigma1(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0u32,..64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round( ($t:expr) => (
w[$t] = sigma1(w[$t - 2]) + w[$t - 7] + sigma0(w[$t - 15]) + w[$t - 16];
)
)
macro_rules! sha2_round(
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H += sum1($E) + ch($E, $F, $G) + $K[$t] + w[$t];
$D += $H;
$H += sum0($A) + maj($A, $B, $C);
}
)
)
read_u32v_be(w[mut 0..16], data);
// Putting the message schedule inside the same loop as the round calculations allows for
// the compiler to generate better code.
for t in range_step(0u, 48, 8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in range_step(48u, 64, 8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 += a;
self.h1 += b;
self.h2 += c;
self.h3 += d;
self.h4 += e;
self.h5 += f;
self.h6 += g;
self.h7 += h;
}
}
static K32: [u32,..64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32,..8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32,..8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
        assert!(!self.finished);
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
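        // Append the total message length in bits as a big-endian 64-bit
        // value, as required by the SHA-256 padding scheme.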
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
}
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(out[mut 0..4], self.engine.state.h0);
write_u32_be(out[mut 4..8], self.engine.state.h1);
write_u32_be(out[mut 8..12], self.engine.state.h2);
write_u32_be(out[mut 12..16], self.engine.state.h3);
write_u32_be(out[mut 16..20], self.engine.state.h4);
write_u32_be(out[mut 20..24], self.engine.state.h5);
write_u32_be(out[mut 24..28], self.engine.state.h6);
write_u32_be(out[mut 28..32], self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> uint { 256 }
}
static H256: [u32,..8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
extern crate rand;
use super::{Digest, Sha256, FixedBuffer};
use self::rand::isaac::IsaacRng;
use self::rand::Rng;
use serialize::hex::FromHex;
use std::num::Int;
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits::<u64>(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_fail]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits::<u64>(Int::max_value(), 1);
}
struct Test {
input: String,
output_str: String,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests.iter() {
sh.reset();
sh.input_str(t.input.as_slice());
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests.iter() {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0u {
let take = (left + 1u) / 2u;
sh.input_str(t.input
.slice(len - left, take + len - left));
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_string(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to_string()
},
Test {
input: "The quick brown fox jumps over the lazy \
dog".to_string(),
output_str: "d7a8fbb307d7809469ca\
9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592".to_string()
},
Test {
input: "The quick brown fox jumps over the lazy \
dog.".to_string(),
output_str: "ef537f25c895bfa78252\
6529a9b63d97aa631564d5d789c2b765448c8635fb6c".to_string()
});
let tests = wikipedia_tests;
let mut sh = box Sha256::new();
test_hash(&mut *sh, tests.as_slice());
}
/// Feed 1,000,000 'a's into the digest with varying input sizes and check that the result is
/// correct.
fn test_digest_1million_random<D: Digest>(digest: &mut D, blocksize: uint, expected: &str)
|
{
let total_size = 1000000;
let buffer = Vec::from_elem(blocksize * 2, 'a' as u8);
let mut rng = IsaacRng::new_unseeded();
let mut count = 0;
digest.reset();
while count < total_size {
let next: uint = rng.gen_range(0, 2 * blocksize + 1);
let remaining = total_size - count;
let size = if next > remaining { remaining } else { next };
digest.input(buffer.slice_to(size));
count += size;
}
let result_str = digest.result_str();
let result_bytes = digest.result_bytes();
assert_eq!(expected, result_str.as_slice());
|
identifier_body
|
|
sha2.rs
|
the
/// high-order value and the 2nd item is the low order value.
fn to_bits(self) -> (Self, Self);
}
impl ToBits for u64 {
fn to_bits(self) -> (u64, u64) {
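        // Multiplying by 8 converts bytes to bits; the 3 bits shifted out
        // the top become the high word: e.g. `10u64.to_bits() == (0, 80)`.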
return (self >> 61, self << 3);
}
}
/// Adds the specified number of bytes to the bit count. panic!() if this would cause numeric
/// overflow.
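/// For example, `add_bytes_to_bits(100u64, 10u64)` returns `180` (100 bits
/// plus 10 bytes, i.e. 80 bits), as exercised by the unit test below.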
fn add_bytes_to_bits<T: Int + ToBits>(bits: T, bytes: T) -> T {
let (new_high_bits, new_low_bits) = bytes.to_bits();
if new_high_bits > Int::zero()
|
match bits.checked_add(new_low_bits) {
Some(x) => return x,
None => panic!("numeric overflow occurred.")
}
}
/// A FixedBuffer, like its name implies, is a fixed size buffer. When the buffer becomes full, it
/// must be processed. The input() method takes care of processing and then clearing the buffer
/// automatically. However, other methods do not and require the caller to process the buffer. Any
/// method that modifies the buffer directly or provides the caller with bytes that can be modified
/// results in those bytes being marked as used by the buffer.
trait FixedBuffer {
/// Input a vector of bytes. If the buffer becomes full, process it with the provided
/// function and then clear the buffer.
fn input(&mut self, input: &[u8], func: |&[u8]|);
/// Reset the buffer.
fn reset(&mut self);
/// Zero the buffer up until the specified index. The buffer position currently must not be
/// greater than that index.
fn zero_until(&mut self, idx: uint);
/// Get a slice of the buffer of the specified size. There must be at least that many bytes
/// remaining in the buffer.
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8];
/// Get the current buffer. The buffer must already be full. This clears the buffer as well.
fn full_buffer<'s>(&'s mut self) -> &'s [u8];
/// Get the current position of the buffer.
fn position(&self) -> uint;
/// Get the number of bytes remaining in the buffer until it is full.
fn remaining(&self) -> uint;
/// Get the size of the buffer
fn size(&self) -> uint;
}
/// A FixedBuffer of 64 bytes useful for implementing Sha256, which has a 64-byte blocksize.
struct FixedBuffer64 {
buffer: [u8,..64],
buffer_idx: uint,
}
impl FixedBuffer64 {
/// Create a new FixedBuffer64
fn new() -> FixedBuffer64 {
return FixedBuffer64 {
buffer: [0u8,..64],
buffer_idx: 0
};
}
}
impl FixedBuffer for FixedBuffer64 {
fn input(&mut self, input: &[u8], func: |&[u8]|) {
let mut i = 0;
let size = self.size();
// If there is already data in the buffer, copy as much as we can into it and process
// the data if the buffer becomes full.
        if self.buffer_idx != 0 {
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
self.buffer[mut self.buffer_idx..size],
input[..buffer_remaining]);
self.buffer_idx = 0;
func(&self.buffer);
i += buffer_remaining;
} else {
copy_memory(
self.buffer[mut self.buffer_idx..self.buffer_idx + input.len()],
input);
self.buffer_idx += input.len();
return;
}
}
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(input[i..i + size]);
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
self.buffer[mut..input_remaining],
input[i..]);
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: uint) {
assert!(idx >= self.buffer_idx);
self.buffer[mut self.buffer_idx..idx].set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8] {
self.buffer_idx += len;
return self.buffer[mut self.buffer_idx - len..self.buffer_idx];
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return self.buffer[..64];
}
fn position(&self) -> uint { self.buffer_idx }
fn remaining(&self) -> uint { 64 - self.buffer_idx }
fn size(&self) -> uint { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding(&mut self, rem: uint, func: |&[u8]|);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding(&mut self, rem: uint, func: |&[u8]|) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
pub trait Digest {
/// Provide message data.
///
/// # Arguments
///
/// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> uint;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
let mut buf = Vec::from_elem((self.output_bits()+7)/8, 0u8);
self.result(buf.as_mut_slice());
buf
}
/// Convenience function that retrieves the result of a digest as a
/// String in hexadecimal format.
fn result_str(&mut self) -> String {
self.result_bytes().to_hex().to_string()
}
}
// A structure that represents the state of a digest computation for the SHA-2 256 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32,..8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32,..8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
}
fn sigma1(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0u32,..64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round( ($t:expr) => (
w[$t] = sigma1(w[$t - 2]) + w[$t - 7] + sigma0(w[$t - 15]) + w[$t - 16];
)
)
macro_rules! sha2_round(
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H += sum1($E) + ch($E, $F, $G) + $K[$t] + w[$t];
$D += $H;
$H += sum0($A) + maj($A, $B, $C);
}
)
)
read_u32v_be(w[mut 0..16], data);
// Putting the message schedule inside the same loop as the round calculations allows for
// the compiler to generate better code.
for t in range_step(0u, 48, 8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in range_step(48u, 64, 8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 += a;
self.h1 += b;
self.h2 += c;
self.h3 += d;
self.h4 += e;
self.h5 += f;
self.h6 += g;
self.h7 += h;
}
}
static K32: [u32,..64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32,..8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32,..8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
assert!(!self.finished);
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
}
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(out[mut 0..4], self.engine.state.h0);
write_u32_be(out[mut 4..8], self.engine.state.h1);
write_u32_be(out[mut 8..12], self.engine.state.h2);
write_u32_be(out[mut 12..16], self.engine.state.h3);
write_u32_be(out[mut 16..20], self.engine.state.h4);
write_u32_be(out[mut 20..24], self.engine.state.h5);
write_u32_be(out[mut 24..28], self.engine.state.h6);
write_u32_be(out[mut 28..32], self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> uint { 256 }
}
static H256: [u32,..8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
extern crate rand;
use super::{Digest, Sha256, FixedBuffer};
use self::rand::isaac::IsaacRng;
use self::rand::Rng;
use serialize::hex::FromHex;
use std::num::Int;
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits::<u64>(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_fail]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits::<u64>(Int::max_value(), 1);
}
struct Test {
input: String,
output_str: String,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests.iter() {
sh.reset();
sh.input_str(t.input.as_slice());
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests.iter() {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0u {
let take = (left + 1u) / 2u;
sh.input_str(t.input
.slice(len - left, take + len - left));
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_string(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to_string()
},
Test {
input: "The quick brown fox jumps over the lazy \
dog".to_string
|
{
panic!("numeric overflow occurred.")
}
|
conditional_block
|
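The sha2.rs row above ends a pre-1.0, rust-crypto-style SHA-256 implementation whose Digest trait splits hashing into input() and result(). For orientation, here is a minimal sketch of the same streaming pattern written against the present-day sha2 crate (an assumed dependency, sha2 = "0.10", not part of this dataset row); update() and finalize() play the roles of input() and result():

// Sketch only: uses the modern `sha2` crate API, not the Digest trait
// defined in the row above.
use sha2::{Digest, Sha256};

fn main() {
    // Feed the message all at once.
    let mut h = Sha256::new();
    h.update(b"The quick brown fox jumps over the lazy dog");
    let once = h.finalize();

    // Feed the same message in pieces; the digest must agree,
    // mirroring the two loops in the row's test_hash() helper.
    let mut h = Sha256::new();
    h.update(b"The quick brown fox ");
    h.update(b"jumps over the lazy dog");
    assert_eq!(once, h.finalize());

    for b in once.iter() {
        print!("{:02x}", b);
    }
    println!();
}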
sha2.rs
|
is the
/// high-order value and the 2nd item is the low order value.
fn to_bits(self) -> (Self, Self);
}
impl ToBits for u64 {
fn to_bits(self) -> (u64, u64) {
return (self >> 61, self << 3);
}
}
/// Adds the specified number of bytes to the bit count. Panics if this would cause numeric
/// overflow.
fn add_bytes_to_bits<T: Int + ToBits>(bits: T, bytes: T) -> T {
let (new_high_bits, new_low_bits) = bytes.to_bits();
if new_high_bits > Int::zero() {
panic!("numeric overflow occurred.")
}
match bits.checked_add(new_low_bits) {
Some(x) => return x,
None => panic!("numeric overflow occurred.")
}
}
/// A FixedBuffer, like its name implies, is a fixed size buffer. When the buffer becomes full, it
/// must be processed. The input() method takes care of processing and then clearing the buffer
/// automatically. However, other methods do not and require the caller to process the buffer. Any
/// method that modifies the buffer directly or provides the caller with bytes that can be modified
/// results in those bytes being marked as used by the buffer.
trait FixedBuffer {
/// Input a vector of bytes. If the buffer becomes full, process it with the provided
/// function and then clear the buffer.
fn input(&mut self, input: &[u8], func: |&[u8]|);
/// Reset the buffer.
fn reset(&mut self);
/// Zero the buffer up until the specified index. The buffer position currently must not be
/// greater than that index.
fn zero_until(&mut self, idx: uint);
/// Get a slice of the buffer of the specified size. There must be at least that many bytes
/// remaining in the buffer.
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8];
/// Get the current buffer. The buffer must already be full. This clears the buffer as well.
fn full_buffer<'s>(&'s mut self) -> &'s [u8];
/// Get the current position of the buffer.
fn position(&self) -> uint;
/// Get the number of bytes remaining in the buffer until it is full.
fn remaining(&self) -> uint;
/// Get the size of the buffer
fn size(&self) -> uint;
}
/// A FixedBuffer of 64 bytes, useful for implementing Sha256, which has a 64-byte block size.
struct FixedBuffer64 {
buffer: [u8,..64],
buffer_idx: uint,
}
impl FixedBuffer64 {
/// Create a new FixedBuffer64
fn new() -> FixedBuffer64 {
return FixedBuffer64 {
buffer: [0u8,..64],
buffer_idx: 0
};
}
}
impl FixedBuffer for FixedBuffer64 {
fn input(&mut self, input: &[u8], func: |&[u8]|) {
let mut i = 0;
let size = self.size();
// If there is already data in the buffer, copy as much as we can into it and process
// the data if the buffer becomes full.
if self.buffer_idx != 0 {
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
self.buffer[mut self.buffer_idx..size],
input[..buffer_remaining]);
self.buffer_idx = 0;
func(&self.buffer);
i += buffer_remaining;
} else {
copy_memory(
self.buffer[mut self.buffer_idx..self.buffer_idx + input.len()],
input);
self.buffer_idx += input.len();
return;
}
}
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(input[i..i + size]);
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
self.buffer[mut..input_remaining],
input[i..]);
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: uint) {
assert!(idx >= self.buffer_idx);
self.buffer[mut self.buffer_idx..idx].set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8] {
self.buffer_idx += len;
return self.buffer[mut self.buffer_idx - len..self.buffer_idx];
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return self.buffer[..64];
}
fn position(&self) -> uint { self.buffer_idx }
fn remaining(&self) -> uint { 64 - self.buffer_idx }
fn size(&self) -> uint { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding(&mut self, rem: uint, func: |&[u8]|);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding(&mut self, rem: uint, func: |&[u8]|) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
pub trait Digest {
/// Provide message data.
///
/// # Arguments
///
/// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> uint;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn result_bytes(&mut self) -> Vec<u8> {
let mut buf = Vec::from_elem((self.output_bits()+7)/8, 0u8);
self.result(buf.as_mut_slice());
buf
}
/// Convenience function that retrieves the result of a digest as a
/// String in hexadecimal format.
fn result_str(&mut self) -> String {
self.result_bytes().to_hex().to_string()
|
// A structure that represents the state of a digest computation for the SHA-2 256 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32,..8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32,..8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
}
fn sigma1(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0u32,..64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round( ($t:expr) => (
w[$t] = sigma1(w[$t - 2]) + w[$t - 7] + sigma0(w[$t - 15]) + w[$t - 16];
)
)
macro_rules! sha2_round(
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H += sum1($E) + ch($E, $F, $G) + $K[$t] + w[$t];
$D += $H;
$H += sum0($A) + maj($A, $B, $C);
}
)
)
read_u32v_be(w[mut 0..16], data);
// Putting the message schedule inside the same loop as the round calculations allows
// the compiler to generate better code.
for t in range_step(0u, 48, 8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in range_step(48u, 64, 8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 += a;
self.h1 += b;
self.h2 += c;
self.h3 += d;
self.h4 += e;
self.h5 += f;
self.h6 += g;
self.h7 += h;
}
}
static K32: [u32,..64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32,..8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32,..8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
assert!(!self.finished);
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
}
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(out[mut 0..4], self.engine.state.h0);
write_u32_be(out[mut 4..8], self.engine.state.h1);
write_u32_be(out[mut 8..12], self.engine.state.h2);
write_u32_be(out[mut 12..16], self.engine.state.h3);
write_u32_be(out[mut 16..20], self.engine.state.h4);
write_u32_be(out[mut 20..24], self.engine.state.h5);
write_u32_be(out[mut 24..28], self.engine.state.h6);
write_u32_be(out[mut 28..32], self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> uint { 256 }
}
static H256: [u32,..8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
extern crate rand;
use super::{Digest, Sha256, FixedBuffer};
use self::rand::isaac::IsaacRng;
use self::rand::Rng;
use serialize::hex::FromHex;
use std::num::Int;
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits::<u64>(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_fail]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits::<u64>(Int::max_value(), 1);
}
struct Test {
input: String,
output_str: String,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests.iter() {
sh.reset();
sh.input_str(t.input.as_slice());
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests.iter() {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0u {
let take = (left + 1u) / 2u;
sh.input_str(t.input
.slice(len - left, take + len - left));
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_string(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to_string()
},
Test {
input: "The quick brown fox jumps over the lazy \
dog".to_string(),
|
}
}
|
random_line_split
|
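The sum0/sum1/sigma0/sigma1 helpers in these rows spell out every rotation as a shift-or pair, e.g. ((x >> 2) | (x << 30)). A small self-checking sketch (plain Rust, no dependencies) confirming that those pairs are exactly 32-bit right-rotations, which is how the same helpers are usually written today:

// Each ((x >> n) | (x << (32 - n))) in the rows above is a right-rotation;
// u32::rotate_right expresses the same thing directly.
fn sum0(x: u32) -> u32 {
    x.rotate_right(2) ^ x.rotate_right(13) ^ x.rotate_right(22)
}

fn sigma0(x: u32) -> u32 {
    // The last term is a plain shift, not a rotation, per the SHA-256 spec.
    x.rotate_right(7) ^ x.rotate_right(18) ^ (x >> 3)
}

fn main() {
    for x in [0u32, 1, 0xdead_beef, u32::MAX] {
        assert_eq!(
            sum0(x),
            ((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
        );
        assert_eq!(
            sigma0(x),
            ((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
        );
    }
    println!("rotation identities hold");
}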
sha2.rs
|
the
/// high-order value and the 2nd item is the low order value.
fn to_bits(self) -> (Self, Self);
}
impl ToBits for u64 {
fn to_bits(self) -> (u64, u64) {
return (self >> 61, self << 3);
}
}
/// Adds the specified number of bytes to the bit count. Panics if this would cause numeric
/// overflow.
fn add_bytes_to_bits<T: Int + ToBits>(bits: T, bytes: T) -> T {
let (new_high_bits, new_low_bits) = bytes.to_bits();
if new_high_bits > Int::zero() {
panic!("numeric overflow occurred.")
}
match bits.checked_add(new_low_bits) {
Some(x) => return x,
None => panic!("numeric overflow occurred.")
}
}
/// A FixedBuffer, like its name implies, is a fixed size buffer. When the buffer becomes full, it
/// must be processed. The input() method takes care of processing and then clearing the buffer
/// automatically. However, other methods do not and require the caller to process the buffer. Any
/// method that modifies the buffer directly or provides the caller with bytes that can be modified
/// results in those bytes being marked as used by the buffer.
trait FixedBuffer {
/// Input a vector of bytes. If the buffer becomes full, process it with the provided
/// function and then clear the buffer.
fn input(&mut self, input: &[u8], func: |&[u8]|);
/// Reset the buffer.
fn reset(&mut self);
/// Zero the buffer up until the specified index. The buffer position currently must not be
/// greater than that index.
fn zero_until(&mut self, idx: uint);
/// Get a slice of the buffer of the specified size. There must be at least that many bytes
/// remaining in the buffer.
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8];
/// Get the current buffer. The buffer must already be full. This clears the buffer as well.
fn full_buffer<'s>(&'s mut self) -> &'s [u8];
/// Get the current position of the buffer.
fn position(&self) -> uint;
/// Get the number of bytes remaining in the buffer until it is full.
fn remaining(&self) -> uint;
/// Get the size of the buffer
fn size(&self) -> uint;
}
/// A FixedBuffer of 64 bytes, useful for implementing Sha256, which has a 64-byte block size.
struct FixedBuffer64 {
buffer: [u8,..64],
buffer_idx: uint,
}
impl FixedBuffer64 {
/// Create a new FixedBuffer64
fn new() -> FixedBuffer64 {
return FixedBuffer64 {
buffer: [0u8,..64],
buffer_idx: 0
};
}
}
impl FixedBuffer for FixedBuffer64 {
fn input(&mut self, input: &[u8], func: |&[u8]|) {
let mut i = 0;
let size = self.size();
// If there is already data in the buffer, copy as much as we can into it and process
// the data if the buffer becomes full.
if self.buffer_idx != 0 {
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
self.buffer[mut self.buffer_idx..size],
input[..buffer_remaining]);
self.buffer_idx = 0;
func(&self.buffer);
i += buffer_remaining;
} else {
copy_memory(
self.buffer[mut self.buffer_idx..self.buffer_idx + input.len()],
input);
self.buffer_idx += input.len();
return;
}
}
// While we have at least a full buffer size chunk's worth of data, process that data
// without copying it into the buffer
while input.len() - i >= size {
func(input[i..i + size]);
i += size;
}
// Copy any input data into the buffer. At this point in the method, the amount of
// data left in the input vector will be less than the buffer size and the buffer will
// be empty.
let input_remaining = input.len() - i;
copy_memory(
self.buffer[mut..input_remaining],
input[i..]);
self.buffer_idx += input_remaining;
}
fn reset(&mut self) {
self.buffer_idx = 0;
}
fn zero_until(&mut self, idx: uint) {
assert!(idx >= self.buffer_idx);
self.buffer[mut self.buffer_idx..idx].set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8] {
self.buffer_idx += len;
return self.buffer[mut self.buffer_idx - len..self.buffer_idx];
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
return self.buffer[..64];
}
fn position(&self) -> uint { self.buffer_idx }
fn remaining(&self) -> uint { 64 - self.buffer_idx }
fn size(&self) -> uint { 64 }
}
/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct.
trait StandardPadding {
/// Add padding to the buffer. The buffer must not be full when this method is called and is
/// guaranteed to have exactly rem remaining bytes when it returns. If there are not at least
/// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled
/// with zeros again until only rem bytes are remaining.
fn standard_padding(&mut self, rem: uint, func: |&[u8]|);
}
impl <T: FixedBuffer> StandardPadding for T {
fn standard_padding(&mut self, rem: uint, func: |&[u8]|) {
let size = self.size();
self.next(1)[0] = 128;
if self.remaining() < rem {
self.zero_until(size);
func(self.full_buffer());
}
self.zero_until(size - rem);
}
}
/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2
/// family of digest functions.
pub trait Digest {
/// Provide message data.
///
/// # Arguments
///
/// * input - A vector of message data
fn input(&mut self, input: &[u8]);
/// Retrieve the digest result. This method may be called multiple times.
///
/// # Arguments
///
/// * out - the vector to hold the result. Must be large enough to contain output_bits().
fn result(&mut self, out: &mut [u8]);
/// Reset the digest. This method must be called after result() and before supplying more
/// data.
fn reset(&mut self);
/// Get the output size in bits.
fn output_bits(&self) -> uint;
/// Convenience function that feeds a string into a digest.
///
/// # Arguments
///
/// * `input` The string to feed into the digest
fn input_str(&mut self, input: &str) {
self.input(input.as_bytes());
}
/// Convenience function that retrieves the result of a digest as a
/// newly allocated vec of bytes.
fn
|
(&mut self) -> Vec<u8> {
let mut buf = Vec::from_elem((self.output_bits()+7)/8, 0u8);
self.result(buf.as_mut_slice());
buf
}
/// Convenience function that retrieves the result of a digest as a
/// String in hexadecimal format.
fn result_str(&mut self) -> String {
self.result_bytes().to_hex().to_string()
}
}
// A structure that represents the state of a digest computation for the SHA-2 256 family of digest
// functions
struct Engine256State {
h0: u32,
h1: u32,
h2: u32,
h3: u32,
h4: u32,
h5: u32,
h6: u32,
h7: u32,
}
impl Engine256State {
fn new(h: &[u32,..8]) -> Engine256State {
return Engine256State {
h0: h[0],
h1: h[1],
h2: h[2],
h3: h[3],
h4: h[4],
h5: h[5],
h6: h[6],
h7: h[7]
};
}
fn reset(&mut self, h: &[u32,..8]) {
self.h0 = h[0];
self.h1 = h[1];
self.h2 = h[2];
self.h3 = h[3];
self.h4 = h[4];
self.h5 = h[5];
self.h6 = h[6];
self.h7 = h[7];
}
fn process_block(&mut self, data: &[u8]) {
fn ch(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ ((!x) & z))
}
fn maj(x: u32, y: u32, z: u32) -> u32 {
((x & y) ^ (x & z) ^ (y & z))
}
fn sum0(x: u32) -> u32 {
((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10))
}
fn sum1(x: u32) -> u32 {
((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7))
}
fn sigma0(x: u32) -> u32 {
((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3)
}
fn sigma1(x: u32) -> u32 {
((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10)
}
let mut a = self.h0;
let mut b = self.h1;
let mut c = self.h2;
let mut d = self.h3;
let mut e = self.h4;
let mut f = self.h5;
let mut g = self.h6;
let mut h = self.h7;
let mut w = [0u32,..64];
// Sha-512 and Sha-256 use basically the same calculations which are implemented
// by these macros. Inlining the calculations seems to result in better generated code.
macro_rules! schedule_round( ($t:expr) => (
w[$t] = sigma1(w[$t - 2]) + w[$t - 7] + sigma0(w[$t - 15]) + w[$t - 16];
)
)
macro_rules! sha2_round(
($A:ident, $B:ident, $C:ident, $D:ident,
$E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => (
{
$H += sum1($E) + ch($E, $F, $G) + $K[$t] + w[$t];
$D += $H;
$H += sum0($A) + maj($A, $B, $C);
}
)
)
read_u32v_be(w[mut 0..16], data);
// Putting the message schedule inside the same loop as the round calculations allows
// the compiler to generate better code.
for t in range_step(0u, 48, 8) {
schedule_round!(t + 16);
schedule_round!(t + 17);
schedule_round!(t + 18);
schedule_round!(t + 19);
schedule_round!(t + 20);
schedule_round!(t + 21);
schedule_round!(t + 22);
schedule_round!(t + 23);
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
for t in range_step(48u, 64, 8) {
sha2_round!(a, b, c, d, e, f, g, h, K32, t);
sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1);
sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2);
sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3);
sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4);
sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5);
sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6);
sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7);
}
self.h0 += a;
self.h1 += b;
self.h2 += c;
self.h3 += d;
self.h4 += e;
self.h5 += f;
self.h6 += g;
self.h7 += h;
}
}
static K32: [u32,..64] = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
];
// A structure that keeps track of the state of the Sha-256 operation and contains the logic
// necessary to perform the final calculations.
struct Engine256 {
length_bits: u64,
buffer: FixedBuffer64,
state: Engine256State,
finished: bool,
}
impl Engine256 {
fn new(h: &[u32,..8]) -> Engine256 {
return Engine256 {
length_bits: 0,
buffer: FixedBuffer64::new(),
state: Engine256State::new(h),
finished: false
}
}
fn reset(&mut self, h: &[u32,..8]) {
self.length_bits = 0;
self.buffer.reset();
self.state.reset(h);
self.finished = false;
}
fn input(&mut self, input: &[u8]) {
assert!(!self.finished);
// Assumes that input.len() can be converted to u64 without overflow
self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64);
let self_state = &mut self.state;
self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) });
}
fn finish(&mut self) {
if self.finished {
return;
}
let self_state = &mut self.state;
self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) });
write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 );
write_u32_be(self.buffer.next(4), self.length_bits as u32);
self_state.process_block(self.buffer.full_buffer());
self.finished = true;
}
}
/// The SHA-256 hash algorithm
pub struct Sha256 {
engine: Engine256
}
impl Sha256 {
/// Construct a new instance of a SHA-256 digest.
pub fn new() -> Sha256 {
Sha256 {
engine: Engine256::new(&H256)
}
}
}
impl Digest for Sha256 {
fn input(&mut self, d: &[u8]) {
self.engine.input(d);
}
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
write_u32_be(out[mut 0..4], self.engine.state.h0);
write_u32_be(out[mut 4..8], self.engine.state.h1);
write_u32_be(out[mut 8..12], self.engine.state.h2);
write_u32_be(out[mut 12..16], self.engine.state.h3);
write_u32_be(out[mut 16..20], self.engine.state.h4);
write_u32_be(out[mut 20..24], self.engine.state.h5);
write_u32_be(out[mut 24..28], self.engine.state.h6);
write_u32_be(out[mut 28..32], self.engine.state.h7);
}
fn reset(&mut self) {
self.engine.reset(&H256);
}
fn output_bits(&self) -> uint { 256 }
}
static H256: [u32,..8] = [
0x6a09e667,
0xbb67ae85,
0x3c6ef372,
0xa54ff53a,
0x510e527f,
0x9b05688c,
0x1f83d9ab,
0x5be0cd19
];
#[cfg(test)]
mod tests {
extern crate rand;
use super::{Digest, Sha256, FixedBuffer};
use self::rand::isaac::IsaacRng;
use self::rand::Rng;
use serialize::hex::FromHex;
use std::num::Int;
// A normal addition - no overflow occurs
#[test]
fn test_add_bytes_to_bits_ok() {
assert!(super::add_bytes_to_bits::<u64>(100, 10) == 180);
}
// A simple failure case - adding 1 to the max value
#[test]
#[should_fail]
fn test_add_bytes_to_bits_overflow() {
super::add_bytes_to_bits::<u64>(Int::max_value(), 1);
}
struct Test {
input: String,
output_str: String,
}
fn test_hash<D: Digest>(sh: &mut D, tests: &[Test]) {
// Test that it works when accepting the message all at once
for t in tests.iter() {
sh.reset();
sh.input_str(t.input.as_slice());
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
// Test that it works when accepting the message in pieces
for t in tests.iter() {
sh.reset();
let len = t.input.len();
let mut left = len;
while left > 0u {
let take = (left + 1u) / 2u;
sh.input_str(t.input
.slice(len - left, take + len - left));
left = left - take;
}
let out_str = sh.result_str();
assert!(out_str == t.output_str);
}
}
#[test]
fn test_sha256() {
// Examples from wikipedia
let wikipedia_tests = vec!(
Test {
input: "".to_string(),
output_str: "e3b0c44298fc1c149afb\
f4c8996fb92427ae41e4649b934ca495991b7852b855".to_string()
},
Test {
input: "The quick brown fox jumps over the lazy \
dog".to_string
|
result_bytes
|
identifier_name
|
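standard_padding() plus the two write_u32_be calls in finish() implement the usual Merkle-Damgard length padding: a 0x80 marker byte, zeros up to 8 bytes before a block boundary, then the 64-bit big-endian bit length. A standalone sketch of that layout (modern Rust; it assumes the whole message fits in memory, whereas the engine above streams through a fixed buffer):

// Build the SHA-256 padding for `msg` exactly as finish() does above:
// append 0x80, zero-fill until the length is 56 mod 64, then append the
// message length in bits, big-endian.
fn pad(msg: &[u8]) -> Vec<u8> {
    let bit_len = (msg.len() as u64) * 8;
    let mut out = msg.to_vec();
    out.push(0x80);
    while out.len() % 64 != 56 {
        out.push(0);
    }
    out.extend_from_slice(&bit_len.to_be_bytes());
    out
}

fn main() {
    let padded = pad(b"abc");
    assert_eq!(padded.len() % 64, 0);                  // whole 64-byte blocks
    assert_eq!(padded[3], 0x80);                       // the marker byte
    assert_eq!(&padded[56..64], &24u64.to_be_bytes()); // 3 bytes = 24 bits
    println!("{} block(s)", padded.len() / 64);
}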
unboxed-closures-drop.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(path_statements)]
#![allow(dead_code)]
// A battery of tests to ensure destructors of unboxed closure environments
// run at the right times.
static mut DROP_COUNT: usize = 0;
fn drop_count() -> usize {
unsafe {
DROP_COUNT
}
}
struct Droppable {
x: isize,
}
impl Droppable {
fn new() -> Droppable {
Droppable {
x: 1
}
}
}
impl Drop for Droppable {
fn drop(&mut self) {
unsafe {
DROP_COUNT += 1
}
}
}
fn a<F:Fn(isize, isize) -> isize>(f: F) -> isize {
f(1, 2)
}
fn b<F:FnMut(isize, isize) -> isize>(mut f: F) -> isize {
f(3, 4)
}
fn c<F:FnOnce(isize, isize) -> isize>(f: F) -> isize {
f(5, 6)
}
fn test_fn() {
{
a(move |a: isize, b| { a + b });
}
assert_eq!(drop_count(), 0);
{
let z = &Droppable::new();
a(move |a: isize, b| { z; a + b });
assert_eq!(drop_count(), 0);
}
assert_eq!(drop_count(), 1);
{
let z = &Droppable::new();
let zz = &Droppable::new();
|
assert_eq!(drop_count(), 1);
}
assert_eq!(drop_count(), 3);
}
fn test_fn_mut() {
{
b(move |a: isize, b| { a + b });
}
assert_eq!(drop_count(), 3);
{
let z = &Droppable::new();
b(move |a: isize, b| { z; a + b });
assert_eq!(drop_count(), 3);
}
assert_eq!(drop_count(), 4);
{
let z = &Droppable::new();
let zz = &Droppable::new();
b(move |a: isize, b| { z; zz; a + b });
assert_eq!(drop_count(), 4);
}
assert_eq!(drop_count(), 6);
}
fn test_fn_once() {
{
c(move |a: isize, b| { a + b });
}
assert_eq!(drop_count(), 6);
{
let z = Droppable::new();
c(move |a: isize, b| { z; a + b });
assert_eq!(drop_count(), 7);
}
assert_eq!(drop_count(), 7);
{
let z = Droppable::new();
let zz = Droppable::new();
c(move |a: isize, b| { z; zz; a + b });
assert_eq!(drop_count(), 9);
}
assert_eq!(drop_count(), 9);
}
fn main() {
test_fn();
test_fn_mut();
test_fn_once();
}
|
a(move |a: isize, b| { z; zz; a + b });
|
random_line_split
|
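In test_fn above, z and zz are references (&Droppable::new()), so the move closure only captures &Droppable; the temporaries die at the end of the enclosing block, which is why drop_count() is unchanged right after the call. A small sketch of that distinction in modern Rust (Noisy is a hypothetical stand-in for Droppable, not part of the test):

struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn call<F: Fn(i32, i32) -> i32>(f: F) -> i32 {
    f(1, 2)
}

fn main() {
    let owned = Noisy("borrowed");
    let r = &owned;
    // The closure captures only the reference `r`; dropping the closure
    // inside call() does not run Noisy's destructor.
    call(move |a, b| { let _ = r; a + b });
    println!("closure gone, `borrowed` still alive");

    let moved = Noisy("moved");
    // Here the closure owns the value, so the environment's drop runs
    // Noisy's destructor as soon as call() returns and the closure dies.
    call(move |a, b| { let _ = &moved; a + b });
    println!("`moved` was dropped before this line");
} // "dropping borrowed" prints here, at end of scope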
unboxed-closures-drop.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(path_statements)]
#![allow(dead_code)]
// A battery of tests to ensure destructors of unboxed closure environments
// run at the right times.
static mut DROP_COUNT: usize = 0;
fn drop_count() -> usize {
unsafe {
DROP_COUNT
}
}
struct Droppable {
x: isize,
}
impl Droppable {
fn new() -> Droppable {
Droppable {
x: 1
}
}
}
impl Drop for Droppable {
fn drop(&mut self) {
unsafe {
DROP_COUNT += 1
}
}
}
fn a<F:Fn(isize, isize) -> isize>(f: F) -> isize
|
fn b<F:FnMut(isize, isize) -> isize>(mut f: F) -> isize {
f(3, 4)
}
fn c<F:FnOnce(isize, isize) -> isize>(f: F) -> isize {
f(5, 6)
}
fn test_fn() {
{
a(move |a: isize, b| { a + b });
}
assert_eq!(drop_count(), 0);
{
let z = &Droppable::new();
a(move |a: isize, b| { z; a + b });
assert_eq!(drop_count(), 0);
}
assert_eq!(drop_count(), 1);
{
let z = &Droppable::new();
let zz = &Droppable::new();
a(move |a: isize, b| { z; zz; a + b });
assert_eq!(drop_count(), 1);
}
assert_eq!(drop_count(), 3);
}
fn test_fn_mut() {
{
b(move |a: isize, b| { a + b });
}
assert_eq!(drop_count(), 3);
{
let z = &Droppable::new();
b(move |a: isize, b| { z; a + b });
assert_eq!(drop_count(), 3);
}
assert_eq!(drop_count(), 4);
{
let z = &Droppable::new();
let zz = &Droppable::new();
b(move |a: isize, b| { z; zz; a + b });
assert_eq!(drop_count(), 4);
}
assert_eq!(drop_count(), 6);
}
fn test_fn_once() {
{
c(move |a: isize, b| { a + b });
}
assert_eq!(drop_count(), 6);
{
let z = Droppable::new();
c(move |a: isize, b| { z; a + b });
assert_eq!(drop_count(), 7);
}
assert_eq!(drop_count(), 7);
{
let z = Droppable::new();
let zz = Droppable::new();
c(move |a: isize, b| { z; zz; a + b });
assert_eq!(drop_count(), 9);
}
assert_eq!(drop_count(), 9);
}
fn main() {
test_fn();
test_fn_mut();
test_fn_once();
}
|
{
f(1, 2)
}
|
identifier_body
|
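The unboxed-closures-drop.rs rows count drops through a static mut plus unsafe reads, a pattern that predates Rust's stricter treatment of mutable statics. The same counter is expressed safely today with an atomic; a hedged sketch of that substitution, not the test's original code:

use std::sync::atomic::{AtomicUsize, Ordering};

// Safe shared counter: no `static mut`, no `unsafe` blocks.
static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

struct Droppable;

impl Drop for Droppable {
    fn drop(&mut self) {
        DROP_COUNT.fetch_add(1, Ordering::SeqCst);
    }
}

fn main() {
    {
        let _d = Droppable;
        assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 0);
    }
    // The destructor ran when `_d` left scope.
    assert_eq!(DROP_COUNT.load(Ordering::SeqCst), 1);
}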
unboxed-closures-drop.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(path_statements)]
#![allow(dead_code)]
// A battery of tests to ensure destructors of unboxed closure environments
// run at the right times.
static mut DROP_COUNT: usize = 0;
fn drop_count() -> usize {
unsafe {
DROP_COUNT
}
}
struct Droppable {
x: isize,
}
impl Droppable {
fn new() -> Droppable {
Droppable {
x: 1
}
}
}
impl Drop for Droppable {
fn drop(&mut self) {
unsafe {
DROP_COUNT += 1
}
}
}
fn
|
<F:Fn(isize, isize) -> isize>(f: F) -> isize {
f(1, 2)
}
fn b<F:FnMut(isize, isize) -> isize>(mut f: F) -> isize {
f(3, 4)
}
fn c<F:FnOnce(isize, isize) -> isize>(f: F) -> isize {
f(5, 6)
}
fn test_fn() {
{
a(move |a: isize, b| { a + b });
}
assert_eq!(drop_count(), 0);
{
let z = &Droppable::new();
a(move |a: isize, b| { z; a + b });
assert_eq!(drop_count(), 0);
}
assert_eq!(drop_count(), 1);
{
let z = &Droppable::new();
let zz = &Droppable::new();
a(move |a: isize, b| { z; zz; a + b });
assert_eq!(drop_count(), 1);
}
assert_eq!(drop_count(), 3);
}
fn test_fn_mut() {
{
b(move |a: isize, b| { a + b });
}
assert_eq!(drop_count(), 3);
{
let z = &Droppable::new();
b(move |a: isize, b| { z; a + b });
assert_eq!(drop_count(), 3);
}
assert_eq!(drop_count(), 4);
{
let z = &Droppable::new();
let zz = &Droppable::new();
b(move |a: isize, b| { z; zz; a + b });
assert_eq!(drop_count(), 4);
}
assert_eq!(drop_count(), 6);
}
fn test_fn_once() {
{
c(move |a: isize, b| { a + b });
}
assert_eq!(drop_count(), 6);
{
let z = Droppable::new();
c(move |a: isize, b| { z; a + b });
assert_eq!(drop_count(), 7);
}
assert_eq!(drop_count(), 7);
{
let z = Droppable::new();
let zz = Droppable::new();
c(move |a: isize, b| { z; zz; a + b });
assert_eq!(drop_count(), 9);
}
assert_eq!(drop_count(), 9);
}
fn main() {
test_fn();
test_fn_mut();
test_fn_once();
}
|
a
|
identifier_name
|
tree.rs
|
#![cfg_attr(feature = "unstable", feature(test))]
extern crate rand;
extern crate rdxsort;
#[cfg(feature = "unstable")]
mod unstable {
extern crate test;
use self::test::Bencher;
use rand::{Rng, XorShiftRng};
use rdxsort::*;
use std::collections::BTreeSet;
use std::collections::HashSet;
static N_MEDIUM: usize = 10_000;
fn
|
<F>(b: &mut Bencher, f: F) where F: Fn(Vec<u32>) {
let mut set = HashSet::new();
let mut rng = XorShiftRng::new_unseeded();
while set.len() < N_MEDIUM {
set.insert(rng.gen::<u32>());
}
let mut vec: Vec<u32> = set.into_iter().collect();
rng.shuffle(&mut vec[..]);
let _ = b.iter(|| {
let vec = vec.clone();
f(vec);
});
}
#[bench]
fn bench_set_rdx(b: &mut Bencher) {
bench_generic(b, |vec| {
let mut set = RdxTree::new();
for x in vec {
set.insert(x);
}
});
}
#[bench]
fn bench_set_std(b: &mut Bencher) {
bench_generic(b, |vec| {
let mut set = BTreeSet::new();
for x in vec {
set.insert(x);
}
});
}
}
|
bench_generic
|
identifier_name
|
tree.rs
|
#![cfg_attr(feature = "unstable", feature(test))]
extern crate rand;
extern crate rdxsort;
#[cfg(feature = "unstable")]
mod unstable {
extern crate test;
use self::test::Bencher;
use rand::{Rng, XorShiftRng};
use rdxsort::*;
use std::collections::BTreeSet;
use std::collections::HashSet;
static N_MEDIUM: usize = 10_000;
fn bench_generic<F>(b: &mut Bencher, f: F) where F: Fn(Vec<u32>)
|
#[bench]
fn bench_set_rdx(b: &mut Bencher) {
bench_generic(b, |vec| {
let mut set = RdxTree::new();
for x in vec {
set.insert(x);
}
});
}
#[bench]
fn bench_set_std(b: &mut Bencher) {
bench_generic(b, |vec| {
let mut set = BTreeSet::new();
for x in vec {
set.insert(x);
}
});
}
}
|
{
let mut set = HashSet::new();
let mut rng = XorShiftRng::new_unseeded();
while set.len() < N_MEDIUM {
set.insert(rng.gen::<u32>());
}
let mut vec: Vec<u32> = set.into_iter().collect();
rng.shuffle(&mut vec[..]);
let _ = b.iter(|| {
let vec = vec.clone();
f(vec);
});
}
|
identifier_body
|
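bench_generic in the tree.rs rows builds its input by collecting unique u32 values into a HashSet and shuffling them, so every benchmark iteration clones the same randomized vector. The same setup in current rand (assumed dependency rand = "0.8"; XorShiftRng moved out of the main crate long ago, so thread_rng stands in, and a stable harness such as criterion would replace the unstable test::Bencher):

use rand::seq::SliceRandom;
use rand::Rng;
use std::collections::HashSet;

const N_MEDIUM: usize = 10_000;

// Produce N_MEDIUM distinct u32 values in random order, mirroring the
// HashSet-then-shuffle setup in bench_generic above.
fn unique_shuffled() -> Vec<u32> {
    let mut rng = rand::thread_rng();
    let mut set = HashSet::new();
    while set.len() < N_MEDIUM {
        set.insert(rng.gen::<u32>());
    }
    let mut vec: Vec<u32> = set.into_iter().collect();
    vec.shuffle(&mut rng);
    vec
}

fn main() {
    let v = unique_shuffled();
    assert_eq!(v.len(), N_MEDIUM);
    println!("first element: {}", v[0]);
}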