| file_name<br>large_string · lengths 4–69 | prefix<br>large_string · lengths 0–26.7k | suffix<br>large_string · lengths 0–24.8k | middle<br>large_string · lengths 0–2.12k | fim_type<br>large_string · 4 classes (identifier_name, identifier_body, conditional_block, random_line_split) |
---|---|---|---|---|
pretty.rs | // Pris -- A language for designing slides
// Copyright 2017 Ruud van Asseldonk
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 3. A copy
// of the License is available in the root of the repository.
//! The string formatting primitives in `std::fmt` are not really suitable for
//! pretty-printing code, because they do not support indentation in a proper
//! way. This module provides an alternative abstraction for pretty printing
//! that automatically inserts indents after newlines. It also assumes that
//! printing cannot fail, which avoids clumsy error handling.
use std::fmt::Write;
use std::rc::Rc;
// The compiler is wrong, this function *is* used, from the macro at the end of
// this file. And that macro itself is also used, in tests.
#[allow(dead_code)]
pub fn print<P: Print>(content: P) -> String {
let mut f = Formatter::new();
f.print(content);
f.into_string()
}
pub trait Print {
fn print(&self, f: &mut Formatter);
}
pub struct Formatter {
target: String,
indent: u32,
}
impl<'a, P: Print> Print for &'a P {
fn | (&self, f: &mut Formatter) {
(*self).print(f);
}
}
impl<P: Print> Print for Box<P> {
fn print(&self, f: &mut Formatter) {
(**self).print(f);
}
}
impl<P: Print> Print for Rc<P> {
fn print(&self, f: &mut Formatter) {
(**self).print(f);
}
}
impl<'a> Print for &'a str {
fn print(&self, f: &mut Formatter) {
f.target.push_str(self);
}
}
impl Print for i32 {
fn print(&self, f: &mut Formatter) {
write!(&mut f.target, "{}", self).unwrap();
}
}
impl Print for u32 {
fn print(&self, f: &mut Formatter) {
write!(&mut f.target, "{}", self).unwrap();
}
}
impl Print for usize {
fn print(&self, f: &mut Formatter) {
write!(&mut f.target, "{}", self).unwrap();
}
}
impl Print for f64 {
fn print(&self, f: &mut Formatter) {
write!(&mut f.target, "{}", self).unwrap();
}
}
impl Formatter {
pub fn new() -> Formatter {
Formatter { target: String::new(), indent: 0 }
}
pub fn indent_more(&mut self) {
self.indent += 1;
}
pub fn indent_less(&mut self) {
assert!(self.indent > 0);
self.indent -= 1;
}
pub fn print<P: Print>(&mut self, content: P) {
content.print(self);
}
pub fn println<P: Print>(&mut self, content: P) {
for _ in 0..self.indent * 2 {
self.target.push(' ');
}
self.print(content);
}
pub fn print_hex_byte(&mut self, content: u8) {
write!(&mut self.target, "{:2x}", content).unwrap();
}
pub fn into_string(self) -> String {
self.target
}
}
/// Assert that two values of type `P: Print` are equal.
///
/// This is similar to `assert_eq!`, but using `Print` rather than `fmt::Debug`.
#[macro_export]
macro_rules! assert_preq {
($left: expr, $right: expr) => {
{
use pretty;
let left = &$left;
let right = &$right;
assert!(left == right,
"\nExpected:\n\n{}\n\nBut found:\n\n{}\n\n",
pretty::print(right),
pretty::print(left));
}
}
}
| print | identifier_name |
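The row above defines the whole `Print`/`Formatter` API, so a tiny caller makes the indent handling concrete. The sketch below is illustrative only and is not part of the dataset row: the `Block` type and the `demo` function are invented, and only the methods shown above (`print`, `println`, `indent_more`, `indent_less`) are used.

```rust
// Hypothetical caller of the Print/Formatter API from the row above.
// `Block` and `demo` are invented for illustration.
struct Block {
    name: &'static str,
    children: Vec<Block>,
}

impl Print for Block {
    fn print(&self, f: &mut Formatter) {
        // `println` emits the current indent prefix, then the content.
        f.println(self.name);
        f.print("\n");
        f.indent_more();
        for child in &self.children {
            child.print(f);
        }
        f.indent_less();
    }
}

fn demo() -> String {
    let tree = Block {
        name: "root",
        children: vec![Block { name: "child", children: Vec::new() }],
    };
    // Uses the free `print` function; yields "root\n  child\n".
    print(tree)
}
```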
grid_builder.rs | extern crate extra;
extern crate std;
use grid::{Row, Column, alive, dead, Cell, Grid};
mod grid;
/****************************************************************************
* Something to say Ben??
****************************************************************************/
/*
* Constructs an immutable grid from the contents of an array of strings.
*/
pub fn build_from_file_contents(file_contents: ~[~str]) -> Grid {
let height = file_contents.len();
assert!(height > 0u);
let width = file_contents[0].len();
assert!(width > 0u);
let cell_value = |file_value| {
return match file_value {
'o' => alive,
'.' => dead,
_ => fail!("Unexpected cell value found in file.")
};
};
return Grid {
inner: std::vec::from_fn(height, |row| {
std::vec::from_fn(width, |column| {
assert_eq!(width, file_contents[row].len());
let file_value = file_contents[row][column];
return Cell { value: cell_value(file_value as char) };
})
})
};
} // fn build_from_file_contents
/*
Returns a count for how many neighbors of a cell in the grid
are alive.
Starts to the cell to the left of the cell/row and sums up cell_alive
working in clockwise order.
*/
fn count_neighbors(Row(row): Row, Column(col): Column, grid: &Grid) -> uint {
let left_column = Column(col - 1);
let right_column = Column(col + 1);
let above_row = Row(row - 1);
let below_row = Row(row + 1);
return grid.cell_alive(Row(row), left_column) + // left
grid.cell_alive(above_row, left_column) + // left-above
grid.cell_alive(above_row, Column(col)) + // above
grid.cell_alive(above_row, right_column) + // above-right
grid.cell_alive(Row(row), right_column) + // right
grid.cell_alive(below_row, right_column) + // below-right
grid.cell_alive(below_row, Column(col)) + // below
grid.cell_alive(below_row, left_column); // below-left
} // fn count_neighbors
// 1) Any live cell with fewer than two live neighbours dies, as if caused by
// under-population.
// 2) Any live cell with two or three live neighbours lives on to the next
// generation.
// 3) Any live cell with more than three live neighbours dies, as if by
// overcrowding.
// 4) Any dead cell with exactly three live neighbours becomes a live cell, as
// if by reproduction.
pub fn build_from_grid(prevg: &Grid) -> Grid | // fn build_from_grid
| {
let cell_value = |row: uint, column: uint| {
let ncount = count_neighbors(Row(row), Column(column), prevg);
let cv = match (prevg.inner[row][column].value, ncount) {
(dead, 3) => alive,
(alive, 2..3) => alive,
_ => dead
};
return Cell { value: cv };
};
return Grid {
inner: std::vec::from_fn(prevg.height(), |row| {
std::vec::from_fn(prevg.width(), |column| {
cell_value(row, column)
})
})
};
} | identifier_body |
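The four numbered rules in the comments above collapse into a single match on the pair (current state, live-neighbour count), which is what `build_from_grid` does with the era's `2..3` range pattern (apparently the old inclusive form, spelled `2..=3` today). A modern-Rust restatement over a plain `bool`, for illustration only:

```rust
// Conway's rules as listed above, restated over a plain bool.
fn next_state(alive_now: bool, live_neighbors: u32) -> bool {
    match (alive_now, live_neighbors) {
        (true, 2) | (true, 3) => true, // rule 2: lives on to the next generation
        (false, 3) => true,            // rule 4: reproduction
        _ => false,                    // rules 1 and 3: under-population / overcrowding
    }
}
```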
grid_builder.rs | extern crate extra;
extern crate std;
use grid::{Row, Column, alive, dead, Cell, Grid};
mod grid;
/****************************************************************************
* Something to say Ben??
****************************************************************************/
/*
* Constructs an immutable grid from the contents of an array of strings.
*/
pub fn build_from_file_contents(file_contents: ~[~str]) -> Grid {
let height = file_contents.len();
assert!(height > 0u);
let width = file_contents[0].len();
assert!(width > 0u);
let cell_value = |file_value| {
return match file_value {
'o' => alive,
'.' => dead, | std::vec::from_fn(width, |column| {
assert_eq!(width, file_contents[row].len());
let file_value = file_contents[row][column];
return Cell { value: cell_value(file_value as char) };
})
})
};
} // fn build_from_file_contents
/*
Returns a count for how many neighbors of a cell in the grid
are alive.
Starts to the cell to the left of the cell/row and sums up cell_alive
working in clockwise order.
*/
fn count_neighbors(Row(row): Row, Column(col): Column, grid: &Grid) -> uint {
let left_column = Column(col - 1);
let right_column = Column(col + 1);
let above_row = Row(row - 1);
let below_row = Row(row + 1);
return grid.cell_alive(Row(row), left_column) + // left
grid.cell_alive(above_row, left_column) + // left-above
grid.cell_alive(above_row, Column(col)) + // above
grid.cell_alive(above_row, right_column) + // above-right
grid.cell_alive(Row(row), right_column) + // right
grid.cell_alive(below_row, right_column) + // below-right
grid.cell_alive(below_row, Column(col)) + // below
grid.cell_alive(below_row, left_column); // below-left
} // fn count_neighbors
// 1) Any live cell with fewer than two live neighbours dies, as if caused by
// under-population.
// 2) Any live cell with two or three live neighbours lives on to the next
// generation.
// 3) Any live cell with more than three live neighbours dies, as if by
// overcrowding.
// 4) Any dead cell with exactly three live neighbours becomes a live cell, as
// if by reproduction.
pub fn build_from_grid(prevg: &Grid) -> Grid {
let cell_value = |row: uint, column: uint| {
let ncount = count_neighbors(Row(row), Column(column), prevg);
let cv = match (prevg.inner[row][column].value, ncount) {
(dead, 3) => alive,
(alive, 2..3) => alive,
_ => dead
};
return Cell { value: cv };
};
return Grid {
inner: std::vec::from_fn(prevg.height(), |row| {
std::vec::from_fn(prevg.width(), |column| {
cell_value(row, column)
})
})
};
} // fn build_from_grid | _ => fail!("Unexpected cell value found in file.")
};
};
return Grid {
inner: std::vec::from_fn(height, |row| { | random_line_split |
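For reference, the `'o'`/`'.'` mapping that `build_from_file_contents` applies above can be sketched with current `std` types instead of the pre-1.0 `~[~str]` vectors. This is an illustrative rewrite, not dataset content; it panics on unexpected characters, mirroring the `fail!` branch:

```rust
// Parse a textual grid where 'o' is a live cell and '.' is a dead cell.
fn parse_grid(lines: &[&str]) -> Vec<Vec<bool>> {
    lines
        .iter()
        .map(|line| {
            line.chars()
                .map(|c| match c {
                    'o' => true,
                    '.' => false,
                    other => panic!("Unexpected cell value found in file: {}", other),
                })
                .collect()
        })
        .collect()
}
```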
grid_builder.rs | extern crate extra;
extern crate std;
use grid::{Row, Column, alive, dead, Cell, Grid};
mod grid;
/****************************************************************************
* Something to say Ben??
****************************************************************************/
/*
* Constructs an immutable grid from the contents of an array of strings.
*/
pub fn build_from_file_contents(file_contents: ~[~str]) -> Grid {
let height = file_contents.len();
assert!(height > 0u);
let width = file_contents[0].len();
assert!(width > 0u);
let cell_value = |file_value| {
return match file_value {
'o' => alive,
'.' => dead,
_ => fail!("Unexpected cell value found in file.")
};
};
return Grid {
inner: std::vec::from_fn(height, |row| {
std::vec::from_fn(width, |column| {
assert_eq!(width, file_contents[row].len());
let file_value = file_contents[row][column];
return Cell { value: cell_value(file_value as char) };
})
})
};
} // fn build_from_file_contents
/*
Returns a count for how many neighbors of a cell in the grid
are alive.
Starts to the cell to the left of the cell/row and sums up cell_alive
working in clockwise order.
*/
fn | (Row(row): Row, Column(col): Column, grid: &Grid) -> uint {
let left_column = Column(col - 1);
let right_column = Column(col + 1);
let above_row = Row(row - 1);
let below_row = Row(row + 1);
return grid.cell_alive(Row(row), left_column) + // left
grid.cell_alive(above_row, left_column) + // left-above
grid.cell_alive(above_row, Column(col)) + // above
grid.cell_alive(above_row, right_column) + // above-right
grid.cell_alive(Row(row), right_column) + // right
grid.cell_alive(below_row, right_column) + // below-right
grid.cell_alive(below_row, Column(col)) + // below
grid.cell_alive(below_row, left_column); // below-left
} // fn count_neighbors
// 1) Any live cell with fewer than two live neighbours dies, as if caused by
// under-population.
// 2) Any live cell with two or three live neighbours lives on to the next
// generation.
// 3) Any live cell with more than three live neighbours dies, as if by
// overcrowding.
// 4) Any dead cell with exactly three live neighbours becomes a live cell, as
// if by reproduction.
pub fn build_from_grid(prevg: &Grid) -> Grid {
let cell_value = |row: uint, column: uint| {
let ncount = count_neighbors(Row(row), Column(column), prevg);
let cv = match (prevg.inner[row][column].value, ncount) {
(dead, 3) => alive,
(alive, 2..3) => alive,
_ => dead
};
return Cell { value: cv };
};
return Grid {
inner: std::vec::from_fn(prevg.height(), |row| {
std::vec::from_fn(prevg.width(), |column| {
cell_value(row, column)
})
})
};
} // fn build_from_grid
| count_neighbors | identifier_name |
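`count_neighbors` above walks the eight neighbours clockwise, starting at the cell on the left. The same traversal over a `bool` grid is sketched below; the explicit bounds checks stand in for whatever `cell_alive` does at the edges, which is an assumption on my part:

```rust
// Count live neighbours of (row, col), visiting them in the clockwise order
// described in the comment: left, left-above, above, above-right, right,
// below-right, below, below-left. Out-of-range cells count as dead here.
fn count_neighbors(grid: &[Vec<bool>], row: usize, col: usize) -> usize {
    let offsets: [(isize, isize); 8] = [
        (0, -1), (-1, -1), (-1, 0), (-1, 1),
        (0, 1), (1, 1), (1, 0), (1, -1),
    ];
    let mut count = 0;
    for (dr, dc) in offsets {
        let r = row as isize + dr;
        let c = col as isize + dc;
        let alive = r >= 0
            && c >= 0
            && (r as usize) < grid.len()
            && (c as usize) < grid[r as usize].len()
            && grid[r as usize][c as usize];
        if alive {
            count += 1;
        }
    }
    count
}
```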
utils.rs | use std::fmt;
use std::path::{Path, PathBuf};
use std::fs::{self, File};
use rustc_serialize::{Encodable, Encoder};
use url::Url;
use git2::{self, ObjectType};
use core::GitReference;
use util::{CargoResult, ChainError, human, ToUrl, internal};
#[derive(PartialEq, Clone, Debug)]
pub struct GitRevision(git2::Oid);
impl fmt::Display for GitRevision {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
/// GitRemote represents a remote repository. It gets cloned into a local
/// GitDatabase.
#[derive(PartialEq,Clone,Debug)]
pub struct GitRemote {
url: Url,
}
#[derive(PartialEq,Clone,RustcEncodable)]
struct EncodableGitRemote {
url: String,
}
impl Encodable for GitRemote {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
EncodableGitRemote {
url: self.url.to_string()
}.encode(s)
}
}
/// GitDatabase is a local clone of a remote repository's database. Multiple
/// GitCheckouts can be cloned from this GitDatabase.
pub struct GitDatabase {
remote: GitRemote,
path: PathBuf,
repo: git2::Repository,
}
#[derive(RustcEncodable)]
pub struct EncodableGitDatabase {
remote: GitRemote,
path: String,
}
impl Encodable for GitDatabase {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
EncodableGitDatabase {
remote: self.remote.clone(),
path: self.path.display().to_string()
}.encode(s)
}
}
/// GitCheckout is a local checkout of a particular revision. Calling
/// `clone_into` with a reference will resolve the reference into a revision,
/// and return a CargoError if no revision for that reference was found.
pub struct GitCheckout<'a> {
database: &'a GitDatabase,
location: PathBuf,
revision: GitRevision,
repo: git2::Repository,
}
#[derive(RustcEncodable)]
pub struct EncodableGitCheckout {
database: EncodableGitDatabase,
location: String,
revision: String,
}
impl<'a> Encodable for GitCheckout<'a> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
EncodableGitCheckout {
location: self.location.display().to_string(),
revision: self.revision.to_string(),
database: EncodableGitDatabase {
remote: self.database.remote.clone(),
path: self.database.path.display().to_string(),
},
}.encode(s)
}
}
// Implementations
impl GitRemote {
pub fn new(url: &Url) -> GitRemote {
GitRemote { url: url.clone() }
}
pub fn url(&self) -> &Url {
&self.url
}
pub fn rev_for(&self, path: &Path, reference: &GitReference)
-> CargoResult<GitRevision> {
let db = try!(self.db_at(path));
db.rev_for(reference)
}
pub fn checkout(&self, into: &Path) -> CargoResult<GitDatabase> {
let repo = match git2::Repository::open(into) {
Ok(repo) => |
Err(..) => {
try!(self.clone_into(into).chain_error(|| {
human(format!("failed to clone into: {}", into.display()))
}))
}
};
Ok(GitDatabase {
remote: self.clone(),
path: into.to_path_buf(),
repo: repo,
})
}
pub fn db_at(&self, db_path: &Path) -> CargoResult<GitDatabase> {
let repo = try!(git2::Repository::open(db_path));
Ok(GitDatabase {
remote: self.clone(),
path: db_path.to_path_buf(),
repo: repo,
})
}
fn fetch_into(&self, dst: &git2::Repository) -> CargoResult<()> {
// Create a local anonymous remote in the repository to fetch the url
let url = self.url.to_string();
let refspec = "refs/heads/*:refs/heads/*";
fetch(dst, &url, refspec)
}
fn clone_into(&self, dst: &Path) -> CargoResult<git2::Repository> {
let url = self.url.to_string();
if fs::metadata(&dst).is_ok() {
try!(fs::remove_dir_all(dst));
}
try!(fs::create_dir_all(dst));
let repo = try!(git2::Repository::init_bare(dst));
try!(fetch(&repo, &url, "refs/heads/*:refs/heads/*"));
Ok(repo)
}
}
impl GitDatabase {
fn path<'a>(&'a self) -> &'a Path {
&self.path
}
pub fn copy_to(&self, rev: GitRevision, dest: &Path)
-> CargoResult<GitCheckout> {
let checkout = match git2::Repository::open(dest) {
Ok(repo) => {
let checkout = GitCheckout::new(dest, self, rev, repo);
if !checkout.is_fresh() {
try!(checkout.fetch());
try!(checkout.reset());
assert!(checkout.is_fresh());
}
checkout
}
Err(..) => try!(GitCheckout::clone_into(dest, self, rev)),
};
try!(checkout.update_submodules().chain_error(|| {
internal("failed to update submodules")
}));
Ok(checkout)
}
pub fn rev_for(&self, reference: &GitReference) -> CargoResult<GitRevision> {
let id = match *reference {
GitReference::Tag(ref s) => {
try!((|| {
let refname = format!("refs/tags/{}", s);
let id = try!(self.repo.refname_to_id(&refname));
let obj = try!(self.repo.find_object(id, None));
let obj = try!(obj.peel(ObjectType::Commit));
Ok(obj.id())
}).chain_error(|| {
human(format!("failed to find tag `{}`", s))
}))
}
GitReference::Branch(ref s) => {
try!((|| {
let b = try!(self.repo.find_branch(s, git2::BranchType::Local));
b.get().target().chain_error(|| {
human(format!("branch `{}` did not have a target", s))
})
}).chain_error(|| {
human(format!("failed to find branch `{}`", s))
}))
}
GitReference::Rev(ref s) => {
let obj = try!(self.repo.revparse_single(s));
obj.id()
}
};
Ok(GitRevision(id))
}
pub fn has_ref(&self, reference: &str) -> CargoResult<()> {
try!(self.repo.revparse_single(reference));
Ok(())
}
}
impl<'a> GitCheckout<'a> {
fn new(path: &Path, database: &'a GitDatabase, revision: GitRevision,
repo: git2::Repository)
-> GitCheckout<'a>
{
GitCheckout {
location: path.to_path_buf(),
database: database,
revision: revision,
repo: repo,
}
}
fn clone_into(into: &Path, database: &'a GitDatabase,
revision: GitRevision)
-> CargoResult<GitCheckout<'a>>
{
let repo = try!(GitCheckout::clone_repo(database.path(), into));
let checkout = GitCheckout::new(into, database, revision, repo);
try!(checkout.reset());
Ok(checkout)
}
fn clone_repo(source: &Path, into: &Path) -> CargoResult<git2::Repository> {
let dirname = into.parent().unwrap();
try!(fs::create_dir_all(&dirname).chain_error(|| {
human(format!("Couldn't mkdir {}", dirname.display()))
}));
if fs::metadata(&into).is_ok() {
try!(fs::remove_dir_all(into).chain_error(|| {
human(format!("Couldn't rmdir {}", into.display()))
}));
}
let url = try!(source.to_url().map_err(human));
let url = url.to_string();
let repo = try!(git2::Repository::clone(&url, into).chain_error(|| {
internal(format!("failed to clone {} into {}", source.display(),
into.display()))
}));
Ok(repo)
}
fn is_fresh(&self) -> bool {
match self.repo.revparse_single("HEAD") {
Ok(ref head) if head.id() == self.revision.0 => {
// See comments in reset() for why we check this
fs::metadata(self.location.join(".cargo-ok")).is_ok()
}
_ => false,
}
}
fn fetch(&self) -> CargoResult<()> {
info!("fetch {}", self.repo.path().display());
let url = try!(self.database.path.to_url().map_err(human));
let url = url.to_string();
let refspec = "refs/heads/*:refs/heads/*";
try!(fetch(&self.repo, &url, refspec));
Ok(())
}
fn reset(&self) -> CargoResult<()> {
// If we're interrupted while performing this reset (e.g. we die because
// of a signal) Cargo needs to be sure to try to check out this repo
// again on the next go-round.
//
// To enable this we have a dummy file in our checkout, .cargo-ok, which
// if present means that the repo has been successfully reset and is
// ready to go. Hence if we start to do a reset, we make sure this file
// *doesn't* exist, and then once we're done we create the file.
let ok_file = self.location.join(".cargo-ok");
let _ = fs::remove_file(&ok_file);
info!("reset {} to {}", self.repo.path().display(), self.revision);
let object = try!(self.repo.find_object(self.revision.0, None));
try!(self.repo.reset(&object, git2::ResetType::Hard, None));
try!(File::create(ok_file));
Ok(())
}
fn update_submodules(&self) -> CargoResult<()> {
return update_submodules(&self.repo);
fn update_submodules(repo: &git2::Repository) -> CargoResult<()> {
info!("update submodules for: {:?}", repo.workdir().unwrap());
for mut child in try!(repo.submodules()).into_iter() {
try!(child.init(false));
let url = try!(child.url().chain_error(|| {
internal("non-utf8 url for submodule")
}));
// A submodule which is listed in .gitmodules but not actually
// checked out will not have a head id, so we should ignore it.
let head = match child.head_id() {
Some(head) => head,
None => continue,
};
// If the submodule hasn't been checked out yet, we need to
// clone it. If it has been checked out and the head is the same
// as the submodule's head, then we can bail out and go to the
// next submodule.
let head_and_repo = child.open().and_then(|repo| {
let target = try!(repo.head()).target();
Ok((target, repo))
});
let repo = match head_and_repo {
Ok((head, repo)) => {
if child.head_id() == head {
continue
}
repo
}
Err(..) => {
let path = repo.workdir().unwrap().join(child.path());
try!(git2::Repository::clone(url, &path))
}
};
// Fetch data from origin and reset to the head commit
let refspec = "refs/heads/*:refs/heads/*";
try!(fetch(&repo, url, refspec).chain_error(|| {
internal(format!("failed to fetch submodule `{}` from {}",
child.name().unwrap_or(""), url))
}));
let obj = try!(repo.find_object(head, None));
try!(repo.reset(&obj, git2::ResetType::Hard, None));
try!(update_submodules(&repo));
}
Ok(())
}
}
}
fn with_authentication<T, F>(url: &str, cfg: &git2::Config, mut f: F)
-> CargoResult<T>
where F: FnMut(&mut git2::Credentials) -> CargoResult<T>
{
// Prepare the authentication callbacks.
//
// We check the `allowed` types of credentials, and we try to do as much as
// possible based on that:
//
// * Prioritize SSH keys from the local ssh agent as they're likely the most
// reliable. The username here is prioritized from the credential
// callback, then from whatever is configured in git itself, and finally
// we fall back to the generic user of `git`.
//
// * If a username/password is allowed, then we fallback to git2-rs's
// implementation of the credential helper. This is what is configured
// with `credential.helper` in git, and is the interface for the OSX
// keychain, for example.
//
// * After the above two have failed, we just kinda grapple attempting to
// return *something*.
//
// Note that we keep track of the number of times we've called this callback
// because libgit2 will repeatedly give us credentials until we give it a
// reason to not do so. If we've been called once and our credentials failed
// then we'll be called again, and in this case we assume that the reason
// was because the credentials were wrong.
let mut cred_helper = git2::CredentialHelper::new(url);
cred_helper.config(cfg);
let mut called = 0;
let res = f(&mut |url, username, allowed| {
called += 1;
if called >= 2 {
return Err(git2::Error::from_str("no authentication available"))
}
if allowed.contains(git2::SSH_KEY) ||
allowed.contains(git2::USERNAME) {
let user = username.map(|s| s.to_string())
.or_else(|| cred_helper.username.clone())
.unwrap_or("git".to_string());
if allowed.contains(git2::USERNAME) {
git2::Cred::username(&user)
} else {
git2::Cred::ssh_key_from_agent(&user)
}
} else if allowed.contains(git2::USER_PASS_PLAINTEXT) {
git2::Cred::credential_helper(cfg, url, username)
} else if allowed.contains(git2::DEFAULT) {
git2::Cred::default()
} else {
Err(git2::Error::from_str("no authentication available"))
}
});
if called > 0 {
res.chain_error(|| {
human("failed to authenticate when downloading repository")
})
} else {
res
}
}
pub fn fetch(repo: &git2::Repository, url: &str,
refspec: &str) -> CargoResult<()> {
// Create a local anonymous remote in the repository to fetch the url
with_authentication(url, &try!(repo.config()), |f| {
let mut cb = git2::RemoteCallbacks::new();
cb.credentials(f);
let mut remote = try!(repo.remote_anonymous(&url, Some(refspec)));
try!(remote.add_fetch("refs/tags/*:refs/tags/*"));
remote.set_callbacks(cb);
try!(remote.fetch(&["refs/tags/*:refs/tags/*", refspec], None));
Ok(())
})
}
| {
try!(self.fetch_into(&repo).chain_error(|| {
human(format!("failed to fetch into {}", into.display()))
}));
repo
} | conditional_block |
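The doc comments in the row above describe a three-stage pipeline: a `GitRemote` is cloned into a bare `GitDatabase`, which is then materialised as a `GitCheckout` at a concrete revision. A hypothetical driver using only the functions defined in this file (the function name, the paths and the reference are invented):

```rust
// Illustrative only: wire GitRemote -> GitDatabase -> GitCheckout together
// the way the types above are meant to be used. Not part of the dataset row.
fn checkout_reference(url: &Url,
                      db_path: &Path,
                      dest: &Path,
                      reference: &GitReference) -> CargoResult<()> {
    let remote = GitRemote::new(url);
    // Clone into (or fetch an existing) bare database for this remote.
    let database = try!(remote.checkout(db_path));
    // Resolve the tag/branch/rev into a concrete GitRevision.
    let rev = try!(database.rev_for(reference));
    // Produce a working-tree checkout of that revision at `dest`.
    let _checkout = try!(database.copy_to(rev, dest));
    Ok(())
}
```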
utils.rs | use std::fmt;
use std::path::{Path, PathBuf};
use std::fs::{self, File};
use rustc_serialize::{Encodable, Encoder};
use url::Url;
use git2::{self, ObjectType};
use core::GitReference;
use util::{CargoResult, ChainError, human, ToUrl, internal};
#[derive(PartialEq, Clone, Debug)]
pub struct GitRevision(git2::Oid);
impl fmt::Display for GitRevision {
fn | (&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
/// GitRemote represents a remote repository. It gets cloned into a local
/// GitDatabase.
#[derive(PartialEq,Clone,Debug)]
pub struct GitRemote {
url: Url,
}
#[derive(PartialEq,Clone,RustcEncodable)]
struct EncodableGitRemote {
url: String,
}
impl Encodable for GitRemote {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
EncodableGitRemote {
url: self.url.to_string()
}.encode(s)
}
}
/// GitDatabase is a local clone of a remote repository's database. Multiple
/// GitCheckouts can be cloned from this GitDatabase.
pub struct GitDatabase {
remote: GitRemote,
path: PathBuf,
repo: git2::Repository,
}
#[derive(RustcEncodable)]
pub struct EncodableGitDatabase {
remote: GitRemote,
path: String,
}
impl Encodable for GitDatabase {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
EncodableGitDatabase {
remote: self.remote.clone(),
path: self.path.display().to_string()
}.encode(s)
}
}
/// GitCheckout is a local checkout of a particular revision. Calling
/// `clone_into` with a reference will resolve the reference into a revision,
/// and return a CargoError if no revision for that reference was found.
pub struct GitCheckout<'a> {
database: &'a GitDatabase,
location: PathBuf,
revision: GitRevision,
repo: git2::Repository,
}
#[derive(RustcEncodable)]
pub struct EncodableGitCheckout {
database: EncodableGitDatabase,
location: String,
revision: String,
}
impl<'a> Encodable for GitCheckout<'a> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
EncodableGitCheckout {
location: self.location.display().to_string(),
revision: self.revision.to_string(),
database: EncodableGitDatabase {
remote: self.database.remote.clone(),
path: self.database.path.display().to_string(),
},
}.encode(s)
}
}
// Implementations
impl GitRemote {
pub fn new(url: &Url) -> GitRemote {
GitRemote { url: url.clone() }
}
pub fn url(&self) -> &Url {
&self.url
}
pub fn rev_for(&self, path: &Path, reference: &GitReference)
-> CargoResult<GitRevision> {
let db = try!(self.db_at(path));
db.rev_for(reference)
}
pub fn checkout(&self, into: &Path) -> CargoResult<GitDatabase> {
let repo = match git2::Repository::open(into) {
Ok(repo) => {
try!(self.fetch_into(&repo).chain_error(|| {
human(format!("failed to fetch into {}", into.display()))
}));
repo
}
Err(..) => {
try!(self.clone_into(into).chain_error(|| {
human(format!("failed to clone into: {}", into.display()))
}))
}
};
Ok(GitDatabase {
remote: self.clone(),
path: into.to_path_buf(),
repo: repo,
})
}
pub fn db_at(&self, db_path: &Path) -> CargoResult<GitDatabase> {
let repo = try!(git2::Repository::open(db_path));
Ok(GitDatabase {
remote: self.clone(),
path: db_path.to_path_buf(),
repo: repo,
})
}
fn fetch_into(&self, dst: &git2::Repository) -> CargoResult<()> {
// Create a local anonymous remote in the repository to fetch the url
let url = self.url.to_string();
let refspec = "refs/heads/*:refs/heads/*";
fetch(dst, &url, refspec)
}
fn clone_into(&self, dst: &Path) -> CargoResult<git2::Repository> {
let url = self.url.to_string();
if fs::metadata(&dst).is_ok() {
try!(fs::remove_dir_all(dst));
}
try!(fs::create_dir_all(dst));
let repo = try!(git2::Repository::init_bare(dst));
try!(fetch(&repo, &url, "refs/heads/*:refs/heads/*"));
Ok(repo)
}
}
impl GitDatabase {
fn path<'a>(&'a self) -> &'a Path {
&self.path
}
pub fn copy_to(&self, rev: GitRevision, dest: &Path)
-> CargoResult<GitCheckout> {
let checkout = match git2::Repository::open(dest) {
Ok(repo) => {
let checkout = GitCheckout::new(dest, self, rev, repo);
if !checkout.is_fresh() {
try!(checkout.fetch());
try!(checkout.reset());
assert!(checkout.is_fresh());
}
checkout
}
Err(..) => try!(GitCheckout::clone_into(dest, self, rev)),
};
try!(checkout.update_submodules().chain_error(|| {
internal("failed to update submodules")
}));
Ok(checkout)
}
pub fn rev_for(&self, reference: &GitReference) -> CargoResult<GitRevision> {
let id = match *reference {
GitReference::Tag(ref s) => {
try!((|| {
let refname = format!("refs/tags/{}", s);
let id = try!(self.repo.refname_to_id(&refname));
let obj = try!(self.repo.find_object(id, None));
let obj = try!(obj.peel(ObjectType::Commit));
Ok(obj.id())
}).chain_error(|| {
human(format!("failed to find tag `{}`", s))
}))
}
GitReference::Branch(ref s) => {
try!((|| {
let b = try!(self.repo.find_branch(s, git2::BranchType::Local));
b.get().target().chain_error(|| {
human(format!("branch `{}` did not have a target", s))
})
}).chain_error(|| {
human(format!("failed to find branch `{}`", s))
}))
}
GitReference::Rev(ref s) => {
let obj = try!(self.repo.revparse_single(s));
obj.id()
}
};
Ok(GitRevision(id))
}
pub fn has_ref(&self, reference: &str) -> CargoResult<()> {
try!(self.repo.revparse_single(reference));
Ok(())
}
}
impl<'a> GitCheckout<'a> {
fn new(path: &Path, database: &'a GitDatabase, revision: GitRevision,
repo: git2::Repository)
-> GitCheckout<'a>
{
GitCheckout {
location: path.to_path_buf(),
database: database,
revision: revision,
repo: repo,
}
}
fn clone_into(into: &Path, database: &'a GitDatabase,
revision: GitRevision)
-> CargoResult<GitCheckout<'a>>
{
let repo = try!(GitCheckout::clone_repo(database.path(), into));
let checkout = GitCheckout::new(into, database, revision, repo);
try!(checkout.reset());
Ok(checkout)
}
fn clone_repo(source: &Path, into: &Path) -> CargoResult<git2::Repository> {
let dirname = into.parent().unwrap();
try!(fs::create_dir_all(&dirname).chain_error(|| {
human(format!("Couldn't mkdir {}", dirname.display()))
}));
if fs::metadata(&into).is_ok() {
try!(fs::remove_dir_all(into).chain_error(|| {
human(format!("Couldn't rmdir {}", into.display()))
}));
}
let url = try!(source.to_url().map_err(human));
let url = url.to_string();
let repo = try!(git2::Repository::clone(&url, into).chain_error(|| {
internal(format!("failed to clone {} into {}", source.display(),
into.display()))
}));
Ok(repo)
}
fn is_fresh(&self) -> bool {
match self.repo.revparse_single("HEAD") {
Ok(ref head) if head.id() == self.revision.0 => {
// See comments in reset() for why we check this
fs::metadata(self.location.join(".cargo-ok")).is_ok()
}
_ => false,
}
}
fn fetch(&self) -> CargoResult<()> {
info!("fetch {}", self.repo.path().display());
let url = try!(self.database.path.to_url().map_err(human));
let url = url.to_string();
let refspec = "refs/heads/*:refs/heads/*";
try!(fetch(&self.repo, &url, refspec));
Ok(())
}
fn reset(&self) -> CargoResult<()> {
// If we're interrupted while performing this reset (e.g. we die because
// of a signal) Cargo needs to be sure to try to check out this repo
// again on the next go-round.
//
// To enable this we have a dummy file in our checkout, .cargo-ok, which
// if present means that the repo has been successfully reset and is
// ready to go. Hence if we start to do a reset, we make sure this file
// *doesn't* exist, and then once we're done we create the file.
let ok_file = self.location.join(".cargo-ok");
let _ = fs::remove_file(&ok_file);
info!("reset {} to {}", self.repo.path().display(), self.revision);
let object = try!(self.repo.find_object(self.revision.0, None));
try!(self.repo.reset(&object, git2::ResetType::Hard, None));
try!(File::create(ok_file));
Ok(())
}
fn update_submodules(&self) -> CargoResult<()> {
return update_submodules(&self.repo);
fn update_submodules(repo: &git2::Repository) -> CargoResult<()> {
info!("update submodules for: {:?}", repo.workdir().unwrap());
for mut child in try!(repo.submodules()).into_iter() {
try!(child.init(false));
let url = try!(child.url().chain_error(|| {
internal("non-utf8 url for submodule")
}));
// A submodule which is listed in .gitmodules but not actually
// checked out will not have a head id, so we should ignore it.
let head = match child.head_id() {
Some(head) => head,
None => continue,
};
// If the submodule hasn't been checked out yet, we need to
// clone it. If it has been checked out and the head is the same
// as the submodule's head, then we can bail out and go to the
// next submodule.
let head_and_repo = child.open().and_then(|repo| {
let target = try!(repo.head()).target();
Ok((target, repo))
});
let repo = match head_and_repo {
Ok((head, repo)) => {
if child.head_id() == head {
continue
}
repo
}
Err(..) => {
let path = repo.workdir().unwrap().join(child.path());
try!(git2::Repository::clone(url, &path))
}
};
// Fetch data from origin and reset to the head commit
let refspec = "refs/heads/*:refs/heads/*";
try!(fetch(&repo, url, refspec).chain_error(|| {
internal(format!("failed to fetch submodule `{}` from {}",
child.name().unwrap_or(""), url))
}));
let obj = try!(repo.find_object(head, None));
try!(repo.reset(&obj, git2::ResetType::Hard, None));
try!(update_submodules(&repo));
}
Ok(())
}
}
}
fn with_authentication<T, F>(url: &str, cfg: &git2::Config, mut f: F)
-> CargoResult<T>
where F: FnMut(&mut git2::Credentials) -> CargoResult<T>
{
// Prepare the authentication callbacks.
//
// We check the `allowed` types of credentials, and we try to do as much as
// possible based on that:
//
// * Prioritize SSH keys from the local ssh agent as they're likely the most
// reliable. The username here is prioritized from the credential
// callback, then from whatever is configured in git itself, and finally
// we fall back to the generic user of `git`.
//
// * If a username/password is allowed, then we fallback to git2-rs's
// implementation of the credential helper. This is what is configured
// with `credential.helper` in git, and is the interface for the OSX
// keychain, for example.
//
// * After the above two have failed, we just kinda grapple attempting to
// return *something*.
//
// Note that we keep track of the number of times we've called this callback
// because libgit2 will repeatedly give us credentials until we give it a
// reason to not do so. If we've been called once and our credentials failed
// then we'll be called again, and in this case we assume that the reason
// was because the credentials were wrong.
let mut cred_helper = git2::CredentialHelper::new(url);
cred_helper.config(cfg);
let mut called = 0;
let res = f(&mut |url, username, allowed| {
called += 1;
if called >= 2 {
return Err(git2::Error::from_str("no authentication available"))
}
if allowed.contains(git2::SSH_KEY) ||
allowed.contains(git2::USERNAME) {
let user = username.map(|s| s.to_string())
.or_else(|| cred_helper.username.clone())
.unwrap_or("git".to_string());
if allowed.contains(git2::USERNAME) {
git2::Cred::username(&user)
} else {
git2::Cred::ssh_key_from_agent(&user)
}
} else if allowed.contains(git2::USER_PASS_PLAINTEXT) {
git2::Cred::credential_helper(cfg, url, username)
} else if allowed.contains(git2::DEFAULT) {
git2::Cred::default()
} else {
Err(git2::Error::from_str("no authentication available"))
}
});
if called > 0 {
res.chain_error(|| {
human("failed to authenticate when downloading repository")
})
} else {
res
}
}
pub fn fetch(repo: &git2::Repository, url: &str,
refspec: &str) -> CargoResult<()> {
// Create a local anonymous remote in the repository to fetch the url
with_authentication(url, &try!(repo.config()), |f| {
let mut cb = git2::RemoteCallbacks::new();
cb.credentials(f);
let mut remote = try!(repo.remote_anonymous(&url, Some(refspec)));
try!(remote.add_fetch("refs/tags/*:refs/tags/*"));
remote.set_callbacks(cb);
try!(remote.fetch(&["refs/tags/*:refs/tags/*", refspec], None));
Ok(())
})
}
| fmt | identifier_name |
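The comment inside `reset()` above explains the `.cargo-ok` sentinel: delete it before resetting, recreate it only after the reset succeeds, and let `is_fresh()` trust a checkout only when the marker exists. The same idea in isolation, as a std-only sketch in current Rust (illustrative, not dataset code):

```rust
use std::fs::{self, File};
use std::io;
use std::path::Path;

// Sentinel-file pattern from reset()/is_fresh(): if we are interrupted
// mid-reset, the marker stays absent and the checkout is redone next time.
fn reset_with_marker(dir: &Path, do_reset: impl FnOnce() -> io::Result<()>) -> io::Result<()> {
    let ok_file = dir.join(".cargo-ok");
    let _ = fs::remove_file(&ok_file); // marker gone => checkout considered stale
    do_reset()?;                       // a crash here leaves the marker absent
    File::create(&ok_file)?;           // only a completed reset recreates it
    Ok(())
}

fn is_fresh(dir: &Path) -> bool {
    dir.join(".cargo-ok").exists()
}
```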
utils.rs | use std::fmt;
use std::path::{Path, PathBuf};
use std::fs::{self, File};
use rustc_serialize::{Encodable, Encoder};
use url::Url;
use git2::{self, ObjectType};
use core::GitReference;
use util::{CargoResult, ChainError, human, ToUrl, internal};
#[derive(PartialEq, Clone, Debug)]
pub struct GitRevision(git2::Oid);
impl fmt::Display for GitRevision {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
/// GitRemote represents a remote repository. It gets cloned into a local
/// GitDatabase.
#[derive(PartialEq,Clone,Debug)]
pub struct GitRemote {
url: Url,
}
#[derive(PartialEq,Clone,RustcEncodable)]
struct EncodableGitRemote {
url: String,
}
impl Encodable for GitRemote {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
EncodableGitRemote {
url: self.url.to_string()
}.encode(s)
}
}
/// GitDatabase is a local clone of a remote repository's database. Multiple
/// GitCheckouts can be cloned from this GitDatabase.
pub struct GitDatabase {
remote: GitRemote,
path: PathBuf,
repo: git2::Repository,
}
#[derive(RustcEncodable)]
pub struct EncodableGitDatabase {
remote: GitRemote,
path: String,
}
impl Encodable for GitDatabase {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
EncodableGitDatabase {
remote: self.remote.clone(),
path: self.path.display().to_string()
}.encode(s)
}
}
/// GitCheckout is a local checkout of a particular revision. Calling
/// `clone_into` with a reference will resolve the reference into a revision,
/// and return a CargoError if no revision for that reference was found.
pub struct GitCheckout<'a> {
database: &'a GitDatabase,
location: PathBuf,
revision: GitRevision,
repo: git2::Repository,
}
#[derive(RustcEncodable)]
pub struct EncodableGitCheckout {
database: EncodableGitDatabase,
location: String,
revision: String,
}
impl<'a> Encodable for GitCheckout<'a> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
EncodableGitCheckout {
location: self.location.display().to_string(),
revision: self.revision.to_string(),
database: EncodableGitDatabase {
remote: self.database.remote.clone(),
path: self.database.path.display().to_string(),
},
}.encode(s)
}
}
// Implementations
impl GitRemote {
pub fn new(url: &Url) -> GitRemote {
GitRemote { url: url.clone() }
}
pub fn url(&self) -> &Url {
&self.url
}
pub fn rev_for(&self, path: &Path, reference: &GitReference)
-> CargoResult<GitRevision> {
let db = try!(self.db_at(path));
db.rev_for(reference)
}
pub fn checkout(&self, into: &Path) -> CargoResult<GitDatabase> {
let repo = match git2::Repository::open(into) {
Ok(repo) => {
try!(self.fetch_into(&repo).chain_error(|| {
human(format!("failed to fetch into {}", into.display()))
}));
repo
}
Err(..) => {
try!(self.clone_into(into).chain_error(|| {
human(format!("failed to clone into: {}", into.display()))
}))
}
};
Ok(GitDatabase {
remote: self.clone(),
path: into.to_path_buf(),
repo: repo,
})
}
pub fn db_at(&self, db_path: &Path) -> CargoResult<GitDatabase> {
let repo = try!(git2::Repository::open(db_path));
Ok(GitDatabase {
remote: self.clone(),
path: db_path.to_path_buf(),
repo: repo,
})
}
fn fetch_into(&self, dst: &git2::Repository) -> CargoResult<()> {
// Create a local anonymous remote in the repository to fetch the url
let url = self.url.to_string();
let refspec = "refs/heads/*:refs/heads/*";
fetch(dst, &url, refspec)
}
fn clone_into(&self, dst: &Path) -> CargoResult<git2::Repository> {
let url = self.url.to_string();
if fs::metadata(&dst).is_ok() {
try!(fs::remove_dir_all(dst));
}
try!(fs::create_dir_all(dst));
let repo = try!(git2::Repository::init_bare(dst));
try!(fetch(&repo, &url, "refs/heads/*:refs/heads/*"));
Ok(repo)
}
}
impl GitDatabase {
fn path<'a>(&'a self) -> &'a Path {
&self.path
}
pub fn copy_to(&self, rev: GitRevision, dest: &Path)
-> CargoResult<GitCheckout> {
let checkout = match git2::Repository::open(dest) {
Ok(repo) => {
let checkout = GitCheckout::new(dest, self, rev, repo);
if !checkout.is_fresh() {
try!(checkout.fetch());
try!(checkout.reset());
assert!(checkout.is_fresh());
}
checkout
}
Err(..) => try!(GitCheckout::clone_into(dest, self, rev)),
};
try!(checkout.update_submodules().chain_error(|| {
internal("failed to update submodules")
}));
Ok(checkout)
}
pub fn rev_for(&self, reference: &GitReference) -> CargoResult<GitRevision> {
let id = match *reference {
GitReference::Tag(ref s) => {
try!((|| {
let refname = format!("refs/tags/{}", s);
let id = try!(self.repo.refname_to_id(&refname));
let obj = try!(self.repo.find_object(id, None));
let obj = try!(obj.peel(ObjectType::Commit));
Ok(obj.id())
}).chain_error(|| {
human(format!("failed to find tag `{}`", s))
}))
}
GitReference::Branch(ref s) => {
try!((|| {
let b = try!(self.repo.find_branch(s, git2::BranchType::Local));
b.get().target().chain_error(|| {
human(format!("branch `{}` did not have a target", s))
})
}).chain_error(|| {
human(format!("failed to find branch `{}`", s))
}))
}
GitReference::Rev(ref s) => {
let obj = try!(self.repo.revparse_single(s));
obj.id()
}
};
Ok(GitRevision(id))
}
pub fn has_ref(&self, reference: &str) -> CargoResult<()> {
try!(self.repo.revparse_single(reference));
Ok(())
}
}
impl<'a> GitCheckout<'a> {
fn new(path: &Path, database: &'a GitDatabase, revision: GitRevision,
repo: git2::Repository)
-> GitCheckout<'a>
{
GitCheckout {
location: path.to_path_buf(),
database: database,
revision: revision,
repo: repo,
}
}
fn clone_into(into: &Path, database: &'a GitDatabase,
revision: GitRevision)
-> CargoResult<GitCheckout<'a>>
{
let repo = try!(GitCheckout::clone_repo(database.path(), into));
let checkout = GitCheckout::new(into, database, revision, repo);
try!(checkout.reset());
Ok(checkout)
}
fn clone_repo(source: &Path, into: &Path) -> CargoResult<git2::Repository> | }
fn is_fresh(&self) -> bool {
match self.repo.revparse_single("HEAD") {
Ok(ref head) if head.id() == self.revision.0 => {
// See comments in reset() for why we check this
fs::metadata(self.location.join(".cargo-ok")).is_ok()
}
_ => false,
}
}
fn fetch(&self) -> CargoResult<()> {
info!("fetch {}", self.repo.path().display());
let url = try!(self.database.path.to_url().map_err(human));
let url = url.to_string();
let refspec = "refs/heads/*:refs/heads/*";
try!(fetch(&self.repo, &url, refspec));
Ok(())
}
fn reset(&self) -> CargoResult<()> {
// If we're interrupted while performing this reset (e.g. we die because
// of a signal) Cargo needs to be sure to try to check out this repo
// again on the next go-round.
//
// To enable this we have a dummy file in our checkout, .cargo-ok, which
// if present means that the repo has been successfully reset and is
// ready to go. Hence if we start to do a reset, we make sure this file
// *doesn't* exist, and then once we're done we create the file.
let ok_file = self.location.join(".cargo-ok");
let _ = fs::remove_file(&ok_file);
info!("reset {} to {}", self.repo.path().display(), self.revision);
let object = try!(self.repo.find_object(self.revision.0, None));
try!(self.repo.reset(&object, git2::ResetType::Hard, None));
try!(File::create(ok_file));
Ok(())
}
fn update_submodules(&self) -> CargoResult<()> {
return update_submodules(&self.repo);
fn update_submodules(repo: &git2::Repository) -> CargoResult<()> {
info!("update submodules for: {:?}", repo.workdir().unwrap());
for mut child in try!(repo.submodules()).into_iter() {
try!(child.init(false));
let url = try!(child.url().chain_error(|| {
internal("non-utf8 url for submodule")
}));
// A submodule which is listed in .gitmodules but not actually
// checked out will not have a head id, so we should ignore it.
let head = match child.head_id() {
Some(head) => head,
None => continue,
};
// If the submodule hasn't been checked out yet, we need to
// clone it. If it has been checked out and the head is the same
// as the submodule's head, then we can bail out and go to the
// next submodule.
let head_and_repo = child.open().and_then(|repo| {
let target = try!(repo.head()).target();
Ok((target, repo))
});
let repo = match head_and_repo {
Ok((head, repo)) => {
if child.head_id() == head {
continue
}
repo
}
Err(..) => {
let path = repo.workdir().unwrap().join(child.path());
try!(git2::Repository::clone(url, &path))
}
};
// Fetch data from origin and reset to the head commit
let refspec = "refs/heads/*:refs/heads/*";
try!(fetch(&repo, url, refspec).chain_error(|| {
internal(format!("failed to fetch submodule `{}` from {}",
child.name().unwrap_or(""), url))
}));
let obj = try!(repo.find_object(head, None));
try!(repo.reset(&obj, git2::ResetType::Hard, None));
try!(update_submodules(&repo));
}
Ok(())
}
}
}
fn with_authentication<T, F>(url: &str, cfg: &git2::Config, mut f: F)
-> CargoResult<T>
where F: FnMut(&mut git2::Credentials) -> CargoResult<T>
{
// Prepare the authentication callbacks.
//
// We check the `allowed` types of credentials, and we try to do as much as
// possible based on that:
//
// * Prioritize SSH keys from the local ssh agent as they're likely the most
// reliable. The username here is prioritized from the credential
// callback, then from whatever is configured in git itself, and finally
// we fall back to the generic user of `git`.
//
// * If a username/password is allowed, then we fallback to git2-rs's
// implementation of the credential helper. This is what is configured
// with `credential.helper` in git, and is the interface for the OSX
// keychain, for example.
//
// * After the above two have failed, we just kinda grapple attempting to
// return *something*.
//
// Note that we keep track of the number of times we've called this callback
// because libgit2 will repeatedly give us credentials until we give it a
// reason to not do so. If we've been called once and our credentials failed
// then we'll be called again, and in this case we assume that the reason
// was because the credentials were wrong.
let mut cred_helper = git2::CredentialHelper::new(url);
cred_helper.config(cfg);
let mut called = 0;
let res = f(&mut |url, username, allowed| {
called += 1;
if called >= 2 {
return Err(git2::Error::from_str("no authentication available"))
}
if allowed.contains(git2::SSH_KEY) ||
allowed.contains(git2::USERNAME) {
let user = username.map(|s| s.to_string())
.or_else(|| cred_helper.username.clone())
.unwrap_or("git".to_string());
if allowed.contains(git2::USERNAME) {
git2::Cred::username(&user)
} else {
git2::Cred::ssh_key_from_agent(&user)
}
} else if allowed.contains(git2::USER_PASS_PLAINTEXT) {
git2::Cred::credential_helper(cfg, url, username)
} else if allowed.contains(git2::DEFAULT) {
git2::Cred::default()
} else {
Err(git2::Error::from_str("no authentication available"))
}
});
if called > 0 {
res.chain_error(|| {
human("failed to authenticate when downloading repository")
})
} else {
res
}
}
pub fn fetch(repo: &git2::Repository, url: &str,
refspec: &str) -> CargoResult<()> {
// Create a local anonymous remote in the repository to fetch the url
with_authentication(url, &try!(repo.config()), |f| {
let mut cb = git2::RemoteCallbacks::new();
cb.credentials(f);
let mut remote = try!(repo.remote_anonymous(&url, Some(refspec)));
try!(remote.add_fetch("refs/tags/*:refs/tags/*"));
remote.set_callbacks(cb);
try!(remote.fetch(&["refs/tags/*:refs/tags/*", refspec], None));
Ok(())
})
}
| {
let dirname = into.parent().unwrap();
try!(fs::create_dir_all(&dirname).chain_error(|| {
human(format!("Couldn't mkdir {}", dirname.display()))
}));
if fs::metadata(&into).is_ok() {
try!(fs::remove_dir_all(into).chain_error(|| {
human(format!("Couldn't rmdir {}", into.display()))
}));
}
let url = try!(source.to_url().map_err(human));
let url = url.to_string();
let repo = try!(git2::Repository::clone(&url, into).chain_error(|| {
internal(format!("failed to clone {} into {}", source.display(),
into.display()))
}));
Ok(repo) | identifier_body |
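The long comment in `with_authentication` above spells out a priority order for credentials. Stripped of the git2 types, the decision ladder looks roughly like the sketch below; the struct, enum and field names are mine, not git2's:

```rust
// Plain-Rust restatement of the credential priority described above.
struct Allowed {
    ssh_key: bool,
    username: bool,
    user_pass_plaintext: bool,
    default: bool,
}

enum Cred {
    Username(String),  // bare username probe
    SshAgent(String),  // key offered by the local ssh agent
    CredentialHelper,  // delegate to git's credential.helper
    Default,           // whatever libgit2 considers the default
}

fn pick_credential(allowed: &Allowed,
                   callback_user: Option<&str>,
                   config_user: Option<&str>) -> Option<Cred> {
    if allowed.ssh_key || allowed.username {
        // Username priority: callback, then git config, then "git".
        let user = callback_user.or(config_user).unwrap_or("git").to_string();
        if allowed.username {
            Some(Cred::Username(user))
        } else {
            Some(Cred::SshAgent(user))
        }
    } else if allowed.user_pass_plaintext {
        Some(Cred::CredentialHelper)
    } else if allowed.default {
        Some(Cred::Default)
    } else {
        None // mirrors "no authentication available"
    }
}
```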
utils.rs | use std::fmt;
use std::path::{Path, PathBuf};
use std::fs::{self, File};
use rustc_serialize::{Encodable, Encoder};
use url::Url;
use git2::{self, ObjectType};
use core::GitReference;
use util::{CargoResult, ChainError, human, ToUrl, internal};
#[derive(PartialEq, Clone, Debug)]
pub struct GitRevision(git2::Oid);
impl fmt::Display for GitRevision {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
/// GitRemote represents a remote repository. It gets cloned into a local
/// GitDatabase.
#[derive(PartialEq,Clone,Debug)]
pub struct GitRemote {
url: Url,
}
#[derive(PartialEq,Clone,RustcEncodable)]
struct EncodableGitRemote {
url: String,
}
impl Encodable for GitRemote {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
EncodableGitRemote {
url: self.url.to_string()
}.encode(s)
}
}
/// GitDatabase is a local clone of a remote repository's database. Multiple
/// GitCheckouts can be cloned from this GitDatabase.
pub struct GitDatabase {
remote: GitRemote,
path: PathBuf,
repo: git2::Repository,
}
#[derive(RustcEncodable)]
pub struct EncodableGitDatabase {
remote: GitRemote,
path: String,
}
impl Encodable for GitDatabase {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
EncodableGitDatabase {
remote: self.remote.clone(),
path: self.path.display().to_string()
}.encode(s)
}
}
/// GitCheckout is a local checkout of a particular revision. Calling
/// `clone_into` with a reference will resolve the reference into a revision,
/// and return a CargoError if no revision for that reference was found.
pub struct GitCheckout<'a> {
database: &'a GitDatabase,
location: PathBuf,
revision: GitRevision,
repo: git2::Repository,
}
#[derive(RustcEncodable)]
pub struct EncodableGitCheckout {
database: EncodableGitDatabase,
location: String,
revision: String,
}
impl<'a> Encodable for GitCheckout<'a> {
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
EncodableGitCheckout {
location: self.location.display().to_string(),
revision: self.revision.to_string(),
database: EncodableGitDatabase {
remote: self.database.remote.clone(),
path: self.database.path.display().to_string(),
},
}.encode(s)
}
}
// Implementations
impl GitRemote {
pub fn new(url: &Url) -> GitRemote {
GitRemote { url: url.clone() }
}
pub fn url(&self) -> &Url {
&self.url
}
pub fn rev_for(&self, path: &Path, reference: &GitReference)
-> CargoResult<GitRevision> {
let db = try!(self.db_at(path));
db.rev_for(reference)
}
pub fn checkout(&self, into: &Path) -> CargoResult<GitDatabase> {
let repo = match git2::Repository::open(into) {
Ok(repo) => {
try!(self.fetch_into(&repo).chain_error(|| {
human(format!("failed to fetch into {}", into.display()))
}));
repo
}
Err(..) => {
try!(self.clone_into(into).chain_error(|| {
human(format!("failed to clone into: {}", into.display()))
}))
}
};
Ok(GitDatabase {
remote: self.clone(),
path: into.to_path_buf(),
repo: repo,
})
}
pub fn db_at(&self, db_path: &Path) -> CargoResult<GitDatabase> {
let repo = try!(git2::Repository::open(db_path));
Ok(GitDatabase {
remote: self.clone(),
path: db_path.to_path_buf(),
repo: repo,
})
}
fn fetch_into(&self, dst: &git2::Repository) -> CargoResult<()> {
// Create a local anonymous remote in the repository to fetch the url
let url = self.url.to_string();
let refspec = "refs/heads/*:refs/heads/*";
fetch(dst, &url, refspec)
}
fn clone_into(&self, dst: &Path) -> CargoResult<git2::Repository> {
let url = self.url.to_string();
if fs::metadata(&dst).is_ok() {
try!(fs::remove_dir_all(dst));
}
try!(fs::create_dir_all(dst));
let repo = try!(git2::Repository::init_bare(dst));
try!(fetch(&repo, &url, "refs/heads/*:refs/heads/*"));
Ok(repo)
}
}
impl GitDatabase {
fn path<'a>(&'a self) -> &'a Path {
&self.path
}
pub fn copy_to(&self, rev: GitRevision, dest: &Path)
-> CargoResult<GitCheckout> {
let checkout = match git2::Repository::open(dest) {
Ok(repo) => {
let checkout = GitCheckout::new(dest, self, rev, repo);
if !checkout.is_fresh() {
try!(checkout.fetch());
try!(checkout.reset());
assert!(checkout.is_fresh());
}
checkout
}
Err(..) => try!(GitCheckout::clone_into(dest, self, rev)),
};
try!(checkout.update_submodules().chain_error(|| {
internal("failed to update submodules")
}));
Ok(checkout)
}
pub fn rev_for(&self, reference: &GitReference) -> CargoResult<GitRevision> {
let id = match *reference {
GitReference::Tag(ref s) => {
try!((|| {
let refname = format!("refs/tags/{}", s);
let id = try!(self.repo.refname_to_id(&refname));
let obj = try!(self.repo.find_object(id, None));
let obj = try!(obj.peel(ObjectType::Commit));
Ok(obj.id())
}).chain_error(|| {
human(format!("failed to find tag `{}`", s))
}))
}
GitReference::Branch(ref s) => {
try!((|| {
let b = try!(self.repo.find_branch(s, git2::BranchType::Local));
b.get().target().chain_error(|| {
human(format!("branch `{}` did not have a target", s))
})
}).chain_error(|| {
human(format!("failed to find branch `{}`", s))
}))
}
GitReference::Rev(ref s) => {
let obj = try!(self.repo.revparse_single(s));
obj.id()
}
};
Ok(GitRevision(id))
}
pub fn has_ref(&self, reference: &str) -> CargoResult<()> {
try!(self.repo.revparse_single(reference));
Ok(())
}
}
impl<'a> GitCheckout<'a> {
fn new(path: &Path, database: &'a GitDatabase, revision: GitRevision,
repo: git2::Repository)
-> GitCheckout<'a>
{
GitCheckout {
location: path.to_path_buf(),
database: database,
revision: revision,
repo: repo,
}
}
fn clone_into(into: &Path, database: &'a GitDatabase,
revision: GitRevision)
-> CargoResult<GitCheckout<'a>>
{
let repo = try!(GitCheckout::clone_repo(database.path(), into));
let checkout = GitCheckout::new(into, database, revision, repo);
try!(checkout.reset());
Ok(checkout)
}
fn clone_repo(source: &Path, into: &Path) -> CargoResult<git2::Repository> {
let dirname = into.parent().unwrap();
try!(fs::create_dir_all(&dirname).chain_error(|| {
human(format!("Couldn't mkdir {}", dirname.display()))
}));
if fs::metadata(&into).is_ok() {
try!(fs::remove_dir_all(into).chain_error(|| {
human(format!("Couldn't rmdir {}", into.display()))
}));
}
let url = try!(source.to_url().map_err(human));
let url = url.to_string();
let repo = try!(git2::Repository::clone(&url, into).chain_error(|| {
internal(format!("failed to clone {} into {}", source.display(),
into.display()))
}));
Ok(repo)
}
fn is_fresh(&self) -> bool {
match self.repo.revparse_single("HEAD") {
Ok(ref head) if head.id() == self.revision.0 => {
// See comments in reset() for why we check this
fs::metadata(self.location.join(".cargo-ok")).is_ok()
}
_ => false,
}
}
fn fetch(&self) -> CargoResult<()> {
info!("fetch {}", self.repo.path().display());
let url = try!(self.database.path.to_url().map_err(human));
let url = url.to_string();
let refspec = "refs/heads/*:refs/heads/*";
try!(fetch(&self.repo, &url, refspec));
Ok(())
}
fn reset(&self) -> CargoResult<()> {
// If we're interrupted while performing this reset (e.g. we die because
// of a signal) Cargo needs to be sure to try to check out this repo
// again on the next go-round.
//
// To enable this we have a dummy file in our checkout, .cargo-ok, which
// if present means that the repo has been successfully reset and is
// ready to go. Hence if we start to do a reset, we make sure this file
// *doesn't* exist, and then once we're done we create the file.
let ok_file = self.location.join(".cargo-ok");
let _ = fs::remove_file(&ok_file);
info!("reset {} to {}", self.repo.path().display(), self.revision);
let object = try!(self.repo.find_object(self.revision.0, None));
try!(self.repo.reset(&object, git2::ResetType::Hard, None));
try!(File::create(ok_file));
Ok(())
}
fn update_submodules(&self) -> CargoResult<()> {
return update_submodules(&self.repo);
fn update_submodules(repo: &git2::Repository) -> CargoResult<()> {
info!("update submodules for: {:?}", repo.workdir().unwrap());
for mut child in try!(repo.submodules()).into_iter() {
try!(child.init(false));
let url = try!(child.url().chain_error(|| {
internal("non-utf8 url for submodule")
}));
// A submodule which is listed in .gitmodules but not actually
// checked out will not have a head id, so we should ignore it.
let head = match child.head_id() {
Some(head) => head,
None => continue,
};
// If the submodule hasn't been checked out yet, we need to
// clone it. If it has been checked out and the head is the same
// as the submodule's head, then we can bail out and go to the
// next submodule.
let head_and_repo = child.open().and_then(|repo| {
let target = try!(repo.head()).target();
Ok((target, repo))
});
let repo = match head_and_repo {
Ok((head, repo)) => {
if child.head_id() == head {
continue
}
repo
}
Err(..) => {
let path = repo.workdir().unwrap().join(child.path());
try!(git2::Repository::clone(url, &path))
}
};
// Fetch data from origin and reset to the head commit
let refspec = "refs/heads/*:refs/heads/*";
try!(fetch(&repo, url, refspec).chain_error(|| {
internal(format!("failed to fetch submodule `{}` from {}",
child.name().unwrap_or(""), url))
}));
let obj = try!(repo.find_object(head, None));
try!(repo.reset(&obj, git2::ResetType::Hard, None));
try!(update_submodules(&repo));
}
Ok(())
}
}
}
fn with_authentication<T, F>(url: &str, cfg: &git2::Config, mut f: F)
-> CargoResult<T>
where F: FnMut(&mut git2::Credentials) -> CargoResult<T>
{
// Prepare the authentication callbacks.
// | // We check the `allowed` types of credentials, and we try to do as much as
// possible based on that:
//
// * Prioritize SSH keys from the local ssh agent as they're likely the most
// reliable. The username here is prioritized from the credential
// callback, then from whatever is configured in git itself, and finally
// we fall back to the generic user of `git`.
//
// * If a username/password is allowed, then we fallback to git2-rs's
// implementation of the credential helper. This is what is configured
// with `credential.helper` in git, and is the interface for the OSX
// keychain, for example.
//
// * After the above two have failed, we just kinda grapple attempting to
// return *something*.
//
// Note that we keep track of the number of times we've called this callback
// because libgit2 will repeatedly give us credentials until we give it a
// reason to not do so. If we've been called once and our credentials failed
// then we'll be called again, and in this case we assume that the reason
// was because the credentials were wrong.
let mut cred_helper = git2::CredentialHelper::new(url);
cred_helper.config(cfg);
let mut called = 0;
let res = f(&mut |url, username, allowed| {
called += 1;
if called >= 2 {
return Err(git2::Error::from_str("no authentication available"))
}
if allowed.contains(git2::SSH_KEY) ||
allowed.contains(git2::USERNAME) {
let user = username.map(|s| s.to_string())
.or_else(|| cred_helper.username.clone())
.unwrap_or("git".to_string());
if allowed.contains(git2::USERNAME) {
git2::Cred::username(&user)
} else {
git2::Cred::ssh_key_from_agent(&user)
}
} else if allowed.contains(git2::USER_PASS_PLAINTEXT) {
git2::Cred::credential_helper(cfg, url, username)
} else if allowed.contains(git2::DEFAULT) {
git2::Cred::default()
} else {
Err(git2::Error::from_str("no authentication available"))
}
});
if called > 0 {
res.chain_error(|| {
human("failed to authenticate when downloading repository")
})
} else {
res
}
}
pub fn fetch(repo: &git2::Repository, url: &str,
refspec: &str) -> CargoResult<()> {
// Create a local anonymous remote in the repository to fetch the url
with_authentication(url, &try!(repo.config()), |f| {
let mut cb = git2::RemoteCallbacks::new();
cb.credentials(f);
let mut remote = try!(repo.remote_anonymous(&url, Some(refspec)));
try!(remote.add_fetch("refs/tags/*:refs/tags/*"));
remote.set_callbacks(cb);
try!(remote.fetch(&["refs/tags/*:refs/tags/*", refspec], None));
Ok(())
})
} | random_line_split |
|
gather_moves.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Computes moves.
*/
use mc = middle::mem_categorization;
use middle::borrowck::*;
use middle::borrowck::move_data::*;
use middle::moves;
use middle::ty;
use syntax::ast;
use syntax::ast_util;
use syntax::codemap::span;
use util::ppaux::{UserString};
pub fn gather_decl(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
decl_id: ast::node_id,
_decl_span: span,
var_id: ast::node_id) {
let loan_path = @LpVar(var_id);
move_data.add_move(bccx.tcx, loan_path, decl_id, Declared);
}
pub fn gather_move_from_expr(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
move_expr: @ast::expr,
cmt: mc::cmt) {
gather_move_from_expr_or_pat(bccx, move_data, move_expr.id,
MoveExpr(move_expr), cmt);
}
pub fn gather_move_from_pat(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
move_pat: @ast::pat,
cmt: mc::cmt) {
gather_move_from_expr_or_pat(bccx, move_data, move_pat.id,
MovePat(move_pat), cmt);
}
fn gather_move_from_expr_or_pat(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
move_id: ast::node_id,
move_kind: MoveKind,
cmt: mc::cmt) {
if !check_is_legal_to_move_from(bccx, cmt, cmt) {
return;
}
match opt_loan_path(cmt) {
Some(loan_path) => {
move_data.add_move(bccx.tcx, loan_path, move_id, move_kind);
}
None => {
// move from rvalue or unsafe pointer, hence ok
}
}
}
pub fn gather_captures(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
closure_expr: @ast::expr) {
let captured_vars = bccx.capture_map.get(&closure_expr.id);
for captured_vars.iter().advance |captured_var| {
match captured_var.mode {
moves::CapMove => {
let fvar_id = ast_util::def_id_of_def(captured_var.def).node;
let loan_path = @LpVar(fvar_id);
move_data.add_move(bccx.tcx, loan_path, closure_expr.id,
Captured(closure_expr));
}
moves::CapCopy | moves::CapRef => {}
}
}
}
pub fn gather_assignment(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
assignment_id: ast::node_id,
assignment_span: span,
assignee_loan_path: @LoanPath,
assignee_id: ast::node_id) {
move_data.add_assignment(bccx.tcx,
assignee_loan_path,
assignment_id,
assignment_span,
assignee_id);
}
fn check_is_legal_to_move_from(bccx: @BorrowckCtxt,
cmt0: mc::cmt,
cmt: mc::cmt) -> bool {
match cmt.cat {
mc::cat_implicit_self(*) |
mc::cat_deref(_, _, mc::region_ptr(*)) |
mc::cat_deref(_, _, mc::gc_ptr(*)) |
mc::cat_deref(_, _, mc::unsafe_ptr(*)) => {
bccx.span_err(
cmt0.span,
fmt!("cannot move out of %s",
bccx.cmt_to_str(cmt)));
false
}
// These are separate from the above cases for a better error message.
mc::cat_stack_upvar(*) |
mc::cat_copied_upvar(mc::CopiedUpvar { onceness: ast::Many, _ }) => {
let once_hint = if bccx.tcx.sess.once_fns() {
" (unless the destination closure type is `once fn')"
} else {
""
};
bccx.span_err(
cmt0.span,
fmt!("cannot move out of %s%s", bccx.cmt_to_str(cmt), once_hint));
false
}
// Can move out of captured upvars only if the destination closure
// type is 'once'. 1-shot stack closures emit the copied_upvar form
// (see mem_categorization.rs).
mc::cat_copied_upvar(mc::CopiedUpvar { onceness: ast::Once, _ }) => {
true
}
// It seems strange to allow a move out of a static item,
// but what happens in practice is that you have a
// reference to a constant with a type that should be
// moved, like `None::<~int>`. The type of this constant
// is technically `Option<~int>`, which moves, but we know
// that the content of static items will never actually
// contain allocated pointers, so we can just memcpy it.
// Since static items can never have allocated memory,
// this is ok. For now anyhow.
mc::cat_static_item => {
true
}
mc::cat_rvalue(*) |
mc::cat_local(*) |
mc::cat_arg(*) |
mc::cat_self(*) => {
true
}
mc::cat_downcast(b) |
mc::cat_interior(b, _) => |
mc::cat_deref(b, _, mc::uniq_ptr(*)) |
mc::cat_discr(b, _) => {
check_is_legal_to_move_from(bccx, cmt0, b)
}
}
}
| {
match ty::get(b.ty).sty {
ty::ty_struct(did, _) | ty::ty_enum(did, _) => {
if ty::has_dtor(bccx.tcx, did) {
bccx.span_err(
cmt0.span,
fmt!("cannot move out of type `%s`, \
which defines the `Drop` trait",
b.ty.user_string(bccx.tcx)));
false
} else {
check_is_legal_to_move_from(bccx, cmt0, b)
}
}
_ => {
check_is_legal_to_move_from(bccx, cmt0, b)
}
}
} | conditional_block |
gather_moves.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Computes moves.
*/
use mc = middle::mem_categorization;
use middle::borrowck::*;
use middle::borrowck::move_data::*;
use middle::moves;
use middle::ty;
use syntax::ast;
use syntax::ast_util;
use syntax::codemap::span;
use util::ppaux::{UserString};
pub fn gather_decl(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
decl_id: ast::node_id,
_decl_span: span,
var_id: ast::node_id) {
let loan_path = @LpVar(var_id);
move_data.add_move(bccx.tcx, loan_path, decl_id, Declared);
}
pub fn gather_move_from_expr(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
move_expr: @ast::expr,
cmt: mc::cmt) {
gather_move_from_expr_or_pat(bccx, move_data, move_expr.id,
MoveExpr(move_expr), cmt);
}
pub fn gather_move_from_pat(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
move_pat: @ast::pat,
cmt: mc::cmt) {
gather_move_from_expr_or_pat(bccx, move_data, move_pat.id,
MovePat(move_pat), cmt);
}
fn gather_move_from_expr_or_pat(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
move_id: ast::node_id,
move_kind: MoveKind,
cmt: mc::cmt) {
if !check_is_legal_to_move_from(bccx, cmt, cmt) {
return;
}
match opt_loan_path(cmt) {
Some(loan_path) => {
move_data.add_move(bccx.tcx, loan_path, move_id, move_kind);
}
None => {
// move from rvalue or unsafe pointer, hence ok
}
}
}
pub fn gather_captures(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
closure_expr: @ast::expr) {
let captured_vars = bccx.capture_map.get(&closure_expr.id);
for captured_vars.iter().advance |captured_var| {
match captured_var.mode {
moves::CapMove => {
let fvar_id = ast_util::def_id_of_def(captured_var.def).node;
let loan_path = @LpVar(fvar_id);
move_data.add_move(bccx.tcx, loan_path, closure_expr.id,
Captured(closure_expr));
}
moves::CapCopy | moves::CapRef => {}
}
}
}
pub fn gather_assignment(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
assignment_id: ast::node_id,
assignment_span: span,
assignee_loan_path: @LoanPath,
assignee_id: ast::node_id) {
move_data.add_assignment(bccx.tcx,
assignee_loan_path,
assignment_id,
assignment_span,
assignee_id);
}
fn | (bccx: @BorrowckCtxt,
cmt0: mc::cmt,
cmt: mc::cmt) -> bool {
match cmt.cat {
mc::cat_implicit_self(*) |
mc::cat_deref(_, _, mc::region_ptr(*)) |
mc::cat_deref(_, _, mc::gc_ptr(*)) |
mc::cat_deref(_, _, mc::unsafe_ptr(*)) => {
bccx.span_err(
cmt0.span,
fmt!("cannot move out of %s",
bccx.cmt_to_str(cmt)));
false
}
// These are separate from the above cases for a better error message.
mc::cat_stack_upvar(*) |
mc::cat_copied_upvar(mc::CopiedUpvar { onceness: ast::Many, _ }) => {
let once_hint = if bccx.tcx.sess.once_fns() {
" (unless the destination closure type is `once fn')"
} else {
""
};
bccx.span_err(
cmt0.span,
fmt!("cannot move out of %s%s", bccx.cmt_to_str(cmt), once_hint));
false
}
// Can move out of captured upvars only if the destination closure
// type is 'once'. 1-shot stack closures emit the copied_upvar form
// (see mem_categorization.rs).
mc::cat_copied_upvar(mc::CopiedUpvar { onceness: ast::Once, _ }) => {
true
}
// It seems strange to allow a move out of a static item,
// but what happens in practice is that you have a
// reference to a constant with a type that should be
// moved, like `None::<~int>`. The type of this constant
// is technically `Option<~int>`, which moves, but we know
// that the content of static items will never actually
// contain allocated pointers, so we can just memcpy it.
// Since static items can never have allocated memory,
// this is ok. For now anyhow.
mc::cat_static_item => {
true
}
mc::cat_rvalue(*) |
mc::cat_local(*) |
mc::cat_arg(*) |
mc::cat_self(*) => {
true
}
mc::cat_downcast(b) |
mc::cat_interior(b, _) => {
match ty::get(b.ty).sty {
ty::ty_struct(did, _) | ty::ty_enum(did, _) => {
if ty::has_dtor(bccx.tcx, did) {
bccx.span_err(
cmt0.span,
fmt!("cannot move out of type `%s`, \
which defines the `Drop` trait",
b.ty.user_string(bccx.tcx)));
false
} else {
check_is_legal_to_move_from(bccx, cmt0, b)
}
}
_ => {
check_is_legal_to_move_from(bccx, cmt0, b)
}
}
}
mc::cat_deref(b, _, mc::uniq_ptr(*)) |
mc::cat_discr(b, _) => {
check_is_legal_to_move_from(bccx, cmt0, b)
}
}
}
| check_is_legal_to_move_from | identifier_name |
gather_moves.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Computes moves.
*/
use mc = middle::mem_categorization;
use middle::borrowck::*;
use middle::borrowck::move_data::*;
use middle::moves;
use middle::ty;
use syntax::ast;
use syntax::ast_util;
use syntax::codemap::span;
use util::ppaux::{UserString};
pub fn gather_decl(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
decl_id: ast::node_id,
_decl_span: span,
var_id: ast::node_id) {
let loan_path = @LpVar(var_id);
move_data.add_move(bccx.tcx, loan_path, decl_id, Declared);
}
pub fn gather_move_from_expr(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
move_expr: @ast::expr,
cmt: mc::cmt) {
gather_move_from_expr_or_pat(bccx, move_data, move_expr.id,
MoveExpr(move_expr), cmt);
}
pub fn gather_move_from_pat(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
move_pat: @ast::pat,
cmt: mc::cmt) {
gather_move_from_expr_or_pat(bccx, move_data, move_pat.id,
MovePat(move_pat), cmt);
}
fn gather_move_from_expr_or_pat(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
move_id: ast::node_id,
move_kind: MoveKind,
cmt: mc::cmt) {
if !check_is_legal_to_move_from(bccx, cmt, cmt) {
return;
}
match opt_loan_path(cmt) {
Some(loan_path) => {
move_data.add_move(bccx.tcx, loan_path, move_id, move_kind);
}
None => {
// move from rvalue or unsafe pointer, hence ok
}
}
}
pub fn gather_captures(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
closure_expr: @ast::expr) {
let captured_vars = bccx.capture_map.get(&closure_expr.id);
for captured_vars.iter().advance |captured_var| {
match captured_var.mode { | move_data.add_move(bccx.tcx, loan_path, closure_expr.id,
Captured(closure_expr));
}
moves::CapCopy | moves::CapRef => {}
}
}
}
pub fn gather_assignment(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
assignment_id: ast::node_id,
assignment_span: span,
assignee_loan_path: @LoanPath,
assignee_id: ast::node_id) {
move_data.add_assignment(bccx.tcx,
assignee_loan_path,
assignment_id,
assignment_span,
assignee_id);
}
fn check_is_legal_to_move_from(bccx: @BorrowckCtxt,
cmt0: mc::cmt,
cmt: mc::cmt) -> bool {
match cmt.cat {
mc::cat_implicit_self(*) |
mc::cat_deref(_, _, mc::region_ptr(*)) |
mc::cat_deref(_, _, mc::gc_ptr(*)) |
mc::cat_deref(_, _, mc::unsafe_ptr(*)) => {
bccx.span_err(
cmt0.span,
fmt!("cannot move out of %s",
bccx.cmt_to_str(cmt)));
false
}
// These are separate from the above cases for a better error message.
mc::cat_stack_upvar(*) |
mc::cat_copied_upvar(mc::CopiedUpvar { onceness: ast::Many, _ }) => {
let once_hint = if bccx.tcx.sess.once_fns() {
" (unless the destination closure type is `once fn')"
} else {
""
};
bccx.span_err(
cmt0.span,
fmt!("cannot move out of %s%s", bccx.cmt_to_str(cmt), once_hint));
false
}
// Can move out of captured upvars only if the destination closure
// type is 'once'. 1-shot stack closures emit the copied_upvar form
// (see mem_categorization.rs).
mc::cat_copied_upvar(mc::CopiedUpvar { onceness: ast::Once, _ }) => {
true
}
// It seems strange to allow a move out of a static item,
// but what happens in practice is that you have a
// reference to a constant with a type that should be
// moved, like `None::<~int>`. The type of this constant
// is technically `Option<~int>`, which moves, but we know
// that the content of static items will never actually
// contain allocated pointers, so we can just memcpy it.
// Since static items can never have allocated memory,
// this is ok. For now anyhow.
mc::cat_static_item => {
true
}
mc::cat_rvalue(*) |
mc::cat_local(*) |
mc::cat_arg(*) |
mc::cat_self(*) => {
true
}
mc::cat_downcast(b) |
mc::cat_interior(b, _) => {
match ty::get(b.ty).sty {
ty::ty_struct(did, _) | ty::ty_enum(did, _) => {
if ty::has_dtor(bccx.tcx, did) {
bccx.span_err(
cmt0.span,
fmt!("cannot move out of type `%s`, \
which defines the `Drop` trait",
b.ty.user_string(bccx.tcx)));
false
} else {
check_is_legal_to_move_from(bccx, cmt0, b)
}
}
_ => {
check_is_legal_to_move_from(bccx, cmt0, b)
}
}
}
mc::cat_deref(b, _, mc::uniq_ptr(*)) |
mc::cat_discr(b, _) => {
check_is_legal_to_move_from(bccx, cmt0, b)
}
}
} | moves::CapMove => {
let fvar_id = ast_util::def_id_of_def(captured_var.def).node;
let loan_path = @LpVar(fvar_id); | random_line_split |
gather_moves.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Computes moves.
*/
use mc = middle::mem_categorization;
use middle::borrowck::*;
use middle::borrowck::move_data::*;
use middle::moves;
use middle::ty;
use syntax::ast;
use syntax::ast_util;
use syntax::codemap::span;
use util::ppaux::{UserString};
pub fn gather_decl(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
decl_id: ast::node_id,
_decl_span: span,
var_id: ast::node_id) {
let loan_path = @LpVar(var_id);
move_data.add_move(bccx.tcx, loan_path, decl_id, Declared);
}
pub fn gather_move_from_expr(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
move_expr: @ast::expr,
cmt: mc::cmt) |
pub fn gather_move_from_pat(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
move_pat: @ast::pat,
cmt: mc::cmt) {
gather_move_from_expr_or_pat(bccx, move_data, move_pat.id,
MovePat(move_pat), cmt);
}
fn gather_move_from_expr_or_pat(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
move_id: ast::node_id,
move_kind: MoveKind,
cmt: mc::cmt) {
if !check_is_legal_to_move_from(bccx, cmt, cmt) {
return;
}
match opt_loan_path(cmt) {
Some(loan_path) => {
move_data.add_move(bccx.tcx, loan_path, move_id, move_kind);
}
None => {
// move from rvalue or unsafe pointer, hence ok
}
}
}
pub fn gather_captures(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
closure_expr: @ast::expr) {
let captured_vars = bccx.capture_map.get(&closure_expr.id);
for captured_vars.iter().advance |captured_var| {
match captured_var.mode {
moves::CapMove => {
let fvar_id = ast_util::def_id_of_def(captured_var.def).node;
let loan_path = @LpVar(fvar_id);
move_data.add_move(bccx.tcx, loan_path, closure_expr.id,
Captured(closure_expr));
}
moves::CapCopy | moves::CapRef => {}
}
}
}
pub fn gather_assignment(bccx: @BorrowckCtxt,
move_data: &mut MoveData,
assignment_id: ast::node_id,
assignment_span: span,
assignee_loan_path: @LoanPath,
assignee_id: ast::node_id) {
move_data.add_assignment(bccx.tcx,
assignee_loan_path,
assignment_id,
assignment_span,
assignee_id);
}
fn check_is_legal_to_move_from(bccx: @BorrowckCtxt,
cmt0: mc::cmt,
cmt: mc::cmt) -> bool {
match cmt.cat {
mc::cat_implicit_self(*) |
mc::cat_deref(_, _, mc::region_ptr(*)) |
mc::cat_deref(_, _, mc::gc_ptr(*)) |
mc::cat_deref(_, _, mc::unsafe_ptr(*)) => {
bccx.span_err(
cmt0.span,
fmt!("cannot move out of %s",
bccx.cmt_to_str(cmt)));
false
}
// These are separate from the above cases for a better error message.
mc::cat_stack_upvar(*) |
mc::cat_copied_upvar(mc::CopiedUpvar { onceness: ast::Many, _ }) => {
let once_hint = if bccx.tcx.sess.once_fns() {
" (unless the destination closure type is `once fn')"
} else {
""
};
bccx.span_err(
cmt0.span,
fmt!("cannot move out of %s%s", bccx.cmt_to_str(cmt), once_hint));
false
}
// Can move out of captured upvars only if the destination closure
// type is 'once'. 1-shot stack closures emit the copied_upvar form
// (see mem_categorization.rs).
mc::cat_copied_upvar(mc::CopiedUpvar { onceness: ast::Once, _ }) => {
true
}
// It seems strange to allow a move out of a static item,
// but what happens in practice is that you have a
// reference to a constant with a type that should be
// moved, like `None::<~int>`. The type of this constant
// is technically `Option<~int>`, which moves, but we know
// that the content of static items will never actually
// contain allocated pointers, so we can just memcpy it.
// Since static items can never have allocated memory,
// this is ok. For now anyhow.
mc::cat_static_item => {
true
}
mc::cat_rvalue(*) |
mc::cat_local(*) |
mc::cat_arg(*) |
mc::cat_self(*) => {
true
}
mc::cat_downcast(b) |
mc::cat_interior(b, _) => {
match ty::get(b.ty).sty {
ty::ty_struct(did, _) | ty::ty_enum(did, _) => {
if ty::has_dtor(bccx.tcx, did) {
bccx.span_err(
cmt0.span,
fmt!("cannot move out of type `%s`, \
which defines the `Drop` trait",
b.ty.user_string(bccx.tcx)));
false
} else {
check_is_legal_to_move_from(bccx, cmt0, b)
}
}
_ => {
check_is_legal_to_move_from(bccx, cmt0, b)
}
}
}
mc::cat_deref(b, _, mc::uniq_ptr(*)) |
mc::cat_discr(b, _) => {
check_is_legal_to_move_from(bccx, cmt0, b)
}
}
}
| {
gather_move_from_expr_or_pat(bccx, move_data, move_expr.id,
MoveExpr(move_expr), cmt);
} | identifier_body |
mod.rs | // c4puter embedded controller firmware
// Copyright (C) 2017 Chris Pavlina
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | //
// You should have received a copy of the GNU General Public License along
// with this program; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
//
//! Utilities for processing and displaying data
mod parseint;
mod hexprint;
pub mod base64;
pub mod utf;
pub use self::parseint::ParseInt;
pub use self::hexprint::hexprint; | // GNU General Public License for more details. | random_line_split |
enum-discrim-manual-sizing.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
use std::mem::{size_of, align_of};
#[repr(i8)]
enum Ei8 {
Ai8 = 0,
Bi8 = 1
}
#[repr(u8)]
enum Eu8 {
Au8 = 0,
Bu8 = 1
}
#[repr(i16)]
enum Ei16 {
Ai16 = 0,
Bi16 = 1
}
#[repr(u16)]
enum Eu16 {
Au16 = 0,
Bu16 = 1
}
#[repr(i32)]
enum Ei32 {
Ai32 = 0,
Bi32 = 1
}
#[repr(u32)]
enum Eu32 {
Au32 = 0,
Bu32 = 1
}
#[repr(i64)]
enum Ei64 {
Ai64 = 0,
Bi64 = 1
}
#[repr(u64)]
enum Eu64 {
Au64 = 0,
Bu64 = 1
}
#[repr(isize)]
enum Eint {
Aint = 0,
Bint = 1
}
#[repr(usize)]
enum Euint {
Auint = 0,
Buint = 1
}
#[repr(u8)]
enum Eu8NonCLike<T> {
_None,
_Some(T),
}
#[repr(i64)]
enum Ei64NonCLike<T> {
_None,
_Some(T),
}
#[repr(u64)]
enum Eu64NonCLike<T> { |
pub fn main() {
assert_eq!(size_of::<Ei8>(), 1);
assert_eq!(size_of::<Eu8>(), 1);
assert_eq!(size_of::<Ei16>(), 2);
assert_eq!(size_of::<Eu16>(), 2);
assert_eq!(size_of::<Ei32>(), 4);
assert_eq!(size_of::<Eu32>(), 4);
assert_eq!(size_of::<Ei64>(), 8);
assert_eq!(size_of::<Eu64>(), 8);
assert_eq!(size_of::<Eint>(), size_of::<isize>());
assert_eq!(size_of::<Euint>(), size_of::<usize>());
assert_eq!(size_of::<Eu8NonCLike<()>>(), 1);
assert_eq!(size_of::<Ei64NonCLike<()>>(), 8);
assert_eq!(size_of::<Eu64NonCLike<()>>(), 8);
let u8_expected_size = round_up(9, align_of::<Eu64NonCLike<u8>>());
assert_eq!(size_of::<Eu64NonCLike<u8>>(), u8_expected_size);
let array_expected_size = round_up(28, align_of::<Eu64NonCLike<[u32; 5]>>());
assert_eq!(size_of::<Eu64NonCLike<[u32; 5]>>(), array_expected_size);
assert_eq!(size_of::<Eu64NonCLike<[u32; 6]>>(), 32);
assert_eq!(align_of::<Eu32>(), align_of::<u32>());
assert_eq!(align_of::<Eu64NonCLike<u8>>(), align_of::<u64>());
}
// Rounds x up to the next multiple of a
fn round_up(x: usize, a: usize) -> usize {
((x + (a - 1)) / a) * a
} | _None,
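// Editor's note (illustrative, not part of the original test): `round_up`
// returns the smallest multiple of `a` that is at least `x`. For example,
// `Eu64NonCLike<u8>` needs 9 bytes of data (8-byte tag + 1-byte payload) and
// is padded to its 8-byte alignment, so round_up(9, 8) == 16, which is the
// `u8_expected_size` asserted above.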
_Some(T),
} | random_line_split |
enum-discrim-manual-sizing.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
use std::mem::{size_of, align_of};
#[repr(i8)]
enum Ei8 {
Ai8 = 0,
Bi8 = 1
}
#[repr(u8)]
enum Eu8 {
Au8 = 0,
Bu8 = 1
}
#[repr(i16)]
enum Ei16 {
Ai16 = 0,
Bi16 = 1
}
#[repr(u16)]
enum Eu16 {
Au16 = 0,
Bu16 = 1
}
#[repr(i32)]
enum Ei32 {
Ai32 = 0,
Bi32 = 1
}
#[repr(u32)]
enum | {
Au32 = 0,
Bu32 = 1
}
#[repr(i64)]
enum Ei64 {
Ai64 = 0,
Bi64 = 1
}
#[repr(u64)]
enum Eu64 {
Au64 = 0,
Bu64 = 1
}
#[repr(isize)]
enum Eint {
Aint = 0,
Bint = 1
}
#[repr(usize)]
enum Euint {
Auint = 0,
Buint = 1
}
#[repr(u8)]
enum Eu8NonCLike<T> {
_None,
_Some(T),
}
#[repr(i64)]
enum Ei64NonCLike<T> {
_None,
_Some(T),
}
#[repr(u64)]
enum Eu64NonCLike<T> {
_None,
_Some(T),
}
pub fn main() {
assert_eq!(size_of::<Ei8>(), 1);
assert_eq!(size_of::<Eu8>(), 1);
assert_eq!(size_of::<Ei16>(), 2);
assert_eq!(size_of::<Eu16>(), 2);
assert_eq!(size_of::<Ei32>(), 4);
assert_eq!(size_of::<Eu32>(), 4);
assert_eq!(size_of::<Ei64>(), 8);
assert_eq!(size_of::<Eu64>(), 8);
assert_eq!(size_of::<Eint>(), size_of::<isize>());
assert_eq!(size_of::<Euint>(), size_of::<usize>());
assert_eq!(size_of::<Eu8NonCLike<()>>(), 1);
assert_eq!(size_of::<Ei64NonCLike<()>>(), 8);
assert_eq!(size_of::<Eu64NonCLike<()>>(), 8);
let u8_expected_size = round_up(9, align_of::<Eu64NonCLike<u8>>());
assert_eq!(size_of::<Eu64NonCLike<u8>>(), u8_expected_size);
let array_expected_size = round_up(28, align_of::<Eu64NonCLike<[u32; 5]>>());
assert_eq!(size_of::<Eu64NonCLike<[u32; 5]>>(), array_expected_size);
assert_eq!(size_of::<Eu64NonCLike<[u32; 6]>>(), 32);
assert_eq!(align_of::<Eu32>(), align_of::<u32>());
assert_eq!(align_of::<Eu64NonCLike<u8>>(), align_of::<u64>());
}
// Rounds x up to the next multiple of a
fn round_up(x: usize, a: usize) -> usize {
((x + (a - 1)) / a) * a
}
| Eu32 | identifier_name |
node_style.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Style retrieval from DOM elements.
use css::node_util::NodeUtil;
use layout::incremental::RestyleDamage;
use layout::wrapper::LayoutNode;
use extra::arc::Arc;
use style::ComputedValues;
/// Node mixin providing `style` method that returns a `NodeStyle`
pub trait StyledNode {
fn style<'a>(&'a self) -> &'a Arc<ComputedValues>;
fn restyle_damage(&self) -> RestyleDamage;
}
impl<'self> StyledNode for LayoutNode<'self> {
#[inline]
fn style<'a>(&'a self) -> &'a Arc<ComputedValues> {
self.get_css_select_results()
}
fn restyle_damage(&self) -> RestyleDamage |
}
| {
self.get_restyle_damage()
} | identifier_body |
node_style.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Style retrieval from DOM elements.
use css::node_util::NodeUtil;
use layout::incremental::RestyleDamage;
use layout::wrapper::LayoutNode;
| /// Node mixin providing `style` method that returns a `NodeStyle`
pub trait StyledNode {
fn style<'a>(&'a self) -> &'a Arc<ComputedValues>;
fn restyle_damage(&self) -> RestyleDamage;
}
impl<'self> StyledNode for LayoutNode<'self> {
#[inline]
fn style<'a>(&'a self) -> &'a Arc<ComputedValues> {
self.get_css_select_results()
}
fn restyle_damage(&self) -> RestyleDamage {
self.get_restyle_damage()
}
} | use extra::arc::Arc;
use style::ComputedValues;
| random_line_split |
node_style.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Style retrieval from DOM elements.
use css::node_util::NodeUtil;
use layout::incremental::RestyleDamage;
use layout::wrapper::LayoutNode;
use extra::arc::Arc;
use style::ComputedValues;
/// Node mixin providing `style` method that returns a `NodeStyle`
pub trait StyledNode {
fn style<'a>(&'a self) -> &'a Arc<ComputedValues>;
fn restyle_damage(&self) -> RestyleDamage;
}
impl<'self> StyledNode for LayoutNode<'self> {
#[inline]
fn | <'a>(&'a self) -> &'a Arc<ComputedValues> {
self.get_css_select_results()
}
fn restyle_damage(&self) -> RestyleDamage {
self.get_restyle_damage()
}
}
| style | identifier_name |
regions-infer-at-fn-not-param.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct parameterized1<'self> {
g: &'self fn()
}
struct not_parameterized1 {
g: @fn()
}
struct | {
g: @fn()
}
fn take1(p: parameterized1) -> parameterized1 { p } //~ ERROR mismatched types
fn take3(p: not_parameterized1) -> not_parameterized1 { p }
fn take4(p: not_parameterized2) -> not_parameterized2 { p }
fn main() {}
| not_parameterized2 | identifier_name |
regions-infer-at-fn-not-param.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct parameterized1<'self> {
g: &'self fn()
}
struct not_parameterized1 { | g: @fn()
}
fn take1(p: parameterized1) -> parameterized1 { p } //~ ERROR mismatched types
fn take3(p: not_parameterized1) -> not_parameterized1 { p }
fn take4(p: not_parameterized2) -> not_parameterized2 { p }
fn main() {} | g: @fn()
}
struct not_parameterized2 { | random_line_split |
proxy.rs | //! A proxy that forwards data to another server and forwards that server's
//! responses back to clients.
//!
//! You can showcase this by running this in one terminal:
//!
//! cargo run --example proxy
//!
//! This in another terminal
//!
//! cargo run --example echo
//!
//! And finally this in another terminal
//!
//! cargo run --example connect 127.0.0.1:8081
//!
//! This final terminal will connect to our proxy, which will in turn connect to
//! the echo server, and you'll be able to see data flowing between them.
extern crate futures;
extern crate futures_cpupool;
extern crate tokio;
extern crate tokio_io;
use std::sync::Arc;
use std::env;
use std::net::{Shutdown, SocketAddr};
use std::io::{self, Read, Write};
use futures::stream::Stream;
use futures::{Future, Poll};
use futures::future::Executor;
use futures_cpupool::CpuPool;
use tokio::net::{TcpListener, TcpStream};
use tokio::reactor::Core;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::io::{copy, shutdown};
fn main() {
let listen_addr = env::args().nth(1).unwrap_or("127.0.0.1:8081".to_string());
let listen_addr = listen_addr.parse::<SocketAddr>().unwrap();
let server_addr = env::args().nth(2).unwrap_or("127.0.0.1:8080".to_string());
let server_addr = server_addr.parse::<SocketAddr>().unwrap();
// Create the event loop that will drive this server.
let mut l = Core::new().unwrap();
let handle = l.handle();
let pool = CpuPool::new(1);
// Create a TCP listener which will listen for incoming connections.
let socket = TcpListener::bind(&listen_addr, &l.handle()).unwrap();
println!("Listening on: {}", listen_addr);
println!("Proxying to: {}", server_addr);
let done = socket.incoming().for_each(move |(client, client_addr)| {
let server = TcpStream::connect(&server_addr, &handle);
let amounts = server.and_then(move |server| {
// Create separate read/write handles for the TCP clients that we're
// proxying data between. Note that typically you'd use
// `AsyncRead::split` for this operation, but we want our writer
// handles to have a custom implementation of `shutdown` which
// actually calls `TcpStream::shutdown` to ensure that EOF is
// transmitted properly across the proxied connection.
//
// As a result, we wrap up our client/server manually in arcs and
// use the impls below on our custom `MyTcpStream` type.
let client_reader = MyTcpStream(Arc::new(client));
let client_writer = client_reader.clone();
let server_reader = MyTcpStream(Arc::new(server));
let server_writer = server_reader.clone();
// Copy the data (in parallel) between the client and the server.
// After the copy is done we indicate to the remote side that we've
// finished by shutting down the connection.
let client_to_server = copy(client_reader, server_writer)
.and_then(|(n, _, server_writer)| {
shutdown(server_writer).map(move |_| n)
});
let server_to_client = copy(server_reader, client_writer)
.and_then(|(n, _, client_writer)| {
shutdown(client_writer).map(move |_| n)
});
client_to_server.join(server_to_client)
});
let msg = amounts.map(move |(from_client, from_server)| {
println!("client at {} wrote {} bytes and received {} bytes",
client_addr, from_client, from_server);
}).map_err(|e| {
// Don't panic. Maybe the client just disconnected too soon.
println!("error: {}", e);
});
pool.execute(msg).unwrap();
Ok(())
});
l.run(done).unwrap();
}
// This is a custom type used to have a custom implementation of the
// `AsyncWrite::shutdown` method which actually calls `TcpStream::shutdown` to
// notify the remote end that we're done writing.
#[derive(Clone)]
struct MyTcpStream(Arc<TcpStream>);
impl Read for MyTcpStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
(&*self.0).read(buf)
}
}
impl Write for MyTcpStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
(&*self.0).write(buf)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
} | impl AsyncWrite for MyTcpStream {
fn shutdown(&mut self) -> Poll<(), io::Error> {
try!(self.0.shutdown(Shutdown::Write));
Ok(().into())
}
} | }
impl AsyncRead for MyTcpStream {}
| random_line_split |
proxy.rs | //! A proxy that forwards data to another server and forwards that server's
//! responses back to clients.
//!
//! You can showcase this by running this in one terminal:
//!
//! cargo run --example proxy
//!
//! This in another terminal
//!
//! cargo run --example echo
//!
//! And finally this in another terminal
//!
//! cargo run --example connect 127.0.0.1:8081
//!
//! This final terminal will connect to our proxy, which will in turn connect to
//! the echo server, and you'll be able to see data flowing between them.
extern crate futures;
extern crate futures_cpupool;
extern crate tokio;
extern crate tokio_io;
use std::sync::Arc;
use std::env;
use std::net::{Shutdown, SocketAddr};
use std::io::{self, Read, Write};
use futures::stream::Stream;
use futures::{Future, Poll};
use futures::future::Executor;
use futures_cpupool::CpuPool;
use tokio::net::{TcpListener, TcpStream};
use tokio::reactor::Core;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::io::{copy, shutdown};
fn main() {
let listen_addr = env::args().nth(1).unwrap_or("127.0.0.1:8081".to_string());
let listen_addr = listen_addr.parse::<SocketAddr>().unwrap();
let server_addr = env::args().nth(2).unwrap_or("127.0.0.1:8080".to_string());
let server_addr = server_addr.parse::<SocketAddr>().unwrap();
// Create the event loop that will drive this server.
let mut l = Core::new().unwrap();
let handle = l.handle();
let pool = CpuPool::new(1);
// Create a TCP listener which will listen for incoming connections.
let socket = TcpListener::bind(&listen_addr, &l.handle()).unwrap();
println!("Listening on: {}", listen_addr);
println!("Proxying to: {}", server_addr);
let done = socket.incoming().for_each(move |(client, client_addr)| {
let server = TcpStream::connect(&server_addr, &handle);
let amounts = server.and_then(move |server| {
// Create separate read/write handles for the TCP clients that we're
// proxying data between. Note that typically you'd use
// `AsyncRead::split` for this operation, but we want our writer
// handles to have a custom implementation of `shutdown` which
// actually calls `TcpStream::shutdown` to ensure that EOF is
// transmitted properly across the proxied connection.
//
// As a result, we wrap up our client/server manually in arcs and
// use the impls below on our custom `MyTcpStream` type.
let client_reader = MyTcpStream(Arc::new(client));
let client_writer = client_reader.clone();
let server_reader = MyTcpStream(Arc::new(server));
let server_writer = server_reader.clone();
// Copy the data (in parallel) between the client and the server.
// After the copy is done we indicate to the remote side that we've
// finished by shutting down the connection.
let client_to_server = copy(client_reader, server_writer)
.and_then(|(n, _, server_writer)| {
shutdown(server_writer).map(move |_| n)
});
let server_to_client = copy(server_reader, client_writer)
.and_then(|(n, _, client_writer)| {
shutdown(client_writer).map(move |_| n)
});
client_to_server.join(server_to_client)
});
let msg = amounts.map(move |(from_client, from_server)| {
println!("client at {} wrote {} bytes and received {} bytes",
client_addr, from_client, from_server);
}).map_err(|e| {
// Don't panic. Maybe the client just disconnected too soon.
println!("error: {}", e);
});
pool.execute(msg).unwrap();
Ok(())
});
l.run(done).unwrap();
}
// This is a custom type used to have a custom implementation of the
// `AsyncWrite::shutdown` method which actually calls `TcpStream::shutdown` to
// notify the remote end that we're done writing.
#[derive(Clone)]
struct | (Arc<TcpStream>);
impl Read for MyTcpStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
(&*self.0).read(buf)
}
}
impl Write for MyTcpStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
(&*self.0).write(buf)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl AsyncRead for MyTcpStream {}
impl AsyncWrite for MyTcpStream {
fn shutdown(&mut self) -> Poll<(), io::Error> {
try!(self.0.shutdown(Shutdown::Write));
Ok(().into())
}
}
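// Editor's note (illustrative, added): the explicit
// `TcpStream::shutdown(Shutdown::Write)` above is what lets the peer's `copy`
// future observe EOF; without it the remote side never sees the write half
// close, and the proxied transfer in that direction would never complete.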
| MyTcpStream | identifier_name |
proxy.rs | //! A proxy that forwards data to another server and forwards that server's
//! responses back to clients.
//!
//! You can showcase this by running this in one terminal:
//!
//! cargo run --example proxy
//!
//! This in another terminal
//!
//! cargo run --example echo
//!
//! And finally this in another terminal
//!
//! cargo run --example connect 127.0.0.1:8081
//!
//! This final terminal will connect to our proxy, which will in turn connect to
//! the echo server, and you'll be able to see data flowing between them.
extern crate futures;
extern crate futures_cpupool;
extern crate tokio;
extern crate tokio_io;
use std::sync::Arc;
use std::env;
use std::net::{Shutdown, SocketAddr};
use std::io::{self, Read, Write};
use futures::stream::Stream;
use futures::{Future, Poll};
use futures::future::Executor;
use futures_cpupool::CpuPool;
use tokio::net::{TcpListener, TcpStream};
use tokio::reactor::Core;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::io::{copy, shutdown};
fn main() {
let listen_addr = env::args().nth(1).unwrap_or("127.0.0.1:8081".to_string());
let listen_addr = listen_addr.parse::<SocketAddr>().unwrap();
let server_addr = env::args().nth(2).unwrap_or("127.0.0.1:8080".to_string());
let server_addr = server_addr.parse::<SocketAddr>().unwrap();
// Create the event loop that will drive this server.
let mut l = Core::new().unwrap();
let handle = l.handle();
let pool = CpuPool::new(1);
// Create a TCP listener which will listen for incoming connections.
let socket = TcpListener::bind(&listen_addr, &l.handle()).unwrap();
println!("Listening on: {}", listen_addr);
println!("Proxying to: {}", server_addr);
let done = socket.incoming().for_each(move |(client, client_addr)| {
let server = TcpStream::connect(&server_addr, &handle);
let amounts = server.and_then(move |server| {
// Create separate read/write handles for the TCP clients that we're
// proxying data between. Note that typically you'd use
// `AsyncRead::split` for this operation, but we want our writer
// handles to have a custom implementation of `shutdown` which
// actually calls `TcpStream::shutdown` to ensure that EOF is
// transmitted properly across the proxied connection.
//
// As a result, we wrap up our client/server manually in arcs and
// use the impls below on our custom `MyTcpStream` type.
let client_reader = MyTcpStream(Arc::new(client));
let client_writer = client_reader.clone();
let server_reader = MyTcpStream(Arc::new(server));
let server_writer = server_reader.clone();
// Copy the data (in parallel) between the client and the server.
// After the copy is done we indicate to the remote side that we've
// finished by shutting down the connection.
let client_to_server = copy(client_reader, server_writer)
.and_then(|(n, _, server_writer)| {
shutdown(server_writer).map(move |_| n)
});
let server_to_client = copy(server_reader, client_writer)
.and_then(|(n, _, client_writer)| {
shutdown(client_writer).map(move |_| n)
});
client_to_server.join(server_to_client)
});
let msg = amounts.map(move |(from_client, from_server)| {
println!("client at {} wrote {} bytes and received {} bytes",
client_addr, from_client, from_server);
}).map_err(|e| {
// Don't panic. Maybe the client just disconnected too soon.
println!("error: {}", e);
});
pool.execute(msg).unwrap();
Ok(())
});
l.run(done).unwrap();
}
// This is a custom type used to have a custom implementation of the
// `AsyncWrite::shutdown` method which actually calls `TcpStream::shutdown` to
// notify the remote end that we're done writing.
#[derive(Clone)]
struct MyTcpStream(Arc<TcpStream>);
impl Read for MyTcpStream {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> |
}
impl Write for MyTcpStream {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
(&*self.0).write(buf)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl AsyncRead for MyTcpStream {}
impl AsyncWrite for MyTcpStream {
fn shutdown(&mut self) -> Poll<(), io::Error> {
try!(self.0.shutdown(Shutdown::Write));
Ok(().into())
}
}
| {
(&*self.0).read(buf)
} | identifier_body |
example.rs | static ABC: &str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
pub fn get_diamond(diamond_char: char) -> Vec<String> {
let mut result: Vec<String> = Vec::new();
let diamond_char = diamond_char.to_ascii_uppercase();
if ABC.find(diamond_char).is_none() {
return result;
}
if diamond_char == 'A' {
return vec![String::from("A")];
}
//build first half
for char_in_abc in ABC.chars() {
result.push(get_line(char_in_abc, diamond_char).clone());
if char_in_abc == diamond_char {
break;
}
}
//build second half
let mut rev = result.clone();
rev.pop(); //remove middle piece to avoid duplicates
for line in rev.drain(..).rev() {
result.push(line);
}
result
}
fn get_line(char_in_abc: char, diamond_char: char) -> String |
fn get_letter_line(char_in_abc: char) -> String {
let mut r = String::new();
let odd = (0..)
.filter(|x| x % 2 != 0)
.nth(ABC.find(char_in_abc).unwrap())
.unwrap();
for i in 0..odd {
if i == 0 || i == odd - 1 {
r.push(char_in_abc);
} else {
r.push(' ');
}
}
r
}
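// Editor's note (illustrative, added): `get_letter_line` builds the widest row
// for a letter, e.g. "A", "B B", "C   C" (width = 2 * index_in_ABC + 1), and
// `get_line` then centres that row within the width of the diamond
// character's own row, so get_diamond('C') yields "  A  ", " B B ", "C   C",
// " B B ", "  A  ".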
| {
let mut r = String::new();
let letter_e = get_letter_line(char_in_abc);
let letter_c = get_letter_line(diamond_char);
let ws = letter_c.len() - letter_e.len(); //number of whitespaces
//left
for _ in 0..ws / 2 {
r.push(' ');
}
//letter line
for i in letter_e.chars() {
r.push(i)
}
//right
for _ in 0..ws / 2 {
r.push(' ');
}
r
} | identifier_body |
example.rs | static ABC: &str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
pub fn get_diamond(diamond_char: char) -> Vec<String> {
let mut result: Vec<String> = Vec::new();
let diamond_char = diamond_char.to_ascii_uppercase();
if ABC.find(diamond_char).is_none() {
return result;
}
if diamond_char == 'A' {
return vec![String::from("A")];
}
//build first half
for char_in_abc in ABC.chars() {
result.push(get_line(char_in_abc, diamond_char).clone());
if char_in_abc == diamond_char {
break;
}
}
//build second half
let mut rev = result.clone();
rev.pop(); //remove middle piece to avoid duplicates
for line in rev.drain(..).rev() {
result.push(line);
}
result
}
fn get_line(char_in_abc: char, diamond_char: char) -> String {
let mut r = String::new();
let letter_e = get_letter_line(char_in_abc);
let letter_c = get_letter_line(diamond_char);
let ws = letter_c.len() - letter_e.len(); //number of whitespaces
| for i in letter_e.chars() {
r.push(i)
}
//right
for _ in 0..ws / 2 {
r.push(' ');
}
r
}
fn get_letter_line(char_in_abc: char) -> String {
let mut r = String::new();
let odd = (0..)
.filter(|x| x % 2 != 0)
.nth(ABC.find(char_in_abc).unwrap())
.unwrap();
for i in 0..odd {
if i == 0 || i == odd - 1 {
r.push(char_in_abc);
} else {
r.push(' ');
}
}
r
} | //left
for _ in 0..ws / 2 {
r.push(' ');
}
//letter line | random_line_split |
example.rs | static ABC: &str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
pub fn get_diamond(diamond_char: char) -> Vec<String> {
let mut result: Vec<String> = Vec::new();
let diamond_char = diamond_char.to_ascii_uppercase();
if ABC.find(diamond_char).is_none() {
return result;
}
if diamond_char == 'A' |
//build first half
for char_in_abc in ABC.chars() {
result.push(get_line(char_in_abc, diamond_char).clone());
if char_in_abc == diamond_char {
break;
}
}
//build second half
let mut rev = result.clone();
rev.pop(); //remove middle piece to avoid duplicates
for line in rev.drain(..).rev() {
result.push(line);
}
result
}
fn get_line(char_in_abc: char, diamond_char: char) -> String {
let mut r = String::new();
let letter_e = get_letter_line(char_in_abc);
let letter_c = get_letter_line(diamond_char);
let ws = letter_c.len() - letter_e.len(); //number of whitespaces
//left
for _ in 0..ws / 2 {
r.push(' ');
}
//letter line
for i in letter_e.chars() {
r.push(i)
}
//right
for _ in 0..ws / 2 {
r.push(' ');
}
r
}
fn get_letter_line(char_in_abc: char) -> String {
let mut r = String::new();
let odd = (0..)
.filter(|x| x % 2 != 0)
.nth(ABC.find(char_in_abc).unwrap())
.unwrap();
for i in 0..odd {
if i == 0 || i == odd - 1 {
r.push(char_in_abc);
} else {
r.push(' ');
}
}
r
}
| {
return vec![String::from("A")];
} | conditional_block |
example.rs | static ABC: &str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
pub fn get_diamond(diamond_char: char) -> Vec<String> {
let mut result: Vec<String> = Vec::new();
let diamond_char = diamond_char.to_ascii_uppercase();
if ABC.find(diamond_char).is_none() {
return result;
}
if diamond_char == 'A' {
return vec![String::from("A")];
}
//build first half
for char_in_abc in ABC.chars() {
result.push(get_line(char_in_abc, diamond_char).clone());
if char_in_abc == diamond_char {
break;
}
}
//build second half
let mut rev = result.clone();
rev.pop(); //remove middle piece to avoid duplicates
for line in rev.drain(..).rev() {
result.push(line);
}
result
}
fn get_line(char_in_abc: char, diamond_char: char) -> String {
let mut r = String::new();
let letter_e = get_letter_line(char_in_abc);
let letter_c = get_letter_line(diamond_char);
let ws = letter_c.len() - letter_e.len(); //number of whitespaces
//left
for _ in 0..ws / 2 {
r.push(' ');
}
//letter line
for i in letter_e.chars() {
r.push(i)
}
//right
for _ in 0..ws / 2 {
r.push(' ');
}
r
}
fn | (char_in_abc: char) -> String {
let mut r = String::new();
let odd = (0..)
.filter(|x| x % 2 != 0)
.nth(ABC.find(char_in_abc).unwrap())
.unwrap();
for i in 0..odd {
if i == 0 || i == odd - 1 {
r.push(char_in_abc);
} else {
r.push(' ');
}
}
r
}
| get_letter_line | identifier_name |
lib.rs | // Copyright 2016 Phillip Oppermann, Calvin Lee and JJ Garzella.
// See the README.md file at the top-level directory of this
// distribution.
//
// Licensed under the MIT license <LICENSE or
// http://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_snake_case)]
#![feature(lang_items)]
#![feature(alloc)]
#![feature(const_fn)]
#![feature(associated_type_defaults)]
#![feature(asm, naked_functions, core_intrinsics)]
#![feature(abi_x86_interrupt)]
#![feature(ptr_internals)]
#![no_std]
// crates.io crates
extern crate rlibc;
/// Bare metal Mutex
extern crate spin;
/// Abstraction of the multiboot2 info structure
extern crate multiboot2;
extern crate x86_64;
extern crate bit_field;
#[macro_use]
extern crate bitflags;
/// A macro for running a function only once
#[macro_use]
extern crate once;
// Features involving allocation
/// Heap allocator for rust code
extern crate hole_list_allocator;
/// Higher-level data structures that use the heap
extern crate alloc;
#[macro_use]
/// Abstraction of the VGA text buffer
mod vga_buffer;
/// Memory management
mod memory;
/// Interrupts code
mod interrupts;
/// IO abstractions in Rust
#[macro_use]
mod cpuio;
mod sync;
mod scheduler;
/// Utilities for multi-CPU processing
mod smp;
/// Testing
#[cfg(feature = "test")]
mod tap;
extern "C" {
/// The kernel exit point. It disables interrupts, enters an infinite loop,
/// and halts the processor
fn KEXIT() -> !;
}
/// The Rust entry point
///
/// This clears the screen, initializes each module and enters an infinite
/// loop.
#[no_mangle]
pub extern "C" fn rust_main(multiboot_info_address: usize) -> ! {
vga_buffer::clear_screen();
println!("Hello Rust log \x01");
let boot_info = unsafe { multiboot2::load(multiboot_info_address) };
for module in boot_info.module_tags() {
if module.name() == "keyboard" {
let addr = module.start_address() as usize + memory::KERNEL_BASE;
unsafe {
interrupts::KEYBOARD.lock()
.change_kbmap(&*(addr as *const [u8; 128]));
}
}
}
// Initialize memory
memory::init(&boot_info); | unsafe {
smp::CpuLocal::init()
};
// Initialize the IDT
interrupts::init();
// Initialize the serial port
cpuio::init();
println!("Try to write some things!");
vga_buffer::change_color(vga_buffer::Color::White, vga_buffer::Color::Black);
#[cfg(feature = "test")] {
run_tests();
shutdown();
}
loop {
// We are waiting for interrupts here, so don't bother doing anything
unsafe { asm!("hlt" :::: "volatile") }
}
}
#[cfg(feature = "test")]
fn shutdown() ->! {
use cpuio::port::Port;
let mut p: Port<u8> = unsafe { Port::new(0xf4) };
p.write(0x00);
unreachable!();
}
#[cfg(feature = "test")]
fn run_tests() {
memory::tests::run();
scheduler::tests::run();
smp::tests::run();
interrupts::tests::run();
cpuio::tests::run();
}
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn _Unwind_Resume() ->! {
unsafe { KEXIT() }
}
/// Used for unwinding, unsupported
#[lang = "eh_personality"]
fn eh_personality() {}
use core::alloc::Layout;
/// Runs when the allocator is out of memory
#[lang = "oom"]
fn oom(_: Layout) ->! {
panic!("Error, out of memory");
}
/// Runs during a `panic!()`
#[panic_handler]
extern "C" fn panic_fmt(pi: &core::panic::PanicInfo) ->! {
vga_buffer::change_color(vga_buffer::Color::Red, vga_buffer::Color::Black);
println!("\n\nESALP {}", pi);
#[cfg(feature = "test")] {
serial_println!("Bail out! - {}", pi);
shutdown();
}
unsafe { KEXIT() }
} |
// Initialize CPU local variables and the scheduler | random_line_split |
lib.rs | // Copyright 2016 Phillip Oppermann, Calvin Lee and JJ Garzella.
// See the README.md file at the top-level directory of this
// distribution.
//
// Licensed under the MIT license <LICENSE or
// http://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_snake_case)]
#![feature(lang_items)]
#![feature(alloc)]
#![feature(const_fn)]
#![feature(associated_type_defaults)]
#![feature(asm, naked_functions, core_intrinsics)]
#![feature(abi_x86_interrupt)]
#![feature(ptr_internals)]
#![no_std]
// crates.io crates
extern crate rlibc;
/// Bare metal Mutex
extern crate spin;
/// Abstraction of the multiboot2 info structure
extern crate multiboot2;
extern crate x86_64;
extern crate bit_field;
#[macro_use]
extern crate bitflags;
/// A macro for running a function only once
#[macro_use]
extern crate once;
// Features involving allocation
/// Heap allocator for rust code
extern crate hole_list_allocator;
/// Higher-level data structures that use the heap
extern crate alloc;
#[macro_use]
/// Abstraction of the VGA text buffer
mod vga_buffer;
/// Memory management
mod memory;
/// Interrupts code
mod interrupts;
/// IO abstractions in Rust
#[macro_use]
mod cpuio;
mod sync;
mod scheduler;
/// Utilities for multi-CPU processing
mod smp;
/// Testing
#[cfg(feature = "test")]
mod tap;
extern "C" {
/// The kernel exit point. It disables interrupts, enters an infinite loop,
/// and halts the processor
fn KEXIT() -> !;
}
/// The Rust entry point
///
/// This clears the screen, initializes each module and enters an infinite
/// loop.
#[no_mangle]
pub extern "C" fn rust_main(multiboot_info_address: usize) -> ! {
vga_buffer::clear_screen();
println!("Hello Rust log \x01");
let boot_info = unsafe { multiboot2::load(multiboot_info_address) };
for module in boot_info.module_tags() {
if module.name() == "keyboard" |
}
// Initialize memory
memory::init(&boot_info);
// Initialize CPU local variables and the scheduler
unsafe {
smp::CpuLocal::init()
};
// Initialize the IDT
interrupts::init();
// Initialize the serial port
cpuio::init();
println!("Try to write some things!");
vga_buffer::change_color(vga_buffer::Color::White, vga_buffer::Color::Black);
#[cfg(feature = "test")] {
run_tests();
shutdown();
}
loop {
// We are waiting for interrupts here, so don't bother doing anything
unsafe { asm!("hlt" :::: "volatile") }
}
}
#[cfg(feature = "test")]
fn shutdown() -> ! {
use cpuio::port::Port;
let mut p: Port<u8> = unsafe { Port::new(0xf4) };
p.write(0x00);
unreachable!();
}
#[cfg(feature = "test")]
fn run_tests() {
memory::tests::run();
scheduler::tests::run();
smp::tests::run();
interrupts::tests::run();
cpuio::tests::run();
}
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn _Unwind_Resume() -> ! {
unsafe { KEXIT() }
}
/// Used for unwinding, unsupported
#[lang = "eh_personality"]
fn eh_personality() {}
use core::alloc::Layout;
/// Runs when the allocator is out of memory
#[lang = "oom"]
fn oom(_: Layout) -> ! {
panic!("Error, out of memory");
}
/// Runs during a `panic!()`
#[panic_handler]
extern "C" fn panic_fmt(pi: &core::panic::PanicInfo) -> ! {
vga_buffer::change_color(vga_buffer::Color::Red, vga_buffer::Color::Black);
println!("\n\nESALP {}", pi);
#[cfg(feature = "test")] {
serial_println!("Bail out! - {}", pi);
shutdown();
}
unsafe { KEXIT() }
}
| {
let addr = module.start_address() as usize + memory::KERNEL_BASE;
unsafe {
interrupts::KEYBOARD.lock()
.change_kbmap(&*(addr as *const [u8; 128]));
}
} | conditional_block |
lib.rs | // Copyright 2016 Phillip Oppermann, Calvin Lee and JJ Garzella.
// See the README.md file at the top-level directory of this
// distribution.
//
// Licensed under the MIT license <LICENSE or
// http://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_snake_case)]
#![feature(lang_items)]
#![feature(alloc)]
#![feature(const_fn)]
#![feature(associated_type_defaults)]
#![feature(asm, naked_functions, core_intrinsics)]
#![feature(abi_x86_interrupt)]
#![feature(ptr_internals)]
#![no_std]
// crates.io crates
extern crate rlibc;
/// Bare metal Mutex
extern crate spin;
/// Abstraction of the multiboot2 info structure
extern crate multiboot2;
extern crate x86_64;
extern crate bit_field;
#[macro_use]
extern crate bitflags;
/// A macro for running a function only once
#[macro_use]
extern crate once;
// Features involving allocation
/// Heap allocator for rust code
extern crate hole_list_allocator;
/// Higher-level data structures that use the heap
extern crate alloc;
#[macro_use]
/// Abstraction of the VGA text buffer
mod vga_buffer;
/// Memory management
mod memory;
/// Interrupts code
mod interrupts;
/// IO abstractions in Rust
#[macro_use]
mod cpuio;
mod sync;
mod scheduler;
/// Utilities for multi-CPU processing
mod smp;
/// Testing
#[cfg(feature = "test")]
mod tap;
extern "C" {
/// The kernel exit point. It disables interrupts, enters an infinite loop,
/// and halts the processor
fn KEXIT() -> !;
}
/// The Rust entry point
///
/// This clears the screen, initializes each module and enters an infinite
/// loop.
#[no_mangle]
pub extern "C" fn rust_main(multiboot_info_address: usize) -> ! {
vga_buffer::clear_screen();
println!("Hello Rust log \x01");
let boot_info = unsafe { multiboot2::load(multiboot_info_address) };
for module in boot_info.module_tags() {
if module.name() == "keyboard" {
let addr = module.start_address() as usize + memory::KERNEL_BASE;
unsafe {
interrupts::KEYBOARD.lock()
.change_kbmap(&*(addr as *const [u8; 128]));
}
}
}
// Initialize memory
memory::init(&boot_info);
// Initialize CPU local variables and the scheduler
unsafe {
smp::CpuLocal::init()
};
// Initialize the IDT
interrupts::init();
// Initialize the serial port
cpuio::init();
println!("Try to write some things!");
vga_buffer::change_color(vga_buffer::Color::White, vga_buffer::Color::Black);
#[cfg(feature = "test")] {
run_tests();
shutdown();
}
loop {
// We are waiting for interrupts here, so don't bother doing anything
unsafe { asm!("hlt" :::: "volatile") }
}
}
#[cfg(feature = "test")]
fn shutdown() ->! {
use cpuio::port::Port;
let mut p: Port<u8> = unsafe { Port::new(0xf4) };
p.write(0x00);
unreachable!();
}
#[cfg(feature = "test")]
fn run_tests() {
memory::tests::run();
scheduler::tests::run();
smp::tests::run();
interrupts::tests::run();
cpuio::tests::run();
}
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn _Unwind_Resume() ->! {
unsafe { KEXIT() }
}
/// Used for unwinding, unsupported
#[lang = "eh_personality"]
fn eh_personality() {}
use core::alloc::Layout;
/// Runs when the allocator is out of memory
#[lang = "oom"]
fn | (_: Layout) ->! {
panic!("Error, out of memory");
}
/// Runs during a `panic!()`
#[panic_handler]
extern "C" fn panic_fmt(pi: &core::panic::PanicInfo) ->! {
vga_buffer::change_color(vga_buffer::Color::Red, vga_buffer::Color::Black);
println!("\n\nESALP {}", pi);
#[cfg(feature = "test")] {
serial_println!("Bail out! - {}", pi);
shutdown();
}
unsafe { KEXIT() }
}
| oom | identifier_name |
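// --- Illustrative sketch, not part of the original ESALP lib.rs ---
// `run_tests()` above just calls a `tests::run()` hook in each module, and
// `panic_fmt` emits a TAP-style "Bail out!" line over the serial port on
// failure. A hypothetical module-level hook following that convention could
// look like this (`serial_println!` is the macro from the `cpuio` module):
pub mod example_tests {
    pub fn run() {
        serial_println!("1..1");
        assert_eq!(2 + 2, 4);
        serial_println!("ok 1 - arithmetic on the boot CPU");
    }
}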
lib.rs | // Copyright 2016 Phillip Oppermann, Calvin Lee and JJ Garzella.
// See the README.md file at the top-level directory of this
// distribution.
//
// Licensed under the MIT license <LICENSE or
// http://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_snake_case)]
#![feature(lang_items)]
#![feature(alloc)]
#![feature(const_fn)]
#![feature(associated_type_defaults)]
#![feature(asm, naked_functions, core_intrinsics)]
#![feature(abi_x86_interrupt)]
#![feature(ptr_internals)]
#![no_std]
// crates.io crates
extern crate rlibc;
/// Bare metal Mutex
extern crate spin;
/// Abstraction of the multiboot2 info structure
extern crate multiboot2;
extern crate x86_64;
extern crate bit_field;
#[macro_use]
extern crate bitflags;
/// A macro for running a function only once
#[macro_use]
extern crate once;
// Features involving allocation
/// Heap allocator for rust code
extern crate hole_list_allocator;
/// Higher-level data structures that use the heap
extern crate alloc;
#[macro_use]
/// Abstraction of the VGA text buffer
mod vga_buffer;
/// Memory management
mod memory;
/// Interrupts code
mod interrupts;
/// IO abstractions in Rust
#[macro_use]
mod cpuio;
mod sync;
mod scheduler;
/// Utilities for multi-CPU processing
mod smp;
/// Testing
#[cfg(feature = "test")]
mod tap;
extern "C" {
/// The kernel exit point. It disables interrupts, enters an infinite loop,
/// and halts the processor
fn KEXIT() ->!;
}
/// The Rust entry point
///
/// This clears the screen, initializes each module and enters an infinite
/// loop.
#[no_mangle]
pub extern "C" fn rust_main(multiboot_info_address: usize) ->! | unsafe {
smp::CpuLocal::init()
};
// Initialize the IDT
interrupts::init();
// Initialize the serial port
cpuio::init();
println!("Try to write some things!");
vga_buffer::change_color(vga_buffer::Color::White, vga_buffer::Color::Black);
#[cfg(feature = "test")] {
run_tests();
shutdown();
}
loop {
// We are waiting for interrupts here, so don't bother doing anything
unsafe { asm!("hlt" :::: "volatile") }
}
}
#[cfg(feature = "test")]
fn shutdown() ->! {
use cpuio::port::Port;
let mut p: Port<u8> = unsafe { Port::new(0xf4) };
p.write(0x00);
unreachable!();
}
#[cfg(feature = "test")]
fn run_tests() {
memory::tests::run();
scheduler::tests::run();
smp::tests::run();
interrupts::tests::run();
cpuio::tests::run();
}
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn _Unwind_Resume() ->! {
unsafe { KEXIT() }
}
/// Used for unwinding, unsupported
#[lang = "eh_personality"]
fn eh_personality() {}
use core::alloc::Layout;
/// Runs when the allocator is out of memory
#[lang = "oom"]
fn oom(_: Layout) ->! {
panic!("Error, out of memory");
}
/// Runs during a `panic!()`
#[panic_handler]
extern "C" fn panic_fmt(pi: &core::panic::PanicInfo) ->! {
vga_buffer::change_color(vga_buffer::Color::Red, vga_buffer::Color::Black);
println!("\n\nESALP {}", pi);
#[cfg(feature = "test")] {
serial_println!("Bail out! - {}", pi);
shutdown();
}
unsafe { KEXIT() }
}
| {
vga_buffer::clear_screen();
println!("Hello Rust log \x01");
let boot_info = unsafe { multiboot2::load(multiboot_info_address) };
for module in boot_info.module_tags() {
if module.name() == "keyboard" {
let addr = module.start_address() as usize + memory::KERNEL_BASE;
unsafe {
interrupts::KEYBOARD.lock()
.change_kbmap(&*(addr as *const [u8; 128]));
}
}
}
// Initialize memory
memory::init(&boot_info);
// Initialize CPU local variables and the scheduler | identifier_body |
aabb.rs | use cgmath::prelude::*;
use cgmath::Point3;
use crate::consts;
use crate::float::*;
use crate::intersect::{Intersect, Ray};
#[derive(Clone, Debug)]
pub struct Aabb {
pub min: Point3<Float>,
pub max: Point3<Float>,
}
impl Aabb {
pub fn empty() -> Aabb {
Aabb {
min: Point3::max_value(),
max: Point3::min_value(),
}
}
/// Update the bounding box to enclose other as well
pub fn add_aabb(&mut self, other: &Aabb) {
self.min = min_point(&self.min, &other.min);
self.max = max_point(&self.max, &other.max);
}
/// Get the center of the scene as defined by the bounding box
pub fn center(&self) -> Point3<Float> {
if self.max.x < self.min.x {
panic!("Tried to get center of an empty Aabb");
}
Point3::midpoint(self.min, self.max)
}
pub fn longest_edge(&self) -> Float {
let mut longest: Float = 0.0;
for i in 0..3 {
longest = longest.max(self.max[i] - self.min[i]);
}
longest
}
pub fn longest_edge_i(&self) -> usize {
let mut longest = 0.0;
let mut index = 0;
for i in 0..3 {
let length = self.max[i] - self.min[i];
if length > longest {
longest = length;
index = i;
}
}
index
}
pub fn area(&self) -> Float {
let lengths = self.max - self.min;
2.0 * (lengths.x * lengths.y + lengths.y * lengths.z + lengths.z * lengths.x).max(0.0)
}
}
impl Intersect<'_, Float> for Aabb {
fn intersect(&self, ray: &Ray) -> Option<Float> {
let t1 = (self.min - ray.orig).mul_element_wise(ray.reciprocal_dir);
let t2 = (self.max - ray.orig).mul_element_wise(ray.reciprocal_dir);
let mut start = consts::MIN;
let mut end = consts::MAX;
for i in 0..3 {
if ray.dir[i] == 0.0 && (ray.orig[i] < self.min[i] || ray.orig[i] > self.max[i]) {
// Can't hit
return None;
} else if ray.neg_dir[i] {
start = start.max(t2[i]);
end = end.min(t1[i]);
} else {
start = start.max(t1[i]);
end = end.min(t2[i]);
}
}
if start <= end && end > 0.0 && start < ray.length {
Some(start)
} else {
None
}
}
} | }
p_min
}
pub fn max_point(p1: &Point3<Float>, p2: &Point3<Float>) -> Point3<Float> {
let mut p_max = Point3::min_value();
for i in 0..3 {
p_max[i] = p1[i].max(p2[i]);
}
p_max
} |
pub fn min_point(p1: &Point3<Float>, p2: &Point3<Float>) -> Point3<Float> {
let mut p_min = Point3::max_value();
for i in 0..3 {
p_min[i] = p1[i].min(p2[i]); | random_line_split |
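// --- Illustrative sketch, not part of the original aabb.rs ---
// Unit tests for the pure geometry helpers above; they only use `Aabb`,
// `min_point`, `max_point` and `cgmath::Point3`.
#[cfg(test)]
mod aabb_tests {
    use super::*;
    #[test]
    fn add_aabb_grows_an_empty_box() {
        let mut b = Aabb::empty();
        b.add_aabb(&Aabb {
            min: Point3::new(0.0, 0.0, 0.0),
            max: Point3::new(1.0, 2.0, 3.0),
        });
        assert_eq!(b.center(), Point3::new(0.5, 1.0, 1.5));
        assert_eq!(b.longest_edge(), 3.0);
        assert_eq!(b.longest_edge_i(), 2);
        // Surface area of a 1 x 2 x 3 box.
        assert_eq!(b.area(), 22.0);
    }
    #[test]
    fn min_and_max_point_are_componentwise() {
        let a = Point3::new(1.0, 5.0, -1.0);
        let b = Point3::new(2.0, 4.0, -2.0);
        assert_eq!(min_point(&a, &b), Point3::new(1.0, 4.0, -2.0));
        assert_eq!(max_point(&a, &b), Point3::new(2.0, 5.0, -1.0));
    }
}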
aabb.rs | use cgmath::prelude::*;
use cgmath::Point3;
use crate::consts;
use crate::float::*;
use crate::intersect::{Intersect, Ray};
#[derive(Clone, Debug)]
pub struct Aabb {
pub min: Point3<Float>,
pub max: Point3<Float>,
}
impl Aabb {
pub fn empty() -> Aabb {
Aabb {
min: Point3::max_value(),
max: Point3::min_value(),
}
}
/// Update the bounding box to enclose other as well
pub fn add_aabb(&mut self, other: &Aabb) {
self.min = min_point(&self.min, &other.min);
self.max = max_point(&self.max, &other.max);
}
/// Get the center of the scene as defined by the bounding box
pub fn center(&self) -> Point3<Float> {
if self.max.x < self.min.x {
panic!("Tried to get center of an empty Aabb");
}
Point3::midpoint(self.min, self.max)
}
pub fn longest_edge(&self) -> Float {
let mut longest: Float = 0.0;
for i in 0..3 {
longest = longest.max(self.max[i] - self.min[i]);
}
longest
}
pub fn longest_edge_i(&self) -> usize {
let mut longest = 0.0;
let mut index = 0;
for i in 0..3 {
let length = self.max[i] - self.min[i];
if length > longest {
longest = length;
index = i;
}
}
index
}
pub fn area(&self) -> Float |
}
impl Intersect<'_, Float> for Aabb {
fn intersect(&self, ray: &Ray) -> Option<Float> {
let t1 = (self.min - ray.orig).mul_element_wise(ray.reciprocal_dir);
let t2 = (self.max - ray.orig).mul_element_wise(ray.reciprocal_dir);
let mut start = consts::MIN;
let mut end = consts::MAX;
for i in 0..3 {
if ray.dir[i] == 0.0 && (ray.orig[i] < self.min[i] || ray.orig[i] > self.max[i]) {
// Can't hit
return None;
} else if ray.neg_dir[i] {
start = start.max(t2[i]);
end = end.min(t1[i]);
} else {
start = start.max(t1[i]);
end = end.min(t2[i]);
}
}
if start <= end && end > 0.0 && start < ray.length {
Some(start)
} else {
None
}
}
}
pub fn min_point(p1: &Point3<Float>, p2: &Point3<Float>) -> Point3<Float> {
let mut p_min = Point3::max_value();
for i in 0..3 {
p_min[i] = p1[i].min(p2[i]);
}
p_min
}
pub fn max_point(p1: &Point3<Float>, p2: &Point3<Float>) -> Point3<Float> {
let mut p_max = Point3::min_value();
for i in 0..3 {
p_max[i] = p1[i].max(p2[i]);
}
p_max
}
| {
let lengths = self.max - self.min;
2.0 * (lengths.x * lengths.y + lengths.y * lengths.z + lengths.z * lengths.x).max(0.0)
} | identifier_body |
aabb.rs | use cgmath::prelude::*;
use cgmath::Point3;
use crate::consts;
use crate::float::*;
use crate::intersect::{Intersect, Ray};
#[derive(Clone, Debug)]
pub struct Aabb {
pub min: Point3<Float>,
pub max: Point3<Float>,
}
impl Aabb {
pub fn empty() -> Aabb {
Aabb {
min: Point3::max_value(),
max: Point3::min_value(),
}
}
/// Update the bounding box to enclose other as well
pub fn add_aabb(&mut self, other: &Aabb) {
self.min = min_point(&self.min, &other.min);
self.max = max_point(&self.max, &other.max);
}
/// Get the center of the scene as defined by the bounding box
pub fn center(&self) -> Point3<Float> {
if self.max.x < self.min.x {
panic!("Tried to get center of an empty Aabb");
}
Point3::midpoint(self.min, self.max)
}
pub fn longest_edge(&self) -> Float {
let mut longest: Float = 0.0;
for i in 0..3 {
longest = longest.max(self.max[i] - self.min[i]);
}
longest
}
pub fn longest_edge_i(&self) -> usize {
let mut longest = 0.0;
let mut index = 0;
for i in 0..3 {
let length = self.max[i] - self.min[i];
if length > longest {
longest = length;
index = i;
}
}
index
}
pub fn area(&self) -> Float {
let lengths = self.max - self.min;
2.0 * (lengths.x * lengths.y + lengths.y * lengths.z + lengths.z * lengths.x).max(0.0)
}
}
impl Intersect<'_, Float> for Aabb {
fn intersect(&self, ray: &Ray) -> Option<Float> {
let t1 = (self.min - ray.orig).mul_element_wise(ray.reciprocal_dir);
let t2 = (self.max - ray.orig).mul_element_wise(ray.reciprocal_dir);
let mut start = consts::MIN;
let mut end = consts::MAX;
for i in 0..3 {
if ray.dir[i] == 0.0 && (ray.orig[i] < self.min[i] || ray.orig[i] > self.max[i]) {
// Can't hit
return None;
} else if ray.neg_dir[i] {
start = start.max(t2[i]);
end = end.min(t1[i]);
} else {
start = start.max(t1[i]);
end = end.min(t2[i]);
}
}
if start <= end && end > 0.0 && start < ray.length {
Some(start)
} else |
}
}
pub fn min_point(p1: &Point3<Float>, p2: &Point3<Float>) -> Point3<Float> {
let mut p_min = Point3::max_value();
for i in 0..3 {
p_min[i] = p1[i].min(p2[i]);
}
p_min
}
pub fn max_point(p1: &Point3<Float>, p2: &Point3<Float>) -> Point3<Float> {
let mut p_max = Point3::min_value();
for i in 0..3 {
p_max[i] = p1[i].max(p2[i]);
}
p_max
}
| {
None
} | conditional_block |
aabb.rs | use cgmath::prelude::*;
use cgmath::Point3;
use crate::consts;
use crate::float::*;
use crate::intersect::{Intersect, Ray};
#[derive(Clone, Debug)]
pub struct | {
pub min: Point3<Float>,
pub max: Point3<Float>,
}
impl Aabb {
pub fn empty() -> Aabb {
Aabb {
min: Point3::max_value(),
max: Point3::min_value(),
}
}
/// Update the bounding box to enclose other as well
pub fn add_aabb(&mut self, other: &Aabb) {
self.min = min_point(&self.min, &other.min);
self.max = max_point(&self.max, &other.max);
}
/// Get the center of the scene as defined by the bounding box
pub fn center(&self) -> Point3<Float> {
if self.max.x < self.min.x {
panic!("Tried to get center of an empty Aabb");
}
Point3::midpoint(self.min, self.max)
}
pub fn longest_edge(&self) -> Float {
let mut longest: Float = 0.0;
for i in 0..3 {
longest = longest.max(self.max[i] - self.min[i]);
}
longest
}
pub fn longest_edge_i(&self) -> usize {
let mut longest = 0.0;
let mut index = 0;
for i in 0..3 {
let length = self.max[i] - self.min[i];
if length > longest {
longest = length;
index = i;
}
}
index
}
pub fn area(&self) -> Float {
let lengths = self.max - self.min;
2.0 * (lengths.x * lengths.y + lengths.y * lengths.z + lengths.z * lengths.x).max(0.0)
}
}
impl Intersect<'_, Float> for Aabb {
fn intersect(&self, ray: &Ray) -> Option<Float> {
let t1 = (self.min - ray.orig).mul_element_wise(ray.reciprocal_dir);
let t2 = (self.max - ray.orig).mul_element_wise(ray.reciprocal_dir);
let mut start = consts::MIN;
let mut end = consts::MAX;
for i in 0..3 {
if ray.dir[i] == 0.0 && (ray.orig[i] < self.min[i] || ray.orig[i] > self.max[i]) {
// Can't hit
return None;
} else if ray.neg_dir[i] {
start = start.max(t2[i]);
end = end.min(t1[i]);
} else {
start = start.max(t1[i]);
end = end.min(t2[i]);
}
}
if start <= end && end > 0.0 && start < ray.length {
Some(start)
} else {
None
}
}
}
pub fn min_point(p1: &Point3<Float>, p2: &Point3<Float>) -> Point3<Float> {
let mut p_min = Point3::max_value();
for i in 0..3 {
p_min[i] = p1[i].min(p2[i]);
}
p_min
}
pub fn max_point(p1: &Point3<Float>, p2: &Point3<Float>) -> Point3<Float> {
let mut p_max = Point3::min_value();
for i in 0..3 {
p_max[i] = p1[i].max(p2[i]);
}
p_max
}
| Aabb | identifier_name |
step1_read_print.rs | use rust_mal_lib::env::{Env, Environment};
use rust_mal_lib::reader;
use rust_mal_lib::types::{MalError, MalResult, MalValue};
use rust_mal_steps::scaffold::*;
fn read(string: &str) -> MalResult {
reader::read_str(string)
}
fn eval(ast: MalValue) -> MalResult {
Ok(ast)
}
fn print(expr: MalValue) -> String {
expr.pr_str(true)
}
struct Step1ReadPrint;
impl InterpreterScaffold<Env> for Step1ReadPrint {
const STEP_NAME: &'static str = "step1_read_print";
fn | () -> Result<Env, MalError> {
Ok(Environment::new(None))
}
fn rep(input: &str, _: &Env) -> Result<String, MalError> {
let ast = read(input)?;
let expr = eval(ast)?;
Ok(print(expr))
}
}
fn main() -> Result<(), String> {
cli_loop::<Env, Step1ReadPrint>()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_step1_spec() {
assert_eq!(
validate_against_spec::<Env, Step1ReadPrint>("step1_read_print.mal"),
Ok(())
);
}
}
| create_env | identifier_name |
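// --- Illustrative sketch, not part of the original step file ---
// How the pieces above fit together outside of `cli_loop`: build the (empty)
// environment once, then push a line through read -> eval -> print. At this
// step `eval` is the identity, so the result is simply the re-printed form.
// The input string is only an example.
fn demo_rep_once() -> Result<String, MalError> {
    let env = Step1ReadPrint::create_env()?;
    Step1ReadPrint::rep("(+ 1 2)", &env)
}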
step1_read_print.rs | use rust_mal_lib::env::{Env, Environment};
use rust_mal_lib::reader;
use rust_mal_lib::types::{MalError, MalResult, MalValue};
use rust_mal_steps::scaffold::*;
fn read(string: &str) -> MalResult {
reader::read_str(string)
}
fn eval(ast: MalValue) -> MalResult {
Ok(ast)
}
fn print(expr: MalValue) -> String {
expr.pr_str(true)
}
struct Step1ReadPrint;
impl InterpreterScaffold<Env> for Step1ReadPrint {
const STEP_NAME: &'static str = "step1_read_print";
| Ok(Environment::new(None))
}
fn rep(input: &str, _: &Env) -> Result<String, MalError> {
let ast = read(input)?;
let expr = eval(ast)?;
Ok(print(expr))
}
}
fn main() -> Result<(), String> {
cli_loop::<Env, Step1ReadPrint>()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_step1_spec() {
assert_eq!(
validate_against_spec::<Env, Step1ReadPrint>("step1_read_print.mal"),
Ok(())
);
}
} | fn create_env() -> Result<Env, MalError> { | random_line_split |
geth.rs | // Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::env;
use std::path::PathBuf;
use ethkey::Address;
use {SafeAccount, Error};
use super::{KeyDirectory, DiskDirectory, DirectoryType};
#[cfg(target_os = "macos")]
fn geth_dir_path() -> PathBuf {
let mut home = env::home_dir().expect("Failed to get home dir");
home.push("Library");
home.push("Ethereum");
home
}
#[cfg(windows)]
/// Default path for ethereum installation on Windows
pub fn geth_dir_path() -> PathBuf {
let mut home = env::home_dir().expect("Failed to get home dir");
home.push("AppData");
home.push("Roaming");
home.push("Ethereum");
home
}
#[cfg(not(any(target_os = "macos", windows)))]
/// Default path for ethereum installation on posix system which is not Mac OS
pub fn geth_dir_path() -> PathBuf {
let mut home = env::home_dir().expect("Failed to get home dir");
home.push(".ethereum");
home
}
fn geth_keystore(t: DirectoryType) -> PathBuf {
let mut dir = geth_dir_path();
match t {
DirectoryType::Testnet => {
dir.push("testnet");
dir.push("keystore");
},
DirectoryType::Main => {
dir.push("keystore");
}
}
dir
}
pub struct GethDirectory {
dir: DiskDirectory,
}
impl GethDirectory {
pub fn create(t: DirectoryType) -> Result<Self, Error> {
let result = GethDirectory {
dir: try!(DiskDirectory::create(geth_keystore(t))),
};
Ok(result)
}
pub fn open(t: DirectoryType) -> Self {
GethDirectory {
dir: DiskDirectory::at(geth_keystore(t)),
}
}
}
impl KeyDirectory for GethDirectory {
fn load(&self) -> Result<Vec<SafeAccount>, Error> {
self.dir.load()
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.insert(account)
}
fn | (&self, address: &Address) -> Result<(), Error> {
self.dir.remove(address)
}
}
| remove | identifier_name |
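// --- Illustrative sketch, not part of the original geth.rs ---
// Typical use of the wrapper above: open the default geth keystore and list
// the accounts in it. `DirectoryType::Main` maps to `<geth dir>/keystore` and
// `DirectoryType::Testnet` to `<geth dir>/testnet/keystore` (see
// `geth_keystore`); the keystore files themselves are only read by `load`.
fn list_geth_accounts() -> Result<Vec<SafeAccount>, Error> {
    let dir = GethDirectory::open(DirectoryType::Main);
    dir.load()
}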
geth.rs | // Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::env;
use std::path::PathBuf;
use ethkey::Address;
use {SafeAccount, Error};
use super::{KeyDirectory, DiskDirectory, DirectoryType};
#[cfg(target_os = "macos")]
fn geth_dir_path() -> PathBuf {
let mut home = env::home_dir().expect("Failed to get home dir");
home.push("Library"); | }
#[cfg(windows)]
/// Default path for ethereum installation on Windows
pub fn geth_dir_path() -> PathBuf {
let mut home = env::home_dir().expect("Failed to get home dir");
home.push("AppData");
home.push("Roaming");
home.push("Ethereum");
home
}
#[cfg(not(any(target_os = "macos", windows)))]
/// Default path for ethereum installation on posix system which is not Mac OS
pub fn geth_dir_path() -> PathBuf {
let mut home = env::home_dir().expect("Failed to get home dir");
home.push(".ethereum");
home
}
fn geth_keystore(t: DirectoryType) -> PathBuf {
let mut dir = geth_dir_path();
match t {
DirectoryType::Testnet => {
dir.push("testnet");
dir.push("keystore");
},
DirectoryType::Main => {
dir.push("keystore");
}
}
dir
}
pub struct GethDirectory {
dir: DiskDirectory,
}
impl GethDirectory {
pub fn create(t: DirectoryType) -> Result<Self, Error> {
let result = GethDirectory {
dir: try!(DiskDirectory::create(geth_keystore(t))),
};
Ok(result)
}
pub fn open(t: DirectoryType) -> Self {
GethDirectory {
dir: DiskDirectory::at(geth_keystore(t)),
}
}
}
impl KeyDirectory for GethDirectory {
fn load(&self) -> Result<Vec<SafeAccount>, Error> {
self.dir.load()
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.insert(account)
}
fn remove(&self, address: &Address) -> Result<(), Error> {
self.dir.remove(address)
}
} | home.push("Ethereum");
home | random_line_split |
geth.rs | // Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::env;
use std::path::PathBuf;
use ethkey::Address;
use {SafeAccount, Error};
use super::{KeyDirectory, DiskDirectory, DirectoryType};
#[cfg(target_os = "macos")]
fn geth_dir_path() -> PathBuf {
let mut home = env::home_dir().expect("Failed to get home dir");
home.push("Library");
home.push("Ethereum");
home
}
#[cfg(windows)]
/// Default path for ethereum installation on Windows
pub fn geth_dir_path() -> PathBuf {
let mut home = env::home_dir().expect("Failed to get home dir");
home.push("AppData");
home.push("Roaming");
home.push("Ethereum");
home
}
#[cfg(not(any(target_os = "macos", windows)))]
/// Default path for ethereum installation on posix system which is not Mac OS
pub fn geth_dir_path() -> PathBuf {
let mut home = env::home_dir().expect("Failed to get home dir");
home.push(".ethereum");
home
}
fn geth_keystore(t: DirectoryType) -> PathBuf {
let mut dir = geth_dir_path();
match t {
DirectoryType::Testnet => {
dir.push("testnet");
dir.push("keystore");
},
DirectoryType::Main => {
dir.push("keystore");
}
}
dir
}
pub struct GethDirectory {
dir: DiskDirectory,
}
impl GethDirectory {
pub fn create(t: DirectoryType) -> Result<Self, Error> |
pub fn open(t: DirectoryType) -> Self {
GethDirectory {
dir: DiskDirectory::at(geth_keystore(t)),
}
}
}
impl KeyDirectory for GethDirectory {
fn load(&self) -> Result<Vec<SafeAccount>, Error> {
self.dir.load()
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.insert(account)
}
fn remove(&self, address: &Address) -> Result<(), Error> {
self.dir.remove(address)
}
}
| {
let result = GethDirectory {
dir: try!(DiskDirectory::create(geth_keystore(t))),
};
Ok(result)
} | identifier_body |
part1.rs | // adventofcode - day 12
// part 1
use std::io::prelude::*;
use std::fs::File;
fn main() |
0
},
_ if last.is_digit(10) => {
let tmp = tmp_val * multiplier;
tmp_val = 0;
multiplier = 1;
tmp
}
_ => 0,
};
last = ch;
}
println!("Value: {}", value);
}
// This function simply imports the data set from the input file at ../../inputs/12.txt
fn import_data() -> String {
let mut file = match File::open("../../inputs/12.txt") {
Ok(f) => f,
Err(e) => panic!("file error: {}", e),
};
let mut data = String::new();
match file.read_to_string(&mut data){
Ok(_) => {},
Err(e) => panic!("file error: {}", e),
};
data
}
| {
println!("Advent of Code - day 12 | part 1");
// import data
let data = import_data();
let mut value = 0i32;
let mut tmp_val = 0i32;
let mut last: char = '\x00';
let mut multiplier = 1;
for ch in data.chars() {
value += match ch {
'0'...'9' => {
if last == '-' {
multiplier = -1;
}
tmp_val = tmp_val * 10 + match ch.to_string().parse::<i32>() {
Ok(x) => x,
Err(e) => panic!("Help! {}", e),
}; | identifier_body |
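// --- Illustrative sketch, not part of the original part1.rs ---
// The same accumulate-digits-and-flush approach used in `main` above, pulled
// out into a helper so it can be unit-tested. The final `+ tmp_val * multiplier`
// also flushes a number that ends the input, a case the loop in `main` never
// hits because the puzzle input ends with a closing bracket.
fn sum_numbers(data: &str) -> i32 {
    let mut value = 0i32;
    let mut tmp_val = 0i32;
    let mut multiplier = 1i32;
    let mut last = '\x00';
    for ch in data.chars() {
        if ch.is_digit(10) {
            if last == '-' {
                multiplier = -1;
            }
            tmp_val = tmp_val * 10 + ch.to_digit(10).unwrap() as i32;
        } else if last.is_digit(10) {
            value += tmp_val * multiplier;
            tmp_val = 0;
            multiplier = 1;
        }
        last = ch;
    }
    value + tmp_val * multiplier
}

#[test]
fn sums_signed_numbers() {
    assert_eq!(sum_numbers("[1,{\"a\":-2},3]"), 2);
}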
part1.rs | // adventofcode - day 12
// part 1
use std::io::prelude::*;
use std::fs::File;
fn main(){
println!("Advent of Code - day 12 | part 1");
// import data
let data = import_data();
let mut value = 0i32;
let mut tmp_val = 0i32;
let mut last: char = '\x00';
let mut multiplier = 1;
for ch in data.chars() {
value += match ch {
'0'...'9' => {
if last == '-' {
multiplier = -1;
}
tmp_val = tmp_val * 10 + match ch.to_string().parse::<i32>() {
Ok(x) => x,
Err(e) => panic!("Help! {}", e),
};
0
},
_ if last.is_digit(10) => {
let tmp = tmp_val * multiplier;
tmp_val = 0;
multiplier = 1;
tmp
}
_ => 0,
};
last = ch;
}
println!("Value: {}", value);
}
// This function simply imports the data set from a file called input.txt
fn import_data() -> String {
let mut file = match File::open("../../inputs/12.txt") {
Ok(f) => f,
Err(e) => panic!("file error: {}", e),
};
let mut data = String::new();
match file.read_to_string(&mut data){
Ok(_) => {},
Err(e) => panic!("file error: {}", e),
};
data | } | random_line_split |
|
part1.rs | // adventofcode - day 12
// part 1
use std::io::prelude::*;
use std::fs::File;
fn main(){
println!("Advent of Code - day 12 | part 1");
// import data
let data = import_data();
let mut value = 0i32;
let mut tmp_val = 0i32;
let mut last: char = '\x00';
let mut multiplier = 1;
for ch in data.chars() {
value += match ch {
'0'...'9' => {
if last == '-' {
multiplier = -1;
}
tmp_val = tmp_val * 10 + match ch.to_string().parse::<i32>() {
Ok(x) => x,
Err(e) => panic!("Help! {}", e),
};
0
},
_ if last.is_digit(10) => {
let tmp = tmp_val * multiplier;
tmp_val = 0;
multiplier = 1;
tmp
}
_ => 0,
};
last = ch;
}
println!("Value: {}", value);
}
// This function simply imports the data set from a file called input.txt
fn | () -> String {
let mut file = match File::open("../../inputs/12.txt") {
Ok(f) => f,
Err(e) => panic!("file error: {}", e),
};
let mut data = String::new();
match file.read_to_string(&mut data){
Ok(_) => {},
Err(e) => panic!("file error: {}", e),
};
data
}
| import_data | identifier_name |
data.rs | use redox::prelude::v1::*;
use table::NodeTable;
/// A data node (file/dir)
pub enum Data {
/// File
File(File),
/// Directory
Dir(Dir),
/// Nothing
Nil,
}
impl Data {
pub fn name(&self) -> &str {
match self {
&Data::File(ref f) => &f.name,
&Data::Dir(ref d) => &d.name,
&Data::Nil => "\0",
}
} |
/// A file
pub struct File {
/// The name of the file
name: String,
/// The actual content of the file
data: Vec<u8>,
}
impl File {
/// Create a file from a slice of bytes
pub fn from_bytes(b: &[u8]) -> Self {
let name = unsafe {
String::from_utf8_unchecked(b[0..64].to_vec())
};
let data = b[256..].to_vec();
File {
name: name,
data: data,
}
}
}
/// A directory
pub struct Dir {
/// The name of the directory
name: String,
/// The table of the directory
nodes: Vec<DataPtr>,
}
impl Dir {
/// Create a new directory from a slice of bytes
pub fn from_bytes(b: &[u8]) -> Self {
let name = unsafe {
String::from_utf8_unchecked(b[0..64].to_vec())
};
let mut n = 0;
while let Some(&35) = b.get(n + 256 - 1) {
n += 256;
}
let nodes = b[n..].to_vec().iter().splitn(16).map(|x| DataPtr::from_bytes(x)).collect();
Dir {
name: name,
nodes: nodes,
}
}
/// Get the table represented by this directory
pub fn get_table<'a>(&'a self) -> NodeTable<'a> {
NodeTable::from_bytes(&self.data[..])
}
} | } | random_line_split |
data.rs | use redox::prelude::v1::*;
use table::NodeTable;
/// A data node (file/dir)
pub enum Data {
/// File
File(File),
/// Directory
Dir(Dir),
/// Nothing
Nil,
}
impl Data {
pub fn | (&self) -> &str {
match self {
&Data::File(ref f) => &f.name,
&Data::Dir(ref d) => &d.name,
&Data::Nil => "\0",
}
}
}
/// A file
pub struct File {
/// The name of the file
name: String,
/// The actual content of the file
data: Vec<u8>,
}
impl File {
/// Create a file from a slice of bytes
pub fn from_bytes(b: &[u8]) -> Self {
let name = unsafe {
String::from_utf8_unchecked(b[0..64].to_vec())
};
let data = b[256..].to_vec();
File {
name: name,
data: data,
}
}
}
/// A directory
pub struct Dir {
/// The name of the directory
name: String,
/// The table of the directory
nodes: Vec<DataPtr>,
}
impl Dir {
/// Create a new directory from a slice of bytes
pub fn from_bytes(b: &[u8]) -> Self {
let name = unsafe {
String::from_utf8_unchecked(b[0..64].to_vec())
};
let mut n = 0;
while let Some(&35) = b.get(n + 256 - 1) {
n += 256;
}
let nodes = b[n..].to_vec().iter().splitn(16).map(|x| DataPtr::from_bytes(x)).collect();
Dir {
name: name,
nodes: nodes,
}
}
/// Get the table represented by this directory
pub fn get_table<'a>(&'a self) -> NodeTable<'a> {
NodeTable::from_bytes(&self.data[..])
}
}
| name | identifier_name |
parser.rs | mod lex;
use lex::lexeme::Lexeme;
pub struct Parser<'a> {
next: usize,
tokens: Vec<Lexeme>
}
impl<'a> Parser<'a> {
pub fn new(tokens: Vec<Lexeme>) -> Parser {
Parser {
next: 0,
tokens: tokens
}
}
pub fn parse(&self) -> Box<Expr> {
let mut block: Vec<Box<Expr>> = Vec::new();
while!self.limit() {
let stmt = match self.token.clone() {
Lexeme::Number(ref x) => Box::new(Expr {node: self.parse_integer()}),
Lexeme::StringLiteral(ref x) => Box::new(Expr {node: self.parse_string()}),
Lexeme::Identifier(ref x) => Box::new(Expr {node: self.parse_id()}),
}
}
}
fn unexpected_token(&self, token: Lexeme) {
panic!("Unexpected token found. Expected: {:?}, Found: {:?} instead.", token, self.get_token());
}
fn get_token(&self) -> Lexeme {
self.tokens[self.next].clone()
}
fn advance(&self) {
self.next += 1;
}
fn limit(&self) -> bool {
self.next == self.tokens.len()
}
fn expect(&self, token: Lexeme) -> bool {
self.get_token() == token
}
fn | (&self) -> bool {
match self.get_token() {
Lexeme::Plus | Lexeme::Minus | Lexeme::Multiply | Lexeme::Divide | Lexeme::Modulo => true,
_ => false
}
}
}
| operator | identifier_name |
parser.rs | mod lex;
use lex::lexeme::Lexeme;
pub struct Parser<'a> {
next: usize,
tokens: Vec<Lexeme>
}
impl<'a> Parser<'a> {
pub fn new(tokens: Vec<Lexeme>) -> Parser {
Parser {
next: 0,
tokens: tokens
}
}
pub fn parse(&self) -> Box<Expr> {
let mut block: Vec<Box<Expr>> = Vec::new();
while!self.limit() {
let stmt = match self.token.clone() {
Lexeme::Number(ref x) => Box::new(Expr {node: self.parse_integer()}),
Lexeme::StringLiteral(ref x) => Box::new(Expr {node: self.parse_string()}),
Lexeme::Identifier(ref x) => Box::new(Expr {node: self.parse_id()}),
}
}
}
fn unexpected_token(&self, token: Lexeme) {
panic!("Unexpected token found. Expected: {:?}, Found: {:?} instead.", token, self.get_token());
}
fn get_token(&self) -> Lexeme {
tokens[self.next]
}
fn advance(&self) {
self.next += 1;
}
fn limit(&self) -> bool |
fn expect(&self, token: Lexeme) -> bool {
if self.get_token() == token { true } else { false }
}
fn operator(&self) -> bool {
match self.get_token() {
Lexeme::Plus | Lexeme::Minus | Lexeme::Multiply | Lexeme::Divide | Lexeme::Modulo => true,
_ => false
}
}
}
| {
self.next == tokens.len()
} | identifier_body |
parser.rs | mod lex; | pub struct Parser<'a> {
next: usize,
tokens: Vec<Lexeme>
}
impl<'a> Parser<'a> {
pub fn new(tokens: Vec<Lexeme>) -> Parser {
Parser {
next: 0,
tokens: tokens
}
}
pub fn parse(&self) -> Box<Expr> {
let mut block: Vec<Box<Expr>> = Vec::new();
while!self.limit() {
let stmt = match self.token.clone() {
Lexeme::Number(ref x) => Box::new(Expr {node: self.parse_integer()}),
Lexeme::StringLiteral(ref x) => Box::new(Expr {node: self.parse_string()}),
Lexeme::Identifier(ref x) => Box::new(Expr {node: self.parse_id()}),
}
}
}
fn unexpected_token(&self, token: Lexeme) {
panic!("Unexpected token found. Expected: {:?}, Found: {:?} instead.", token, self.get_token());
}
fn get_token(&self) -> Lexeme {
tokens[self.next]
}
fn advance(&self) {
self.next += 1;
}
fn limit(&self) -> bool {
self.next == tokens.len()
}
fn expect(&self, token: Lexeme) -> bool {
if self.get_token() == token { true } else { false }
}
fn operator(&self) -> bool {
match self.get_token() {
Lexeme::Plus | Lexeme::Minus | Lexeme::Multiply | Lexeme::Divide | Lexeme::Modulo => true,
_ => false
}
}
} | use lex::lexeme::Lexeme;
| random_line_split |
regions-infer-borrow-scope.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
#![feature(box_syntax)]
struct Point {x: isize, y: isize}
fn x_coord(p: &Point) -> &isize |
pub fn main() {
let p: Box<_> = box Point {x: 3, y: 4};
let xc = x_coord(&*p);
assert_eq!(*xc, 3);
}
| {
return &p.x;
} | identifier_body |
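// --- Illustrative counterpart, not part of the original test ---
// The point of the test above is that the region of the borrow passed to
// `x_coord` is inferred to cover the later use of `xc` while `p` is still
// alive. A variant that tried to return the borrow past the lifetime of the
// box would be rejected by the borrow checker, e.g.:
//
//     fn bad() -> &'static isize {
//         let p: Box<_> = box Point {x: 3, y: 4};
//         x_coord(&*p) // error: `*p` does not live long enough (or similar)
//     }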
regions-infer-borrow-scope.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
#![feature(box_syntax)]
struct Point {x: isize, y: isize}
fn x_coord(p: &Point) -> &isize {
return &p.x;
} | pub fn main() {
let p: Box<_> = box Point {x: 3, y: 4};
let xc = x_coord(&*p);
assert_eq!(*xc, 3);
} | random_line_split |
|
regions-infer-borrow-scope.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(dead_code)]
#![feature(box_syntax)]
struct | {x: isize, y: isize}
fn x_coord(p: &Point) -> &isize {
return &p.x;
}
pub fn main() {
let p: Box<_> = box Point {x: 3, y: 4};
let xc = x_coord(&*p);
assert_eq!(*xc, 3);
}
| Point | identifier_name |
stability-attribute-sanity-2.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// More checks that stability attributes are used correctly
#![feature(staged_api)]
#![stable(feature = "stable_test_feature", since = "1.0.0")]
#[stable(feature = "a", feature = "b", since = "1.0.0")] //~ ERROR multiple 'feature' items
fn f1() { }
#[stable(feature = "a", sinse = "1.0.0")] //~ ERROR unknown meta item'sinse'
fn f2() { }
#[unstable(feature = "a", issue = "no")] //~ ERROR incorrect 'issue'
fn | () { }
fn main() { }
| f3 | identifier_name |
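// --- Illustrative counterpart, not part of the original test ---
// The same attributes in well-formed shape, for contrast: exactly one
// `feature` item, `since` spelled correctly, and a numeric `issue` string on
// the unstable item.
#[stable(feature = "c", since = "1.0.0")]
fn f4_ok() { }
#[unstable(feature = "d", issue = "1234")]
fn f5_ok() { }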
stability-attribute-sanity-2.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// More checks that stability attributes are used correctly
#![feature(staged_api)]
#![stable(feature = "stable_test_feature", since = "1.0.0")]
#[stable(feature = "a", feature = "b", since = "1.0.0")] //~ ERROR multiple 'feature' items
fn f1() { }
#[stable(feature = "a", sinse = "1.0.0")] //~ ERROR unknown meta item'sinse'
fn f2() { }
#[unstable(feature = "a", issue = "no")] //~ ERROR incorrect 'issue' | fn main() { } | fn f3() { }
| random_line_split |
stability-attribute-sanity-2.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// More checks that stability attributes are used correctly
#![feature(staged_api)]
#![stable(feature = "stable_test_feature", since = "1.0.0")]
#[stable(feature = "a", feature = "b", since = "1.0.0")] //~ ERROR multiple 'feature' items
fn f1() { }
#[stable(feature = "a", sinse = "1.0.0")] //~ ERROR unknown meta item'sinse'
fn f2() { }
#[unstable(feature = "a", issue = "no")] //~ ERROR incorrect 'issue'
fn f3() |
fn main() { }
| { } | identifier_body |
similarity_matrix.rs | use crate::{Edges, Graph, NodeColorMatching, ScoreNorm};
use approx::relative_eq;
use closed01::Closed01;
use munkres::{solve_assignment, Position, WeightMatrix};
use ndarray::{Array2, FoldWhile, Zip};
use std::{cmp, mem};
type Matrix = Array2<f32>;
#[derive(Debug)]
pub struct SimilarityMatrix<'a, F, G, E, N>
where
F: NodeColorMatching<N>,
G: Graph<EDGE = E, NODE = N> + 'a,
E: Edges,
N: Clone,
{
graph_a: &'a G,
graph_b: &'a G,
node_color_matching: F,
// current version of similarity matrix
current: Matrix,
// previous version of similarity matrix
previous: Matrix,
// current number of iterations
num_iterations: usize,
}
impl<'a, F, G, E, N> SimilarityMatrix<'a, F, G, E, N>
where
F: NodeColorMatching<N>,
G: Graph<EDGE = E, NODE = N>,
E: Edges,
N: Clone,
{
pub fn new(
graph_a: &'a G,
graph_b: &'a G,
node_color_matching: F,
) -> SimilarityMatrix<'a, F, G, E, N> {
// `x` is the node-similarity matrix.
// we initialize `x`, so that x[i,j]=1 for all i in A.edges() and j in
// B.edges().
let shape = (graph_a.num_nodes(), graph_b.num_nodes());
let x = Matrix::from_shape_fn(shape, |(i, j)| {
if graph_a.node_degree(i) > 0 && graph_b.node_degree(j) > 0 {
// this is normally set to 1.0 (i.e. without node color matching).
node_color_matching
.node_color_matching(graph_a.node_value(i), graph_b.node_value(j))
} else {
Closed01::zero()
}
.get()
});
let new_x = Matrix::from_elem(shape, Closed01::zero().get());
SimilarityMatrix {
graph_a,
graph_b,
node_color_matching,
current: x,
previous: new_x,
num_iterations: 0,
}
}
fn in_eps(&self, eps: f32) -> bool {
Zip::from(&self.previous)
.and(&self.current)
.fold_while(true, |all_prev_in_eps, x, y| {
if all_prev_in_eps && relative_eq!(x, y, epsilon = eps) {
FoldWhile::Continue(true)
} else {
FoldWhile::Done(false)
}
})
.into_inner()
}
/// Calculates the next iteration of the similarity matrix (x[k+1]).
pub fn next(&mut self) {
{
let x = &self.current;
for ((i, j), new_x_ij) in self.previous.indexed_iter_mut() {
let scale = self
.node_color_matching
.node_color_matching(self.graph_a.node_value(i), self.graph_b.node_value(j));
let in_score = s_next(self.graph_a.in_edges_of(i), self.graph_b.in_edges_of(j), x);
let out_score = s_next(
self.graph_a.out_edges_of(i),
self.graph_b.out_edges_of(j),
x,
);
*new_x_ij = in_score.average(out_score).mul(scale).get();
}
}
mem::swap(&mut self.previous, &mut self.current);
self.num_iterations += 1;
}
/// Iteratively calculate the similarity matrix.
///
/// `stop_after_iter`: Stop after iteration (Calculate x(stop_after_iter))
/// `eps`: When to stop the iteration
pub fn iterate(&mut self, stop_after_iter: usize, eps: f32) {
for _ in 0..stop_after_iter {
if self.in_eps(eps) {
break;
}
self.next();
}
}
pub fn matrix(&self) -> &Matrix {
&self.current
}
pub fn num_iterations(&self) -> usize {
self.num_iterations
}
pub fn min_nodes(&self) -> usize {
cmp::min(self.current.nrows(), self.current.ncols())
}
pub fn max_nodes(&self) -> usize {
cmp::max(self.current.nrows(), self.current.ncols())
}
pub fn optimal_node_assignment(&self) -> Vec<Position> {
let n = self.min_nodes();
let assignment = if n > 0 {
let mut w = WeightMatrix::from_fn(n, |ij| similarity_cost(self.current[ij]));
solve_assignment(&mut w).unwrap()
} else {
Vec::new()
};
assert!(assignment.len() == n);
assignment
}
fn score_optimal_sum(&self, node_assignment: Option<&[Position]>) -> f32 {
match node_assignment {
Some(node_assignment) => {
assert!(node_assignment.len() == self.min_nodes());
node_assignment
.iter()
.fold(0.0, |acc, &Position { row, column }| {
acc + self.current[(row, column)]
})
}
None => {
let node_assignment = self.optimal_node_assignment();
assert!(node_assignment.len() == self.min_nodes());
node_assignment
.iter()
.fold(0.0, |acc, &Position { row, column }| {
acc + self.current[(row, column)]
})
}
}
}
/// Calculate a measure of how well the edge weights match up.
///
/// We start by calculating the optimal node assignment between nodes of graph A and graph B,
/// then compare all outgoing edges of similar-assigned nodes by again using an assignment
/// between the edge-weight differences of all edge pairs.
pub fn score_outgoing_edge_weights_sum_norm(
&self,
node_assignment: &[Position],
norm: ScoreNorm,
) -> Closed01<f32> {
let n = self.min_nodes();
let m = self.max_nodes();
debug_assert!(m >= n);
assert!(node_assignment.len() == n);
// we sum up all edge weight scores
let sum: f32 = node_assignment.iter().fold(
0.0,
|acc,
&Position {
row: node_i,
column: node_j,
}| {
let score_ij = self.score_outgoing_edge_weights_of(node_i, node_j);
acc + score_ij.get()
},
);
assert!(sum >= 0.0 && sum <= n as f32);
match norm {
// Not penalize missing nodes.
ScoreNorm::MinDegree => Closed01::new(sum / n as f32),
// To penalize for missing nodes divide by the maximum number of nodes `m`.
ScoreNorm::MaxDegree => Closed01::new(sum / m as f32),
}
}
/// Calculate a similarity measure of outgoing edges of nodes `node_i` of graph A and `node_j`
/// of graph B. A score of 1.0 means, the edges weights match up perfectly. 0.0 means, no
/// similarity.
fn score_outgoing_edge_weights_of(&self, node_i: usize, node_j: usize) -> Closed01<f32> {
let out_i = self.graph_a.out_edges_of(node_i);
let out_j = self.graph_b.out_edges_of(node_j);
let max_deg = cmp::max(out_i.num_edges(), out_j.num_edges());
if max_deg == 0 {
// Nodes with no edges are perfectly similar
return Closed01::one();
}
// Calculates the edge weight distance between edges i and j.
let edge_weight_distance = &|(i, j)| {
match (out_i.nth_edge_weight(i), out_j.nth_edge_weight(j)) {
(Some(w_i), Some(w_j)) => w_i.distance(w_j),
_ => {
// Maximum penalty between two weighted edges
// NOTE: missing edges could be penalized more, but we already
// penalize for that in the node similarity measure.
Closed01::one()
}
}
.get()
};
let mut w = WeightMatrix::from_fn(max_deg, edge_weight_distance);
// calculate optimal edge weight assignment.
let assignment = solve_assignment(&mut w).unwrap();
assert!(assignment.len() == max_deg);
// The sum is the sum of all weight differences on the optimal `path`.
// Its range is from 0.0 (perfect matching) to max_deg*1.0 (bad matching).
let sum: f32 = assignment
.iter()
.fold(0.0, |acc, &Position { row, column }| {
acc + edge_weight_distance((row, column))
});
debug_assert!(sum >= 0.0 && sum <= max_deg as f32);
// we "invert" the normalized sum so that 1.0 means perfect matching and 0.0
// no matching.
Closed01::new(sum / max_deg as f32).inv()
}
/// Sums the optimal assignment of the node similarities and normalizes (divides)
/// by the min/max degree of both graphs.
/// ScoreNorm::MinDegree is used as default in the paper.
pub fn score_optimal_sum_norm(
&self,
node_assignment: Option<&[Position]>,
norm: ScoreNorm,
) -> Closed01<f32> {
let n = self.min_nodes();
let m = self.max_nodes();
if n > 0 {
assert!(m > 0);
let sum = self.score_optimal_sum(node_assignment);
assert!(sum >= 0.0 && sum <= n as f32);
match norm {
// Not penalize missing nodes.
ScoreNorm::MinDegree => Closed01::new(sum / n as f32),
// To penalize for missing nodes, divide by the maximum number of nodes `m`.
ScoreNorm::MaxDegree => Closed01::new(sum / m as f32),
}
} else {
Closed01::zero()
}
}
/// Calculates the average over the whole node similarity matrix. This is faster,
/// as no assignment has to be found. "Graphs with greater number of automorphisms
/// would be considered to be more self-similar than graphs without automorphisms."
pub fn | (&self) -> Closed01<f32> {
let n = self.min_nodes();
if n > 0 {
let sum: f32 = self.current.iter().fold(0.0, |acc, &v| acc + v);
let len = self.current.shape().len();
assert!(len > 0);
Closed01::new(sum / len as f32)
} else {
Closed01::zero()
}
}
}
/// Calculates the similarity of two nodes `i` and `j`.
///
/// `n_i` contains the neighborhood of i (either in or out neighbors, not both)
/// `n_j` contains the neighborhood of j (either in or out neighbors, not both)
/// `x` the similarity matrix.
fn s_next<T: Edges>(n_i: &T, n_j: &T, x: &Array2<f32>) -> Closed01<f32> {
let max_deg = cmp::max(n_i.num_edges(), n_j.num_edges());
let min_deg = cmp::min(n_i.num_edges(), n_j.num_edges());
debug_assert!(min_deg <= max_deg);
if max_deg == 0 {
debug_assert!(n_i.num_edges() == 0);
debug_assert!(n_j.num_edges() == 0);
// in the paper, 0/0 is defined as 1.0
// Two nodes without any edges are perfectly similar.
return Closed01::one();
}
if min_deg == 0 {
// A node without any edges is not similar at all to a node with edges.
return Closed01::zero();
}
assert!(min_deg > 0 && max_deg > 0);
// map indices from 0..min(degree) to the node indices
let mapidx = |(a, b)| (n_i.nth_edge(a).unwrap(), n_j.nth_edge(b).unwrap());
let mut w = WeightMatrix::from_fn(min_deg, |ab| similarity_cost(x[mapidx(ab)]));
let assignment = solve_assignment(&mut w).unwrap();
assert!(assignment.len() == min_deg);
let sum: f32 = assignment
.iter()
.fold(0.0, |acc, &Position { row, column }| {
acc + x[mapidx((row, column))]
});
Closed01::new(sum / max_deg as f32)
}
// NOTE: Our weight matrix minimizes the cost, while our similarity matrix
// wants to maximize the similarity score. That's why we have to convert
// the cost with 1.0 - x.
fn similarity_cost(weight: f32) -> f32 {
debug_assert!(weight >= 0.0 && weight <= 1.0);
1.0 - weight
}
| score_average | identifier_name |
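// --- Illustrative sketch, not part of the original similarity_matrix.rs ---
// End-to-end use of the API above: build the matrix, iterate towards a fixed
// point, then score the optimal node assignment. The iteration limit (100)
// and epsilon (0.01) are arbitrary example values.
fn graph_similarity<'a, F, G, E, N>(graph_a: &'a G, graph_b: &'a G, colors: F) -> Closed01<f32>
where
    F: NodeColorMatching<N>,
    G: Graph<EDGE = E, NODE = N> + 'a,
    E: Edges,
    N: Clone,
{
    let mut sim = SimilarityMatrix::new(graph_a, graph_b, colors);
    sim.iterate(100, 0.01);
    let assignment = sim.optimal_node_assignment();
    // `MaxDegree` penalizes missing nodes; use `MinDegree` to ignore them.
    sim.score_optimal_sum_norm(Some(&assignment[..]), ScoreNorm::MaxDegree)
}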
similarity_matrix.rs | use crate::{Edges, Graph, NodeColorMatching, ScoreNorm};
use approx::relative_eq;
use closed01::Closed01;
use munkres::{solve_assignment, Position, WeightMatrix};
use ndarray::{Array2, FoldWhile, Zip};
use std::{cmp, mem};
type Matrix = Array2<f32>;
#[derive(Debug)]
pub struct SimilarityMatrix<'a, F, G, E, N>
where
F: NodeColorMatching<N>,
G: Graph<EDGE = E, NODE = N> + 'a,
E: Edges,
N: Clone,
{
graph_a: &'a G,
graph_b: &'a G,
node_color_matching: F,
// current version of similarity matrix
current: Matrix,
// previous version of similarity matrix
previous: Matrix,
// current number of iterations
num_iterations: usize,
}
impl<'a, F, G, E, N> SimilarityMatrix<'a, F, G, E, N>
where
F: NodeColorMatching<N>,
G: Graph<EDGE = E, NODE = N>,
E: Edges,
N: Clone,
{
pub fn new(
graph_a: &'a G,
graph_b: &'a G,
node_color_matching: F,
) -> SimilarityMatrix<'a, F, G, E, N> {
// `x` is the node-similarity matrix.
// we initialize `x`, so that x[i,j]=1 for all i in A.edges() and j in
// B.edges().
let shape = (graph_a.num_nodes(), graph_b.num_nodes());
let x = Matrix::from_shape_fn(shape, |(i, j)| {
if graph_a.node_degree(i) > 0 && graph_b.node_degree(j) > 0 {
// this is normally set to 1.0 (i.e. without node color matching).
node_color_matching
.node_color_matching(graph_a.node_value(i), graph_b.node_value(j))
} else {
Closed01::zero()
}
.get()
});
let new_x = Matrix::from_elem(shape, Closed01::zero().get());
SimilarityMatrix {
graph_a,
graph_b,
node_color_matching,
current: x,
previous: new_x,
num_iterations: 0,
}
}
fn in_eps(&self, eps: f32) -> bool {
Zip::from(&self.previous)
.and(&self.current)
.fold_while(true, |all_prev_in_eps, x, y| {
if all_prev_in_eps && relative_eq!(x, y, epsilon = eps) {
FoldWhile::Continue(true)
} else {
FoldWhile::Done(false)
}
})
.into_inner()
}
/// Calculates the next iteration of the similarity matrix (x[k+1]).
pub fn next(&mut self) {
{
let x = &self.current;
for ((i, j), new_x_ij) in self.previous.indexed_iter_mut() {
let scale = self
.node_color_matching
.node_color_matching(self.graph_a.node_value(i), self.graph_b.node_value(j));
let in_score = s_next(self.graph_a.in_edges_of(i), self.graph_b.in_edges_of(j), x);
let out_score = s_next(
self.graph_a.out_edges_of(i),
self.graph_b.out_edges_of(j),
x,
);
*new_x_ij = in_score.average(out_score).mul(scale).get();
}
}
mem::swap(&mut self.previous, &mut self.current);
self.num_iterations += 1;
}
/// Iteratively calculate the similarity matrix.
///
/// `stop_after_iter`: Stop after iteration (Calculate x(stop_after_iter))
/// `eps`: When to stop the iteration
pub fn iterate(&mut self, stop_after_iter: usize, eps: f32) {
for _ in 0..stop_after_iter {
if self.in_eps(eps) {
break;
}
self.next();
}
}
pub fn matrix(&self) -> &Matrix {
&self.current
}
pub fn num_iterations(&self) -> usize {
self.num_iterations
} | pub fn max_nodes(&self) -> usize {
cmp::max(self.current.nrows(), self.current.ncols())
}
pub fn optimal_node_assignment(&self) -> Vec<Position> {
let n = self.min_nodes();
let assignment = if n > 0 {
let mut w = WeightMatrix::from_fn(n, |ij| similarity_cost(self.current[ij]));
solve_assignment(&mut w).unwrap()
} else {
Vec::new()
};
assert!(assignment.len() == n);
assignment
}
fn score_optimal_sum(&self, node_assignment: Option<&[Position]>) -> f32 {
match node_assignment {
Some(node_assignment) => {
assert!(node_assignment.len() == self.min_nodes());
node_assignment
.iter()
.fold(0.0, |acc, &Position { row, column }| {
acc + self.current[(row, column)]
})
}
None => {
let node_assignment = self.optimal_node_assignment();
assert!(node_assignment.len() == self.min_nodes());
node_assignment
.iter()
.fold(0.0, |acc, &Position { row, column }| {
acc + self.current[(row, column)]
})
}
}
}
/// Calculate a measure how good the edge weights match up.
///
/// We start by calculating the optimal node assignment between nodes of graph A and graph B,
/// then compare all outgoing edges of similar-assigned nodes by again using an assignment
/// between the edge-weight differences of all edge pairs.
pub fn score_outgoing_edge_weights_sum_norm(
&self,
node_assignment: &[Position],
norm: ScoreNorm,
) -> Closed01<f32> {
let n = self.min_nodes();
let m = self.max_nodes();
debug_assert!(m >= n);
assert!(node_assignment.len() == n);
// we sum up all edge weight scores
let sum: f32 = node_assignment.iter().fold(
0.0,
|acc,
&Position {
row: node_i,
column: node_j,
}| {
let score_ij = self.score_outgoing_edge_weights_of(node_i, node_j);
acc + score_ij.get()
},
);
assert!(sum >= 0.0 && sum <= n as f32);
match norm {
// Not penalize missing nodes.
ScoreNorm::MinDegree => Closed01::new(sum / n as f32),
// To penalize for missing nodes divide by the maximum number of nodes `m`.
ScoreNorm::MaxDegree => Closed01::new(sum / m as f32),
}
}
/// Calculate a similarity measure of outgoing edges of nodes `node_i` of graph A and `node_j`
/// of graph B. A score of 1.0 means, the edges weights match up perfectly. 0.0 means, no
/// similarity.
fn score_outgoing_edge_weights_of(&self, node_i: usize, node_j: usize) -> Closed01<f32> {
let out_i = self.graph_a.out_edges_of(node_i);
let out_j = self.graph_b.out_edges_of(node_j);
let max_deg = cmp::max(out_i.num_edges(), out_j.num_edges());
if max_deg == 0 {
// Nodes with no edges are perfectly similar
return Closed01::one();
}
// Calculates the edge weight distance between edges i and j.
let edge_weight_distance = &|(i, j)| {
match (out_i.nth_edge_weight(i), out_j.nth_edge_weight(j)) {
(Some(w_i), Some(w_j)) => w_i.distance(w_j),
_ => {
// Maximum penalty between two weighted edges
// NOTE: missing edges could be penalized more, but we already
// penalize for that in the node similarity measure.
Closed01::one()
}
}
.get()
};
let mut w = WeightMatrix::from_fn(max_deg, edge_weight_distance);
// calculate optimal edge weight assignement.
let assignment = solve_assignment(&mut w).unwrap();
assert!(assignment.len() == max_deg);
// The sum is the sum of all weight differences on the optimal `path`.
// It's range is from 0.0 (perfect matching) to max_deg*1.0 (bad matching).
let sum: f32 = assignment
.iter()
.fold(0.0, |acc, &Position { row, column }| {
acc + edge_weight_distance((row, column))
});
debug_assert!(sum >= 0.0 && sum <= max_deg as f32);
// we "invert" the normalized sum so that 1.0 means perfect matching and 0.0
// no matching.
Closed01::new(sum / max_deg as f32).inv()
}
/// Sums the optimal assignment of the node similarities and normalizes (divides)
/// by the min/max degree of both graphs.
/// ScoreNorm::MinDegree is used as default in the paper.
pub fn score_optimal_sum_norm(
&self,
node_assignment: Option<&[Position]>,
norm: ScoreNorm,
) -> Closed01<f32> {
let n = self.min_nodes();
let m = self.max_nodes();
if n > 0 {
assert!(m > 0);
let sum = self.score_optimal_sum(node_assignment);
assert!(sum >= 0.0 && sum <= n as f32);
match norm {
// Not penalize missing nodes.
ScoreNorm::MinDegree => Closed01::new(sum / n as f32),
// To penalize for missing nodes, divide by the maximum number of nodes `m`.
ScoreNorm::MaxDegree => Closed01::new(sum / m as f32),
}
} else {
Closed01::zero()
}
}
/// Calculates the average over the whole node similarity matrix. This is faster,
/// as no assignment has to be found. "Graphs with greater number of automorphisms
/// would be considered to be more self-similar than graphs without automorphisms."
pub fn score_average(&self) -> Closed01<f32> {
let n = self.min_nodes();
if n > 0 {
let sum: f32 = self.current.iter().fold(0.0, |acc, &v| acc + v);
let len = self.current.shape().len();
assert!(len > 0);
Closed01::new(sum / len as f32)
} else {
Closed01::zero()
}
}
}
/// Calculates the similarity of two nodes `i` and `j`.
///
/// `n_i` contains the neighborhood of i (either in or out neighbors, not both)
/// `n_j` contains the neighborhood of j (either in or out neighbors, not both)
/// `x` the similarity matrix.
fn s_next<T: Edges>(n_i: &T, n_j: &T, x: &Array2<f32>) -> Closed01<f32> {
let max_deg = cmp::max(n_i.num_edges(), n_j.num_edges());
let min_deg = cmp::min(n_i.num_edges(), n_j.num_edges());
debug_assert!(min_deg <= max_deg);
if max_deg == 0 {
debug_assert!(n_i.num_edges() == 0);
debug_assert!(n_j.num_edges() == 0);
// in the paper, 0/0 is defined as 1.0
// Two nodes without any edges are perfectly similar.
return Closed01::one();
}
if min_deg == 0 {
// A node without any edges is not similar at all to a node with edges.
return Closed01::zero();
}
assert!(min_deg > 0 && max_deg > 0);
// map indicies from 0..min(degree) to the node indices
let mapidx = |(a, b)| (n_i.nth_edge(a).unwrap(), n_j.nth_edge(b).unwrap());
let mut w = WeightMatrix::from_fn(min_deg, |ab| similarity_cost(x[mapidx(ab)]));
let assignment = solve_assignment(&mut w).unwrap();
assert!(assignment.len() == min_deg);
let sum: f32 = assignment
.iter()
.fold(0.0, |acc, &Position { row, column }| {
acc + x[mapidx((row, column))]
});
Closed01::new(sum / max_deg as f32)
}
// NOTE: Our weight matrix minimizes the cost, while our similarity matrix
// wants to maximize the similarity score. That's why we have to convert
// the cost with 1.0 - x.
fn similarity_cost(weight: f32) -> f32 {
debug_assert!(weight >= 0.0 && weight <= 1.0);
1.0 - weight
} |
pub fn min_nodes(&self) -> usize {
cmp::min(self.current.nrows(), self.current.ncols())
}
| random_line_split |
similarity_matrix.rs | use crate::{Edges, Graph, NodeColorMatching, ScoreNorm};
use approx::relative_eq;
use closed01::Closed01;
use munkres::{solve_assignment, Position, WeightMatrix};
use ndarray::{Array2, FoldWhile, Zip};
use std::{cmp, mem};
type Matrix = Array2<f32>;
#[derive(Debug)]
pub struct SimilarityMatrix<'a, F, G, E, N>
where
F: NodeColorMatching<N>,
G: Graph<EDGE = E, NODE = N> + 'a,
E: Edges,
N: Clone,
{
graph_a: &'a G,
graph_b: &'a G,
node_color_matching: F,
// current version of similarity matrix
current: Matrix,
// previous version of similarity matrix
previous: Matrix,
// current number of iterations
num_iterations: usize,
}
impl<'a, F, G, E, N> SimilarityMatrix<'a, F, G, E, N>
where
F: NodeColorMatching<N>,
G: Graph<EDGE = E, NODE = N>,
E: Edges,
N: Clone,
{
pub fn new(
graph_a: &'a G,
graph_b: &'a G,
node_color_matching: F,
) -> SimilarityMatrix<'a, F, G, E, N> {
// `x` is the node-similarity matrix.
// we initialize `x`, so that x[i,j]=1 for all i in A.edges() and j in
// B.edges().
let shape = (graph_a.num_nodes(), graph_b.num_nodes());
let x = Matrix::from_shape_fn(shape, |(i, j)| {
if graph_a.node_degree(i) > 0 && graph_b.node_degree(j) > 0 {
// this is normally set to 1.0 (i.e. without node color matching).
node_color_matching
.node_color_matching(graph_a.node_value(i), graph_b.node_value(j))
} else {
Closed01::zero()
}
.get()
});
let new_x = Matrix::from_elem(shape, Closed01::zero().get());
SimilarityMatrix {
graph_a,
graph_b,
node_color_matching,
current: x,
previous: new_x,
num_iterations: 0,
}
}
fn in_eps(&self, eps: f32) -> bool |
/// Calculates the next iteration of the similarity matrix (x[k+1]).
pub fn next(&mut self) {
{
let x = &self.current;
for ((i, j), new_x_ij) in self.previous.indexed_iter_mut() {
let scale = self
.node_color_matching
.node_color_matching(self.graph_a.node_value(i), self.graph_b.node_value(j));
let in_score = s_next(self.graph_a.in_edges_of(i), self.graph_b.in_edges_of(j), x);
let out_score = s_next(
self.graph_a.out_edges_of(i),
self.graph_b.out_edges_of(j),
x,
);
*new_x_ij = in_score.average(out_score).mul(scale).get();
}
}
mem::swap(&mut self.previous, &mut self.current);
self.num_iterations += 1;
}
/// Iteratively calculate the similarity matrix.
///
/// `stop_after_iter`: Stop after iteration (Calculate x(stop_after_iter))
/// `eps`: When to stop the iteration
pub fn iterate(&mut self, stop_after_iter: usize, eps: f32) {
for _ in 0..stop_after_iter {
if self.in_eps(eps) {
break;
}
self.next();
}
}
pub fn matrix(&self) -> &Matrix {
&self.current
}
pub fn num_iterations(&self) -> usize {
self.num_iterations
}
pub fn min_nodes(&self) -> usize {
cmp::min(self.current.nrows(), self.current.ncols())
}
pub fn max_nodes(&self) -> usize {
cmp::max(self.current.nrows(), self.current.ncols())
}
pub fn optimal_node_assignment(&self) -> Vec<Position> {
let n = self.min_nodes();
let assignment = if n > 0 {
let mut w = WeightMatrix::from_fn(n, |ij| similarity_cost(self.current[ij]));
solve_assignment(&mut w).unwrap()
} else {
Vec::new()
};
assert!(assignment.len() == n);
assignment
}
fn score_optimal_sum(&self, node_assignment: Option<&[Position]>) -> f32 {
match node_assignment {
Some(node_assignment) => {
assert!(node_assignment.len() == self.min_nodes());
node_assignment
.iter()
.fold(0.0, |acc, &Position { row, column }| {
acc + self.current[(row, column)]
})
}
None => {
let node_assignment = self.optimal_node_assignment();
assert!(node_assignment.len() == self.min_nodes());
node_assignment
.iter()
.fold(0.0, |acc, &Position { row, column }| {
acc + self.current[(row, column)]
})
}
}
}
/// Calculate a measure of how well the edge weights match up.
///
/// We start by calculating the optimal node assignment between nodes of graph A and graph B,
/// then compare all outgoing edges of similar-assigned nodes by again using an assignment
/// between the edge-weight differences of all edge pairs.
pub fn score_outgoing_edge_weights_sum_norm(
&self,
node_assignment: &[Position],
norm: ScoreNorm,
) -> Closed01<f32> {
let n = self.min_nodes();
let m = self.max_nodes();
debug_assert!(m >= n);
assert!(node_assignment.len() == n);
// we sum up all edge weight scores
let sum: f32 = node_assignment.iter().fold(
0.0,
|acc,
&Position {
row: node_i,
column: node_j,
}| {
let score_ij = self.score_outgoing_edge_weights_of(node_i, node_j);
acc + score_ij.get()
},
);
assert!(sum >= 0.0 && sum <= n as f32);
match norm {
// Do not penalize missing nodes.
ScoreNorm::MinDegree => Closed01::new(sum / n as f32),
// To penalize for missing nodes, divide by the maximum number of nodes `m`.
ScoreNorm::MaxDegree => Closed01::new(sum / m as f32),
}
}
/// Calculate a similarity measure of outgoing edges of nodes `node_i` of graph A and `node_j`
/// of graph B. A score of 1.0 means, the edges weights match up perfectly. 0.0 means, no
/// similarity.
fn score_outgoing_edge_weights_of(&self, node_i: usize, node_j: usize) -> Closed01<f32> {
let out_i = self.graph_a.out_edges_of(node_i);
let out_j = self.graph_b.out_edges_of(node_j);
let max_deg = cmp::max(out_i.num_edges(), out_j.num_edges());
if max_deg == 0 {
// Nodes with no edges are perfectly similar
return Closed01::one();
}
// Calculates the edge weight distance between edges i and j.
let edge_weight_distance = &|(i, j)| {
match (out_i.nth_edge_weight(i), out_j.nth_edge_weight(j)) {
(Some(w_i), Some(w_j)) => w_i.distance(w_j),
_ => {
// Maximum penalty between two weighted edges
// NOTE: missing edges could be penalized more, but we already
// penalize for that in the node similarity measure.
Closed01::one()
}
}
.get()
};
let mut w = WeightMatrix::from_fn(max_deg, edge_weight_distance);
// calculate the optimal edge weight assignment.
let assignment = solve_assignment(&mut w).unwrap();
assert!(assignment.len() == max_deg);
// The sum is the sum of all weight differences on the optimal `path`.
// It's range is from 0.0 (perfect matching) to max_deg*1.0 (bad matching).
let sum: f32 = assignment
.iter()
.fold(0.0, |acc, &Position { row, column }| {
acc + edge_weight_distance((row, column))
});
debug_assert!(sum >= 0.0 && sum <= max_deg as f32);
// we "invert" the normalized sum so that 1.0 means perfect matching and 0.0
// no matching.
Closed01::new(sum / max_deg as f32).inv()
}
/// Sums the optimal assignment of the node similarities and normalizes (divides)
/// by the min/max degree of both graphs.
/// ScoreNorm::MinDegree is used as default in the paper.
pub fn score_optimal_sum_norm(
&self,
node_assignment: Option<&[Position]>,
norm: ScoreNorm,
) -> Closed01<f32> {
let n = self.min_nodes();
let m = self.max_nodes();
if n > 0 {
assert!(m > 0);
let sum = self.score_optimal_sum(node_assignment);
assert!(sum >= 0.0 && sum <= n as f32);
match norm {
// Do not penalize missing nodes.
ScoreNorm::MinDegree => Closed01::new(sum / n as f32),
// To penalize for missing nodes, divide by the maximum number of nodes `m`.
ScoreNorm::MaxDegree => Closed01::new(sum / m as f32),
}
} else {
Closed01::zero()
}
}
/// Calculates the average over the whole node similarity matrix. This is faster,
/// as no assignment has to be found. "Graphs with greater number of automorphisms
/// would be considered to be more self-similar than graphs without automorphisms."
pub fn score_average(&self) -> Closed01<f32> {
let n = self.min_nodes();
if n > 0 {
let sum: f32 = self.current.iter().fold(0.0, |acc, &v| acc + v);
let len = self.current.len(); // total number of entries in the matrix
assert!(len > 0);
Closed01::new(sum / len as f32)
} else {
Closed01::zero()
}
}
}
/// Calculates the similarity of two nodes `i` and `j`.
///
/// `n_i` contains the neighborhood of i (either in or out neighbors, not both)
/// `n_j` contains the neighborhood of j (either in or out neighbors, not both)
/// `x` the similarity matrix.
fn s_next<T: Edges>(n_i: &T, n_j: &T, x: &Array2<f32>) -> Closed01<f32> {
let max_deg = cmp::max(n_i.num_edges(), n_j.num_edges());
let min_deg = cmp::min(n_i.num_edges(), n_j.num_edges());
debug_assert!(min_deg <= max_deg);
if max_deg == 0 {
debug_assert!(n_i.num_edges() == 0);
debug_assert!(n_j.num_edges() == 0);
// in the paper, 0/0 is defined as 1.0
// Two nodes without any edges are perfectly similar.
return Closed01::one();
}
if min_deg == 0 {
// A node without any edges is not similar at all to a node with edges.
return Closed01::zero();
}
assert!(min_deg > 0 && max_deg > 0);
// map indices from 0..min(degree) to the node indices
let mapidx = |(a, b)| (n_i.nth_edge(a).unwrap(), n_j.nth_edge(b).unwrap());
let mut w = WeightMatrix::from_fn(min_deg, |ab| similarity_cost(x[mapidx(ab)]));
let assignment = solve_assignment(&mut w).unwrap();
assert!(assignment.len() == min_deg);
let sum: f32 = assignment
.iter()
.fold(0.0, |acc, &Position { row, column }| {
acc + x[mapidx((row, column))]
});
Closed01::new(sum / max_deg as f32)
}
// NOTE: Our weight matrix minimizes the cost, while our similarity matrix
// wants to maximize the similarity score. That's why we have to convert
// the cost with 1.0 - x.
fn similarity_cost(weight: f32) -> f32 {
debug_assert!(weight >= 0.0 && weight <= 1.0);
1.0 - weight
}
| {
Zip::from(&self.previous)
.and(&self.current)
.fold_while(true, |all_prev_in_eps, x, y| {
if all_prev_in_eps && relative_eq!(x, y, epsilon = eps) {
FoldWhile::Continue(true)
} else {
FoldWhile::Done(false)
}
})
.into_inner()
} | identifier_body |
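// A simplified, std-only sketch of the fixed-point loop that `iterate` and `in_eps`
// implement in the file above: recompute the score matrix until successive versions
// agree to within `eps` in every entry, or a step cap is reached. A plain Vec<f32> and
// a made-up damping update stand in for ndarray and the real node-similarity update.
fn converged(prev: &[f32], cur: &[f32], eps: f32) -> bool {
    prev.iter().zip(cur).all(|(a, b)| (a - b).abs() <= eps)
}

fn main() {
    let mut current = vec![1.0_f32; 4];
    let mut previous = vec![0.0_f32; 4];
    for _ in 0..100 {
        if converged(&previous, &current, 0.01) {
            break;
        }
        previous.copy_from_slice(&current);
        // Stand-in update rule: damp every entry halfway toward 0.5.
        for v in current.iter_mut() {
            *v = 0.5 * (*v + 0.5);
        }
    }
    assert!(current.iter().all(|&v| (v - 0.5).abs() < 0.1));
}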
parallel.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Implements parallel traversals over the DOM and flow trees.
//!
//! This code is highly unsafe. Keep this file small and easy to audit.
#![allow(unsafe_code)]
use context::{LayoutContext, SharedLayoutContext};
use flow::{self, Flow, MutableFlowUtils, PostorderFlowTraversal, PreorderFlowTraversal};
use flow_ref::{self, FlowRef};
use profile_traits::time::{self, TimerMetadata, profile};
use std::mem;
use std::sync::atomic::{AtomicIsize, Ordering};
use style::dom::{TNode, UnsafeNode};
use style::parallel::{CHUNK_SIZE, WorkQueueData};
use style::parallel::{run_queue_with_custom_work_data_type};
use traversal::AssignBSizes;
use traversal::{AssignISizes, BubbleISizes, PostorderNodeMutTraversal};
use util::opts;
use util::workqueue::{WorkQueue, WorkUnit, WorkerProxy};
pub use style::parallel::traverse_dom;
#[allow(dead_code)]
fn static_assertion(node: UnsafeNode) {
unsafe {
let _: UnsafeFlow = ::std::intrinsics::transmute(node);
}
}
/// Vtable + pointer representation of a Flow trait object.
pub type UnsafeFlow = (usize, usize);
fn null_unsafe_flow() -> UnsafeFlow {
(0, 0)
}
pub fn mut_owned_flow_to_unsafe_flow(flow: *mut FlowRef) -> UnsafeFlow {
unsafe {
mem::transmute::<&Flow, UnsafeFlow>(&**flow)
}
}
pub fn borrowed_flow_to_unsafe_flow(flow: &Flow) -> UnsafeFlow {
unsafe {
mem::transmute::<&Flow, UnsafeFlow>(flow)
}
}
pub type UnsafeFlowList = (Box<Vec<UnsafeNode>>, usize);
pub type ChunkedFlowTraversalFunction =
extern "Rust" fn(UnsafeFlowList, &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>);
pub type FlowTraversalFunction = extern "Rust" fn(UnsafeFlow, &SharedLayoutContext);
/// Information that we need stored in each flow.
pub struct FlowParallelInfo {
/// The number of children that still need work done.
pub children_count: AtomicIsize,
/// The address of the parent flow.
pub parent: UnsafeFlow,
}
impl FlowParallelInfo {
pub fn new() -> FlowParallelInfo {
FlowParallelInfo {
children_count: AtomicIsize::new(0),
parent: null_unsafe_flow(),
}
}
}
/// A parallel bottom-up flow traversal.
trait ParallelPostorderFlowTraversal : PostorderFlowTraversal {
/// Process current flow and potentially traverse its ancestors.
///
/// If we are the last child that finished processing, recursively process
/// our parent. Else, stop. Also, stop at the root.
///
/// Thus, if we start with all the leaves of a tree, we end up traversing
/// the whole tree bottom-up because each parent will be processed exactly
/// once (by the last child that finishes processing).
///
/// The only communication between siblings is that they both
/// fetch-and-subtract the parent's children count.
fn run_parallel(&self, mut unsafe_flow: UnsafeFlow) {
loop {
// Get a real flow.
let flow: &mut Flow = unsafe {
mem::transmute(unsafe_flow)
};
// Perform the appropriate traversal.
if self.should_process(flow) {
self.process(flow);
}
let base = flow::mut_base(flow);
// Reset the count of children for the next layout traversal.
base.parallel.children_count.store(base.children.len() as isize,
Ordering::Relaxed);
// Possibly enqueue the parent.
let unsafe_parent = base.parallel.parent;
if unsafe_parent == null_unsafe_flow() {
// We're done!
break
}
// No, we're not at the root yet. Then are we the last child
// of our parent to finish processing? If so, we can continue
// on with our parent; otherwise, we've gotta wait.
let parent: &mut Flow = unsafe {
mem::transmute(unsafe_parent)
};
let parent_base = flow::mut_base(parent);
if parent_base.parallel.children_count.fetch_sub(1, Ordering::Relaxed) == 1 {
// We were the last child of our parent. Reflow our parent.
unsafe_flow = unsafe_parent
} else {
// Stop.
break
}
}
}
}
/// A parallel top-down flow traversal.
trait ParallelPreorderFlowTraversal : PreorderFlowTraversal {
fn run_parallel(&self,
unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>);
fn should_record_thread_ids(&self) -> bool;
#[inline(always)]
fn run_parallel_helper(&self,
unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>,
top_down_func: ChunkedFlowTraversalFunction,
bottom_up_func: FlowTraversalFunction) {
let mut discovered_child_flows = Vec::new();
for unsafe_flow in *unsafe_flows.0 {
let mut had_children = false;
unsafe {
// Get a real flow.
let flow: &mut Flow = mem::transmute(unsafe_flow);
if self.should_record_thread_ids() {
flow::mut_base(flow).thread_id = proxy.worker_index();
}
if self.should_process(flow) {
// Perform the appropriate traversal.
self.process(flow);
}
// Possibly enqueue the children.
for kid in flow::child_iter(flow) {
had_children = true;
discovered_child_flows.push(borrowed_flow_to_unsafe_flow(kid));
}
}
// If there were no more children, start assigning block-sizes.
if !had_children {
bottom_up_func(unsafe_flow, proxy.user_data())
}
}
for chunk in discovered_child_flows.chunks(CHUNK_SIZE) {
proxy.push(WorkUnit {
fun: top_down_func,
data: (box chunk.iter().cloned().collect(), 0),
});
}
}
}
impl<'a> ParallelPreorderFlowTraversal for AssignISizes<'a> {
fn run_parallel(&self,
unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>) {
self.run_parallel_helper(unsafe_flows,
proxy,
assign_inline_sizes,
assign_block_sizes_and_store_overflow)
}
fn should_record_thread_ids(&self) -> bool {
true
}
}
impl<'a> ParallelPostorderFlowTraversal for AssignBSizes<'a> {}
fn assign_inline_sizes(unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>) {
let shared_layout_context = proxy.user_data();
let layout_context = LayoutContext::new(shared_layout_context);
let assign_inline_sizes_traversal = AssignISizes {
layout_context: &layout_context,
};
assign_inline_sizes_traversal.run_parallel(unsafe_flows, proxy)
}
fn assign_block_sizes_and_store_overflow(
unsafe_flow: UnsafeFlow,
shared_layout_context: &SharedLayoutContext) {
let layout_context = LayoutContext::new(shared_layout_context);
let assign_block_sizes_traversal = AssignBSizes {
layout_context: &layout_context,
};
assign_block_sizes_traversal.run_parallel(unsafe_flow)
}
pub fn traverse_flow_tree_preorder(
root: &mut FlowRef,
profiler_metadata: Option<TimerMetadata>,
time_profiler_chan: time::ProfilerChan,
shared_layout_context: &SharedLayoutContext,
queue: &mut WorkQueue<SharedLayoutContext, WorkQueueData>) {
if opts::get().bubble_inline_sizes_separately {
let layout_context = LayoutContext::new(shared_layout_context);
let bubble_inline_sizes = BubbleISizes { layout_context: &layout_context };
flow_ref::deref_mut(root).traverse_postorder(&bubble_inline_sizes);
}
run_queue_with_custom_work_data_type(queue, |queue| {
profile(time::ProfilerCategory::LayoutParallelWarmup, profiler_metadata,
time_profiler_chan, || {
queue.push(WorkUnit {
fun: assign_inline_sizes,
data: (box vec![mut_owned_flow_to_unsafe_flow(root)], 0),
}) | }, shared_layout_context);
} | }); | random_line_split |
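// A std-only sketch of the bottom-up hand-off described in
// `ParallelPostorderFlowTraversal::run_parallel` above: every child decrements the
// parent's atomic counter, and only the child that observes the counter reaching zero
// continues with the parent. One parent with three children stands in for the flow
// tree, and plain threads stand in for the work queue.
use std::sync::atomic::{AtomicIsize, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    let children_left = Arc::new(AtomicIsize::new(3));
    let handles: Vec<_> = (0..3)
        .map(|child| {
            let counter = Arc::clone(&children_left);
            thread::spawn(move || {
                // ... process `child` here ...
                // `fetch_sub` returns the previous value, so seeing 1 means this
                // thread just finished the last outstanding child.
                if counter.fetch_sub(1, Ordering::Relaxed) == 1 {
                    println!("child {child} finished last and now processes the parent");
                }
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
}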
parallel.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Implements parallel traversals over the DOM and flow trees.
//!
//! This code is highly unsafe. Keep this file small and easy to audit.
#![allow(unsafe_code)]
use context::{LayoutContext, SharedLayoutContext};
use flow::{self, Flow, MutableFlowUtils, PostorderFlowTraversal, PreorderFlowTraversal};
use flow_ref::{self, FlowRef};
use profile_traits::time::{self, TimerMetadata, profile};
use std::mem;
use std::sync::atomic::{AtomicIsize, Ordering};
use style::dom::{TNode, UnsafeNode};
use style::parallel::{CHUNK_SIZE, WorkQueueData};
use style::parallel::{run_queue_with_custom_work_data_type};
use traversal::AssignBSizes;
use traversal::{AssignISizes, BubbleISizes, PostorderNodeMutTraversal};
use util::opts;
use util::workqueue::{WorkQueue, WorkUnit, WorkerProxy};
pub use style::parallel::traverse_dom;
#[allow(dead_code)]
fn static_assertion(node: UnsafeNode) {
unsafe {
let _: UnsafeFlow = ::std::intrinsics::transmute(node);
}
}
/// Vtable + pointer representation of a Flow trait object.
pub type UnsafeFlow = (usize, usize);
fn null_unsafe_flow() -> UnsafeFlow {
(0, 0)
}
pub fn mut_owned_flow_to_unsafe_flow(flow: *mut FlowRef) -> UnsafeFlow {
unsafe {
mem::transmute::<&Flow, UnsafeFlow>(&**flow)
}
}
pub fn borrowed_flow_to_unsafe_flow(flow: &Flow) -> UnsafeFlow {
unsafe {
mem::transmute::<&Flow, UnsafeFlow>(flow)
}
}
pub type UnsafeFlowList = (Box<Vec<UnsafeNode>>, usize);
pub type ChunkedFlowTraversalFunction =
extern "Rust" fn(UnsafeFlowList, &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>);
pub type FlowTraversalFunction = extern "Rust" fn(UnsafeFlow, &SharedLayoutContext);
/// Information that we need stored in each flow.
pub struct FlowParallelInfo {
/// The number of children that still need work done.
pub children_count: AtomicIsize,
/// The address of the parent flow.
pub parent: UnsafeFlow,
}
impl FlowParallelInfo {
pub fn | () -> FlowParallelInfo {
FlowParallelInfo {
children_count: AtomicIsize::new(0),
parent: null_unsafe_flow(),
}
}
}
/// A parallel bottom-up flow traversal.
trait ParallelPostorderFlowTraversal : PostorderFlowTraversal {
/// Process current flow and potentially traverse its ancestors.
///
/// If we are the last child that finished processing, recursively process
/// our parent. Else, stop. Also, stop at the root.
///
/// Thus, if we start with all the leaves of a tree, we end up traversing
/// the whole tree bottom-up because each parent will be processed exactly
/// once (by the last child that finishes processing).
///
/// The only communication between siblings is that they both
/// fetch-and-subtract the parent's children count.
fn run_parallel(&self, mut unsafe_flow: UnsafeFlow) {
loop {
// Get a real flow.
let flow: &mut Flow = unsafe {
mem::transmute(unsafe_flow)
};
// Perform the appropriate traversal.
if self.should_process(flow) {
self.process(flow);
}
let base = flow::mut_base(flow);
// Reset the count of children for the next layout traversal.
base.parallel.children_count.store(base.children.len() as isize,
Ordering::Relaxed);
// Possibly enqueue the parent.
let unsafe_parent = base.parallel.parent;
if unsafe_parent == null_unsafe_flow() {
// We're done!
break
}
// No, we're not at the root yet. Then are we the last child
// of our parent to finish processing? If so, we can continue
// on with our parent; otherwise, we've gotta wait.
let parent: &mut Flow = unsafe {
mem::transmute(unsafe_parent)
};
let parent_base = flow::mut_base(parent);
if parent_base.parallel.children_count.fetch_sub(1, Ordering::Relaxed) == 1 {
// We were the last child of our parent. Reflow our parent.
unsafe_flow = unsafe_parent
} else {
// Stop.
break
}
}
}
}
/// A parallel top-down flow traversal.
trait ParallelPreorderFlowTraversal : PreorderFlowTraversal {
fn run_parallel(&self,
unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>);
fn should_record_thread_ids(&self) -> bool;
#[inline(always)]
fn run_parallel_helper(&self,
unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>,
top_down_func: ChunkedFlowTraversalFunction,
bottom_up_func: FlowTraversalFunction) {
let mut discovered_child_flows = Vec::new();
for unsafe_flow in *unsafe_flows.0 {
let mut had_children = false;
unsafe {
// Get a real flow.
let flow: &mut Flow = mem::transmute(unsafe_flow);
if self.should_record_thread_ids() {
flow::mut_base(flow).thread_id = proxy.worker_index();
}
if self.should_process(flow) {
// Perform the appropriate traversal.
self.process(flow);
}
// Possibly enqueue the children.
for kid in flow::child_iter(flow) {
had_children = true;
discovered_child_flows.push(borrowed_flow_to_unsafe_flow(kid));
}
}
// If there were no more children, start assigning block-sizes.
if !had_children {
bottom_up_func(unsafe_flow, proxy.user_data())
}
}
for chunk in discovered_child_flows.chunks(CHUNK_SIZE) {
proxy.push(WorkUnit {
fun: top_down_func,
data: (box chunk.iter().cloned().collect(), 0),
});
}
}
}
impl<'a> ParallelPreorderFlowTraversal for AssignISizes<'a> {
fn run_parallel(&self,
unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>) {
self.run_parallel_helper(unsafe_flows,
proxy,
assign_inline_sizes,
assign_block_sizes_and_store_overflow)
}
fn should_record_thread_ids(&self) -> bool {
true
}
}
impl<'a> ParallelPostorderFlowTraversal for AssignBSizes<'a> {}
fn assign_inline_sizes(unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>) {
let shared_layout_context = proxy.user_data();
let layout_context = LayoutContext::new(shared_layout_context);
let assign_inline_sizes_traversal = AssignISizes {
layout_context: &layout_context,
};
assign_inline_sizes_traversal.run_parallel(unsafe_flows, proxy)
}
fn assign_block_sizes_and_store_overflow(
unsafe_flow: UnsafeFlow,
shared_layout_context: &SharedLayoutContext) {
let layout_context = LayoutContext::new(shared_layout_context);
let assign_block_sizes_traversal = AssignBSizes {
layout_context: &layout_context,
};
assign_block_sizes_traversal.run_parallel(unsafe_flow)
}
pub fn traverse_flow_tree_preorder(
root: &mut FlowRef,
profiler_metadata: Option<TimerMetadata>,
time_profiler_chan: time::ProfilerChan,
shared_layout_context: &SharedLayoutContext,
queue: &mut WorkQueue<SharedLayoutContext, WorkQueueData>) {
if opts::get().bubble_inline_sizes_separately {
let layout_context = LayoutContext::new(shared_layout_context);
let bubble_inline_sizes = BubbleISizes { layout_context: &layout_context };
flow_ref::deref_mut(root).traverse_postorder(&bubble_inline_sizes);
}
run_queue_with_custom_work_data_type(queue, |queue| {
profile(time::ProfilerCategory::LayoutParallelWarmup, profiler_metadata,
time_profiler_chan, || {
queue.push(WorkUnit {
fun: assign_inline_sizes,
data: (box vec![mut_owned_flow_to_unsafe_flow(root)], 0),
})
});
}, shared_layout_context);
}
| new | identifier_name |
parallel.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Implements parallel traversals over the DOM and flow trees.
//!
//! This code is highly unsafe. Keep this file small and easy to audit.
#![allow(unsafe_code)]
use context::{LayoutContext, SharedLayoutContext};
use flow::{self, Flow, MutableFlowUtils, PostorderFlowTraversal, PreorderFlowTraversal};
use flow_ref::{self, FlowRef};
use profile_traits::time::{self, TimerMetadata, profile};
use std::mem;
use std::sync::atomic::{AtomicIsize, Ordering};
use style::dom::{TNode, UnsafeNode};
use style::parallel::{CHUNK_SIZE, WorkQueueData};
use style::parallel::{run_queue_with_custom_work_data_type};
use traversal::AssignBSizes;
use traversal::{AssignISizes, BubbleISizes, PostorderNodeMutTraversal};
use util::opts;
use util::workqueue::{WorkQueue, WorkUnit, WorkerProxy};
pub use style::parallel::traverse_dom;
#[allow(dead_code)]
fn static_assertion(node: UnsafeNode) {
unsafe {
let _: UnsafeFlow = ::std::intrinsics::transmute(node);
}
}
/// Vtable + pointer representation of a Flow trait object.
pub type UnsafeFlow = (usize, usize);
fn null_unsafe_flow() -> UnsafeFlow {
(0, 0)
}
pub fn mut_owned_flow_to_unsafe_flow(flow: *mut FlowRef) -> UnsafeFlow {
unsafe {
mem::transmute::<&Flow, UnsafeFlow>(&**flow)
}
}
pub fn borrowed_flow_to_unsafe_flow(flow: &Flow) -> UnsafeFlow {
unsafe {
mem::transmute::<&Flow, UnsafeFlow>(flow)
}
}
pub type UnsafeFlowList = (Box<Vec<UnsafeNode>>, usize);
pub type ChunkedFlowTraversalFunction =
extern "Rust" fn(UnsafeFlowList, &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>);
pub type FlowTraversalFunction = extern "Rust" fn(UnsafeFlow, &SharedLayoutContext);
/// Information that we need stored in each flow.
pub struct FlowParallelInfo {
/// The number of children that still need work done.
pub children_count: AtomicIsize,
/// The address of the parent flow.
pub parent: UnsafeFlow,
}
impl FlowParallelInfo {
pub fn new() -> FlowParallelInfo {
FlowParallelInfo {
children_count: AtomicIsize::new(0),
parent: null_unsafe_flow(),
}
}
}
/// A parallel bottom-up flow traversal.
trait ParallelPostorderFlowTraversal : PostorderFlowTraversal {
/// Process current flow and potentially traverse its ancestors.
///
/// If we are the last child that finished processing, recursively process
/// our parent. Else, stop. Also, stop at the root.
///
/// Thus, if we start with all the leaves of a tree, we end up traversing
/// the whole tree bottom-up because each parent will be processed exactly
/// once (by the last child that finishes processing).
///
/// The only communication between siblings is that they both
/// fetch-and-subtract the parent's children count.
fn run_parallel(&self, mut unsafe_flow: UnsafeFlow) {
loop {
// Get a real flow.
let flow: &mut Flow = unsafe {
mem::transmute(unsafe_flow)
};
// Perform the appropriate traversal.
if self.should_process(flow) {
self.process(flow);
}
let base = flow::mut_base(flow);
// Reset the count of children for the next layout traversal.
base.parallel.children_count.store(base.children.len() as isize,
Ordering::Relaxed);
// Possibly enqueue the parent.
let unsafe_parent = base.parallel.parent;
if unsafe_parent == null_unsafe_flow() {
// We're done!
break
}
// No, we're not at the root yet. Then are we the last child
// of our parent to finish processing? If so, we can continue
// on with our parent; otherwise, we've gotta wait.
let parent: &mut Flow = unsafe {
mem::transmute(unsafe_parent)
};
let parent_base = flow::mut_base(parent);
if parent_base.parallel.children_count.fetch_sub(1, Ordering::Relaxed) == 1 {
// We were the last child of our parent. Reflow our parent.
unsafe_flow = unsafe_parent
} else {
// Stop.
break
}
}
}
}
/// A parallel top-down flow traversal.
trait ParallelPreorderFlowTraversal : PreorderFlowTraversal {
fn run_parallel(&self,
unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>);
fn should_record_thread_ids(&self) -> bool;
#[inline(always)]
fn run_parallel_helper(&self,
unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>,
top_down_func: ChunkedFlowTraversalFunction,
bottom_up_func: FlowTraversalFunction) {
let mut discovered_child_flows = Vec::new();
for unsafe_flow in *unsafe_flows.0 {
let mut had_children = false;
unsafe {
// Get a real flow.
let flow: &mut Flow = mem::transmute(unsafe_flow);
if self.should_record_thread_ids() {
flow::mut_base(flow).thread_id = proxy.worker_index();
}
if self.should_process(flow) {
// Perform the appropriate traversal.
self.process(flow);
}
// Possibly enqueue the children.
for kid in flow::child_iter(flow) {
had_children = true;
discovered_child_flows.push(borrowed_flow_to_unsafe_flow(kid));
}
}
// If there were no more children, start assigning block-sizes.
if !had_children {
bottom_up_func(unsafe_flow, proxy.user_data())
}
}
for chunk in discovered_child_flows.chunks(CHUNK_SIZE) {
proxy.push(WorkUnit {
fun: top_down_func,
data: (box chunk.iter().cloned().collect(), 0),
});
}
}
}
impl<'a> ParallelPreorderFlowTraversal for AssignISizes<'a> {
fn run_parallel(&self,
unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>) {
self.run_parallel_helper(unsafe_flows,
proxy,
assign_inline_sizes,
assign_block_sizes_and_store_overflow)
}
fn should_record_thread_ids(&self) -> bool {
true
}
}
impl<'a> ParallelPostorderFlowTraversal for AssignBSizes<'a> {}
fn assign_inline_sizes(unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>) {
let shared_layout_context = proxy.user_data();
let layout_context = LayoutContext::new(shared_layout_context);
let assign_inline_sizes_traversal = AssignISizes {
layout_context: &layout_context,
};
assign_inline_sizes_traversal.run_parallel(unsafe_flows, proxy)
}
fn assign_block_sizes_and_store_overflow(
unsafe_flow: UnsafeFlow,
shared_layout_context: &SharedLayoutContext) {
let layout_context = LayoutContext::new(shared_layout_context);
let assign_block_sizes_traversal = AssignBSizes {
layout_context: &layout_context,
};
assign_block_sizes_traversal.run_parallel(unsafe_flow)
}
pub fn traverse_flow_tree_preorder(
root: &mut FlowRef,
profiler_metadata: Option<TimerMetadata>,
time_profiler_chan: time::ProfilerChan,
shared_layout_context: &SharedLayoutContext,
queue: &mut WorkQueue<SharedLayoutContext, WorkQueueData>) | {
if opts::get().bubble_inline_sizes_separately {
let layout_context = LayoutContext::new(shared_layout_context);
let bubble_inline_sizes = BubbleISizes { layout_context: &layout_context };
flow_ref::deref_mut(root).traverse_postorder(&bubble_inline_sizes);
}
run_queue_with_custom_work_data_type(queue, |queue| {
profile(time::ProfilerCategory::LayoutParallelWarmup, profiler_metadata,
time_profiler_chan, || {
queue.push(WorkUnit {
fun: assign_inline_sizes,
data: (box vec![mut_owned_flow_to_unsafe_flow(root)], 0),
})
});
}, shared_layout_context);
} | identifier_body |
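// A small sketch of the fan-out step in `run_parallel_helper` above: child flows
// discovered during the top-down pass are pushed to the work queue in fixed-size
// chunks. `slice::chunks` does the same batching here on made-up node ids; the chunk
// size mirrors the idea of CHUNK_SIZE but the value is arbitrary.
fn main() {
    const CHUNK_SIZE: usize = 64;
    let discovered_children: Vec<usize> = (0..150).collect();
    let mut work_units = 0;
    for chunk in discovered_children.chunks(CHUNK_SIZE) {
        // In the real code each chunk becomes one WorkUnit pushed onto the queue.
        assert!(chunk.len() <= CHUNK_SIZE);
        work_units += 1;
    }
    assert_eq!(work_units, 3); // 64 + 64 + 22
}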
|
parallel.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Implements parallel traversals over the DOM and flow trees.
//!
//! This code is highly unsafe. Keep this file small and easy to audit.
#![allow(unsafe_code)]
use context::{LayoutContext, SharedLayoutContext};
use flow::{self, Flow, MutableFlowUtils, PostorderFlowTraversal, PreorderFlowTraversal};
use flow_ref::{self, FlowRef};
use profile_traits::time::{self, TimerMetadata, profile};
use std::mem;
use std::sync::atomic::{AtomicIsize, Ordering};
use style::dom::{TNode, UnsafeNode};
use style::parallel::{CHUNK_SIZE, WorkQueueData};
use style::parallel::{run_queue_with_custom_work_data_type};
use traversal::AssignBSizes;
use traversal::{AssignISizes, BubbleISizes, PostorderNodeMutTraversal};
use util::opts;
use util::workqueue::{WorkQueue, WorkUnit, WorkerProxy};
pub use style::parallel::traverse_dom;
#[allow(dead_code)]
fn static_assertion(node: UnsafeNode) {
unsafe {
let _: UnsafeFlow = ::std::intrinsics::transmute(node);
}
}
/// Vtable + pointer representation of a Flow trait object.
pub type UnsafeFlow = (usize, usize);
fn null_unsafe_flow() -> UnsafeFlow {
(0, 0)
}
pub fn mut_owned_flow_to_unsafe_flow(flow: *mut FlowRef) -> UnsafeFlow {
unsafe {
mem::transmute::<&Flow, UnsafeFlow>(&**flow)
}
}
pub fn borrowed_flow_to_unsafe_flow(flow: &Flow) -> UnsafeFlow {
unsafe {
mem::transmute::<&Flow, UnsafeFlow>(flow)
}
}
pub type UnsafeFlowList = (Box<Vec<UnsafeNode>>, usize);
pub type ChunkedFlowTraversalFunction =
extern "Rust" fn(UnsafeFlowList, &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>);
pub type FlowTraversalFunction = extern "Rust" fn(UnsafeFlow, &SharedLayoutContext);
/// Information that we need stored in each flow.
pub struct FlowParallelInfo {
/// The number of children that still need work done.
pub children_count: AtomicIsize,
/// The address of the parent flow.
pub parent: UnsafeFlow,
}
impl FlowParallelInfo {
pub fn new() -> FlowParallelInfo {
FlowParallelInfo {
children_count: AtomicIsize::new(0),
parent: null_unsafe_flow(),
}
}
}
/// A parallel bottom-up flow traversal.
trait ParallelPostorderFlowTraversal : PostorderFlowTraversal {
/// Process current flow and potentially traverse its ancestors.
///
/// If we are the last child that finished processing, recursively process
/// our parent. Else, stop. Also, stop at the root.
///
/// Thus, if we start with all the leaves of a tree, we end up traversing
/// the whole tree bottom-up because each parent will be processed exactly
/// once (by the last child that finishes processing).
///
/// The only communication between siblings is that they both
/// fetch-and-subtract the parent's children count.
fn run_parallel(&self, mut unsafe_flow: UnsafeFlow) {
loop {
// Get a real flow.
let flow: &mut Flow = unsafe {
mem::transmute(unsafe_flow)
};
// Perform the appropriate traversal.
if self.should_process(flow) {
self.process(flow);
}
let base = flow::mut_base(flow);
// Reset the count of children for the next layout traversal.
base.parallel.children_count.store(base.children.len() as isize,
Ordering::Relaxed);
// Possibly enqueue the parent.
let unsafe_parent = base.parallel.parent;
if unsafe_parent == null_unsafe_flow() {
// We're done!
break
}
// No, we're not at the root yet. Then are we the last child
// of our parent to finish processing? If so, we can continue
// on with our parent; otherwise, we've gotta wait.
let parent: &mut Flow = unsafe {
mem::transmute(unsafe_parent)
};
let parent_base = flow::mut_base(parent);
if parent_base.parallel.children_count.fetch_sub(1, Ordering::Relaxed) == 1 {
// We were the last child of our parent. Reflow our parent.
unsafe_flow = unsafe_parent
} else {
// Stop.
break
}
}
}
}
/// A parallel top-down flow traversal.
trait ParallelPreorderFlowTraversal : PreorderFlowTraversal {
fn run_parallel(&self,
unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>);
fn should_record_thread_ids(&self) -> bool;
#[inline(always)]
fn run_parallel_helper(&self,
unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>,
top_down_func: ChunkedFlowTraversalFunction,
bottom_up_func: FlowTraversalFunction) {
let mut discovered_child_flows = Vec::new();
for unsafe_flow in *unsafe_flows.0 {
let mut had_children = false;
unsafe {
// Get a real flow.
let flow: &mut Flow = mem::transmute(unsafe_flow);
if self.should_record_thread_ids() |
if self.should_process(flow) {
// Perform the appropriate traversal.
self.process(flow);
}
// Possibly enqueue the children.
for kid in flow::child_iter(flow) {
had_children = true;
discovered_child_flows.push(borrowed_flow_to_unsafe_flow(kid));
}
}
// If there were no more children, start assigning block-sizes.
if !had_children {
bottom_up_func(unsafe_flow, proxy.user_data())
}
}
for chunk in discovered_child_flows.chunks(CHUNK_SIZE) {
proxy.push(WorkUnit {
fun: top_down_func,
data: (box chunk.iter().cloned().collect(), 0),
});
}
}
}
impl<'a> ParallelPreorderFlowTraversal for AssignISizes<'a> {
fn run_parallel(&self,
unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>) {
self.run_parallel_helper(unsafe_flows,
proxy,
assign_inline_sizes,
assign_block_sizes_and_store_overflow)
}
fn should_record_thread_ids(&self) -> bool {
true
}
}
impl<'a> ParallelPostorderFlowTraversal for AssignBSizes<'a> {}
fn assign_inline_sizes(unsafe_flows: UnsafeFlowList,
proxy: &mut WorkerProxy<SharedLayoutContext, UnsafeFlowList>) {
let shared_layout_context = proxy.user_data();
let layout_context = LayoutContext::new(shared_layout_context);
let assign_inline_sizes_traversal = AssignISizes {
layout_context: &layout_context,
};
assign_inline_sizes_traversal.run_parallel(unsafe_flows, proxy)
}
fn assign_block_sizes_and_store_overflow(
unsafe_flow: UnsafeFlow,
shared_layout_context: &SharedLayoutContext) {
let layout_context = LayoutContext::new(shared_layout_context);
let assign_block_sizes_traversal = AssignBSizes {
layout_context: &layout_context,
};
assign_block_sizes_traversal.run_parallel(unsafe_flow)
}
pub fn traverse_flow_tree_preorder(
root: &mut FlowRef,
profiler_metadata: Option<TimerMetadata>,
time_profiler_chan: time::ProfilerChan,
shared_layout_context: &SharedLayoutContext,
queue: &mut WorkQueue<SharedLayoutContext, WorkQueueData>) {
if opts::get().bubble_inline_sizes_separately {
let layout_context = LayoutContext::new(shared_layout_context);
let bubble_inline_sizes = BubbleISizes { layout_context: &layout_context };
flow_ref::deref_mut(root).traverse_postorder(&bubble_inline_sizes);
}
run_queue_with_custom_work_data_type(queue, |queue| {
profile(time::ProfilerCategory::LayoutParallelWarmup, profiler_metadata,
time_profiler_chan, || {
queue.push(WorkUnit {
fun: assign_inline_sizes,
data: (box vec![mut_owned_flow_to_unsafe_flow(root)], 0),
})
});
}, shared_layout_context);
}
| {
flow::mut_base(flow).thread_id = proxy.worker_index();
} | conditional_block |
error.rs | use std::error;
use std::ffi::CStr;
use std::fmt;
use std::os::raw::c_int;
use std::result;
use std::str;
/// Error codes that the library might return.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash, PartialOrd, Ord)]
pub enum Error {
/// DNS server returned answer with no data.
ENODATA = c_ares_sys::ARES_ENODATA as isize,
/// DNS server claims query was misformatted.
EFORMERR = c_ares_sys::ARES_EFORMERR as isize,
/// DNS server returned general failure.
ESERVFAIL = c_ares_sys::ARES_ESERVFAIL as isize,
/// Domain name not found.
ENOTFOUND = c_ares_sys::ARES_ENOTFOUND as isize,
/// DNS server does not implement requested operation.
ENOTIMP = c_ares_sys::ARES_ENOTIMP as isize,
/// DNS server refused query.
EREFUSED = c_ares_sys::ARES_EREFUSED as isize,
/// Misformatted DNS query.
EBADQUERY = c_ares_sys::ARES_EBADQUERY as isize,
/// Misformatted domain name.
EBADNAME = c_ares_sys::ARES_EBADNAME as isize,
/// Unsupported address family.
EBADFAMILY = c_ares_sys::ARES_EBADFAMILY as isize,
/// Misformatted DNS reply.
EBADRESP = c_ares_sys::ARES_EBADRESP as isize,
/// Could not contact DNS servers.
ECONNREFUSED = c_ares_sys::ARES_ECONNREFUSED as isize,
/// Timeout while contacting DNS servers.
ETIMEOUT = c_ares_sys::ARES_ETIMEOUT as isize,
/// End of file.
EOF = c_ares_sys::ARES_EOF as isize,
/// Error reading file.
EFILE = c_ares_sys::ARES_EFILE as isize,
/// Out of memory.
ENOMEM = c_ares_sys::ARES_ENOMEM as isize,
/// Channel is being destroyed.
EDESTRUCTION = c_ares_sys::ARES_EDESTRUCTION as isize,
/// Misformatted string.
EBADSTR = c_ares_sys::ARES_EBADSTR as isize,
/// Illegal flags specified.
EBADFLAGS = c_ares_sys::ARES_EBADFLAGS as isize,
/// Given hostname is not numeric.
ENONAME = c_ares_sys::ARES_ENONAME as isize,
/// Illegal hints flags specified.
EBADHINTS = c_ares_sys::ARES_EBADHINTS as isize,
/// c-ares library initialization not yet performed.
ENOTINITIALIZED = c_ares_sys::ARES_ENOTINITIALIZED as isize,
/// Error loading iphlpapi.dll.
ELOADIPHLPAPI = c_ares_sys::ARES_ELOADIPHLPAPI as isize,
/// Could not find GetNetworkParams function.
EADDRGETNETWORKPARAMS = c_ares_sys::ARES_EADDRGETNETWORKPARAMS as isize,
/// DNS query cancelled.
ECANCELLED = c_ares_sys::ARES_ECANCELLED as isize,
/// Unknown error.
UNKNOWN,
}
impl error::Error for Error {}
impl fmt::Display for Error {
fn | (&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
let text = unsafe {
let ptr = c_ares_sys::ares_strerror(*self as c_int);
let buf = CStr::from_ptr(ptr).to_bytes();
str::from_utf8_unchecked(buf)
};
fmt.write_str(text)
}
}
impl From<i32> for Error {
fn from(code: i32) -> Self {
match code {
c_ares_sys::ARES_ENODATA => Error::ENODATA,
c_ares_sys::ARES_EFORMERR => Error::EFORMERR,
c_ares_sys::ARES_ESERVFAIL => Error::ESERVFAIL,
c_ares_sys::ARES_ENOTFOUND => Error::ENOTFOUND,
c_ares_sys::ARES_ENOTIMP => Error::ENOTIMP,
c_ares_sys::ARES_EREFUSED => Error::EREFUSED,
c_ares_sys::ARES_EBADQUERY => Error::EBADQUERY,
c_ares_sys::ARES_EBADNAME => Error::EBADNAME,
c_ares_sys::ARES_EBADFAMILY => Error::EBADFAMILY,
c_ares_sys::ARES_EBADRESP => Error::EBADRESP,
c_ares_sys::ARES_ECONNREFUSED => Error::ECONNREFUSED,
c_ares_sys::ARES_ETIMEOUT => Error::ETIMEOUT,
c_ares_sys::ARES_EOF => Error::EOF,
c_ares_sys::ARES_EFILE => Error::EFILE,
c_ares_sys::ARES_ENOMEM => Error::ENOMEM,
c_ares_sys::ARES_EDESTRUCTION => Error::EDESTRUCTION,
c_ares_sys::ARES_EBADSTR => Error::EBADSTR,
c_ares_sys::ARES_EBADFLAGS => Error::EBADFLAGS,
c_ares_sys::ARES_ENONAME => Error::ENONAME,
c_ares_sys::ARES_EBADHINTS => Error::EBADHINTS,
c_ares_sys::ARES_ENOTINITIALIZED => Error::ENOTINITIALIZED,
c_ares_sys::ARES_ELOADIPHLPAPI => Error::ELOADIPHLPAPI,
c_ares_sys::ARES_EADDRGETNETWORKPARAMS => Error::EADDRGETNETWORKPARAMS,
c_ares_sys::ARES_ECANCELLED => Error::ECANCELLED,
_ => Error::UNKNOWN,
}
}
}
/// The type used by this library for methods that might fail.
pub type Result<T> = result::Result<T, Error>;
| fmt | identifier_name |
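// A self-contained sketch of the conversion pattern used in the file above: raw C
// status codes are mapped into a Rust enum through `From<i32>`, with a catch-all
// variant for unrecognised codes. The enum, codes and names here are made up; the real
// file maps c_ares_sys constants to `Error`.
#[derive(Debug, PartialEq)]
enum Status {
    NoData,
    NotFound,
    Unknown,
}

impl From<i32> for Status {
    fn from(code: i32) -> Self {
        match code {
            1 => Status::NoData,
            4 => Status::NotFound,
            _ => Status::Unknown,
        }
    }
}

fn main() {
    assert_eq!(Status::from(4), Status::NotFound);
    assert_eq!(Status::from(999), Status::Unknown);
}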
error.rs | use std::error;
use std::ffi::CStr;
use std::fmt;
use std::os::raw::c_int;
use std::result;
use std::str;
/// Error codes that the library might return. | ENODATA = c_ares_sys::ARES_ENODATA as isize,
/// DNS server claims query was misformatted.
EFORMERR = c_ares_sys::ARES_EFORMERR as isize,
/// DNS server returned general failure.
ESERVFAIL = c_ares_sys::ARES_ESERVFAIL as isize,
/// Domain name not found.
ENOTFOUND = c_ares_sys::ARES_ENOTFOUND as isize,
/// DNS server does not implement requested operation.
ENOTIMP = c_ares_sys::ARES_ENOTIMP as isize,
/// DNS server refused query.
EREFUSED = c_ares_sys::ARES_EREFUSED as isize,
/// Misformatted DNS query.
EBADQUERY = c_ares_sys::ARES_EBADQUERY as isize,
/// Misformatted domain name.
EBADNAME = c_ares_sys::ARES_EBADNAME as isize,
/// Unsupported address family.
EBADFAMILY = c_ares_sys::ARES_EBADFAMILY as isize,
/// Misformatted DNS reply.
EBADRESP = c_ares_sys::ARES_EBADRESP as isize,
/// Could not contact DNS servers.
ECONNREFUSED = c_ares_sys::ARES_ECONNREFUSED as isize,
/// Timeout while contacting DNS servers.
ETIMEOUT = c_ares_sys::ARES_ETIMEOUT as isize,
/// End of file.
EOF = c_ares_sys::ARES_EOF as isize,
/// Error reading file.
EFILE = c_ares_sys::ARES_EFILE as isize,
/// Out of memory.
ENOMEM = c_ares_sys::ARES_ENOMEM as isize,
/// Channel is being destroyed.
EDESTRUCTION = c_ares_sys::ARES_EDESTRUCTION as isize,
/// Misformatted string.
EBADSTR = c_ares_sys::ARES_EBADSTR as isize,
/// Illegal flags specified.
EBADFLAGS = c_ares_sys::ARES_EBADFLAGS as isize,
/// Given hostname is not numeric.
ENONAME = c_ares_sys::ARES_ENONAME as isize,
/// Illegal hints flags specified.
EBADHINTS = c_ares_sys::ARES_EBADHINTS as isize,
/// c-ares library initialization not yet performed.
ENOTINITIALIZED = c_ares_sys::ARES_ENOTINITIALIZED as isize,
/// Error loading iphlpapi.dll.
ELOADIPHLPAPI = c_ares_sys::ARES_ELOADIPHLPAPI as isize,
/// Could not find GetNetworkParams function.
EADDRGETNETWORKPARAMS = c_ares_sys::ARES_EADDRGETNETWORKPARAMS as isize,
/// DNS query cancelled.
ECANCELLED = c_ares_sys::ARES_ECANCELLED as isize,
/// Unknown error.
UNKNOWN,
}
impl error::Error for Error {}
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
let text = unsafe {
let ptr = c_ares_sys::ares_strerror(*self as c_int);
let buf = CStr::from_ptr(ptr).to_bytes();
str::from_utf8_unchecked(buf)
};
fmt.write_str(text)
}
}
impl From<i32> for Error {
fn from(code: i32) -> Self {
match code {
c_ares_sys::ARES_ENODATA => Error::ENODATA,
c_ares_sys::ARES_EFORMERR => Error::EFORMERR,
c_ares_sys::ARES_ESERVFAIL => Error::ESERVFAIL,
c_ares_sys::ARES_ENOTFOUND => Error::ENOTFOUND,
c_ares_sys::ARES_ENOTIMP => Error::ENOTIMP,
c_ares_sys::ARES_EREFUSED => Error::EREFUSED,
c_ares_sys::ARES_EBADQUERY => Error::EBADQUERY,
c_ares_sys::ARES_EBADNAME => Error::EBADNAME,
c_ares_sys::ARES_EBADFAMILY => Error::EBADFAMILY,
c_ares_sys::ARES_EBADRESP => Error::EBADRESP,
c_ares_sys::ARES_ECONNREFUSED => Error::ECONNREFUSED,
c_ares_sys::ARES_ETIMEOUT => Error::ETIMEOUT,
c_ares_sys::ARES_EOF => Error::EOF,
c_ares_sys::ARES_EFILE => Error::EFILE,
c_ares_sys::ARES_ENOMEM => Error::ENOMEM,
c_ares_sys::ARES_EDESTRUCTION => Error::EDESTRUCTION,
c_ares_sys::ARES_EBADSTR => Error::EBADSTR,
c_ares_sys::ARES_EBADFLAGS => Error::EBADFLAGS,
c_ares_sys::ARES_ENONAME => Error::ENONAME,
c_ares_sys::ARES_EBADHINTS => Error::EBADHINTS,
c_ares_sys::ARES_ENOTINITIALIZED => Error::ENOTINITIALIZED,
c_ares_sys::ARES_ELOADIPHLPAPI => Error::ELOADIPHLPAPI,
c_ares_sys::ARES_EADDRGETNETWORKPARAMS => Error::EADDRGETNETWORKPARAMS,
c_ares_sys::ARES_ECANCELLED => Error::ECANCELLED,
_ => Error::UNKNOWN,
}
}
}
/// The type used by this library for methods that might fail.
pub type Result<T> = result::Result<T, Error>; | #[derive(Copy, Clone, Eq, PartialEq, Debug, Hash, PartialOrd, Ord)]
pub enum Error {
/// DNS server returned answer with no data. | random_line_split |
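// A sketch of the `fmt::Display` approach used above, minus the FFI: the real impl
// fetches the message from `ares_strerror`, while this stand-in keeps a static message
// per variant. Types and messages are illustrative only.
use std::fmt;

#[derive(Debug)]
enum Status {
    NoData,
    NotFound,
}

impl fmt::Display for Status {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let text = match self {
            Status::NoData => "DNS server returned answer with no data",
            Status::NotFound => "Domain name not found",
        };
        f.write_str(text)
    }
}

fn main() {
    assert_eq!(Status::NoData.to_string(), "DNS server returned answer with no data");
    assert_eq!(Status::NotFound.to_string(), "Domain name not found");
}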
error.rs | use std::error;
use std::ffi::CStr;
use std::fmt;
use std::os::raw::c_int;
use std::result;
use std::str;
/// Error codes that the library might return.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash, PartialOrd, Ord)]
pub enum Error {
/// DNS server returned answer with no data.
ENODATA = c_ares_sys::ARES_ENODATA as isize,
/// DNS server claims query was misformatted.
EFORMERR = c_ares_sys::ARES_EFORMERR as isize,
/// DNS server returned general failure.
ESERVFAIL = c_ares_sys::ARES_ESERVFAIL as isize,
/// Domain name not found.
ENOTFOUND = c_ares_sys::ARES_ENOTFOUND as isize,
/// DNS server does not implement requested operation.
ENOTIMP = c_ares_sys::ARES_ENOTIMP as isize,
/// DNS server refused query.
EREFUSED = c_ares_sys::ARES_EREFUSED as isize,
/// Misformatted DNS query.
EBADQUERY = c_ares_sys::ARES_EBADQUERY as isize,
/// Misformatted domain name.
EBADNAME = c_ares_sys::ARES_EBADNAME as isize,
/// Unsupported address family.
EBADFAMILY = c_ares_sys::ARES_EBADFAMILY as isize,
/// Misformatted DNS reply.
EBADRESP = c_ares_sys::ARES_EBADRESP as isize,
/// Could not contact DNS servers.
ECONNREFUSED = c_ares_sys::ARES_ECONNREFUSED as isize,
/// Timeout while contacting DNS servers.
ETIMEOUT = c_ares_sys::ARES_ETIMEOUT as isize,
/// End of file.
EOF = c_ares_sys::ARES_EOF as isize,
/// Error reading file.
EFILE = c_ares_sys::ARES_EFILE as isize,
/// Out of memory.
ENOMEM = c_ares_sys::ARES_ENOMEM as isize,
/// Channel is being destroyed.
EDESTRUCTION = c_ares_sys::ARES_EDESTRUCTION as isize,
/// Misformatted string.
EBADSTR = c_ares_sys::ARES_EBADSTR as isize,
/// Illegal flags specified.
EBADFLAGS = c_ares_sys::ARES_EBADFLAGS as isize,
/// Given hostname is not numeric.
ENONAME = c_ares_sys::ARES_ENONAME as isize,
/// Illegal hints flags specified.
EBADHINTS = c_ares_sys::ARES_EBADHINTS as isize,
/// c-ares library initialization not yet performed.
ENOTINITIALIZED = c_ares_sys::ARES_ENOTINITIALIZED as isize,
/// Error loading iphlpapi.dll.
ELOADIPHLPAPI = c_ares_sys::ARES_ELOADIPHLPAPI as isize,
/// Could not find GetNetworkParams function.
EADDRGETNETWORKPARAMS = c_ares_sys::ARES_EADDRGETNETWORKPARAMS as isize,
/// DNS query cancelled.
ECANCELLED = c_ares_sys::ARES_ECANCELLED as isize,
/// Unknown error.
UNKNOWN,
}
impl error::Error for Error {}
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> |
}
impl From<i32> for Error {
fn from(code: i32) -> Self {
match code {
c_ares_sys::ARES_ENODATA => Error::ENODATA,
c_ares_sys::ARES_EFORMERR => Error::EFORMERR,
c_ares_sys::ARES_ESERVFAIL => Error::ESERVFAIL,
c_ares_sys::ARES_ENOTFOUND => Error::ENOTFOUND,
c_ares_sys::ARES_ENOTIMP => Error::ENOTIMP,
c_ares_sys::ARES_EREFUSED => Error::EREFUSED,
c_ares_sys::ARES_EBADQUERY => Error::EBADQUERY,
c_ares_sys::ARES_EBADNAME => Error::EBADNAME,
c_ares_sys::ARES_EBADFAMILY => Error::EBADFAMILY,
c_ares_sys::ARES_EBADRESP => Error::EBADRESP,
c_ares_sys::ARES_ECONNREFUSED => Error::ECONNREFUSED,
c_ares_sys::ARES_ETIMEOUT => Error::ETIMEOUT,
c_ares_sys::ARES_EOF => Error::EOF,
c_ares_sys::ARES_EFILE => Error::EFILE,
c_ares_sys::ARES_ENOMEM => Error::ENOMEM,
c_ares_sys::ARES_EDESTRUCTION => Error::EDESTRUCTION,
c_ares_sys::ARES_EBADSTR => Error::EBADSTR,
c_ares_sys::ARES_EBADFLAGS => Error::EBADFLAGS,
c_ares_sys::ARES_ENONAME => Error::ENONAME,
c_ares_sys::ARES_EBADHINTS => Error::EBADHINTS,
c_ares_sys::ARES_ENOTINITIALIZED => Error::ENOTINITIALIZED,
c_ares_sys::ARES_ELOADIPHLPAPI => Error::ELOADIPHLPAPI,
c_ares_sys::ARES_EADDRGETNETWORKPARAMS => Error::EADDRGETNETWORKPARAMS,
c_ares_sys::ARES_ECANCELLED => Error::ECANCELLED,
_ => Error::UNKNOWN,
}
}
}
/// The type used by this library for methods that might fail.
pub type Result<T> = result::Result<T, Error>;
| {
let text = unsafe {
let ptr = c_ares_sys::ares_strerror(*self as c_int);
let buf = CStr::from_ptr(ptr).to_bytes();
str::from_utf8_unchecked(buf)
};
fmt.write_str(text)
} | identifier_body |
extends.rs | // The MIT License (MIT)
//
// Copyright (c) 2015 Vladislav Orlov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use std::io::Result;
use std::slice::Iter;
use ast::NodeType;
use ast::ExtendsNode;
use scanner::Token;
pub fn | (body: String, _iter: &mut Iter<Token>) -> Result<Option<NodeType>> {
Ok(Some(NodeType::Extends(ExtendsNode::new(body))))
} | build | identifier_name |
extends.rs | // The MIT License (MIT)
//
// Copyright (c) 2015 Vladislav Orlov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use std::io::Result;
use std::slice::Iter;
use ast::NodeType;
use ast::ExtendsNode;
use scanner::Token;
pub fn build(body: String, _iter: &mut Iter<Token>) -> Result<Option<NodeType>> | {
Ok(Some(NodeType::Extends(ExtendsNode::new(body))))
} | identifier_body |
|
extends.rs | // The MIT License (MIT)
//
// Copyright (c) 2015 Vladislav Orlov
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use std::io::Result;
use std::slice::Iter;
use ast::NodeType; | use ast::ExtendsNode;
use scanner::Token;
pub fn build(body: String, _iter: &mut Iter<Token>) -> Result<Option<NodeType>> {
Ok(Some(NodeType::Extends(ExtendsNode::new(body))))
} | random_line_split |
|
media_queries.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::ascii::StrAsciiExt;
use cssparser::parse_rule_list;
use cssparser::ast::*;
use errors::{ErrorLoggerIterator, log_css_error};
use stylesheets::{CSSRule, CSSMediaRule, parse_style_rule, parse_nested_at_rule};
use namespaces::NamespaceMap; | rules: ~[CSSRule],
}
pub struct MediaQueryList {
// "not all" is omitted from the list.
// An empty list never matches.
media_queries: ~[MediaQuery]
}
// For now, this is a "Level 2 MQ", i.e. a media type.
struct MediaQuery {
media_type: MediaQueryType,
// TODO: Level 3 MQ expressions
}
enum MediaQueryType {
All, // Always true
MediaType(MediaType),
}
#[deriving(Eq)]
pub enum MediaType {
Screen,
Print,
}
pub struct Device {
media_type: MediaType,
// TODO: Level 3 MQ data: viewport size, etc.
}
pub fn parse_media_rule(rule: AtRule, parent_rules: &mut ~[CSSRule],
namespaces: &NamespaceMap) {
let media_queries = parse_media_query_list(rule.prelude);
let block = match rule.block {
Some(block) => block,
None => {
log_css_error(rule.location, "Invalid @media rule");
return
}
};
let mut rules = ~[];
for rule in ErrorLoggerIterator(parse_rule_list(block.move_iter())) {
match rule {
QualifiedRule(rule) => parse_style_rule(rule, &mut rules, namespaces),
AtRule(rule) => parse_nested_at_rule(
rule.name.to_ascii_lower(), rule, &mut rules, namespaces),
}
}
parent_rules.push(CSSMediaRule(MediaRule {
media_queries: media_queries,
rules: rules,
}))
}
pub fn parse_media_query_list(input: &[ComponentValue]) -> MediaQueryList {
let iter = &mut input.skip_whitespace();
let mut next = iter.next();
if next.is_none() {
return MediaQueryList{ media_queries: ~[MediaQuery{media_type: All}] }
}
let mut queries = ~[];
loop {
let mq = match next {
Some(&Ident(ref value)) => {
// FIXME: Workaround for https://github.com/mozilla/rust/issues/10683
let value_lower = value.to_ascii_lower();
match value_lower.as_slice() {
"screen" => Some(MediaQuery{ media_type: MediaType(Screen) }),
"print" => Some(MediaQuery{ media_type: MediaType(Print) }),
"all" => Some(MediaQuery{ media_type: All }),
_ => None
}
},
_ => None
};
match iter.next() {
None => {
for mq in mq.move_iter() {
queries.push(mq);
}
return MediaQueryList{ media_queries: queries }
},
Some(&Comma) => {
for mq in mq.move_iter() {
queries.push(mq);
}
},
// Ignore this comma-separated part
_ => loop {
match iter.next() {
Some(&Comma) => break,
None => return MediaQueryList{ media_queries: queries },
_ => (),
}
},
}
next = iter.next();
}
}
impl MediaQueryList {
pub fn evaluate(&self, device: &Device) -> bool {
do self.media_queries.iter().any |mq| {
match mq.media_type {
MediaType(media_type) => media_type == device.media_type,
All => true,
}
// TODO: match Level 3 expressions
}
}
} |
pub struct MediaRule {
media_queries: MediaQueryList, | random_line_split |
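// A sketch of what `MediaQueryList::evaluate` above computes: a query list matches a
// device when any query is `all` or names the device's media type. Modern Rust syntax
// and local stand-in types are used instead of the 2013-era Servo definitions.
#[derive(PartialEq, Clone, Copy)]
enum MediaType {
    Screen,
    Print,
}

enum MediaQueryType {
    All,
    Media(MediaType),
}

fn evaluate(queries: &[MediaQueryType], device: MediaType) -> bool {
    queries.iter().any(|q| match q {
        MediaQueryType::All => true,
        MediaQueryType::Media(t) => *t == device,
    })
}

fn main() {
    let list = [MediaQueryType::Media(MediaType::Print), MediaQueryType::All];
    assert!(evaluate(&list, MediaType::Screen)); // "all" matches any device type
    assert!(evaluate(&list, MediaType::Print));
}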
media_queries.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::ascii::StrAsciiExt;
use cssparser::parse_rule_list;
use cssparser::ast::*;
use errors::{ErrorLoggerIterator, log_css_error};
use stylesheets::{CSSRule, CSSMediaRule, parse_style_rule, parse_nested_at_rule};
use namespaces::NamespaceMap;
pub struct MediaRule {
media_queries: MediaQueryList,
rules: ~[CSSRule],
}
pub struct MediaQueryList {
// "not all" is omitted from the list.
// An empty list never matches.
media_queries: ~[MediaQuery]
}
// For now, this is a "Level 2 MQ", i.e. a media type.
struct | {
media_type: MediaQueryType,
// TODO: Level 3 MQ expressions
}
enum MediaQueryType {
All, // Always true
MediaType(MediaType),
}
#[deriving(Eq)]
pub enum MediaType {
Screen,
Print,
}
pub struct Device {
media_type: MediaType,
// TODO: Level 3 MQ data: viewport size, etc.
}
pub fn parse_media_rule(rule: AtRule, parent_rules: &mut ~[CSSRule],
namespaces: &NamespaceMap) {
let media_queries = parse_media_query_list(rule.prelude);
let block = match rule.block {
Some(block) => block,
None => {
log_css_error(rule.location, "Invalid @media rule");
return
}
};
let mut rules = ~[];
for rule in ErrorLoggerIterator(parse_rule_list(block.move_iter())) {
match rule {
QualifiedRule(rule) => parse_style_rule(rule, &mut rules, namespaces),
AtRule(rule) => parse_nested_at_rule(
rule.name.to_ascii_lower(), rule, &mut rules, namespaces),
}
}
parent_rules.push(CSSMediaRule(MediaRule {
media_queries: media_queries,
rules: rules,
}))
}
pub fn parse_media_query_list(input: &[ComponentValue]) -> MediaQueryList {
let iter = &mut input.skip_whitespace();
let mut next = iter.next();
if next.is_none() {
return MediaQueryList{ media_queries: ~[MediaQuery{media_type: All}] }
}
let mut queries = ~[];
loop {
let mq = match next {
Some(&Ident(ref value)) => {
// FIXME: Workaround for https://github.com/mozilla/rust/issues/10683
let value_lower = value.to_ascii_lower();
match value_lower.as_slice() {
"screen" => Some(MediaQuery{ media_type: MediaType(Screen) }),
"print" => Some(MediaQuery{ media_type: MediaType(Print) }),
"all" => Some(MediaQuery{ media_type: All }),
_ => None
}
},
_ => None
};
match iter.next() {
None => {
for mq in mq.move_iter() {
queries.push(mq);
}
return MediaQueryList{ media_queries: queries }
},
Some(&Comma) => {
for mq in mq.move_iter() {
queries.push(mq);
}
},
// Ignore this comma-separated part
_ => loop {
match iter.next() {
Some(&Comma) => break,
None => return MediaQueryList{ media_queries: queries },
_ => (),
}
},
}
next = iter.next();
}
}
impl MediaQueryList {
pub fn evaluate(&self, device: &Device) -> bool {
do self.media_queries.iter().any |mq| {
match mq.media_type {
MediaType(media_type) => media_type == device.media_type,
All => true,
}
// TODO: match Level 3 expressions
}
}
}
| MediaQuery | identifier_name |
media_queries.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::ascii::StrAsciiExt;
use cssparser::parse_rule_list;
use cssparser::ast::*;
use errors::{ErrorLoggerIterator, log_css_error};
use stylesheets::{CSSRule, CSSMediaRule, parse_style_rule, parse_nested_at_rule};
use namespaces::NamespaceMap;
pub struct MediaRule {
media_queries: MediaQueryList,
rules: ~[CSSRule],
}
pub struct MediaQueryList {
// "not all" is omitted from the list.
// An empty list never matches.
media_queries: ~[MediaQuery]
}
// For now, this is a "Level 2 MQ", ie. a media type.
struct MediaQuery {
media_type: MediaQueryType,
// TODO: Level 3 MQ expressions
}
enum MediaQueryType {
All, // Always true
MediaType(MediaType),
}
#[deriving(Eq)]
pub enum MediaType {
Screen,
Print,
}
pub struct Device {
media_type: MediaType,
// TODO: Level 3 MQ data: viewport size, etc.
}
pub fn parse_media_rule(rule: AtRule, parent_rules: &mut ~[CSSRule],
namespaces: &NamespaceMap) {
let media_queries = parse_media_query_list(rule.prelude);
let block = match rule.block {
Some(block) => block,
None => {
log_css_error(rule.location, "Invalid @media rule");
return
}
};
let mut rules = ~[];
for rule in ErrorLoggerIterator(parse_rule_list(block.move_iter())) {
match rule {
QualifiedRule(rule) => parse_style_rule(rule, &mut rules, namespaces),
AtRule(rule) => parse_nested_at_rule(
rule.name.to_ascii_lower(), rule, &mut rules, namespaces),
}
}
parent_rules.push(CSSMediaRule(MediaRule {
media_queries: media_queries,
rules: rules,
}))
}
pub fn parse_media_query_list(input: &[ComponentValue]) -> MediaQueryList {
let iter = &mut input.skip_whitespace();
let mut next = iter.next();
if next.is_none() {
return MediaQueryList{ media_queries: ~[MediaQuery{media_type: All}] }
}
let mut queries = ~[];
loop {
let mq = match next {
Some(&Ident(ref value)) => {
// FIXME: Workaround for https://github.com/mozilla/rust/issues/10683
let value_lower = value.to_ascii_lower();
match value_lower.as_slice() {
"screen" => Some(MediaQuery{ media_type: MediaType(Screen) }),
"print" => Some(MediaQuery{ media_type: MediaType(Print) }),
"all" => Some(MediaQuery{ media_type: All }),
_ => None
}
},
_ => None
};
match iter.next() {
None => {
for mq in mq.move_iter() {
queries.push(mq);
}
return MediaQueryList{ media_queries: queries }
},
Some(&Comma) => | ,
// Ignore this comma-separated part
_ => loop {
match iter.next() {
Some(&Comma) => break,
None => return MediaQueryList{ media_queries: queries },
_ => (),
}
},
}
next = iter.next();
}
}
impl MediaQueryList {
pub fn evaluate(&self, device: &Device) -> bool {
do self.media_queries.iter().any |mq| {
match mq.media_type {
MediaType(media_type) => media_type == device.media_type,
All => true,
}
// TODO: match Level 3 expressions
}
}
}
| {
for mq in mq.move_iter() {
queries.push(mq);
}
} | conditional_block |
call_once.rs | #![feature(core, unboxed_closures)]
extern crate core;
#[cfg(test)]
mod tests {
use core::ops::FnMut;
use core::ops::FnOnce;
type T = i32;
struct F;
type Args = (T,);
impl FnOnce<Args> for F {
type Output = T;
extern "rust-call" fn call_once(self, (x,): Args) -> Self::Output {
x + 1
}
}
impl FnMut<Args> for F {
extern "rust-call" fn call_mut(&mut self, (x,): Args) -> Self::Output {
x + 2
}
}
#[test]
fn call_once_test1() {
let mut f: F = F;
let f_ptr: &mut F = &mut f;
let x: T = 68;
let result: T = f_ptr.call_once((x,)); | assert_eq!(result, 70);
}
#[test]
fn call_once_test2() {
fn foo<F: FnOnce(T) -> T>(f: F, x: T) -> T {
f(x)
}
let mut f: F = F;
let f_ptr: &mut F = &mut f;
let x: T = 68;
let result: T = foo(f_ptr, x);
assert_eq!(result, x + 2);
assert_eq!(result, 70);
}
} |
assert_eq!(result, x + 2); | random_line_split |
call_once.rs | #![feature(core, unboxed_closures)]
extern crate core;
#[cfg(test)]
mod tests {
use core::ops::FnMut;
use core::ops::FnOnce;
type T = i32;
struct F;
type Args = (T,);
impl FnOnce<Args> for F {
type Output = T;
extern "rust-call" fn | (self, (x,): Args) -> Self::Output {
x + 1
}
}
impl FnMut<Args> for F {
extern "rust-call" fn call_mut(&mut self, (x,): Args) -> Self::Output {
x + 2
}
}
#[test]
fn call_once_test1() {
let mut f: F = F;
let f_ptr: &mut F = &mut f;
let x: T = 68;
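// Editor's note (added comment): `f_ptr` is `&mut F`, so the `call_once` call
// below resolves through the standard blanket impl of `FnOnce` for `&mut F`
// where `F: FnMut`, which forwards to `call_mut` above. That is why the test
// expects x + 2 here rather than the x + 1 produced by `F`'s own `call_once`.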
let result: T = f_ptr.call_once((x,));
assert_eq!(result, x + 2);
assert_eq!(result, 70);
}
#[test]
fn call_once_test2() {
fn foo<F: FnOnce(T) -> T>(f: F, x: T) -> T {
f(x)
}
let mut f: F = F;
let f_ptr: &mut F = &mut f;
let x: T = 68;
let result: T = foo(f_ptr, x);
assert_eq!(result, x + 2);
assert_eq!(result, 70);
}
}
| call_once | identifier_name |
call_once.rs | #![feature(core, unboxed_closures)]
extern crate core;
#[cfg(test)]
mod tests {
use core::ops::FnMut;
use core::ops::FnOnce;
type T = i32;
struct F;
type Args = (T,);
impl FnOnce<Args> for F {
type Output = T;
extern "rust-call" fn call_once(self, (x,): Args) -> Self::Output |
}
impl FnMut<Args> for F {
extern "rust-call" fn call_mut(&mut self, (x,): Args) -> Self::Output {
x + 2
}
}
#[test]
fn call_once_test1() {
let mut f: F = F;
let f_ptr: &mut F = &mut f;
let x: T = 68;
let result: T = f_ptr.call_once((x,));
assert_eq!(result, x + 2);
assert_eq!(result, 70);
}
#[test]
fn call_once_test2() {
fn foo<F: FnOnce(T) -> T>(f: F, x: T) -> T {
f(x)
}
let mut f: F = F;
let f_ptr: &mut F = &mut f;
let x: T = 68;
let result: T = foo(f_ptr, x);
assert_eq!(result, x + 2);
assert_eq!(result, 70);
}
}
| {
x + 1
} | identifier_body |
lzw.rs | //! This module provides an implementation of the Lempel–Ziv–Welch Compression Algorithm
// Note: This implementation borrows heavily from the work of Julius Pettersson
// See http://www.cplusplus.com/articles/iL18T05o/ for his extensive explanations
// and a C++ implementation
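// Editor's sketch (not part of the original file): end-to-end use of this
// module mirrors the `round_trip` test at the bottom. `LsbWriter`/`LsbReader`
// are assumed to be this crate's LSB-first bit-stream types.
//
// let mut compressed = Vec::new();
// {
// let mut enc = Encoder::new(LsbWriter::new(&mut compressed), 8).unwrap();
// enc.encode_bytes(b"TOBEORNOTTOBEORTOBEORNOT").unwrap();
// }
// // feed `compressed` back through `Decoder::decode_bytes` to recover the input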
use std::io;
use std::io::Read;
use bitstream::{Bits, BitReader, BitWriter};
const MAX_CODESIZE: u8 = 12;
const MAX_ENTRIES: usize = 1 << MAX_CODESIZE as usize;
/// Alias for a LZW code point
type Code = u16;
/// Decoding dictionary.
///
/// It is not generic due to current limitations of Rust
/// Inspired by http://www.cplusplus.com/articles/iL18T05o/
#[derive(Debug)]
struct DecodingDict {
min_size: u8,
table: Vec<(Option<Code>, u8)>,
buffer: Vec<u8>,
}
impl DecodingDict {
/// Creates a new dict
fn new(min_size: u8) -> DecodingDict {
DecodingDict {
min_size: min_size,
table: Vec::with_capacity(512),
buffer: Vec::with_capacity((1 << MAX_CODESIZE as usize) - 1),
}
}
/// Resets the dictionary
fn reset(&mut self) {
self.table.clear();
for i in 0..(1u16 << self.min_size as usize) {
self.table.push((None, i as u8));
}
}
/// Inserts a value into the dict
#[inline(always)]
fn push(&mut self, key: Option<Code>, value: u8) {
self.table.push((key, value))
}
/// Reconstructs the data for the corresponding code
fn reconstruct(&mut self, code: Option<Code>) -> io::Result<&[u8]> {
self.buffer.clear();
let mut code = code;
let mut cha;
// Check the first access more thoroughly since a bad code
// could occur if the data is malformed
if let Some(k) = code {
match self.table.get(k as usize) {
Some(&(code_, cha_)) => {
code = code_;
cha = cha_;
}
None => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
&*format!(
"Invalid code {:X}, expected code <= {:X}",
k,
self.table.len()
),
))
}
}
self.buffer.push(cha);
}
while let Some(k) = code {
if self.buffer.len() >= MAX_ENTRIES {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid code sequence. Cycle in decoding table.",
));
}
//(code, cha) = self.table[k as usize];
// Note: This could possibly be replaced with an unchecked array access if
// - value is asserted to be < self.next_code() in push
// - min_size is asserted to be < MAX_CODESIZE
let entry = self.table[k as usize];
code = entry.0;
cha = entry.1;
self.buffer.push(cha);
}
self.buffer.reverse();
Ok(&self.buffer)
}
/// Returns the buffer constructed by the last reconstruction
#[inline(always)]
fn buffer(&self) -> &[u8] {
&self.buffer
}
/// Number of entries in the dictionary
#[inline(always)]
fn next_code(&self) -> u16 {
self.table.len() as u16
}
}
macro_rules! define_decoder_struct {
{$(
$name:ident, $offset:expr, #[$doc:meta];
)*} => {
$( // START struct definition
#[$doc]
/// The maximum supported code size is 16 bits. The decoder
/// assumes two
/// special code words to be present in the stream:
///
/// * `CLEAR_CODE == 1 << min_code_size`
/// * `END_CODE == CLEAR_CODE + 1`
///
/// Furthermore the decoder expects the stream to start
/// with a `CLEAR_CODE`. This corresponds to the
/// implementation needed for en- and decoding GIF and TIFF files.
#[derive(Debug)]
pub struct $name<R: BitReader> {
r: R,
prev: Option<Code>,
table: DecodingDict,
buf: [u8; 1],
code_size: u8,
min_code_size: u8,
clear_code: Code,
end_code: Code,
}
impl<R> $name<R> where R: BitReader {
/// Creates a new LZW decoder.
pub fn new(reader: R, min_code_size: u8) -> $name<R> {
$name {
r: reader,
prev: None,
table: DecodingDict::new(min_code_size),
buf: [0; 1],
code_size: min_code_size + 1,
min_code_size: min_code_size,
clear_code: 1 << min_code_size,
end_code: (1 << min_code_size) + 1,
}
}
/// Tries to obtain and decode a code word from `bytes`.
///
/// Returns the number of bytes that have been consumed from `bytes`. An empty
/// slice does not indicate `EOF`.
pub fn decode_bytes(&mut self, bytes: &[u8]) -> io::Result<(usize, &[u8])> {
Ok(match self.r.read_bits(bytes, self.code_size) {
Bits::Some(consumed, code) => {
(consumed, if code == self.clear_code {
self.table.reset();
self.table.push(None, 0); // clear code
self.table.push(None, 0); // end code
self.code_size = self.min_code_size + 1;
self.prev = None;
&[]
} else if code == self.end_code {
&[]
} else {
let next_code = self.table.next_code();
if code > next_code {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
&*format!("Invalid code {:X}, expected code <= {:X}",
code,
next_code
)
))
}
let prev = self.prev;
let result = if prev.is_none() {
self.buf = [code as u8];
&self.buf[..]
} else {
let data = if code == next_code {
let cha = try!(self.table.reconstruct(prev))[0];
self.table.push(prev, cha);
try!(self.table.reconstruct(Some(code)))
} else if code < next_code {
let cha = try!(self.table.reconstruct(Some(code)))[0];
self.table.push(prev, cha);
self.table.buffer()
} else {
// code > next_code is already tested a few lines earlier
unreachable!()
};
data
};
if next_code == (1 << self.code_size as usize) - 1 - $offset
&& self.code_size < MAX_CODESIZE {
self.code_size += 1;
}
self.prev = Some(code);
result
})
},
Bits::None(consumed) => {
(consumed, &[])
}
})
}
}
)* // END struct definition
}
}
define_decoder_struct!{
Decoder, 0, #[doc = "
Decoder for a LZW compressed
stream (this algorithm is
used for GIF files)."];
DecoderEarlyChange, 1, #[doc = "
Decoder for a LZW compressed
stream using an “early change”
algorithm (used in TIFF files)."];
}
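// Editor's sketch (illustrative only): the generated decoders work
// incrementally. Callers repeatedly hand over the remaining input and collect
// whatever slice comes back, exactly as the `round_trip` test at the end of
// this file does.
//
// let mut dec = Decoder::new(LsbReader::new(), 8);
// while !input.is_empty() {
// let (consumed, out) = try!(dec.decode_bytes(input));
// input = &input[consumed..];
// output.extend(out.iter().cloned());
// }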
struct Node {
prefix: Option<Code>,
c: u8,
left: Option<Code>,
right: Option<Code>,
}
impl Node {
#[inline(always)]
fn new(c: u8) -> Node {
Node {
prefix: None,
c: c,
left: None,
right: None,
}
}
}
struct EncodingDict {
table: Vec<Node>,
min_size: u8,
}
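// Editor's note (added for clarity): each `Node` stores its extension byte `c`
// plus three code links. `prefix` points at the first one-byte extension of
// the string this node represents, while `left`/`right` order further
// extensions by their byte value, so `search_and_insert` below walks a small
// binary search tree instead of hashing.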
/// Encoding dictionary based on a binary tree
impl EncodingDict {
fn new(min_size: u8) -> EncodingDict {
let mut this = EncodingDict {
table: Vec::with_capacity(MAX_ENTRIES),
min_size: min_size,
};
this.reset();
this
}
fn reset(&mut self) {
self.table.clear();
for i in 0..(1u16 << self.min_size as usize) {
self.push_node(Node::new(i as u8));
}
}
#[inline(always)]
fn push_node(&mut self, node: Node) {
self.table.push(node)
}
#[inline(always)]
fn clear_code(&self) -> Code {
1u16 << self.min_size
}
#[inline(always)]
fn end_code(&self) -> Code {
self.clear_code() + 1
}
// Searches for a new prefix
fn search_and_insert(&mut self, i: Option<Code>, c: u8) -> Option<Code> {
if let Some(i) = i.map(|v| v as usize) {
let table_size = self.table.len() as Code;
if let Some(mut j) = self.table[i].prefix {
loop {
let entry = &mut self.table[j as usize];
if c < entry.c {
| c > entry.c {
if let Some(k) = entry.right {
j = k
} else {
entry.right = Some(table_size);
break;
}
} else {
return Some(j);
}
}
} else {
self.table[i].prefix = Some(table_size);
}
self.table.push(Node::new(c));
None
} else {
Some(self.search_initials(c as Code))
}
}
fn next_code(&self) -> usize {
self.table.len()
}
fn search_initials(&self, i: Code) -> Code {
self.table[i as usize].c as Code
}
}
/// Convenience function that reads and compresses all bytes from `R`.
pub fn encode<R, W>(r: R, mut w: W, min_code_size: u8) -> io::Result<()>
where
R: Read,
W: BitWriter,
{
let mut dict = EncodingDict::new(min_code_size);
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
let mut code_size = min_code_size + 1;
let mut i = None;
// gif spec: first clear code
try!(w.write_bits(dict.clear_code(), code_size));
let mut r = r.bytes();
while let Some(Ok(c)) = r.next() {
let prev = i;
i = dict.search_and_insert(prev, c);
if i.is_none() {
if let Some(code) = prev {
try!(w.write_bits(code, code_size));
}
i = Some(dict.search_initials(c as Code))
}
// There is a hit: do not write out code but continue
let next_code = dict.next_code();
if next_code > (1 << code_size as usize) && code_size < MAX_CODESIZE {
code_size += 1;
}
if next_code > MAX_ENTRIES {
dict.reset();
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
try!(w.write_bits(dict.clear_code(), code_size));
code_size = min_code_size + 1;
}
}
if let Some(code) = i {
try!(w.write_bits(code, code_size));
}
try!(w.write_bits(dict.end_code(), code_size));
try!(w.flush());
Ok(())
}
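// Editor's sketch (not part of the original file): driving the convenience
// `encode` function above from memory. `&[u8]` implements `Read`, and
// `LsbWriter` is assumed to be this crate's LSB-first `BitWriter`.
//
// let data: &[u8] = b"TOBEORNOTTOBEORTOBEORNOT";
// let mut compressed = Vec::new();
// encode(data, LsbWriter::new(&mut compressed), 8).unwrap();
// // `compressed` now holds the clear code, the LZW codes, then the end code.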
/// LZW encoder using the algorithm of GIF files.
pub struct Encoder<W: BitWriter> {
w: W,
dict: EncodingDict,
min_code_size: u8,
code_size: u8,
i: Option<Code>,
}
impl<W: BitWriter> Encoder<W> {
/// Creates a new LZW encoder.
///
/// **Note**: If `min_code_size < 8` then `Self::encode_bytes` might panic when
/// the supplied data contains values that exceed `1 << min_code_size`.
pub fn new(mut w: W, min_code_size: u8) -> io::Result<Encoder<W>> {
let mut dict = EncodingDict::new(min_code_size);
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
let code_size = min_code_size + 1;
try!(w.write_bits(dict.clear_code(), code_size));
Ok(Encoder {
w: w,
dict: dict,
min_code_size: min_code_size,
code_size: code_size,
i: None,
})
}
/// Compresses `bytes` and writes the result into the writer.
///
/// ## Panics
///
/// This function might panic if any of the input bytes exceeds `1 << min_code_size`.
/// This cannot happen if `min_code_size >= 8`.
pub fn encode_bytes(&mut self, bytes: &[u8]) -> io::Result<()> {
let w = &mut self.w;
let dict = &mut self.dict;
let code_size = &mut self.code_size;
let i = &mut self.i;
for &c in bytes {
let prev = *i;
*i = dict.search_and_insert(prev, c);
if i.is_none() {
if let Some(code) = prev {
try!(w.write_bits(code, *code_size));
}
*i = Some(dict.search_initials(c as Code))
}
// There is a hit: do not write out code but continue
let next_code = dict.next_code();
if next_code > (1 << *code_size as usize) && *code_size < MAX_CODESIZE {
*code_size += 1;
}
if next_code > MAX_ENTRIES {
dict.reset();
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
try!(w.write_bits(dict.clear_code(), *code_size));
*code_size = self.min_code_size + 1;
}
}
Ok(())
}
}
impl<W: BitWriter> Drop for Encoder<W> {
#[cfg(feature = "raii_no_panic")]
fn drop(&mut self) {
let w = &mut self.w;
let code_size = &mut self.code_size;
if let Some(code) = self.i {
let _ = w.write_bits(code, *code_size);
}
let _ = w.write_bits(self.dict.end_code(), *code_size);
let _ = w.flush();
}
#[cfg(not(feature = "raii_no_panic"))]
fn drop(&mut self) {
(|| {
let w = &mut self.w;
let code_size = &mut self.code_size;
if let Some(code) = self.i {
try!(w.write_bits(code, *code_size));
}
try!(w.write_bits(self.dict.end_code(), *code_size));
w.flush()
})().unwrap()
}
}
#[cfg(test)]
#[test]
fn round_trip() {
use {LsbWriter, LsbReader};
let size = 8;
let data = b"TOBEORNOTTOBEORTOBEORNOT";
let mut compressed = vec![];
{
let mut enc = Encoder::new(LsbWriter::new(&mut compressed), size).unwrap();
enc.encode_bytes(data).unwrap();
}
println!("{:?}", compressed);
let mut dec = Decoder::new(LsbReader::new(), size);
let mut compressed = &compressed[..];
let mut data2 = vec![];
while compressed.len() > 0 {
let (start, bytes) = dec.decode_bytes(&compressed).unwrap();
compressed = &compressed[start..];
data2.extend(bytes.iter().map(|&i| i));
}
assert_eq!(data2, data)
}
| if let Some(k) = entry.left {
j = k
} else {
entry.left = Some(table_size);
break;
}
} else if | conditional_block |
lzw.rs | //! This module provides an implementation of the Lempel–Ziv–Welch Compression Algorithm
// Note: This implementation borrows heavily from the work of Julius Pettersson
// See http://www.cplusplus.com/articles/iL18T05o/ for his extensive explanations
// and a C++ implementation
use std::io;
use std::io::Read;
use bitstream::{Bits, BitReader, BitWriter};
const MAX_CODESIZE: u8 = 12;
const MAX_ENTRIES: usize = 1 << MAX_CODESIZE as usize;
/// Alias for a LZW code point
type Code = u16;
/// Decoding dictionary.
///
/// It is not generic due to current limitations of Rust
/// Inspired by http://www.cplusplus.com/articles/iL18T05o/
#[derive(Debug)]
struct DecodingDict {
min_size: u8,
table: Vec<(Option<Code>, u8)>,
buffer: Vec<u8>,
}
impl DecodingDict {
/// Creates a new dict
fn new(min_size: u8) -> DecodingDict {
DecodingDict {
min_size: min_size,
table: Vec::with_capacity(512),
buffer: Vec::with_capacity((1 << MAX_CODESIZE as usize) - 1),
}
}
/// Resets the dictionary
fn reset(&mut self) {
self.table.clear();
for i in 0..(1u16 << self.min_size as usize) {
self.table.push((None, i as u8));
}
}
/// Inserts a value into the dict
#[inline(always)]
fn push(&mut self, key: Option<Code>, value: u8) {
self.table.push((key, value))
}
/// Reconstructs the data for the corresponding code
fn reconstruct(&mut self, code: Option<Code>) -> io::Result<&[u8]> {
self.buffer.clear();
let mut code = code;
let mut cha;
// Check the first access more thoroughly since a bad code
// could occur if the data is malformed
if let Some(k) = code {
match self.table.get(k as usize) {
Some(&(code_, cha_)) => {
code = code_;
cha = cha_;
}
None => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
&*format!(
"Invalid code {:X}, expected code <= {:X}",
k,
self.table.len()
),
))
}
}
self.buffer.push(cha);
}
while let Some(k) = code {
if self.buffer.len() >= MAX_ENTRIES {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid code sequence. Cycle in decoding table.",
));
}
//(code, cha) = self.table[k as usize];
// Note: This could possibly be replaced with an unchecked array access if
// - value is asserted to be < self.next_code() in push
// - min_size is asserted to be < MAX_CODESIZE
let entry = self.table[k as usize];
code = entry.0;
cha = entry.1;
self.buffer.push(cha);
}
self.buffer.reverse();
Ok(&self.buffer)
}
/// Returns the buffer constructed by the last reconstruction
#[inline(always)]
fn buffer(&self) -> &[u8] {
&self.buffer
}
/// Number of entries in the dictionary
#[inline(always)]
fn next_code(&self) -> u16 {
self.table.len() as u16
}
}
macro_rules! define_decoder_struct {
{$(
$name:ident, $offset:expr, #[$doc:meta];
)*} => {
$( // START struct definition
#[$doc]
/// The maximum supported code size is 16 bits. The decoder
/// assumes two
/// special code words to be present in the stream:
///
/// * `CLEAR_CODE == 1 << min_code_size`
/// * `END_CODE == CLEAR_CODE + 1`
///
/// Furthermore the decoder expects the stream to start
/// with a `CLEAR_CODE`. This corresponds to the
/// implementation needed for en- and decoding GIF and TIFF files.
#[derive(Debug)]
pub struct $name<R: BitReader> {
r: R,
prev: Option<Code>,
table: DecodingDict,
buf: [u8; 1],
code_size: u8,
min_code_size: u8,
clear_code: Code,
end_code: Code,
}
impl<R> $name<R> where R: BitReader {
/// Creates a new LZW decoder.
pub fn new(reader: R, min_code_size: u8) -> $name<R> {
$name {
r: reader,
prev: None,
table: DecodingDict::new(min_code_size),
buf: [0; 1],
code_size: min_code_size + 1,
min_code_size: min_code_size,
clear_code: 1 << min_code_size,
end_code: (1 << min_code_size) + 1,
}
}
/// Tries to obtain and decode a code word from `bytes`.
///
/// Returns the number of bytes that have been consumed from `bytes`. An empty
/// slice does not indicate `EOF`.
pub fn decode_bytes(&mut self, bytes: &[u8]) -> io::Result<(usize, &[u8])> {
Ok(match self.r.read_bits(bytes, self.code_size) {
Bits::Some(consumed, code) => {
(consumed, if code == self.clear_code {
self.table.reset();
self.table.push(None, 0); // clear code
self.table.push(None, 0); // end code
self.code_size = self.min_code_size + 1;
self.prev = None;
&[]
} else if code == self.end_code {
&[]
} else {
let next_code = self.table.next_code();
if code > next_code {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
&*format!("Invalid code {:X}, expected code <= {:X}",
code,
next_code
)
))
}
let prev = self.prev;
let result = if prev.is_none() {
self.buf = [code as u8];
&self.buf[..]
} else {
let data = if code == next_code {
let cha = try!(self.table.reconstruct(prev))[0];
self.table.push(prev, cha);
try!(self.table.reconstruct(Some(code)))
} else if code < next_code {
let cha = try!(self.table.reconstruct(Some(code)))[0];
self.table.push(prev, cha);
self.table.buffer()
} else {
// code > next_code is already tested a few lines earlier
unreachable!()
};
data
};
if next_code == (1 << self.code_size as usize) - 1 - $offset
&& self.code_size < MAX_CODESIZE {
self.code_size += 1;
}
self.prev = Some(code);
result
})
},
Bits::None(consumed) => {
(consumed, &[])
}
})
}
}
)* // END struct definition
}
}
define_decoder_struct!{
Decoder, 0, #[doc = "
Decoder for a LZW compressed
stream (this algorithm is
used for GIF files)."];
DecoderEarlyChange, 1, #[doc = "
Decoder for a LZW compressed
stream using an “early change”
algorithm (used in TIFF files)."];
}
struct Node {
prefix: Option<Code>,
c: u8,
left: Option<Code>,
right: Option<Code>,
}
impl Node {
#[inline(always)]
fn new(c: u8) -> Node {
Node {
prefix: None,
c: c,
left: None,
right: None,
}
}
}
struct EncodingDict {
table: Vec<Node>,
min_size: u8,
}
/// Encoding dictionary based on a binary tree
impl EncodingDict {
fn new(min_size: u8) -> EncodingDict {
let mut this = EncodingDict {
table: Vec::with_capacity(MAX_ENTRIES),
min_size: min_size,
};
this.reset();
this
}
fn reset(&mut self) {
self.table.clear();
for i in 0..(1u16 << self.min_size as usize) {
self.push_node(Node::new(i as u8));
}
}
#[inline(always)]
fn push_node(&mut self, node: Node) {
self.table.push(node)
}
#[inline(always)]
fn clear_cod | > Code {
1u16 << self.min_size
}
#[inline(always)]
fn end_code(&self) -> Code {
self.clear_code() + 1
}
// Searches for a new prefix
fn search_and_insert(&mut self, i: Option<Code>, c: u8) -> Option<Code> {
if let Some(i) = i.map(|v| v as usize) {
let table_size = self.table.len() as Code;
if let Some(mut j) = self.table[i].prefix {
loop {
let entry = &mut self.table[j as usize];
if c < entry.c {
if let Some(k) = entry.left {
j = k
} else {
entry.left = Some(table_size);
break;
}
} else if c > entry.c {
if let Some(k) = entry.right {
j = k
} else {
entry.right = Some(table_size);
break;
}
} else {
return Some(j);
}
}
} else {
self.table[i].prefix = Some(table_size);
}
self.table.push(Node::new(c));
None
} else {
Some(self.search_initials(c as Code))
}
}
fn next_code(&self) -> usize {
self.table.len()
}
fn search_initials(&self, i: Code) -> Code {
self.table[i as usize].c as Code
}
}
/// Convenience function that reads and compresses all bytes from `R`.
pub fn encode<R, W>(r: R, mut w: W, min_code_size: u8) -> io::Result<()>
where
R: Read,
W: BitWriter,
{
let mut dict = EncodingDict::new(min_code_size);
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
let mut code_size = min_code_size + 1;
let mut i = None;
// gif spec: first clear code
try!(w.write_bits(dict.clear_code(), code_size));
let mut r = r.bytes();
while let Some(Ok(c)) = r.next() {
let prev = i;
i = dict.search_and_insert(prev, c);
if i.is_none() {
if let Some(code) = prev {
try!(w.write_bits(code, code_size));
}
i = Some(dict.search_initials(c as Code))
}
// There is a hit: do not write out code but continue
let next_code = dict.next_code();
if next_code > (1 << code_size as usize) && code_size < MAX_CODESIZE {
code_size += 1;
}
if next_code > MAX_ENTRIES {
dict.reset();
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
try!(w.write_bits(dict.clear_code(), code_size));
code_size = min_code_size + 1;
}
}
if let Some(code) = i {
try!(w.write_bits(code, code_size));
}
try!(w.write_bits(dict.end_code(), code_size));
try!(w.flush());
Ok(())
}
/// LZW encoder using the algorithm of GIF files.
pub struct Encoder<W: BitWriter> {
w: W,
dict: EncodingDict,
min_code_size: u8,
code_size: u8,
i: Option<Code>,
}
impl<W: BitWriter> Encoder<W> {
/// Creates a new LZW encoder.
///
/// **Note**: If `min_code_size < 8` then `Self::encode_bytes` might panic when
/// the supplied data contains values that exceed `1 << min_code_size`.
pub fn new(mut w: W, min_code_size: u8) -> io::Result<Encoder<W>> {
let mut dict = EncodingDict::new(min_code_size);
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
let code_size = min_code_size + 1;
try!(w.write_bits(dict.clear_code(), code_size));
Ok(Encoder {
w: w,
dict: dict,
min_code_size: min_code_size,
code_size: code_size,
i: None,
})
}
/// Compresses `bytes` and writes the result into the writer.
///
/// ## Panics
///
/// This function might panic if any of the input bytes exceeds `1 << min_code_size`.
/// This cannot happen if `min_code_size >= 8`.
pub fn encode_bytes(&mut self, bytes: &[u8]) -> io::Result<()> {
let w = &mut self.w;
let dict = &mut self.dict;
let code_size = &mut self.code_size;
let i = &mut self.i;
for &c in bytes {
let prev = *i;
*i = dict.search_and_insert(prev, c);
if i.is_none() {
if let Some(code) = prev {
try!(w.write_bits(code, *code_size));
}
*i = Some(dict.search_initials(c as Code))
}
// There is a hit: do not write out code but continue
let next_code = dict.next_code();
if next_code > (1 << *code_size as usize) && *code_size < MAX_CODESIZE {
*code_size += 1;
}
if next_code > MAX_ENTRIES {
dict.reset();
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
try!(w.write_bits(dict.clear_code(), *code_size));
*code_size = self.min_code_size + 1;
}
}
Ok(())
}
}
impl<W: BitWriter> Drop for Encoder<W> {
#[cfg(feature = "raii_no_panic")]
fn drop(&mut self) {
let w = &mut self.w;
let code_size = &mut self.code_size;
if let Some(code) = self.i {
let _ = w.write_bits(code, *code_size);
}
let _ = w.write_bits(self.dict.end_code(), *code_size);
let _ = w.flush();
}
#[cfg(not(feature = "raii_no_panic"))]
fn drop(&mut self) {
(|| {
let w = &mut self.w;
let code_size = &mut self.code_size;
if let Some(code) = self.i {
try!(w.write_bits(code, *code_size));
}
try!(w.write_bits(self.dict.end_code(), *code_size));
w.flush()
})().unwrap()
}
}
#[cfg(test)]
#[test]
fn round_trip() {
use {LsbWriter, LsbReader};
let size = 8;
let data = b"TOBEORNOTTOBEORTOBEORNOT";
let mut compressed = vec![];
{
let mut enc = Encoder::new(LsbWriter::new(&mut compressed), size).unwrap();
enc.encode_bytes(data).unwrap();
}
println!("{:?}", compressed);
let mut dec = Decoder::new(LsbReader::new(), size);
let mut compressed = &compressed[..];
let mut data2 = vec![];
while compressed.len() > 0 {
let (start, bytes) = dec.decode_bytes(&compressed).unwrap();
compressed = &compressed[start..];
data2.extend(bytes.iter().map(|&i| i));
}
assert_eq!(data2, data)
}
| e(&self) - | identifier_name |
lzw.rs | //! This module provides an implementation of the Lempel–Ziv–Welch Compression Algorithm
// Note: This implementation borrows heavily from the work of Julius Pettersson
// See http://www.cplusplus.com/articles/iL18T05o/ for his extensive explanations
// and a C++ implementation
use std::io;
use std::io::Read;
use bitstream::{Bits, BitReader, BitWriter};
const MAX_CODESIZE: u8 = 12;
const MAX_ENTRIES: usize = 1 << MAX_CODESIZE as usize;
/// Alias for a LZW code point
type Code = u16;
/// Decoding dictionary.
///
/// It is not generic due to current limitations of Rust
/// Inspired by http://www.cplusplus.com/articles/iL18T05o/
#[derive(Debug)]
struct DecodingDict {
min_size: u8,
table: Vec<(Option<Code>, u8)>,
buffer: Vec<u8>,
}
impl DecodingDict {
/// Creates a new dict
fn new(min_size: u8) -> DecodingDict {
DecodingDict {
min_size: min_size,
table: Vec::with_capacity(512),
buffer: Vec::with_capacity((1 << MAX_CODESIZE as usize) - 1),
}
}
/// Resets the dictionary
fn reset(&mut self) {
self.table.clear();
for i in 0..(1u16 << self.min_size as usize) {
self.table.push((None, i as u8));
}
}
/// Inserts a value into the dict
#[inline(always)]
fn push(&mut self, key: Option<Code>, value: u8) {
self.table.push((key, value))
}
/// Reconstructs the data for the corresponding code
fn reconstruct(&mut self, code: Option<Code>) -> io::Result<&[u8]> {
self.buffer.clear();
let mut code = code;
let mut cha;
// Check the first access more thoroughly since a bad code
// could occur if the data is malformed
if let Some(k) = code {
match self.table.get(k as usize) {
Some(&(code_, cha_)) => {
code = code_;
cha = cha_;
}
None => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
&*format!(
"Invalid code {:X}, expected code <= {:X}",
k,
self.table.len()
),
))
}
}
self.buffer.push(cha);
}
while let Some(k) = code {
if self.buffer.len() >= MAX_ENTRIES {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid code sequence. Cycle in decoding table.",
));
}
//(code, cha) = self.table[k as usize];
// Note: This could possibly be replaced with an unchecked array access if
// - value is asserted to be < self.next_code() in push
// - min_size is asserted to be < MAX_CODESIZE
let entry = self.table[k as usize];
code = entry.0;
cha = entry.1;
self.buffer.push(cha);
}
self.buffer.reverse();
Ok(&self.buffer)
}
/// Returns the buffer constructed by the last reconstruction
#[inline(always)]
fn buffer(&self) -> &[u8] {
&self.buffer
}
/// Number of entries in the dictionary
#[inline(always)]
fn next_code(&self) -> u16 {
self.table.len() as u16
}
}
macro_rules! define_decoder_struct {
{$(
$name:ident, $offset:expr, #[$doc:meta];
)*} => {
$( // START struct definition
#[$doc]
/// The maximum supported code size is 16 bits. The decoder
/// assumes two
/// special code words to be present in the stream:
///
/// * `CLEAR_CODE == 1 << min_code_size`
/// * `END_CODE == CLEAR_CODE + 1`
///
/// Furthermore the decoder expects the stream to start
/// with a `CLEAR_CODE`. This corresponds to the
/// implementation needed for en- and decoding GIF and TIFF files.
#[derive(Debug)]
pub struct $name<R: BitReader> {
r: R,
prev: Option<Code>,
table: DecodingDict,
buf: [u8; 1],
code_size: u8,
min_code_size: u8,
clear_code: Code,
end_code: Code,
}
impl<R> $name<R> where R: BitReader {
/// Creates a new LZW decoder.
pub fn new(reader: R, min_code_size: u8) -> $name<R> {
$name {
r: reader,
prev: None,
table: DecodingDict::new(min_code_size),
buf: [0; 1],
code_size: min_code_size + 1,
min_code_size: min_code_size,
clear_code: 1 << min_code_size,
end_code: (1 << min_code_size) + 1,
}
}
/// Tries to obtain and decode a code word from `bytes`.
///
/// Returns the number of bytes that have been consumed from `bytes`. An empty
/// slice does not indicate `EOF`.
pub fn decode_bytes(&mut self, bytes: &[u8]) -> io::Result<(usize, &[u8])> {
Ok(match self.r.read_bits(bytes, self.code_size) {
Bits::Some(consumed, code) => {
(consumed, if code == self.clear_code {
self.table.reset();
self.table.push(None, 0); // clear code
self.table.push(None, 0); // end code
self.code_size = self.min_code_size + 1;
self.prev = None;
&[]
} else if code == self.end_code {
&[]
} else {
let next_code = self.table.next_code();
if code > next_code {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
&*format!("Invalid code {:X}, expected code <= {:X}",
code,
next_code
)
))
}
let prev = self.prev;
let result = if prev.is_none() {
self.buf = [code as u8];
&self.buf[..]
} else {
let data = if code == next_code {
let cha = try!(self.table.reconstruct(prev))[0];
self.table.push(prev, cha);
try!(self.table.reconstruct(Some(code)))
} else if code < next_code {
let cha = try!(self.table.reconstruct(Some(code)))[0];
self.table.push(prev, cha);
self.table.buffer()
} else {
// code > next_code is already tested a few lines earlier
unreachable!()
};
data
};
if next_code == (1 << self.code_size as usize) - 1 - $offset
&& self.code_size < MAX_CODESIZE {
self.code_size += 1;
}
self.prev = Some(code);
result
})
},
Bits::None(consumed) => {
(consumed, &[])
}
})
}
}
)* // END struct definition
}
}
define_decoder_struct!{
Decoder, 0, #[doc = "
Decoder for a LZW compressed
stream (this algorithm is
used for GIF files)."];
DecoderEarlyChange, 1, #[doc = "
Decoder for a LZW compressed
stream using an “early change”
algorithm (used in TIFF files)."];
}
struct Node {
prefix: Option<Code>,
c: u8,
left: Option<Code>,
right: Option<Code>,
}
impl Node {
#[inline(always)]
fn new(c: u8) -> Node {
Node {
prefix: None,
c: c,
left: None,
right: None,
}
}
}
struct EncodingDict {
table: Vec<Node>,
min_size: u8,
}
/// Encoding dictionary based on a binary tree
impl EncodingDict {
fn new(min_size: u8) -> EncodingDict {
let mut this = EncodingDict {
table: Vec::with_capacity(MAX_ENTRIES),
min_size: min_size,
};
this.reset();
this
}
fn reset(&mut self) {
self.table.clear();
for i in 0..(1u16 << self.min_size as usize) {
self.push_node(Node::new(i as u8));
}
}
#[inline(always)]
fn push_node(&mut self, node: Node) {
self.table.push(node)
}
#[inline(always)]
fn clear_code(&self) -> Code {
1u16 << self.min_size
}
#[inline(always)]
fn end_code(&self) -> Code {
self.clear_code() + 1
}
// Searches for a new prefix
fn search_and_insert(&mut self, i: Option<Code>, c: u8) -> Option<Code> {
if let Some(i) = i.map(|v| v as usize) {
let table_size = self.table.len() as Code;
if let Some(mut j) = self.table[i].prefix {
loop {
let entry = &mut self.table[j as usize];
if c < entry.c {
if let Some(k) = entry.left {
j = k
} else {
entry.left = Some(table_size);
break;
}
} else if c > entry.c {
if let Some(k) = entry.right {
j = k
} else {
entry.right = Some(table_size);
break;
}
} else {
return Some(j);
}
}
} else {
self.table[i].prefix = Some(table_size);
}
self.table.push(Node::new(c));
None
} else {
Some(self.search_initials(c as Code))
}
}
fn next_code(&self) -> usize {
| search_initials(&self, i: Code) -> Code {
self.table[i as usize].c as Code
}
}
/// Convenience function that reads and compresses all bytes from `R`.
pub fn encode<R, W>(r: R, mut w: W, min_code_size: u8) -> io::Result<()>
where
R: Read,
W: BitWriter,
{
let mut dict = EncodingDict::new(min_code_size);
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
let mut code_size = min_code_size + 1;
let mut i = None;
// gif spec: first clear code
try!(w.write_bits(dict.clear_code(), code_size));
let mut r = r.bytes();
while let Some(Ok(c)) = r.next() {
let prev = i;
i = dict.search_and_insert(prev, c);
if i.is_none() {
if let Some(code) = prev {
try!(w.write_bits(code, code_size));
}
i = Some(dict.search_initials(c as Code))
}
// There is a hit: do not write out code but continue
let next_code = dict.next_code();
if next_code > (1 << code_size as usize) && code_size < MAX_CODESIZE {
code_size += 1;
}
if next_code > MAX_ENTRIES {
dict.reset();
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
try!(w.write_bits(dict.clear_code(), code_size));
code_size = min_code_size + 1;
}
}
if let Some(code) = i {
try!(w.write_bits(code, code_size));
}
try!(w.write_bits(dict.end_code(), code_size));
try!(w.flush());
Ok(())
}
/// LZW encoder using the algorithm of GIF files.
pub struct Encoder<W: BitWriter> {
w: W,
dict: EncodingDict,
min_code_size: u8,
code_size: u8,
i: Option<Code>,
}
impl<W: BitWriter> Encoder<W> {
/// Creates a new LZW encoder.
///
/// **Note**: If `min_code_size < 8` then `Self::encode_bytes` might panic when
/// the supplied data contains values that exceed `1 << min_code_size`.
pub fn new(mut w: W, min_code_size: u8) -> io::Result<Encoder<W>> {
let mut dict = EncodingDict::new(min_code_size);
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
let code_size = min_code_size + 1;
try!(w.write_bits(dict.clear_code(), code_size));
Ok(Encoder {
w: w,
dict: dict,
min_code_size: min_code_size,
code_size: code_size,
i: None,
})
}
/// Compresses `bytes` and writes the result into the writer.
///
/// ## Panics
///
/// This function might panic if any of the input bytes exceeds `1 << min_code_size`.
/// This cannot happen if `min_code_size >= 8`.
pub fn encode_bytes(&mut self, bytes: &[u8]) -> io::Result<()> {
let w = &mut self.w;
let dict = &mut self.dict;
let code_size = &mut self.code_size;
let i = &mut self.i;
for &c in bytes {
let prev = *i;
*i = dict.search_and_insert(prev, c);
if i.is_none() {
if let Some(code) = prev {
try!(w.write_bits(code, *code_size));
}
*i = Some(dict.search_initials(c as Code))
}
// There is a hit: do not write out code but continue
let next_code = dict.next_code();
if next_code > (1 << *code_size as usize) && *code_size < MAX_CODESIZE {
*code_size += 1;
}
if next_code > MAX_ENTRIES {
dict.reset();
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
try!(w.write_bits(dict.clear_code(), *code_size));
*code_size = self.min_code_size + 1;
}
}
Ok(())
}
}
impl<W: BitWriter> Drop for Encoder<W> {
#[cfg(feature = "raii_no_panic")]
fn drop(&mut self) {
let w = &mut self.w;
let code_size = &mut self.code_size;
if let Some(code) = self.i {
let _ = w.write_bits(code, *code_size);
}
let _ = w.write_bits(self.dict.end_code(), *code_size);
let _ = w.flush();
}
#[cfg(not(feature = "raii_no_panic"))]
fn drop(&mut self) {
(|| {
let w = &mut self.w;
let code_size = &mut self.code_size;
if let Some(code) = self.i {
try!(w.write_bits(code, *code_size));
}
try!(w.write_bits(self.dict.end_code(), *code_size));
w.flush()
})().unwrap()
}
}
#[cfg(test)]
#[test]
fn round_trip() {
use {LsbWriter, LsbReader};
let size = 8;
let data = b"TOBEORNOTTOBEORTOBEORNOT";
let mut compressed = vec![];
{
let mut enc = Encoder::new(LsbWriter::new(&mut compressed), size).unwrap();
enc.encode_bytes(data).unwrap();
}
println!("{:?}", compressed);
let mut dec = Decoder::new(LsbReader::new(), size);
let mut compressed = &compressed[..];
let mut data2 = vec![];
while compressed.len() > 0 {
let (start, bytes) = dec.decode_bytes(&compressed).unwrap();
compressed = &compressed[start..];
data2.extend(bytes.iter().map(|&i| i));
}
assert_eq!(data2, data)
}
| self.table.len()
}
fn | identifier_body |
lzw.rs | //! This module provides an implementation of the Lempel–Ziv–Welch Compression Algorithm
// Note: This implementation borrows heavily from the work of Julius Pettersson
// See http://www.cplusplus.com/articles/iL18T05o/ for his extensive explanations
// and a C++ implementation
use std::io;
use std::io::Read;
use bitstream::{Bits, BitReader, BitWriter};
const MAX_CODESIZE: u8 = 12;
const MAX_ENTRIES: usize = 1 << MAX_CODESIZE as usize;
/// Alias for a LZW code point
type Code = u16;
/// Decoding dictionary.
///
/// It is not generic due to current limitations of Rust
/// Inspired by http://www.cplusplus.com/articles/iL18T05o/
#[derive(Debug)]
struct DecodingDict {
min_size: u8,
table: Vec<(Option<Code>, u8)>,
buffer: Vec<u8>,
}
impl DecodingDict {
/// Creates a new dict
fn new(min_size: u8) -> DecodingDict {
DecodingDict {
min_size: min_size,
table: Vec::with_capacity(512),
buffer: Vec::with_capacity((1 << MAX_CODESIZE as usize) - 1),
}
}
/// Resets the dictionary
fn reset(&mut self) {
self.table.clear();
for i in 0..(1u16 << self.min_size as usize) {
self.table.push((None, i as u8));
}
}
/// Inserts a value into the dict
#[inline(always)]
fn push(&mut self, key: Option<Code>, value: u8) {
self.table.push((key, value))
}
/// Reconstructs the data for the corresponding code
fn reconstruct(&mut self, code: Option<Code>) -> io::Result<&[u8]> {
self.buffer.clear();
let mut code = code;
let mut cha;
// Check the first access more thoroughly since a bad code
// could occur if the data is malformed
if let Some(k) = code {
match self.table.get(k as usize) {
Some(&(code_, cha_)) => {
code = code_;
cha = cha_;
}
None => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
&*format!(
"Invalid code {:X}, expected code <= {:X}",
k,
self.table.len()
),
))
}
}
self.buffer.push(cha);
}
while let Some(k) = code {
if self.buffer.len() >= MAX_ENTRIES {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid code sequence. Cycle in decoding table.",
));
}
//(code, cha) = self.table[k as usize];
// Note: This could possibly be replaced with an unchecked array access if
// - value is asserted to be < self.next_code() in push
// - min_size is asserted to be < MAX_CODESIZE
let entry = self.table[k as usize];
code = entry.0;
cha = entry.1;
self.buffer.push(cha);
}
self.buffer.reverse();
Ok(&self.buffer)
}
/// Returns the buffer constructed by the last reconstruction
#[inline(always)]
fn buffer(&self) -> &[u8] {
&self.buffer
}
/// Number of entries in the dictionary
#[inline(always)]
fn next_code(&self) -> u16 {
self.table.len() as u16
}
}
macro_rules! define_decoder_struct {
{$(
$name:ident, $offset:expr, #[$doc:meta];
)*} => {
$( // START struct definition
#[$doc]
/// The maximum supported code size is 16 bits. The decoder
/// assumes two
/// special code words to be present in the stream:
///
/// * `CLEAR_CODE == 1 << min_code_size`
/// * `END_CODE == CLEAR_CODE + 1`
///
/// Furthermore the decoder expects the stream to start
/// with a `CLEAR_CODE`. This corresponds to the
/// implementation needed for en- and decoding GIF and TIFF files.
#[derive(Debug)]
pub struct $name<R: BitReader> {
r: R,
prev: Option<Code>,
table: DecodingDict,
buf: [u8; 1],
code_size: u8,
min_code_size: u8,
clear_code: Code,
end_code: Code,
}
impl<R> $name<R> where R: BitReader {
/// Creates a new LZW decoder.
pub fn new(reader: R, min_code_size: u8) -> $name<R> {
$name {
r: reader,
prev: None,
table: DecodingDict::new(min_code_size),
buf: [0; 1],
code_size: min_code_size + 1,
min_code_size: min_code_size,
clear_code: 1 << min_code_size,
end_code: (1 << min_code_size) + 1,
}
}
/// Tries to obtain and decode a code word from `bytes`.
///
/// Returns the number of bytes that have been consumed from `bytes`. An empty
/// slice does not indicate `EOF`.
pub fn decode_bytes(&mut self, bytes: &[u8]) -> io::Result<(usize, &[u8])> {
Ok(match self.r.read_bits(bytes, self.code_size) {
Bits::Some(consumed, code) => {
(consumed, if code == self.clear_code {
self.table.reset();
self.table.push(None, 0); // clear code
self.table.push(None, 0); // end code
self.code_size = self.min_code_size + 1;
self.prev = None;
&[]
} else if code == self.end_code {
&[]
} else {
let next_code = self.table.next_code();
if code > next_code {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
&*format!("Invalid code {:X}, expected code <= {:X}",
code,
next_code
)
))
}
let prev = self.prev;
let result = if prev.is_none() {
self.buf = [code as u8];
&self.buf[..]
} else {
let data = if code == next_code {
let cha = try!(self.table.reconstruct(prev))[0];
self.table.push(prev, cha);
try!(self.table.reconstruct(Some(code)))
} else if code < next_code {
let cha = try!(self.table.reconstruct(Some(code)))[0];
self.table.push(prev, cha);
self.table.buffer()
} else {
// code > next_code is already tested a few lines earlier
unreachable!()
};
data
};
if next_code == (1 << self.code_size as usize) - 1 - $offset
&& self.code_size < MAX_CODESIZE {
self.code_size += 1;
}
self.prev = Some(code);
result
})
},
Bits::None(consumed) => {
(consumed, &[])
}
})
}
}
)* // END struct definition
}
}
define_decoder_struct!{
Decoder, 0, #[doc = "
Decoder for a LZW compressed
stream (this algorithm is
used for GIF files)."];
DecoderEarlyChange, 1, #[doc = "
Decoder for a LZW compressed
stream using an “early change”
algorithm (used in TIFF files)."];
}
struct Node {
prefix: Option<Code>,
c: u8,
left: Option<Code>,
right: Option<Code>,
}
impl Node {
#[inline(always)]
fn new(c: u8) -> Node {
Node {
prefix: None,
c: c,
left: None,
right: None,
}
}
}
struct EncodingDict {
table: Vec<Node>,
min_size: u8,
}
/// Encoding dictionary based on a binary tree
impl EncodingDict {
fn new(min_size: u8) -> EncodingDict {
let mut this = EncodingDict {
table: Vec::with_capacity(MAX_ENTRIES),
min_size: min_size,
};
this.reset();
this | self.push_node(Node::new(i as u8));
}
}
#[inline(always)]
fn push_node(&mut self, node: Node) {
self.table.push(node)
}
#[inline(always)]
fn clear_code(&self) -> Code {
1u16 << self.min_size
}
#[inline(always)]
fn end_code(&self) -> Code {
self.clear_code() + 1
}
// Searches for a new prefix
fn search_and_insert(&mut self, i: Option<Code>, c: u8) -> Option<Code> {
if let Some(i) = i.map(|v| v as usize) {
let table_size = self.table.len() as Code;
if let Some(mut j) = self.table[i].prefix {
loop {
let entry = &mut self.table[j as usize];
if c < entry.c {
if let Some(k) = entry.left {
j = k
} else {
entry.left = Some(table_size);
break;
}
} else if c > entry.c {
if let Some(k) = entry.right {
j = k
} else {
entry.right = Some(table_size);
break;
}
} else {
return Some(j);
}
}
} else {
self.table[i].prefix = Some(table_size);
}
self.table.push(Node::new(c));
None
} else {
Some(self.search_initials(c as Code))
}
}
fn next_code(&self) -> usize {
self.table.len()
}
fn search_initials(&self, i: Code) -> Code {
self.table[i as usize].c as Code
}
}
/// Convenience function that reads and compresses all bytes from `R`.
pub fn encode<R, W>(r: R, mut w: W, min_code_size: u8) -> io::Result<()>
where
R: Read,
W: BitWriter,
{
let mut dict = EncodingDict::new(min_code_size);
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
let mut code_size = min_code_size + 1;
let mut i = None;
// gif spec: first clear code
try!(w.write_bits(dict.clear_code(), code_size));
let mut r = r.bytes();
while let Some(Ok(c)) = r.next() {
let prev = i;
i = dict.search_and_insert(prev, c);
if i.is_none() {
if let Some(code) = prev {
try!(w.write_bits(code, code_size));
}
i = Some(dict.search_initials(c as Code))
}
// There is a hit: do not write out code but continue
let next_code = dict.next_code();
if next_code > (1 << code_size as usize) && code_size < MAX_CODESIZE {
code_size += 1;
}
if next_code > MAX_ENTRIES {
dict.reset();
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
try!(w.write_bits(dict.clear_code(), code_size));
code_size = min_code_size + 1;
}
}
if let Some(code) = i {
try!(w.write_bits(code, code_size));
}
try!(w.write_bits(dict.end_code(), code_size));
try!(w.flush());
Ok(())
}
/// LZW encoder using the algorithm of GIF files.
pub struct Encoder<W: BitWriter> {
w: W,
dict: EncodingDict,
min_code_size: u8,
code_size: u8,
i: Option<Code>,
}
impl<W: BitWriter> Encoder<W> {
/// Creates a new LZW encoder.
///
/// **Note**: If `min_code_size < 8` then `Self::encode_bytes` might panic when
/// the supplied data contains values that exceed `1 << min_code_size`.
pub fn new(mut w: W, min_code_size: u8) -> io::Result<Encoder<W>> {
let mut dict = EncodingDict::new(min_code_size);
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
let code_size = min_code_size + 1;
try!(w.write_bits(dict.clear_code(), code_size));
Ok(Encoder {
w: w,
dict: dict,
min_code_size: min_code_size,
code_size: code_size,
i: None,
})
}
/// Compresses `bytes` and writes the result into the writer.
///
/// ## Panics
///
/// This function might panic if any of the input bytes exceeds `1 << min_code_size`.
/// This cannot happen if `min_code_size >= 8`.
pub fn encode_bytes(&mut self, bytes: &[u8]) -> io::Result<()> {
let w = &mut self.w;
let dict = &mut self.dict;
let code_size = &mut self.code_size;
let i = &mut self.i;
for &c in bytes {
let prev = *i;
*i = dict.search_and_insert(prev, c);
if i.is_none() {
if let Some(code) = prev {
try!(w.write_bits(code, *code_size));
}
*i = Some(dict.search_initials(c as Code))
}
// There is a hit: do not write out code but continue
let next_code = dict.next_code();
if next_code > (1 << *code_size as usize) && *code_size < MAX_CODESIZE {
*code_size += 1;
}
if next_code > MAX_ENTRIES {
dict.reset();
dict.push_node(Node::new(0)); // clear code
dict.push_node(Node::new(0)); // end code
try!(w.write_bits(dict.clear_code(), *code_size));
*code_size = self.min_code_size + 1;
}
}
Ok(())
}
}
impl<W: BitWriter> Drop for Encoder<W> {
#[cfg(feature = "raii_no_panic")]
fn drop(&mut self) {
let w = &mut self.w;
let code_size = &mut self.code_size;
if let Some(code) = self.i {
let _ = w.write_bits(code, *code_size);
}
let _ = w.write_bits(self.dict.end_code(), *code_size);
let _ = w.flush();
}
#[cfg(not(feature = "raii_no_panic"))]
fn drop(&mut self) {
(|| {
let w = &mut self.w;
let code_size = &mut self.code_size;
if let Some(code) = self.i {
try!(w.write_bits(code, *code_size));
}
try!(w.write_bits(self.dict.end_code(), *code_size));
w.flush()
})().unwrap()
}
}
#[cfg(test)]
#[test]
fn round_trip() {
use {LsbWriter, LsbReader};
let size = 8;
let data = b"TOBEORNOTTOBEORTOBEORNOT";
let mut compressed = vec![];
{
let mut enc = Encoder::new(LsbWriter::new(&mut compressed), size).unwrap();
enc.encode_bytes(data).unwrap();
}
println!("{:?}", compressed);
let mut dec = Decoder::new(LsbReader::new(), size);
let mut compressed = &compressed[..];
let mut data2 = vec![];
while compressed.len() > 0 {
let (start, bytes) = dec.decode_bytes(&compressed).unwrap();
compressed = &compressed[start..];
data2.extend(bytes.iter().map(|&i| i));
}
assert_eq!(data2, data)
} | }
fn reset(&mut self) {
self.table.clear();
for i in 0..(1u16 << self.min_size as usize) { | random_line_split |
htmldivelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLDivElementBinding::{self, HTMLDivElementMethods};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLDivElement {
htmlelement: HTMLElement
}
impl HTMLDivElement {
fn | (localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLDivElement {
HTMLDivElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLDivElement> {
let element = HTMLDivElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLDivElementBinding::Wrap)
}
}
impl HTMLDivElementMethods for HTMLDivElement {
// https://html.spec.whatwg.org/multipage/#dom-div-align
make_getter!(Align, "align");
// https://html.spec.whatwg.org/multipage/#dom-div-align
make_setter!(SetAlign, "align");
}
| new_inherited | identifier_name |
htmldivelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLDivElementBinding::{self, HTMLDivElementMethods};
use dom::bindings::js::Root;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLDivElement {
htmlelement: HTMLElement
}
impl HTMLDivElement {
fn new_inherited(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLDivElement {
HTMLDivElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLDivElement> {
let element = HTMLDivElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLDivElementBinding::Wrap)
}
}
impl HTMLDivElementMethods for HTMLDivElement {
// https://html.spec.whatwg.org/multipage/#dom-div-align
make_getter!(Align, "align");
| // https://html.spec.whatwg.org/multipage/#dom-div-align
make_setter!(SetAlign, "align");
} | random_line_split |
|
problem_0005.rs | extern crate projecteuler;
use projecteuler::primes::*;
use std::collections::HashMap;
fn main() | result *= x.0.pow(x.1 as u32);//gather all the prime exponents together
}
result
};
println!("{}", result);
}
| {
let mut result_factorized = HashMap::<u64,u64>::new();
let mut primes = Primes::new();
for i in 1..21{//TODO: Make this an LCM function.
let factorization = primes.factorize(i);
for factor in factorization{
let current_factor = match result_factorized.get(&factor.0){
Some(i) => {*i},
_ => {0}
};
if current_factor < factor.1{
result_factorized.insert(factor.0, factor.1);
}
}
}
let result = {
let mut result = 1;
for x in result_factorized{ | identifier_body |
problem_0005.rs | extern crate projecteuler;
use projecteuler::primes::*;
use std::collections::HashMap;
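// Editor's note (added for clarity): the task is lcm(1, 2, ..., 20). Via prime
// factorisation, lcm = product over primes p of p^(maximum exponent of p among
// 1 through 20); the map below keeps that maximum exponent per prime and the
// final loop multiplies the prime powers back together.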
fn | (){
let mut result_factorized = HashMap::<u64,u64>::new();
let mut primes = Primes::new();
for i in 1..21{//TODO: Make this an LCM function.
let factorization = primes.factorize(i);
for factor in factorization{
let current_factor = match result_factorized.get(&factor.0){
Some(i) => {*i},
_ => {0}
};
if current_factor < factor.1{
result_factorized.insert(factor.0, factor.1);
}
}
}
let result = {
let mut result = 1;
for x in result_factorized{
result *= x.0.pow(x.1 as u32);//gather all the prime exponents together
}
result
};
println!("{}", result);
}
| main | identifier_name |
problem_0005.rs | extern crate projecteuler;
use projecteuler::primes::*;
use std::collections::HashMap;
fn main(){
let mut result_factorized = HashMap::<u64,u64>::new(); | let mut primes = Primes::new();
for i in 1..21{//TODO: Make this an LCM function.
let factorization = primes.factorize(i);
for factor in factorization{
let current_factor = match result_factorized.get(&factor.0){
Some(i) => {*i},
_ => {0}
};
if current_factor < factor.1{
result_factorized.insert(factor.0, factor.1);
}
}
}
let result = {
let mut result = 1;
for x in result_factorized{
result *= x.0.pow(x.1 as u32);//gather all the prime exponents together
}
result
};
println!("{}", result);
} | random_line_split |
|
problem_0005.rs | extern crate projecteuler;
use projecteuler::primes::*;
use std::collections::HashMap;
fn main(){
let mut result_factorized = HashMap::<u64,u64>::new();
let mut primes = Primes::new();
for i in 1..21{//TODO: Make this an LCM function.
let factorization = primes.factorize(i);
for factor in factorization{
let current_factor = match result_factorized.get(&factor.0){
Some(i) => {*i},
_ => |
};
if current_factor < factor.1{
result_factorized.insert(factor.0, factor.1);
}
}
}
let result = {
let mut result = 1;
for x in result_factorized{
result *= x.0.pow(x.1 as u32);//gather all the prime exponents together
}
result
};
println!("{}", result);
}
| {0} | conditional_block |
expect.rs | use std::fmt;
use std::str;
use unicase::UniCase;
use header::{Header, HeaderFormat};
/// The `Expect` header.
///
/// > The "Expect" header field in a request indicates a certain set of
/// > behaviors (expectations) that need to be supported by the server in
/// > order to properly handle this request. The only such expectation
/// > defined by this specification is 100-continue.
/// >
/// > Expect = "100-continue"
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Expect {
/// The value `100-continue`.
Continue
}
const EXPECT_CONTINUE: UniCase<&'static str> = UniCase("100-continue");
impl Header for Expect {
fn header_name() -> &'static str |
fn parse_header(raw: &[Vec<u8>]) -> Option<Expect> {
if raw.len() == 1 {
let text = unsafe {
// safe because:
// 1. we just checked raw.len == 1
// 2. we don't actually care if it's utf8, we just want to
// compare the bytes with the "case" normalized. If it's not
// utf8, then the byte comparison will fail, and we'll return
// None. No big deal.
str::from_utf8_unchecked(raw.get_unchecked(0))
};
if UniCase(text) == EXPECT_CONTINUE {
Some(Expect::Continue)
} else {
None
}
} else {
None
}
}
}
impl HeaderFormat for Expect {
fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("100-continue")
}
}
| {
"Expect"
} | identifier_body |
expect.rs | use std::fmt;
use std::str;
use unicase::UniCase;
use header::{Header, HeaderFormat};
/// The `Expect` header.
///
/// > The "Expect" header field in a request indicates a certain set of
/// > behaviors (expectations) that need to be supported by the server in
/// > order to properly handle this request. The only such expectation
/// > defined by this specification is 100-continue.
/// >
/// > Expect = "100-continue"
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Expect {
/// The value `100-continue`.
Continue
}
const EXPECT_CONTINUE: UniCase<&'static str> = UniCase("100-continue");
impl Header for Expect {
fn header_name() -> &'static str {
"Expect"
}
fn | (raw: &[Vec<u8>]) -> Option<Expect> {
if raw.len() == 1 {
let text = unsafe {
// safe because:
// 1. we just checked raw.len == 1
// 2. we don't actually care if it's utf8, we just want to
// compare the bytes with the "case" normalized. If it's not
// utf8, then the byte comparison will fail, and we'll return
// None. No big deal.
str::from_utf8_unchecked(raw.get_unchecked(0))
};
if UniCase(text) == EXPECT_CONTINUE {
Some(Expect::Continue)
} else {
None
}
} else {
None
}
}
}
impl HeaderFormat for Expect {
fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("100-continue")
}
}
| parse_header | identifier_name |
expect.rs | use std::fmt;
use std::str;
use unicase::UniCase;
use header::{Header, HeaderFormat};
/// The `Expect` header.
///
/// > The "Expect" header field in a request indicates a certain set of
/// > behaviors (expectations) that need to be supported by the server in
/// > order to properly handle this request. The only such expectation
/// > defined by this specification is 100-continue.
/// >
/// > Expect = "100-continue"
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Expect {
/// The value `100-continue`.
Continue
}
const EXPECT_CONTINUE: UniCase<&'static str> = UniCase("100-continue");
impl Header for Expect {
fn header_name() -> &'static str {
"Expect"
}
fn parse_header(raw: &[Vec<u8>]) -> Option<Expect> {
if raw.len() == 1 | else {
None
}
}
}
impl HeaderFormat for Expect {
fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("100-continue")
}
}
| {
let text = unsafe {
// safe because:
// 1. we just checked raw.len == 1
// 2. we don't actually care if it's utf8, we just want to
// compare the bytes with the "case" normalized. If it's not
// utf8, then the byte comparison will fail, and we'll return
// None. No big deal.
str::from_utf8_unchecked(raw.get_unchecked(0))
};
if UniCase(text) == EXPECT_CONTINUE {
Some(Expect::Continue)
} else {
None
}
} | conditional_block |
expect.rs | use std::fmt;
use std::str;
use unicase::UniCase;
use header::{Header, HeaderFormat};
/// The `Expect` header.
///
/// > The "Expect" header field in a request indicates a certain set of
/// > behaviors (expectations) that need to be supported by the server in
/// > order to properly handle this request. The only such expectation
/// > defined by this specification is 100-continue.
/// >
/// > Expect = "100-continue"
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Expect {
/// The value `100-continue`.
Continue
}
const EXPECT_CONTINUE: UniCase<&'static str> = UniCase("100-continue");
impl Header for Expect {
fn header_name() -> &'static str {
"Expect"
}
fn parse_header(raw: &[Vec<u8>]) -> Option<Expect> {
if raw.len() == 1 {
let text = unsafe {
// safe because: | str::from_utf8_unchecked(raw.get_unchecked(0))
};
if UniCase(text) == EXPECT_CONTINUE {
Some(Expect::Continue)
} else {
None
}
} else {
None
}
}
}
impl HeaderFormat for Expect {
fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("100-continue")
}
} | // 1. we just checked raw.len == 1
// 2. we don't actually care if it's utf8, we just want to
// compare the bytes with the "case" normalized. If it's not
// utf8, then the byte comparison will fail, and we'll return
// None. No big deal. | random_line_split |
search_errors.rs | use std::env;
use std::fs::File;
use std::io::{BufWriter, Write};
use std::path::PathBuf;
use calamine::{open_workbook_auto, DataType, Error, Reader};
use glob::{glob, GlobError, GlobResult};
#[derive(Debug)]
enum FileStatus {
VbaError(Error),
RangeError(Error),
Glob(GlobError),
}
fn main() {
// Search recursively for all excel files matching argument pattern
// Output statistics: nb broken references, nb broken cells etc...
let folder = env::args().nth(1).unwrap_or_else(|| ".".to_string());
let pattern = format!("{}/**/*.xl*", folder);
let mut filecount = 0;
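// Build the report file name from the search pattern: keep everything before
// the first '*', drop ':' and map path separators and spaces to '_'.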
let mut output = pattern
.chars()
.take_while(|c| *c != '*')
.filter_map(|c| match c {
':' => None,
'/' | '\\' | ' ' => Some('_'),
c => Some(c),
})
.collect::<String>();
output.push_str("_errors.csv");
let mut output = BufWriter::new(File::create(output).unwrap());
for f in glob(&pattern).expect(
"Failed to read excel glob,\
the first argument must correspond to a directory",
) {
filecount += 1;
match run(f) {
Ok((f, missing, cell_errors)) => {
writeln!(output, "{:?}~{:?}~{}", f, missing, cell_errors)
}
Err(e) => writeln!(output, "{:?}", e),
}
.unwrap_or_else(|e| println!("{:?}", e))
}
println!("Found {} excel files", filecount);
}
fn | (f: GlobResult) -> Result<(PathBuf, Option<usize>, usize), FileStatus> {
let f = f.map_err(FileStatus::Glob)?;
println!("Analysing {:?}", f.display());
let mut xl = open_workbook_auto(&f).unwrap();
let mut missing = None;
let mut cell_errors = 0;
match xl.vba_project() {
Some(Ok(vba)) => {
missing = Some(
vba.get_references()
.iter()
.filter(|r| r.is_missing())
.count(),
);
}
Some(Err(e)) => return Err(FileStatus::VbaError(e)),
None => (),
}
// get owned sheet names
let sheets = xl.sheet_names().to_owned();
for s in sheets {
let range = xl
.worksheet_range(&s)
.unwrap()
.map_err(FileStatus::RangeError)?;
cell_errors += range
.rows()
.flat_map(|r| {
r.iter().filter(|c| {
if let DataType::Error(_) = **c {
true
} else {
false
}
})
})
.count();
}
Ok((f, missing, cell_errors))
}
| run | identifier_name |
search_errors.rs | use std::env;
use std::fs::File;
use std::io::{BufWriter, Write};
use std::path::PathBuf;
use calamine::{open_workbook_auto, DataType, Error, Reader};
use glob::{glob, GlobError, GlobResult};
#[derive(Debug)]
enum FileStatus {
VbaError(Error),
RangeError(Error),
Glob(GlobError),
} | // Output statistics: nb broken references, nb broken cells etc...
let folder = env::args().nth(1).unwrap_or_else(|| ".".to_string());
let pattern = format!("{}/**/*.xl*", folder);
let mut filecount = 0;
let mut output = pattern
.chars()
.take_while(|c| *c != '*')
.filter_map(|c| match c {
':' => None,
'/' | '\\' | ' ' => Some('_'),
c => Some(c),
})
.collect::<String>();
output.push_str("_errors.csv");
let mut output = BufWriter::new(File::create(output).unwrap());
for f in glob(&pattern).expect(
"Failed to read excel glob,\
the first argument must correspond to a directory",
) {
filecount += 1;
match run(f) {
Ok((f, missing, cell_errors)) => {
writeln!(output, "{:?}~{:?}~{}", f, missing, cell_errors)
}
Err(e) => writeln!(output, "{:?}", e),
}
.unwrap_or_else(|e| println!("{:?}", e))
}
println!("Found {} excel files", filecount);
}
fn run(f: GlobResult) -> Result<(PathBuf, Option<usize>, usize), FileStatus> {
let f = f.map_err(FileStatus::Glob)?;
println!("Analysing {:?}", f.display());
let mut xl = open_workbook_auto(&f).unwrap();
let mut missing = None;
let mut cell_errors = 0;
match xl.vba_project() {
Some(Ok(vba)) => {
missing = Some(
vba.get_references()
.iter()
.filter(|r| r.is_missing())
.count(),
);
}
Some(Err(e)) => return Err(FileStatus::VbaError(e)),
None => (),
}
// get owned sheet names
let sheets = xl.sheet_names().to_owned();
for s in sheets {
let range = xl
.worksheet_range(&s)
.unwrap()
.map_err(FileStatus::RangeError)?;
cell_errors += range
.rows()
.flat_map(|r| {
r.iter().filter(|c| {
if let DataType::Error(_) = **c {
true
} else {
false
}
})
})
.count();
}
Ok((f, missing, cell_errors))
} |
fn main() {
// Search recursively for all excel files matching argument pattern | random_line_split |
webhooks.rs | use std::io::Read;
use crypto::hmac::Hmac;
use crypto::mac::Mac;
use crypto::mac::MacResult;
use crypto::sha1::Sha1;
use hex::FromHex;
use iron;
use serde_json;
use DB_POOL;
use builds;
use config::CONFIG;
use error::DashResult;
use github::models::{CommentFromJson, IssueFromJson, PullRequestFromJson};
use github::{handle_comment, handle_issue, handle_pr};
/// signature for request
/// see [this document](https://developer.github.com/webhooks/securing/) for more information
header! {(XHubSignature, "X-Hub-Signature") => [String]}
/// name of Github event
/// see [this document](https://developer.github.com/webhooks/#events) for available types
header! {(XGithubEvent, "X-Github-Event") => [String]}
/// unique id for each delivery
header! {(XGithubDelivery, "X-Github-Delivery") => [String]}
pub fn handler(req: &mut iron::Request) -> iron::IronResult<iron::Response> {
match inner_handler(req) {
Ok(()) => (),
Err(why) => error!("Error processing webhook: {:?}", why),
}
Ok(iron::Response::with((iron::status::Ok, "ok")))
}
fn inner_handler(req: &mut iron::Request) -> DashResult<()> {
if let (Some(&XGithubEvent(ref event_name)),
Some(&XGithubDelivery(ref delivery_id)),
Some(&XHubSignature(ref signature))) =
(req.headers.get::<XGithubEvent>(),
req.headers.get::<XGithubDelivery>(),
req.headers.get::<XHubSignature>()) {
// unfortunately we need to read untrusted input before authenticating
// b/c we need to sha the request payload
let mut body = String::new();
req.body.read_to_string(&mut body)?;
let mut authenticated = false;
for secret in &CONFIG.github_webhook_secrets {
if authenticate(secret, &body, signature) {
// once we know it's from github, we'll parse it
authenticated = true;
let payload = parse_event(event_name, &body)?;
let full_event = Event {
delivery_id: delivery_id.to_owned(),
event_name: event_name.to_owned(),
payload: payload,
};
info!("Received valid webhook ({} id {})",
full_event.event_name,
full_event.delivery_id);
authenticated_handler(full_event)?;
break;
}
}
if !authenticated {
warn!("Received invalid webhook: {:?}", req);
}
}
Ok(())
}
fn authenticate(secret: &str, payload: &str, signature: &str) -> bool {
// https://developer.github.com/webhooks/securing/#validating-payloads-from-github
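// The signature header has the form "sha1=<40 hex chars>"; [5..] below skips
// the "sha1=" prefix so only the hex digest is decoded and compared.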
let sans_prefix = signature[5..].as_bytes();
match Vec::from_hex(sans_prefix) {
Ok(sigbytes) => {
let mut mac = Hmac::new(Sha1::new(), secret.as_bytes());
mac.input(payload.as_bytes());
// constant time comparison
mac.result() == MacResult::new(&sigbytes)
}
Err(_) => false,
}
}
fn parse_event(event_name: &str, body: &str) -> DashResult<Payload> | "public" |
"pull_request_review_comment" |
"pull_request_review" |
"push" |
"repository" |
"release" |
"team" |
"team_add" |
"watch" => {
info!("Received {} event, ignoring...", event_name);
Ok(Payload::Unsupported)
}
_ => {
warn!("Received unrecognized event {}, check GitHub's API to see what's updated.",
event_name);
Ok(Payload::Unsupported)
}
}
}
fn authenticated_handler(event: Event) -> DashResult<()> {
let conn = &*DB_POOL.get()?;
match event.payload {
Payload::Issues(issue_event) => {
handle_issue(conn, issue_event.issue, &issue_event.repository.full_name)?;
}
Payload::PullRequest(pr_event) => {
handle_pr(conn, pr_event.pull_request, &pr_event.repository.full_name)?;
}
Payload::IssueComment(comment_event) => {
// possible race conditions if we get a comment hook before the issue one (or we
// missed the issue one), so make sure the issue exists first
if comment_event.action != "deleted" {
// TODO handle deleted comments properly
handle_issue(conn, comment_event.issue, &comment_event.repository.full_name)?;
handle_comment(conn, comment_event.comment, &comment_event.repository.full_name)?;
}
}
Payload::Status(status_event) => {
if status_event.state != "pending" &&
status_event.commit.committer.login == "bors"
{
if let Some(url) = status_event.target_url {
builds::ingest_status_event(url)?
}
}
},
Payload::Unsupported => (),
}
Ok(())
}
#[derive(Debug)]
struct Event {
delivery_id: String,
event_name: String,
payload: Payload,
}
#[derive(Debug)]
enum Payload {
Issues(IssuesEvent),
IssueComment(IssueCommentEvent),
PullRequest(PullRequestEvent),
Status(StatusEvent),
Unsupported,
}
#[derive(Debug, Deserialize)]
struct IssuesEvent {
action: String,
issue: IssueFromJson,
repository: Repository,
}
#[derive(Debug, Deserialize)]
struct IssueCommentEvent {
action: String,
issue: IssueFromJson,
repository: Repository,
comment: CommentFromJson,
}
#[derive(Debug, Deserialize)]
struct PullRequestEvent {
action: String,
repository: Repository,
number: i32,
pull_request: PullRequestFromJson,
}
#[derive(Debug, Deserialize)]
struct Repository {
full_name: String,
}
#[derive(Debug, Deserialize)]
struct StatusEvent {
commit: Commit,
state: String,
target_url: Option<String>,
}
#[derive(Debug, Deserialize)]
struct Commit {
committer: Committer,
}
#[derive(Debug, Deserialize)]
struct Committer {
login: String,
}
| {
match event_name {
"issue_comment" => Ok(Payload::IssueComment(serde_json::from_str(body)?)),
"issues" => Ok(Payload::Issues(serde_json::from_str(body)?)),
"pull_request" => Ok(Payload::PullRequest(serde_json::from_str(body)?)),
"status" => Ok(Payload::Status(serde_json::from_str(body)?)),
"commit_comment" |
"create" |
"delete" |
"deployment" |
"deployment_status" |
"fork" |
"gollum" |
"label" |
"member" |
"membership" |
"milestone" |
"organization" |
"page_build" | | identifier_body |
webhooks.rs | use std::io::Read;
use crypto::hmac::Hmac;
use crypto::mac::Mac;
use crypto::mac::MacResult;
use crypto::sha1::Sha1;
use hex::FromHex;
use iron;
use serde_json;
use DB_POOL;
use builds;
use config::CONFIG;
use error::DashResult;
use github::models::{CommentFromJson, IssueFromJson, PullRequestFromJson};
use github::{handle_comment, handle_issue, handle_pr};
/// signature for request
/// see [this document](https://developer.github.com/webhooks/securing/) for more information
header! {(XHubSignature, "X-Hub-Signature") => [String]}
/// name of Github event
/// see [this document](https://developer.github.com/webhooks/#events) for available types
header! {(XGithubEvent, "X-Github-Event") => [String]}
/// unique id for each delivery
header! {(XGithubDelivery, "X-Github-Delivery") => [String]}
pub fn handler(req: &mut iron::Request) -> iron::IronResult<iron::Response> {
match inner_handler(req) {
Ok(()) => (),
Err(why) => error!("Error processing webhook: {:?}", why),
}
Ok(iron::Response::with((iron::status::Ok, "ok")))
}
fn inner_handler(req: &mut iron::Request) -> DashResult<()> {
if let (Some(&XGithubEvent(ref event_name)),
Some(&XGithubDelivery(ref delivery_id)),
Some(&XHubSignature(ref signature))) =
(req.headers.get::<XGithubEvent>(),
req.headers.get::<XGithubDelivery>(),
req.headers.get::<XHubSignature>()) {
// unfortunately we need to read untrusted input before authenticating
// b/c we need to sha the request payload
let mut body = String::new();
req.body.read_to_string(&mut body)?;
let mut authenticated = false;
for secret in &CONFIG.github_webhook_secrets {
if authenticate(secret, &body, signature) {
// once we know it's from github, we'll parse it
authenticated = true;
let payload = parse_event(event_name, &body)?;
let full_event = Event {
delivery_id: delivery_id.to_owned(),
event_name: event_name.to_owned(),
payload: payload,
};
info!("Received valid webhook ({} id {})",
full_event.event_name,
full_event.delivery_id);
authenticated_handler(full_event)?;
break;
}
}
if !authenticated {
warn!("Received invalid webhook: {:?}", req);
}
}
Ok(())
}
fn authenticate(secret: &str, payload: &str, signature: &str) -> bool {
// https://developer.github.com/webhooks/securing/#validating-payloads-from-github
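// The signature header has the form "sha1=<40 hex chars>"; [5..] below skips
// the "sha1=" prefix so only the hex digest is decoded and compared.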
let sans_prefix = signature[5..].as_bytes();
match Vec::from_hex(sans_prefix) {
Ok(sigbytes) => {
let mut mac = Hmac::new(Sha1::new(), secret.as_bytes());
mac.input(payload.as_bytes());
// constant time comparison
mac.result() == MacResult::new(&sigbytes)
}
Err(_) => false,
}
}
fn parse_event(event_name: &str, body: &str) -> DashResult<Payload> {
match event_name {
"issue_comment" => Ok(Payload::IssueComment(serde_json::from_str(body)?)),
"issues" => Ok(Payload::Issues(serde_json::from_str(body)?)),
"pull_request" => Ok(Payload::PullRequest(serde_json::from_str(body)?)),
"status" => Ok(Payload::Status(serde_json::from_str(body)?)),
"commit_comment" |
"create" |
"delete" |
"deployment" |
"deployment_status" |
"fork" |
"gollum" |
"label" |
"member" |
"membership" |
"milestone" |
"organization" |
"page_build" |
"public" |
"pull_request_review_comment" |
"pull_request_review" |
"push" |
"repository" |
"release" |
"team" |
"team_add" |
"watch" => {
info!("Received {} event, ignoring...", event_name);
Ok(Payload::Unsupported)
}
_ => {
warn!("Received unrecognized event {}, check GitHub's API to see what's updated.",
event_name);
Ok(Payload::Unsupported)
}
}
}
fn authenticated_handler(event: Event) -> DashResult<()> {
let conn = &*DB_POOL.get()?;
match event.payload {
Payload::Issues(issue_event) => {
handle_issue(conn, issue_event.issue, &issue_event.repository.full_name)?;
}
Payload::PullRequest(pr_event) => {
handle_pr(conn, pr_event.pull_request, &pr_event.repository.full_name)?;
}
Payload::IssueComment(comment_event) => {
// possible race conditions if we get a comment hook before the issue one (or we
// missed the issue one), so make sure the issue exists first
if comment_event.action != "deleted" {
// TODO handle deleted comments properly
handle_issue(conn, comment_event.issue, &comment_event.repository.full_name)?;
handle_comment(conn, comment_event.comment, &comment_event.repository.full_name)?;
}
}
Payload::Status(status_event) => {
if status_event.state != "pending" &&
status_event.commit.committer.login == "bors"
{
if let Some(url) = status_event.target_url {
builds::ingest_status_event(url)?
}
}
},
Payload::Unsupported => (),
}
Ok(())
}
#[derive(Debug)]
struct Event {
delivery_id: String,
event_name: String,
payload: Payload,
}
#[derive(Debug)]
enum Payload {
Issues(IssuesEvent),
IssueComment(IssueCommentEvent),
PullRequest(PullRequestEvent),
Status(StatusEvent),
Unsupported,
}
#[derive(Debug, Deserialize)]
struct IssuesEvent {
action: String,
issue: IssueFromJson,
repository: Repository,
}
#[derive(Debug, Deserialize)]
struct | {
action: String,
issue: IssueFromJson,
repository: Repository,
comment: CommentFromJson,
}
#[derive(Debug, Deserialize)]
struct PullRequestEvent {
action: String,
repository: Repository,
number: i32,
pull_request: PullRequestFromJson,
}
#[derive(Debug, Deserialize)]
struct Repository {
full_name: String,
}
#[derive(Debug, Deserialize)]
struct StatusEvent {
commit: Commit,
state: String,
target_url: Option<String>,
}
#[derive(Debug, Deserialize)]
struct Commit {
committer: Committer,
}
#[derive(Debug, Deserialize)]
struct Committer {
login: String,
}
| IssueCommentEvent | identifier_name |
webhooks.rs | use std::io::Read;
use crypto::hmac::Hmac;
use crypto::mac::Mac;
use crypto::mac::MacResult;
use crypto::sha1::Sha1;
use hex::FromHex;
use iron;
use serde_json;
use DB_POOL;
use builds;
use config::CONFIG;
use error::DashResult;
use github::models::{CommentFromJson, IssueFromJson, PullRequestFromJson};
use github::{handle_comment, handle_issue, handle_pr};
/// signature for request
/// see [this document](https://developer.github.com/webhooks/securing/) for more information
header! {(XHubSignature, "X-Hub-Signature") => [String]}
/// name of Github event
/// see [this document](https://developer.github.com/webhooks/#events) for available types
header! {(XGithubEvent, "X-Github-Event") => [String]}
/// unique id for each delivery
header! {(XGithubDelivery, "X-Github-Delivery") => [String]}
pub fn handler(req: &mut iron::Request) -> iron::IronResult<iron::Response> {
match inner_handler(req) {
Ok(()) => (),
Err(why) => error!("Error processing webhook: {:?}", why),
}
Ok(iron::Response::with((iron::status::Ok, "ok")))
}
fn inner_handler(req: &mut iron::Request) -> DashResult<()> {
if let (Some(&XGithubEvent(ref event_name)),
Some(&XGithubDelivery(ref delivery_id)),
Some(&XHubSignature(ref signature))) =
(req.headers.get::<XGithubEvent>(),
req.headers.get::<XGithubDelivery>(),
req.headers.get::<XHubSignature>()) {
// unfortunately we need to read untrusted input before authenticating
// b/c we need to sha the request payload
let mut body = String::new();
req.body.read_to_string(&mut body)?;
let mut authenticated = false;
for secret in &CONFIG.github_webhook_secrets {
if authenticate(secret, &body, signature) {
// once we know it's from github, we'll parse it
authenticated = true;
let payload = parse_event(event_name, &body)?;
let full_event = Event {
delivery_id: delivery_id.to_owned(),
event_name: event_name.to_owned(),
payload: payload,
};
info!("Received valid webhook ({} id {})",
full_event.event_name,
full_event.delivery_id);
authenticated_handler(full_event)?;
break;
}
}
if !authenticated {
warn!("Received invalid webhook: {:?}", req);
}
}
Ok(())
}
fn authenticate(secret: &str, payload: &str, signature: &str) -> bool {
// https://developer.github.com/webhooks/securing/#validating-payloads-from-github
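// The signature header has the form "sha1=<40 hex chars>"; [5..] below skips
// the "sha1=" prefix so only the hex digest is decoded and compared.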
let sans_prefix = signature[5..].as_bytes();
match Vec::from_hex(sans_prefix) {
Ok(sigbytes) => {
let mut mac = Hmac::new(Sha1::new(), secret.as_bytes());
mac.input(payload.as_bytes());
// constant time comparison
mac.result() == MacResult::new(&sigbytes)
}
Err(_) => false,
}
}
fn parse_event(event_name: &str, body: &str) -> DashResult<Payload> {
match event_name {
"issue_comment" => Ok(Payload::IssueComment(serde_json::from_str(body)?)),
"issues" => Ok(Payload::Issues(serde_json::from_str(body)?)),
"pull_request" => Ok(Payload::PullRequest(serde_json::from_str(body)?)),
"status" => Ok(Payload::Status(serde_json::from_str(body)?)),
"commit_comment" |
"create" |
"delete" |
"deployment" |
"deployment_status" |
"fork" |
"gollum" |
"label" |
"member" |
"membership" |
"milestone" |
"organization" |
"page_build" |
"public" |
"pull_request_review_comment" |
"pull_request_review" |
"push" |
"repository" |
"release" |
"team" |
"team_add" |
"watch" => {
info!("Received {} event, ignoring...", event_name);
Ok(Payload::Unsupported)
}
_ => {
warn!("Received unrecognized event {}, check GitHub's API to see what's updated.",
event_name);
Ok(Payload::Unsupported)
}
}
}
fn authenticated_handler(event: Event) -> DashResult<()> {
let conn = &*DB_POOL.get()?;
match event.payload {
Payload::Issues(issue_event) => {
handle_issue(conn, issue_event.issue, &issue_event.repository.full_name)?;
}
Payload::PullRequest(pr_event) => {
handle_pr(conn, pr_event.pull_request, &pr_event.repository.full_name)?;
}
Payload::IssueComment(comment_event) => {
// possible race conditions if we get a comment hook before the issue one (or we
// missed the issue one), so make sure the issue exists first
if comment_event.action != "deleted" {
// TODO handle deleted comments properly
handle_issue(conn, comment_event.issue, &comment_event.repository.full_name)?;
handle_comment(conn, comment_event.comment, &comment_event.repository.full_name)?;
}
}
Payload::Status(status_event) => {
if status_event.state != "pending" &&
status_event.commit.committer.login == "bors"
|
},
Payload::Unsupported => (),
}
Ok(())
}
#[derive(Debug)]
struct Event {
delivery_id: String,
event_name: String,
payload: Payload,
}
#[derive(Debug)]
enum Payload {
Issues(IssuesEvent),
IssueComment(IssueCommentEvent),
PullRequest(PullRequestEvent),
Status(StatusEvent),
Unsupported,
}
#[derive(Debug, Deserialize)]
struct IssuesEvent {
action: String,
issue: IssueFromJson,
repository: Repository,
}
#[derive(Debug, Deserialize)]
struct IssueCommentEvent {
action: String,
issue: IssueFromJson,
repository: Repository,
comment: CommentFromJson,
}
#[derive(Debug, Deserialize)]
struct PullRequestEvent {
action: String,
repository: Repository,
number: i32,
pull_request: PullRequestFromJson,
}
#[derive(Debug, Deserialize)]
struct Repository {
full_name: String,
}
#[derive(Debug, Deserialize)]
struct StatusEvent {
commit: Commit,
state: String,
target_url: Option<String>,
}
#[derive(Debug, Deserialize)]
struct Commit {
committer: Committer,
}
#[derive(Debug, Deserialize)]
struct Committer {
login: String,
}
| {
if let Some(url) = status_event.target_url {
builds::ingest_status_event(url)?
}
} | conditional_block |
webhooks.rs | use std::io::Read;
use crypto::hmac::Hmac;
use crypto::mac::Mac;
use crypto::mac::MacResult;
use crypto::sha1::Sha1;
use hex::FromHex;
use iron;
use serde_json;
use DB_POOL;
use builds;
use config::CONFIG;
use error::DashResult;
use github::models::{CommentFromJson, IssueFromJson, PullRequestFromJson};
use github::{handle_comment, handle_issue, handle_pr};
/// signature for request
/// see [this document](https://developer.github.com/webhooks/securing/) for more information
header! {(XHubSignature, "X-Hub-Signature") => [String]}
/// name of Github event
/// see [this document](https://developer.github.com/webhooks/#events) for available types
header! {(XGithubEvent, "X-Github-Event") => [String]}
/// unique id for each delivery
header! {(XGithubDelivery, "X-Github-Delivery") => [String]}
pub fn handler(req: &mut iron::Request) -> iron::IronResult<iron::Response> {
match inner_handler(req) {
Ok(()) => (),
Err(why) => error!("Error processing webhook: {:?}", why),
}
Ok(iron::Response::with((iron::status::Ok, "ok")))
}
fn inner_handler(req: &mut iron::Request) -> DashResult<()> {
if let (Some(&XGithubEvent(ref event_name)),
Some(&XGithubDelivery(ref delivery_id)),
Some(&XHubSignature(ref signature))) =
(req.headers.get::<XGithubEvent>(),
req.headers.get::<XGithubDelivery>(),
req.headers.get::<XHubSignature>()) {
// unfortunately we need to read untrusted input before authenticating
// b/c we need to sha the request payload
let mut body = String::new();
req.body.read_to_string(&mut body)?;
let mut authenticated = false;
for secret in &CONFIG.github_webhook_secrets {
if authenticate(secret, &body, signature) {
// once we know it's from github, we'll parse it
authenticated = true;
let payload = parse_event(event_name, &body)?;
let full_event = Event {
delivery_id: delivery_id.to_owned(),
event_name: event_name.to_owned(),
payload: payload,
};
info!("Received valid webhook ({} id {})",
full_event.event_name,
full_event.delivery_id);
authenticated_handler(full_event)?;
break;
}
}
if !authenticated {
warn!("Received invalid webhook: {:?}", req);
}
}
| }
fn authenticate(secret: &str, payload: &str, signature: &str) -> bool {
// https://developer.github.com/webhooks/securing/#validating-payloads-from-github
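// The signature header has the form "sha1=<40 hex chars>"; [5..] below skips
// the "sha1=" prefix so only the hex digest is decoded and compared.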
let sans_prefix = signature[5..].as_bytes();
match Vec::from_hex(sans_prefix) {
Ok(sigbytes) => {
let mut mac = Hmac::new(Sha1::new(), secret.as_bytes());
mac.input(payload.as_bytes());
// constant time comparison
mac.result() == MacResult::new(&sigbytes)
}
Err(_) => false,
}
}
fn parse_event(event_name: &str, body: &str) -> DashResult<Payload> {
match event_name {
"issue_comment" => Ok(Payload::IssueComment(serde_json::from_str(body)?)),
"issues" => Ok(Payload::Issues(serde_json::from_str(body)?)),
"pull_request" => Ok(Payload::PullRequest(serde_json::from_str(body)?)),
"status" => Ok(Payload::Status(serde_json::from_str(body)?)),
"commit_comment" |
"create" |
"delete" |
"deployment" |
"deployment_status" |
"fork" |
"gollum" |
"label" |
"member" |
"membership" |
"milestone" |
"organization" |
"page_build" |
"public" |
"pull_request_review_comment" |
"pull_request_review" |
"push" |
"repository" |
"release" |
"team" |
"team_add" |
"watch" => {
info!("Received {} event, ignoring...", event_name);
Ok(Payload::Unsupported)
}
_ => {
warn!("Received unrecognized event {}, check GitHub's API to see what's updated.",
event_name);
Ok(Payload::Unsupported)
}
}
}
fn authenticated_handler(event: Event) -> DashResult<()> {
let conn = &*DB_POOL.get()?;
match event.payload {
Payload::Issues(issue_event) => {
handle_issue(conn, issue_event.issue, &issue_event.repository.full_name)?;
}
Payload::PullRequest(pr_event) => {
handle_pr(conn, pr_event.pull_request, &pr_event.repository.full_name)?;
}
Payload::IssueComment(comment_event) => {
// possible race conditions if we get a comment hook before the issue one (or we
// missed the issue one), so make sure the issue exists first
if comment_event.action != "deleted" {
// TODO handle deleted comments properly
handle_issue(conn, comment_event.issue, &comment_event.repository.full_name)?;
handle_comment(conn, comment_event.comment, &comment_event.repository.full_name)?;
}
}
Payload::Status(status_event) => {
if status_event.state != "pending" &&
status_event.commit.committer.login == "bors"
{
if let Some(url) = status_event.target_url {
builds::ingest_status_event(url)?
}
}
},
Payload::Unsupported => (),
}
Ok(())
}
#[derive(Debug)]
struct Event {
delivery_id: String,
event_name: String,
payload: Payload,
}
#[derive(Debug)]
enum Payload {
Issues(IssuesEvent),
IssueComment(IssueCommentEvent),
PullRequest(PullRequestEvent),
Status(StatusEvent),
Unsupported,
}
#[derive(Debug, Deserialize)]
struct IssuesEvent {
action: String,
issue: IssueFromJson,
repository: Repository,
}
#[derive(Debug, Deserialize)]
struct IssueCommentEvent {
action: String,
issue: IssueFromJson,
repository: Repository,
comment: CommentFromJson,
}
#[derive(Debug, Deserialize)]
struct PullRequestEvent {
action: String,
repository: Repository,
number: i32,
pull_request: PullRequestFromJson,
}
#[derive(Debug, Deserialize)]
struct Repository {
full_name: String,
}
#[derive(Debug, Deserialize)]
struct StatusEvent {
commit: Commit,
state: String,
target_url: Option<String>,
}
#[derive(Debug, Deserialize)]
struct Commit {
committer: Committer,
}
#[derive(Debug, Deserialize)]
struct Committer {
login: String,
} | Ok(()) | random_line_split |
test.rs | use intern::intern;
use grammar::repr::*;
use test_util::{normalized_grammar};
use super::lalr_states;
use super::super::interpret::interpret;
fn | (t: &str) -> NonterminalString {
NonterminalString(intern(t))
}
macro_rules! tokens {
($($x:expr),*) => {
vec![$(TerminalString::Quoted(intern($x))),*].into_iter()
}
}
#[test]
fn figure9_23() {
let grammar = normalized_grammar(r#"
grammar;
extern { enum Tok { } }
S: () = E => ();
E: () = {
E "-" T => ();
T => ();
};
T: () = {
"N" => ();
"(" E ")" => ();
};
"#);
let states = lalr_states(&grammar, nt("S")).unwrap();
println!("{:#?}", states);
let tree = interpret(&states, tokens!["N", "-", "(", "N", "-", "N", ")"]).unwrap();
assert_eq!(
&format!("{:?}", tree)[..],
r#"[S: [E: [E: [T: "N"]], "-", [T: "(", [E: [E: [T: "N"]], "-", [T: "N"]], ")"]]]"#);
}
| nt | identifier_name |
test.rs | use intern::intern;
use grammar::repr::*;
use test_util::{normalized_grammar};
use super::lalr_states;
use super::super::interpret::interpret;
fn nt(t: &str) -> NonterminalString |
macro_rules! tokens {
($($x:expr),*) => {
vec![$(TerminalString::Quoted(intern($x))),*].into_iter()
}
}
#[test]
fn figure9_23() {
let grammar = normalized_grammar(r#"
grammar;
extern { enum Tok { } }
S: () = E => ();
E: () = {
E "-" T => ();
T => ();
};
T: () = {
"N" => ();
"(" E ")" => ();
};
"#);
let states = lalr_states(&grammar, nt("S")).unwrap();
println!("{:#?}", states);
let tree = interpret(&states, tokens!["N", "-", "(", "N", "-", "N", ")"]).unwrap();
assert_eq!(
&format!("{:?}", tree)[..],
r#"[S: [E: [E: [T: "N"]], "-", [T: "(", [E: [E: [T: "N"]], "-", [T: "N"]], ")"]]]"#);
}
| {
NonterminalString(intern(t))
} | identifier_body |
test.rs | use intern::intern;
use grammar::repr::*;
use test_util::{normalized_grammar};
use super::lalr_states;
use super::super::interpret::interpret;
fn nt(t: &str) -> NonterminalString {
NonterminalString(intern(t))
}
macro_rules! tokens {
($($x:expr),*) => {
vec![$(TerminalString::Quoted(intern($x))),*].into_iter()
}
}
#[test]
fn figure9_23() {
let grammar = normalized_grammar(r#"
grammar; | E "-" T => ();
T => ();
};
T: () = {
"N" => ();
"(" E ")" => ();
};
"#);
let states = lalr_states(&grammar, nt("S")).unwrap();
println!("{:#?}", states);
let tree = interpret(&states, tokens!["N", "-", "(", "N", "-", "N", ")"]).unwrap();
assert_eq!(
&format!("{:?}", tree)[..],
r#"[S: [E: [E: [T: "N"]], "-", [T: "(", [E: [E: [T: "N"]], "-", [T: "N"]], ")"]]]"#);
} | extern { enum Tok { } }
S: () = E => ();
E: () = { | random_line_split |
lib.rs | #![warn(rust_2018_idioms, missing_debug_implementations, missing_docs)]
//! `nib` provides useful abstractions for working with data that is described/structure in 4-bit
//! chunks.
//!
//!
//! ```
//! use quartet::NibSlice;
//! let nib_slice = NibSlice::from_bytes_skip_last(&[0x12, 0x34, 0x50]);
//! assert_eq!(nib_slice.index(1), 2);
//! ```
//use std::fmt;
use std::ops;
/// A slice (`[T]`) over nibs (4-bit values)
///
/// Internally, it operates on an array of bytes, and interprets them as pairs of nibs. This is
/// intended to allow use of a `NibSlice` to examine binary structures that are composed of nibs.
///
/// For each byte, the nibble composed of the lower bits (mask = `0x0f`) is considered to come
/// before the nibble composed of the higher bits (mask = `0xf0`).
#[derive(Clone, Copy, Debug)]
pub struct NibSlice<'a> {
exclude: Exclude,
inner: &'a [u8],
}
impl<'a> NibSlice<'a> {
/// Create a [`NibSlice`] from a slice of bytes and whether to exclude either end of the slice
pub fn from_bytes_exclude(inner: &'a [u8], exclude: Exclude) -> Self {
if inner.len() == 0 {
assert_eq!(exclude, Exclude::None_);
}
if inner.len() == 1 {
assert_ne!(exclude, Exclude::Both);
}
Self {
inner,
exclude,
}
}
/// Create a [`NibSlice`] from a slice of bytes, excluding the last nibble in the slice
pub fn from_bytes_skip_last(inner: &'a [u8]) -> Self {
Self::from_bytes_exclude(inner, Exclude::Last)
}
/// Create a [`NibSlice`] from a slice of bytes, including all nibbles in the given bytes
///
/// The resulting [`NibSlice`] will have `.len()` equal to `2 * inner.len()`.
pub fn from_bytes(inner: &'a [u8]) -> Self {
Self::from_bytes_exclude(inner, Exclude::None_)
}
/// The number of nibbles in the [`NibSlice`]
pub fn len(&self) -> usize {
self.inner.len() * 2 - self.exclude.len_excluded()
}
/// Split the [`NibSlice`] into 2 [`NibSlice`]s at the nibble offset given
pub fn split_at(&self, _offset: usize) -> (NibSlice<'a>, NibSlice<'a>) {
unimplemented!()
}
/// Index, using various ranges, a `NibSlice` into `NibSlice`s that are sub-slices
///
/// # Examples
///
///
/// ```
/// use quartet::{NibSlice, Exclude};
/// let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34, 0x56], Exclude::Both);
/// let n = ns.index(2..4);
///
/// assert_eq!(n, NibSlice::from_bytes(&[0x45]));
/// ```
pub fn index<S: SliceIndex<'a>>(&self, idx: S) -> S::Output {
self.get(idx).unwrap()
}
/// Get the [`NibSlice`] refered to by the indexing value, or return `None` if index is out of
/// range
pub fn get<S: SliceIndex<'a>>(&self, idx: S) -> Option<S::Output> {
idx.get(self)
}
/// If the slice refers to a single nibble, return that nibble as a byte. Panic if slice does
/// not have exactly one nibble
///
/// # Examples
///
/// ```
/// use quartet::{NibSlice, Exclude};
///
/// let orig_s = NibSlice::from_bytes_exclude(&[0x02, 0x34], Exclude::First);
/// let nib_s = orig_s.index(..1);
/// assert_eq!(nib_s.len(), 1);
///
/// let nib = nib_s.nibble();
///
/// assert_eq!(nib, 0x2);
/// ```
pub fn nibble(&self) -> u8 {
self.try_nibble().unwrap()
}
/// If the slice refers to a single nibble, return that nibble as a byte. Return None if the
/// slice does not have exactly one nibble
///
/// # Examples
///
/// ```
/// use quartet::{NibSlice, Exclude};
/// let orig_s = NibSlice::from_bytes_exclude(&[0x02, 0x34], Exclude::First);
/// let nib_s = orig_s.index(..1);
/// let nib = nib_s.try_nibble();
/// assert_eq!(nib, Some(0x2));
///
/// // more than 1 nibble
/// assert_eq!(orig_s.index(1..3).try_nibble(), None);
/// ```
pub fn try_nibble(&self) -> Option<u8> {
if self.len() != 1 {
return None
}
let b = self.inner[0];
Some(match self.exclude {
Exclude::First => { b & 0xf },
Exclude::Last => { b >> 4 },
_ => panic!(),
})
}
/// Create an iterator over the [`NibSlice`], where each item is a nibble
pub fn iter(&self) -> Iter<'a> {
Iter { inner: *self }
}
/// Decompose the [`NibSlice`] into byte-oriented parts
///
/// The first and last members of the tuple are the non-byte aligned nibbles optionally at the
/// start and end of the [`NibSlice`]. The middle member is the byte-aligned nibbles organized
/// into bytes
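///
/// For example, `NibSlice::from_bytes_skip_last(&[0x12, 0x34]).byte_parts()`
/// yields `(None, &[0x12][..], Some(0x3))`.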
pub fn byte_parts(&self) -> (Option<u8>, &[u8], Option<u8>) {
let (rem, first) = if self.exclude.is_first_excluded() {
(&self.inner[1..], Some(self.inner[0] & 0x0f))
} else {
(self.inner, None)
};
let (rem, last) = if self.exclude.is_last_excluded() {
let l = rem.len();
(&rem[..l - 1], Some(rem[rem.len() - 1] >> 4))
} else {
(rem, None)
};
(first, rem, last)
}
}
/// Iterate over a [`NibSlice`], returning a nibble for each item
#[derive(Debug)]
pub struct Iter<'a> {
inner: NibSlice<'a>,
}
impl<'a> Iterator for Iter<'a> {
type Item = u8;
fn next(&mut self) -> Option<Self::Item> {
if self.inner.len() == 0 {
return None;
}
let v = if self.inner.exclude.is_first_excluded() {
let v = self.inner.inner[0] & 0x0f;
self.inner.inner = &self.inner.inner[1..];
self.inner.exclude = Exclude::from_excludes(false, self.inner.exclude.is_last_excluded());
v
} else {
let v = self.inner.inner[0] >> 4;
self.inner.exclude = Exclude::from_excludes(true, self.inner.exclude.is_last_excluded());
v
};
if self.inner.inner.len() == 0 {
self.inner.exclude = Exclude::None_;
}
if self.inner.inner.len() == 1 && self.inner.exclude == Exclude::Both {
self.inner.exclude = Exclude::None_;
self.inner.inner = &[];
}
Some(v)
/*
let n = self.inner.index(0);
self.inner = self.inner.index(1..);
Some(n)
*/
}
}
impl PartialEq<NibSlice<'_>> for NibSlice<'_> {
fn eq(&self, other: &NibSlice<'_>) -> bool {
let i1 = self.iter();
let i2 = other.iter();
// NOTE: performance of this (doing a nibble-based comparison via an iterator) is probably
// really bad. Ideally, we'd pick a faster method in cases where we have byte-alignment
// (ie: where both slices have the same `exclude.is_first_excluded()` value. Should really
// speed things up.
i1.eq(i2)
}
}
impl Eq for NibSlice<'_> {}
/// A helper trait used for indexing operations
///
/// This is modeled after `std::slice::SliceIndex`, which slight modification to return owned types
/// (as is required for our double-fat slice references).
pub trait SliceIndex<'a> {
/// Type returned by this indexing
type Output;
/// Returns a shared reference to the output at this location, if in bounds.
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output>;
}
// ```norust
// ab | cd | ef
// ex: .
// o:1: ^
//
// index: 2
// boffs: 1
// is_low: false
//
// ab | cd | ef
// ex: . .
// o:3: ^
//
// index: 4
// boffs: 2
// is_low: false
// ```
fn b(exclude: Exclude, offs: usize) -> (usize, bool) {
let index = offs + if exclude.is_first_excluded() { 1 } else { 0 };
let b_idx = index >> 1;
let is_low = index & 1 == 1;
(b_idx, is_low)
}
/// Decompose a nibble offset into byte oriented terms.
///
/// Returns `(byte_offset, is_low)`. `byte_offset` is a offset into a `[u8]`. `is_low` is true when
/// the `offs` refers to the lower nibble in the byte located at `byte_offset`.
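///
/// For example, `decompose_offset(Exclude::None_, 1)` is `(0, true)` (the second
/// nibble is the low nibble of byte 0), and `decompose_offset(Exclude::First, 0)`
/// is also `(0, true)`, since the excluded high nibble still occupies byte 0.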
pub fn decompose_offset(exclude: Exclude, offs: usize) -> (usize, bool) {
b(exclude, offs)
}
impl<'a> SliceIndex<'a> for usize {
type Output = u8;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
if self >= slice.len() {
return None;
}
let (b_idx, is_low) = b(slice.exclude, self);
let b = &slice.inner[b_idx];
Some(if is_low {
b & 0x0f
} else {
b >> 4
})
}
}
impl<'a> SliceIndex<'a> for ops::Range<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
eprintln!("{:?} {:#x?}", self, slice);
if self.start > self.end {
eprintln!("1: {} {}", self.start, self.end);
return None;
}
if self.end > slice.len() {
eprintln!("2: {} {}", self.end, slice.len());
return None;
}
let (b_start, exclude_first) = b(slice.exclude, self.start);
let (b_end, end_is_low) = b(slice.exclude, self.end + 1);
eprintln!("bs: {:?}, ef: {:?}, be: {:?}, eil: {:?}", b_start, exclude_first, b_end, end_is_low);
/*
let b_end = if b_start == b_end {
b_end + 1
} else {
b_end
};
*/
Some(NibSlice::from_bytes_exclude(
&slice.inner[b_start..b_end],
Exclude::from_excludes(exclude_first, !end_is_low),
))
}
}
impl<'a> SliceIndex<'a> for ops::RangeFrom<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(self.start..slice.len()).get(slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeTo<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(0..self.end).get(slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeFull {
type Output = NibSlice<'a>;
fn | (self, slice: &NibSlice<'a>) -> Option<Self::Output> {
Some(*slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeInclusive<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(*self.start()..(*self.end() + 1)).get(slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeToInclusive<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(0..self.end + 1).get(slice)
}
}
/*
impl<'a> TryFrom<NibSlice<'a> for u8 {
type Error = ();
fn try_from(value: NibSlice<'a>) -> Result<Self, Self::Error> {
if value.len() != 1 {
return Err(Self::Error);
}
match value.exclude {
}
}
}
*/
/*
impl<'a> fmt::Debug for NibSlice<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut dl = f.debug_list();
/*
for i in self.iter() {
dl.entry(&i);
}
*/
dl.finish()
}
}
*/
#[cfg(test)]
mod test_nibslice {
use super::*;
#[test]
fn build_ok() {
let ns = NibSlice::from_bytes_exclude(&[0xab, 0xcd], Exclude::Last);
assert_eq!(ns.len(), 3);
}
#[test]
fn index_single() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::First);
assert_eq!(ns.len(), 3);
assert_eq!(3, ns.index(1));
}
#[test]
#[should_panic]
fn index_oob() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
assert_eq!(ns.len(), 2);
// panic!
let n = ns.index(2);
assert_eq!(n, 0x4);
}
#[test]
fn index_range() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34, 0x56], Exclude::Both);
assert_eq!(ns.len(), 4);
let n = ns.index(1..3);
println!("{:#x?}", n);
assert_eq!(n, NibSlice::from_bytes_exclude(&[0x34], Exclude::None_));
assert_eq!(n.len(), 2);
}
#[test]
fn get_range_oob_exclude_both() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
assert_ne!(ns.len(), 4);
let x = ns.get(1..3);
assert_eq!(x, None);
}
#[test]
#[should_panic]
fn index_range_bad() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
assert_ne!(ns.len(), 4);
let _ = ns.index(1..3);
}
#[test]
fn index_2() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x04], Exclude::Both);
eprintln!("n1: {:?}", ns.len());
assert_eq!(ns.len(), 2);
let n = ns.get(1..);
eprintln!("n: {:?}", n.map(|x| x.len()));
assert_eq!(n, Some(NibSlice::from_bytes_exclude(&[0x00], Exclude::First)));
}
#[test]
fn index_to_1() {
let orig_s = NibSlice::from_bytes_exclude(&[0x02, 0x34], Exclude::First);
let nib_s = orig_s.index(..1);
assert_eq!(nib_s.len(), 1);
let nib = nib_s.nibble();
assert_eq!(nib, 0x2);
}
#[test]
fn index_middle() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34, 0x56], Exclude::Both);
let n = ns.index(2..4);
assert_eq!(n, NibSlice::from_bytes(&[0x45]));
}
#[test]
fn iter() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
let mut i = ns.iter();
assert_eq!(i.next(), Some(2));
assert_eq!(i.next(), Some(3));
assert_eq!(i.next(), None);
assert_eq!(i.next(), None);
assert_eq!(i.next(), None);
assert_eq!(i.next(), None);
}
}
/// Which nibs are excluded from the [`NibSlice`] but are included in the internal `[u8]`
// NOTE: if we want to represent general bit chunks (rather than exactly 4-bit chunks) we'd need a
// pair of offsets that hold values between 0 and 7.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Exclude {
/// the first (high) nibble in the slice is excluded
First,
/// the last (low) nibble in the slice is excluded
Last,
/// no nibbles in the byte slice are excluded
None_,
/// both the high nibble in the first byte and the low nibble in the last byte are excluded
Both,
}
impl Exclude {
/// Is the first nibble (high) excluded?
pub fn is_first_excluded(self) -> bool {
match self {
Self::First | Self::Both => true,
_ => false
}
}
/// Is the last nibble (low) excluded?
pub fn is_last_excluded(self) -> bool {
match self {
Self::Last | Self::Both => true,
_ => false
}
}
/// Number of nibbles to be excluded
pub fn len_excluded(self) -> usize {
match self {
Self::Both => 2,
Self::First | Self::Last => 1,
Self::None_ => 0
}
}
/// Given bools of what to include, generate an [`Exclude`] instance
pub fn from_includes(include_first: bool, include_last: bool) -> Self {
Self::from_excludes(!include_first, !include_last)
}
/// Given bools of what to exclude, generate an [`Exclude`] instance
pub fn from_excludes(exclude_first: bool, exclude_last: bool) -> Self {
match (exclude_first, exclude_last) {
(true, true) => Exclude::Both,
(false, true) => Exclude::Last,
(true, false) => Exclude::First,
(false, false) => Exclude::None_,
}
}
}
| get | identifier_name |
lib.rs | #![warn(rust_2018_idioms, missing_debug_implementations, missing_docs)]
//! `nib` provides useful abstractions for working with data that is described/structure in 4-bit
//! chunks.
//!
//!
//! ```
//! use quartet::NibSlice;
//! let nib_slice = NibSlice::from_bytes_skip_last(&[0x12, 0x34, 0x50]);
//! assert_eq!(nib_slice.index(1), 2);
//! ```
//use std::fmt;
use std::ops;
/// A slice (`[T]`) over nibs (4-bit values)
///
/// Internally, it operates on an array of bytes, and interprets them as pairs of nibs. This is
/// intended to allow use of a `NibSlice` to examine binary structures that are composed of nibs.
///
/// For each byte, the nibble composed of the lower bits (mask = `0x0f`) is considered to come
/// before the nibble composed of the higher bits (mask = `0xf0`).
#[derive(Clone, Copy, Debug)]
pub struct NibSlice<'a> {
exclude: Exclude,
inner: &'a [u8],
}
impl<'a> NibSlice<'a> {
/// Create a [`NibSlice`] from a slice of bytes and whether to exclude either end of the slice
pub fn from_bytes_exclude(inner: &'a [u8], exclude: Exclude) -> Self {
if inner.len() == 0 {
assert_eq!(exclude, Exclude::None_);
}
if inner.len() == 1 {
assert_ne!(exclude, Exclude::Both);
}
Self {
inner,
exclude,
}
}
/// Create a [`NibSlice`] from a slice of bytes, excluding the last nibble in the slice
pub fn from_bytes_skip_last(inner: &'a [u8]) -> Self {
Self::from_bytes_exclude(inner, Exclude::Last)
}
/// Create a [`NibSlice`] from a slice of bytes, including all nibbles in the given bytes
///
/// The resulting [`NibSlice`] will have `.len()` equal to `2 * inner.len()`.
pub fn from_bytes(inner: &'a [u8]) -> Self {
Self::from_bytes_exclude(inner, Exclude::None_)
}
/// The number of nibbles in the [`NibSlice`]
pub fn len(&self) -> usize {
self.inner.len() * 2 - self.exclude.len_excluded()
}
/// Split the [`NibSlice`] into 2 [`NibSlice`]s at the nibble offset given
pub fn split_at(&self, _offset: usize) -> (NibSlice<'a>, NibSlice<'a>) {
unimplemented!()
}
/// Index, using various ranges, a `NibSlice` into `NibSlice`s that are sub-slices
///
/// # Examples
///
///
/// ```
/// use quartet::{NibSlice, Exclude};
/// let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34, 0x56], Exclude::Both);
/// let n = ns.index(2..4);
///
/// assert_eq!(n, NibSlice::from_bytes(&[0x45]));
/// ```
pub fn index<S: SliceIndex<'a>>(&self, idx: S) -> S::Output {
self.get(idx).unwrap()
}
/// Get the [`NibSlice`] refered to by the indexing value, or return `None` if index is out of
/// range
pub fn get<S: SliceIndex<'a>>(&self, idx: S) -> Option<S::Output> {
idx.get(self)
}
/// If the slice refers to a single nibble, return that nibble as a byte. Panic if slice does
/// not have exactly one nibble
///
/// # Examples
///
/// ```
/// use quartet::{NibSlice, Exclude};
///
/// let orig_s = NibSlice::from_bytes_exclude(&[0x02, 0x34], Exclude::First);
/// let nib_s = orig_s.index(..1);
/// assert_eq!(nib_s.len(), 1);
///
/// let nib = nib_s.nibble();
///
/// assert_eq!(nib, 0x2);
/// ```
pub fn nibble(&self) -> u8 {
self.try_nibble().unwrap()
}
/// If the slice refers to a single nibble, return that nibble as a byte. Return None if the
/// slice does not have exactly one nibble
///
/// # Examples
///
/// ```
/// use quartet::{NibSlice, Exclude};
/// let orig_s = NibSlice::from_bytes_exclude(&[0x02, 0x34], Exclude::First);
/// let nib_s = orig_s.index(..1);
/// let nib = nib_s.try_nibble();
/// assert_eq!(nib, Some(0x2));
///
/// // more than 1 nibble
/// assert_eq!(orig_s.index(1..3).try_nibble(), None);
/// ```
pub fn try_nibble(&self) -> Option<u8> {
if self.len() != 1 {
return None
}
let b = self.inner[0];
Some(match self.exclude {
Exclude::First => { b & 0xf },
Exclude::Last => { b >> 4 },
_ => panic!(),
})
}
/// Create an iterator over the [`NibSlice`], where each item is a nibble
pub fn iter(&self) -> Iter<'a> {
Iter { inner: *self }
}
/// Decompose the [`NibSlice`] into byte-oriented parts
///
/// The first and last members of the tuple are the non-byte aligned nibbles optionally at the
/// start and end of the [`NibSlice`]. The middle member is the byte-aligned nibbles organized
/// into bytes
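///
/// For example, `NibSlice::from_bytes_skip_last(&[0x12, 0x34]).byte_parts()`
/// yields `(None, &[0x12][..], Some(0x3))`.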
pub fn byte_parts(&self) -> (Option<u8>, &[u8], Option<u8>) {
let (rem, first) = if self.exclude.is_first_excluded() {
(&self.inner[1..], Some(self.inner[0] & 0x0f))
} else {
(self.inner, None)
};
let (rem, last) = if self.exclude.is_last_excluded() {
let l = rem.len();
(&rem[..l - 1], Some(rem[rem.len() - 1] >> 4))
} else {
(rem, None)
};
(first, rem, last)
}
}
/// Iterate over a [`NibSlice`], returning a nibble for each item
#[derive(Debug)]
pub struct Iter<'a> {
inner: NibSlice<'a>,
}
impl<'a> Iterator for Iter<'a> {
type Item = u8;
fn next(&mut self) -> Option<Self::Item> {
if self.inner.len() == 0 {
return None;
}
let v = if self.inner.exclude.is_first_excluded() | else {
let v = self.inner.inner[0] >> 4;
self.inner.exclude = Exclude::from_excludes(true, self.inner.exclude.is_last_excluded());
v
};
if self.inner.inner.len() == 0 {
self.inner.exclude = Exclude::None_;
}
if self.inner.inner.len() == 1 && self.inner.exclude == Exclude::Both {
self.inner.exclude = Exclude::None_;
self.inner.inner = &[];
}
Some(v)
/*
let n = self.inner.index(0);
self.inner = self.inner.index(1..);
Some(n)
*/
}
}
impl PartialEq<NibSlice<'_>> for NibSlice<'_> {
fn eq(&self, other: &NibSlice<'_>) -> bool {
let i1 = self.iter();
let i2 = other.iter();
// NOTE: performance of this (doing a nibble-based comparison via an iterator) is probably
// really bad. Ideally, we'd pick a faster method in cases where we have byte-alignment
// (ie: where both slices have the same `exclude.is_first_excluded()` value. Should really
// speed things up.
i1.eq(i2)
}
}
impl Eq for NibSlice<'_> {}
/// A helper trait used for indexing operations
///
/// This is modeled after `std::slice::SliceIndex`, which slight modification to return owned types
/// (as is required for our double-fat slice references).
pub trait SliceIndex<'a> {
/// Type returned by this indexing
type Output;
/// Returns a shared reference to the output at this location, if in bounds.
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output>;
}
// ```norust
// ab | cd | ef
// ex: .
// o:1: ^
//
// index: 2
// boffs: 1
// is_low: false
//
// ab | cd | ef
// ex: . .
// o:3: ^
//
// index: 4
// boffs: 2
// is_low: false
// ```
fn b(exclude: Exclude, offs: usize) -> (usize, bool) {
let index = offs + if exclude.is_first_excluded() { 1 } else { 0 };
let b_idx = index >> 1;
let is_low = index & 1 == 1;
(b_idx, is_low)
}
/// Decompose a nibble offset into byte oriented terms.
///
/// Returns `(byte_offset, is_low)`. `byte_offset` is a offset into a `[u8]`. `is_low` is true when
/// the `offs` refers to the lower nibble in the byte located at `byte_offset`.
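///
/// For example, `decompose_offset(Exclude::None_, 1)` is `(0, true)` (the second
/// nibble is the low nibble of byte 0), and `decompose_offset(Exclude::First, 0)`
/// is also `(0, true)`, since the excluded high nibble still occupies byte 0.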
pub fn decompose_offset(exclude: Exclude, offs: usize) -> (usize, bool) {
b(exclude, offs)
}
impl<'a> SliceIndex<'a> for usize {
type Output = u8;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
if self >= slice.len() {
return None;
}
let (b_idx, is_low) = b(slice.exclude, self);
let b = &slice.inner[b_idx];
Some(if is_low {
b & 0x0f
} else {
b >> 4
})
}
}
impl<'a> SliceIndex<'a> for ops::Range<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
eprintln!("{:?} {:#x?}", self, slice);
if self.start > self.end {
eprintln!("1: {} {}", self.start, self.end);
return None;
}
if self.end > slice.len() {
eprintln!("2: {} {}", self.end, slice.len());
return None;
}
let (b_start, exclude_first) = b(slice.exclude, self.start);
let (b_end, end_is_low) = b(slice.exclude, self.end + 1);
eprintln!("bs: {:?}, ef: {:?}, be: {:?}, eil: {:?}", b_start, exclude_first, b_end, end_is_low);
/*
let b_end = if b_start == b_end {
b_end + 1
} else {
b_end
};
*/
Some(NibSlice::from_bytes_exclude(
&slice.inner[b_start..b_end],
Exclude::from_excludes(exclude_first, !end_is_low),
))
}
}
impl<'a> SliceIndex<'a> for ops::RangeFrom<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(self.start..slice.len()).get(slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeTo<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(0..self.end).get(slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeFull {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
Some(*slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeInclusive<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(*self.start()..(*self.end() + 1)).get(slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeToInclusive<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(0..self.end + 1).get(slice)
}
}
/*
impl<'a> TryFrom<NibSlice<'a> for u8 {
type Error = ();
fn try_from(value: NibSlice<'a>) -> Result<Self, Self::Error> {
if value.len() != 1 {
return Err(Self::Error);
}
match value.exclude {
}
}
}
*/
/*
impl<'a> fmt::Debug for NibSlice<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut dl = f.debug_list();
/*
for i in self.iter() {
dl.entry(&i);
}
*/
dl.finish()
}
}
*/
#[cfg(test)]
mod test_nibslice {
use super::*;
#[test]
fn build_ok() {
let ns = NibSlice::from_bytes_exclude(&[0xab, 0xcd], Exclude::Last);
assert_eq!(ns.len(), 3);
}
#[test]
fn index_single() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::First);
assert_eq!(ns.len(), 3);
assert_eq!(3, ns.index(1));
}
#[test]
#[should_panic]
fn index_oob() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
assert_eq!(ns.len(), 2);
// panic!
let n = ns.index(2);
assert_eq!(n, 0x4);
}
#[test]
fn index_range() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34, 0x56], Exclude::Both);
assert_eq!(ns.len(), 4);
let n = ns.index(1..3);
println!("{:#x?}", n);
assert_eq!(n, NibSlice::from_bytes_exclude(&[0x34], Exclude::None_));
assert_eq!(n.len(), 2);
}
#[test]
fn get_range_oob_exclude_both() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
assert_ne!(ns.len(), 4);
let x = ns.get(1..3);
assert_eq!(x, None);
}
#[test]
#[should_panic]
fn index_range_bad() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
assert_ne!(ns.len(), 4);
let _ = ns.index(1..3);
}
#[test]
fn index_2() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x04], Exclude::Both);
eprintln!("n1: {:?}", ns.len());
assert_eq!(ns.len(), 2);
let n = ns.get(1..);
eprintln!("n: {:?}", n.map(|x| x.len()));
assert_eq!(n, Some(NibSlice::from_bytes_exclude(&[0x00], Exclude::First)));
}
#[test]
fn index_to_1() {
let orig_s = NibSlice::from_bytes_exclude(&[0x02, 0x34], Exclude::First);
let nib_s = orig_s.index(..1);
assert_eq!(nib_s.len(), 1);
let nib = nib_s.nibble();
assert_eq!(nib, 0x2);
}
#[test]
fn index_middle() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34, 0x56], Exclude::Both);
let n = ns.index(2..4);
assert_eq!(n, NibSlice::from_bytes(&[0x45]));
}
#[test]
fn iter() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
let mut i = ns.iter();
assert_eq!(i.next(), Some(2));
assert_eq!(i.next(), Some(3));
assert_eq!(i.next(), None);
assert_eq!(i.next(), None);
assert_eq!(i.next(), None);
assert_eq!(i.next(), None);
}
}
/// Which nibs are excluded from the [`NibSlice`] but are included in the internal `[u8]`
// NOTE: if we want to represent general bit chunks (rather than exactly 4-bit chunks) we'd need a
// pair of offsets that hold values between 0 and 7.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Exclude {
/// the first (high) nibble in the slice is excluded
First,
/// the last (low) nibble in the slice is excluded
Last,
/// no nibbles in the byte slice are excluded
None_,
/// both the high nibble in the first byte and the low nibble in the last byte are excluded
Both,
}
impl Exclude {
/// Is the first nibble (high) excluded?
pub fn is_first_excluded(self) -> bool {
match self {
Self::First | Self::Both => true,
_ => false
}
}
/// Is the last nibble (low) excluded?
pub fn is_last_excluded(self) -> bool {
match self {
Self::Last | Self::Both => true,
_ => false
}
}
/// Number of nibbles to be excluded
pub fn len_excluded(self) -> usize {
match self {
Self::Both => 2,
Self::First | Self::Last => 1,
Self::None_ => 0
}
}
/// Given bools of what to include, generate an [`Exclude`] instance
pub fn from_includes(include_first: bool, include_last: bool) -> Self {
Self::from_excludes(!include_first, !include_last)
}
/// Given bools of what to exclude, generate an [`Exclude`] instance
pub fn from_excludes(exclude_first: bool, exclude_last: bool) -> Self {
match (exclude_first, exclude_last) {
(true, true) => Exclude::Both,
(false, true) => Exclude::Last,
(true, false) => Exclude::First,
(false, false) => Exclude::None_,
}
}
}
| {
let v = self.inner.inner[0] & 0x0f;
self.inner.inner = &self.inner.inner[1..];
self.inner.exclude = Exclude::from_excludes(false, self.inner.exclude.is_last_excluded());
v
} | conditional_block |
lib.rs | #![warn(rust_2018_idioms, missing_debug_implementations, missing_docs)]
//! `quartet` provides useful abstractions for working with data that is described/structured in 4-bit
//! chunks.
//!
//!
//! ```
//! use quartet::NibSlice;
//! let nib_slice = NibSlice::from_bytes_skip_last(&[0x12, 0x34, 0x50]);
//! assert_eq!(nib_slice.index(1), 2);
//! ```
//use std::fmt;
use std::ops;
/// A slice (`[T]`) over nibs (4-bit values)
///
/// Internally, it operates on an array of bytes, and interprets them as pairs of nibs. This is
/// intended to allow use of a `NibSlice` to examine binary structures that are composed of nibs.
///
/// For each byte, the nibble composed of the higher bits (mask = `0xf0`) is considered to come
/// before the nibble composed of the lower bits (mask = `0x0f`).
#[derive(Clone, Copy, Debug)]
pub struct NibSlice<'a> {
exclude: Exclude,
inner: &'a [u8],
}
impl<'a> NibSlice<'a> {
/// Create a [`NibSlice`] from a slice of bytes and whether to exclude either end of the slice
pub fn from_bytes_exclude(inner: &'a [u8], exclude: Exclude) -> Self {
if inner.len() == 0 {
assert_eq!(exclude, Exclude::None_);
}
if inner.len() == 1 {
assert_ne!(exclude, Exclude::Both);
}
Self {
inner,
exclude,
}
}
/// Create a [`NibSlice`] from a slice of bytes, excluding the last nibble in the slice
pub fn from_bytes_skip_last(inner: &'a [u8]) -> Self {
Self::from_bytes_exclude(inner, Exclude::Last)
}
/// Create a [`NibSlice`] from a slice of bytes, including all nibbles in the given bytes
///
/// The resulting [`NibSlice`] will have `.len()` equal to `2 * inner.len()`.
pub fn from_bytes(inner: &'a [u8]) -> Self {
Self::from_bytes_exclude(inner, Exclude::None_)
}
/// The number of nibbles in the [`NibSlice`]
pub fn len(&self) -> usize {
self.inner.len() * 2 - self.exclude.len_excluded()
}
/// Split the [`NibSlice`] into 2 [`NibSlice`]s at the nibble offset given
pub fn split_at(&self, _offset: usize) -> (NibSlice<'a>, NibSlice<'a>) {
unimplemented!()
}
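// NOTE (editorial sketch, not the author's implementation): once range
// indexing exists, `split_at` could plausibly be written in terms of it,
// e.g. `(self.index(..offset), self.index(offset..))`, panicking when
// `offset > self.len()` just as `index` does.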
/// Index, using various ranges, a `NibSlice` into `NibSlice`s that are sub-slices
///
/// # Examples
///
///
/// ```
/// use quartet::{NibSlice, Exclude};
/// let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34, 0x56], Exclude::Both);
/// let n = ns.index(2..4);
///
/// assert_eq!(n, NibSlice::from_bytes(&[0x45]));
/// ```
pub fn index<S: SliceIndex<'a>>(&self, idx: S) -> S::Output {
self.get(idx).unwrap()
}
/// Get the [`NibSlice`] referred to by the indexing value, or return `None` if index is out of
/// range
pub fn get<S: SliceIndex<'a>>(&self, idx: S) -> Option<S::Output> {
idx.get(self)
}
/// If the slice refers to a single nibble, return that nibble as a byte. Panic if slice does
/// not have exactly one nibble
///
/// # Examples
///
/// ```
/// use quartet::{NibSlice, Exclude};
///
/// let orig_s = NibSlice::from_bytes_exclude(&[0x02, 0x34], Exclude::First);
/// let nib_s = orig_s.index(..1);
/// assert_eq!(nib_s.len(), 1);
///
/// let nib = nib_s.nibble();
///
/// assert_eq!(nib, 0x2);
/// ```
pub fn nibble(&self) -> u8 {
self.try_nibble().unwrap()
}
/// If the slice refers to a single nibble, return that nibble as a byte. Return None if the
/// slice does not have exactly one nibble
///
/// # Examples
///
/// ```
/// use quartet::{NibSlice, Exclude};
/// let orig_s = NibSlice::from_bytes_exclude(&[0x02, 0x34], Exclude::First);
/// let nib_s = orig_s.index(..1);
/// let nib = nib_s.try_nibble();
/// assert_eq!(nib, Some(0x2));
///
/// // more than 1 nibble
/// assert_eq!(orig_s.index(1..3).try_nibble(), None);
/// ```
pub fn try_nibble(&self) -> Option<u8> {
if self.len() != 1 {
return None
}
let b = self.inner[0];
Some(match self.exclude {
Exclude::First => { b & 0xf },
Exclude::Last => { b >> 4 },
_ => panic!(),
})
}
/// Create an iterator over the [`NibSlice`], where each item is a nibble
pub fn iter(&self) -> Iter<'a> {
Iter { inner: *self }
}
/// Decompose the [`NibSlice`] into byte-oriented parts
///
/// The first and last members of the tuple are the non-byte aligned nibbles optionally at the
/// start and end of the [`NibSlice`]. The middle member is the byte-aligned nibbles organized
/// into bytes
pub fn byte_parts(&self) -> (Option<u8>, &[u8], Option<u8>) {
let (rem, first) = if self.exclude.is_first_excluded() {
(&self.inner[1..], Some(self.inner[0] & 0x0f))
} else {
(self.inner, None)
};
let (rem, last) = if self.exclude.is_last_excluded() {
let l = rem.len();
(&rem[..l - 1], Some(rem[rem.len() - 1] >> 4))
} else {
(rem, None)
};
(first, rem, last)
}
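// Illustrative example of the decomposition above (editorial addition, not
// part of the original source):
//
//     let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34, 0x56], Exclude::Both);
//     // the high nibble of 0x12 and the low nibble of 0x56 are excluded
//     assert_eq!(ns.byte_parts(), (Some(0x2), &[0x34][..], Some(0x5)));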
}
/// Iterate over a [`NibSlice`], returning a nibble for each item
#[derive(Debug)]
pub struct Iter<'a> {
inner: NibSlice<'a>,
}
impl<'a> Iterator for Iter<'a> {
type Item = u8;
fn next(&mut self) -> Option<Self::Item> {
if self.inner.len() == 0 {
return None;
}
let v = if self.inner.exclude.is_first_excluded() {
let v = self.inner.inner[0] & 0x0f;
self.inner.inner = &self.inner.inner[1..];
self.inner.exclude = Exclude::from_excludes(false, self.inner.exclude.is_last_excluded());
v
} else {
let v = self.inner.inner[0] >> 4;
self.inner.exclude = Exclude::from_excludes(true, self.inner.exclude.is_last_excluded());
v
};
if self.inner.inner.len() == 0 {
self.inner.exclude = Exclude::None_;
}
if self.inner.inner.len() == 1 && self.inner.exclude == Exclude::Both {
self.inner.exclude = Exclude::None_;
self.inner.inner = &[];
}
Some(v)
/*
let n = self.inner.index(0);
self.inner = self.inner.index(1..);
Some(n)
*/
}
}
impl PartialEq<NibSlice<'_>> for NibSlice<'_> {
fn eq(&self, other: &NibSlice<'_>) -> bool {
let i1 = self.iter();
let i2 = other.iter();
// NOTE: performance of this (doing a nibble-based comparison via an iterator) is probably
// really bad. Ideally, we'd pick a faster method in cases where we have byte-alignment
// (i.e. where both slices have the same `exclude.is_first_excluded()` value). Should really
// speed things up.
i1.eq(i2)
}
}
impl Eq for NibSlice<'_> {}
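// Illustrative consequence of nibble-wise equality (editorial addition, not
// part of the original source): two slices can compare equal even when their
// nibbles sit at different byte alignments:
//
//     let lhs = NibSlice::from_bytes_exclude(&[0x12, 0x30], Exclude::Last);
//     let rhs = NibSlice::from_bytes_exclude(&[0x01, 0x23], Exclude::First);
//     assert_eq!(lhs, rhs); // both are the nibble sequence [1, 2, 3]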
/// A helper trait used for indexing operations
///
/// This is modeled after `std::slice::SliceIndex`, with slight modifications to return owned types
/// (as is required for our double-fat slice references).
pub trait SliceIndex<'a> {
/// Type returned by this indexing
type Output;
/// Returns a shared reference to the output at this location, if in bounds.
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output>;
}
// ```norust
// ab | cd | ef
// ex: .
// o:1: ^
//
// index: 2
// boffs: 1
// is_low: false
//
// ab | cd | ef
// ex: . .
// o:3: ^
//
// index: 4
// boffs: 2
// is_low: false
// ```
fn b(exclude: Exclude, offs: usize) -> (usize, bool) {
let index = offs + if exclude.is_first_excluded() { 1 } else { 0 };
let b_idx = index >> 1;
let is_low = index & 1 == 1;
(b_idx, is_low)
}
/// Decompose a nibble offset into byte oriented terms.
///
/// Returns `(byte_offset, is_low)`. `byte_offset` is an offset into a `[u8]`. `is_low` is true when
/// the `offs` refers to the lower nibble in the byte located at `byte_offset`.
pub fn decompose_offset(exclude: Exclude, offs: usize) -> (usize, bool) {
b(exclude, offs)
}
impl<'a> SliceIndex<'a> for usize {
type Output = u8;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
if self >= slice.len() {
return None;
}
let (b_idx, is_low) = b(slice.exclude, self);
let b = &slice.inner[b_idx];
Some(if is_low {
b & 0x0f
} else {
b >> 4
})
}
}
impl<'a> SliceIndex<'a> for ops::Range<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
eprintln!("{:?} {:#x?}", self, slice);
if self.start > self.end {
eprintln!("1: {} {}", self.start, self.end);
return None;
}
if self.end > slice.len() {
eprintln!("2: {} {}", self.end, slice.len());
return None;
}
let (b_start, exclude_first) = b(slice.exclude, self.start);
let (b_end, end_is_low) = b(slice.exclude, self.end + 1);
eprintln!("bs: {:?}, ef: {:?}, be: {:?}, eil: {:?}", b_start, exclude_first, b_end, end_is_low);
/*
let b_end = if b_start == b_end {
b_end + 1
} else {
b_end
};
*/
Some(NibSlice::from_bytes_exclude(
&slice.inner[b_start..b_end],
Exclude::from_excludes(exclude_first, !end_is_low),
))
}
}
impl<'a> SliceIndex<'a> for ops::RangeFrom<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(self.start..slice.len()).get(slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeTo<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(0..self.end).get(slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeFull {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
Some(*slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeInclusive<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(*self.start()..(*self.end() + 1)).get(slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeToInclusive<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(0..self.end + 1).get(slice)
}
}
/*
impl<'a> TryFrom<NibSlice<'a> for u8 {
type Error = ();
fn try_from(value: NibSlice<'a>) -> Result<Self, Self::Error> {
if value.len()!= 1 {
return Err(Self::Error);
}
match value.exclude {
}
}
}
*/
/*
impl<'a> fmt::Debug for NibSlice<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut dl = f.debug_list();
/*
for i in self.iter() {
dl.entry(&i);
}
*/
dl.finish()
}
}
*/
#[cfg(test)]
mod test_nibslice {
use super::*;
#[test]
fn build_ok() {
let ns = NibSlice::from_bytes_exclude(&[0xab, 0xcd], Exclude::Last);
assert_eq!(ns.len(), 3);
}
#[test]
fn index_single() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::First);
assert_eq!(ns.len(), 3);
assert_eq!(3, ns.index(1));
}
#[test]
#[should_panic]
fn index_oob() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
assert_eq!(ns.len(), 2);
// panic!
let n = ns.index(2);
assert_eq!(n, 0x4);
}
#[test]
fn index_range() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34, 0x56], Exclude::Both);
assert_eq!(ns.len(), 4);
let n = ns.index(1..3);
println!("{:#x?}", n);
assert_eq!(n, NibSlice::from_bytes_exclude(&[0x34], Exclude::None_));
assert_eq!(n.len(), 2);
}
#[test]
fn get_range_oob_exclude_both() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
assert_ne!(ns.len(), 4);
let x = ns.get(1..3);
assert_eq!(x, None);
}
#[test]
#[should_panic]
fn index_range_bad() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
assert_ne!(ns.len(), 4);
let _ = ns.index(1..3);
}
#[test]
fn index_2() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x04], Exclude::Both);
eprintln!("n1: {:?}", ns.len());
assert_eq!(ns.len(), 2);
let n = ns.get(1..);
eprintln!("n: {:?}", n.map(|x| x.len()));
assert_eq!(n, Some(NibSlice::from_bytes_exclude(&[0x00], Exclude::First)));
}
#[test]
fn index_to_1() {
let orig_s = NibSlice::from_bytes_exclude(&[0x02, 0x34], Exclude::First);
let nib_s = orig_s.index(..1);
assert_eq!(nib_s.len(), 1);
let nib = nib_s.nibble(); | }
#[test]
fn index_middle() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34, 0x56], Exclude::Both);
let n = ns.index(2..4);
assert_eq!(n, NibSlice::from_bytes(&[0x45]));
}
#[test]
fn iter() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
let mut i = ns.iter();
assert_eq!(i.next(), Some(2));
assert_eq!(i.next(), Some(3));
assert_eq!(i.next(), None);
assert_eq!(i.next(), None);
assert_eq!(i.next(), None);
assert_eq!(i.next(), None);
}
}
/// Which nibs are excluded from the [`NibSlice`] but are included in the internal `[u8]`
// NOTE: if we want to represent general bit chunks (rather than exactly 4-bit chunks) we'd need a
// pair of offsets that hold values between 0 and 7.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Exclude {
/// the first (high) nibble in the slice is excluded
First,
/// the last (low) nibble in the slice is excluded
Last,
/// no nibbles in the byte slice are excluded
None_,
/// both the high nibble in the first byte and the low nibble in the last byte are excluded
Both,
}
impl Exclude {
/// Is the first nibble (high) excluded?
pub fn is_first_excluded(self) -> bool {
match self {
Self::First | Self::Both => true,
_ => false
}
}
/// Is the last nibble (low) excluded?
pub fn is_last_excluded(self) -> bool {
match self {
Self::Last | Self::Both => true,
_ => false
}
}
/// Number of nibbles to be excluded
pub fn len_excluded(self) -> usize {
match self {
Self::Both => 2,
Self::First | Self::Last => 1,
Self::None_ => 0
}
}
/// Given bools of what to include, generate an [`Exclude`] instance
pub fn from_includes(include_first: bool, include_last: bool) -> Self {
Self::from_excludes(!include_first, !include_last)
}
/// Given bools of what to exclude, generate an [`Exclude`] instance
pub fn from_excludes(exclude_first: bool, exclude_last: bool) -> Self {
match (exclude_first, exclude_last) {
(true, true) => Exclude::Both,
(false, true) => Exclude::Last,
(true, false) => Exclude::First,
(false, false) => Exclude::None_,
}
}
} |
assert_eq!(nib, 0x2); | random_line_split |
lib.rs | #![warn(rust_2018_idioms, missing_debug_implementations, missing_docs)]
//! `quartet` provides useful abstractions for working with data that is described/structured in 4-bit
//! chunks.
//!
//!
//! ```
//! use quartet::NibSlice;
//! let nib_slice = NibSlice::from_bytes_skip_last(&[0x12, 0x34, 0x50]);
//! assert_eq!(nib_slice.index(1), 2);
//! ```
//use std::fmt;
use std::ops;
/// A slice (`[T]`) over nibs (4-bit values)
///
/// Internally, it operates on an array of bytes, and interprets them as pairs of nibs. This is
/// intended to allow use of a `NibSlice` to examine binary structures that are composed of nibs.
///
/// For each byte, the nibble composed of the higher bits (mask = `0xf0`) is considered to come
/// before the nibble composed of the lower bits (mask = `0x0f`).
#[derive(Clone, Copy, Debug)]
pub struct NibSlice<'a> {
exclude: Exclude,
inner: &'a [u8],
}
impl<'a> NibSlice<'a> {
/// Create a [`NibSlice`] from a slice of bytes and whether to exclude either end of the slice
pub fn from_bytes_exclude(inner: &'a [u8], exclude: Exclude) -> Self {
if inner.len() == 0 {
assert_eq!(exclude, Exclude::None_);
}
if inner.len() == 1 {
assert_ne!(exclude, Exclude::Both);
}
Self {
inner,
exclude,
}
}
/// Create a [`NibSlice`] from a slice of bytes, excluding the last nibble in the slice
pub fn from_bytes_skip_last(inner: &'a [u8]) -> Self {
Self::from_bytes_exclude(inner, Exclude::Last)
}
/// Create a [`NibSlice`] from a slice of bytes, including all nibbles in the given bytes
///
/// The resulting [`NibSlice`] will have `.len()` equal to `2 * inner.len()`.
pub fn from_bytes(inner: &'a [u8]) -> Self {
Self::from_bytes_exclude(inner, Exclude::None_)
}
/// The number of nibbles in the [`NibSlice`]
pub fn len(&self) -> usize {
self.inner.len() * 2 - self.exclude.len_excluded()
}
/// Split the [`NibSlice`] into 2 [`NibSlice`]s at the nibble offset given
pub fn split_at(&self, _offset: usize) -> (NibSlice<'a>, NibSlice<'a>) {
unimplemented!()
}
/// Index, using various ranges, a `NibSlice` into `NibSlice`s that are sub-slices
///
/// # Examples
///
///
/// ```
/// use quartet::{NibSlice, Exclude};
/// let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34, 0x56], Exclude::Both);
/// let n = ns.index(2..4);
///
/// assert_eq!(n, NibSlice::from_bytes(&[0x45]));
/// ```
pub fn index<S: SliceIndex<'a>>(&self, idx: S) -> S::Output {
self.get(idx).unwrap()
}
/// Get the [`NibSlice`] referred to by the indexing value, or return `None` if index is out of
/// range
pub fn get<S: SliceIndex<'a>>(&self, idx: S) -> Option<S::Output> {
idx.get(self)
}
/// If the slice refers to a single nibble, return that nibble as a byte. Panic if slice does
/// not have exactly one nibble
///
/// # Examples
///
/// ```
/// use quartet::{NibSlice, Exclude};
///
/// let orig_s = NibSlice::from_bytes_exclude(&[0x02, 0x34], Exclude::First);
/// let nib_s = orig_s.index(..1);
/// assert_eq!(nib_s.len(), 1);
///
/// let nib = nib_s.nibble();
///
/// assert_eq!(nib, 0x2);
/// ```
pub fn nibble(&self) -> u8 {
self.try_nibble().unwrap()
}
/// If the slice refers to a single nibble, return that nibble as a byte. Return None if the
/// slice does not have exactly one nibble
///
/// # Examples
///
/// ```
/// use quartet::{NibSlice, Exclude};
/// let orig_s = NibSlice::from_bytes_exclude(&[0x02, 0x34], Exclude::First);
/// let nib_s = orig_s.index(..1);
/// let nib = nib_s.try_nibble();
/// assert_eq!(nib, Some(0x2));
///
/// // more than 1 nibble
/// assert_eq!(orig_s.index(1..3).try_nibble(), None);
/// ```
pub fn try_nibble(&self) -> Option<u8> {
if self.len() != 1 {
return None
}
let b = self.inner[0];
Some(match self.exclude {
Exclude::First => { b & 0xf },
Exclude::Last => { b >> 4 },
_ => panic!(),
})
}
/// Create an iterator over the [`NibSlice`], where each item is a nibble
pub fn iter(&self) -> Iter<'a> {
Iter { inner: *self }
}
/// Decompose the [`NibSlice`] into byte-oriented parts
///
/// The first and last members of the tuple are the non-byte aligned nibbles optionally at the
/// start and end of the [`NibSlice`]. The middle member is the byte-aligned nibbles organized
/// into bytes
pub fn byte_parts(&self) -> (Option<u8>, &[u8], Option<u8>) {
let (rem, first) = if self.exclude.is_first_excluded() {
(&self.inner[1..], Some(self.inner[0] & 0x0f))
} else {
(self.inner, None)
};
let (rem, last) = if self.exclude.is_last_excluded() {
let l = rem.len();
(&rem[..l - 1], Some(rem[rem.len() - 1] >> 4))
} else {
(rem, None)
};
(first, rem, last)
}
}
/// Iterate over a [`NibSlice`], returning a nibble for each item
#[derive(Debug)]
pub struct Iter<'a> {
inner: NibSlice<'a>,
}
impl<'a> Iterator for Iter<'a> {
type Item = u8;
fn next(&mut self) -> Option<Self::Item> {
if self.inner.len() == 0 {
return None;
}
let v = if self.inner.exclude.is_first_excluded() {
let v = self.inner.inner[0] & 0x0f;
self.inner.inner = &self.inner.inner[1..];
self.inner.exclude = Exclude::from_excludes(false, self.inner.exclude.is_last_excluded());
v
} else {
let v = self.inner.inner[0] >> 4;
self.inner.exclude = Exclude::from_excludes(true, self.inner.exclude.is_last_excluded());
v
};
if self.inner.inner.len() == 0 {
self.inner.exclude = Exclude::None_;
}
if self.inner.inner.len() == 1 && self.inner.exclude == Exclude::Both {
self.inner.exclude = Exclude::None_;
self.inner.inner = &[];
}
Some(v)
/*
let n = self.inner.index(0);
self.inner = self.inner.index(1..);
Some(n)
*/
}
}
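// Illustrative usage of the iterator above (editorial addition, not part of
// the original source):
//
//     let nibs: Vec<u8> = NibSlice::from_bytes(&[0xab]).iter().collect();
//     assert_eq!(nibs, vec![0xa, 0xb]); // high nibble first, then low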
impl PartialEq<NibSlice<'_>> for NibSlice<'_> {
fn eq(&self, other: &NibSlice<'_>) -> bool {
let i1 = self.iter();
let i2 = other.iter();
// NOTE: performance of this (doing a nibble-based comparison via an iterator) is probably
// really bad. Ideally, we'd pick a faster method in cases where we have byte-alignment
// (i.e. where both slices have the same `exclude.is_first_excluded()` value). Should really
// speed things up.
i1.eq(i2)
}
}
impl Eq for NibSlice<'_> {}
/// A helper trait used for indexing operations
///
/// This is modeled after `std::slice::SliceIndex`, with slight modifications to return owned types
/// (as is required for our double-fat slice references).
pub trait SliceIndex<'a> {
/// Type returned by this indexing
type Output;
/// Returns a shared reference to the output at this location, if in bounds.
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output>;
}
// ```norust
// ab | cd | ef
// ex: .
// o:1: ^
//
// index: 2
// boffs: 1
// is_low: false
//
// ab | cd | ef
// ex: . .
// o:3: ^
//
// index: 4
// boffs: 2
// is_low: false
// ```
fn b(exclude: Exclude, offs: usize) -> (usize, bool) |
/// Decompose a nibble offset into byte oriented terms.
///
/// Returns `(byte_offset, is_low)`. `byte_offset` is an offset into a `[u8]`. `is_low` is true when
/// the `offs` refers to the lower nibble in the byte located at `byte_offset`.
pub fn decompose_offset(exclude: Exclude, offs: usize) -> (usize, bool) {
b(exclude, offs)
}
impl<'a> SliceIndex<'a> for usize {
type Output = u8;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
if self >= slice.len() {
return None;
}
let (b_idx, is_low) = b(slice.exclude, self);
let b = &slice.inner[b_idx];
Some(if is_low {
b & 0x0f
} else {
b >> 4
})
}
}
impl<'a> SliceIndex<'a> for ops::Range<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
eprintln!("{:?} {:#x?}", self, slice);
if self.start > self.end {
eprintln!("1: {} {}", self.start, self.end);
return None;
}
if self.end > slice.len() {
eprintln!("2: {} {}", self.end, slice.len());
return None;
}
let (b_start, exclude_first) = b(slice.exclude, self.start);
let (b_end, end_is_low) = b(slice.exclude, self.end + 1);
eprintln!("bs: {:?}, ef: {:?}, be: {:?}, eil: {:?}", b_start, exclude_first, b_end, end_is_low);
/*
let b_end = if b_start == b_end {
b_end + 1
} else {
b_end
};
*/
Some(NibSlice::from_bytes_exclude(
&slice.inner[b_start..b_end],
Exclude::from_excludes(exclude_first, !end_is_low),
))
}
}
impl<'a> SliceIndex<'a> for ops::RangeFrom<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(self.start..slice.len()).get(slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeTo<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(0..self.end).get(slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeFull {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
Some(*slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeInclusive<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(*self.start()..(*self.end() + 1)).get(slice)
}
}
impl<'a> SliceIndex<'a> for ops::RangeToInclusive<usize> {
type Output = NibSlice<'a>;
fn get(self, slice: &NibSlice<'a>) -> Option<Self::Output> {
(0..self.end + 1).get(slice)
}
}
/*
impl<'a> TryFrom<NibSlice<'a> for u8 {
type Error = ();
fn try_from(value: NibSlice<'a>) -> Result<Self, Self::Error> {
if value.len() != 1 {
return Err(Self::Error);
}
match value.exclude {
}
}
}
*/
/*
impl<'a> fmt::Debug for NibSlice<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut dl = f.debug_list();
/*
for i in self.iter() {
dl.entry(&i);
}
*/
dl.finish()
}
}
*/
#[cfg(test)]
mod test_nibslice {
use super::*;
#[test]
fn build_ok() {
let ns = NibSlice::from_bytes_exclude(&[0xab, 0xcd], Exclude::Last);
assert_eq!(ns.len(), 3);
}
#[test]
fn index_single() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::First);
assert_eq!(ns.len(), 3);
assert_eq!(3, ns.index(1));
}
#[test]
#[should_panic]
fn index_oob() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
assert_eq!(ns.len(), 2);
// panic!
let n = ns.index(2);
assert_eq!(n, 0x4);
}
#[test]
fn index_range() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34, 0x56], Exclude::Both);
assert_eq!(ns.len(), 4);
let n = ns.index(1..3);
println!("{:#x?}", n);
assert_eq!(n, NibSlice::from_bytes_exclude(&[0x34], Exclude::None_));
assert_eq!(n.len(), 2);
}
#[test]
fn get_range_oob_exclude_both() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
assert_ne!(ns.len(), 4);
let x = ns.get(1..3);
assert_eq!(x, None);
}
#[test]
#[should_panic]
fn index_range_bad() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
assert_ne!(ns.len(), 4);
let _ = ns.index(1..3);
}
#[test]
fn index_2() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x04], Exclude::Both);
eprintln!("n1: {:?}", ns.len());
assert_eq!(ns.len(), 2);
let n = ns.get(1..);
eprintln!("n: {:?}", n.map(|x| x.len()));
assert_eq!(n, Some(NibSlice::from_bytes_exclude(&[0x00], Exclude::First)));
}
#[test]
fn index_to_1() {
let orig_s = NibSlice::from_bytes_exclude(&[0x02, 0x34], Exclude::First);
let nib_s = orig_s.index(..1);
assert_eq!(nib_s.len(), 1);
let nib = nib_s.nibble();
assert_eq!(nib, 0x2);
}
#[test]
fn index_middle() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34, 0x56], Exclude::Both);
let n = ns.index(2..4);
assert_eq!(n, NibSlice::from_bytes(&[0x45]));
}
#[test]
fn iter() {
let ns = NibSlice::from_bytes_exclude(&[0x12, 0x34], Exclude::Both);
let mut i = ns.iter();
assert_eq!(i.next(), Some(2));
assert_eq!(i.next(), Some(3));
assert_eq!(i.next(), None);
assert_eq!(i.next(), None);
assert_eq!(i.next(), None);
assert_eq!(i.next(), None);
}
}
/// Which nibs are excluded from the [`NibSlice`] but are included in the internal `[u8]`
// NOTE: if we want to represent general bit chunks (rather than exactly 4-bit chunks) we'd need a
// pair of offsets that hold values between 0 and 7.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Exclude {
/// the first (high) nibble in the slice is excluded
First,
/// the last (low) nibble in the slice is excluded
Last,
/// no nibbles in the byte slice are excluded
None_,
/// both the high nibble in the first byte and the low nibble in the last byte are excluded
Both,
}
impl Exclude {
/// Is the first nibble (high) excluded?
pub fn is_first_excluded(self) -> bool {
match self {
Self::First | Self::Both => true,
_ => false
}
}
/// Is the last nibble (low) excluded?
pub fn is_last_excluded(self) -> bool {
match self {
Self::Last | Self::Both => true,
_ => false
}
}
/// Number of nibbles to be excluded
pub fn len_excluded(self) -> usize {
match self {
Self::Both => 2,
Self::First | Self::Last => 1,
Self::None_ => 0
}
}
/// Given bools of what to include, generate an [`Exclude`] instance
pub fn from_includes(include_first: bool, include_last: bool) -> Self {
Self::from_excludes(!include_first, !include_last)
}
/// Given bools of what to exclude, generate an [`Exclude`] instance
pub fn from_excludes(exclude_first: bool, exclude_last: bool) -> Self {
match (exclude_first, exclude_last) {
(true, true) => Exclude::Both,
(false, true) => Exclude::Last,
(true, false) => Exclude::First,
(false, false) => Exclude::None_,
}
}
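// Illustrative mappings for the constructors above (editorial addition, not
// part of the original source):
//
//     assert_eq!(Exclude::from_includes(true, false), Exclude::Last);
//     assert_eq!(Exclude::from_excludes(true, true), Exclude::Both);
//     assert_eq!(Exclude::from_excludes(false, false), Exclude::None_);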
}
| {
let index = offs + if exclude.is_first_excluded() { 1 } else { 0 };
let b_idx = index >> 1;
let is_low = index & 1 == 1;
(b_idx, is_low)
} | identifier_body |