file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
mod.rs
|
//! Compiler utilities.
//!
//! This module contains a number of utilities used throughput the compiler, such as unique symbol
//! and ID generators and a module for measuring compile-time performance of various aspects of the
//! compiler.
use fnv;
use crate::ast::ExprKind::*;
use crate::ast::*;
use std::cmp::max;
use std::iter;
pub mod colors;
pub mod dump;
pub mod stats;
/// Utility struct that can track and generate unique IDs and symbols for use in an expression.
/// Each SymbolGenerator tracks the maximum ID used for every symbol name, and can be used to
/// create new symbols with the same name but a unique ID.
#[derive(Debug, Clone)]
pub struct SymbolGenerator {
id_map: fnv::FnvHashMap<String, i32>,
}
impl SymbolGenerator {
/// Initialize a SymbolGenerator with no existing symbols.
pub fn new() -> SymbolGenerator {
SymbolGenerator {
id_map: fnv::FnvHashMap::default(),
}
}
/// Initialize a SymbolGenerator from all the symbols defined in an expression.
pub fn from_expression(expr: &Expr) -> SymbolGenerator {
let mut id_map: fnv::FnvHashMap<String, i32> = fnv::FnvHashMap::default();
let update_id = |id_map: &mut fnv::FnvHashMap<String, i32>, symbol: &Symbol| {
let id = id_map.entry(symbol.name().clone()).or_insert(0);
*id = max(*id, symbol.id());
};
expr.traverse(&mut |e| match e.kind {
Let { ref name,.. } => update_id(&mut id_map, name),
Ident(ref sym) => update_id(&mut id_map, sym),
Lambda { ref params,.. } => {
for p in params.iter() {
update_id(&mut id_map, &p.name);
}
}
_ => {}
});
SymbolGenerator { id_map }
}
pub fn new_symbol(&mut self, name: &str) -> Symbol {
let id = self.id_map.entry(name.to_owned()).or_insert(-1);
*id += 1;
Symbol::new(name, *id)
}
}
pub fn
|
<T: iter::Iterator<Item = String>>(
start: &str,
sep: &str,
end: &str,
strings: T,
) -> String {
let mut res = String::new();
res.push_str(start);
for (i, s) in strings.enumerate() {
if i > 0 {
res.push_str(sep);
}
res.push_str(&s);
}
res.push_str(end);
res
}
|
join
|
identifier_name
|
mod.rs
|
//! Compiler utilities.
//!
//! This module contains a number of utilities used throughput the compiler, such as unique symbol
//! and ID generators and a module for measuring compile-time performance of various aspects of the
//! compiler.
use fnv;
use crate::ast::ExprKind::*;
use crate::ast::*;
use std::cmp::max;
use std::iter;
pub mod colors;
pub mod dump;
pub mod stats;
/// Utility struct that can track and generate unique IDs and symbols for use in an expression.
/// Each SymbolGenerator tracks the maximum ID used for every symbol name, and can be used to
/// create new symbols with the same name but a unique ID.
#[derive(Debug, Clone)]
pub struct SymbolGenerator {
id_map: fnv::FnvHashMap<String, i32>,
}
impl SymbolGenerator {
/// Initialize a SymbolGenerator with no existing symbols.
pub fn new() -> SymbolGenerator {
SymbolGenerator {
id_map: fnv::FnvHashMap::default(),
}
}
/// Initialize a SymbolGenerator from all the symbols defined in an expression.
pub fn from_expression(expr: &Expr) -> SymbolGenerator {
let mut id_map: fnv::FnvHashMap<String, i32> = fnv::FnvHashMap::default();
let update_id = |id_map: &mut fnv::FnvHashMap<String, i32>, symbol: &Symbol| {
let id = id_map.entry(symbol.name().clone()).or_insert(0);
*id = max(*id, symbol.id());
};
expr.traverse(&mut |e| match e.kind {
Let { ref name,.. } => update_id(&mut id_map, name),
Ident(ref sym) => update_id(&mut id_map, sym),
Lambda { ref params,.. } => {
for p in params.iter() {
update_id(&mut id_map, &p.name);
}
}
_ => {}
});
SymbolGenerator { id_map }
}
pub fn new_symbol(&mut self, name: &str) -> Symbol {
let id = self.id_map.entry(name.to_owned()).or_insert(-1);
*id += 1;
Symbol::new(name, *id)
}
}
pub fn join<T: iter::Iterator<Item = String>>(
start: &str,
sep: &str,
end: &str,
strings: T,
) -> String
|
{
let mut res = String::new();
res.push_str(start);
for (i, s) in strings.enumerate() {
if i > 0 {
res.push_str(sep);
}
res.push_str(&s);
}
res.push_str(end);
res
}
|
identifier_body
|
|
mod.rs
|
//! Compiler utilities.
//!
//! This module contains a number of utilities used throughput the compiler, such as unique symbol
|
//! and ID generators and a module for measuring compile-time performance of various aspects of the
//! compiler.
use fnv;
use crate::ast::ExprKind::*;
use crate::ast::*;
use std::cmp::max;
use std::iter;
pub mod colors;
pub mod dump;
pub mod stats;
/// Utility struct that can track and generate unique IDs and symbols for use in an expression.
/// Each SymbolGenerator tracks the maximum ID used for every symbol name, and can be used to
/// create new symbols with the same name but a unique ID.
#[derive(Debug, Clone)]
pub struct SymbolGenerator {
id_map: fnv::FnvHashMap<String, i32>,
}
impl SymbolGenerator {
/// Initialize a SymbolGenerator with no existing symbols.
pub fn new() -> SymbolGenerator {
SymbolGenerator {
id_map: fnv::FnvHashMap::default(),
}
}
/// Initialize a SymbolGenerator from all the symbols defined in an expression.
pub fn from_expression(expr: &Expr) -> SymbolGenerator {
let mut id_map: fnv::FnvHashMap<String, i32> = fnv::FnvHashMap::default();
let update_id = |id_map: &mut fnv::FnvHashMap<String, i32>, symbol: &Symbol| {
let id = id_map.entry(symbol.name().clone()).or_insert(0);
*id = max(*id, symbol.id());
};
expr.traverse(&mut |e| match e.kind {
Let { ref name,.. } => update_id(&mut id_map, name),
Ident(ref sym) => update_id(&mut id_map, sym),
Lambda { ref params,.. } => {
for p in params.iter() {
update_id(&mut id_map, &p.name);
}
}
_ => {}
});
SymbolGenerator { id_map }
}
pub fn new_symbol(&mut self, name: &str) -> Symbol {
let id = self.id_map.entry(name.to_owned()).or_insert(-1);
*id += 1;
Symbol::new(name, *id)
}
}
pub fn join<T: iter::Iterator<Item = String>>(
start: &str,
sep: &str,
end: &str,
strings: T,
) -> String {
let mut res = String::new();
res.push_str(start);
for (i, s) in strings.enumerate() {
if i > 0 {
res.push_str(sep);
}
res.push_str(&s);
}
res.push_str(end);
res
}
|
random_line_split
|
|
mod.rs
|
//! Compiler utilities.
//!
//! This module contains a number of utilities used throughput the compiler, such as unique symbol
//! and ID generators and a module for measuring compile-time performance of various aspects of the
//! compiler.
use fnv;
use crate::ast::ExprKind::*;
use crate::ast::*;
use std::cmp::max;
use std::iter;
pub mod colors;
pub mod dump;
pub mod stats;
/// Utility struct that can track and generate unique IDs and symbols for use in an expression.
/// Each SymbolGenerator tracks the maximum ID used for every symbol name, and can be used to
/// create new symbols with the same name but a unique ID.
#[derive(Debug, Clone)]
pub struct SymbolGenerator {
id_map: fnv::FnvHashMap<String, i32>,
}
impl SymbolGenerator {
/// Initialize a SymbolGenerator with no existing symbols.
pub fn new() -> SymbolGenerator {
SymbolGenerator {
id_map: fnv::FnvHashMap::default(),
}
}
/// Initialize a SymbolGenerator from all the symbols defined in an expression.
pub fn from_expression(expr: &Expr) -> SymbolGenerator {
let mut id_map: fnv::FnvHashMap<String, i32> = fnv::FnvHashMap::default();
let update_id = |id_map: &mut fnv::FnvHashMap<String, i32>, symbol: &Symbol| {
let id = id_map.entry(symbol.name().clone()).or_insert(0);
*id = max(*id, symbol.id());
};
expr.traverse(&mut |e| match e.kind {
Let { ref name,.. } => update_id(&mut id_map, name),
Ident(ref sym) => update_id(&mut id_map, sym),
Lambda { ref params,.. } => {
for p in params.iter() {
update_id(&mut id_map, &p.name);
}
}
_ =>
|
});
SymbolGenerator { id_map }
}
pub fn new_symbol(&mut self, name: &str) -> Symbol {
let id = self.id_map.entry(name.to_owned()).or_insert(-1);
*id += 1;
Symbol::new(name, *id)
}
}
pub fn join<T: iter::Iterator<Item = String>>(
start: &str,
sep: &str,
end: &str,
strings: T,
) -> String {
let mut res = String::new();
res.push_str(start);
for (i, s) in strings.enumerate() {
if i > 0 {
res.push_str(sep);
}
res.push_str(&s);
}
res.push_str(end);
res
}
|
{}
|
conditional_block
|
test_cargo.rs
|
use std::env;
use std::ffi::OsString;
use std::fs::{self, File};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::str;
use cargo::util::process;
use support::paths;
use support::{execs, project, cargo_dir, mkdir_recursive, ProjectBuilder};
use hamcrest::{assert_that};
fn setup() {
}
/// Add an empty file with executable flags (and platform-dependent suffix).
/// TODO: move this to `ProjectBuilder` if other cases using this emerge.
fn fake_executable(proj: ProjectBuilder, dir: &Path, name: &str) -> ProjectBuilder {
let path = proj.root().join(dir).join(&format!("{}{}", name,
env::consts::EXE_SUFFIX));
mkdir_recursive(path.parent().unwrap()).unwrap();
File::create(&path).unwrap();
make_executable(&path);
return proj;
#[cfg(unix)]
fn make_executable(p: &Path) {
use std::os::unix::prelude::*;
let mut perms = fs::metadata(p).unwrap().permissions();;
let mode = perms.mode();
perms.set_mode(mode | 0o111);
fs::set_permissions(p, perms).unwrap();
}
#[cfg(windows)]
fn
|
(_: &Path) {}
}
fn path() -> Vec<PathBuf> {
env::split_paths(&env::var_os("PATH").unwrap_or(OsString::new())).collect()
}
test!(list_commands_looks_at_path {
let proj = project("list-non-overlapping");
let proj = fake_executable(proj, &Path::new("path-test"), "cargo-1");
let mut pr = process(&cargo_dir().join("cargo"));
pr.cwd(&proj.root())
.env("HOME", &paths::home());
let mut path = path();
path.push(proj.root().join("path-test"));
let path = env::join_paths(path.iter()).unwrap();
let output = pr.arg("-v").arg("--list")
.env("PATH", &path);
let output = output.exec_with_output().unwrap();
let output = str::from_utf8(&output.stdout).unwrap();
assert!(output.contains("\n 1\n"), "missing 1: {}", output);
});
test!(find_closest_biuld_to_build {
let mut pr = process(&cargo_dir().join("cargo"));
pr.arg("biuld").cwd(&paths::root()).env("HOME", &paths::home());
assert_that(pr,
execs().with_status(127)
.with_stderr("No such subcommand
Did you mean `build`?
"));
});
// if a subcommand is more than 3 edit distance away, we don't make a suggestion
test!(find_closest_dont_correct_nonsense {
let mut pr = process(&cargo_dir().join("cargo"));
pr.arg("asdf").cwd(&paths::root()).env("HOME", &paths::home());
assert_that(pr,
execs().with_status(127)
.with_stderr("No such subcommand
"));
});
test!(override_cargo_home {
let root = paths::root();
let my_home = root.join("my_home");
fs::create_dir(&my_home).unwrap();
File::create(&my_home.join("config")).unwrap().write_all(br#"
[cargo-new]
name = "foo"
email = "bar"
git = false
"#).unwrap();
assert_that(process(&cargo_dir().join("cargo"))
.arg("new").arg("foo")
.cwd(&paths::root())
.env("USER", "foo")
.env("HOME", &paths::home())
.env("CARGO_HOME", &my_home),
execs().with_status(0));
let toml = paths::root().join("foo/Cargo.toml");
let mut contents = String::new();
File::open(&toml).unwrap().read_to_string(&mut contents).unwrap();
assert!(contents.contains(r#"authors = ["foo <bar>"]"#));
});
test!(cargo_help {
assert_that(process(&cargo_dir().join("cargo")),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo")).arg("help"),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo")).arg("-h"),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo"))
.arg("help").arg("build"),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo"))
.arg("build").arg("-h"),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo"))
.arg("help").arg("-h"),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo"))
.arg("help").arg("help"),
execs().with_status(0));
});
|
make_executable
|
identifier_name
|
test_cargo.rs
|
use std::env;
use std::ffi::OsString;
use std::fs::{self, File};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::str;
use cargo::util::process;
use support::paths;
use support::{execs, project, cargo_dir, mkdir_recursive, ProjectBuilder};
use hamcrest::{assert_that};
fn setup() {
}
/// Add an empty file with executable flags (and platform-dependent suffix).
/// TODO: move this to `ProjectBuilder` if other cases using this emerge.
fn fake_executable(proj: ProjectBuilder, dir: &Path, name: &str) -> ProjectBuilder {
let path = proj.root().join(dir).join(&format!("{}{}", name,
env::consts::EXE_SUFFIX));
mkdir_recursive(path.parent().unwrap()).unwrap();
File::create(&path).unwrap();
make_executable(&path);
return proj;
#[cfg(unix)]
fn make_executable(p: &Path) {
use std::os::unix::prelude::*;
let mut perms = fs::metadata(p).unwrap().permissions();;
let mode = perms.mode();
perms.set_mode(mode | 0o111);
fs::set_permissions(p, perms).unwrap();
}
#[cfg(windows)]
fn make_executable(_: &Path) {}
}
fn path() -> Vec<PathBuf> {
env::split_paths(&env::var_os("PATH").unwrap_or(OsString::new())).collect()
}
test!(list_commands_looks_at_path {
let proj = project("list-non-overlapping");
let proj = fake_executable(proj, &Path::new("path-test"), "cargo-1");
let mut pr = process(&cargo_dir().join("cargo"));
pr.cwd(&proj.root())
.env("HOME", &paths::home());
let mut path = path();
path.push(proj.root().join("path-test"));
let path = env::join_paths(path.iter()).unwrap();
let output = pr.arg("-v").arg("--list")
.env("PATH", &path);
|
test!(find_closest_biuld_to_build {
let mut pr = process(&cargo_dir().join("cargo"));
pr.arg("biuld").cwd(&paths::root()).env("HOME", &paths::home());
assert_that(pr,
execs().with_status(127)
.with_stderr("No such subcommand
Did you mean `build`?
"));
});
// if a subcommand is more than 3 edit distance away, we don't make a suggestion
test!(find_closest_dont_correct_nonsense {
let mut pr = process(&cargo_dir().join("cargo"));
pr.arg("asdf").cwd(&paths::root()).env("HOME", &paths::home());
assert_that(pr,
execs().with_status(127)
.with_stderr("No such subcommand
"));
});
test!(override_cargo_home {
let root = paths::root();
let my_home = root.join("my_home");
fs::create_dir(&my_home).unwrap();
File::create(&my_home.join("config")).unwrap().write_all(br#"
[cargo-new]
name = "foo"
email = "bar"
git = false
"#).unwrap();
assert_that(process(&cargo_dir().join("cargo"))
.arg("new").arg("foo")
.cwd(&paths::root())
.env("USER", "foo")
.env("HOME", &paths::home())
.env("CARGO_HOME", &my_home),
execs().with_status(0));
let toml = paths::root().join("foo/Cargo.toml");
let mut contents = String::new();
File::open(&toml).unwrap().read_to_string(&mut contents).unwrap();
assert!(contents.contains(r#"authors = ["foo <bar>"]"#));
});
test!(cargo_help {
assert_that(process(&cargo_dir().join("cargo")),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo")).arg("help"),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo")).arg("-h"),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo"))
.arg("help").arg("build"),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo"))
.arg("build").arg("-h"),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo"))
.arg("help").arg("-h"),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo"))
.arg("help").arg("help"),
execs().with_status(0));
});
|
let output = output.exec_with_output().unwrap();
let output = str::from_utf8(&output.stdout).unwrap();
assert!(output.contains("\n 1\n"), "missing 1: {}", output);
});
|
random_line_split
|
test_cargo.rs
|
use std::env;
use std::ffi::OsString;
use std::fs::{self, File};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::str;
use cargo::util::process;
use support::paths;
use support::{execs, project, cargo_dir, mkdir_recursive, ProjectBuilder};
use hamcrest::{assert_that};
fn setup()
|
/// Add an empty file with executable flags (and platform-dependent suffix).
/// TODO: move this to `ProjectBuilder` if other cases using this emerge.
fn fake_executable(proj: ProjectBuilder, dir: &Path, name: &str) -> ProjectBuilder {
let path = proj.root().join(dir).join(&format!("{}{}", name,
env::consts::EXE_SUFFIX));
mkdir_recursive(path.parent().unwrap()).unwrap();
File::create(&path).unwrap();
make_executable(&path);
return proj;
#[cfg(unix)]
fn make_executable(p: &Path) {
use std::os::unix::prelude::*;
let mut perms = fs::metadata(p).unwrap().permissions();;
let mode = perms.mode();
perms.set_mode(mode | 0o111);
fs::set_permissions(p, perms).unwrap();
}
#[cfg(windows)]
fn make_executable(_: &Path) {}
}
fn path() -> Vec<PathBuf> {
env::split_paths(&env::var_os("PATH").unwrap_or(OsString::new())).collect()
}
test!(list_commands_looks_at_path {
let proj = project("list-non-overlapping");
let proj = fake_executable(proj, &Path::new("path-test"), "cargo-1");
let mut pr = process(&cargo_dir().join("cargo"));
pr.cwd(&proj.root())
.env("HOME", &paths::home());
let mut path = path();
path.push(proj.root().join("path-test"));
let path = env::join_paths(path.iter()).unwrap();
let output = pr.arg("-v").arg("--list")
.env("PATH", &path);
let output = output.exec_with_output().unwrap();
let output = str::from_utf8(&output.stdout).unwrap();
assert!(output.contains("\n 1\n"), "missing 1: {}", output);
});
test!(find_closest_biuld_to_build {
let mut pr = process(&cargo_dir().join("cargo"));
pr.arg("biuld").cwd(&paths::root()).env("HOME", &paths::home());
assert_that(pr,
execs().with_status(127)
.with_stderr("No such subcommand
Did you mean `build`?
"));
});
// if a subcommand is more than 3 edit distance away, we don't make a suggestion
test!(find_closest_dont_correct_nonsense {
let mut pr = process(&cargo_dir().join("cargo"));
pr.arg("asdf").cwd(&paths::root()).env("HOME", &paths::home());
assert_that(pr,
execs().with_status(127)
.with_stderr("No such subcommand
"));
});
test!(override_cargo_home {
let root = paths::root();
let my_home = root.join("my_home");
fs::create_dir(&my_home).unwrap();
File::create(&my_home.join("config")).unwrap().write_all(br#"
[cargo-new]
name = "foo"
email = "bar"
git = false
"#).unwrap();
assert_that(process(&cargo_dir().join("cargo"))
.arg("new").arg("foo")
.cwd(&paths::root())
.env("USER", "foo")
.env("HOME", &paths::home())
.env("CARGO_HOME", &my_home),
execs().with_status(0));
let toml = paths::root().join("foo/Cargo.toml");
let mut contents = String::new();
File::open(&toml).unwrap().read_to_string(&mut contents).unwrap();
assert!(contents.contains(r#"authors = ["foo <bar>"]"#));
});
test!(cargo_help {
assert_that(process(&cargo_dir().join("cargo")),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo")).arg("help"),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo")).arg("-h"),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo"))
.arg("help").arg("build"),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo"))
.arg("build").arg("-h"),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo"))
.arg("help").arg("-h"),
execs().with_status(0));
assert_that(process(&cargo_dir().join("cargo"))
.arg("help").arg("help"),
execs().with_status(0));
});
|
{
}
|
identifier_body
|
bandit.rs
|
use env::{Env, EnvConvert, EnvRepr, Action, DiscreteAction, Response, HorizonAveraged};
use rng::xorshift::{Xorshiftplus128Rng};
use rand::{Rng, thread_rng};
use rand::distributions::{IndependentSample};
use rand::distributions::range::{Range};
use std::f32::consts::{PI};
#[derive(Clone, Copy)]
pub struct BanditAction {
idx: u32,
}
impl Action for BanditAction {
fn dim() -> usize {
10
}
}
impl DiscreteAction for BanditAction {
fn from_idx(idx: u32) -> BanditAction {
assert!(idx < 10);
BanditAction{idx: idx}
}
fn idx(&self) -> u32 {
self.idx
}
}
#[derive(Clone, Copy, Debug)]
pub struct BanditConfig {
}
impl Default for BanditConfig {
fn default() -> BanditConfig {
BanditConfig{
}
}
}
#[derive(Clone, Copy, Default)]
struct BanditState {
}
//#[derive(Default)]
pub struct BanditEnv {
//cfg: BanditConfig,
//state: BanditState,
dist: Range<u32>,
rng: Xorshiftplus128Rng,
}
impl Default for BanditEnv {
fn default() -> BanditEnv {
BanditEnv{
dist: Range::new(0, 10),
rng: Xorshiftplus128Rng::new(&mut thread_rng()),
}
}
}
impl Env for BanditEnv {
type Init = BanditConfig;
type Action = BanditAction;
type Response = HorizonAveraged<f32>;
fn reset<R>(&self, init: &BanditConfig, rng: &mut R) where R: Rng + Sized {
self.rng = Xorshiftplus128Rng::new(rng);
}
fn is_terminal(&self) -> bool {
false
}
fn is_legal_action(&self, action: &BanditAction) -> bool {
true
}
fn step(&self, action: &BanditAction) -> Result<Option<HorizonAveraged<f32>>, ()> {
//self.dist.ind_sample(&mut self.rng);
if action.idx == 7
|
else {
Ok(Some(HorizonAveraged{value: 0.0, horizon: 100}))
}
}
}
impl EnvConvert<BanditEnv> for BanditEnv {
fn clone_from_env(&mut self, other: &BanditEnv) {
/*self.cfg = other.cfg;
self.total_mass = other.total_mass;
self.pole_mass_length = other.pole_mass_length;
self.state = other.state;*/
self.dist = other.dist;
self.rng = other.rng.clone();
}
}
impl EnvRepr<f32> for BanditEnv {
fn observable_sz(&self) -> usize {
10
}
fn extract_observable(&self, obs: &mut [f32]) {
/*obs[0] = self.state.x;
obs[1] = self.state.x_dot;
obs[2] = self.state.theta;
obs[3] = self.state.theta_dot;*/
for i in 0.. 10 {
obs[i] = 0.0;
}
obs[7] = 1.0;
}
}
|
{
Ok(Some(HorizonAveraged{value: 1.0, horizon: 100}))
}
|
conditional_block
|
bandit.rs
|
use env::{Env, EnvConvert, EnvRepr, Action, DiscreteAction, Response, HorizonAveraged};
use rng::xorshift::{Xorshiftplus128Rng};
use rand::{Rng, thread_rng};
use rand::distributions::{IndependentSample};
use rand::distributions::range::{Range};
use std::f32::consts::{PI};
#[derive(Clone, Copy)]
pub struct BanditAction {
idx: u32,
}
impl Action for BanditAction {
fn dim() -> usize {
10
}
}
impl DiscreteAction for BanditAction {
fn from_idx(idx: u32) -> BanditAction {
assert!(idx < 10);
BanditAction{idx: idx}
}
fn idx(&self) -> u32 {
self.idx
}
}
#[derive(Clone, Copy, Debug)]
pub struct BanditConfig {
|
fn default() -> BanditConfig {
BanditConfig{
}
}
}
#[derive(Clone, Copy, Default)]
struct BanditState {
}
//#[derive(Default)]
pub struct BanditEnv {
//cfg: BanditConfig,
//state: BanditState,
dist: Range<u32>,
rng: Xorshiftplus128Rng,
}
impl Default for BanditEnv {
fn default() -> BanditEnv {
BanditEnv{
dist: Range::new(0, 10),
rng: Xorshiftplus128Rng::new(&mut thread_rng()),
}
}
}
impl Env for BanditEnv {
type Init = BanditConfig;
type Action = BanditAction;
type Response = HorizonAveraged<f32>;
fn reset<R>(&self, init: &BanditConfig, rng: &mut R) where R: Rng + Sized {
self.rng = Xorshiftplus128Rng::new(rng);
}
fn is_terminal(&self) -> bool {
false
}
fn is_legal_action(&self, action: &BanditAction) -> bool {
true
}
fn step(&self, action: &BanditAction) -> Result<Option<HorizonAveraged<f32>>, ()> {
//self.dist.ind_sample(&mut self.rng);
if action.idx == 7 {
Ok(Some(HorizonAveraged{value: 1.0, horizon: 100}))
} else {
Ok(Some(HorizonAveraged{value: 0.0, horizon: 100}))
}
}
}
impl EnvConvert<BanditEnv> for BanditEnv {
fn clone_from_env(&mut self, other: &BanditEnv) {
/*self.cfg = other.cfg;
self.total_mass = other.total_mass;
self.pole_mass_length = other.pole_mass_length;
self.state = other.state;*/
self.dist = other.dist;
self.rng = other.rng.clone();
}
}
impl EnvRepr<f32> for BanditEnv {
fn observable_sz(&self) -> usize {
10
}
fn extract_observable(&self, obs: &mut [f32]) {
/*obs[0] = self.state.x;
obs[1] = self.state.x_dot;
obs[2] = self.state.theta;
obs[3] = self.state.theta_dot;*/
for i in 0.. 10 {
obs[i] = 0.0;
}
obs[7] = 1.0;
}
}
|
}
impl Default for BanditConfig {
|
random_line_split
|
bandit.rs
|
use env::{Env, EnvConvert, EnvRepr, Action, DiscreteAction, Response, HorizonAveraged};
use rng::xorshift::{Xorshiftplus128Rng};
use rand::{Rng, thread_rng};
use rand::distributions::{IndependentSample};
use rand::distributions::range::{Range};
use std::f32::consts::{PI};
#[derive(Clone, Copy)]
pub struct BanditAction {
idx: u32,
}
impl Action for BanditAction {
fn dim() -> usize {
10
}
}
impl DiscreteAction for BanditAction {
fn from_idx(idx: u32) -> BanditAction {
assert!(idx < 10);
BanditAction{idx: idx}
}
fn idx(&self) -> u32 {
self.idx
}
}
#[derive(Clone, Copy, Debug)]
pub struct BanditConfig {
}
impl Default for BanditConfig {
fn default() -> BanditConfig {
BanditConfig{
}
}
}
#[derive(Clone, Copy, Default)]
struct
|
{
}
//#[derive(Default)]
pub struct BanditEnv {
//cfg: BanditConfig,
//state: BanditState,
dist: Range<u32>,
rng: Xorshiftplus128Rng,
}
impl Default for BanditEnv {
fn default() -> BanditEnv {
BanditEnv{
dist: Range::new(0, 10),
rng: Xorshiftplus128Rng::new(&mut thread_rng()),
}
}
}
impl Env for BanditEnv {
type Init = BanditConfig;
type Action = BanditAction;
type Response = HorizonAveraged<f32>;
fn reset<R>(&self, init: &BanditConfig, rng: &mut R) where R: Rng + Sized {
self.rng = Xorshiftplus128Rng::new(rng);
}
fn is_terminal(&self) -> bool {
false
}
fn is_legal_action(&self, action: &BanditAction) -> bool {
true
}
fn step(&self, action: &BanditAction) -> Result<Option<HorizonAveraged<f32>>, ()> {
//self.dist.ind_sample(&mut self.rng);
if action.idx == 7 {
Ok(Some(HorizonAveraged{value: 1.0, horizon: 100}))
} else {
Ok(Some(HorizonAveraged{value: 0.0, horizon: 100}))
}
}
}
impl EnvConvert<BanditEnv> for BanditEnv {
fn clone_from_env(&mut self, other: &BanditEnv) {
/*self.cfg = other.cfg;
self.total_mass = other.total_mass;
self.pole_mass_length = other.pole_mass_length;
self.state = other.state;*/
self.dist = other.dist;
self.rng = other.rng.clone();
}
}
impl EnvRepr<f32> for BanditEnv {
fn observable_sz(&self) -> usize {
10
}
fn extract_observable(&self, obs: &mut [f32]) {
/*obs[0] = self.state.x;
obs[1] = self.state.x_dot;
obs[2] = self.state.theta;
obs[3] = self.state.theta_dot;*/
for i in 0.. 10 {
obs[i] = 0.0;
}
obs[7] = 1.0;
}
}
|
BanditState
|
identifier_name
|
bandit.rs
|
use env::{Env, EnvConvert, EnvRepr, Action, DiscreteAction, Response, HorizonAveraged};
use rng::xorshift::{Xorshiftplus128Rng};
use rand::{Rng, thread_rng};
use rand::distributions::{IndependentSample};
use rand::distributions::range::{Range};
use std::f32::consts::{PI};
#[derive(Clone, Copy)]
pub struct BanditAction {
idx: u32,
}
impl Action for BanditAction {
fn dim() -> usize {
10
}
}
impl DiscreteAction for BanditAction {
fn from_idx(idx: u32) -> BanditAction {
assert!(idx < 10);
BanditAction{idx: idx}
}
fn idx(&self) -> u32 {
self.idx
}
}
#[derive(Clone, Copy, Debug)]
pub struct BanditConfig {
}
impl Default for BanditConfig {
fn default() -> BanditConfig {
BanditConfig{
}
}
}
#[derive(Clone, Copy, Default)]
struct BanditState {
}
//#[derive(Default)]
pub struct BanditEnv {
//cfg: BanditConfig,
//state: BanditState,
dist: Range<u32>,
rng: Xorshiftplus128Rng,
}
impl Default for BanditEnv {
fn default() -> BanditEnv {
BanditEnv{
dist: Range::new(0, 10),
rng: Xorshiftplus128Rng::new(&mut thread_rng()),
}
}
}
impl Env for BanditEnv {
type Init = BanditConfig;
type Action = BanditAction;
type Response = HorizonAveraged<f32>;
fn reset<R>(&self, init: &BanditConfig, rng: &mut R) where R: Rng + Sized {
self.rng = Xorshiftplus128Rng::new(rng);
}
fn is_terminal(&self) -> bool {
false
}
fn is_legal_action(&self, action: &BanditAction) -> bool {
true
}
fn step(&self, action: &BanditAction) -> Result<Option<HorizonAveraged<f32>>, ()> {
//self.dist.ind_sample(&mut self.rng);
if action.idx == 7 {
Ok(Some(HorizonAveraged{value: 1.0, horizon: 100}))
} else {
Ok(Some(HorizonAveraged{value: 0.0, horizon: 100}))
}
}
}
impl EnvConvert<BanditEnv> for BanditEnv {
fn clone_from_env(&mut self, other: &BanditEnv)
|
}
impl EnvRepr<f32> for BanditEnv {
fn observable_sz(&self) -> usize {
10
}
fn extract_observable(&self, obs: &mut [f32]) {
/*obs[0] = self.state.x;
obs[1] = self.state.x_dot;
obs[2] = self.state.theta;
obs[3] = self.state.theta_dot;*/
for i in 0.. 10 {
obs[i] = 0.0;
}
obs[7] = 1.0;
}
}
|
{
/*self.cfg = other.cfg;
self.total_mass = other.total_mass;
self.pole_mass_length = other.pole_mass_length;
self.state = other.state;*/
self.dist = other.dist;
self.rng = other.rng.clone();
}
|
identifier_body
|
main.rs
|
extern crate clap;
extern crate ansi_term;
extern crate vagment;
use std::io;
use std::io::Write;
use std::io::stdout;
use clap::ArgMatches;
use ansi_term::Colour::Yellow;
use vagment::app::logger;
use vagment::app::vagrant;
use vagment::app::formatter;
use vagment::app::machine::Machine;
use vagment::app::machine::Machines;
use vagment::app::command::AppCommand;
use vagment::app::number::AppNumber;
use vagment::app::args::AppArgs;
use vagment::app::errors::CommandError;
fn main()
|
println!("");
std::process::exit(1);
}
}
}
fn parse(matches: &ArgMatches, machines: &[Machine]) -> Option<(String, u16)> {
let mut number = 0;
let mut command = String::from("");
if let Some(subcommand) = matches.subcommand_name() {
command = subcommand.to_string();
if let Some(matches) = matches.subcommand_matches(subcommand) {
number = matches.parse_machine_number();
}
}
if command.needs_a_machine() &&!number.is_valid() {
number = if machines.len() > 1 {
ask_for_machine_number(machines)
} else {
1
};
}
if command.is_empty() {
None
} else {
Some((command, number))
}
}
fn ask_for_machine_number(machines: &[Machine]) -> u16 {
println!("{}", formatter::format(machines));
print!("{}", Yellow.paint("Please enter a machine number: "));
let _ = stdout().flush();
let mut input = String::new();
match io::stdin().read_line(&mut input) {
Ok(bytes) => bytes,
Err(error) => panic!("Could not read input: {}", error),
};
input.trim().to_string().parse().unwrap_or(0)
}
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
fn run(command: String, number: u16, machines: Vec<Machine>) -> Result<String, CommandError> {
if machines.len() < 1 {
return Err(CommandError::NoMachinesFound);
}
if command.needs_a_machine() {
let search = machines.get_machine_by_number(number);
if search.is_none() {
return Err(CommandError::InvalidNumber(number));
}
let machine = search.unwrap();
if command.needs_machine_up() &&!machine.is_running() {
logger::info("VM is not running, we are going to boot it up");
if vagrant::execute("up".to_string(), machine.get_path()).is_err() {
return Err(CommandError::MachineNotBootable);
}
}
match command.as_str() {
"up" | "ssh" | "halt" | "status" | "resume" | "reload" | "suspend" | "destroy" => {
vagrant::execute(command, machine.get_path())
}
"dump" => vagrant::dump(machine.get_path(), machine.get_vagrant_file_path()),
"edit" => vagrant::edit(machine.get_path(), machine.get_vagrant_file_path()),
_ => Err(CommandError::InvalidCommand(command)),
}
} else {
match command.as_str() {
"list" => vagrant::print_list(&machines),
"refresh" => vagrant::refresh(),
"shutdown" => vagrant::shutdown(machines.get_running_machines()),
"bootup" => vagrant::bootup(machines.get_stopped_machines()),
_ => Err(CommandError::InvalidCommand(command)),
}
}
}
|
{
let mut cli = vagment::app::cli::init();
let matches = cli.clone().get_matches();
let machines = vagrant::get_machine_list();
match parse(&matches, &machines) {
Some((command, number)) => {
match run(command, number, machines) {
Ok(m) => {
logger::info(m);
std::process::exit(0);
}
Err(e) => {
logger::error(e);
std::process::exit(1);
}
}
}
None => {
let _ = cli.print_help();
|
identifier_body
|
main.rs
|
extern crate clap;
extern crate ansi_term;
extern crate vagment;
use std::io;
use std::io::Write;
use std::io::stdout;
use clap::ArgMatches;
use ansi_term::Colour::Yellow;
use vagment::app::logger;
use vagment::app::vagrant;
use vagment::app::formatter;
use vagment::app::machine::Machine;
use vagment::app::machine::Machines;
use vagment::app::command::AppCommand;
use vagment::app::number::AppNumber;
use vagment::app::args::AppArgs;
use vagment::app::errors::CommandError;
fn main() {
let mut cli = vagment::app::cli::init();
let matches = cli.clone().get_matches();
let machines = vagrant::get_machine_list();
match parse(&matches, &machines) {
Some((command, number)) => {
match run(command, number, machines) {
Ok(m) => {
logger::info(m);
std::process::exit(0);
}
Err(e) => {
logger::error(e);
std::process::exit(1);
}
}
}
None => {
let _ = cli.print_help();
println!("");
std::process::exit(1);
}
|
fn parse(matches: &ArgMatches, machines: &[Machine]) -> Option<(String, u16)> {
let mut number = 0;
let mut command = String::from("");
if let Some(subcommand) = matches.subcommand_name() {
command = subcommand.to_string();
if let Some(matches) = matches.subcommand_matches(subcommand) {
number = matches.parse_machine_number();
}
}
if command.needs_a_machine() &&!number.is_valid() {
number = if machines.len() > 1 {
ask_for_machine_number(machines)
} else {
1
};
}
if command.is_empty() {
None
} else {
Some((command, number))
}
}
fn ask_for_machine_number(machines: &[Machine]) -> u16 {
println!("{}", formatter::format(machines));
print!("{}", Yellow.paint("Please enter a machine number: "));
let _ = stdout().flush();
let mut input = String::new();
match io::stdin().read_line(&mut input) {
Ok(bytes) => bytes,
Err(error) => panic!("Could not read input: {}", error),
};
input.trim().to_string().parse().unwrap_or(0)
}
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
fn run(command: String, number: u16, machines: Vec<Machine>) -> Result<String, CommandError> {
if machines.len() < 1 {
return Err(CommandError::NoMachinesFound);
}
if command.needs_a_machine() {
let search = machines.get_machine_by_number(number);
if search.is_none() {
return Err(CommandError::InvalidNumber(number));
}
let machine = search.unwrap();
if command.needs_machine_up() &&!machine.is_running() {
logger::info("VM is not running, we are going to boot it up");
if vagrant::execute("up".to_string(), machine.get_path()).is_err() {
return Err(CommandError::MachineNotBootable);
}
}
match command.as_str() {
"up" | "ssh" | "halt" | "status" | "resume" | "reload" | "suspend" | "destroy" => {
vagrant::execute(command, machine.get_path())
}
"dump" => vagrant::dump(machine.get_path(), machine.get_vagrant_file_path()),
"edit" => vagrant::edit(machine.get_path(), machine.get_vagrant_file_path()),
_ => Err(CommandError::InvalidCommand(command)),
}
} else {
match command.as_str() {
"list" => vagrant::print_list(&machines),
"refresh" => vagrant::refresh(),
"shutdown" => vagrant::shutdown(machines.get_running_machines()),
"bootup" => vagrant::bootup(machines.get_stopped_machines()),
_ => Err(CommandError::InvalidCommand(command)),
}
}
}
|
}
}
|
random_line_split
|
main.rs
|
extern crate clap;
extern crate ansi_term;
extern crate vagment;
use std::io;
use std::io::Write;
use std::io::stdout;
use clap::ArgMatches;
use ansi_term::Colour::Yellow;
use vagment::app::logger;
use vagment::app::vagrant;
use vagment::app::formatter;
use vagment::app::machine::Machine;
use vagment::app::machine::Machines;
use vagment::app::command::AppCommand;
use vagment::app::number::AppNumber;
use vagment::app::args::AppArgs;
use vagment::app::errors::CommandError;
fn main() {
let mut cli = vagment::app::cli::init();
let matches = cli.clone().get_matches();
let machines = vagrant::get_machine_list();
match parse(&matches, &machines) {
Some((command, number)) => {
match run(command, number, machines) {
Ok(m) => {
logger::info(m);
std::process::exit(0);
}
Err(e) => {
logger::error(e);
std::process::exit(1);
}
}
}
None => {
let _ = cli.print_help();
println!("");
std::process::exit(1);
}
}
}
fn parse(matches: &ArgMatches, machines: &[Machine]) -> Option<(String, u16)> {
let mut number = 0;
let mut command = String::from("");
if let Some(subcommand) = matches.subcommand_name() {
command = subcommand.to_string();
if let Some(matches) = matches.subcommand_matches(subcommand) {
number = matches.parse_machine_number();
}
}
if command.needs_a_machine() &&!number.is_valid() {
number = if machines.len() > 1 {
ask_for_machine_number(machines)
} else {
1
};
}
if command.is_empty() {
None
} else {
Some((command, number))
}
}
fn
|
(machines: &[Machine]) -> u16 {
println!("{}", formatter::format(machines));
print!("{}", Yellow.paint("Please enter a machine number: "));
let _ = stdout().flush();
let mut input = String::new();
match io::stdin().read_line(&mut input) {
Ok(bytes) => bytes,
Err(error) => panic!("Could not read input: {}", error),
};
input.trim().to_string().parse().unwrap_or(0)
}
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
fn run(command: String, number: u16, machines: Vec<Machine>) -> Result<String, CommandError> {
if machines.len() < 1 {
return Err(CommandError::NoMachinesFound);
}
if command.needs_a_machine() {
let search = machines.get_machine_by_number(number);
if search.is_none() {
return Err(CommandError::InvalidNumber(number));
}
let machine = search.unwrap();
if command.needs_machine_up() &&!machine.is_running() {
logger::info("VM is not running, we are going to boot it up");
if vagrant::execute("up".to_string(), machine.get_path()).is_err() {
return Err(CommandError::MachineNotBootable);
}
}
match command.as_str() {
"up" | "ssh" | "halt" | "status" | "resume" | "reload" | "suspend" | "destroy" => {
vagrant::execute(command, machine.get_path())
}
"dump" => vagrant::dump(machine.get_path(), machine.get_vagrant_file_path()),
"edit" => vagrant::edit(machine.get_path(), machine.get_vagrant_file_path()),
_ => Err(CommandError::InvalidCommand(command)),
}
} else {
match command.as_str() {
"list" => vagrant::print_list(&machines),
"refresh" => vagrant::refresh(),
"shutdown" => vagrant::shutdown(machines.get_running_machines()),
"bootup" => vagrant::bootup(machines.get_stopped_machines()),
_ => Err(CommandError::InvalidCommand(command)),
}
}
}
|
ask_for_machine_number
|
identifier_name
|
cmp.rs
|
use core::ops::{Eq, Deref};
use str_one::{ByteStr, CStr, NoNullStr};
use string::{String};
use c_string::{CString};
use alloc::{MemPool};
macro_rules! owned {
($one:ident, $two:ident) => {
impl<H1, H2> Eq<$two<H2>> for $one<H1>
where H1: MemPool,
H2: MemPool,
{
fn eq(&self, other: &$two<H2>) -> bool {
self.deref() == other.deref()
}
}
}
}
owned!(CString, CString);
owned!(CString, String);
owned!(String, CString);
owned!(String, String);
macro_rules! borrowed_no_str {
($one:ident, $two:ty) => {
impl<H> Eq<$two> for $one<H>
where H: MemPool,
{
fn eq(&self, other: &$two) -> bool {
let deref: &[u8] = self.deref().deref();
deref == other
}
}
}
}
borrowed_no_str!(CString, ByteStr);
borrowed_no_str!(CString, NoNullStr);
borrowed_no_str!(CString, CStr);
borrowed_no_str!(CString, str);
borrowed_no_str!(CString, [u8]);
macro_rules! borrowed_str {
($one:ident, $two:ty) => {
impl<H> Eq<$two> for $one<H>
where H: MemPool,
{
fn eq(&self, other: &$two) -> bool {
self.as_bytes() == other
}
}
}
}
borrowed_str!(String, ByteStr);
borrowed_str!(String, NoNullStr);
borrowed_str!(String, CStr);
borrowed_str!(String, str);
borrowed_str!(String, [u8]);
|
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
random_line_split
|
|
gdb-pretty-struct-and-enums.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows failing on win32 bot
// ignore-tidy-linelength
// ignore-lldb
// ignore-android: FIXME(#10381)
// compile-flags:-g
// gdb-use-pretty-printer
// This test uses some GDB Python API features (e.g. accessing anonymous fields)
// which are only available in newer GDB version. The following directive will
// case the test runner to ignore this test if an older GDB version is used:
// min-gdb-version 7.7
// gdb-command: run
// gdb-command: print regular_struct
// gdb-check:$1 = RegularStruct = {the_first_field = 101, the_second_field = 102.5, the_third_field = false, the_fourth_field = "I'm so pretty, oh so pretty..."}
// gdb-command: print tuple
// gdb-check:$2 = {true, 103, "blub"}
// gdb-command: print tuple_struct
// gdb-check:$3 = TupleStruct = {-104.5, 105}
// gdb-command: print empty_struct
// gdb-check:$4 = EmptyStruct
// gdb-command: print c_style_enum1
// gdb-check:$5 = CStyleEnumVar1
// gdb-command: print c_style_enum2
// gdb-check:$6 = CStyleEnumVar2
// gdb-command: print c_style_enum3
// gdb-check:$7 = CStyleEnumVar3
// gdb-command: print mixed_enum_c_style_var
// gdb-check:$8 = MixedEnumCStyleVar
// gdb-command: print mixed_enum_tuple_var
// gdb-check:$9 = MixedEnumTupleVar = {106, 107, false}
// gdb-command: print mixed_enum_struct_var
// gdb-check:$10 = MixedEnumStructVar = {field1 = 108.5, field2 = 109}
// gdb-command: print some
// gdb-check:$11 = Some = {110}
// gdb-command: print none
// gdb-check:$12 = None
// gdb-command: print nested_variant1
// gdb-check:$13 = NestedVariant1 = {NestedStruct = {regular_struct = RegularStruct = {the_first_field = 111, the_second_field = 112.5, the_third_field = true, the_fourth_field = "NestedStructString1"}, tuple_struct = TupleStruct = {113.5, 114}, empty_struct = EmptyStruct, c_style_enum = CStyleEnumVar2, mixed_enum = MixedEnumTupleVar = {115, 116, false}}}
// gdb-command: print nested_variant2
// gdb-check:$14 = NestedVariant2 = {abc = NestedStruct = {regular_struct = RegularStruct = {the_first_field = 117, the_second_field = 118.5, the_third_field = false, the_fourth_field = "NestedStructString10"}, tuple_struct = TupleStruct = {119.5, 120}, empty_struct = EmptyStruct, c_style_enum = CStyleEnumVar3, mixed_enum = MixedEnumStructVar = {field1 = 121.5, field2 = -122}}}
#![feature(struct_variant)]
use self::CStyleEnum::{CStyleEnumVar1, CStyleEnumVar2, CStyleEnumVar3};
use self::MixedEnum::{MixedEnumCStyleVar, MixedEnumTupleVar, MixedEnumStructVar};
use self::NestedEnum::{NestedVariant1, NestedVariant2};
|
the_fourth_field: &'static str,
}
struct TupleStruct(f64, i16);
struct EmptyStruct;
enum CStyleEnum {
CStyleEnumVar1,
CStyleEnumVar2,
CStyleEnumVar3,
}
enum MixedEnum {
MixedEnumCStyleVar,
MixedEnumTupleVar(u32, u16, bool),
MixedEnumStructVar { field1: f64, field2: i32 }
}
struct NestedStruct {
regular_struct: RegularStruct,
tuple_struct: TupleStruct,
empty_struct: EmptyStruct,
c_style_enum: CStyleEnum,
mixed_enum: MixedEnum,
}
enum NestedEnum {
NestedVariant1(NestedStruct),
NestedVariant2 { abc: NestedStruct }
}
fn main() {
let regular_struct = RegularStruct {
the_first_field: 101,
the_second_field: 102.5,
the_third_field: false,
the_fourth_field: "I'm so pretty, oh so pretty..."
};
let tuple = ( true, 103u32, "blub" );
let tuple_struct = TupleStruct(-104.5, 105);
let empty_struct = EmptyStruct;
let c_style_enum1 = CStyleEnumVar1;
let c_style_enum2 = CStyleEnumVar2;
let c_style_enum3 = CStyleEnumVar3;
let mixed_enum_c_style_var = MixedEnumCStyleVar;
let mixed_enum_tuple_var = MixedEnumTupleVar(106, 107, false);
let mixed_enum_struct_var = MixedEnumStructVar { field1: 108.5, field2: 109 };
let some = Some(110u);
let none: Option<int> = None;
let nested_variant1 = NestedVariant1(
NestedStruct {
regular_struct: RegularStruct {
the_first_field: 111,
the_second_field: 112.5,
the_third_field: true,
the_fourth_field: "NestedStructString1",
},
tuple_struct: TupleStruct(113.5, 114),
empty_struct: EmptyStruct,
c_style_enum: CStyleEnumVar2,
mixed_enum: MixedEnumTupleVar(115, 116, false)
}
);
let nested_variant2 = NestedVariant2 {
abc: NestedStruct {
regular_struct: RegularStruct {
the_first_field: 117,
the_second_field: 118.5,
the_third_field: false,
the_fourth_field: "NestedStructString10",
},
tuple_struct: TupleStruct(119.5, 120),
empty_struct: EmptyStruct,
c_style_enum: CStyleEnumVar3,
mixed_enum: MixedEnumStructVar {
field1: 121.5,
field2: -122
}
}
};
zzz(); // #break
}
fn zzz() { () }
|
struct RegularStruct {
the_first_field: int,
the_second_field: f64,
the_third_field: bool,
|
random_line_split
|
gdb-pretty-struct-and-enums.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows failing on win32 bot
// ignore-tidy-linelength
// ignore-lldb
// ignore-android: FIXME(#10381)
// compile-flags:-g
// gdb-use-pretty-printer
// This test uses some GDB Python API features (e.g. accessing anonymous fields)
// which are only available in newer GDB version. The following directive will
// case the test runner to ignore this test if an older GDB version is used:
// min-gdb-version 7.7
// gdb-command: run
// gdb-command: print regular_struct
// gdb-check:$1 = RegularStruct = {the_first_field = 101, the_second_field = 102.5, the_third_field = false, the_fourth_field = "I'm so pretty, oh so pretty..."}
// gdb-command: print tuple
// gdb-check:$2 = {true, 103, "blub"}
// gdb-command: print tuple_struct
// gdb-check:$3 = TupleStruct = {-104.5, 105}
// gdb-command: print empty_struct
// gdb-check:$4 = EmptyStruct
// gdb-command: print c_style_enum1
// gdb-check:$5 = CStyleEnumVar1
// gdb-command: print c_style_enum2
// gdb-check:$6 = CStyleEnumVar2
// gdb-command: print c_style_enum3
// gdb-check:$7 = CStyleEnumVar3
// gdb-command: print mixed_enum_c_style_var
// gdb-check:$8 = MixedEnumCStyleVar
// gdb-command: print mixed_enum_tuple_var
// gdb-check:$9 = MixedEnumTupleVar = {106, 107, false}
// gdb-command: print mixed_enum_struct_var
// gdb-check:$10 = MixedEnumStructVar = {field1 = 108.5, field2 = 109}
// gdb-command: print some
// gdb-check:$11 = Some = {110}
// gdb-command: print none
// gdb-check:$12 = None
// gdb-command: print nested_variant1
// gdb-check:$13 = NestedVariant1 = {NestedStruct = {regular_struct = RegularStruct = {the_first_field = 111, the_second_field = 112.5, the_third_field = true, the_fourth_field = "NestedStructString1"}, tuple_struct = TupleStruct = {113.5, 114}, empty_struct = EmptyStruct, c_style_enum = CStyleEnumVar2, mixed_enum = MixedEnumTupleVar = {115, 116, false}}}
// gdb-command: print nested_variant2
// gdb-check:$14 = NestedVariant2 = {abc = NestedStruct = {regular_struct = RegularStruct = {the_first_field = 117, the_second_field = 118.5, the_third_field = false, the_fourth_field = "NestedStructString10"}, tuple_struct = TupleStruct = {119.5, 120}, empty_struct = EmptyStruct, c_style_enum = CStyleEnumVar3, mixed_enum = MixedEnumStructVar = {field1 = 121.5, field2 = -122}}}
#![feature(struct_variant)]
use self::CStyleEnum::{CStyleEnumVar1, CStyleEnumVar2, CStyleEnumVar3};
use self::MixedEnum::{MixedEnumCStyleVar, MixedEnumTupleVar, MixedEnumStructVar};
use self::NestedEnum::{NestedVariant1, NestedVariant2};
struct RegularStruct {
the_first_field: int,
the_second_field: f64,
the_third_field: bool,
the_fourth_field: &'static str,
}
struct TupleStruct(f64, i16);
struct EmptyStruct;
enum CStyleEnum {
CStyleEnumVar1,
CStyleEnumVar2,
CStyleEnumVar3,
}
enum MixedEnum {
MixedEnumCStyleVar,
MixedEnumTupleVar(u32, u16, bool),
MixedEnumStructVar { field1: f64, field2: i32 }
}
struct NestedStruct {
regular_struct: RegularStruct,
tuple_struct: TupleStruct,
empty_struct: EmptyStruct,
c_style_enum: CStyleEnum,
mixed_enum: MixedEnum,
}
enum NestedEnum {
NestedVariant1(NestedStruct),
NestedVariant2 { abc: NestedStruct }
}
fn
|
() {
let regular_struct = RegularStruct {
the_first_field: 101,
the_second_field: 102.5,
the_third_field: false,
the_fourth_field: "I'm so pretty, oh so pretty..."
};
let tuple = ( true, 103u32, "blub" );
let tuple_struct = TupleStruct(-104.5, 105);
let empty_struct = EmptyStruct;
let c_style_enum1 = CStyleEnumVar1;
let c_style_enum2 = CStyleEnumVar2;
let c_style_enum3 = CStyleEnumVar3;
let mixed_enum_c_style_var = MixedEnumCStyleVar;
let mixed_enum_tuple_var = MixedEnumTupleVar(106, 107, false);
let mixed_enum_struct_var = MixedEnumStructVar { field1: 108.5, field2: 109 };
let some = Some(110u);
let none: Option<int> = None;
let nested_variant1 = NestedVariant1(
NestedStruct {
regular_struct: RegularStruct {
the_first_field: 111,
the_second_field: 112.5,
the_third_field: true,
the_fourth_field: "NestedStructString1",
},
tuple_struct: TupleStruct(113.5, 114),
empty_struct: EmptyStruct,
c_style_enum: CStyleEnumVar2,
mixed_enum: MixedEnumTupleVar(115, 116, false)
}
);
let nested_variant2 = NestedVariant2 {
abc: NestedStruct {
regular_struct: RegularStruct {
the_first_field: 117,
the_second_field: 118.5,
the_third_field: false,
the_fourth_field: "NestedStructString10",
},
tuple_struct: TupleStruct(119.5, 120),
empty_struct: EmptyStruct,
c_style_enum: CStyleEnumVar3,
mixed_enum: MixedEnumStructVar {
field1: 121.5,
field2: -122
}
}
};
zzz(); // #break
}
fn zzz() { () }
|
main
|
identifier_name
|
packed-struct-with-destructor.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// ignore-android: FIXME(#10381)
// compile-flags:-g
// debugger:set print pretty off
// debugger:rbreak zzz
// debugger:run
// debugger:finish
// debugger:print packed
// check:$1 = {x = 123, y = 234, z = 345}
// debugger:print packedInPacked
// check:$2 = {a = 1111, b = {x = 2222, y = 3333, z = 4444}, c = 5555, d = {x = 6666, y = 7777, z = 8888}}
|
// debugger:print unpackedInPacked
// check:$4 = {a = 987, b = {x = 876, y = 765, z = 654}, c = {x = 543, y = 432, z = 321}, d = 210}
// debugger:print packedInPackedWithDrop
// check:$5 = {a = 11, b = {x = 22, y = 33, z = 44}, c = 55, d = {x = 66, y = 77, z = 88}}
// debugger:print packedInUnpackedWithDrop
// check:$6 = {a = -11, b = {x = -22, y = -33, z = -44}, c = -55, d = {x = -66, y = -77, z = -88}}
// debugger:print unpackedInPackedWithDrop
// check:$7 = {a = 98, b = {x = 87, y = 76, z = 65}, c = {x = 54, y = 43, z = 32}, d = 21}
// debugger:print deeplyNested
// check:$8 = {a = {a = 1, b = {x = 2, y = 3, z = 4}, c = 5, d = {x = 6, y = 7, z = 8}}, b = {a = 9, b = {x = 10, y = 11, z = 12}, c = {x = 13, y = 14, z = 15}, d = 16}, c = {a = 17, b = {x = 18, y = 19, z = 20}, c = 21, d = {x = 22, y = 23, z = 24}}, d = {a = 25, b = {x = 26, y = 27, z = 28}, c = 29, d = {x = 30, y = 31, z = 32}}, e = {a = 33, b = {x = 34, y = 35, z = 36}, c = {x = 37, y = 38, z = 39}, d = 40}, f = {a = 41, b = {x = 42, y = 43, z = 44}, c = 45, d = {x = 46, y = 47, z = 48}}}
#![allow(unused_variable)]
#[packed]
struct Packed {
x: i16,
y: i32,
z: i64
}
impl Drop for Packed {
fn drop(&mut self) {}
}
#[packed]
struct PackedInPacked {
a: i32,
b: Packed,
c: i64,
d: Packed
}
struct PackedInUnpacked {
a: i32,
b: Packed,
c: i64,
d: Packed
}
struct Unpacked {
x: i64,
y: i32,
z: i16
}
impl Drop for Unpacked {
fn drop(&mut self) {}
}
#[packed]
struct UnpackedInPacked {
a: i16,
b: Unpacked,
c: Unpacked,
d: i64
}
#[packed]
struct PackedInPackedWithDrop {
a: i32,
b: Packed,
c: i64,
d: Packed
}
impl Drop for PackedInPackedWithDrop {
fn drop(&mut self) {}
}
struct PackedInUnpackedWithDrop {
a: i32,
b: Packed,
c: i64,
d: Packed
}
impl Drop for PackedInUnpackedWithDrop {
fn drop(&mut self) {}
}
#[packed]
struct UnpackedInPackedWithDrop {
a: i16,
b: Unpacked,
c: Unpacked,
d: i64
}
impl Drop for UnpackedInPackedWithDrop {
fn drop(&mut self) {}
}
struct DeeplyNested {
a: PackedInPacked,
b: UnpackedInPackedWithDrop,
c: PackedInUnpacked,
d: PackedInUnpackedWithDrop,
e: UnpackedInPacked,
f: PackedInPackedWithDrop
}
fn main() {
let packed = Packed { x: 123, y: 234, z: 345 };
let packedInPacked = PackedInPacked {
a: 1111,
b: Packed { x: 2222, y: 3333, z: 4444 },
c: 5555,
d: Packed { x: 6666, y: 7777, z: 8888 }
};
let packedInUnpacked = PackedInUnpacked {
a: -1111,
b: Packed { x: -2222, y: -3333, z: -4444 },
c: -5555,
d: Packed { x: -6666, y: -7777, z: -8888 }
};
let unpackedInPacked = UnpackedInPacked {
a: 987,
b: Unpacked { x: 876, y: 765, z: 654 },
c: Unpacked { x: 543, y: 432, z: 321 },
d: 210
};
let packedInPackedWithDrop = PackedInPackedWithDrop {
a: 11,
b: Packed { x: 22, y: 33, z: 44 },
c: 55,
d: Packed { x: 66, y: 77, z: 88 }
};
let packedInUnpackedWithDrop = PackedInUnpackedWithDrop {
a: -11,
b: Packed { x: -22, y: -33, z: -44 },
c: -55,
d: Packed { x: -66, y: -77, z: -88 }
};
let unpackedInPackedWithDrop = UnpackedInPackedWithDrop {
a: 98,
b: Unpacked { x: 87, y: 76, z: 65 },
c: Unpacked { x: 54, y: 43, z: 32 },
d: 21
};
let deeplyNested = DeeplyNested {
a: PackedInPacked {
a: 1,
b: Packed { x: 2, y: 3, z: 4 },
c: 5,
d: Packed { x: 6, y: 7, z: 8 }
},
b: UnpackedInPackedWithDrop {
a: 9,
b: Unpacked { x: 10, y: 11, z: 12 },
c: Unpacked { x: 13, y: 14, z: 15 },
d: 16
},
c: PackedInUnpacked {
a: 17,
b: Packed { x: 18, y: 19, z: 20 },
c: 21,
d: Packed { x: 22, y: 23, z: 24 }
},
d: PackedInUnpackedWithDrop {
a: 25,
b: Packed { x: 26, y: 27, z: 28 },
c: 29,
d: Packed { x: 30, y: 31, z: 32 }
},
e: UnpackedInPacked {
a: 33,
b: Unpacked { x: 34, y: 35, z: 36 },
c: Unpacked { x: 37, y: 38, z: 39 },
d: 40
},
f: PackedInPackedWithDrop {
a: 41,
b: Packed { x: 42, y: 43, z: 44 },
c: 45,
d: Packed { x: 46, y: 47, z: 48 }
}
};
zzz();
}
fn zzz() {()}
|
// debugger:print packedInUnpacked
// check:$3 = {a = -1111, b = {x = -2222, y = -3333, z = -4444}, c = -5555, d = {x = -6666, y = -7777, z = -8888}}
|
random_line_split
|
packed-struct-with-destructor.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
// ignore-android: FIXME(#10381)
// compile-flags:-g
// debugger:set print pretty off
// debugger:rbreak zzz
// debugger:run
// debugger:finish
// debugger:print packed
// check:$1 = {x = 123, y = 234, z = 345}
// debugger:print packedInPacked
// check:$2 = {a = 1111, b = {x = 2222, y = 3333, z = 4444}, c = 5555, d = {x = 6666, y = 7777, z = 8888}}
// debugger:print packedInUnpacked
// check:$3 = {a = -1111, b = {x = -2222, y = -3333, z = -4444}, c = -5555, d = {x = -6666, y = -7777, z = -8888}}
// debugger:print unpackedInPacked
// check:$4 = {a = 987, b = {x = 876, y = 765, z = 654}, c = {x = 543, y = 432, z = 321}, d = 210}
// debugger:print packedInPackedWithDrop
// check:$5 = {a = 11, b = {x = 22, y = 33, z = 44}, c = 55, d = {x = 66, y = 77, z = 88}}
// debugger:print packedInUnpackedWithDrop
// check:$6 = {a = -11, b = {x = -22, y = -33, z = -44}, c = -55, d = {x = -66, y = -77, z = -88}}
// debugger:print unpackedInPackedWithDrop
// check:$7 = {a = 98, b = {x = 87, y = 76, z = 65}, c = {x = 54, y = 43, z = 32}, d = 21}
// debugger:print deeplyNested
// check:$8 = {a = {a = 1, b = {x = 2, y = 3, z = 4}, c = 5, d = {x = 6, y = 7, z = 8}}, b = {a = 9, b = {x = 10, y = 11, z = 12}, c = {x = 13, y = 14, z = 15}, d = 16}, c = {a = 17, b = {x = 18, y = 19, z = 20}, c = 21, d = {x = 22, y = 23, z = 24}}, d = {a = 25, b = {x = 26, y = 27, z = 28}, c = 29, d = {x = 30, y = 31, z = 32}}, e = {a = 33, b = {x = 34, y = 35, z = 36}, c = {x = 37, y = 38, z = 39}, d = 40}, f = {a = 41, b = {x = 42, y = 43, z = 44}, c = 45, d = {x = 46, y = 47, z = 48}}}
#![allow(unused_variable)]
#[packed]
struct Packed {
x: i16,
y: i32,
z: i64
}
impl Drop for Packed {
fn
|
(&mut self) {}
}
#[packed]
struct PackedInPacked {
a: i32,
b: Packed,
c: i64,
d: Packed
}
struct PackedInUnpacked {
a: i32,
b: Packed,
c: i64,
d: Packed
}
struct Unpacked {
x: i64,
y: i32,
z: i16
}
impl Drop for Unpacked {
fn drop(&mut self) {}
}
#[packed]
struct UnpackedInPacked {
a: i16,
b: Unpacked,
c: Unpacked,
d: i64
}
#[packed]
struct PackedInPackedWithDrop {
a: i32,
b: Packed,
c: i64,
d: Packed
}
impl Drop for PackedInPackedWithDrop {
fn drop(&mut self) {}
}
struct PackedInUnpackedWithDrop {
a: i32,
b: Packed,
c: i64,
d: Packed
}
impl Drop for PackedInUnpackedWithDrop {
fn drop(&mut self) {}
}
#[packed]
struct UnpackedInPackedWithDrop {
a: i16,
b: Unpacked,
c: Unpacked,
d: i64
}
impl Drop for UnpackedInPackedWithDrop {
fn drop(&mut self) {}
}
struct DeeplyNested {
a: PackedInPacked,
b: UnpackedInPackedWithDrop,
c: PackedInUnpacked,
d: PackedInUnpackedWithDrop,
e: UnpackedInPacked,
f: PackedInPackedWithDrop
}
fn main() {
let packed = Packed { x: 123, y: 234, z: 345 };
let packedInPacked = PackedInPacked {
a: 1111,
b: Packed { x: 2222, y: 3333, z: 4444 },
c: 5555,
d: Packed { x: 6666, y: 7777, z: 8888 }
};
let packedInUnpacked = PackedInUnpacked {
a: -1111,
b: Packed { x: -2222, y: -3333, z: -4444 },
c: -5555,
d: Packed { x: -6666, y: -7777, z: -8888 }
};
let unpackedInPacked = UnpackedInPacked {
a: 987,
b: Unpacked { x: 876, y: 765, z: 654 },
c: Unpacked { x: 543, y: 432, z: 321 },
d: 210
};
let packedInPackedWithDrop = PackedInPackedWithDrop {
a: 11,
b: Packed { x: 22, y: 33, z: 44 },
c: 55,
d: Packed { x: 66, y: 77, z: 88 }
};
let packedInUnpackedWithDrop = PackedInUnpackedWithDrop {
a: -11,
b: Packed { x: -22, y: -33, z: -44 },
c: -55,
d: Packed { x: -66, y: -77, z: -88 }
};
let unpackedInPackedWithDrop = UnpackedInPackedWithDrop {
a: 98,
b: Unpacked { x: 87, y: 76, z: 65 },
c: Unpacked { x: 54, y: 43, z: 32 },
d: 21
};
let deeplyNested = DeeplyNested {
a: PackedInPacked {
a: 1,
b: Packed { x: 2, y: 3, z: 4 },
c: 5,
d: Packed { x: 6, y: 7, z: 8 }
},
b: UnpackedInPackedWithDrop {
a: 9,
b: Unpacked { x: 10, y: 11, z: 12 },
c: Unpacked { x: 13, y: 14, z: 15 },
d: 16
},
c: PackedInUnpacked {
a: 17,
b: Packed { x: 18, y: 19, z: 20 },
c: 21,
d: Packed { x: 22, y: 23, z: 24 }
},
d: PackedInUnpackedWithDrop {
a: 25,
b: Packed { x: 26, y: 27, z: 28 },
c: 29,
d: Packed { x: 30, y: 31, z: 32 }
},
e: UnpackedInPacked {
a: 33,
b: Unpacked { x: 34, y: 35, z: 36 },
c: Unpacked { x: 37, y: 38, z: 39 },
d: 40
},
f: PackedInPackedWithDrop {
a: 41,
b: Packed { x: 42, y: 43, z: 44 },
c: 45,
d: Packed { x: 46, y: 47, z: 48 }
}
};
zzz();
}
fn zzz() {()}
|
drop
|
identifier_name
|
free_list.rs
|
// Copyright 2014 SiegeLord
// Licensed under GPL, see LICENSE for full terms
pub struct FreeList<T>
{
items: Vec<Option<T>>,
free_idxs: Vec<uint>,
}
impl<T> FreeList<T>
{
pub fn new() -> FreeList<T>
{
FreeList
{
items: vec![],
free_idxs: vec![],
}
}
pub fn push(&mut self, item: T) -> uint
{
match self.free_idxs.pop()
{
Some(idx) =>
{
*self.items.get_mut(idx) = Some(item);
idx
},
None =>
{
self.items.push(Some(item));
self.items.len() - 1
}
}
}
pub fn free(&mut self, idx: uint) -> bool
{
if self.items.get(idx).is_some()
{
self.free_idxs.push(idx);
*self.items.get_mut(idx) = None;
true
}
else
{
false
}
}
pub fn get<'l>(&'l self, idx: uint) -> Option<&'l T>
{
self.items.get(idx).as_ref()
}
pub fn get_mut<'l>(&'l mut self, idx: uint) -> Option<&'l mut T>
{
self.items.get_mut(idx).as_mut()
}
#[allow(dead_code)]
pub fn iter<'l>(&'l self) -> FreeListItems<'l, T>
{
FreeListItems{ idx: 0, items: self.items.as_slice() }
}
#[allow(dead_code)]
pub fn len(&self) -> uint
{
self.items.len() - self.free_idxs.len()
}
}
#[allow(dead_code)]
pub struct FreeListItems<'l, T>
{
idx: uint,
items: &'l [Option<T>]
}
impl<'l, T> Iterator<&'l T> for FreeListItems<'l, T>
{
fn next(&mut self) -> Option<&'l T>
{
loop
{
if self.idx >= self.items.len()
|
else
{
self.idx += 1;
unsafe
{
match *self.items.unsafe_ref(self.idx - 1)
{
Some(ref item) => return Some(item),
None => continue
}
}
}
}
}
}
#[test]
fn test_free_list()
{
let mut list = FreeList::new();
let idx1 = list.push(1u);
let idx2 = list.push(2);
list.free(idx1);
let idx3 = list.push(3);
assert_eq!(idx1, idx3);
let idx4 = list.push(4);
list.free(idx2);
assert_eq!(idx4, 2);
assert_eq!(list.iter().map(|s| *s).collect::<Vec<uint>>(), vec![3u, 4]);
}
|
{
return None
}
|
conditional_block
|
free_list.rs
|
// Copyright 2014 SiegeLord
// Licensed under GPL, see LICENSE for full terms
pub struct FreeList<T>
{
items: Vec<Option<T>>,
free_idxs: Vec<uint>,
}
impl<T> FreeList<T>
{
pub fn new() -> FreeList<T>
{
FreeList
{
items: vec![],
free_idxs: vec![],
}
}
pub fn push(&mut self, item: T) -> uint
{
match self.free_idxs.pop()
{
Some(idx) =>
{
*self.items.get_mut(idx) = Some(item);
idx
},
None =>
{
self.items.push(Some(item));
self.items.len() - 1
}
}
}
pub fn
|
(&mut self, idx: uint) -> bool
{
if self.items.get(idx).is_some()
{
self.free_idxs.push(idx);
*self.items.get_mut(idx) = None;
true
}
else
{
false
}
}
pub fn get<'l>(&'l self, idx: uint) -> Option<&'l T>
{
self.items.get(idx).as_ref()
}
pub fn get_mut<'l>(&'l mut self, idx: uint) -> Option<&'l mut T>
{
self.items.get_mut(idx).as_mut()
}
#[allow(dead_code)]
pub fn iter<'l>(&'l self) -> FreeListItems<'l, T>
{
FreeListItems{ idx: 0, items: self.items.as_slice() }
}
#[allow(dead_code)]
pub fn len(&self) -> uint
{
self.items.len() - self.free_idxs.len()
}
}
#[allow(dead_code)]
pub struct FreeListItems<'l, T>
{
idx: uint,
items: &'l [Option<T>]
}
impl<'l, T> Iterator<&'l T> for FreeListItems<'l, T>
{
fn next(&mut self) -> Option<&'l T>
{
loop
{
if self.idx >= self.items.len()
{
return None
}
else
{
self.idx += 1;
unsafe
{
match *self.items.unsafe_ref(self.idx - 1)
{
Some(ref item) => return Some(item),
None => continue
}
}
}
}
}
}
#[test]
fn test_free_list()
{
let mut list = FreeList::new();
let idx1 = list.push(1u);
let idx2 = list.push(2);
list.free(idx1);
let idx3 = list.push(3);
assert_eq!(idx1, idx3);
let idx4 = list.push(4);
list.free(idx2);
assert_eq!(idx4, 2);
assert_eq!(list.iter().map(|s| *s).collect::<Vec<uint>>(), vec![3u, 4]);
}
|
free
|
identifier_name
|
free_list.rs
|
// Copyright 2014 SiegeLord
// Licensed under GPL, see LICENSE for full terms
pub struct FreeList<T>
{
items: Vec<Option<T>>,
free_idxs: Vec<uint>,
}
impl<T> FreeList<T>
{
pub fn new() -> FreeList<T>
{
FreeList
{
items: vec![],
free_idxs: vec![],
}
}
pub fn push(&mut self, item: T) -> uint
{
match self.free_idxs.pop()
{
Some(idx) =>
{
*self.items.get_mut(idx) = Some(item);
idx
},
None =>
{
self.items.push(Some(item));
self.items.len() - 1
}
}
}
pub fn free(&mut self, idx: uint) -> bool
{
if self.items.get(idx).is_some()
{
self.free_idxs.push(idx);
*self.items.get_mut(idx) = None;
true
}
else
{
false
}
}
pub fn get<'l>(&'l self, idx: uint) -> Option<&'l T>
{
self.items.get(idx).as_ref()
}
pub fn get_mut<'l>(&'l mut self, idx: uint) -> Option<&'l mut T>
{
self.items.get_mut(idx).as_mut()
}
#[allow(dead_code)]
pub fn iter<'l>(&'l self) -> FreeListItems<'l, T>
{
FreeListItems{ idx: 0, items: self.items.as_slice() }
}
#[allow(dead_code)]
|
self.items.len() - self.free_idxs.len()
}
}
#[allow(dead_code)]
pub struct FreeListItems<'l, T>
{
idx: uint,
items: &'l [Option<T>]
}
impl<'l, T> Iterator<&'l T> for FreeListItems<'l, T>
{
fn next(&mut self) -> Option<&'l T>
{
loop
{
if self.idx >= self.items.len()
{
return None
}
else
{
self.idx += 1;
unsafe
{
match *self.items.unsafe_ref(self.idx - 1)
{
Some(ref item) => return Some(item),
None => continue
}
}
}
}
}
}
#[test]
fn test_free_list()
{
let mut list = FreeList::new();
let idx1 = list.push(1u);
let idx2 = list.push(2);
list.free(idx1);
let idx3 = list.push(3);
assert_eq!(idx1, idx3);
let idx4 = list.push(4);
list.free(idx2);
assert_eq!(idx4, 2);
assert_eq!(list.iter().map(|s| *s).collect::<Vec<uint>>(), vec![3u, 4]);
}
|
pub fn len(&self) -> uint
{
|
random_line_split
|
multiple_files.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate rand;
use rand::{task_rng, Rng};
use std::{char, os, str};
use std::io::{File, Process};
// creates unicode_input_multiple_files_{main,chars}.rs, where the
// former imports the latter. `_chars` just contains an indentifier
// made up of random characters, because will emit an error message
// about the ident being in the wrong place, with a span (and creating
// this span used to upset the compiler).
fn random_char() -> char {
let mut rng = task_rng();
// a subset of the XID_start unicode table (ensuring that the
// compiler doesn't fail with an "unrecognised token" error)
let (lo, hi): (u32, u32) = match rng.gen_range(1, 4 + 1) {
1 => (0x41, 0x5a),
2 => (0xf8, 0x1ba),
3 => (0x1401, 0x166c),
_ => (0x10400, 0x1044f)
};
char::from_u32(rng.gen_range(lo, hi + 1)).unwrap()
}
fn
|
() {
let args = os::args();
let rustc = args[1].as_slice();
let tmpdir = Path::new(args[2].as_slice());
let main_file = tmpdir.join("unicode_input_multiple_files_main.rs");
let main_file_str = main_file.as_str().unwrap();
{
let _ = File::create(&main_file).unwrap()
.write_str("mod unicode_input_multiple_files_chars;");
}
for _ in range(0, 100) {
{
let randoms = tmpdir.join("unicode_input_multiple_files_chars.rs");
let mut w = File::create(&randoms).unwrap();
for _ in range(0, 30) {
let _ = w.write_char(random_char());
}
}
// rustc is passed to us with --out-dir and -L etc., so we
// can't exec it directly
let result = Process::output("sh", [~"-c", rustc + " " + main_file_str]).unwrap();
let err = str::from_utf8_lossy(result.error.as_slice());
// positive test so that this test will be updated when the
// compiler changes.
assert!(err.as_slice().contains("expected item but found"))
}
}
|
main
|
identifier_name
|
multiple_files.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate rand;
use rand::{task_rng, Rng};
use std::{char, os, str};
use std::io::{File, Process};
// creates unicode_input_multiple_files_{main,chars}.rs, where the
// former imports the latter. `_chars` just contains an indentifier
// made up of random characters, because will emit an error message
// about the ident being in the wrong place, with a span (and creating
// this span used to upset the compiler).
fn random_char() -> char
|
fn main() {
let args = os::args();
let rustc = args[1].as_slice();
let tmpdir = Path::new(args[2].as_slice());
let main_file = tmpdir.join("unicode_input_multiple_files_main.rs");
let main_file_str = main_file.as_str().unwrap();
{
let _ = File::create(&main_file).unwrap()
.write_str("mod unicode_input_multiple_files_chars;");
}
for _ in range(0, 100) {
{
let randoms = tmpdir.join("unicode_input_multiple_files_chars.rs");
let mut w = File::create(&randoms).unwrap();
for _ in range(0, 30) {
let _ = w.write_char(random_char());
}
}
// rustc is passed to us with --out-dir and -L etc., so we
// can't exec it directly
let result = Process::output("sh", [~"-c", rustc + " " + main_file_str]).unwrap();
let err = str::from_utf8_lossy(result.error.as_slice());
// positive test so that this test will be updated when the
// compiler changes.
assert!(err.as_slice().contains("expected item but found"))
}
}
|
{
let mut rng = task_rng();
// a subset of the XID_start unicode table (ensuring that the
// compiler doesn't fail with an "unrecognised token" error)
let (lo, hi): (u32, u32) = match rng.gen_range(1, 4 + 1) {
1 => (0x41, 0x5a),
2 => (0xf8, 0x1ba),
3 => (0x1401, 0x166c),
_ => (0x10400, 0x1044f)
};
char::from_u32(rng.gen_range(lo, hi + 1)).unwrap()
}
|
identifier_body
|
multiple_files.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate rand;
use rand::{task_rng, Rng};
use std::{char, os, str};
use std::io::{File, Process};
// creates unicode_input_multiple_files_{main,chars}.rs, where the
// former imports the latter. `_chars` just contains an indentifier
// made up of random characters, because will emit an error message
// about the ident being in the wrong place, with a span (and creating
// this span used to upset the compiler).
fn random_char() -> char {
let mut rng = task_rng();
// a subset of the XID_start unicode table (ensuring that the
// compiler doesn't fail with an "unrecognised token" error)
let (lo, hi): (u32, u32) = match rng.gen_range(1, 4 + 1) {
1 => (0x41, 0x5a),
2 => (0xf8, 0x1ba),
3 => (0x1401, 0x166c),
_ => (0x10400, 0x1044f)
};
char::from_u32(rng.gen_range(lo, hi + 1)).unwrap()
}
fn main() {
let args = os::args();
let rustc = args[1].as_slice();
let tmpdir = Path::new(args[2].as_slice());
let main_file = tmpdir.join("unicode_input_multiple_files_main.rs");
let main_file_str = main_file.as_str().unwrap();
{
let _ = File::create(&main_file).unwrap()
.write_str("mod unicode_input_multiple_files_chars;");
}
for _ in range(0, 100) {
|
}
}
// rustc is passed to us with --out-dir and -L etc., so we
// can't exec it directly
let result = Process::output("sh", [~"-c", rustc + " " + main_file_str]).unwrap();
let err = str::from_utf8_lossy(result.error.as_slice());
// positive test so that this test will be updated when the
// compiler changes.
assert!(err.as_slice().contains("expected item but found"))
}
}
|
{
let randoms = tmpdir.join("unicode_input_multiple_files_chars.rs");
let mut w = File::create(&randoms).unwrap();
for _ in range(0, 30) {
let _ = w.write_char(random_char());
|
random_line_split
|
triehash.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Generetes trie root.
//!
//! This module should be used to generate trie root hash.
use std::collections::BTreeMap;
use std::cmp;
use hash::*;
use sha3::*;
use rlp;
use rlp::{RlpStream, Stream};
use vector::SharedPrefix;
/// Generates a trie root hash for a vector of values
///
/// ```rust
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::triehash::*;
/// use util::hash::*;
///
/// fn main() {
/// let v = vec![From::from("doe"), From::from("reindeer")];
/// let root = "e766d5d51b89dc39d981b41bda63248d7abce4f0225eefd023792a540bcffee3";
/// assert_eq!(ordered_trie_root(v), H256::from_str(root).unwrap());
/// }
/// ```
pub fn ordered_trie_root<I>(input: I) -> H256
where I: IntoIterator<Item=Vec<u8>>
{
let gen_input = input
// first put elements into btree to sort them by nibbles
// optimize it later
.into_iter()
.enumerate()
.map(|(i, vec)| (rlp::encode(&i).to_vec(), vec))
.collect::<BTreeMap<_, _>>()
// then move them to a vector
.into_iter()
.map(|(k, v)| (as_nibbles(&k), v) )
.collect();
gen_trie_root(gen_input)
}
/// Generates a trie root hash for a vector of key-values
///
/// ```rust
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::triehash::*;
/// use util::hash::*;
///
/// fn main() {
/// let v = vec![
/// (From::from("doe"), From::from("reindeer")),
/// (From::from("dog"), From::from("puppy")),
/// (From::from("dogglesworth"), From::from("cat")),
/// ];
///
/// let root = "8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3";
/// assert_eq!(trie_root(v), H256::from_str(root).unwrap());
/// }
/// ```
pub fn trie_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
let gen_input = input
// first put elements into btree to sort them and to remove duplicates
.into_iter()
.collect::<BTreeMap<_, _>>()
// then move them to a vector
.into_iter()
.map(|(k, v)| (as_nibbles(&k), v) )
.collect();
gen_trie_root(gen_input)
}
/// Generates a key-hashed (secure) trie root hash for a vector of key-values.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::triehash::*;
/// use util::hash::*;
///
/// fn main() {
/// let v = vec![
/// (From::from("doe"), From::from("reindeer")),
/// (From::from("dog"), From::from("puppy")),
/// (From::from("dogglesworth"), From::from("cat")),
/// ];
///
/// let root = "d4cd937e4a4368d7931a9cf51686b7e10abb3dce38a39000fd7902a092b64585";
/// assert_eq!(sec_trie_root(v), H256::from_str(root).unwrap());
/// }
/// ```
pub fn sec_trie_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
let gen_input = input
// first put elements into btree to sort them and to remove duplicates
.into_iter()
.map(|(k, v)| (k.sha3().to_vec(), v))
|
.into_iter()
.map(|(k, v)| (as_nibbles(&k), v) )
.collect();
gen_trie_root(gen_input)
}
fn gen_trie_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
let mut stream = RlpStream::new();
hash256rlp(&input, 0, &mut stream);
stream.out().sha3()
}
/// Hex-prefix Notation. First nibble has flags: oddness = 2^0 & termination = 2^1.
///
/// The "termination marker" and "leaf-node" specifier are completely equivalent.
///
/// Input values are in range `[0, 0xf]`.
///
/// ```markdown
/// [0,0,1,2,3,4,5] 0x10012345 // 7 > 4
/// [0,1,2,3,4,5] 0x00012345 // 6 > 4
/// [1,2,3,4,5] 0x112345 // 5 > 3
/// [0,0,1,2,3,4] 0x00001234 // 6 > 3
/// [0,1,2,3,4] 0x101234 // 5 > 3
/// [1,2,3,4] 0x001234 // 4 > 3
/// [0,0,1,2,3,4,5,T] 0x30012345 // 7 > 4
/// [0,0,1,2,3,4,T] 0x20001234 // 6 > 4
/// [0,1,2,3,4,5,T] 0x20012345 // 6 > 4
/// [1,2,3,4,5,T] 0x312345 // 5 > 3
/// [1,2,3,4,T] 0x201234 // 4 > 3
/// ```
fn hex_prefix_encode(nibbles: &[u8], leaf: bool) -> Vec<u8> {
let inlen = nibbles.len();
let oddness_factor = inlen % 2;
// next even number divided by two
let reslen = (inlen + 2) >> 1;
let mut res = vec![];
res.reserve(reslen);
let first_byte = {
let mut bits = ((inlen as u8 & 1) + (2 * leaf as u8)) << 4;
if oddness_factor == 1 {
bits += nibbles[0];
}
bits
};
res.push(first_byte);
let mut offset = oddness_factor;
while offset < inlen {
let byte = (nibbles[offset] << 4) + nibbles[offset + 1];
res.push(byte);
offset += 2;
}
res
}
/// Converts slice of bytes to nibbles.
fn as_nibbles(bytes: &[u8]) -> Vec<u8> {
let mut res = vec![];
res.reserve(bytes.len() * 2);
for i in 0..bytes.len() {
res.push(bytes[i] >> 4);
res.push((bytes[i] << 4) >> 4);
}
res
}
fn hash256rlp(input: &[(Vec<u8>, Vec<u8>)], pre_len: usize, stream: &mut RlpStream) {
let inlen = input.len();
// in case of empty slice, just append empty data
if inlen == 0 {
stream.append_empty_data();
return;
}
// take slices
let key: &[u8] = &input[0].0;
let value: &[u8] = &input[0].1;
// if the slice contains just one item, append the suffix of the key
// and then append value
if inlen == 1 {
stream.begin_list(2);
stream.append(&hex_prefix_encode(&key[pre_len..], true));
stream.append(&value);
return;
}
// get length of the longest shared prefix in slice keys
let shared_prefix = input.iter()
// skip first element
.skip(1)
// get minimum number of shared nibbles between first and each successive
.fold(key.len(), | acc, &(ref k, _) | {
cmp::min(key.shared_prefix_len(k), acc)
});
// if shared prefix is higher than current prefix append its
// new part of the key to the stream
// then recursively append suffixes of all items who had this key
if shared_prefix > pre_len {
stream.begin_list(2);
stream.append(&hex_prefix_encode(&key[pre_len..shared_prefix], false));
hash256aux(input, shared_prefix, stream);
return;
}
// an item for every possible nibble/suffix
// + 1 for data
stream.begin_list(17);
// if first key len is equal to prefix_len, move to next element
let mut begin = match pre_len == key.len() {
true => 1,
false => 0
};
// iterate over all possible nibbles
for i in 0..16 {
// cout how many successive elements have same next nibble
let len = match begin < input.len() {
true => input[begin..].iter()
.take_while(| pair | pair.0[pre_len] == i )
.count(),
false => 0
};
// if at least 1 successive element has the same nibble
// append their suffixes
match len {
0 => { stream.append_empty_data(); },
_ => hash256aux(&input[begin..(begin + len)], pre_len + 1, stream)
}
begin += len;
}
// if fist key len is equal prefix, append its value
match pre_len == key.len() {
true => { stream.append(&value); },
false => { stream.append_empty_data(); }
};
}
fn hash256aux(input: &[(Vec<u8>, Vec<u8>)], pre_len: usize, stream: &mut RlpStream) {
let mut s = RlpStream::new();
hash256rlp(input, pre_len, &mut s);
let out = s.out();
match out.len() {
0...31 => stream.append_raw(&out, 1),
_ => stream.append(&out.sha3())
};
}
#[test]
fn test_nibbles() {
let v = vec![0x31, 0x23, 0x45];
let e = vec![3, 1, 2, 3, 4, 5];
assert_eq!(as_nibbles(&v), e);
// A => 65 => 0x41 => [4, 1]
let v: Vec<u8> = From::from("A");
let e = vec![4, 1];
assert_eq!(as_nibbles(&v), e);
}
#[test]
fn test_hex_prefix_encode() {
let v = vec![0, 0, 1, 2, 3, 4, 5];
let e = vec![0x10, 0x01, 0x23, 0x45];
let h = hex_prefix_encode(&v, false);
assert_eq!(h, e);
let v = vec![0, 1, 2, 3, 4, 5];
let e = vec![0x00, 0x01, 0x23, 0x45];
let h = hex_prefix_encode(&v, false);
assert_eq!(h, e);
let v = vec![0, 1, 2, 3, 4, 5];
let e = vec![0x20, 0x01, 0x23, 0x45];
let h = hex_prefix_encode(&v, true);
assert_eq!(h, e);
let v = vec![1, 2, 3, 4, 5];
let e = vec![0x31, 0x23, 0x45];
let h = hex_prefix_encode(&v, true);
assert_eq!(h, e);
let v = vec![1, 2, 3, 4];
let e = vec![0x00, 0x12, 0x34];
let h = hex_prefix_encode(&v, false);
assert_eq!(h, e);
let v = vec![4, 1];
let e = vec![0x20, 0x41];
let h = hex_prefix_encode(&v, true);
assert_eq!(h, e);
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use hash::H256;
use super::trie_root;
#[test]
fn simple_test() {
assert_eq!(trie_root(vec![
(b"A".to_vec(), b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa".to_vec())
]), H256::from_str("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab").unwrap());
}
#[test]
fn test_triehash_out_of_order() {
assert!(trie_root(vec![
(vec![0x01u8, 0x23], vec![0x01u8, 0x23]),
(vec![0x81u8, 0x23], vec![0x81u8, 0x23]),
(vec![0xf1u8, 0x23], vec![0xf1u8, 0x23]),
]) ==
trie_root(vec![
(vec![0x01u8, 0x23], vec![0x01u8, 0x23]),
(vec![0xf1u8, 0x23], vec![0xf1u8, 0x23]),
(vec![0x81u8, 0x23], vec![0x81u8, 0x23]),
]));
}
}
|
.collect::<BTreeMap<_, _>>()
// then move them to a vector
|
random_line_split
|
triehash.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Generetes trie root.
//!
//! This module should be used to generate trie root hash.
use std::collections::BTreeMap;
use std::cmp;
use hash::*;
use sha3::*;
use rlp;
use rlp::{RlpStream, Stream};
use vector::SharedPrefix;
/// Generates a trie root hash for a vector of values
///
/// ```rust
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::triehash::*;
/// use util::hash::*;
///
/// fn main() {
/// let v = vec![From::from("doe"), From::from("reindeer")];
/// let root = "e766d5d51b89dc39d981b41bda63248d7abce4f0225eefd023792a540bcffee3";
/// assert_eq!(ordered_trie_root(v), H256::from_str(root).unwrap());
/// }
/// ```
pub fn ordered_trie_root<I>(input: I) -> H256
where I: IntoIterator<Item=Vec<u8>>
{
let gen_input = input
// first put elements into btree to sort them by nibbles
// optimize it later
.into_iter()
.enumerate()
.map(|(i, vec)| (rlp::encode(&i).to_vec(), vec))
.collect::<BTreeMap<_, _>>()
// then move them to a vector
.into_iter()
.map(|(k, v)| (as_nibbles(&k), v) )
.collect();
gen_trie_root(gen_input)
}
/// Generates a trie root hash for a vector of key-values
///
/// ```rust
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::triehash::*;
/// use util::hash::*;
///
/// fn main() {
/// let v = vec![
/// (From::from("doe"), From::from("reindeer")),
/// (From::from("dog"), From::from("puppy")),
/// (From::from("dogglesworth"), From::from("cat")),
/// ];
///
/// let root = "8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3";
/// assert_eq!(trie_root(v), H256::from_str(root).unwrap());
/// }
/// ```
pub fn trie_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
let gen_input = input
// first put elements into btree to sort them and to remove duplicates
.into_iter()
.collect::<BTreeMap<_, _>>()
// then move them to a vector
.into_iter()
.map(|(k, v)| (as_nibbles(&k), v) )
.collect();
gen_trie_root(gen_input)
}
/// Generates a key-hashed (secure) trie root hash for a vector of key-values.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::triehash::*;
/// use util::hash::*;
///
/// fn main() {
/// let v = vec![
/// (From::from("doe"), From::from("reindeer")),
/// (From::from("dog"), From::from("puppy")),
/// (From::from("dogglesworth"), From::from("cat")),
/// ];
///
/// let root = "d4cd937e4a4368d7931a9cf51686b7e10abb3dce38a39000fd7902a092b64585";
/// assert_eq!(sec_trie_root(v), H256::from_str(root).unwrap());
/// }
/// ```
pub fn sec_trie_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
let gen_input = input
// first put elements into btree to sort them and to remove duplicates
.into_iter()
.map(|(k, v)| (k.sha3().to_vec(), v))
.collect::<BTreeMap<_, _>>()
// then move them to a vector
.into_iter()
.map(|(k, v)| (as_nibbles(&k), v) )
.collect();
gen_trie_root(gen_input)
}
fn gen_trie_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
let mut stream = RlpStream::new();
hash256rlp(&input, 0, &mut stream);
stream.out().sha3()
}
/// Hex-prefix Notation. First nibble has flags: oddness = 2^0 & termination = 2^1.
///
/// The "termination marker" and "leaf-node" specifier are completely equivalent.
///
/// Input values are in range `[0, 0xf]`.
///
/// ```markdown
/// [0,0,1,2,3,4,5] 0x10012345 // 7 > 4
/// [0,1,2,3,4,5] 0x00012345 // 6 > 4
/// [1,2,3,4,5] 0x112345 // 5 > 3
/// [0,0,1,2,3,4] 0x00001234 // 6 > 3
/// [0,1,2,3,4] 0x101234 // 5 > 3
/// [1,2,3,4] 0x001234 // 4 > 3
/// [0,0,1,2,3,4,5,T] 0x30012345 // 7 > 4
/// [0,0,1,2,3,4,T] 0x20001234 // 6 > 4
/// [0,1,2,3,4,5,T] 0x20012345 // 6 > 4
/// [1,2,3,4,5,T] 0x312345 // 5 > 3
/// [1,2,3,4,T] 0x201234 // 4 > 3
/// ```
fn hex_prefix_encode(nibbles: &[u8], leaf: bool) -> Vec<u8> {
let inlen = nibbles.len();
let oddness_factor = inlen % 2;
// next even number divided by two
let reslen = (inlen + 2) >> 1;
let mut res = vec![];
res.reserve(reslen);
let first_byte = {
let mut bits = ((inlen as u8 & 1) + (2 * leaf as u8)) << 4;
if oddness_factor == 1 {
bits += nibbles[0];
}
bits
};
res.push(first_byte);
let mut offset = oddness_factor;
while offset < inlen {
let byte = (nibbles[offset] << 4) + nibbles[offset + 1];
res.push(byte);
offset += 2;
}
res
}
/// Converts slice of bytes to nibbles.
fn as_nibbles(bytes: &[u8]) -> Vec<u8> {
let mut res = vec![];
res.reserve(bytes.len() * 2);
for i in 0..bytes.len() {
res.push(bytes[i] >> 4);
res.push((bytes[i] << 4) >> 4);
}
res
}
fn hash256rlp(input: &[(Vec<u8>, Vec<u8>)], pre_len: usize, stream: &mut RlpStream) {
let inlen = input.len();
// in case of empty slice, just append empty data
if inlen == 0 {
stream.append_empty_data();
return;
}
// take slices
let key: &[u8] = &input[0].0;
let value: &[u8] = &input[0].1;
// if the slice contains just one item, append the suffix of the key
// and then append value
if inlen == 1 {
stream.begin_list(2);
stream.append(&hex_prefix_encode(&key[pre_len..], true));
stream.append(&value);
return;
}
// get length of the longest shared prefix in slice keys
let shared_prefix = input.iter()
// skip first element
.skip(1)
// get minimum number of shared nibbles between first and each successive
.fold(key.len(), | acc, &(ref k, _) | {
cmp::min(key.shared_prefix_len(k), acc)
});
// if shared prefix is higher than current prefix append its
// new part of the key to the stream
// then recursively append suffixes of all items who had this key
if shared_prefix > pre_len {
stream.begin_list(2);
stream.append(&hex_prefix_encode(&key[pre_len..shared_prefix], false));
hash256aux(input, shared_prefix, stream);
return;
}
// an item for every possible nibble/suffix
// + 1 for data
stream.begin_list(17);
// if first key len is equal to prefix_len, move to next element
let mut begin = match pre_len == key.len() {
true => 1,
false => 0
};
// iterate over all possible nibbles
for i in 0..16 {
// cout how many successive elements have same next nibble
let len = match begin < input.len() {
true => input[begin..].iter()
.take_while(| pair | pair.0[pre_len] == i )
.count(),
false => 0
};
// if at least 1 successive element has the same nibble
// append their suffixes
match len {
0 => { stream.append_empty_data(); },
_ => hash256aux(&input[begin..(begin + len)], pre_len + 1, stream)
}
begin += len;
}
// if fist key len is equal prefix, append its value
match pre_len == key.len() {
true => { stream.append(&value); },
false => { stream.append_empty_data(); }
};
}
fn hash256aux(input: &[(Vec<u8>, Vec<u8>)], pre_len: usize, stream: &mut RlpStream) {
let mut s = RlpStream::new();
hash256rlp(input, pre_len, &mut s);
let out = s.out();
match out.len() {
0...31 => stream.append_raw(&out, 1),
_ => stream.append(&out.sha3())
};
}
#[test]
fn test_nibbles() {
let v = vec![0x31, 0x23, 0x45];
let e = vec![3, 1, 2, 3, 4, 5];
assert_eq!(as_nibbles(&v), e);
// A => 65 => 0x41 => [4, 1]
let v: Vec<u8> = From::from("A");
let e = vec![4, 1];
assert_eq!(as_nibbles(&v), e);
}
#[test]
fn test_hex_prefix_encode() {
let v = vec![0, 0, 1, 2, 3, 4, 5];
let e = vec![0x10, 0x01, 0x23, 0x45];
let h = hex_prefix_encode(&v, false);
assert_eq!(h, e);
let v = vec![0, 1, 2, 3, 4, 5];
let e = vec![0x00, 0x01, 0x23, 0x45];
let h = hex_prefix_encode(&v, false);
assert_eq!(h, e);
let v = vec![0, 1, 2, 3, 4, 5];
let e = vec![0x20, 0x01, 0x23, 0x45];
let h = hex_prefix_encode(&v, true);
assert_eq!(h, e);
let v = vec![1, 2, 3, 4, 5];
let e = vec![0x31, 0x23, 0x45];
let h = hex_prefix_encode(&v, true);
assert_eq!(h, e);
let v = vec![1, 2, 3, 4];
let e = vec![0x00, 0x12, 0x34];
let h = hex_prefix_encode(&v, false);
assert_eq!(h, e);
let v = vec![4, 1];
let e = vec![0x20, 0x41];
let h = hex_prefix_encode(&v, true);
assert_eq!(h, e);
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use hash::H256;
use super::trie_root;
#[test]
fn simple_test() {
assert_eq!(trie_root(vec![
(b"A".to_vec(), b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa".to_vec())
]), H256::from_str("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab").unwrap());
}
#[test]
fn test_triehash_out_of_order()
|
}
|
{
assert!(trie_root(vec![
(vec![0x01u8, 0x23], vec![0x01u8, 0x23]),
(vec![0x81u8, 0x23], vec![0x81u8, 0x23]),
(vec![0xf1u8, 0x23], vec![0xf1u8, 0x23]),
]) ==
trie_root(vec![
(vec![0x01u8, 0x23], vec![0x01u8, 0x23]),
(vec![0xf1u8, 0x23], vec![0xf1u8, 0x23]),
(vec![0x81u8, 0x23], vec![0x81u8, 0x23]),
]));
}
|
identifier_body
|
triehash.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Generetes trie root.
//!
//! This module should be used to generate trie root hash.
use std::collections::BTreeMap;
use std::cmp;
use hash::*;
use sha3::*;
use rlp;
use rlp::{RlpStream, Stream};
use vector::SharedPrefix;
/// Generates a trie root hash for a vector of values
///
/// ```rust
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::triehash::*;
/// use util::hash::*;
///
/// fn main() {
/// let v = vec![From::from("doe"), From::from("reindeer")];
/// let root = "e766d5d51b89dc39d981b41bda63248d7abce4f0225eefd023792a540bcffee3";
/// assert_eq!(ordered_trie_root(v), H256::from_str(root).unwrap());
/// }
/// ```
pub fn ordered_trie_root<I>(input: I) -> H256
where I: IntoIterator<Item=Vec<u8>>
{
let gen_input = input
// first put elements into btree to sort them by nibbles
// optimize it later
.into_iter()
.enumerate()
.map(|(i, vec)| (rlp::encode(&i).to_vec(), vec))
.collect::<BTreeMap<_, _>>()
// then move them to a vector
.into_iter()
.map(|(k, v)| (as_nibbles(&k), v) )
.collect();
gen_trie_root(gen_input)
}
/// Generates a trie root hash for a vector of key-values
///
/// ```rust
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::triehash::*;
/// use util::hash::*;
///
/// fn main() {
/// let v = vec![
/// (From::from("doe"), From::from("reindeer")),
/// (From::from("dog"), From::from("puppy")),
/// (From::from("dogglesworth"), From::from("cat")),
/// ];
///
/// let root = "8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3";
/// assert_eq!(trie_root(v), H256::from_str(root).unwrap());
/// }
/// ```
pub fn trie_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
let gen_input = input
// first put elements into btree to sort them and to remove duplicates
.into_iter()
.collect::<BTreeMap<_, _>>()
// then move them to a vector
.into_iter()
.map(|(k, v)| (as_nibbles(&k), v) )
.collect();
gen_trie_root(gen_input)
}
/// Generates a key-hashed (secure) trie root hash for a vector of key-values.
///
/// ```rust
/// extern crate ethcore_util as util;
/// use std::str::FromStr;
/// use util::triehash::*;
/// use util::hash::*;
///
/// fn main() {
/// let v = vec![
/// (From::from("doe"), From::from("reindeer")),
/// (From::from("dog"), From::from("puppy")),
/// (From::from("dogglesworth"), From::from("cat")),
/// ];
///
/// let root = "d4cd937e4a4368d7931a9cf51686b7e10abb3dce38a39000fd7902a092b64585";
/// assert_eq!(sec_trie_root(v), H256::from_str(root).unwrap());
/// }
/// ```
pub fn sec_trie_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
let gen_input = input
// first put elements into btree to sort them and to remove duplicates
.into_iter()
.map(|(k, v)| (k.sha3().to_vec(), v))
.collect::<BTreeMap<_, _>>()
// then move them to a vector
.into_iter()
.map(|(k, v)| (as_nibbles(&k), v) )
.collect();
gen_trie_root(gen_input)
}
fn gen_trie_root(input: Vec<(Vec<u8>, Vec<u8>)>) -> H256 {
let mut stream = RlpStream::new();
hash256rlp(&input, 0, &mut stream);
stream.out().sha3()
}
/// Hex-prefix Notation. First nibble has flags: oddness = 2^0 & termination = 2^1.
///
/// The "termination marker" and "leaf-node" specifier are completely equivalent.
///
/// Input values are in range `[0, 0xf]`.
///
/// ```markdown
/// [0,0,1,2,3,4,5] 0x10012345 // 7 > 4
/// [0,1,2,3,4,5] 0x00012345 // 6 > 4
/// [1,2,3,4,5] 0x112345 // 5 > 3
/// [0,0,1,2,3,4] 0x00001234 // 6 > 3
/// [0,1,2,3,4] 0x101234 // 5 > 3
/// [1,2,3,4] 0x001234 // 4 > 3
/// [0,0,1,2,3,4,5,T] 0x30012345 // 7 > 4
/// [0,0,1,2,3,4,T] 0x20001234 // 6 > 4
/// [0,1,2,3,4,5,T] 0x20012345 // 6 > 4
/// [1,2,3,4,5,T] 0x312345 // 5 > 3
/// [1,2,3,4,T] 0x201234 // 4 > 3
/// ```
fn hex_prefix_encode(nibbles: &[u8], leaf: bool) -> Vec<u8> {
let inlen = nibbles.len();
let oddness_factor = inlen % 2;
// next even number divided by two
let reslen = (inlen + 2) >> 1;
let mut res = vec![];
res.reserve(reslen);
let first_byte = {
let mut bits = ((inlen as u8 & 1) + (2 * leaf as u8)) << 4;
if oddness_factor == 1 {
bits += nibbles[0];
}
bits
};
res.push(first_byte);
let mut offset = oddness_factor;
while offset < inlen {
let byte = (nibbles[offset] << 4) + nibbles[offset + 1];
res.push(byte);
offset += 2;
}
res
}
/// Converts slice of bytes to nibbles.
fn as_nibbles(bytes: &[u8]) -> Vec<u8> {
let mut res = vec![];
res.reserve(bytes.len() * 2);
for i in 0..bytes.len() {
res.push(bytes[i] >> 4);
res.push((bytes[i] << 4) >> 4);
}
res
}
fn hash256rlp(input: &[(Vec<u8>, Vec<u8>)], pre_len: usize, stream: &mut RlpStream) {
let inlen = input.len();
// in case of empty slice, just append empty data
if inlen == 0 {
stream.append_empty_data();
return;
}
// take slices
let key: &[u8] = &input[0].0;
let value: &[u8] = &input[0].1;
// if the slice contains just one item, append the suffix of the key
// and then append value
if inlen == 1 {
stream.begin_list(2);
stream.append(&hex_prefix_encode(&key[pre_len..], true));
stream.append(&value);
return;
}
// get length of the longest shared prefix in slice keys
let shared_prefix = input.iter()
// skip first element
.skip(1)
// get minimum number of shared nibbles between first and each successive
.fold(key.len(), | acc, &(ref k, _) | {
cmp::min(key.shared_prefix_len(k), acc)
});
// if shared prefix is higher than current prefix append its
// new part of the key to the stream
// then recursively append suffixes of all items who had this key
if shared_prefix > pre_len {
stream.begin_list(2);
stream.append(&hex_prefix_encode(&key[pre_len..shared_prefix], false));
hash256aux(input, shared_prefix, stream);
return;
}
// an item for every possible nibble/suffix
// + 1 for data
stream.begin_list(17);
// if first key len is equal to prefix_len, move to next element
let mut begin = match pre_len == key.len() {
true => 1,
false => 0
};
// iterate over all possible nibbles
for i in 0..16 {
// cout how many successive elements have same next nibble
let len = match begin < input.len() {
true => input[begin..].iter()
.take_while(| pair | pair.0[pre_len] == i )
.count(),
false => 0
};
// if at least 1 successive element has the same nibble
// append their suffixes
match len {
0 => { stream.append_empty_data(); },
_ => hash256aux(&input[begin..(begin + len)], pre_len + 1, stream)
}
begin += len;
}
// if fist key len is equal prefix, append its value
match pre_len == key.len() {
true => { stream.append(&value); },
false => { stream.append_empty_data(); }
};
}
fn hash256aux(input: &[(Vec<u8>, Vec<u8>)], pre_len: usize, stream: &mut RlpStream) {
let mut s = RlpStream::new();
hash256rlp(input, pre_len, &mut s);
let out = s.out();
match out.len() {
0...31 => stream.append_raw(&out, 1),
_ => stream.append(&out.sha3())
};
}
#[test]
fn
|
() {
let v = vec![0x31, 0x23, 0x45];
let e = vec![3, 1, 2, 3, 4, 5];
assert_eq!(as_nibbles(&v), e);
// A => 65 => 0x41 => [4, 1]
let v: Vec<u8> = From::from("A");
let e = vec![4, 1];
assert_eq!(as_nibbles(&v), e);
}
#[test]
fn test_hex_prefix_encode() {
let v = vec![0, 0, 1, 2, 3, 4, 5];
let e = vec![0x10, 0x01, 0x23, 0x45];
let h = hex_prefix_encode(&v, false);
assert_eq!(h, e);
let v = vec![0, 1, 2, 3, 4, 5];
let e = vec![0x00, 0x01, 0x23, 0x45];
let h = hex_prefix_encode(&v, false);
assert_eq!(h, e);
let v = vec![0, 1, 2, 3, 4, 5];
let e = vec![0x20, 0x01, 0x23, 0x45];
let h = hex_prefix_encode(&v, true);
assert_eq!(h, e);
let v = vec![1, 2, 3, 4, 5];
let e = vec![0x31, 0x23, 0x45];
let h = hex_prefix_encode(&v, true);
assert_eq!(h, e);
let v = vec![1, 2, 3, 4];
let e = vec![0x00, 0x12, 0x34];
let h = hex_prefix_encode(&v, false);
assert_eq!(h, e);
let v = vec![4, 1];
let e = vec![0x20, 0x41];
let h = hex_prefix_encode(&v, true);
assert_eq!(h, e);
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use hash::H256;
use super::trie_root;
#[test]
fn simple_test() {
assert_eq!(trie_root(vec![
(b"A".to_vec(), b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa".to_vec())
]), H256::from_str("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab").unwrap());
}
#[test]
fn test_triehash_out_of_order() {
assert!(trie_root(vec![
(vec![0x01u8, 0x23], vec![0x01u8, 0x23]),
(vec![0x81u8, 0x23], vec![0x81u8, 0x23]),
(vec![0xf1u8, 0x23], vec![0xf1u8, 0x23]),
]) ==
trie_root(vec![
(vec![0x01u8, 0x23], vec![0x01u8, 0x23]),
(vec![0xf1u8, 0x23], vec![0xf1u8, 0x23]),
(vec![0x81u8, 0x23], vec![0x81u8, 0x23]),
]));
}
}
|
test_nibbles
|
identifier_name
|
main.rs
|
/**
* Tutorial 1 -- Creating a Basic Triangle
*
* In this tutorial we learn how to create a window using glutin/glium and use
* OpenGL to render a triangle.
**/
// import the crates that we need
#[macro_use]
extern crate glium;
extern crate glutin;
use glium::{DisplayBuild, Surface};
// this struct will hold our vertex data
#[derive(Clone, Copy)]
struct Vertex {
position: [f32; 2],
}
// program entry point
fn main() {
let display = glutin::WindowBuilder::new()
.with_title("tetra".to_string())
.build_glium()
.unwrap();
implement_vertex!(Vertex, position);
let vertex1 = Vertex { position: [-0.5, -0.5] };
let vertex2 = Vertex { position: [ 0.0, 0.5] };
let vertex3 = Vertex { position: [ 0.5, -0.25] };
let shape = vec![vertex1, vertex2, vertex3];
let v_buf = glium::VertexBuffer::new(&display, shape);
let indices =
glium::index::NoIndices(glium::index::PrimitiveType::TrianglesList);
let vertex_shader_src = r#"
#version 130
in vec2 position;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}
"#;
let fragment_shader_src = r#"
#version 130
void main() {
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
"#;
let program = glium::Program::from_source(
&display, vertex_shader_src, fragment_shader_src, None
).unwrap();
loop {
let mut target = display.draw();
target.clear_color(0.0, 0.0, 1.0, 1.0);
target.draw(
&v_buf, &indices, &program, &glium::uniforms::EmptyUniforms,
&std::default::Default::default()
).unwrap();
|
display.wait_events().next();
if display.is_closed() {
break;
}
}
}
|
target.finish();
|
random_line_split
|
main.rs
|
/**
* Tutorial 1 -- Creating a Basic Triangle
*
* In this tutorial we learn how to create a window using glutin/glium and use
* OpenGL to render a triangle.
**/
// import the crates that we need
#[macro_use]
extern crate glium;
extern crate glutin;
use glium::{DisplayBuild, Surface};
// this struct will hold our vertex data
#[derive(Clone, Copy)]
struct
|
{
position: [f32; 2],
}
// program entry point
fn main() {
let display = glutin::WindowBuilder::new()
.with_title("tetra".to_string())
.build_glium()
.unwrap();
implement_vertex!(Vertex, position);
let vertex1 = Vertex { position: [-0.5, -0.5] };
let vertex2 = Vertex { position: [ 0.0, 0.5] };
let vertex3 = Vertex { position: [ 0.5, -0.25] };
let shape = vec![vertex1, vertex2, vertex3];
let v_buf = glium::VertexBuffer::new(&display, shape);
let indices =
glium::index::NoIndices(glium::index::PrimitiveType::TrianglesList);
let vertex_shader_src = r#"
#version 130
in vec2 position;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}
"#;
let fragment_shader_src = r#"
#version 130
void main() {
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
"#;
let program = glium::Program::from_source(
&display, vertex_shader_src, fragment_shader_src, None
).unwrap();
loop {
let mut target = display.draw();
target.clear_color(0.0, 0.0, 1.0, 1.0);
target.draw(
&v_buf, &indices, &program, &glium::uniforms::EmptyUniforms,
&std::default::Default::default()
).unwrap();
target.finish();
display.wait_events().next();
if display.is_closed() {
break;
}
}
}
|
Vertex
|
identifier_name
|
main.rs
|
/**
* Tutorial 1 -- Creating a Basic Triangle
*
* In this tutorial we learn how to create a window using glutin/glium and use
* OpenGL to render a triangle.
**/
// import the crates that we need
#[macro_use]
extern crate glium;
extern crate glutin;
use glium::{DisplayBuild, Surface};
// this struct will hold our vertex data
#[derive(Clone, Copy)]
struct Vertex {
position: [f32; 2],
}
// program entry point
fn main()
|
#version 130
in vec2 position;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}
"#;
let fragment_shader_src = r#"
#version 130
void main() {
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
"#;
let program = glium::Program::from_source(
&display, vertex_shader_src, fragment_shader_src, None
).unwrap();
loop {
let mut target = display.draw();
target.clear_color(0.0, 0.0, 1.0, 1.0);
target.draw(
&v_buf, &indices, &program, &glium::uniforms::EmptyUniforms,
&std::default::Default::default()
).unwrap();
target.finish();
display.wait_events().next();
if display.is_closed() {
break;
}
}
}
|
{
let display = glutin::WindowBuilder::new()
.with_title("tetra".to_string())
.build_glium()
.unwrap();
implement_vertex!(Vertex, position);
let vertex1 = Vertex { position: [-0.5, -0.5] };
let vertex2 = Vertex { position: [ 0.0, 0.5] };
let vertex3 = Vertex { position: [ 0.5, -0.25] };
let shape = vec![vertex1, vertex2, vertex3];
let v_buf = glium::VertexBuffer::new(&display, shape);
let indices =
glium::index::NoIndices(glium::index::PrimitiveType::TrianglesList);
let vertex_shader_src = r#"
|
identifier_body
|
main.rs
|
/**
* Tutorial 1 -- Creating a Basic Triangle
*
* In this tutorial we learn how to create a window using glutin/glium and use
* OpenGL to render a triangle.
**/
// import the crates that we need
#[macro_use]
extern crate glium;
extern crate glutin;
use glium::{DisplayBuild, Surface};
// this struct will hold our vertex data
#[derive(Clone, Copy)]
struct Vertex {
position: [f32; 2],
}
// program entry point
fn main() {
let display = glutin::WindowBuilder::new()
.with_title("tetra".to_string())
.build_glium()
.unwrap();
implement_vertex!(Vertex, position);
let vertex1 = Vertex { position: [-0.5, -0.5] };
let vertex2 = Vertex { position: [ 0.0, 0.5] };
let vertex3 = Vertex { position: [ 0.5, -0.25] };
let shape = vec![vertex1, vertex2, vertex3];
let v_buf = glium::VertexBuffer::new(&display, shape);
let indices =
glium::index::NoIndices(glium::index::PrimitiveType::TrianglesList);
let vertex_shader_src = r#"
#version 130
in vec2 position;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}
"#;
let fragment_shader_src = r#"
#version 130
void main() {
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
"#;
let program = glium::Program::from_source(
&display, vertex_shader_src, fragment_shader_src, None
).unwrap();
loop {
let mut target = display.draw();
target.clear_color(0.0, 0.0, 1.0, 1.0);
target.draw(
&v_buf, &indices, &program, &glium::uniforms::EmptyUniforms,
&std::default::Default::default()
).unwrap();
target.finish();
display.wait_events().next();
if display.is_closed()
|
}
}
|
{
break;
}
|
conditional_block
|
kindck-owned-trait.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait foo { fn foo(&self); }
fn
|
<T:Clone + foo>(t: T) -> @foo {
@t as @foo
//~^ ERROR value may contain borrowed pointers; add `'static` bound
//~^^ ERROR cannot pack type
//~^^^ ERROR value may contain borrowed pointers
}
fn to_foo2<T:Clone + foo +'static>(t: T) -> @foo {
@t as @foo
}
fn main() {}
|
to_foo
|
identifier_name
|
kindck-owned-trait.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait foo { fn foo(&self); }
|
//~^^^ ERROR value may contain borrowed pointers
}
fn to_foo2<T:Clone + foo +'static>(t: T) -> @foo {
@t as @foo
}
fn main() {}
|
fn to_foo<T:Clone + foo>(t: T) -> @foo {
@t as @foo
//~^ ERROR value may contain borrowed pointers; add `'static` bound
//~^^ ERROR cannot pack type
|
random_line_split
|
kindck-owned-trait.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait foo { fn foo(&self); }
fn to_foo<T:Clone + foo>(t: T) -> @foo {
@t as @foo
//~^ ERROR value may contain borrowed pointers; add `'static` bound
//~^^ ERROR cannot pack type
//~^^^ ERROR value may contain borrowed pointers
}
fn to_foo2<T:Clone + foo +'static>(t: T) -> @foo
|
fn main() {}
|
{
@t as @foo
}
|
identifier_body
|
server.rs
|
extern crate gj;
extern crate gjio;
extern crate slab;
use std::io::{Error, ErrorKind};
use std::rc::{Rc, Weak};
use std::cell::{Cell, RefCell};
use slab::Slab;
use gj::{EventLoop, Promise, TaskReaper, TaskSet};
use gjio::{SocketStream, AsyncRead, AsyncWrite};
struct WriteQueue {
task: Promise<(SocketStream, Bomb), Error>,
len: Rc<Cell<usize>>,
}
impl WriteQueue {
fn new() -> WriteQueue {
WriteQueue {
task: Promise::err(Error::new(ErrorKind::Other, "uninitialized")),
len: Rc::new(Cell::new(0)),
}
}
fn init(&mut self, idx: usize, subscribers: &Rc<RefCell<Slab<WriteQueue>>>,
stream: SocketStream ) {
self.task = Promise::ok((stream, Bomb {
subscribers: Rc::downgrade(subscribers),
idx: idx
}));
}
fn len(&self) -> usize {
|
}
fn send(&mut self, message: Vec<u8>) {
let task = ::std::mem::replace(&mut self.task, Promise::err(Error::new(ErrorKind::Other, "uninitialized")));
self.len.set(self.len.get() + 1);
let len = self.len.clone();
self.task = task.then(move |(mut stream, bomb)| {
let header = vec![message.len() as u8];
stream.write(header).then(move |_| {
stream.write(message).then(move |_| {
len.set(len.get() - 1);
Promise::ok((stream, bomb))
})
})
});
}
}
struct Bomb {
subscribers: Weak<RefCell<Slab<WriteQueue>>>,
idx: usize,
}
impl Drop for Bomb {
fn drop(&mut self) {
match self.subscribers.upgrade() {
Some(s) => {
s.borrow_mut().remove(self.idx).unwrap();
}
None => (),
}
}
}
fn handle_publisher(mut stream: SocketStream, messages_received: u64,
subscribers: Rc<RefCell<Slab<WriteQueue>>>) -> Promise<(), Error> {
stream.try_read(vec![0], 1).then(move |(buf, n)| {
if n == 0 {
// EOF
Promise::ok(())
} else {
let len = buf[0] as usize;
let body = vec![0u8; len];
stream.read(body, len).then(move |(buf, _)| {
for ref mut write_queue in subscribers.borrow_mut().iter_mut() {
if write_queue.len() < 5 {
write_queue.send(buf.clone());
}
}
handle_publisher(stream, messages_received + 1, subscribers)
})
}
})
}
fn handle_connection(stream: SocketStream,
subscribers: Rc<RefCell<Slab<WriteQueue>>>)
-> Promise<(), Error> {
let read_stream = stream.clone();
let write_queue = WriteQueue::new();
if!subscribers.borrow().has_available() {
let len = subscribers.borrow().len();
subscribers.borrow_mut().reserve_exact(len);
}
let idx = match subscribers.borrow_mut().insert(write_queue) {
Ok(idx) => idx,
Err(_) => unreachable!(),
};
match subscribers.borrow_mut().get_mut(idx) {
Some(ref mut q) => q.init(idx, &subscribers, stream),
None => unreachable!(),
}
handle_publisher(read_stream, 0, subscribers)
}
fn accept_loop(listener: gjio::SocketListener,
mut task_set: TaskSet<(), ::std::io::Error>,
subscribers: Rc<RefCell<Slab<WriteQueue>>>)
-> Promise<(), ::std::io::Error>
{
listener.accept().then(move |stream| {
task_set.add(handle_connection(stream, subscribers.clone()));
accept_loop(listener, task_set, subscribers)
})
}
struct Reaper;
impl TaskReaper<(), ::std::io::Error> for Reaper {
fn task_failed(&mut self, error: ::std::io::Error) {
println!("Task failed: {}", error);
}
}
pub fn main() {
let args: Vec<String> = ::std::env::args().collect();
if args.len()!= 2 {
println!("usage: {} HOST:PORT", args[0]);
return;
}
EventLoop::top_level(move |wait_scope| -> Result<(), Box<::std::error::Error>> {
let mut event_port = try!(gjio::EventPort::new());
let network = event_port.get_network();
let addr_str = &args[1];
let addr = try!(addr_str.parse::<::std::net::SocketAddr>());
let mut address = network.get_tcp_address(addr);
let listener = try!(address.listen());
println!("listening on {}", addr_str);
let reaper = Box::new(Reaper);
let subscribers: Rc<RefCell<Slab<WriteQueue>>> =
Rc::new(RefCell::new(Slab::with_capacity(1024)));
try!(accept_loop(listener, TaskSet::new(reaper), subscribers).wait(wait_scope, &mut event_port));
Ok(())
}).expect("top level");
}
|
self.len.get()
|
random_line_split
|
server.rs
|
extern crate gj;
extern crate gjio;
extern crate slab;
use std::io::{Error, ErrorKind};
use std::rc::{Rc, Weak};
use std::cell::{Cell, RefCell};
use slab::Slab;
use gj::{EventLoop, Promise, TaskReaper, TaskSet};
use gjio::{SocketStream, AsyncRead, AsyncWrite};
struct WriteQueue {
task: Promise<(SocketStream, Bomb), Error>,
len: Rc<Cell<usize>>,
}
impl WriteQueue {
fn new() -> WriteQueue {
WriteQueue {
task: Promise::err(Error::new(ErrorKind::Other, "uninitialized")),
len: Rc::new(Cell::new(0)),
}
}
fn init(&mut self, idx: usize, subscribers: &Rc<RefCell<Slab<WriteQueue>>>,
stream: SocketStream ) {
self.task = Promise::ok((stream, Bomb {
subscribers: Rc::downgrade(subscribers),
idx: idx
}));
}
fn len(&self) -> usize {
self.len.get()
}
fn send(&mut self, message: Vec<u8>) {
let task = ::std::mem::replace(&mut self.task, Promise::err(Error::new(ErrorKind::Other, "uninitialized")));
self.len.set(self.len.get() + 1);
let len = self.len.clone();
self.task = task.then(move |(mut stream, bomb)| {
let header = vec![message.len() as u8];
stream.write(header).then(move |_| {
stream.write(message).then(move |_| {
len.set(len.get() - 1);
Promise::ok((stream, bomb))
})
})
});
}
}
struct Bomb {
subscribers: Weak<RefCell<Slab<WriteQueue>>>,
idx: usize,
}
impl Drop for Bomb {
fn drop(&mut self) {
match self.subscribers.upgrade() {
Some(s) => {
s.borrow_mut().remove(self.idx).unwrap();
}
None => (),
}
}
}
fn handle_publisher(mut stream: SocketStream, messages_received: u64,
subscribers: Rc<RefCell<Slab<WriteQueue>>>) -> Promise<(), Error> {
stream.try_read(vec![0], 1).then(move |(buf, n)| {
if n == 0 {
// EOF
Promise::ok(())
} else {
let len = buf[0] as usize;
let body = vec![0u8; len];
stream.read(body, len).then(move |(buf, _)| {
for ref mut write_queue in subscribers.borrow_mut().iter_mut() {
if write_queue.len() < 5 {
write_queue.send(buf.clone());
}
}
handle_publisher(stream, messages_received + 1, subscribers)
})
}
})
}
fn handle_connection(stream: SocketStream,
subscribers: Rc<RefCell<Slab<WriteQueue>>>)
-> Promise<(), Error> {
let read_stream = stream.clone();
let write_queue = WriteQueue::new();
if!subscribers.borrow().has_available() {
let len = subscribers.borrow().len();
subscribers.borrow_mut().reserve_exact(len);
}
let idx = match subscribers.borrow_mut().insert(write_queue) {
Ok(idx) => idx,
Err(_) => unreachable!(),
};
match subscribers.borrow_mut().get_mut(idx) {
Some(ref mut q) => q.init(idx, &subscribers, stream),
None => unreachable!(),
}
handle_publisher(read_stream, 0, subscribers)
}
fn accept_loop(listener: gjio::SocketListener,
mut task_set: TaskSet<(), ::std::io::Error>,
subscribers: Rc<RefCell<Slab<WriteQueue>>>)
-> Promise<(), ::std::io::Error>
{
listener.accept().then(move |stream| {
task_set.add(handle_connection(stream, subscribers.clone()));
accept_loop(listener, task_set, subscribers)
})
}
struct Reaper;
impl TaskReaper<(), ::std::io::Error> for Reaper {
fn task_failed(&mut self, error: ::std::io::Error) {
println!("Task failed: {}", error);
}
}
pub fn main()
|
try!(accept_loop(listener, TaskSet::new(reaper), subscribers).wait(wait_scope, &mut event_port));
Ok(())
}).expect("top level");
}
|
{
let args: Vec<String> = ::std::env::args().collect();
if args.len() != 2 {
println!("usage: {} HOST:PORT", args[0]);
return;
}
EventLoop::top_level(move |wait_scope| -> Result<(), Box<::std::error::Error>> {
let mut event_port = try!(gjio::EventPort::new());
let network = event_port.get_network();
let addr_str = &args[1];
let addr = try!(addr_str.parse::<::std::net::SocketAddr>());
let mut address = network.get_tcp_address(addr);
let listener = try!(address.listen());
println!("listening on {}", addr_str);
let reaper = Box::new(Reaper);
let subscribers: Rc<RefCell<Slab<WriteQueue>>> =
Rc::new(RefCell::new(Slab::with_capacity(1024)));
|
identifier_body
|
server.rs
|
extern crate gj;
extern crate gjio;
extern crate slab;
use std::io::{Error, ErrorKind};
use std::rc::{Rc, Weak};
use std::cell::{Cell, RefCell};
use slab::Slab;
use gj::{EventLoop, Promise, TaskReaper, TaskSet};
use gjio::{SocketStream, AsyncRead, AsyncWrite};
struct WriteQueue {
task: Promise<(SocketStream, Bomb), Error>,
len: Rc<Cell<usize>>,
}
impl WriteQueue {
fn new() -> WriteQueue {
WriteQueue {
task: Promise::err(Error::new(ErrorKind::Other, "uninitialized")),
len: Rc::new(Cell::new(0)),
}
}
fn init(&mut self, idx: usize, subscribers: &Rc<RefCell<Slab<WriteQueue>>>,
stream: SocketStream ) {
self.task = Promise::ok((stream, Bomb {
subscribers: Rc::downgrade(subscribers),
idx: idx
}));
}
fn
|
(&self) -> usize {
self.len.get()
}
fn send(&mut self, message: Vec<u8>) {
let task = ::std::mem::replace(&mut self.task, Promise::err(Error::new(ErrorKind::Other, "uninitialized")));
self.len.set(self.len.get() + 1);
let len = self.len.clone();
self.task = task.then(move |(mut stream, bomb)| {
let header = vec![message.len() as u8];
stream.write(header).then(move |_| {
stream.write(message).then(move |_| {
len.set(len.get() - 1);
Promise::ok((stream, bomb))
})
})
});
}
}
struct Bomb {
subscribers: Weak<RefCell<Slab<WriteQueue>>>,
idx: usize,
}
impl Drop for Bomb {
fn drop(&mut self) {
match self.subscribers.upgrade() {
Some(s) => {
s.borrow_mut().remove(self.idx).unwrap();
}
None => (),
}
}
}
fn handle_publisher(mut stream: SocketStream, messages_received: u64,
subscribers: Rc<RefCell<Slab<WriteQueue>>>) -> Promise<(), Error> {
stream.try_read(vec![0], 1).then(move |(buf, n)| {
if n == 0 {
// EOF
Promise::ok(())
} else {
let len = buf[0] as usize;
let body = vec![0u8; len];
stream.read(body, len).then(move |(buf, _)| {
for ref mut write_queue in subscribers.borrow_mut().iter_mut() {
if write_queue.len() < 5 {
write_queue.send(buf.clone());
}
}
handle_publisher(stream, messages_received + 1, subscribers)
})
}
})
}
fn handle_connection(stream: SocketStream,
subscribers: Rc<RefCell<Slab<WriteQueue>>>)
-> Promise<(), Error> {
let read_stream = stream.clone();
let write_queue = WriteQueue::new();
if!subscribers.borrow().has_available() {
let len = subscribers.borrow().len();
subscribers.borrow_mut().reserve_exact(len);
}
let idx = match subscribers.borrow_mut().insert(write_queue) {
Ok(idx) => idx,
Err(_) => unreachable!(),
};
match subscribers.borrow_mut().get_mut(idx) {
Some(ref mut q) => q.init(idx, &subscribers, stream),
None => unreachable!(),
}
handle_publisher(read_stream, 0, subscribers)
}
fn accept_loop(listener: gjio::SocketListener,
mut task_set: TaskSet<(), ::std::io::Error>,
subscribers: Rc<RefCell<Slab<WriteQueue>>>)
-> Promise<(), ::std::io::Error>
{
listener.accept().then(move |stream| {
task_set.add(handle_connection(stream, subscribers.clone()));
accept_loop(listener, task_set, subscribers)
})
}
struct Reaper;
impl TaskReaper<(), ::std::io::Error> for Reaper {
fn task_failed(&mut self, error: ::std::io::Error) {
println!("Task failed: {}", error);
}
}
pub fn main() {
let args: Vec<String> = ::std::env::args().collect();
if args.len()!= 2 {
println!("usage: {} HOST:PORT", args[0]);
return;
}
EventLoop::top_level(move |wait_scope| -> Result<(), Box<::std::error::Error>> {
let mut event_port = try!(gjio::EventPort::new());
let network = event_port.get_network();
let addr_str = &args[1];
let addr = try!(addr_str.parse::<::std::net::SocketAddr>());
let mut address = network.get_tcp_address(addr);
let listener = try!(address.listen());
println!("listening on {}", addr_str);
let reaper = Box::new(Reaper);
let subscribers: Rc<RefCell<Slab<WriteQueue>>> =
Rc::new(RefCell::new(Slab::with_capacity(1024)));
try!(accept_loop(listener, TaskSet::new(reaper), subscribers).wait(wait_scope, &mut event_port));
Ok(())
}).expect("top level");
}
|
len
|
identifier_name
|
htmlcanvaselement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use canvas_traits::{CanvasMsg, FromLayoutMsg, CanvasData};
use dom::attr::Attr;
use dom::attr::AttrValue;
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::CanvasRenderingContext2DBinding::CanvasRenderingContext2DMethods;
use dom::bindings::codegen::Bindings::HTMLCanvasElementBinding;
use dom::bindings::codegen::Bindings::HTMLCanvasElementBinding::HTMLCanvasElementMethods;
use dom::bindings::codegen::Bindings::WebGLRenderingContextBinding::WebGLContextAttributes;
use dom::bindings::codegen::UnionTypes::CanvasRenderingContext2DOrWebGLRenderingContext;
use dom::bindings::error::{Error, Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{HeapGCValue, JS, LayoutJS, Root};
use dom::bindings::num::Finite;
use dom::bindings::reflector::Reflectable;
use dom::canvasrenderingcontext2d::{CanvasRenderingContext2D, LayoutCanvasRenderingContext2DHelpers};
use dom::document::Document;
use dom::element::{AttributeMutation, Element, RawLayoutElementHelpers};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use dom::webglrenderingcontext::{LayoutCanvasWebGLRenderingContextHelpers, WebGLRenderingContext};
use euclid::size::Size2D;
use image::ColorType;
use image::png::PNGEncoder;
use ipc_channel::ipc::{self, IpcSender};
use js::jsapi::{HandleValue, JSContext};
use offscreen_gl_context::GLContextAttributes;
use rustc_serialize::base64::{STANDARD, ToBase64};
use std::iter::repeat;
use string_cache::Atom;
use util::str::DOMString;
const DEFAULT_WIDTH: u32 = 300;
const DEFAULT_HEIGHT: u32 = 150;
#[must_root]
#[derive(JSTraceable, Clone, HeapSizeOf)]
pub enum CanvasContext {
Context2d(JS<CanvasRenderingContext2D>),
WebGL(JS<WebGLRenderingContext>),
}
impl HeapGCValue for CanvasContext {}
#[dom_struct]
pub struct HTMLCanvasElement {
htmlelement: HTMLElement,
context: DOMRefCell<Option<CanvasContext>>,
}
impl HTMLCanvasElement {
fn new_inherited(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> HTMLCanvasElement {
HTMLCanvasElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document),
context: DOMRefCell::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLCanvasElement> {
let element = HTMLCanvasElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLCanvasElementBinding::Wrap)
}
fn
|
(&self) {
let size = self.get_size();
if let Some(ref context) = *self.context.borrow() {
match *context {
CanvasContext::Context2d(ref context) => context.set_bitmap_dimensions(size),
CanvasContext::WebGL(ref context) => context.recreate(size),
}
}
}
pub fn get_size(&self) -> Size2D<i32> {
Size2D::new(self.Width() as i32, self.Height() as i32)
}
pub fn origin_is_clean(&self) -> bool {
match *self.context.borrow() {
Some(CanvasContext::Context2d(ref context)) => context.origin_is_clean(),
_ => true,
}
}
}
pub struct HTMLCanvasData {
pub ipc_renderer: Option<IpcSender<CanvasMsg>>,
pub width: u32,
pub height: u32,
}
pub trait LayoutHTMLCanvasElementHelpers {
fn data(&self) -> HTMLCanvasData;
}
impl LayoutHTMLCanvasElementHelpers for LayoutJS<HTMLCanvasElement> {
#[allow(unsafe_code)]
fn data(&self) -> HTMLCanvasData {
unsafe {
let canvas = &*self.unsafe_get();
let ipc_renderer = canvas.context.borrow_for_layout().as_ref().map(|context| {
match *context {
CanvasContext::Context2d(ref context) => {
context.to_layout().get_ipc_renderer()
},
CanvasContext::WebGL(ref context) => {
context.to_layout().get_ipc_renderer()
},
}
});
let width_attr = canvas.upcast::<Element>().get_attr_for_layout(&ns!(), &atom!("width"));
let height_attr = canvas.upcast::<Element>().get_attr_for_layout(&ns!(), &atom!("height"));
HTMLCanvasData {
ipc_renderer: ipc_renderer,
width: width_attr.map_or(DEFAULT_WIDTH, |val| val.as_uint()),
height: height_attr.map_or(DEFAULT_HEIGHT, |val| val.as_uint()),
}
}
}
}
impl HTMLCanvasElement {
pub fn ipc_renderer(&self) -> Option<IpcSender<CanvasMsg>> {
self.context.borrow().as_ref().map(|context| {
match *context {
CanvasContext::Context2d(ref context) => context.ipc_renderer(),
CanvasContext::WebGL(ref context) => context.ipc_renderer(),
}
})
}
pub fn get_or_init_2d_context(&self) -> Option<Root<CanvasRenderingContext2D>> {
if self.context.borrow().is_none() {
let window = window_from_node(self);
let size = self.get_size();
let context = CanvasRenderingContext2D::new(GlobalRef::Window(window.r()), self, size);
*self.context.borrow_mut() = Some(CanvasContext::Context2d(JS::from_rooted(&context)));
}
match *self.context.borrow().as_ref().unwrap() {
CanvasContext::Context2d(ref context) => Some(Root::from_ref(&*context)),
_ => None,
}
}
#[allow(unsafe_code)]
pub fn get_or_init_webgl_context(&self,
cx: *mut JSContext,
attrs: Option<HandleValue>) -> Option<Root<WebGLRenderingContext>> {
if self.context.borrow().is_none() {
let window = window_from_node(self);
let size = self.get_size();
let attrs = if let Some(webgl_attributes) = attrs {
if let Ok(ref attrs) = unsafe { WebGLContextAttributes::new(cx, webgl_attributes) } {
From::from(attrs)
} else {
debug!("Unexpected error on conversion of WebGLContextAttributes");
return None;
}
} else {
GLContextAttributes::default()
};
let maybe_ctx = WebGLRenderingContext::new(GlobalRef::Window(window.r()), self, size, attrs);
*self.context.borrow_mut() = maybe_ctx.map( |ctx| CanvasContext::WebGL(JS::from_rooted(&ctx)));
}
if let Some(CanvasContext::WebGL(ref context)) = *self.context.borrow() {
Some(Root::from_ref(&*context))
} else {
None
}
}
pub fn is_valid(&self) -> bool {
self.Height()!= 0 && self.Width()!= 0
}
pub fn fetch_all_data(&self) -> Option<(Vec<u8>, Size2D<i32>)> {
let size = self.get_size();
if size.width == 0 || size.height == 0 {
return None
}
let data = if let Some(renderer) = self.ipc_renderer() {
let (sender, receiver) = ipc::channel().unwrap();
let msg = CanvasMsg::FromLayout(FromLayoutMsg::SendData(sender));
renderer.send(msg).unwrap();
match receiver.recv().unwrap() {
CanvasData::Pixels(pixel_data)
=> pixel_data.image_data.to_vec(),
CanvasData::WebGL(_)
// TODO(ecoal95): Not sure if WebGL canvas is required for 2d spec,
// but I think it's not.
=> return None,
}
} else {
repeat(0xffu8).take((size.height as usize) * (size.width as usize) * 4).collect()
};
Some((data, size))
}
}
impl HTMLCanvasElementMethods for HTMLCanvasElement {
// https://html.spec.whatwg.org/multipage/#dom-canvas-width
make_uint_getter!(Width, "width", DEFAULT_WIDTH);
// https://html.spec.whatwg.org/multipage/#dom-canvas-width
make_uint_setter!(SetWidth, "width", DEFAULT_WIDTH);
// https://html.spec.whatwg.org/multipage/#dom-canvas-height
make_uint_getter!(Height, "height", DEFAULT_HEIGHT);
// https://html.spec.whatwg.org/multipage/#dom-canvas-height
make_uint_setter!(SetHeight, "height", DEFAULT_HEIGHT);
// https://html.spec.whatwg.org/multipage/#dom-canvas-getcontext
fn GetContext(&self,
cx: *mut JSContext,
id: DOMString,
attributes: Vec<HandleValue>)
-> Option<CanvasRenderingContext2DOrWebGLRenderingContext> {
match &*id {
"2d" => {
self.get_or_init_2d_context()
.map(CanvasRenderingContext2DOrWebGLRenderingContext::CanvasRenderingContext2D)
}
"webgl" | "experimental-webgl" => {
self.get_or_init_webgl_context(cx, attributes.get(0).cloned())
.map(CanvasRenderingContext2DOrWebGLRenderingContext::WebGLRenderingContext)
}
_ => None
}
}
// https://html.spec.whatwg.org/multipage/#dom-canvas-todataurl
fn ToDataURL(&self,
_context: *mut JSContext,
_mime_type: Option<DOMString>,
_arguments: Vec<HandleValue>) -> Fallible<DOMString> {
// Step 1.
if let Some(CanvasContext::Context2d(ref context)) = *self.context.borrow() {
if!context.origin_is_clean() {
return Err(Error::Security);
}
}
// Step 2.
if self.Width() == 0 || self.Height() == 0 {
return Ok(DOMString::from("data:,"));
}
// Step 3.
let raw_data = match *self.context.borrow() {
Some(CanvasContext::Context2d(ref context)) => {
let window = window_from_node(self);
let image_data = try!(context.GetImageData(Finite::wrap(0f64), Finite::wrap(0f64),
Finite::wrap(self.Width() as f64),
Finite::wrap(self.Height() as f64)));
image_data.get_data_array(&GlobalRef::Window(window.r()))
}
None => {
// Each pixel is fully-transparent black.
vec![0; (self.Width() * self.Height() * 4) as usize]
}
_ => return Err(Error::NotSupported) // WebGL
};
// Only handle image/png for now.
let mime_type = "image/png";
let mut encoded = Vec::new();
{
let encoder: PNGEncoder<&mut Vec<u8>> = PNGEncoder::new(&mut encoded);
encoder.encode(&raw_data, self.Width(), self.Height(), ColorType::RGBA(8)).unwrap();
}
let encoded = encoded.to_base64(STANDARD);
Ok(DOMString::from(format!("data:{};base64,{}", mime_type, encoded)))
}
}
impl VirtualMethods for HTMLCanvasElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
match attr.local_name() {
&atom!("width") | &atom!("height") => self.recreate_contexts(),
_ => (),
};
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("width") => AttrValue::from_u32(value, DEFAULT_WIDTH),
&atom!("height") => AttrValue::from_u32(value, DEFAULT_HEIGHT),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
impl<'a> From<&'a WebGLContextAttributes> for GLContextAttributes {
fn from(attrs: &'a WebGLContextAttributes) -> GLContextAttributes {
GLContextAttributes {
alpha: attrs.alpha,
depth: attrs.depth,
stencil: attrs.stencil,
antialias: attrs.antialias,
premultiplied_alpha: attrs.premultipliedAlpha,
preserve_drawing_buffer: attrs.preserveDrawingBuffer,
}
}
}
pub mod utils {
use dom::window::Window;
use ipc_channel::ipc;
use net_traits::image_cache_thread::{ImageCacheChan, ImageResponse};
use url::Url;
pub fn request_image_from_cache(window: &Window, url: Url) -> ImageResponse {
let image_cache = window.image_cache_thread();
let (response_chan, response_port) = ipc::channel().unwrap();
image_cache.request_image(url, ImageCacheChan(response_chan), None);
let result = response_port.recv().unwrap();
result.image_response
}
}
|
recreate_contexts
|
identifier_name
|
htmlcanvaselement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use canvas_traits::{CanvasMsg, FromLayoutMsg, CanvasData};
use dom::attr::Attr;
use dom::attr::AttrValue;
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::CanvasRenderingContext2DBinding::CanvasRenderingContext2DMethods;
use dom::bindings::codegen::Bindings::HTMLCanvasElementBinding;
use dom::bindings::codegen::Bindings::HTMLCanvasElementBinding::HTMLCanvasElementMethods;
use dom::bindings::codegen::Bindings::WebGLRenderingContextBinding::WebGLContextAttributes;
use dom::bindings::codegen::UnionTypes::CanvasRenderingContext2DOrWebGLRenderingContext;
use dom::bindings::error::{Error, Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{HeapGCValue, JS, LayoutJS, Root};
use dom::bindings::num::Finite;
use dom::bindings::reflector::Reflectable;
use dom::canvasrenderingcontext2d::{CanvasRenderingContext2D, LayoutCanvasRenderingContext2DHelpers};
use dom::document::Document;
use dom::element::{AttributeMutation, Element, RawLayoutElementHelpers};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use dom::webglrenderingcontext::{LayoutCanvasWebGLRenderingContextHelpers, WebGLRenderingContext};
use euclid::size::Size2D;
use image::ColorType;
use image::png::PNGEncoder;
use ipc_channel::ipc::{self, IpcSender};
use js::jsapi::{HandleValue, JSContext};
use offscreen_gl_context::GLContextAttributes;
use rustc_serialize::base64::{STANDARD, ToBase64};
use std::iter::repeat;
use string_cache::Atom;
use util::str::DOMString;
const DEFAULT_WIDTH: u32 = 300;
const DEFAULT_HEIGHT: u32 = 150;
#[must_root]
#[derive(JSTraceable, Clone, HeapSizeOf)]
pub enum CanvasContext {
Context2d(JS<CanvasRenderingContext2D>),
WebGL(JS<WebGLRenderingContext>),
}
impl HeapGCValue for CanvasContext {}
#[dom_struct]
pub struct HTMLCanvasElement {
htmlelement: HTMLElement,
context: DOMRefCell<Option<CanvasContext>>,
}
impl HTMLCanvasElement {
fn new_inherited(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> HTMLCanvasElement {
HTMLCanvasElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document),
context: DOMRefCell::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLCanvasElement> {
let element = HTMLCanvasElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLCanvasElementBinding::Wrap)
}
fn recreate_contexts(&self) {
let size = self.get_size();
if let Some(ref context) = *self.context.borrow() {
match *context {
CanvasContext::Context2d(ref context) => context.set_bitmap_dimensions(size),
CanvasContext::WebGL(ref context) => context.recreate(size),
}
}
}
pub fn get_size(&self) -> Size2D<i32> {
Size2D::new(self.Width() as i32, self.Height() as i32)
}
pub fn origin_is_clean(&self) -> bool {
match *self.context.borrow() {
Some(CanvasContext::Context2d(ref context)) => context.origin_is_clean(),
_ => true,
}
}
}
pub struct HTMLCanvasData {
pub ipc_renderer: Option<IpcSender<CanvasMsg>>,
pub width: u32,
pub height: u32,
}
pub trait LayoutHTMLCanvasElementHelpers {
fn data(&self) -> HTMLCanvasData;
}
impl LayoutHTMLCanvasElementHelpers for LayoutJS<HTMLCanvasElement> {
#[allow(unsafe_code)]
fn data(&self) -> HTMLCanvasData {
unsafe {
let canvas = &*self.unsafe_get();
let ipc_renderer = canvas.context.borrow_for_layout().as_ref().map(|context| {
match *context {
CanvasContext::Context2d(ref context) => {
context.to_layout().get_ipc_renderer()
},
CanvasContext::WebGL(ref context) => {
context.to_layout().get_ipc_renderer()
},
}
});
let width_attr = canvas.upcast::<Element>().get_attr_for_layout(&ns!(), &atom!("width"));
let height_attr = canvas.upcast::<Element>().get_attr_for_layout(&ns!(), &atom!("height"));
HTMLCanvasData {
ipc_renderer: ipc_renderer,
width: width_attr.map_or(DEFAULT_WIDTH, |val| val.as_uint()),
height: height_attr.map_or(DEFAULT_HEIGHT, |val| val.as_uint()),
}
}
}
}
impl HTMLCanvasElement {
pub fn ipc_renderer(&self) -> Option<IpcSender<CanvasMsg>> {
self.context.borrow().as_ref().map(|context| {
match *context {
CanvasContext::Context2d(ref context) => context.ipc_renderer(),
CanvasContext::WebGL(ref context) => context.ipc_renderer(),
}
})
}
pub fn get_or_init_2d_context(&self) -> Option<Root<CanvasRenderingContext2D>> {
if self.context.borrow().is_none() {
let window = window_from_node(self);
let size = self.get_size();
let context = CanvasRenderingContext2D::new(GlobalRef::Window(window.r()), self, size);
*self.context.borrow_mut() = Some(CanvasContext::Context2d(JS::from_rooted(&context)));
}
match *self.context.borrow().as_ref().unwrap() {
CanvasContext::Context2d(ref context) => Some(Root::from_ref(&*context)),
_ => None,
}
}
#[allow(unsafe_code)]
pub fn get_or_init_webgl_context(&self,
cx: *mut JSContext,
attrs: Option<HandleValue>) -> Option<Root<WebGLRenderingContext>> {
if self.context.borrow().is_none() {
let window = window_from_node(self);
let size = self.get_size();
let attrs = if let Some(webgl_attributes) = attrs {
if let Ok(ref attrs) = unsafe { WebGLContextAttributes::new(cx, webgl_attributes) } {
From::from(attrs)
} else {
debug!("Unexpected error on conversion of WebGLContextAttributes");
return None;
}
} else {
GLContextAttributes::default()
};
let maybe_ctx = WebGLRenderingContext::new(GlobalRef::Window(window.r()), self, size, attrs);
*self.context.borrow_mut() = maybe_ctx.map( |ctx| CanvasContext::WebGL(JS::from_rooted(&ctx)));
}
if let Some(CanvasContext::WebGL(ref context)) = *self.context.borrow() {
Some(Root::from_ref(&*context))
} else {
None
}
}
pub fn is_valid(&self) -> bool {
self.Height()!= 0 && self.Width()!= 0
}
pub fn fetch_all_data(&self) -> Option<(Vec<u8>, Size2D<i32>)>
|
} else {
repeat(0xffu8).take((size.height as usize) * (size.width as usize) * 4).collect()
};
Some((data, size))
}
}
impl HTMLCanvasElementMethods for HTMLCanvasElement {
// https://html.spec.whatwg.org/multipage/#dom-canvas-width
make_uint_getter!(Width, "width", DEFAULT_WIDTH);
// https://html.spec.whatwg.org/multipage/#dom-canvas-width
make_uint_setter!(SetWidth, "width", DEFAULT_WIDTH);
// https://html.spec.whatwg.org/multipage/#dom-canvas-height
make_uint_getter!(Height, "height", DEFAULT_HEIGHT);
// https://html.spec.whatwg.org/multipage/#dom-canvas-height
make_uint_setter!(SetHeight, "height", DEFAULT_HEIGHT);
// https://html.spec.whatwg.org/multipage/#dom-canvas-getcontext
fn GetContext(&self,
cx: *mut JSContext,
id: DOMString,
attributes: Vec<HandleValue>)
-> Option<CanvasRenderingContext2DOrWebGLRenderingContext> {
match &*id {
"2d" => {
self.get_or_init_2d_context()
.map(CanvasRenderingContext2DOrWebGLRenderingContext::CanvasRenderingContext2D)
}
"webgl" | "experimental-webgl" => {
self.get_or_init_webgl_context(cx, attributes.get(0).cloned())
.map(CanvasRenderingContext2DOrWebGLRenderingContext::WebGLRenderingContext)
}
_ => None
}
}
// https://html.spec.whatwg.org/multipage/#dom-canvas-todataurl
fn ToDataURL(&self,
_context: *mut JSContext,
_mime_type: Option<DOMString>,
_arguments: Vec<HandleValue>) -> Fallible<DOMString> {
// Step 1.
if let Some(CanvasContext::Context2d(ref context)) = *self.context.borrow() {
if!context.origin_is_clean() {
return Err(Error::Security);
}
}
// Step 2.
if self.Width() == 0 || self.Height() == 0 {
return Ok(DOMString::from("data:,"));
}
// Step 3.
let raw_data = match *self.context.borrow() {
Some(CanvasContext::Context2d(ref context)) => {
let window = window_from_node(self);
let image_data = try!(context.GetImageData(Finite::wrap(0f64), Finite::wrap(0f64),
Finite::wrap(self.Width() as f64),
Finite::wrap(self.Height() as f64)));
image_data.get_data_array(&GlobalRef::Window(window.r()))
}
None => {
// Each pixel is fully-transparent black.
vec![0; (self.Width() * self.Height() * 4) as usize]
}
_ => return Err(Error::NotSupported) // WebGL
};
// Only handle image/png for now.
let mime_type = "image/png";
let mut encoded = Vec::new();
{
let encoder: PNGEncoder<&mut Vec<u8>> = PNGEncoder::new(&mut encoded);
encoder.encode(&raw_data, self.Width(), self.Height(), ColorType::RGBA(8)).unwrap();
}
let encoded = encoded.to_base64(STANDARD);
Ok(DOMString::from(format!("data:{};base64,{}", mime_type, encoded)))
}
}
impl VirtualMethods for HTMLCanvasElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
match attr.local_name() {
&atom!("width") | &atom!("height") => self.recreate_contexts(),
_ => (),
};
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("width") => AttrValue::from_u32(value, DEFAULT_WIDTH),
&atom!("height") => AttrValue::from_u32(value, DEFAULT_HEIGHT),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
impl<'a> From<&'a WebGLContextAttributes> for GLContextAttributes {
fn from(attrs: &'a WebGLContextAttributes) -> GLContextAttributes {
GLContextAttributes {
alpha: attrs.alpha,
depth: attrs.depth,
stencil: attrs.stencil,
antialias: attrs.antialias,
premultiplied_alpha: attrs.premultipliedAlpha,
preserve_drawing_buffer: attrs.preserveDrawingBuffer,
}
}
}
pub mod utils {
use dom::window::Window;
use ipc_channel::ipc;
use net_traits::image_cache_thread::{ImageCacheChan, ImageResponse};
use url::Url;
pub fn request_image_from_cache(window: &Window, url: Url) -> ImageResponse {
let image_cache = window.image_cache_thread();
let (response_chan, response_port) = ipc::channel().unwrap();
image_cache.request_image(url, ImageCacheChan(response_chan), None);
let result = response_port.recv().unwrap();
result.image_response
}
}
|
{
let size = self.get_size();
if size.width == 0 || size.height == 0 {
return None
}
let data = if let Some(renderer) = self.ipc_renderer() {
let (sender, receiver) = ipc::channel().unwrap();
let msg = CanvasMsg::FromLayout(FromLayoutMsg::SendData(sender));
renderer.send(msg).unwrap();
match receiver.recv().unwrap() {
CanvasData::Pixels(pixel_data)
=> pixel_data.image_data.to_vec(),
CanvasData::WebGL(_)
// TODO(ecoal95): Not sure if WebGL canvas is required for 2d spec,
// but I think it's not.
=> return None,
}
|
identifier_body
|
htmlcanvaselement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use canvas_traits::{CanvasMsg, FromLayoutMsg, CanvasData};
use dom::attr::Attr;
use dom::attr::AttrValue;
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::CanvasRenderingContext2DBinding::CanvasRenderingContext2DMethods;
use dom::bindings::codegen::Bindings::HTMLCanvasElementBinding;
use dom::bindings::codegen::Bindings::HTMLCanvasElementBinding::HTMLCanvasElementMethods;
use dom::bindings::codegen::Bindings::WebGLRenderingContextBinding::WebGLContextAttributes;
use dom::bindings::codegen::UnionTypes::CanvasRenderingContext2DOrWebGLRenderingContext;
use dom::bindings::error::{Error, Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{HeapGCValue, JS, LayoutJS, Root};
use dom::bindings::num::Finite;
use dom::bindings::reflector::Reflectable;
use dom::canvasrenderingcontext2d::{CanvasRenderingContext2D, LayoutCanvasRenderingContext2DHelpers};
use dom::document::Document;
use dom::element::{AttributeMutation, Element, RawLayoutElementHelpers};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use dom::webglrenderingcontext::{LayoutCanvasWebGLRenderingContextHelpers, WebGLRenderingContext};
use euclid::size::Size2D;
use image::ColorType;
use image::png::PNGEncoder;
use ipc_channel::ipc::{self, IpcSender};
use js::jsapi::{HandleValue, JSContext};
use offscreen_gl_context::GLContextAttributes;
use rustc_serialize::base64::{STANDARD, ToBase64};
use std::iter::repeat;
use string_cache::Atom;
use util::str::DOMString;
const DEFAULT_WIDTH: u32 = 300;
const DEFAULT_HEIGHT: u32 = 150;
#[must_root]
#[derive(JSTraceable, Clone, HeapSizeOf)]
pub enum CanvasContext {
Context2d(JS<CanvasRenderingContext2D>),
WebGL(JS<WebGLRenderingContext>),
}
impl HeapGCValue for CanvasContext {}
#[dom_struct]
pub struct HTMLCanvasElement {
htmlelement: HTMLElement,
context: DOMRefCell<Option<CanvasContext>>,
}
impl HTMLCanvasElement {
fn new_inherited(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> HTMLCanvasElement {
HTMLCanvasElement {
htmlelement: HTMLElement::new_inherited(localName, prefix, document),
context: DOMRefCell::new(None),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: Atom,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLCanvasElement> {
let element = HTMLCanvasElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLCanvasElementBinding::Wrap)
}
fn recreate_contexts(&self) {
let size = self.get_size();
if let Some(ref context) = *self.context.borrow() {
match *context {
CanvasContext::Context2d(ref context) => context.set_bitmap_dimensions(size),
CanvasContext::WebGL(ref context) => context.recreate(size),
}
}
}
pub fn get_size(&self) -> Size2D<i32> {
Size2D::new(self.Width() as i32, self.Height() as i32)
}
pub fn origin_is_clean(&self) -> bool {
match *self.context.borrow() {
Some(CanvasContext::Context2d(ref context)) => context.origin_is_clean(),
_ => true,
}
}
}
pub struct HTMLCanvasData {
pub ipc_renderer: Option<IpcSender<CanvasMsg>>,
pub width: u32,
pub height: u32,
}
pub trait LayoutHTMLCanvasElementHelpers {
fn data(&self) -> HTMLCanvasData;
}
impl LayoutHTMLCanvasElementHelpers for LayoutJS<HTMLCanvasElement> {
#[allow(unsafe_code)]
fn data(&self) -> HTMLCanvasData {
unsafe {
let canvas = &*self.unsafe_get();
let ipc_renderer = canvas.context.borrow_for_layout().as_ref().map(|context| {
match *context {
CanvasContext::Context2d(ref context) => {
context.to_layout().get_ipc_renderer()
},
CanvasContext::WebGL(ref context) => {
context.to_layout().get_ipc_renderer()
},
}
});
let width_attr = canvas.upcast::<Element>().get_attr_for_layout(&ns!(), &atom!("width"));
let height_attr = canvas.upcast::<Element>().get_attr_for_layout(&ns!(), &atom!("height"));
HTMLCanvasData {
ipc_renderer: ipc_renderer,
width: width_attr.map_or(DEFAULT_WIDTH, |val| val.as_uint()),
height: height_attr.map_or(DEFAULT_HEIGHT, |val| val.as_uint()),
}
}
}
}
impl HTMLCanvasElement {
pub fn ipc_renderer(&self) -> Option<IpcSender<CanvasMsg>> {
self.context.borrow().as_ref().map(|context| {
match *context {
CanvasContext::Context2d(ref context) => context.ipc_renderer(),
CanvasContext::WebGL(ref context) => context.ipc_renderer(),
}
})
}
pub fn get_or_init_2d_context(&self) -> Option<Root<CanvasRenderingContext2D>> {
if self.context.borrow().is_none() {
let window = window_from_node(self);
let size = self.get_size();
let context = CanvasRenderingContext2D::new(GlobalRef::Window(window.r()), self, size);
*self.context.borrow_mut() = Some(CanvasContext::Context2d(JS::from_rooted(&context)));
}
match *self.context.borrow().as_ref().unwrap() {
CanvasContext::Context2d(ref context) => Some(Root::from_ref(&*context)),
_ => None,
}
}
#[allow(unsafe_code)]
pub fn get_or_init_webgl_context(&self,
cx: *mut JSContext,
attrs: Option<HandleValue>) -> Option<Root<WebGLRenderingContext>> {
if self.context.borrow().is_none() {
let window = window_from_node(self);
let size = self.get_size();
let attrs = if let Some(webgl_attributes) = attrs {
if let Ok(ref attrs) = unsafe { WebGLContextAttributes::new(cx, webgl_attributes) } {
From::from(attrs)
} else {
debug!("Unexpected error on conversion of WebGLContextAttributes");
return None;
}
} else {
GLContextAttributes::default()
};
let maybe_ctx = WebGLRenderingContext::new(GlobalRef::Window(window.r()), self, size, attrs);
*self.context.borrow_mut() = maybe_ctx.map( |ctx| CanvasContext::WebGL(JS::from_rooted(&ctx)));
}
if let Some(CanvasContext::WebGL(ref context)) = *self.context.borrow() {
Some(Root::from_ref(&*context))
} else {
None
}
}
pub fn is_valid(&self) -> bool {
self.Height()!= 0 && self.Width()!= 0
}
pub fn fetch_all_data(&self) -> Option<(Vec<u8>, Size2D<i32>)> {
let size = self.get_size();
if size.width == 0 || size.height == 0 {
return None
}
let data = if let Some(renderer) = self.ipc_renderer() {
let (sender, receiver) = ipc::channel().unwrap();
let msg = CanvasMsg::FromLayout(FromLayoutMsg::SendData(sender));
renderer.send(msg).unwrap();
match receiver.recv().unwrap() {
CanvasData::Pixels(pixel_data)
=> pixel_data.image_data.to_vec(),
CanvasData::WebGL(_)
// TODO(ecoal95): Not sure if WebGL canvas is required for 2d spec,
// but I think it's not.
=> return None,
}
} else {
repeat(0xffu8).take((size.height as usize) * (size.width as usize) * 4).collect()
};
Some((data, size))
}
}
impl HTMLCanvasElementMethods for HTMLCanvasElement {
// https://html.spec.whatwg.org/multipage/#dom-canvas-width
make_uint_getter!(Width, "width", DEFAULT_WIDTH);
// https://html.spec.whatwg.org/multipage/#dom-canvas-width
make_uint_setter!(SetWidth, "width", DEFAULT_WIDTH);
// https://html.spec.whatwg.org/multipage/#dom-canvas-height
make_uint_getter!(Height, "height", DEFAULT_HEIGHT);
// https://html.spec.whatwg.org/multipage/#dom-canvas-height
make_uint_setter!(SetHeight, "height", DEFAULT_HEIGHT);
// https://html.spec.whatwg.org/multipage/#dom-canvas-getcontext
fn GetContext(&self,
cx: *mut JSContext,
id: DOMString,
attributes: Vec<HandleValue>)
-> Option<CanvasRenderingContext2DOrWebGLRenderingContext> {
match &*id {
"2d" => {
self.get_or_init_2d_context()
.map(CanvasRenderingContext2DOrWebGLRenderingContext::CanvasRenderingContext2D)
}
"webgl" | "experimental-webgl" => {
self.get_or_init_webgl_context(cx, attributes.get(0).cloned())
.map(CanvasRenderingContext2DOrWebGLRenderingContext::WebGLRenderingContext)
}
_ => None
}
}
// https://html.spec.whatwg.org/multipage/#dom-canvas-todataurl
fn ToDataURL(&self,
_context: *mut JSContext,
_mime_type: Option<DOMString>,
_arguments: Vec<HandleValue>) -> Fallible<DOMString> {
// Step 1.
if let Some(CanvasContext::Context2d(ref context)) = *self.context.borrow() {
if!context.origin_is_clean() {
return Err(Error::Security);
}
}
// Step 2.
if self.Width() == 0 || self.Height() == 0 {
return Ok(DOMString::from("data:,"));
}
// Step 3.
let raw_data = match *self.context.borrow() {
Some(CanvasContext::Context2d(ref context)) => {
let window = window_from_node(self);
let image_data = try!(context.GetImageData(Finite::wrap(0f64), Finite::wrap(0f64),
Finite::wrap(self.Width() as f64),
Finite::wrap(self.Height() as f64)));
image_data.get_data_array(&GlobalRef::Window(window.r()))
}
None => {
// Each pixel is fully-transparent black.
vec![0; (self.Width() * self.Height() * 4) as usize]
}
_ => return Err(Error::NotSupported) // WebGL
};
// Only handle image/png for now.
let mime_type = "image/png";
let mut encoded = Vec::new();
{
let encoder: PNGEncoder<&mut Vec<u8>> = PNGEncoder::new(&mut encoded);
encoder.encode(&raw_data, self.Width(), self.Height(), ColorType::RGBA(8)).unwrap();
}
let encoded = encoded.to_base64(STANDARD);
Ok(DOMString::from(format!("data:{};base64,{}", mime_type, encoded)))
}
}
impl VirtualMethods for HTMLCanvasElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) {
self.super_type().unwrap().attribute_mutated(attr, mutation);
match attr.local_name() {
&atom!("width") | &atom!("height") => self.recreate_contexts(),
_ => (),
};
}
fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue {
match name {
&atom!("width") => AttrValue::from_u32(value, DEFAULT_WIDTH),
&atom!("height") => AttrValue::from_u32(value, DEFAULT_HEIGHT),
_ => self.super_type().unwrap().parse_plain_attribute(name, value),
}
}
}
|
impl<'a> From<&'a WebGLContextAttributes> for GLContextAttributes {
fn from(attrs: &'a WebGLContextAttributes) -> GLContextAttributes {
GLContextAttributes {
alpha: attrs.alpha,
depth: attrs.depth,
stencil: attrs.stencil,
antialias: attrs.antialias,
premultiplied_alpha: attrs.premultipliedAlpha,
preserve_drawing_buffer: attrs.preserveDrawingBuffer,
}
}
}
pub mod utils {
use dom::window::Window;
use ipc_channel::ipc;
use net_traits::image_cache_thread::{ImageCacheChan, ImageResponse};
use url::Url;
pub fn request_image_from_cache(window: &Window, url: Url) -> ImageResponse {
let image_cache = window.image_cache_thread();
let (response_chan, response_port) = ipc::channel().unwrap();
image_cache.request_image(url, ImageCacheChan(response_chan), None);
let result = response_port.recv().unwrap();
result.image_response
}
}
|
random_line_split
|
|
vec-matching-autoslice.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(slice_patterns)]
pub fn
|
() {
let x = [1, 2, 3];
match x {
[2, _, _] => panic!(),
[1, a, b] => {
assert!([a, b] == [2, 3]);
}
[_, _, _] => panic!(),
}
let y = ([(1, true), (2, false)], 0.5f64);
match y {
([(1, a), (b, false)], _) => {
assert_eq!(a, true);
assert_eq!(b, 2);
}
([_, _], 0.5) => panic!(),
([_, _], _) => panic!(),
}
}
|
main
|
identifier_name
|
vec-matching-autoslice.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(slice_patterns)]
pub fn main()
|
{
let x = [1, 2, 3];
match x {
[2, _, _] => panic!(),
[1, a, b] => {
assert!([a, b] == [2, 3]);
}
[_, _, _] => panic!(),
}
let y = ([(1, true), (2, false)], 0.5f64);
match y {
([(1, a), (b, false)], _) => {
assert_eq!(a, true);
assert_eq!(b, 2);
}
([_, _], 0.5) => panic!(),
([_, _], _) => panic!(),
}
}
|
identifier_body
|
|
vec-matching-autoslice.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(slice_patterns)]
pub fn main() {
let x = [1, 2, 3];
match x {
[2, _, _] => panic!(),
[1, a, b] => {
assert!([a, b] == [2, 3]);
}
|
([(1, a), (b, false)], _) => {
assert_eq!(a, true);
assert_eq!(b, 2);
}
([_, _], 0.5) => panic!(),
([_, _], _) => panic!(),
}
}
|
[_, _, _] => panic!(),
}
let y = ([(1, true), (2, false)], 0.5f64);
match y {
|
random_line_split
|
v6.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::migration::SimpleMigration;
/// This migration reduces the sizes of keys and moves `ExtrasIndex` byte from back to the front.
pub struct ToV6;
impl ToV6 {
fn migrate_old_key(&self, old_key: Vec<u8>, index: u8, len: usize) -> Vec<u8> {
let mut result = vec![];
result.reserve(len);
unsafe {
result.set_len(len);
}
result[0] = index;
let old_key_start = 33 - len;
result[1..].clone_from_slice(&old_key[old_key_start..32]);
result
}
}
impl SimpleMigration for ToV6 {
fn columns(&self) -> Option<u32> { None }
fn version(&self) -> u32 { 6 }
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
//// at this version all extras keys are 33 bytes long.
if key.len() == 33 {
// block details key changes:
// - index is moved to the front
if key[32] == 0 {
return Some((self.migrate_old_key(key, 0, 33), value));
}
// block hash key changes:
// - key is shorter 33 -> 5 bytes
// - index is moved to the front
if key[32] == 1 {
return Some((self.migrate_old_key(key, 1, 5), value));
}
// transaction addresses changes:
// - index is moved to the front
if key[32] == 2
|
// block log blooms are removed
if key[32] == 3 {
return None;
}
// blocks blooms key changes:
// - key is shorter 33 -> 6 bytes
// - index is moved to the front
// - index is changed 4 -> 3
if key[32] == 4 {
// i have no idea why it was reversed
let reverse = key.into_iter().rev().collect::<Vec<_>>();
let mut result = [0u8; 6];
// new extras index is 3
result[0] = 3;
// 9th (+ prefix) byte was the level. Now it's second.
result[1] = reverse[9];
result[2] = reverse[4];
result[3] = reverse[3];
result[4] = reverse[2];
result[5] = reverse[1];
return Some((result.to_vec(), value));
}
// blocks receipts key changes:
// - index is moved to the front
// - index is changed 5 -> 4
if key[32] == 5 {
return Some((self.migrate_old_key(key, 4, 33), value));
}
}
Some((key, value))
}
}
|
{
return Some((self.migrate_old_key(key, 2, 33), value));
}
|
conditional_block
|
v6.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::migration::SimpleMigration;
/// This migration reduces the sizes of keys and moves `ExtrasIndex` byte from back to the front.
pub struct ToV6;
impl ToV6 {
fn migrate_old_key(&self, old_key: Vec<u8>, index: u8, len: usize) -> Vec<u8> {
let mut result = vec![];
result.reserve(len);
unsafe {
result.set_len(len);
}
result[0] = index;
let old_key_start = 33 - len;
result[1..].clone_from_slice(&old_key[old_key_start..32]);
result
}
}
impl SimpleMigration for ToV6 {
fn
|
(&self) -> Option<u32> { None }
fn version(&self) -> u32 { 6 }
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
//// at this version all extras keys are 33 bytes long.
if key.len() == 33 {
// block details key changes:
// - index is moved to the front
if key[32] == 0 {
return Some((self.migrate_old_key(key, 0, 33), value));
}
// block hash key changes:
// - key is shorter 33 -> 5 bytes
// - index is moved to the front
if key[32] == 1 {
return Some((self.migrate_old_key(key, 1, 5), value));
}
// transaction addresses changes:
// - index is moved to the front
if key[32] == 2 {
return Some((self.migrate_old_key(key, 2, 33), value));
}
// block log blooms are removed
if key[32] == 3 {
return None;
}
// blocks blooms key changes:
// - key is shorter 33 -> 6 bytes
// - index is moved to the front
// - index is changed 4 -> 3
if key[32] == 4 {
// i have no idea why it was reversed
let reverse = key.into_iter().rev().collect::<Vec<_>>();
let mut result = [0u8; 6];
// new extras index is 3
result[0] = 3;
// 9th (+ prefix) byte was the level. Now it's second.
result[1] = reverse[9];
result[2] = reverse[4];
result[3] = reverse[3];
result[4] = reverse[2];
result[5] = reverse[1];
return Some((result.to_vec(), value));
}
// blocks receipts key changes:
// - index is moved to the front
// - index is changed 5 -> 4
if key[32] == 5 {
return Some((self.migrate_old_key(key, 4, 33), value));
}
}
Some((key, value))
}
}
|
columns
|
identifier_name
|
v6.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::migration::SimpleMigration;
/// This migration reduces the sizes of keys and moves `ExtrasIndex` byte from back to the front.
pub struct ToV6;
impl ToV6 {
fn migrate_old_key(&self, old_key: Vec<u8>, index: u8, len: usize) -> Vec<u8> {
let mut result = vec![];
result.reserve(len);
unsafe {
result.set_len(len);
}
result[0] = index;
let old_key_start = 33 - len;
result[1..].clone_from_slice(&old_key[old_key_start..32]);
result
}
}
impl SimpleMigration for ToV6 {
fn columns(&self) -> Option<u32> { None }
fn version(&self) -> u32
|
/// Translate a single v5 extras entry into its v6 form.
///
/// Returns `None` to drop the entry (block log blooms are removed in v6),
/// otherwise the rewritten `(key, value)` pair. Keys that are not 33 bytes
/// long are passed through untouched. The last byte of a 33-byte v5 key is
/// its `ExtrasIndex`; v6 moves that index byte to the front.
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
// at this version all extras keys are 33 bytes long.
if key.len() == 33 {
// block details key changes:
// - index is moved to the front
if key[32] == 0 {
return Some((self.migrate_old_key(key, 0, 33), value));
}
// block hash key changes:
// - key is shorter 33 -> 5 bytes
// - index is moved to the front
if key[32] == 1 {
return Some((self.migrate_old_key(key, 1, 5), value));
}
// transaction addresses changes:
// - index is moved to the front
if key[32] == 2 {
return Some((self.migrate_old_key(key, 2, 33), value));
}
// block log blooms are removed
if key[32] == 3 {
return None;
}
// blocks blooms key changes:
// - key is shorter 33 -> 6 bytes
// - index is moved to the front
// - index is changed 4 -> 3
if key[32] == 4 {
// v5 stored this key byte-reversed (reason unknown upstream), so index
// from the reversed copy.
let reverse = key.into_iter().rev().collect::<Vec<_>>();
let mut result = [0u8; 6];
// new extras index is 3
result[0] = 3;
// 9th (+ prefix) byte was the level. Now it's second.
result[1] = reverse[9];
result[2] = reverse[4];
result[3] = reverse[3];
result[4] = reverse[2];
result[5] = reverse[1];
return Some((result.to_vec(), value));
}
// blocks receipts key changes:
// - index is moved to the front
// - index is changed 5 -> 4
if key[32] == 5 {
return Some((self.migrate_old_key(key, 4, 33), value));
}
}
Some((key, value))
}
}
|
{ 6 }
|
identifier_body
|
v6.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::migration::SimpleMigration;
/// This migration reduces the sizes of keys and moves `ExtrasIndex` byte from back to the front.
pub struct ToV6;

impl ToV6 {
    /// Rebuild an extras key: put `index` at the front and copy the last
    /// `len - 1` bytes of the 32-byte hash portion of `old_key` after it.
    ///
    /// `old_key` is expected to be 33 bytes long (32-byte hash followed by a
    /// trailing index byte); `len` is the total length of the new key,
    /// including the leading index byte.
    fn migrate_old_key(&self, old_key: Vec<u8>, index: u8, len: usize) -> Vec<u8> {
        // Zero-initialize instead of `reserve` + unsafe `set_len`, which left
        // the buffer uninitialized (undefined behaviour) before it was filled.
        let mut result = vec![0u8; len];
        result[0] = index;
        // Keep only the tail of the hash part of the old key.
        let old_key_start = 33 - len;
        result[1..].copy_from_slice(&old_key[old_key_start..32]);
        result
    }
}
impl SimpleMigration for ToV6 {
// NOTE(review): `None` presumably means no column families — confirm against
// `SimpleMigration`'s contract.
fn columns(&self) -> Option<u32> { None }
// This migration produces database version 6.
fn version(&self) -> u32 { 6 }
fn simple_migrate(&mut self, key: Vec<u8>, value: Vec<u8>) -> Option<(Vec<u8>, Vec<u8>)> {
//// at this version all extras keys are 33 bytes long.
if key.len() == 33 {
// block details key changes:
// - index is moved to the front
if key[32] == 0 {
return Some((self.migrate_old_key(key, 0, 33), value));
}
// block hash key changes:
// - key is shorter 33 -> 5 bytes
// - index is moved to the front
if key[32] == 1 {
return Some((self.migrate_old_key(key, 1, 5), value));
}
// transaction addresses changes:
// - index is moved to the front
if key[32] == 2 {
return Some((self.migrate_old_key(key, 2, 33), value));
}
// block log blooms are removed
if key[32] == 3 {
return None;
}
// blocks blooms key changes:
// - key is shorter 33 -> 6 bytes
// - index is moved to the front
// - index is changed 4 -> 3
if key[32] == 4 {
// i have no idea why it was reversed
let reverse = key.into_iter().rev().collect::<Vec<_>>();
let mut result = [0u8; 6];
// new extras index is 3
result[0] = 3;
|
result[2] = reverse[4];
result[3] = reverse[3];
result[4] = reverse[2];
result[5] = reverse[1];
return Some((result.to_vec(), value));
}
// blocks receipts key changes:
// - index is moved to the front
// - index is changed 5 -> 4
if key[32] == 5 {
return Some((self.migrate_old_key(key, 4, 33), value));
}
}
Some((key, value))
}
}
|
// 9th (+ prefix) byte was the level. Now it's second.
result[1] = reverse[9];
|
random_line_split
|
controller.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate serde_json;
extern crate mio;
use adapters::AdapterManager;
use config_store::ConfigService;
use foxbox_taxonomy::manager::AdapterManager as TaxoManager;
use foxbox_users::UsersManager;
use http_server::HttpServer;
use iron::{Request, Response, IronResult};
use iron::headers::{ ContentType, AccessControlAllowOrigin };
use iron::status::Status;
use profile_service::{ ProfilePath, ProfileService };
use service::{ Service, ServiceAdapter, ServiceProperties };
use std::collections::hash_map::HashMap;
use std::io;
use std::net::SocketAddr;
use std::net::ToSocketAddrs;
use std::path::PathBuf;
use std::sync::{ Arc, Mutex };
use std::sync::atomic::{ AtomicBool, Ordering };
use std::vec::IntoIter;
use upnp::UpnpManager;
use tls::{ CertificateManager, CertificateRecord, SniSslContextProvider, TlsOption };
use traits::Controller;
use ws_server::WsServer;
use ws;
/// Shared state and configuration for a running FoxBox controller.
///
/// Cloning is cheap: the mutable collections and managers are behind `Arc`s,
/// so every clone observes the same underlying state.
#[derive(Clone)]
pub struct FoxBox {
pub verbose: bool,
// Whether TLS is enabled; see `get_tls_enabled`.
tls_option: TlsOption,
certificate_manager: CertificateManager,
// Host name used when building service root URLs.
hostname: String,
http_port: u16,
ws_port: u16,
// Registered services, keyed by service id.
services: Arc<Mutex<HashMap<String, Box<Service>>>>,
// Open websocket senders, keyed by connection token.
websockets: Arc<Mutex<HashMap<ws::util::Token, ws::Sender>>>,
pub config: Arc<ConfigService>,
upnp: Arc<UpnpManager>,
users_manager: Arc<UsersManager>,
profile_service: Arc<ProfileService>,
}
impl FoxBox {
/// Build a controller from the given network settings and profile location.
///
/// Reads (or writes a default for) the `certificate_directory` key of the
/// `foxbox` config section, and opens the users database under the profile
/// directory.
pub fn new(verbose: bool,
hostname: String,
http_port: u16,
ws_port: u16,
tls_option: TlsOption,
profile_path: ProfilePath) -> Self {
let profile_service = ProfileService::new(profile_path);
let config = Arc::new(ConfigService::new(&profile_service.path_for("foxbox.conf")));
// Default certificate directory is "certs/" relative to the process cwd.
let certificate_directory = PathBuf::from(
config.get_or_set_default("foxbox", "certificate_directory", "certs/"));
FoxBox {
certificate_manager: CertificateManager::new(certificate_directory, Box::new(SniSslContextProvider::new())),
tls_option: tls_option,
services: Arc::new(Mutex::new(HashMap::new())),
websockets: Arc::new(Mutex::new(HashMap::new())),
verbose: verbose,
hostname: hostname,
http_port: http_port,
ws_port: ws_port,
config: config,
upnp: Arc::new(UpnpManager::new()),
users_manager: Arc::new(UsersManager::new(&profile_service.path_for("users_db.sqlite"))),
profile_service: Arc::new(profile_service)
}
}
}
impl Controller for FoxBox {
/// Start all subsystems (UPnP, adapters, HTTP, websockets) and block in the
/// mio event loop until `shutdown_flag` is observed set, then stop adapters
/// and every registered service.
fn run(&mut self, shutdown_flag: &AtomicBool) {
debug!("Starting controller");
let mut event_loop = mio::EventLoop::new().unwrap();
{
// `get_mut` requires this to be the only Arc handle to `upnp` at this
// point; panics otherwise.
Arc::get_mut(&mut self.upnp).unwrap().start().unwrap();
}
// Create the taxonomy based AdapterManager
let taxo_manager = Arc::new(TaxoManager::new());
let mut adapter_manager = AdapterManager::new(self.clone());
adapter_manager.start(&taxo_manager);
HttpServer::new(self.clone()).start(&taxo_manager);
WsServer::start(self.clone());
// Kick off an unrestricted UPnP device search.
self.upnp.search(None).unwrap();
// Blocks here; `FoxBoxEventLoop::tick` shuts the loop down on the flag.
event_loop.run(&mut FoxBoxEventLoop {
controller: self.clone(),
shutdown_flag: &shutdown_flag
}).unwrap();
debug!("Stopping controller");
adapter_manager.stop();
for service in self.services.lock().unwrap().values() {
service.stop();
}
}
/// Route an HTTP request to the service registered under `id`.
///
/// Unknown ids produce a 400 JSON error (`NoSuchService`) with CORS headers;
/// known services handle the request themselves.
fn dispatch_service_request(&self, id: String, request: &mut Request) -> IronResult<Response> {
let services = self.services.lock().unwrap();
match services.get(&id) {
None => {
let mut response = Response::with(json!({ error: "NoSuchService", id: id }));
response.status = Some(Status::BadRequest);
response.headers.set(AccessControlAllowOrigin::Any);
response.headers.set(ContentType::json());
Ok(response)
}
Some(service) => {
service.process_request(request)
}
}
}
/// Notify websocket clients that an adapter has started.
fn adapter_started(&self, adapter: String) {
self.broadcast_to_websockets(json_value!({ type: "core/adapter/start", name: adapter }));
}
/// Forward an adapter-originated JSON notification to websocket clients.
fn adapter_notification(&self, notification: serde_json::value::Value) {
self.broadcast_to_websockets(json_value!({ type: "core/adapter/notification", message: notification }));
}
/// Register a service under its own id and announce it over websockets.
/// An existing service with the same id is silently replaced.
fn add_service(&self, service: Box<Service>) {
let mut services = self.services.lock().unwrap();
let service_id = service.get_properties().id;
services.insert(service_id.clone(), service);
self.broadcast_to_websockets(json_value!({ type: "core/service/start", id: service_id }));
}
/// Unregister the service with `id` (no-op if absent) and announce the stop.
fn remove_service(&self, id: String) {
let mut services = self.services.lock().unwrap();
services.remove(&id);
self.broadcast_to_websockets(json_value!({ type: "core/service/stop", id: id }));
}
/// Number of currently registered services.
fn services_count(&self) -> usize {
let services = self.services.lock().unwrap();
services.len()
}
/// Clone of the properties of service `id`, or `None` if unknown.
fn get_service_properties(&self, id: String) -> Option<ServiceProperties> {
let services = self.services.lock().unwrap();
services.get(&id).map(|v| v.get_properties().clone() )
}
/// Serialize all registered services as a JSON array.
fn services_as_json(&self) -> Result<String, serde_json::error::Error> {
let services = self.services.lock().unwrap();
let mut array: Vec<&Box<Service>> = vec!();
for service in services.values() {
array.push(service);
}
serde_json::to_string(&array)
}
/// Root URL for a service over HTTP(S); scheme follows the TLS setting.
fn get_http_root_for_service(&self, service_id: String) -> String {
let scheme = if self.get_tls_enabled() { "https" } else { "http" };
format!("{}://{}:{}/services/{}/", scheme, self.hostname, self.http_port, service_id)
}
/// Root URL for a service over websockets (always plain `ws://`).
fn get_ws_root_for_service(&self, service_id: String) -> String {
format!("ws://{}:{}/services/{}/", self.hostname, self.ws_port, service_id)
}
/// Bind addresses for the HTTP server ("::" binds all interfaces).
fn http_as_addrs(&self) -> Result<IntoIter<SocketAddr>, io::Error> {
("::", self.http_port).to_socket_addrs()
}
/// Bind addresses for the websocket server ("::" binds all interfaces).
fn ws_as_addrs(&self) -> Result<IntoIter<SocketAddr>, io::Error> {
("::", self.ws_port).to_socket_addrs()
}
/// Track a new websocket connection by its token.
fn add_websocket(&mut self, socket: ws::Sender) {
self.websockets.lock().unwrap().insert(socket.token(), socket);
}
/// Forget a websocket connection (no-op if already removed).
fn remove_websocket(&mut self, socket: ws::Sender) {
self.websockets.lock().unwrap().remove(&socket.token());
}
/// Serialize `data` and send it to every connected websocket.
/// Serialization failures degrade to sending "{}"; per-socket send errors
/// are logged and do not stop the broadcast.
fn broadcast_to_websockets(&self, data: serde_json::value::Value) {
let serialized = serde_json::to_string(&data).unwrap_or("{}".to_owned());
debug!("broadcast_to_websockets {}", serialized.clone())&;
for socket in self.websockets.lock().unwrap().values() {
match socket.send(serialized.clone()) {
Ok(_) => (),
Err(err) => error!("Error sending to socket: {}", err)
}
}
}
/// Shared handle to the configuration store.
fn get_config(&self) -> Arc<ConfigService> {
self.config.clone()
}
/// Borrow the profile service owning on-disk profile paths.
fn get_profile(&self) -> &ProfileService {
&self.profile_service
}
fn
|
(&self) -> Arc<UpnpManager> {
self.upnp.clone()
}
/// Shared handle to the users manager.
fn get_users_manager(&self) -> Arc<UsersManager> {
self.users_manager.clone()
}
/// Clone of the certificate manager (cheap if internally ref-counted —
/// NOTE(review): confirm `CertificateManager::clone` semantics).
fn get_certificate_manager(&self) -> CertificateManager {
self.certificate_manager.clone()
}
/// Every box should create a self signed certificate for a local name.
/// The fingerprint of that certificate becomes the box's identifier,
/// which is used to create the public DNS zone and local
/// (i.e. local.<fingerprint>.box.knilxof.org) and remote
/// (i.e. remote.<fingerprint>.box.knilxof.org) origins
fn get_box_certificate(&self) -> io::Result<CertificateRecord> {
self.certificate_manager.get_box_certificate()
}
/// True when the controller was constructed with TLS enabled.
fn get_tls_enabled(&self) -> bool {
self.tls_option == TlsOption::Enabled
}
/// Configured host name (owned copy).
fn get_hostname(&self) -> String {
self.hostname.clone()
}
}
/// mio handler whose only job is to poll `shutdown_flag` once per event-loop
/// tick and shut the loop down when it is set.
#[allow(dead_code)]
struct FoxBoxEventLoop<'a> {
controller: FoxBox,
shutdown_flag: &'a AtomicBool
}
impl<'a> mio::Handler for FoxBoxEventLoop<'a> {
type Timeout = ();
type Message = ();
// Called by mio after each event batch; ends `run()` on shutdown request.
fn tick(&mut self, event_loop: &mut mio::EventLoop<Self>) {
if self.shutdown_flag.load(Ordering::Acquire) {
event_loop.shutdown();
}
}
}
// Unit tests written in a describe!/it BDD macro DSL (presumably the
// `stainless` crate — confirm against Cargo.toml). `before_each` runs before
// every `it` block, giving each test a fresh temp profile and stub service.
#[cfg(test)]
describe! controller {
before_each {
use profile_service::ProfilePath;
use stubs::service::ServiceStub;
use tempdir::TempDir;
use tls::TlsOption;
use traits::Controller;
let profile_dir = TempDir::new_in("/tmp", "foxbox").unwrap();
let profile_path = String::from(profile_dir.into_path()
.to_str().unwrap());
let service = ServiceStub;
let controller = FoxBox::new(
false, "foxbox.local".to_owned(), 1234, 5678,
TlsOption::Disabled,
ProfilePath::Custom(profile_path));
}
describe! add_service {
it "should increase number of services" {
controller.add_service(Box::new(service));
assert_eq!(controller.services_count(), 1);
}
it "should make service available" {
controller.add_service(Box::new(service));
match controller.get_service_properties("1".to_owned()) {
Some(props) => {
assert_eq!(props.id, "1");
}
None => assert!(false, "No service with id 1")
}
}
it "should create https root if tls enabled and http root if disabled" {
controller.add_service(Box::new(service));
assert_eq!(controller.get_http_root_for_service("1".to_string()),
"http://foxbox.local:1234/services/1/");
// Second controller with TLS enabled must produce an https root.
let profile_dir = TempDir::new_in("/tmp", "foxbox").unwrap();
let profile_path = String::from(profile_dir.into_path()
.to_str().unwrap());
let controller = FoxBox::new(false, "foxbox.local".to_owned(),
1234, 5678, TlsOption::Enabled,
ProfilePath::Custom(profile_path));
controller.add_service(Box::new(service));
assert_eq!(controller.get_http_root_for_service("1".to_string()),
"https://foxbox.local:1234/services/1/");
}
it "should create ws root" {
controller.add_service(Box::new(service));
assert_eq!(controller.get_ws_root_for_service("1".to_string()),
"ws://foxbox.local:5678/services/1/");
}
it "should return a json" {
controller.add_service(Box::new(service));
match controller.services_as_json() {
Ok(txt) => assert_eq!(txt, "[{\"id\":\"1\",\"name\":\"dummy service\",\"description\":\"really nothing to see\",\"http_url\":\"2\",\"ws_url\":\"3\",\"properties\":{}}]"),
Err(err) => assert!(false, err)
}
}
}
it "should delete a service" {
controller.add_service(Box::new(service));
let id = "1".to_owned();
controller.remove_service(id);
assert_eq!(controller.services_count(), 0);
}
}
|
get_upnp_manager
|
identifier_name
|
controller.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate serde_json;
extern crate mio;
use adapters::AdapterManager;
use config_store::ConfigService;
use foxbox_taxonomy::manager::AdapterManager as TaxoManager;
use foxbox_users::UsersManager;
use http_server::HttpServer;
use iron::{Request, Response, IronResult};
use iron::headers::{ ContentType, AccessControlAllowOrigin };
use iron::status::Status;
use profile_service::{ ProfilePath, ProfileService };
use service::{ Service, ServiceAdapter, ServiceProperties };
use std::collections::hash_map::HashMap;
use std::io;
use std::net::SocketAddr;
use std::net::ToSocketAddrs;
use std::path::PathBuf;
use std::sync::{ Arc, Mutex };
use std::sync::atomic::{ AtomicBool, Ordering };
use std::vec::IntoIter;
use upnp::UpnpManager;
use tls::{ CertificateManager, CertificateRecord, SniSslContextProvider, TlsOption };
use traits::Controller;
use ws_server::WsServer;
use ws;
#[derive(Clone)]
pub struct FoxBox {
pub verbose: bool,
tls_option: TlsOption,
certificate_manager: CertificateManager,
hostname: String,
http_port: u16,
ws_port: u16,
services: Arc<Mutex<HashMap<String, Box<Service>>>>,
websockets: Arc<Mutex<HashMap<ws::util::Token, ws::Sender>>>,
pub config: Arc<ConfigService>,
upnp: Arc<UpnpManager>,
users_manager: Arc<UsersManager>,
profile_service: Arc<ProfileService>,
}
impl FoxBox {
pub fn new(verbose: bool,
hostname: String,
http_port: u16,
ws_port: u16,
tls_option: TlsOption,
profile_path: ProfilePath) -> Self {
let profile_service = ProfileService::new(profile_path);
let config = Arc::new(ConfigService::new(&profile_service.path_for("foxbox.conf")));
let certificate_directory = PathBuf::from(
config.get_or_set_default("foxbox", "certificate_directory", "certs/"));
FoxBox {
certificate_manager: CertificateManager::new(certificate_directory, Box::new(SniSslContextProvider::new())),
tls_option: tls_option,
services: Arc::new(Mutex::new(HashMap::new())),
websockets: Arc::new(Mutex::new(HashMap::new())),
verbose: verbose,
hostname: hostname,
http_port: http_port,
ws_port: ws_port,
config: config,
upnp: Arc::new(UpnpManager::new()),
users_manager: Arc::new(UsersManager::new(&profile_service.path_for("users_db.sqlite"))),
profile_service: Arc::new(profile_service)
}
}
}
impl Controller for FoxBox {
fn run(&mut self, shutdown_flag: &AtomicBool) {
debug!("Starting controller");
let mut event_loop = mio::EventLoop::new().unwrap();
{
Arc::get_mut(&mut self.upnp).unwrap().start().unwrap();
}
// Create the taxonomy based AdapterManager
let taxo_manager = Arc::new(TaxoManager::new());
let mut adapter_manager = AdapterManager::new(self.clone());
adapter_manager.start(&taxo_manager);
HttpServer::new(self.clone()).start(&taxo_manager);
WsServer::start(self.clone());
self.upnp.search(None).unwrap();
event_loop.run(&mut FoxBoxEventLoop {
controller: self.clone(),
shutdown_flag: &shutdown_flag
}).unwrap();
debug!("Stopping controller");
adapter_manager.stop();
for service in self.services.lock().unwrap().values() {
service.stop();
}
}
fn dispatch_service_request(&self, id: String, request: &mut Request) -> IronResult<Response> {
let services = self.services.lock().unwrap();
match services.get(&id) {
None => {
let mut response = Response::with(json!({ error: "NoSuchService", id: id }));
response.status = Some(Status::BadRequest);
response.headers.set(AccessControlAllowOrigin::Any);
response.headers.set(ContentType::json());
Ok(response)
}
Some(service) => {
service.process_request(request)
}
}
}
fn adapter_started(&self, adapter: String) {
self.broadcast_to_websockets(json_value!({ type: "core/adapter/start", name: adapter }));
}
fn adapter_notification(&self, notification: serde_json::value::Value) {
self.broadcast_to_websockets(json_value!({ type: "core/adapter/notification", message: notification }));
}
fn add_service(&self, service: Box<Service>) {
let mut services = self.services.lock().unwrap();
let service_id = service.get_properties().id;
services.insert(service_id.clone(), service);
self.broadcast_to_websockets(json_value!({ type: "core/service/start", id: service_id }));
}
fn remove_service(&self, id: String) {
let mut services = self.services.lock().unwrap();
services.remove(&id);
self.broadcast_to_websockets(json_value!({ type: "core/service/stop", id: id }));
}
fn services_count(&self) -> usize {
let services = self.services.lock().unwrap();
services.len()
}
fn get_service_properties(&self, id: String) -> Option<ServiceProperties> {
let services = self.services.lock().unwrap();
services.get(&id).map(|v| v.get_properties().clone() )
}
fn services_as_json(&self) -> Result<String, serde_json::error::Error> {
let services = self.services.lock().unwrap();
let mut array: Vec<&Box<Service>> = vec!();
for service in services.values() {
array.push(service);
}
serde_json::to_string(&array)
}
fn get_http_root_for_service(&self, service_id: String) -> String {
let scheme = if self.get_tls_enabled() { "https" } else { "http" };
format!("{}://{}:{}/services/{}/", scheme, self.hostname, self.http_port, service_id)
}
fn get_ws_root_for_service(&self, service_id: String) -> String {
format!("ws://{}:{}/services/{}/", self.hostname, self.ws_port, service_id)
}
fn http_as_addrs(&self) -> Result<IntoIter<SocketAddr>, io::Error> {
("::", self.http_port).to_socket_addrs()
}
fn ws_as_addrs(&self) -> Result<IntoIter<SocketAddr>, io::Error> {
("::", self.ws_port).to_socket_addrs()
}
fn add_websocket(&mut self, socket: ws::Sender) {
self.websockets.lock().unwrap().insert(socket.token(), socket);
}
fn remove_websocket(&mut self, socket: ws::Sender) {
self.websockets.lock().unwrap().remove(&socket.token());
}
fn broadcast_to_websockets(&self, data: serde_json::value::Value) {
let serialized = serde_json::to_string(&data).unwrap_or("{}".to_owned());
debug!("broadcast_to_websockets {}", serialized.clone());
for socket in self.websockets.lock().unwrap().values() {
match socket.send(serialized.clone()) {
Ok(_) => (),
Err(err) => error!("Error sending to socket: {}", err)
}
}
}
fn get_config(&self) -> Arc<ConfigService> {
self.config.clone()
}
fn get_profile(&self) -> &ProfileService {
&self.profile_service
}
/// Shared handle to the UPnP manager.
fn get_upnp_manager(&self) -> Arc<UpnpManager> {
self.upnp.clone()
}
fn get_users_manager(&self) -> Arc<UsersManager>
|
fn get_certificate_manager(&self) -> CertificateManager {
self.certificate_manager.clone()
}
/// Every box should create a self signed certificate for a local name.
/// The fingerprint of that certificate becomes the box's identifier,
/// which is used to create the public DNS zone and local
/// (i.e. local.<fingerprint>.box.knilxof.org) and remote
/// (i.e. remote.<fingerprint>.box.knilxof.org) origins
fn get_box_certificate(&self) -> io::Result<CertificateRecord> {
self.certificate_manager.get_box_certificate()
}
fn get_tls_enabled(&self) -> bool {
self.tls_option == TlsOption::Enabled
}
fn get_hostname(&self) -> String {
self.hostname.clone()
}
}
#[allow(dead_code)]
struct FoxBoxEventLoop<'a> {
controller: FoxBox,
shutdown_flag: &'a AtomicBool
}
impl<'a> mio::Handler for FoxBoxEventLoop<'a> {
type Timeout = ();
type Message = ();
fn tick(&mut self, event_loop: &mut mio::EventLoop<Self>) {
if self.shutdown_flag.load(Ordering::Acquire) {
event_loop.shutdown();
}
}
}
#[cfg(test)]
describe! controller {
before_each {
use profile_service::ProfilePath;
use stubs::service::ServiceStub;
use tempdir::TempDir;
use tls::TlsOption;
use traits::Controller;
let profile_dir = TempDir::new_in("/tmp", "foxbox").unwrap();
let profile_path = String::from(profile_dir.into_path()
.to_str().unwrap());
let service = ServiceStub;
let controller = FoxBox::new(
false, "foxbox.local".to_owned(), 1234, 5678,
TlsOption::Disabled,
ProfilePath::Custom(profile_path));
}
describe! add_service {
it "should increase number of services" {
controller.add_service(Box::new(service));
assert_eq!(controller.services_count(), 1);
}
it "should make service available" {
controller.add_service(Box::new(service));
match controller.get_service_properties("1".to_owned()) {
Some(props) => {
assert_eq!(props.id, "1");
}
None => assert!(false, "No service with id 1")
}
}
it "should create https root if tls enabled and http root if disabled" {
controller.add_service(Box::new(service));
assert_eq!(controller.get_http_root_for_service("1".to_string()),
"http://foxbox.local:1234/services/1/");
let profile_dir = TempDir::new_in("/tmp", "foxbox").unwrap();
let profile_path = String::from(profile_dir.into_path()
.to_str().unwrap());
let controller = FoxBox::new(false, "foxbox.local".to_owned(),
1234, 5678, TlsOption::Enabled,
ProfilePath::Custom(profile_path));
controller.add_service(Box::new(service));
assert_eq!(controller.get_http_root_for_service("1".to_string()),
"https://foxbox.local:1234/services/1/");
}
it "should create ws root" {
controller.add_service(Box::new(service));
assert_eq!(controller.get_ws_root_for_service("1".to_string()),
"ws://foxbox.local:5678/services/1/");
}
it "should return a json" {
controller.add_service(Box::new(service));
match controller.services_as_json() {
Ok(txt) => assert_eq!(txt, "[{\"id\":\"1\",\"name\":\"dummy service\",\"description\":\"really nothing to see\",\"http_url\":\"2\",\"ws_url\":\"3\",\"properties\":{}}]"),
Err(err) => assert!(false, err)
}
}
}
it "should delete a service" {
controller.add_service(Box::new(service));
let id = "1".to_owned();
controller.remove_service(id);
assert_eq!(controller.services_count(), 0);
}
}
|
{
self.users_manager.clone()
}
|
identifier_body
|
controller.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate serde_json;
extern crate mio;
use adapters::AdapterManager;
use config_store::ConfigService;
use foxbox_taxonomy::manager::AdapterManager as TaxoManager;
use foxbox_users::UsersManager;
use http_server::HttpServer;
use iron::{Request, Response, IronResult};
use iron::headers::{ ContentType, AccessControlAllowOrigin };
use iron::status::Status;
use profile_service::{ ProfilePath, ProfileService };
use service::{ Service, ServiceAdapter, ServiceProperties };
use std::collections::hash_map::HashMap;
use std::io;
use std::net::SocketAddr;
use std::net::ToSocketAddrs;
use std::path::PathBuf;
use std::sync::{ Arc, Mutex };
use std::sync::atomic::{ AtomicBool, Ordering };
use std::vec::IntoIter;
use upnp::UpnpManager;
use tls::{ CertificateManager, CertificateRecord, SniSslContextProvider, TlsOption };
use traits::Controller;
use ws_server::WsServer;
use ws;
#[derive(Clone)]
pub struct FoxBox {
pub verbose: bool,
tls_option: TlsOption,
certificate_manager: CertificateManager,
hostname: String,
http_port: u16,
ws_port: u16,
services: Arc<Mutex<HashMap<String, Box<Service>>>>,
websockets: Arc<Mutex<HashMap<ws::util::Token, ws::Sender>>>,
pub config: Arc<ConfigService>,
upnp: Arc<UpnpManager>,
users_manager: Arc<UsersManager>,
profile_service: Arc<ProfileService>,
}
impl FoxBox {
pub fn new(verbose: bool,
hostname: String,
http_port: u16,
ws_port: u16,
tls_option: TlsOption,
profile_path: ProfilePath) -> Self {
let profile_service = ProfileService::new(profile_path);
let config = Arc::new(ConfigService::new(&profile_service.path_for("foxbox.conf")));
let certificate_directory = PathBuf::from(
config.get_or_set_default("foxbox", "certificate_directory", "certs/"));
FoxBox {
certificate_manager: CertificateManager::new(certificate_directory, Box::new(SniSslContextProvider::new())),
tls_option: tls_option,
services: Arc::new(Mutex::new(HashMap::new())),
websockets: Arc::new(Mutex::new(HashMap::new())),
verbose: verbose,
hostname: hostname,
http_port: http_port,
ws_port: ws_port,
config: config,
upnp: Arc::new(UpnpManager::new()),
users_manager: Arc::new(UsersManager::new(&profile_service.path_for("users_db.sqlite"))),
profile_service: Arc::new(profile_service)
}
}
}
impl Controller for FoxBox {
fn run(&mut self, shutdown_flag: &AtomicBool) {
debug!("Starting controller");
let mut event_loop = mio::EventLoop::new().unwrap();
{
Arc::get_mut(&mut self.upnp).unwrap().start().unwrap();
}
// Create the taxonomy based AdapterManager
let taxo_manager = Arc::new(TaxoManager::new());
let mut adapter_manager = AdapterManager::new(self.clone());
adapter_manager.start(&taxo_manager);
HttpServer::new(self.clone()).start(&taxo_manager);
WsServer::start(self.clone());
self.upnp.search(None).unwrap();
event_loop.run(&mut FoxBoxEventLoop {
controller: self.clone(),
shutdown_flag: &shutdown_flag
}).unwrap();
debug!("Stopping controller");
adapter_manager.stop();
for service in self.services.lock().unwrap().values() {
service.stop();
}
}
fn dispatch_service_request(&self, id: String, request: &mut Request) -> IronResult<Response> {
let services = self.services.lock().unwrap();
match services.get(&id) {
None => {
let mut response = Response::with(json!({ error: "NoSuchService", id: id }));
response.status = Some(Status::BadRequest);
response.headers.set(AccessControlAllowOrigin::Any);
response.headers.set(ContentType::json());
Ok(response)
}
Some(service) => {
service.process_request(request)
}
}
}
fn adapter_started(&self, adapter: String) {
self.broadcast_to_websockets(json_value!({ type: "core/adapter/start", name: adapter }));
}
fn adapter_notification(&self, notification: serde_json::value::Value) {
self.broadcast_to_websockets(json_value!({ type: "core/adapter/notification", message: notification }));
}
fn add_service(&self, service: Box<Service>) {
let mut services = self.services.lock().unwrap();
let service_id = service.get_properties().id;
services.insert(service_id.clone(), service);
self.broadcast_to_websockets(json_value!({ type: "core/service/start", id: service_id }));
}
fn remove_service(&self, id: String) {
let mut services = self.services.lock().unwrap();
services.remove(&id);
self.broadcast_to_websockets(json_value!({ type: "core/service/stop", id: id }));
}
fn services_count(&self) -> usize {
let services = self.services.lock().unwrap();
services.len()
}
fn get_service_properties(&self, id: String) -> Option<ServiceProperties> {
let services = self.services.lock().unwrap();
services.get(&id).map(|v| v.get_properties().clone() )
}
fn services_as_json(&self) -> Result<String, serde_json::error::Error> {
|
array.push(service);
}
serde_json::to_string(&array)
}
fn get_http_root_for_service(&self, service_id: String) -> String {
let scheme = if self.get_tls_enabled() { "https" } else { "http" };
format!("{}://{}:{}/services/{}/", scheme, self.hostname, self.http_port, service_id)
}
fn get_ws_root_for_service(&self, service_id: String) -> String {
format!("ws://{}:{}/services/{}/", self.hostname, self.ws_port, service_id)
}
fn http_as_addrs(&self) -> Result<IntoIter<SocketAddr>, io::Error> {
("::", self.http_port).to_socket_addrs()
}
fn ws_as_addrs(&self) -> Result<IntoIter<SocketAddr>, io::Error> {
("::", self.ws_port).to_socket_addrs()
}
fn add_websocket(&mut self, socket: ws::Sender) {
self.websockets.lock().unwrap().insert(socket.token(), socket);
}
fn remove_websocket(&mut self, socket: ws::Sender) {
self.websockets.lock().unwrap().remove(&socket.token());
}
fn broadcast_to_websockets(&self, data: serde_json::value::Value) {
let serialized = serde_json::to_string(&data).unwrap_or("{}".to_owned());
debug!("broadcast_to_websockets {}", serialized.clone());
for socket in self.websockets.lock().unwrap().values() {
match socket.send(serialized.clone()) {
Ok(_) => (),
Err(err) => error!("Error sending to socket: {}", err)
}
}
}
fn get_config(&self) -> Arc<ConfigService> {
self.config.clone()
}
fn get_profile(&self) -> &ProfileService {
&self.profile_service
}
fn get_upnp_manager(&self) -> Arc<UpnpManager> {
self.upnp.clone()
}
/// Shared handle to the users manager.
fn get_users_manager(&self) -> Arc<UsersManager> {
self.users_manager.clone()
}
fn get_certificate_manager(&self) -> CertificateManager {
self.certificate_manager.clone()
}
/// Every box should create a self signed certificate for a local name.
/// The fingerprint of that certificate becomes the box's identifier,
/// which is used to create the public DNS zone and local
/// (i.e. local.<fingerprint>.box.knilxof.org) and remote
/// (i.e. remote.<fingerprint>.box.knilxof.org) origins
fn get_box_certificate(&self) -> io::Result<CertificateRecord> {
self.certificate_manager.get_box_certificate()
}
fn get_tls_enabled(&self) -> bool {
self.tls_option == TlsOption::Enabled
}
fn get_hostname(&self) -> String {
self.hostname.clone()
}
}
#[allow(dead_code)]
struct FoxBoxEventLoop<'a> {
controller: FoxBox,
shutdown_flag: &'a AtomicBool
}
impl<'a> mio::Handler for FoxBoxEventLoop<'a> {
type Timeout = ();
type Message = ();
fn tick(&mut self, event_loop: &mut mio::EventLoop<Self>) {
if self.shutdown_flag.load(Ordering::Acquire) {
event_loop.shutdown();
}
}
}
#[cfg(test)]
describe! controller {
before_each {
use profile_service::ProfilePath;
use stubs::service::ServiceStub;
use tempdir::TempDir;
use tls::TlsOption;
use traits::Controller;
let profile_dir = TempDir::new_in("/tmp", "foxbox").unwrap();
let profile_path = String::from(profile_dir.into_path()
.to_str().unwrap());
let service = ServiceStub;
let controller = FoxBox::new(
false, "foxbox.local".to_owned(), 1234, 5678,
TlsOption::Disabled,
ProfilePath::Custom(profile_path));
}
describe! add_service {
it "should increase number of services" {
controller.add_service(Box::new(service));
assert_eq!(controller.services_count(), 1);
}
it "should make service available" {
controller.add_service(Box::new(service));
match controller.get_service_properties("1".to_owned()) {
Some(props) => {
assert_eq!(props.id, "1");
}
None => assert!(false, "No service with id 1")
}
}
it "should create https root if tls enabled and http root if disabled" {
controller.add_service(Box::new(service));
assert_eq!(controller.get_http_root_for_service("1".to_string()),
"http://foxbox.local:1234/services/1/");
let profile_dir = TempDir::new_in("/tmp", "foxbox").unwrap();
let profile_path = String::from(profile_dir.into_path()
.to_str().unwrap());
let controller = FoxBox::new(false, "foxbox.local".to_owned(),
1234, 5678, TlsOption::Enabled,
ProfilePath::Custom(profile_path));
controller.add_service(Box::new(service));
assert_eq!(controller.get_http_root_for_service("1".to_string()),
"https://foxbox.local:1234/services/1/");
}
it "should create ws root" {
controller.add_service(Box::new(service));
assert_eq!(controller.get_ws_root_for_service("1".to_string()),
"ws://foxbox.local:5678/services/1/");
}
it "should return a json" {
controller.add_service(Box::new(service));
match controller.services_as_json() {
Ok(txt) => assert_eq!(txt, "[{\"id\":\"1\",\"name\":\"dummy service\",\"description\":\"really nothing to see\",\"http_url\":\"2\",\"ws_url\":\"3\",\"properties\":{}}]"),
Err(err) => assert!(false, err)
}
}
}
it "should delete a service" {
controller.add_service(Box::new(service));
let id = "1".to_owned();
controller.remove_service(id);
assert_eq!(controller.services_count(), 0);
}
}
|
let services = self.services.lock().unwrap();
let mut array: Vec<&Box<Service>> = vec!();
for service in services.values() {
|
random_line_split
|
expand.rs
|
#![crate_name = "expand"]
#![feature(unicode)]
/*
* This file is part of the uutils coreutils package.
*
* (c) Virgile Andreani <[email protected]>
* (c) kwantam <[email protected]>
* 20150428 updated to work with both UTF-8 and non-UTF-8 encodings
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
extern crate libc;
extern crate rustc_unicode;
extern crate unicode_width;
use std::fs::File;
use std::io::{stdin, stdout, BufRead, BufReader, BufWriter, Read, Write};
use std::iter::repeat;
use std::str::from_utf8;
use rustc_unicode::str::utf8_char_width;
use unicode_width::UnicodeWidthChar;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
static NAME: &'static str = "expand";
static VERSION: &'static str = "0.0.1";
static DEFAULT_TABSTOP: usize = 8;
fn tabstops_parse(s: String) -> Vec<usize> {
let words = s.split(',').collect::<Vec<&str>>();
let nums = words.into_iter()
.map(|sn| sn.parse::<usize>()
.unwrap_or_else(
|_| crash!(1, "{}\n", "tab size contains invalid character(s)"))
)
.collect::<Vec<usize>>();
if nums.iter().any(|&n| n == 0) {
crash!(1, "{}\n", "tab size cannot be 0");
}
match nums.iter().fold((true, 0), |(acc, last), &n| (acc && last <= n, n)) {
(false, _) => crash!(1, "{}\n", "tab sizes must be ascending"),
_ => {}
}
nums
}
struct Options {
files: Vec<String>,
tabstops: Vec<usize>,
tspaces: String,
iflag: bool,
uflag: bool,
}
impl Options {
fn new(matches: getopts::Matches) -> Options {
let tabstops = match matches.opt_str("t") {
None => vec!(DEFAULT_TABSTOP),
Some(s) => tabstops_parse(s)
};
let iflag = matches.opt_present("i");
let uflag =!matches.opt_present("U");
// avoid allocations when dumping out long sequences of spaces
// by precomputing the longest string of spaces we will ever need
let nspaces = tabstops.iter().scan(0, |pr,&it| {
let ret = Some(it - *pr);
*pr = it;
ret
}).max().unwrap(); // length of tabstops is guaranteed >= 1
let tspaces = repeat(' ').take(nspaces).collect();
let files =
if matches.free.is_empty() {
vec!("-".to_string())
} else {
matches.free
};
Options { files: files, tabstops: tabstops, tspaces: tspaces, iflag: iflag, uflag: uflag }
}
}
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("i", "initial", "do not convert tabs after non blanks");
opts.optopt("t", "tabs", "have tabs NUMBER characters apart, not 8", "NUMBER");
opts.optopt("t", "tabs", "use comma separated list of explicit tab positions", "LIST");
opts.optflag("U", "no-utf8", "interpret input file as 8-bit ASCII rather than UTF-8");
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => crash!(1, "{}", f)
};
if matches.opt_present("help") {
println!("Usage: {} [OPTION]... [FILE]...", NAME);
println!("{}", opts.usage(
"Convert tabs in each FILE to spaces, writing to standard output.\n\
With no FILE, or when FILE is -, read standard input."));
return 0;
}
if matches.opt_present("V") {
println!("{} {}", NAME, VERSION);
return 0;
}
expand(Options::new(matches));
0
}
fn open(path: String) -> BufReader<Box<Read+'static>> {
let file_buf;
if path == "-" {
BufReader::new(Box::new(stdin()) as Box<Read>)
} else {
file_buf = match File::open(&path[..]) {
Ok(a) => a,
Err(e) => crash!(1, "{}: {}\n", &path[..], e),
};
BufReader::new(Box::new(file_buf) as Box<Read>)
}
}
fn next_tabstop(tabstops: &[usize], col: usize) -> usize
|
#[derive(PartialEq, Eq, Debug)]
enum CharType {
Backspace,
Tab,
Other,
}
fn expand(options: Options) {
use self::CharType::*;
let mut output = BufWriter::new(stdout());
let ts = options.tabstops.as_ref();
let mut buf = Vec::new();
for file in options.files.into_iter() {
let mut fh = open(file);
while match fh.read_until('\n' as u8, &mut buf) {
Ok(s) => s > 0,
Err(_) => buf.len() > 0,
} {
let mut col = 0;
let mut byte = 0;
let mut init = true;
while byte < buf.len() {
let (ctype, cwidth, nbytes) = if options.uflag {
let nbytes = utf8_char_width(buf[byte]);
if byte + nbytes > buf.len() {
// don't overrun buffer because of invalid UTF-8
(Other, 1, 1)
} else if let Ok(t) = from_utf8(&buf[byte..byte+nbytes]) {
match t.chars().next() {
Some('\t') => (Tab, 0, nbytes),
Some('\x08') => (Backspace, 0, nbytes),
Some(c) => (Other, UnicodeWidthChar::width(c).unwrap_or(0), nbytes),
None => { // no valid char at start of t, so take 1 byte
(Other, 1, 1)
},
}
} else {
(Other, 1, 1) // implicit assumption: non-UTF-8 char is 1 col wide
}
} else {
(match buf[byte] { // always take exactly 1 byte in strict ASCII mode
0x09 => Tab,
0x08 => Backspace,
_ => Other,
}, 1, 1)
};
// figure out how many columns this char takes up
match ctype {
Tab => {
// figure out how many spaces to the next tabstop
let nts = next_tabstop(ts, col);
col += nts;
// now dump out either spaces if we're expanding, or a literal tab if we're not
if init ||!options.iflag {
safe_unwrap!(output.write_all(&options.tspaces[..nts].as_bytes()));
} else {
safe_unwrap!(output.write_all(&buf[byte..byte+nbytes]));
}
},
_ => {
col = if ctype == Other {
col + cwidth
} else if col > 0 {
col - 1
} else {
0
};
// if we're writing anything other than a space, then we're
// done with the line's leading spaces
if buf[byte]!= 0x20 {
init = false;
}
safe_unwrap!(output.write_all(&buf[byte..byte+nbytes]));
},
}
byte += nbytes; // advance the pointer
}
buf.truncate(0); // clear the buffer
}
}
}
|
{
if tabstops.len() == 1 {
tabstops[0] - col % tabstops[0]
} else {
match tabstops.iter().skip_while(|&&t| t <= col).next() {
Some(t) => t - col,
None => 1,
}
}
}
|
identifier_body
|
expand.rs
|
#![crate_name = "expand"]
#![feature(unicode)]
/*
* This file is part of the uutils coreutils package.
*
* (c) Virgile Andreani <[email protected]>
* (c) kwantam <[email protected]>
* 20150428 updated to work with both UTF-8 and non-UTF-8 encodings
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
extern crate libc;
extern crate rustc_unicode;
extern crate unicode_width;
use std::fs::File;
use std::io::{stdin, stdout, BufRead, BufReader, BufWriter, Read, Write};
use std::iter::repeat;
use std::str::from_utf8;
use rustc_unicode::str::utf8_char_width;
use unicode_width::UnicodeWidthChar;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
static NAME: &'static str = "expand";
static VERSION: &'static str = "0.0.1";
static DEFAULT_TABSTOP: usize = 8;
fn tabstops_parse(s: String) -> Vec<usize> {
let words = s.split(',').collect::<Vec<&str>>();
let nums = words.into_iter()
.map(|sn| sn.parse::<usize>()
.unwrap_or_else(
|_| crash!(1, "{}\n", "tab size contains invalid character(s)"))
)
.collect::<Vec<usize>>();
if nums.iter().any(|&n| n == 0) {
crash!(1, "{}\n", "tab size cannot be 0");
|
match nums.iter().fold((true, 0), |(acc, last), &n| (acc && last <= n, n)) {
(false, _) => crash!(1, "{}\n", "tab sizes must be ascending"),
_ => {}
}
nums
}
struct Options {
files: Vec<String>,
tabstops: Vec<usize>,
tspaces: String,
iflag: bool,
uflag: bool,
}
impl Options {
fn new(matches: getopts::Matches) -> Options {
let tabstops = match matches.opt_str("t") {
None => vec!(DEFAULT_TABSTOP),
Some(s) => tabstops_parse(s)
};
let iflag = matches.opt_present("i");
let uflag =!matches.opt_present("U");
// avoid allocations when dumping out long sequences of spaces
// by precomputing the longest string of spaces we will ever need
let nspaces = tabstops.iter().scan(0, |pr,&it| {
let ret = Some(it - *pr);
*pr = it;
ret
}).max().unwrap(); // length of tabstops is guaranteed >= 1
let tspaces = repeat(' ').take(nspaces).collect();
let files =
if matches.free.is_empty() {
vec!("-".to_string())
} else {
matches.free
};
Options { files: files, tabstops: tabstops, tspaces: tspaces, iflag: iflag, uflag: uflag }
}
}
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("i", "initial", "do not convert tabs after non blanks");
opts.optopt("t", "tabs", "have tabs NUMBER characters apart, not 8", "NUMBER");
opts.optopt("t", "tabs", "use comma separated list of explicit tab positions", "LIST");
opts.optflag("U", "no-utf8", "interpret input file as 8-bit ASCII rather than UTF-8");
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => crash!(1, "{}", f)
};
if matches.opt_present("help") {
println!("Usage: {} [OPTION]... [FILE]...", NAME);
println!("{}", opts.usage(
"Convert tabs in each FILE to spaces, writing to standard output.\n\
With no FILE, or when FILE is -, read standard input."));
return 0;
}
if matches.opt_present("V") {
println!("{} {}", NAME, VERSION);
return 0;
}
expand(Options::new(matches));
0
}
fn open(path: String) -> BufReader<Box<Read+'static>> {
let file_buf;
if path == "-" {
BufReader::new(Box::new(stdin()) as Box<Read>)
} else {
file_buf = match File::open(&path[..]) {
Ok(a) => a,
Err(e) => crash!(1, "{}: {}\n", &path[..], e),
};
BufReader::new(Box::new(file_buf) as Box<Read>)
}
}
fn next_tabstop(tabstops: &[usize], col: usize) -> usize {
if tabstops.len() == 1 {
tabstops[0] - col % tabstops[0]
} else {
match tabstops.iter().skip_while(|&&t| t <= col).next() {
Some(t) => t - col,
None => 1,
}
}
}
#[derive(PartialEq, Eq, Debug)]
enum CharType {
Backspace,
Tab,
Other,
}
fn expand(options: Options) {
use self::CharType::*;
let mut output = BufWriter::new(stdout());
let ts = options.tabstops.as_ref();
let mut buf = Vec::new();
for file in options.files.into_iter() {
let mut fh = open(file);
while match fh.read_until('\n' as u8, &mut buf) {
Ok(s) => s > 0,
Err(_) => buf.len() > 0,
} {
let mut col = 0;
let mut byte = 0;
let mut init = true;
while byte < buf.len() {
let (ctype, cwidth, nbytes) = if options.uflag {
let nbytes = utf8_char_width(buf[byte]);
if byte + nbytes > buf.len() {
// don't overrun buffer because of invalid UTF-8
(Other, 1, 1)
} else if let Ok(t) = from_utf8(&buf[byte..byte+nbytes]) {
match t.chars().next() {
Some('\t') => (Tab, 0, nbytes),
Some('\x08') => (Backspace, 0, nbytes),
Some(c) => (Other, UnicodeWidthChar::width(c).unwrap_or(0), nbytes),
None => { // no valid char at start of t, so take 1 byte
(Other, 1, 1)
},
}
} else {
(Other, 1, 1) // implicit assumption: non-UTF-8 char is 1 col wide
}
} else {
(match buf[byte] { // always take exactly 1 byte in strict ASCII mode
0x09 => Tab,
0x08 => Backspace,
_ => Other,
}, 1, 1)
};
// figure out how many columns this char takes up
match ctype {
Tab => {
// figure out how many spaces to the next tabstop
let nts = next_tabstop(ts, col);
col += nts;
// now dump out either spaces if we're expanding, or a literal tab if we're not
if init ||!options.iflag {
safe_unwrap!(output.write_all(&options.tspaces[..nts].as_bytes()));
} else {
safe_unwrap!(output.write_all(&buf[byte..byte+nbytes]));
}
},
_ => {
col = if ctype == Other {
col + cwidth
} else if col > 0 {
col - 1
} else {
0
};
// if we're writing anything other than a space, then we're
// done with the line's leading spaces
if buf[byte]!= 0x20 {
init = false;
}
safe_unwrap!(output.write_all(&buf[byte..byte+nbytes]));
},
}
byte += nbytes; // advance the pointer
}
buf.truncate(0); // clear the buffer
}
}
}
|
}
|
random_line_split
|
expand.rs
|
#![crate_name = "expand"]
#![feature(unicode)]
/*
* This file is part of the uutils coreutils package.
*
* (c) Virgile Andreani <[email protected]>
* (c) kwantam <[email protected]>
* 20150428 updated to work with both UTF-8 and non-UTF-8 encodings
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
extern crate libc;
extern crate rustc_unicode;
extern crate unicode_width;
use std::fs::File;
use std::io::{stdin, stdout, BufRead, BufReader, BufWriter, Read, Write};
use std::iter::repeat;
use std::str::from_utf8;
use rustc_unicode::str::utf8_char_width;
use unicode_width::UnicodeWidthChar;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
static NAME: &'static str = "expand";
static VERSION: &'static str = "0.0.1";
static DEFAULT_TABSTOP: usize = 8;
fn tabstops_parse(s: String) -> Vec<usize> {
let words = s.split(',').collect::<Vec<&str>>();
let nums = words.into_iter()
.map(|sn| sn.parse::<usize>()
.unwrap_or_else(
|_| crash!(1, "{}\n", "tab size contains invalid character(s)"))
)
.collect::<Vec<usize>>();
if nums.iter().any(|&n| n == 0) {
crash!(1, "{}\n", "tab size cannot be 0");
}
match nums.iter().fold((true, 0), |(acc, last), &n| (acc && last <= n, n)) {
(false, _) => crash!(1, "{}\n", "tab sizes must be ascending"),
_ => {}
}
nums
}
struct Options {
files: Vec<String>,
tabstops: Vec<usize>,
tspaces: String,
iflag: bool,
uflag: bool,
}
impl Options {
fn new(matches: getopts::Matches) -> Options {
let tabstops = match matches.opt_str("t") {
None => vec!(DEFAULT_TABSTOP),
Some(s) => tabstops_parse(s)
};
let iflag = matches.opt_present("i");
let uflag =!matches.opt_present("U");
// avoid allocations when dumping out long sequences of spaces
// by precomputing the longest string of spaces we will ever need
let nspaces = tabstops.iter().scan(0, |pr,&it| {
let ret = Some(it - *pr);
*pr = it;
ret
}).max().unwrap(); // length of tabstops is guaranteed >= 1
let tspaces = repeat(' ').take(nspaces).collect();
let files =
if matches.free.is_empty() {
vec!("-".to_string())
} else {
matches.free
};
Options { files: files, tabstops: tabstops, tspaces: tspaces, iflag: iflag, uflag: uflag }
}
}
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("i", "initial", "do not convert tabs after non blanks");
opts.optopt("t", "tabs", "have tabs NUMBER characters apart, not 8", "NUMBER");
opts.optopt("t", "tabs", "use comma separated list of explicit tab positions", "LIST");
opts.optflag("U", "no-utf8", "interpret input file as 8-bit ASCII rather than UTF-8");
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => crash!(1, "{}", f)
};
if matches.opt_present("help") {
println!("Usage: {} [OPTION]... [FILE]...", NAME);
println!("{}", opts.usage(
"Convert tabs in each FILE to spaces, writing to standard output.\n\
With no FILE, or when FILE is -, read standard input."));
return 0;
}
if matches.opt_present("V") {
println!("{} {}", NAME, VERSION);
return 0;
}
expand(Options::new(matches));
0
}
fn open(path: String) -> BufReader<Box<Read+'static>> {
let file_buf;
if path == "-" {
BufReader::new(Box::new(stdin()) as Box<Read>)
} else {
file_buf = match File::open(&path[..]) {
Ok(a) => a,
Err(e) => crash!(1, "{}: {}\n", &path[..], e),
};
BufReader::new(Box::new(file_buf) as Box<Read>)
}
}
fn next_tabstop(tabstops: &[usize], col: usize) -> usize {
if tabstops.len() == 1 {
tabstops[0] - col % tabstops[0]
} else {
match tabstops.iter().skip_while(|&&t| t <= col).next() {
Some(t) => t - col,
None => 1,
}
}
}
#[derive(PartialEq, Eq, Debug)]
enum CharType {
Backspace,
Tab,
Other,
}
fn expand(options: Options) {
use self::CharType::*;
let mut output = BufWriter::new(stdout());
let ts = options.tabstops.as_ref();
let mut buf = Vec::new();
for file in options.files.into_iter() {
let mut fh = open(file);
while match fh.read_until('\n' as u8, &mut buf) {
Ok(s) => s > 0,
Err(_) => buf.len() > 0,
} {
let mut col = 0;
let mut byte = 0;
let mut init = true;
while byte < buf.len() {
let (ctype, cwidth, nbytes) = if options.uflag {
let nbytes = utf8_char_width(buf[byte]);
if byte + nbytes > buf.len() {
// don't overrun buffer because of invalid UTF-8
(Other, 1, 1)
} else if let Ok(t) = from_utf8(&buf[byte..byte+nbytes]) {
match t.chars().next() {
Some('\t') => (Tab, 0, nbytes),
Some('\x08') => (Backspace, 0, nbytes),
Some(c) => (Other, UnicodeWidthChar::width(c).unwrap_or(0), nbytes),
None => { // no valid char at start of t, so take 1 byte
(Other, 1, 1)
},
}
} else {
(Other, 1, 1) // implicit assumption: non-UTF-8 char is 1 col wide
}
} else {
(match buf[byte] { // always take exactly 1 byte in strict ASCII mode
0x09 => Tab,
0x08 => Backspace,
_ => Other,
}, 1, 1)
};
// figure out how many columns this char takes up
match ctype {
Tab => {
// figure out how many spaces to the next tabstop
let nts = next_tabstop(ts, col);
col += nts;
// now dump out either spaces if we're expanding, or a literal tab if we're not
if init ||!options.iflag {
safe_unwrap!(output.write_all(&options.tspaces[..nts].as_bytes()));
} else {
safe_unwrap!(output.write_all(&buf[byte..byte+nbytes]));
}
},
_ =>
|
,
}
byte += nbytes; // advance the pointer
}
buf.truncate(0); // clear the buffer
}
}
}
|
{
col = if ctype == Other {
col + cwidth
} else if col > 0 {
col - 1
} else {
0
};
// if we're writing anything other than a space, then we're
// done with the line's leading spaces
if buf[byte] != 0x20 {
init = false;
}
safe_unwrap!(output.write_all(&buf[byte..byte+nbytes]));
}
|
conditional_block
|
expand.rs
|
#![crate_name = "expand"]
#![feature(unicode)]
/*
* This file is part of the uutils coreutils package.
*
* (c) Virgile Andreani <[email protected]>
* (c) kwantam <[email protected]>
* 20150428 updated to work with both UTF-8 and non-UTF-8 encodings
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
extern crate libc;
extern crate rustc_unicode;
extern crate unicode_width;
use std::fs::File;
use std::io::{stdin, stdout, BufRead, BufReader, BufWriter, Read, Write};
use std::iter::repeat;
use std::str::from_utf8;
use rustc_unicode::str::utf8_char_width;
use unicode_width::UnicodeWidthChar;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
static NAME: &'static str = "expand";
static VERSION: &'static str = "0.0.1";
static DEFAULT_TABSTOP: usize = 8;
fn tabstops_parse(s: String) -> Vec<usize> {
let words = s.split(',').collect::<Vec<&str>>();
let nums = words.into_iter()
.map(|sn| sn.parse::<usize>()
.unwrap_or_else(
|_| crash!(1, "{}\n", "tab size contains invalid character(s)"))
)
.collect::<Vec<usize>>();
if nums.iter().any(|&n| n == 0) {
crash!(1, "{}\n", "tab size cannot be 0");
}
match nums.iter().fold((true, 0), |(acc, last), &n| (acc && last <= n, n)) {
(false, _) => crash!(1, "{}\n", "tab sizes must be ascending"),
_ => {}
}
nums
}
struct Options {
files: Vec<String>,
tabstops: Vec<usize>,
tspaces: String,
iflag: bool,
uflag: bool,
}
impl Options {
fn new(matches: getopts::Matches) -> Options {
let tabstops = match matches.opt_str("t") {
None => vec!(DEFAULT_TABSTOP),
Some(s) => tabstops_parse(s)
};
let iflag = matches.opt_present("i");
let uflag =!matches.opt_present("U");
// avoid allocations when dumping out long sequences of spaces
// by precomputing the longest string of spaces we will ever need
let nspaces = tabstops.iter().scan(0, |pr,&it| {
let ret = Some(it - *pr);
*pr = it;
ret
}).max().unwrap(); // length of tabstops is guaranteed >= 1
let tspaces = repeat(' ').take(nspaces).collect();
let files =
if matches.free.is_empty() {
vec!("-".to_string())
} else {
matches.free
};
Options { files: files, tabstops: tabstops, tspaces: tspaces, iflag: iflag, uflag: uflag }
}
}
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optflag("i", "initial", "do not convert tabs after non blanks");
opts.optopt("t", "tabs", "have tabs NUMBER characters apart, not 8", "NUMBER");
opts.optopt("t", "tabs", "use comma separated list of explicit tab positions", "LIST");
opts.optflag("U", "no-utf8", "interpret input file as 8-bit ASCII rather than UTF-8");
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => crash!(1, "{}", f)
};
if matches.opt_present("help") {
println!("Usage: {} [OPTION]... [FILE]...", NAME);
println!("{}", opts.usage(
"Convert tabs in each FILE to spaces, writing to standard output.\n\
With no FILE, or when FILE is -, read standard input."));
return 0;
}
if matches.opt_present("V") {
println!("{} {}", NAME, VERSION);
return 0;
}
expand(Options::new(matches));
0
}
fn open(path: String) -> BufReader<Box<Read+'static>> {
let file_buf;
if path == "-" {
BufReader::new(Box::new(stdin()) as Box<Read>)
} else {
file_buf = match File::open(&path[..]) {
Ok(a) => a,
Err(e) => crash!(1, "{}: {}\n", &path[..], e),
};
BufReader::new(Box::new(file_buf) as Box<Read>)
}
}
fn next_tabstop(tabstops: &[usize], col: usize) -> usize {
if tabstops.len() == 1 {
tabstops[0] - col % tabstops[0]
} else {
match tabstops.iter().skip_while(|&&t| t <= col).next() {
Some(t) => t - col,
None => 1,
}
}
}
#[derive(PartialEq, Eq, Debug)]
enum CharType {
Backspace,
Tab,
Other,
}
fn
|
(options: Options) {
use self::CharType::*;
let mut output = BufWriter::new(stdout());
let ts = options.tabstops.as_ref();
let mut buf = Vec::new();
for file in options.files.into_iter() {
let mut fh = open(file);
while match fh.read_until('\n' as u8, &mut buf) {
Ok(s) => s > 0,
Err(_) => buf.len() > 0,
} {
let mut col = 0;
let mut byte = 0;
let mut init = true;
while byte < buf.len() {
let (ctype, cwidth, nbytes) = if options.uflag {
let nbytes = utf8_char_width(buf[byte]);
if byte + nbytes > buf.len() {
// don't overrun buffer because of invalid UTF-8
(Other, 1, 1)
} else if let Ok(t) = from_utf8(&buf[byte..byte+nbytes]) {
match t.chars().next() {
Some('\t') => (Tab, 0, nbytes),
Some('\x08') => (Backspace, 0, nbytes),
Some(c) => (Other, UnicodeWidthChar::width(c).unwrap_or(0), nbytes),
None => { // no valid char at start of t, so take 1 byte
(Other, 1, 1)
},
}
} else {
(Other, 1, 1) // implicit assumption: non-UTF-8 char is 1 col wide
}
} else {
(match buf[byte] { // always take exactly 1 byte in strict ASCII mode
0x09 => Tab,
0x08 => Backspace,
_ => Other,
}, 1, 1)
};
// figure out how many columns this char takes up
match ctype {
Tab => {
// figure out how many spaces to the next tabstop
let nts = next_tabstop(ts, col);
col += nts;
// now dump out either spaces if we're expanding, or a literal tab if we're not
if init ||!options.iflag {
safe_unwrap!(output.write_all(&options.tspaces[..nts].as_bytes()));
} else {
safe_unwrap!(output.write_all(&buf[byte..byte+nbytes]));
}
},
_ => {
col = if ctype == Other {
col + cwidth
} else if col > 0 {
col - 1
} else {
0
};
// if we're writing anything other than a space, then we're
// done with the line's leading spaces
if buf[byte]!= 0x20 {
init = false;
}
safe_unwrap!(output.write_all(&buf[byte..byte+nbytes]));
},
}
byte += nbytes; // advance the pointer
}
buf.truncate(0); // clear the buffer
}
}
}
|
expand
|
identifier_name
|
htmldatalistelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLDataListElementBinding;
use crate::dom::bindings::codegen::Bindings::HTMLDataListElementBinding::HTMLDataListElementMethods;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::Document;
use crate::dom::element::Element;
use crate::dom::htmlcollection::{CollectionFilter, HTMLCollection};
use crate::dom::htmlelement::HTMLElement;
use crate::dom::htmloptionelement::HTMLOptionElement;
use crate::dom::node::{window_from_node, Node};
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLDataListElement {
htmlelement: HTMLElement,
}
impl HTMLDataListElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLDataListElement {
HTMLDataListElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
|
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLDataListElement> {
Node::reflect_node(
Box::new(HTMLDataListElement::new_inherited(
local_name, prefix, document,
)),
document,
HTMLDataListElementBinding::Wrap,
)
}
}
impl HTMLDataListElementMethods for HTMLDataListElement {
// https://html.spec.whatwg.org/multipage/#dom-datalist-options
fn Options(&self) -> DomRoot<HTMLCollection> {
#[derive(JSTraceable, MallocSizeOf)]
struct HTMLDataListOptionsFilter;
impl CollectionFilter for HTMLDataListOptionsFilter {
fn filter(&self, elem: &Element, _root: &Node) -> bool {
elem.is::<HTMLOptionElement>()
}
}
let filter = Box::new(HTMLDataListOptionsFilter);
let window = window_from_node(self);
HTMLCollection::create(&window, self.upcast(), filter)
}
}
|
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
|
random_line_split
|
htmldatalistelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLDataListElementBinding;
use crate::dom::bindings::codegen::Bindings::HTMLDataListElementBinding::HTMLDataListElementMethods;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::Document;
use crate::dom::element::Element;
use crate::dom::htmlcollection::{CollectionFilter, HTMLCollection};
use crate::dom::htmlelement::HTMLElement;
use crate::dom::htmloptionelement::HTMLOptionElement;
use crate::dom::node::{window_from_node, Node};
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLDataListElement {
htmlelement: HTMLElement,
}
impl HTMLDataListElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLDataListElement
|
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLDataListElement> {
Node::reflect_node(
Box::new(HTMLDataListElement::new_inherited(
local_name, prefix, document,
)),
document,
HTMLDataListElementBinding::Wrap,
)
}
}
impl HTMLDataListElementMethods for HTMLDataListElement {
// https://html.spec.whatwg.org/multipage/#dom-datalist-options
fn Options(&self) -> DomRoot<HTMLCollection> {
#[derive(JSTraceable, MallocSizeOf)]
struct HTMLDataListOptionsFilter;
impl CollectionFilter for HTMLDataListOptionsFilter {
fn filter(&self, elem: &Element, _root: &Node) -> bool {
elem.is::<HTMLOptionElement>()
}
}
let filter = Box::new(HTMLDataListOptionsFilter);
let window = window_from_node(self);
HTMLCollection::create(&window, self.upcast(), filter)
}
}
|
{
HTMLDataListElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
|
identifier_body
|
htmldatalistelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLDataListElementBinding;
use crate::dom::bindings::codegen::Bindings::HTMLDataListElementBinding::HTMLDataListElementMethods;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::DomRoot;
use crate::dom::document::Document;
use crate::dom::element::Element;
use crate::dom::htmlcollection::{CollectionFilter, HTMLCollection};
use crate::dom::htmlelement::HTMLElement;
use crate::dom::htmloptionelement::HTMLOptionElement;
use crate::dom::node::{window_from_node, Node};
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
#[dom_struct]
pub struct HTMLDataListElement {
htmlelement: HTMLElement,
}
impl HTMLDataListElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLDataListElement {
HTMLDataListElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn
|
(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLDataListElement> {
Node::reflect_node(
Box::new(HTMLDataListElement::new_inherited(
local_name, prefix, document,
)),
document,
HTMLDataListElementBinding::Wrap,
)
}
}
impl HTMLDataListElementMethods for HTMLDataListElement {
// https://html.spec.whatwg.org/multipage/#dom-datalist-options
fn Options(&self) -> DomRoot<HTMLCollection> {
#[derive(JSTraceable, MallocSizeOf)]
struct HTMLDataListOptionsFilter;
impl CollectionFilter for HTMLDataListOptionsFilter {
fn filter(&self, elem: &Element, _root: &Node) -> bool {
elem.is::<HTMLOptionElement>()
}
}
let filter = Box::new(HTMLDataListOptionsFilter);
let window = window_from_node(self);
HTMLCollection::create(&window, self.upcast(), filter)
}
}
|
new
|
identifier_name
|
else_if_without_else.rs
|
//! Lint on if expressions with an else if, but without a final else branch.
use clippy_utils::diagnostics::span_lint_and_help;
use rustc_ast::ast::{Expr, ExprKind};
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_middle::lint::in_external_macro;
use rustc_session::{declare_lint_pass, declare_tool_lint};
declare_clippy_lint! {
/// ### What it does
/// Checks for usage of if expressions with an `else if` branch,
/// but without a final `else` branch.
///
/// ### Why is this bad?
/// Some coding guidelines require this (e.g., MISRA-C:2004 Rule 14.10).
///
/// ### Example
/// ```rust
/// # fn a() {}
/// # fn b() {}
/// # let x: i32 = 1;
/// if x.is_positive() {
/// a();
/// } else if x.is_negative() {
/// b();
/// }
/// ```
///
/// Could be written:
///
/// ```rust
/// # fn a() {}
/// # fn b() {}
/// # let x: i32 = 1;
/// if x.is_positive() {
/// a();
/// } else if x.is_negative() {
/// b();
/// } else {
/// // We don't care about zero.
/// }
/// ```
pub ELSE_IF_WITHOUT_ELSE,
restriction,
"`if` expression with an `else if`, but without a final `else` branch"
}
declare_lint_pass!(ElseIfWithoutElse => [ELSE_IF_WITHOUT_ELSE]);
impl EarlyLintPass for ElseIfWithoutElse {
fn check_expr(&mut self, cx: &EarlyContext<'_>, mut item: &Expr)
|
}
|
{
if in_external_macro(cx.sess, item.span) {
return;
}
while let ExprKind::If(_, _, Some(ref els)) = item.kind {
if let ExprKind::If(_, _, None) = els.kind {
span_lint_and_help(
cx,
ELSE_IF_WITHOUT_ELSE,
els.span,
"`if` expression with an `else if`, but without a final `else`",
None,
"add an `else` block here",
);
}
item = els;
}
}
|
identifier_body
|
else_if_without_else.rs
|
//! Lint on if expressions with an else if, but without a final else branch.
use clippy_utils::diagnostics::span_lint_and_help;
use rustc_ast::ast::{Expr, ExprKind};
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_middle::lint::in_external_macro;
use rustc_session::{declare_lint_pass, declare_tool_lint};
declare_clippy_lint! {
/// ### What it does
/// Checks for usage of if expressions with an `else if` branch,
/// but without a final `else` branch.
///
/// ### Why is this bad?
/// Some coding guidelines require this (e.g., MISRA-C:2004 Rule 14.10).
///
/// ### Example
/// ```rust
/// # fn a() {}
/// # fn b() {}
/// # let x: i32 = 1;
/// if x.is_positive() {
/// a();
/// } else if x.is_negative() {
/// b();
/// }
/// ```
///
/// Could be written:
///
/// ```rust
/// # fn a() {}
/// # fn b() {}
/// # let x: i32 = 1;
/// if x.is_positive() {
/// a();
/// } else if x.is_negative() {
/// b();
/// } else {
/// // We don't care about zero.
/// }
/// ```
pub ELSE_IF_WITHOUT_ELSE,
restriction,
"`if` expression with an `else if`, but without a final `else` branch"
}
declare_lint_pass!(ElseIfWithoutElse => [ELSE_IF_WITHOUT_ELSE]);
impl EarlyLintPass for ElseIfWithoutElse {
fn
|
(&mut self, cx: &EarlyContext<'_>, mut item: &Expr) {
if in_external_macro(cx.sess, item.span) {
return;
}
while let ExprKind::If(_, _, Some(ref els)) = item.kind {
if let ExprKind::If(_, _, None) = els.kind {
span_lint_and_help(
cx,
ELSE_IF_WITHOUT_ELSE,
els.span,
"`if` expression with an `else if`, but without a final `else`",
None,
"add an `else` block here",
);
}
item = els;
}
}
}
|
check_expr
|
identifier_name
|
else_if_without_else.rs
|
//! Lint on if expressions with an else if, but without a final else branch.
use clippy_utils::diagnostics::span_lint_and_help;
use rustc_ast::ast::{Expr, ExprKind};
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_middle::lint::in_external_macro;
use rustc_session::{declare_lint_pass, declare_tool_lint};
declare_clippy_lint! {
/// ### What it does
/// Checks for usage of if expressions with an `else if` branch,
/// but without a final `else` branch.
///
/// ### Why is this bad?
/// Some coding guidelines require this (e.g., MISRA-C:2004 Rule 14.10).
///
/// ### Example
/// ```rust
/// # fn a() {}
/// # fn b() {}
/// # let x: i32 = 1;
/// if x.is_positive() {
/// a();
/// } else if x.is_negative() {
/// b();
/// }
/// ```
///
/// Could be written:
///
/// ```rust
/// # fn a() {}
/// # fn b() {}
/// # let x: i32 = 1;
/// if x.is_positive() {
/// a();
/// } else if x.is_negative() {
/// b();
/// } else {
/// // We don't care about zero.
/// }
/// ```
pub ELSE_IF_WITHOUT_ELSE,
restriction,
"`if` expression with an `else if`, but without a final `else` branch"
}
declare_lint_pass!(ElseIfWithoutElse => [ELSE_IF_WITHOUT_ELSE]);
impl EarlyLintPass for ElseIfWithoutElse {
fn check_expr(&mut self, cx: &EarlyContext<'_>, mut item: &Expr) {
if in_external_macro(cx.sess, item.span)
|
while let ExprKind::If(_, _, Some(ref els)) = item.kind {
if let ExprKind::If(_, _, None) = els.kind {
span_lint_and_help(
cx,
ELSE_IF_WITHOUT_ELSE,
els.span,
"`if` expression with an `else if`, but without a final `else`",
None,
"add an `else` block here",
);
}
item = els;
}
}
}
|
{
return;
}
|
conditional_block
|
else_if_without_else.rs
|
//! Lint on if expressions with an else if, but without a final else branch.
use clippy_utils::diagnostics::span_lint_and_help;
use rustc_ast::ast::{Expr, ExprKind};
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_middle::lint::in_external_macro;
use rustc_session::{declare_lint_pass, declare_tool_lint};
declare_clippy_lint! {
/// ### What it does
/// Checks for usage of if expressions with an `else if` branch,
/// but without a final `else` branch.
///
/// ### Why is this bad?
/// Some coding guidelines require this (e.g., MISRA-C:2004 Rule 14.10).
///
/// ### Example
/// ```rust
/// # fn a() {}
/// # fn b() {}
/// # let x: i32 = 1;
|
/// if x.is_positive() {
/// a();
/// } else if x.is_negative() {
/// b();
/// }
/// ```
///
/// Could be written:
///
/// ```rust
/// # fn a() {}
/// # fn b() {}
/// # let x: i32 = 1;
/// if x.is_positive() {
/// a();
/// } else if x.is_negative() {
/// b();
/// } else {
/// // We don't care about zero.
/// }
/// ```
pub ELSE_IF_WITHOUT_ELSE,
restriction,
"`if` expression with an `else if`, but without a final `else` branch"
}
declare_lint_pass!(ElseIfWithoutElse => [ELSE_IF_WITHOUT_ELSE]);
impl EarlyLintPass for ElseIfWithoutElse {
fn check_expr(&mut self, cx: &EarlyContext<'_>, mut item: &Expr) {
if in_external_macro(cx.sess, item.span) {
return;
}
while let ExprKind::If(_, _, Some(ref els)) = item.kind {
if let ExprKind::If(_, _, None) = els.kind {
span_lint_and_help(
cx,
ELSE_IF_WITHOUT_ELSE,
els.span,
"`if` expression with an `else if`, but without a final `else`",
None,
"add an `else` block here",
);
}
item = els;
}
}
}
|
random_line_split
|
|
value.rs
|
use Renderable;
use context::Context;
use std::collections::HashMap;
use std::cmp::Ordering;
/// An enum to represent different value types
#[derive(Clone, PartialEq, Debug)]
pub enum Value{
Num(f32),
Str(String),
Object(HashMap<String, Value>),
Array(Vec<Value>)
}
// TODO implement for object and array
// TODO clean this up
impl PartialOrd<Value> for Value{
fn partial_cmp(&self, other: &Value) -> Option<Ordering>{
match (self, other) {
(&Value::Num(x), &Value::Num(y)) => x.partial_cmp(&y),
(&Value::Str(ref x), &Value::Str(ref y)) => x.partial_cmp(y),
_ => None
}
}
fn lt(&self, other: &Value) -> bool{
match (self, other) {
(&Value::Num(x), &Value::Num(y)) => x.lt(&y),
(&Value::Str(ref x), &Value::Str(ref y)) => x.lt(y),
_ => false
}
}
fn le(&self, other: &Value) -> bool{
match (self, other) {
(&Value::Num(x), &Value::Num(y)) => x.le(&y),
(&Value::Str(ref x), &Value::Str(ref y)) => x.le(y),
_ => false
}
}
fn gt(&self, other: &Value) -> bool
|
fn ge(&self, other: &Value) -> bool{
match (self, other) {
(&Value::Num(x), &Value::Num(y)) => x.ge(&y),
(&Value::Str(ref x), &Value::Str(ref y)) => x.ge(y),
_ => false
}
}
}
impl ToString for Value{
fn to_string(&self) -> String{
match self{
&Value::Num(ref x) => x.to_string(),
&Value::Str(ref x) => x.to_string(),
_ => "[Object object]".to_string() // TODO
}
}
}
impl Renderable for Value{
fn render(&self, _context: &mut Context) -> Option<String>{
Some(self.to_string())
}
}
|
{
match (self, other) {
(&Value::Num(x), &Value::Num(y)) => x.gt(&y),
(&Value::Str(ref x), &Value::Str(ref y)) => x.gt(y),
_ => false
}
}
|
identifier_body
|
value.rs
|
use Renderable;
use context::Context;
use std::collections::HashMap;
use std::cmp::Ordering;
/// An enum to represent different value types
#[derive(Clone, PartialEq, Debug)]
pub enum
|
{
Num(f32),
Str(String),
Object(HashMap<String, Value>),
Array(Vec<Value>)
}
// TODO implement for object and array
// TODO clean this up
impl PartialOrd<Value> for Value{
fn partial_cmp(&self, other: &Value) -> Option<Ordering>{
match (self, other) {
(&Value::Num(x), &Value::Num(y)) => x.partial_cmp(&y),
(&Value::Str(ref x), &Value::Str(ref y)) => x.partial_cmp(y),
_ => None
}
}
fn lt(&self, other: &Value) -> bool{
match (self, other) {
(&Value::Num(x), &Value::Num(y)) => x.lt(&y),
(&Value::Str(ref x), &Value::Str(ref y)) => x.lt(y),
_ => false
}
}
fn le(&self, other: &Value) -> bool{
match (self, other) {
(&Value::Num(x), &Value::Num(y)) => x.le(&y),
(&Value::Str(ref x), &Value::Str(ref y)) => x.le(y),
_ => false
}
}
fn gt(&self, other: &Value) -> bool{
match (self, other) {
(&Value::Num(x), &Value::Num(y)) => x.gt(&y),
(&Value::Str(ref x), &Value::Str(ref y)) => x.gt(y),
_ => false
}
}
fn ge(&self, other: &Value) -> bool{
match (self, other) {
(&Value::Num(x), &Value::Num(y)) => x.ge(&y),
(&Value::Str(ref x), &Value::Str(ref y)) => x.ge(y),
_ => false
}
}
}
impl ToString for Value{
fn to_string(&self) -> String{
match self{
&Value::Num(ref x) => x.to_string(),
&Value::Str(ref x) => x.to_string(),
_ => "[Object object]".to_string() // TODO
}
}
}
impl Renderable for Value{
fn render(&self, _context: &mut Context) -> Option<String>{
Some(self.to_string())
}
}
|
Value
|
identifier_name
|
value.rs
|
use Renderable;
use context::Context;
use std::collections::HashMap;
use std::cmp::Ordering;
/// An enum to represent different value types
#[derive(Clone, PartialEq, Debug)]
pub enum Value{
Num(f32),
Str(String),
|
// TODO implement for object and array
// TODO clean this up
impl PartialOrd<Value> for Value{
fn partial_cmp(&self, other: &Value) -> Option<Ordering>{
match (self, other) {
(&Value::Num(x), &Value::Num(y)) => x.partial_cmp(&y),
(&Value::Str(ref x), &Value::Str(ref y)) => x.partial_cmp(y),
_ => None
}
}
fn lt(&self, other: &Value) -> bool{
match (self, other) {
(&Value::Num(x), &Value::Num(y)) => x.lt(&y),
(&Value::Str(ref x), &Value::Str(ref y)) => x.lt(y),
_ => false
}
}
fn le(&self, other: &Value) -> bool{
match (self, other) {
(&Value::Num(x), &Value::Num(y)) => x.le(&y),
(&Value::Str(ref x), &Value::Str(ref y)) => x.le(y),
_ => false
}
}
fn gt(&self, other: &Value) -> bool{
match (self, other) {
(&Value::Num(x), &Value::Num(y)) => x.gt(&y),
(&Value::Str(ref x), &Value::Str(ref y)) => x.gt(y),
_ => false
}
}
fn ge(&self, other: &Value) -> bool{
match (self, other) {
(&Value::Num(x), &Value::Num(y)) => x.ge(&y),
(&Value::Str(ref x), &Value::Str(ref y)) => x.ge(y),
_ => false
}
}
}
impl ToString for Value{
fn to_string(&self) -> String{
match self{
&Value::Num(ref x) => x.to_string(),
&Value::Str(ref x) => x.to_string(),
_ => "[Object object]".to_string() // TODO
}
}
}
impl Renderable for Value{
fn render(&self, _context: &mut Context) -> Option<String>{
Some(self.to_string())
}
}
|
Object(HashMap<String, Value>),
Array(Vec<Value>)
}
|
random_line_split
|
plus_minus.rs
|
//https://www.hackerrank.com/challenges/plus-minus
use std::io;
use std::io::prelude::*;
fn main()
|
}
}
println!("{}", count_positive / count);
println!("{}", count_negative / count);
println!("{}", count_zeros / count);
}
|
{
let stdin = io::stdin();
let count: f64 = stdin.lock().lines() //iterator over lines in stdin
.next().unwrap().unwrap() //finally it's a string
.trim().parse().unwrap(); //and then parsing count value...
let mut count_positive: f64 = 0.0;
let mut count_negative: f64 = 0.0;
let mut count_zeros: f64 = 0.0;
for line in stdin.lock().lines() {
let line: String = line.unwrap();
let v: Vec<f64> = line.trim().split(' ').map(|x| x.parse::<f64>().unwrap()).collect();
for item in v {
if item > 0.0 {
count_positive += 1.0;
} else if item < 0.0 {
count_negative += 1.0;
} else {
count_zeros += 1.0;
}
|
identifier_body
|
plus_minus.rs
|
//https://www.hackerrank.com/challenges/plus-minus
|
fn main() {
let stdin = io::stdin();
let count: f64 = stdin.lock().lines() //iterator over lines in stdin
.next().unwrap().unwrap() //finally it's a string
.trim().parse().unwrap(); //and then parsing count value...
let mut count_positive: f64 = 0.0;
let mut count_negative: f64 = 0.0;
let mut count_zeros: f64 = 0.0;
for line in stdin.lock().lines() {
let line: String = line.unwrap();
let v: Vec<f64> = line.trim().split(' ').map(|x| x.parse::<f64>().unwrap()).collect();
for item in v {
if item > 0.0 {
count_positive += 1.0;
} else if item < 0.0 {
count_negative += 1.0;
} else {
count_zeros += 1.0;
}
}
}
println!("{}", count_positive / count);
println!("{}", count_negative / count);
println!("{}", count_zeros / count);
}
|
use std::io;
use std::io::prelude::*;
|
random_line_split
|
plus_minus.rs
|
//https://www.hackerrank.com/challenges/plus-minus
use std::io;
use std::io::prelude::*;
fn main() {
let stdin = io::stdin();
let count: f64 = stdin.lock().lines() //iterator over lines in stdin
.next().unwrap().unwrap() //finally it's a string
.trim().parse().unwrap(); //and then parsing count value...
let mut count_positive: f64 = 0.0;
let mut count_negative: f64 = 0.0;
let mut count_zeros: f64 = 0.0;
for line in stdin.lock().lines() {
let line: String = line.unwrap();
let v: Vec<f64> = line.trim().split(' ').map(|x| x.parse::<f64>().unwrap()).collect();
for item in v {
if item > 0.0
|
else if item < 0.0 {
count_negative += 1.0;
} else {
count_zeros += 1.0;
}
}
}
println!("{}", count_positive / count);
println!("{}", count_negative / count);
println!("{}", count_zeros / count);
}
|
{
count_positive += 1.0;
}
|
conditional_block
|
plus_minus.rs
|
//https://www.hackerrank.com/challenges/plus-minus
use std::io;
use std::io::prelude::*;
fn
|
() {
let stdin = io::stdin();
let count: f64 = stdin.lock().lines() //iterator over lines in stdin
.next().unwrap().unwrap() //finally it's a string
.trim().parse().unwrap(); //and then parsing count value...
let mut count_positive: f64 = 0.0;
let mut count_negative: f64 = 0.0;
let mut count_zeros: f64 = 0.0;
for line in stdin.lock().lines() {
let line: String = line.unwrap();
let v: Vec<f64> = line.trim().split(' ').map(|x| x.parse::<f64>().unwrap()).collect();
for item in v {
if item > 0.0 {
count_positive += 1.0;
} else if item < 0.0 {
count_negative += 1.0;
} else {
count_zeros += 1.0;
}
}
}
println!("{}", count_positive / count);
println!("{}", count_negative / count);
println!("{}", count_zeros / count);
}
|
main
|
identifier_name
|
step2.rs
|
// PART 1
indy::pool::set_protocol_version(PROTOCOL_VERSION).wait().unwrap();
println!("1. Creating a new local pool ledger configuration that can be used later to connect pool nodes");
let pool_config_file = create_genesis_txn_file_for_pool(pool_name);
let pool_config = json!({
"genesis_txn" : &pool_config_file
});
pool::create_pool_ledger_config(&pool_name, Some(&pool_config.to_string())).wait().unwrap();
println!("2. Open pool ledger and get the pool handle from libindy");
|
let config = json!({ "id" : wallet_name.to_string() }).to_string();
wallet::create_wallet(&config, USEFUL_CREDENTIALS).wait().unwrap();
println!("4. Open wallet and get the wallet handle from libindy");
let wallet_handle: i32 = wallet::open_wallet(&config, USEFUL_CREDENTIALS).wait().unwrap();
println!("5. Generating and storing steward DID and Verkey");
let first_json_seed = json!({
"seed":"000000000000000000000000Steward1"
}).to_string();
let (steward_did, _steward_verkey) = did::create_and_store_my_did(wallet_handle, &first_json_seed).wait().unwrap();
println!("6. Generating and storing Trust Anchor DID and Verkey");
let (trustee_did, trustee_verkey) = did::create_and_store_my_did(wallet_handle, &"{}".to_string()).wait().unwrap();
// 7. Build NYM request to add Trust Anchor to the ledger
println!("7. Build NYM request to add Trust Anchor to the ledger");
let build_nym_request: String = ledger::build_nym_request(&steward_did, &trustee_did, Some(&trustee_verkey), None, Some("TRUST_ANCHOR")).wait().unwrap();
// 8. Sending the nym request to ledger
println!("8. Sending NYM request to ledger");
let _build_nym_sign_submit_result: String = ledger::sign_and_submit_request(pool_handle, wallet_handle, &steward_did, &build_nym_request).wait().unwrap();
|
let pool_handle: i32 = pool::open_pool_ledger(&pool_name, None).wait().unwrap();
println!("3. Creates a new wallet");
|
random_line_split
|
it.rs
|
extern crate unterflow;
extern crate tokio_core;
use tokio_core::reactor::Core;
use unterflow::Client;
fn client(core: &mut Core) -> Client {
let addr = "127.0.0.1:51015".parse().unwrap();
let client = Client::connect(&addr, &core.handle());
core.run(client).unwrap()
}
#[test]
fn test_topology()
|
topic.get(&0)
},
)
);
assert_eq!(
Some(1),
topology.topic_leaders.get("internal-system").map(|topic| {
topic.len()
})
);
assert_eq!(
Some(broker),
topology.topic_leaders.get("internal-system").and_then(
|topic| {
topic.get(&0)
},
)
);
}
#[test]
fn test_create_task() {
let mut core = Core::new().unwrap();
let client = client(&mut core);
let task = client.new_task("foo".to_string()).retires(12).add_header(
"foo".to_string(),
"bar".to_string(),
);
let task = core.run(task.create("default-topic")).unwrap();
assert_eq!("CREATED", task.state);
assert!(task.key > 0);
assert!(task.key < u64::max_value());
}
|
{
let mut core = Core::new().unwrap();
let client = client(&mut core);
let topology = core.run(client.topology()).unwrap();
assert_eq!(1, topology.brokers.len());
let broker = &topology.brokers[0];
assert_eq!(
Some(1),
topology.topic_leaders.get("default-topic").map(|topic| {
topic.len()
})
);
assert_eq!(
Some(broker),
topology.topic_leaders.get("default-topic").and_then(
|topic| {
|
identifier_body
|
it.rs
|
extern crate unterflow;
extern crate tokio_core;
use tokio_core::reactor::Core;
use unterflow::Client;
fn client(core: &mut Core) -> Client {
let addr = "127.0.0.1:51015".parse().unwrap();
let client = Client::connect(&addr, &core.handle());
core.run(client).unwrap()
}
#[test]
fn test_topology() {
let mut core = Core::new().unwrap();
let client = client(&mut core);
let topology = core.run(client.topology()).unwrap();
assert_eq!(1, topology.brokers.len());
let broker = &topology.brokers[0];
assert_eq!(
Some(1),
topology.topic_leaders.get("default-topic").map(|topic| {
topic.len()
})
);
assert_eq!(
Some(broker),
topology.topic_leaders.get("default-topic").and_then(
|topic| {
topic.get(&0)
},
)
);
assert_eq!(
Some(1),
topology.topic_leaders.get("internal-system").map(|topic| {
topic.len()
})
);
assert_eq!(
Some(broker),
topology.topic_leaders.get("internal-system").and_then(
|topic| {
topic.get(&0)
},
)
);
}
#[test]
fn
|
() {
let mut core = Core::new().unwrap();
let client = client(&mut core);
let task = client.new_task("foo".to_string()).retires(12).add_header(
"foo".to_string(),
"bar".to_string(),
);
let task = core.run(task.create("default-topic")).unwrap();
assert_eq!("CREATED", task.state);
assert!(task.key > 0);
assert!(task.key < u64::max_value());
}
|
test_create_task
|
identifier_name
|
it.rs
|
extern crate unterflow;
extern crate tokio_core;
use tokio_core::reactor::Core;
use unterflow::Client;
|
fn client(core: &mut Core) -> Client {
let addr = "127.0.0.1:51015".parse().unwrap();
let client = Client::connect(&addr, &core.handle());
core.run(client).unwrap()
}
#[test]
fn test_topology() {
let mut core = Core::new().unwrap();
let client = client(&mut core);
let topology = core.run(client.topology()).unwrap();
assert_eq!(1, topology.brokers.len());
let broker = &topology.brokers[0];
assert_eq!(
Some(1),
topology.topic_leaders.get("default-topic").map(|topic| {
topic.len()
})
);
assert_eq!(
Some(broker),
topology.topic_leaders.get("default-topic").and_then(
|topic| {
topic.get(&0)
},
)
);
assert_eq!(
Some(1),
topology.topic_leaders.get("internal-system").map(|topic| {
topic.len()
})
);
assert_eq!(
Some(broker),
topology.topic_leaders.get("internal-system").and_then(
|topic| {
topic.get(&0)
},
)
);
}
#[test]
fn test_create_task() {
let mut core = Core::new().unwrap();
let client = client(&mut core);
let task = client.new_task("foo".to_string()).retires(12).add_header(
"foo".to_string(),
"bar".to_string(),
);
let task = core.run(task.create("default-topic")).unwrap();
assert_eq!("CREATED", task.state);
assert!(task.key > 0);
assert!(task.key < u64::max_value());
}
|
random_line_split
|
|
cors.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A partial implementation of CORS
//! For now this library is XHR-specific.
//! For stuff involving `<img>`, `<iframe>`, `<form>`, etc please check what
//! the request mode should be and compare with the fetch spec
//! This library will eventually become the core of the Fetch crate
//! with CORSRequest being expanded into FetchRequest (etc)
use network_listener::{NetworkListener, PreInvoke};
use script_task::ScriptChan;
use net_traits::{AsyncResponseTarget, AsyncResponseListener, ResponseAction, Metadata};
use std::ascii::AsciiExt;
use std::borrow::ToOwned;
use std::cell::RefCell;
use std::sync::{Arc, Mutex};
use time;
use time::{now, Timespec};
use hyper::header::{AccessControlRequestMethod, AccessControlAllowMethods};
use hyper::header::{AccessControlMaxAge, AccessControlAllowOrigin};
use hyper::header::{AccessControlRequestHeaders, AccessControlAllowHeaders};
use hyper::header::{Headers, HeaderView};
use hyper::client::Request;
use hyper::mime::{Mime, TopLevel, SubLevel};
use hyper::header::{ContentType, Host};
use hyper::method::Method;
use hyper::status::StatusClass::Success;
use unicase::UniCase;
use url::{SchemeData, Url};
use util::task::spawn_named;
/// Interface for network listeners concerned with CORS checks. Proper network requests
/// should be initiated from this method, based on the response provided.
pub trait AsyncCORSResponseListener {
fn response_available(&self, response: CORSResponse);
}
#[derive(Clone)]
pub struct CORSRequest {
pub origin: Url,
pub destination: Url,
pub mode: RequestMode,
pub method: Method,
pub headers: Headers,
/// CORS preflight flag (https://fetch.spec.whatwg.org/#concept-http-fetch)
/// Indicates that a CORS preflight request and/or cache check is to be performed
pub preflight_flag: bool
}
/// https://fetch.spec.whatwg.org/#concept-request-mode
/// This only covers some of the request modes. The
/// `same-origin` and `no CORS` modes are unnecessary for XHR.
#[derive(PartialEq, Copy, Clone)]
pub enum RequestMode {
CORS, // CORS
ForcedPreflight // CORS-with-forced-preflight
}
impl CORSRequest {
/// Creates a CORS request if necessary. Will return an error when fetching is forbidden
pub fn maybe_new(referer: Url, destination: Url, mode: RequestMode,
method: Method, headers: Headers) -> Result<Option<CORSRequest>, ()> {
if referer.scheme == destination.scheme &&
referer.host() == destination.host() &&
referer.port() == destination.port() {
return Ok(None); // Not cross-origin, proceed with a normal fetch
}
match &*destination.scheme {
// TODO: If the request's same origin data url flag is set (which isn't the case for XHR)
// we can fetch a data URL normally. about:blank can also be fetched by XHR
"http" | "https" => {
let mut req = CORSRequest::new(referer, destination, mode, method, headers);
req.preflight_flag =!is_simple_method(&req.method) || mode == RequestMode::ForcedPreflight;
if req.headers.iter().all(|h| is_simple_header(&h)) {
req.preflight_flag = true;
}
Ok(Some(req))
},
_ => Err(()),
}
}
fn new(mut referer: Url, destination: Url, mode: RequestMode, method: Method,
headers: Headers) -> CORSRequest {
match referer.scheme_data {
SchemeData::Relative(ref mut data) => data.path = vec!(),
_ => {}
};
referer.fragment = None;
referer.query = None;
CORSRequest {
origin: referer,
destination: destination,
mode: mode,
method: method,
headers: headers,
preflight_flag: false
}
}
pub fn http_fetch_async(&self,
listener: Box<AsyncCORSResponseListener+Send>,
script_chan: Box<ScriptChan+Send>) {
struct CORSContext {
listener: Box<AsyncCORSResponseListener+Send>,
response: RefCell<Option<CORSResponse>>,
}
// This is shoe-horning the CORSReponse stuff into the rest of the async network
// framework right now. It would be worth redesigning http_fetch to do this properly.
impl AsyncResponseListener for CORSContext {
fn headers_available(&self, _metadata: Metadata) {
}
fn data_available(&self, _payload: Vec<u8>) {
}
fn response_complete(&self, _status: Result<(), String>) {
let response = self.response.borrow_mut().take().unwrap();
self.listener.response_available(response);
}
}
impl PreInvoke for CORSContext {}
let context = CORSContext {
listener: listener,
response: RefCell::new(None),
};
let listener = NetworkListener {
context: Arc::new(Mutex::new(context)),
script_chan: script_chan,
};
// TODO: this exists only to make preflight check non-blocking
// perhaps should be handled by the resource task?
let req = self.clone();
spawn_named("cors".to_owned(), move || {
let response = req.http_fetch();
let mut context = listener.context.lock();
let context = context.as_mut().unwrap();
*context.response.borrow_mut() = Some(response);
listener.invoke_with_listener(ResponseAction::ResponseComplete(Ok(())));
});
}
/// http://fetch.spec.whatwg.org/#concept-http-fetch
/// This method assumes that the CORS flag is set
/// This does not perform the full HTTP fetch, rather it handles part of the CORS filtering
/// if self.mode is ForcedPreflight, then the CORS-with-forced-preflight
/// fetch flag is set as well
pub fn http_fetch(&self) -> CORSResponse {
let response = CORSResponse::new();
// Step 2: Handle service workers (unimplemented)
// Step 3
// Substep 1: Service workers (unimplemented )
// Substep 2
let cache = &mut CORSCache(vec!()); // XXXManishearth Should come from user agent
if self.preflight_flag &&
!cache.match_method(self, &self.method) &&
!self.headers.iter().all(|h| is_simple_header(&h) && cache.match_header(self, h.name())) {
if!is_simple_method(&self.method) || self.mode == RequestMode::ForcedPreflight {
return self.preflight_fetch();
// Everything after this is part of XHR::fetch()
// Expect the organization of code to improve once we have a fetch crate
}
}
response
}
/// https://fetch.spec.whatwg.org/#cors-preflight-fetch
fn preflight_fetch(&self) -> CORSResponse {
let error = CORSResponse::new_error();
let mut cors_response = CORSResponse::new();
let mut preflight = self.clone(); // Step 1
preflight.method = Method::Options; // Step 2
preflight.headers = Headers::new(); // Step 3
// Step 4
preflight.headers.set(AccessControlRequestMethod(self.method.clone()));
// Step 5 - 7
let mut header_names = vec!();
for header in self.headers.iter() {
header_names.push(header.name().to_owned());
}
header_names.sort();
preflight.headers.set(AccessControlRequestHeaders(header_names.into_iter().map(UniCase).collect()));
// Step 8 unnecessary, we don't use the request body
// Step 9, 10 unnecessary, we're writing our own fetch code
// Step 11
let preflight_request = Request::new(preflight.method, preflight.destination);
let mut req = match preflight_request {
Ok(req) => req,
Err(_) => return error
};
let host = req.headers().get::<Host>().unwrap().clone();
*req.headers_mut() = preflight.headers.clone();
req.headers_mut().set(host);
let stream = match req.start() {
Ok(s) => s,
Err(_) => return error
};
let response = match stream.send() {
Ok(r) => r,
Err(_) => return error
};
// Step 12
match response.status.class() {
Success => {}
_ => return error
}
cors_response.headers = response.headers.clone();
// Substeps 1-3 (parsing rules: https://fetch.spec.whatwg.org/#http-new-header-syntax)
let methods_substep4 = [self.method.clone()];
let mut methods = match response.headers.get() {
Some(&AccessControlAllowMethods(ref v)) => &**v,
_ => return error
};
let headers = match response.headers.get() {
Some(&AccessControlAllowHeaders(ref h)) => h,
_ => return error
};
// Substep 4
if methods.len() == 0 || preflight.mode == RequestMode::ForcedPreflight {
methods = &methods_substep4;
}
// Substep 5
if!is_simple_method(&self.method) &&
!methods.iter().any(|m| m == &self.method) {
return error;
}
// Substep 6
for h in self.headers.iter() {
if is_simple_header(&h) {
continue;
}
if!headers.iter().any(|ref h2| h.name().eq_ignore_ascii_case(h2)) {
return error;
}
}
// Substep 7, 8
let max_age = match response.headers.get() {
Some(&AccessControlMaxAge(num)) => num,
None => 0
};
// Substep 9: Impose restrictions on max-age, if any (unimplemented)
// Substeps 10-12: Add a cache (partially implemented, XXXManishearth)
// This cache should come from the user agent, creating a new one here to check
// for compile time errors
let cache = &mut CORSCache(vec!());
for m in methods.iter() {
let cache_match = cache.match_method_and_update(self, m, max_age);
if!cache_match {
cache.insert(CORSCacheEntry::new(self.origin.clone(), self.destination.clone(),
max_age, false, HeaderOrMethod::MethodData(m.clone())));
}
}
for h in response.headers.iter() {
let cache_match = cache.match_header_and_update(self, h.name(), max_age);
if!cache_match {
cache.insert(CORSCacheEntry::new(self.origin.clone(), self.destination.clone(),
max_age, false, HeaderOrMethod::HeaderData(h.to_string())));
}
}
cors_response
}
}
pub struct CORSResponse {
pub network_error: bool,
pub headers: Headers
}
impl CORSResponse {
fn new() -> CORSResponse {
CORSResponse {
network_error: false,
headers: Headers::new()
}
}
fn new_error() -> CORSResponse {
CORSResponse {
network_error: true,
headers: Headers::new()
}
}
}
// CORS Cache stuff
/// A CORS cache object. Anchor it somewhere to the user agent.
#[derive(Clone)]
pub struct CORSCache(Vec<CORSCacheEntry>);
/// Union type for CORS cache entries
/// Each entry might pertain to a header or method
#[derive(Clone)]
pub enum HeaderOrMethod {
HeaderData(String),
MethodData(Method)
}
impl HeaderOrMethod {
fn match_header(&self, header_name: &str) -> bool
|
fn match_method(&self, method: &Method) -> bool {
match *self {
HeaderOrMethod::MethodData(ref m) => m == method,
_ => false
}
}
}
// An entry in the CORS cache
#[derive(Clone)]
pub struct CORSCacheEntry {
pub origin: Url,
pub url: Url,
pub max_age: u32,
pub credentials: bool,
pub header_or_method: HeaderOrMethod,
created: Timespec
}
impl CORSCacheEntry {
fn new(origin:Url,
url: Url,
max_age: u32,
credentials: bool,
header_or_method: HeaderOrMethod) -> CORSCacheEntry {
CORSCacheEntry {
origin: origin,
url: url,
max_age: max_age,
credentials: credentials,
header_or_method: header_or_method,
created: time::now().to_timespec()
}
}
}
impl CORSCache {
/// https://fetch.spec.whatwg.org/#concept-cache-clear
#[allow(dead_code)]
fn clear(&mut self, request: &CORSRequest) {
let CORSCache(buf) = self.clone();
let new_buf: Vec<CORSCacheEntry> =
buf.into_iter()
.filter(|e| e.origin == request.origin && request.destination == e.url)
.collect();
*self = CORSCache(new_buf);
}
// Remove old entries
fn cleanup(&mut self) {
let CORSCache(buf) = self.clone();
let now = time::now().to_timespec();
let new_buf: Vec<CORSCacheEntry> = buf.into_iter()
.filter(|e| now.sec > e.created.sec + e.max_age as i64)
.collect();
*self = CORSCache(new_buf);
}
/// https://fetch.spec.whatwg.org/#concept-cache-match-header
fn find_entry_by_header<'a>(&'a mut self,
request: &CORSRequest,
header_name: &str) -> Option<&'a mut CORSCacheEntry> {
self.cleanup();
let CORSCache(ref mut buf) = *self;
// Credentials are not yet implemented here
let entry = buf.iter_mut().find(|e| e.origin.scheme == request.origin.scheme &&
e.origin.host() == request.origin.host() &&
e.origin.port() == request.origin.port() &&
e.url == request.destination &&
e.header_or_method.match_header(header_name));
entry
}
fn match_header(&mut self, request: &CORSRequest, header_name: &str) -> bool {
self.find_entry_by_header(request, header_name).is_some()
}
fn match_header_and_update(&mut self, request: &CORSRequest, header_name: &str, new_max_age: u32) -> bool {
self.find_entry_by_header(request, header_name).map(|e| e.max_age = new_max_age).is_some()
}
fn find_entry_by_method<'a>(&'a mut self,
request: &CORSRequest,
method: &Method) -> Option<&'a mut CORSCacheEntry> {
// we can take the method from CORSRequest itself
self.cleanup();
let CORSCache(ref mut buf) = *self;
// Credentials are not yet implemented here
let entry = buf.iter_mut().find(|e| e.origin.scheme == request.origin.scheme &&
e.origin.host() == request.origin.host() &&
e.origin.port() == request.origin.port() &&
e.url == request.destination &&
e.header_or_method.match_method(method));
entry
}
/// https://fetch.spec.whatwg.org/#concept-cache-match-method
fn match_method(&mut self, request: &CORSRequest, method: &Method) -> bool {
self.find_entry_by_method(request, method).is_some()
}
fn match_method_and_update(&mut self, request: &CORSRequest, method: &Method, new_max_age: u32) -> bool {
self.find_entry_by_method(request, method).map(|e| e.max_age = new_max_age).is_some()
}
fn insert(&mut self, entry: CORSCacheEntry) {
self.cleanup();
let CORSCache(ref mut buf) = *self;
buf.push(entry);
}
}
fn is_simple_header(h: &HeaderView) -> bool {
//FIXME: use h.is::<HeaderType>() when AcceptLanguage and
//ContentLanguage headers exist
match &*h.name().to_ascii_lowercase() {
"accept" | "accept-language" | "content-language" => true,
"content-type" => match h.value() {
Some(&ContentType(Mime(TopLevel::Text, SubLevel::Plain, _))) |
Some(&ContentType(Mime(TopLevel::Application, SubLevel::WwwFormUrlEncoded, _))) |
Some(&ContentType(Mime(TopLevel::Multipart, SubLevel::FormData, _))) => true,
_ => false
},
_ => false
}
}
fn is_simple_method(m: &Method) -> bool {
match *m {
Method::Get | Method::Head | Method::Post => true,
_ => false
}
}
/// Perform a CORS check on a header list and CORS request
/// https://fetch.spec.whatwg.org/#cors-check
pub fn allow_cross_origin_request(req: &CORSRequest, headers: &Headers) -> bool {
match headers.get::<AccessControlAllowOrigin>() {
Some(&AccessControlAllowOrigin::Any) => true, // Not always true, depends on credentials mode
// FIXME: https://github.com/servo/servo/issues/6020
Some(&AccessControlAllowOrigin::Value(ref url)) =>
url.scheme == req.origin.scheme &&
url.host() == req.origin.host() &&
url.port() == req.origin.port(),
Some(&AccessControlAllowOrigin::Null) |
None => false
}
}
|
{
match *self {
HeaderOrMethod::HeaderData(ref s) => s.eq_ignore_ascii_case(header_name),
_ => false
}
}
|
identifier_body
|
cors.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A partial implementation of CORS
//! For now this library is XHR-specific.
//! For stuff involving `<img>`, `<iframe>`, `<form>`, etc please check what
//! the request mode should be and compare with the fetch spec
//! This library will eventually become the core of the Fetch crate
//! with CORSRequest being expanded into FetchRequest (etc)
use network_listener::{NetworkListener, PreInvoke};
use script_task::ScriptChan;
use net_traits::{AsyncResponseTarget, AsyncResponseListener, ResponseAction, Metadata};
use std::ascii::AsciiExt;
use std::borrow::ToOwned;
use std::cell::RefCell;
use std::sync::{Arc, Mutex};
use time;
use time::{now, Timespec};
use hyper::header::{AccessControlRequestMethod, AccessControlAllowMethods};
use hyper::header::{AccessControlMaxAge, AccessControlAllowOrigin};
use hyper::header::{AccessControlRequestHeaders, AccessControlAllowHeaders};
use hyper::header::{Headers, HeaderView};
use hyper::client::Request;
use hyper::mime::{Mime, TopLevel, SubLevel};
use hyper::header::{ContentType, Host};
use hyper::method::Method;
use hyper::status::StatusClass::Success;
use unicase::UniCase;
use url::{SchemeData, Url};
use util::task::spawn_named;
/// Interface for network listeners concerned with CORS checks. Proper network requests
/// should be initiated from this method, based on the response provided.
pub trait AsyncCORSResponseListener {
fn response_available(&self, response: CORSResponse);
}
#[derive(Clone)]
pub struct CORSRequest {
pub origin: Url,
pub destination: Url,
pub mode: RequestMode,
pub method: Method,
pub headers: Headers,
/// CORS preflight flag (https://fetch.spec.whatwg.org/#concept-http-fetch)
/// Indicates that a CORS preflight request and/or cache check is to be performed
pub preflight_flag: bool
}
/// https://fetch.spec.whatwg.org/#concept-request-mode
/// This only covers some of the request modes. The
/// `same-origin` and `no CORS` modes are unnecessary for XHR.
#[derive(PartialEq, Copy, Clone)]
pub enum RequestMode {
CORS, // CORS
ForcedPreflight // CORS-with-forced-preflight
}
impl CORSRequest {
/// Creates a CORS request if necessary. Will return an error when fetching is forbidden
pub fn maybe_new(referer: Url, destination: Url, mode: RequestMode,
method: Method, headers: Headers) -> Result<Option<CORSRequest>, ()> {
if referer.scheme == destination.scheme &&
referer.host() == destination.host() &&
referer.port() == destination.port() {
return Ok(None); // Not cross-origin, proceed with a normal fetch
}
match &*destination.scheme {
// TODO: If the request's same origin data url flag is set (which isn't the case for XHR)
// we can fetch a data URL normally. about:blank can also be fetched by XHR
"http" | "https" => {
let mut req = CORSRequest::new(referer, destination, mode, method, headers);
req.preflight_flag =!is_simple_method(&req.method) || mode == RequestMode::ForcedPreflight;
if req.headers.iter().all(|h| is_simple_header(&h)) {
req.preflight_flag = true;
}
Ok(Some(req))
},
_ => Err(()),
}
}
fn
|
(mut referer: Url, destination: Url, mode: RequestMode, method: Method,
headers: Headers) -> CORSRequest {
match referer.scheme_data {
SchemeData::Relative(ref mut data) => data.path = vec!(),
_ => {}
};
referer.fragment = None;
referer.query = None;
CORSRequest {
origin: referer,
destination: destination,
mode: mode,
method: method,
headers: headers,
preflight_flag: false
}
}
pub fn http_fetch_async(&self,
listener: Box<AsyncCORSResponseListener+Send>,
script_chan: Box<ScriptChan+Send>) {
struct CORSContext {
listener: Box<AsyncCORSResponseListener+Send>,
response: RefCell<Option<CORSResponse>>,
}
// This is shoe-horning the CORSReponse stuff into the rest of the async network
// framework right now. It would be worth redesigning http_fetch to do this properly.
impl AsyncResponseListener for CORSContext {
fn headers_available(&self, _metadata: Metadata) {
}
fn data_available(&self, _payload: Vec<u8>) {
}
fn response_complete(&self, _status: Result<(), String>) {
let response = self.response.borrow_mut().take().unwrap();
self.listener.response_available(response);
}
}
impl PreInvoke for CORSContext {}
let context = CORSContext {
listener: listener,
response: RefCell::new(None),
};
let listener = NetworkListener {
context: Arc::new(Mutex::new(context)),
script_chan: script_chan,
};
// TODO: this exists only to make preflight check non-blocking
// perhaps should be handled by the resource task?
let req = self.clone();
spawn_named("cors".to_owned(), move || {
let response = req.http_fetch();
let mut context = listener.context.lock();
let context = context.as_mut().unwrap();
*context.response.borrow_mut() = Some(response);
listener.invoke_with_listener(ResponseAction::ResponseComplete(Ok(())));
});
}
/// http://fetch.spec.whatwg.org/#concept-http-fetch
/// This method assumes that the CORS flag is set
/// This does not perform the full HTTP fetch, rather it handles part of the CORS filtering
/// if self.mode is ForcedPreflight, then the CORS-with-forced-preflight
/// fetch flag is set as well
pub fn http_fetch(&self) -> CORSResponse {
let response = CORSResponse::new();
// Step 2: Handle service workers (unimplemented)
// Step 3
// Substep 1: Service workers (unimplemented )
// Substep 2
let cache = &mut CORSCache(vec!()); // XXXManishearth Should come from user agent
if self.preflight_flag &&
!cache.match_method(self, &self.method) &&
!self.headers.iter().all(|h| is_simple_header(&h) && cache.match_header(self, h.name())) {
if!is_simple_method(&self.method) || self.mode == RequestMode::ForcedPreflight {
return self.preflight_fetch();
// Everything after this is part of XHR::fetch()
// Expect the organization of code to improve once we have a fetch crate
}
}
response
}
/// https://fetch.spec.whatwg.org/#cors-preflight-fetch
fn preflight_fetch(&self) -> CORSResponse {
let error = CORSResponse::new_error();
let mut cors_response = CORSResponse::new();
let mut preflight = self.clone(); // Step 1
preflight.method = Method::Options; // Step 2
preflight.headers = Headers::new(); // Step 3
// Step 4
preflight.headers.set(AccessControlRequestMethod(self.method.clone()));
// Step 5 - 7
let mut header_names = vec!();
for header in self.headers.iter() {
header_names.push(header.name().to_owned());
}
header_names.sort();
preflight.headers.set(AccessControlRequestHeaders(header_names.into_iter().map(UniCase).collect()));
// Step 8 unnecessary, we don't use the request body
// Step 9, 10 unnecessary, we're writing our own fetch code
// Step 11
let preflight_request = Request::new(preflight.method, preflight.destination);
let mut req = match preflight_request {
Ok(req) => req,
Err(_) => return error
};
let host = req.headers().get::<Host>().unwrap().clone();
*req.headers_mut() = preflight.headers.clone();
req.headers_mut().set(host);
let stream = match req.start() {
Ok(s) => s,
Err(_) => return error
};
let response = match stream.send() {
Ok(r) => r,
Err(_) => return error
};
// Step 12
match response.status.class() {
Success => {}
_ => return error
}
cors_response.headers = response.headers.clone();
// Substeps 1-3 (parsing rules: https://fetch.spec.whatwg.org/#http-new-header-syntax)
let methods_substep4 = [self.method.clone()];
let mut methods = match response.headers.get() {
Some(&AccessControlAllowMethods(ref v)) => &**v,
_ => return error
};
let headers = match response.headers.get() {
Some(&AccessControlAllowHeaders(ref h)) => h,
_ => return error
};
// Substep 4
if methods.len() == 0 || preflight.mode == RequestMode::ForcedPreflight {
methods = &methods_substep4;
}
// Substep 5
if!is_simple_method(&self.method) &&
!methods.iter().any(|m| m == &self.method) {
return error;
}
// Substep 6
for h in self.headers.iter() {
if is_simple_header(&h) {
continue;
}
if!headers.iter().any(|ref h2| h.name().eq_ignore_ascii_case(h2)) {
return error;
}
}
// Substep 7, 8
let max_age = match response.headers.get() {
Some(&AccessControlMaxAge(num)) => num,
None => 0
};
// Substep 9: Impose restrictions on max-age, if any (unimplemented)
// Substeps 10-12: Add a cache (partially implemented, XXXManishearth)
// This cache should come from the user agent, creating a new one here to check
// for compile time errors
let cache = &mut CORSCache(vec!());
for m in methods.iter() {
let cache_match = cache.match_method_and_update(self, m, max_age);
if!cache_match {
cache.insert(CORSCacheEntry::new(self.origin.clone(), self.destination.clone(),
max_age, false, HeaderOrMethod::MethodData(m.clone())));
}
}
for h in response.headers.iter() {
let cache_match = cache.match_header_and_update(self, h.name(), max_age);
if!cache_match {
cache.insert(CORSCacheEntry::new(self.origin.clone(), self.destination.clone(),
max_age, false, HeaderOrMethod::HeaderData(h.to_string())));
}
}
cors_response
}
}
pub struct CORSResponse {
pub network_error: bool,
pub headers: Headers
}
impl CORSResponse {
fn new() -> CORSResponse {
CORSResponse {
network_error: false,
headers: Headers::new()
}
}
fn new_error() -> CORSResponse {
CORSResponse {
network_error: true,
headers: Headers::new()
}
}
}
// CORS Cache stuff
/// A CORS cache object. Anchor it somewhere to the user agent.
#[derive(Clone)]
pub struct CORSCache(Vec<CORSCacheEntry>);
/// Union type for CORS cache entries
/// Each entry might pertain to a header or method
#[derive(Clone)]
pub enum HeaderOrMethod {
HeaderData(String),
MethodData(Method)
}
impl HeaderOrMethod {
fn match_header(&self, header_name: &str) -> bool {
match *self {
HeaderOrMethod::HeaderData(ref s) => s.eq_ignore_ascii_case(header_name),
_ => false
}
}
fn match_method(&self, method: &Method) -> bool {
match *self {
HeaderOrMethod::MethodData(ref m) => m == method,
_ => false
}
}
}
// An entry in the CORS cache
#[derive(Clone)]
pub struct CORSCacheEntry {
pub origin: Url,
pub url: Url,
pub max_age: u32,
pub credentials: bool,
pub header_or_method: HeaderOrMethod,
created: Timespec
}
impl CORSCacheEntry {
fn new(origin:Url,
url: Url,
max_age: u32,
credentials: bool,
header_or_method: HeaderOrMethod) -> CORSCacheEntry {
CORSCacheEntry {
origin: origin,
url: url,
max_age: max_age,
credentials: credentials,
header_or_method: header_or_method,
created: time::now().to_timespec()
}
}
}
impl CORSCache {
/// https://fetch.spec.whatwg.org/#concept-cache-clear
#[allow(dead_code)]
fn clear(&mut self, request: &CORSRequest) {
let CORSCache(buf) = self.clone();
let new_buf: Vec<CORSCacheEntry> =
buf.into_iter()
.filter(|e| e.origin == request.origin && request.destination == e.url)
.collect();
*self = CORSCache(new_buf);
}
// Remove old entries
fn cleanup(&mut self) {
let CORSCache(buf) = self.clone();
let now = time::now().to_timespec();
let new_buf: Vec<CORSCacheEntry> = buf.into_iter()
.filter(|e| now.sec > e.created.sec + e.max_age as i64)
.collect();
*self = CORSCache(new_buf);
}
/// https://fetch.spec.whatwg.org/#concept-cache-match-header
fn find_entry_by_header<'a>(&'a mut self,
request: &CORSRequest,
header_name: &str) -> Option<&'a mut CORSCacheEntry> {
self.cleanup();
let CORSCache(ref mut buf) = *self;
// Credentials are not yet implemented here
let entry = buf.iter_mut().find(|e| e.origin.scheme == request.origin.scheme &&
e.origin.host() == request.origin.host() &&
e.origin.port() == request.origin.port() &&
e.url == request.destination &&
e.header_or_method.match_header(header_name));
entry
}
fn match_header(&mut self, request: &CORSRequest, header_name: &str) -> bool {
self.find_entry_by_header(request, header_name).is_some()
}
fn match_header_and_update(&mut self, request: &CORSRequest, header_name: &str, new_max_age: u32) -> bool {
self.find_entry_by_header(request, header_name).map(|e| e.max_age = new_max_age).is_some()
}
fn find_entry_by_method<'a>(&'a mut self,
request: &CORSRequest,
method: &Method) -> Option<&'a mut CORSCacheEntry> {
// we can take the method from CORSRequest itself
self.cleanup();
let CORSCache(ref mut buf) = *self;
// Credentials are not yet implemented here
let entry = buf.iter_mut().find(|e| e.origin.scheme == request.origin.scheme &&
e.origin.host() == request.origin.host() &&
e.origin.port() == request.origin.port() &&
e.url == request.destination &&
e.header_or_method.match_method(method));
entry
}
/// https://fetch.spec.whatwg.org/#concept-cache-match-method
fn match_method(&mut self, request: &CORSRequest, method: &Method) -> bool {
self.find_entry_by_method(request, method).is_some()
}
fn match_method_and_update(&mut self, request: &CORSRequest, method: &Method, new_max_age: u32) -> bool {
self.find_entry_by_method(request, method).map(|e| e.max_age = new_max_age).is_some()
}
fn insert(&mut self, entry: CORSCacheEntry) {
self.cleanup();
let CORSCache(ref mut buf) = *self;
buf.push(entry);
}
}
fn is_simple_header(h: &HeaderView) -> bool {
//FIXME: use h.is::<HeaderType>() when AcceptLanguage and
//ContentLanguage headers exist
match &*h.name().to_ascii_lowercase() {
"accept" | "accept-language" | "content-language" => true,
"content-type" => match h.value() {
Some(&ContentType(Mime(TopLevel::Text, SubLevel::Plain, _))) |
Some(&ContentType(Mime(TopLevel::Application, SubLevel::WwwFormUrlEncoded, _))) |
Some(&ContentType(Mime(TopLevel::Multipart, SubLevel::FormData, _))) => true,
_ => false
},
_ => false
}
}
fn is_simple_method(m: &Method) -> bool {
match *m {
Method::Get | Method::Head | Method::Post => true,
_ => false
}
}
/// Perform a CORS check on a header list and CORS request
/// https://fetch.spec.whatwg.org/#cors-check
pub fn allow_cross_origin_request(req: &CORSRequest, headers: &Headers) -> bool {
match headers.get::<AccessControlAllowOrigin>() {
Some(&AccessControlAllowOrigin::Any) => true, // Not always true, depends on credentials mode
// FIXME: https://github.com/servo/servo/issues/6020
Some(&AccessControlAllowOrigin::Value(ref url)) =>
url.scheme == req.origin.scheme &&
url.host() == req.origin.host() &&
url.port() == req.origin.port(),
Some(&AccessControlAllowOrigin::Null) |
None => false
}
}
|
new
|
identifier_name
|
cors.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! A partial implementation of CORS
//! For now this library is XHR-specific.
//! For stuff involving `<img>`, `<iframe>`, `<form>`, etc please check what
//! the request mode should be and compare with the fetch spec
//! This library will eventually become the core of the Fetch crate
//! with CORSRequest being expanded into FetchRequest (etc)
use network_listener::{NetworkListener, PreInvoke};
use script_task::ScriptChan;
use net_traits::{AsyncResponseTarget, AsyncResponseListener, ResponseAction, Metadata};
use std::ascii::AsciiExt;
use std::borrow::ToOwned;
use std::cell::RefCell;
use std::sync::{Arc, Mutex};
use time;
use time::{now, Timespec};
use hyper::header::{AccessControlRequestMethod, AccessControlAllowMethods};
use hyper::header::{AccessControlMaxAge, AccessControlAllowOrigin};
use hyper::header::{AccessControlRequestHeaders, AccessControlAllowHeaders};
use hyper::header::{Headers, HeaderView};
use hyper::client::Request;
use hyper::mime::{Mime, TopLevel, SubLevel};
use hyper::header::{ContentType, Host};
use hyper::method::Method;
use hyper::status::StatusClass::Success;
use unicase::UniCase;
use url::{SchemeData, Url};
use util::task::spawn_named;
/// Interface for network listeners concerned with CORS checks. Proper network requests
/// should be initiated from this method, based on the response provided.
pub trait AsyncCORSResponseListener {
fn response_available(&self, response: CORSResponse);
}
#[derive(Clone)]
pub struct CORSRequest {
pub origin: Url,
pub destination: Url,
pub mode: RequestMode,
pub method: Method,
pub headers: Headers,
/// CORS preflight flag (https://fetch.spec.whatwg.org/#concept-http-fetch)
/// Indicates that a CORS preflight request and/or cache check is to be performed
pub preflight_flag: bool
}
/// https://fetch.spec.whatwg.org/#concept-request-mode
/// This only covers some of the request modes. The
/// `same-origin` and `no CORS` modes are unnecessary for XHR.
#[derive(PartialEq, Copy, Clone)]
pub enum RequestMode {
CORS, // CORS
ForcedPreflight // CORS-with-forced-preflight
}
impl CORSRequest {
/// Creates a CORS request if necessary. Will return an error when fetching is forbidden
pub fn maybe_new(referer: Url, destination: Url, mode: RequestMode,
method: Method, headers: Headers) -> Result<Option<CORSRequest>, ()> {
if referer.scheme == destination.scheme &&
referer.host() == destination.host() &&
referer.port() == destination.port() {
return Ok(None); // Not cross-origin, proceed with a normal fetch
}
match &*destination.scheme {
|
let mut req = CORSRequest::new(referer, destination, mode, method, headers);
req.preflight_flag =!is_simple_method(&req.method) || mode == RequestMode::ForcedPreflight;
if req.headers.iter().all(|h| is_simple_header(&h)) {
req.preflight_flag = true;
}
Ok(Some(req))
},
_ => Err(()),
}
}
fn new(mut referer: Url, destination: Url, mode: RequestMode, method: Method,
headers: Headers) -> CORSRequest {
match referer.scheme_data {
SchemeData::Relative(ref mut data) => data.path = vec!(),
_ => {}
};
referer.fragment = None;
referer.query = None;
CORSRequest {
origin: referer,
destination: destination,
mode: mode,
method: method,
headers: headers,
preflight_flag: false
}
}
pub fn http_fetch_async(&self,
listener: Box<AsyncCORSResponseListener+Send>,
script_chan: Box<ScriptChan+Send>) {
struct CORSContext {
listener: Box<AsyncCORSResponseListener+Send>,
response: RefCell<Option<CORSResponse>>,
}
// This is shoe-horning the CORSReponse stuff into the rest of the async network
// framework right now. It would be worth redesigning http_fetch to do this properly.
impl AsyncResponseListener for CORSContext {
fn headers_available(&self, _metadata: Metadata) {
}
fn data_available(&self, _payload: Vec<u8>) {
}
fn response_complete(&self, _status: Result<(), String>) {
let response = self.response.borrow_mut().take().unwrap();
self.listener.response_available(response);
}
}
impl PreInvoke for CORSContext {}
let context = CORSContext {
listener: listener,
response: RefCell::new(None),
};
let listener = NetworkListener {
context: Arc::new(Mutex::new(context)),
script_chan: script_chan,
};
// TODO: this exists only to make preflight check non-blocking
// perhaps should be handled by the resource task?
let req = self.clone();
spawn_named("cors".to_owned(), move || {
let response = req.http_fetch();
let mut context = listener.context.lock();
let context = context.as_mut().unwrap();
*context.response.borrow_mut() = Some(response);
listener.invoke_with_listener(ResponseAction::ResponseComplete(Ok(())));
});
}
/// http://fetch.spec.whatwg.org/#concept-http-fetch
/// This method assumes that the CORS flag is set
/// This does not perform the full HTTP fetch, rather it handles part of the CORS filtering
/// if self.mode is ForcedPreflight, then the CORS-with-forced-preflight
/// fetch flag is set as well
pub fn http_fetch(&self) -> CORSResponse {
let response = CORSResponse::new();
// Step 2: Handle service workers (unimplemented)
// Step 3
// Substep 1: Service workers (unimplemented )
// Substep 2
let cache = &mut CORSCache(vec!()); // XXXManishearth Should come from user agent
if self.preflight_flag &&
!cache.match_method(self, &self.method) &&
!self.headers.iter().all(|h| is_simple_header(&h) && cache.match_header(self, h.name())) {
if!is_simple_method(&self.method) || self.mode == RequestMode::ForcedPreflight {
return self.preflight_fetch();
// Everything after this is part of XHR::fetch()
// Expect the organization of code to improve once we have a fetch crate
}
}
response
}
/// https://fetch.spec.whatwg.org/#cors-preflight-fetch
fn preflight_fetch(&self) -> CORSResponse {
let error = CORSResponse::new_error();
let mut cors_response = CORSResponse::new();
let mut preflight = self.clone(); // Step 1
preflight.method = Method::Options; // Step 2
preflight.headers = Headers::new(); // Step 3
// Step 4
preflight.headers.set(AccessControlRequestMethod(self.method.clone()));
// Step 5 - 7
let mut header_names = vec!();
for header in self.headers.iter() {
header_names.push(header.name().to_owned());
}
header_names.sort();
preflight.headers.set(AccessControlRequestHeaders(header_names.into_iter().map(UniCase).collect()));
// Step 8 unnecessary, we don't use the request body
// Step 9, 10 unnecessary, we're writing our own fetch code
// Step 11
let preflight_request = Request::new(preflight.method, preflight.destination);
let mut req = match preflight_request {
Ok(req) => req,
Err(_) => return error
};
let host = req.headers().get::<Host>().unwrap().clone();
*req.headers_mut() = preflight.headers.clone();
req.headers_mut().set(host);
let stream = match req.start() {
Ok(s) => s,
Err(_) => return error
};
let response = match stream.send() {
Ok(r) => r,
Err(_) => return error
};
// Step 12
match response.status.class() {
Success => {}
_ => return error
}
cors_response.headers = response.headers.clone();
// Substeps 1-3 (parsing rules: https://fetch.spec.whatwg.org/#http-new-header-syntax)
let methods_substep4 = [self.method.clone()];
let mut methods = match response.headers.get() {
Some(&AccessControlAllowMethods(ref v)) => &**v,
_ => return error
};
let headers = match response.headers.get() {
Some(&AccessControlAllowHeaders(ref h)) => h,
_ => return error
};
// Substep 4
if methods.len() == 0 || preflight.mode == RequestMode::ForcedPreflight {
methods = &methods_substep4;
}
// Substep 5
if!is_simple_method(&self.method) &&
!methods.iter().any(|m| m == &self.method) {
return error;
}
// Substep 6
for h in self.headers.iter() {
if is_simple_header(&h) {
continue;
}
if!headers.iter().any(|ref h2| h.name().eq_ignore_ascii_case(h2)) {
return error;
}
}
// Substep 7, 8
let max_age = match response.headers.get() {
Some(&AccessControlMaxAge(num)) => num,
None => 0
};
// Substep 9: Impose restrictions on max-age, if any (unimplemented)
// Substeps 10-12: Add a cache (partially implemented, XXXManishearth)
// This cache should come from the user agent, creating a new one here to check
// for compile time errors
let cache = &mut CORSCache(vec!());
for m in methods.iter() {
let cache_match = cache.match_method_and_update(self, m, max_age);
if!cache_match {
cache.insert(CORSCacheEntry::new(self.origin.clone(), self.destination.clone(),
max_age, false, HeaderOrMethod::MethodData(m.clone())));
}
}
for h in response.headers.iter() {
let cache_match = cache.match_header_and_update(self, h.name(), max_age);
if!cache_match {
cache.insert(CORSCacheEntry::new(self.origin.clone(), self.destination.clone(),
max_age, false, HeaderOrMethod::HeaderData(h.to_string())));
}
}
cors_response
}
}
pub struct CORSResponse {
pub network_error: bool,
pub headers: Headers
}
impl CORSResponse {
fn new() -> CORSResponse {
CORSResponse {
network_error: false,
headers: Headers::new()
}
}
fn new_error() -> CORSResponse {
CORSResponse {
network_error: true,
headers: Headers::new()
}
}
}
// CORS Cache stuff
/// A CORS cache object. Anchor it somewhere to the user agent.
#[derive(Clone)]
pub struct CORSCache(Vec<CORSCacheEntry>);
/// Union type for CORS cache entries
/// Each entry might pertain to a header or method
#[derive(Clone)]
pub enum HeaderOrMethod {
HeaderData(String),
MethodData(Method)
}
impl HeaderOrMethod {
fn match_header(&self, header_name: &str) -> bool {
match *self {
HeaderOrMethod::HeaderData(ref s) => s.eq_ignore_ascii_case(header_name),
_ => false
}
}
fn match_method(&self, method: &Method) -> bool {
match *self {
HeaderOrMethod::MethodData(ref m) => m == method,
_ => false
}
}
}
// An entry in the CORS cache
#[derive(Clone)]
pub struct CORSCacheEntry {
pub origin: Url,
pub url: Url,
pub max_age: u32,
pub credentials: bool,
pub header_or_method: HeaderOrMethod,
created: Timespec
}
impl CORSCacheEntry {
fn new(origin:Url,
url: Url,
max_age: u32,
credentials: bool,
header_or_method: HeaderOrMethod) -> CORSCacheEntry {
CORSCacheEntry {
origin: origin,
url: url,
max_age: max_age,
credentials: credentials,
header_or_method: header_or_method,
created: time::now().to_timespec()
}
}
}
impl CORSCache {
/// https://fetch.spec.whatwg.org/#concept-cache-clear
#[allow(dead_code)]
fn clear(&mut self, request: &CORSRequest) {
let CORSCache(buf) = self.clone();
let new_buf: Vec<CORSCacheEntry> =
buf.into_iter()
.filter(|e| e.origin == request.origin && request.destination == e.url)
.collect();
*self = CORSCache(new_buf);
}
// Remove old entries
fn cleanup(&mut self) {
let CORSCache(buf) = self.clone();
let now = time::now().to_timespec();
let new_buf: Vec<CORSCacheEntry> = buf.into_iter()
.filter(|e| now.sec > e.created.sec + e.max_age as i64)
.collect();
*self = CORSCache(new_buf);
}
/// https://fetch.spec.whatwg.org/#concept-cache-match-header
fn find_entry_by_header<'a>(&'a mut self,
request: &CORSRequest,
header_name: &str) -> Option<&'a mut CORSCacheEntry> {
self.cleanup();
let CORSCache(ref mut buf) = *self;
// Credentials are not yet implemented here
let entry = buf.iter_mut().find(|e| e.origin.scheme == request.origin.scheme &&
e.origin.host() == request.origin.host() &&
e.origin.port() == request.origin.port() &&
e.url == request.destination &&
e.header_or_method.match_header(header_name));
entry
}
fn match_header(&mut self, request: &CORSRequest, header_name: &str) -> bool {
self.find_entry_by_header(request, header_name).is_some()
}
fn match_header_and_update(&mut self, request: &CORSRequest, header_name: &str, new_max_age: u32) -> bool {
self.find_entry_by_header(request, header_name).map(|e| e.max_age = new_max_age).is_some()
}
fn find_entry_by_method<'a>(&'a mut self,
request: &CORSRequest,
method: &Method) -> Option<&'a mut CORSCacheEntry> {
// we can take the method from CORSRequest itself
self.cleanup();
let CORSCache(ref mut buf) = *self;
// Credentials are not yet implemented here
let entry = buf.iter_mut().find(|e| e.origin.scheme == request.origin.scheme &&
e.origin.host() == request.origin.host() &&
e.origin.port() == request.origin.port() &&
e.url == request.destination &&
e.header_or_method.match_method(method));
entry
}
/// https://fetch.spec.whatwg.org/#concept-cache-match-method
fn match_method(&mut self, request: &CORSRequest, method: &Method) -> bool {
self.find_entry_by_method(request, method).is_some()
}
fn match_method_and_update(&mut self, request: &CORSRequest, method: &Method, new_max_age: u32) -> bool {
self.find_entry_by_method(request, method).map(|e| e.max_age = new_max_age).is_some()
}
fn insert(&mut self, entry: CORSCacheEntry) {
self.cleanup();
let CORSCache(ref mut buf) = *self;
buf.push(entry);
}
}
fn is_simple_header(h: &HeaderView) -> bool {
//FIXME: use h.is::<HeaderType>() when AcceptLanguage and
//ContentLanguage headers exist
match &*h.name().to_ascii_lowercase() {
"accept" | "accept-language" | "content-language" => true,
"content-type" => match h.value() {
Some(&ContentType(Mime(TopLevel::Text, SubLevel::Plain, _))) |
Some(&ContentType(Mime(TopLevel::Application, SubLevel::WwwFormUrlEncoded, _))) |
Some(&ContentType(Mime(TopLevel::Multipart, SubLevel::FormData, _))) => true,
_ => false
},
_ => false
}
}
fn is_simple_method(m: &Method) -> bool {
match *m {
Method::Get | Method::Head | Method::Post => true,
_ => false
}
}
/// Perform a CORS check on a header list and CORS request
/// https://fetch.spec.whatwg.org/#cors-check
pub fn allow_cross_origin_request(req: &CORSRequest, headers: &Headers) -> bool {
match headers.get::<AccessControlAllowOrigin>() {
Some(&AccessControlAllowOrigin::Any) => true, // Not always true, depends on credentials mode
// FIXME: https://github.com/servo/servo/issues/6020
Some(&AccessControlAllowOrigin::Value(ref url)) =>
url.scheme == req.origin.scheme &&
url.host() == req.origin.host() &&
url.port() == req.origin.port(),
Some(&AccessControlAllowOrigin::Null) |
None => false
}
}
|
// TODO: If the request's same origin data url flag is set (which isn't the case for XHR)
// we can fetch a data URL normally. about:blank can also be fetched by XHR
"http" | "https" => {
|
random_line_split
|
issue-1451.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test
struct T { f: @fn() };
struct
|
{ f: @fn() };
fn fooS(t: S) {
}
fn fooT(t: T) {
}
fn bar() {
}
fn main() {
let x: @fn() = bar;
fooS(S {f: x});
fooS(S {f: bar});
let x: @fn() = bar;
fooT(T {f: x});
fooT(T {f: bar});
}
|
S
|
identifier_name
|
issue-1451.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
// xfail-test
struct T { f: @fn() };
struct S { f: @fn() };
fn fooS(t: S) {
}
fn fooT(t: T) {
}
fn bar() {
}
fn main() {
let x: @fn() = bar;
fooS(S {f: x});
fooS(S {f: bar});
let x: @fn() = bar;
fooT(T {f: x});
fooT(T {f: bar});
}
|
random_line_split
|
|
issue-1451.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test
struct T { f: @fn() };
struct S { f: @fn() };
fn fooS(t: S) {
}
fn fooT(t: T)
|
fn bar() {
}
fn main() {
let x: @fn() = bar;
fooS(S {f: x});
fooS(S {f: bar});
let x: @fn() = bar;
fooT(T {f: x});
fooT(T {f: bar});
}
|
{
}
|
identifier_body
|
webglshaderprecisionformat.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// https://www.khronos.org/registry/webgl/specs/latest/1.0/webgl.idl
use dom::bindings::codegen::Bindings::WebGLShaderPrecisionFormatBinding;
use dom::bindings::codegen::Bindings::WebGLShaderPrecisionFormatBinding::WebGLShaderPrecisionFormatMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{Temporary, JSRef};
use dom::bindings::utils::{Reflector,reflect_dom_object};
#[dom_struct]
pub struct WebGLShaderPrecisionFormat {
reflector_: Reflector,
range_min: i32,
range_max: i32,
precision: i32,
}
impl WebGLShaderPrecisionFormat {
fn new_inherited(range_min: i32, range_max: i32, precision: i32) -> WebGLShaderPrecisionFormat {
WebGLShaderPrecisionFormat {
reflector_: Reflector::new(),
range_min: range_min,
range_max: range_max,
precision: precision,
}
}
pub fn new(global: GlobalRef,
range_min: i32,
range_max: i32,
precision: i32) -> Temporary<WebGLShaderPrecisionFormat> {
reflect_dom_object(
box WebGLShaderPrecisionFormat::new_inherited(range_min, range_max, precision),
global,
WebGLShaderPrecisionFormatBinding::Wrap)
}
}
impl<'a> WebGLShaderPrecisionFormatMethods for JSRef<'a, WebGLShaderPrecisionFormat> {
fn RangeMin(self) -> i32 {
self.range_min
|
}
fn Precision(self) -> i32 {
self.precision
}
}
|
}
fn RangeMax(self) -> i32 {
self.range_max
|
random_line_split
|
webglshaderprecisionformat.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// https://www.khronos.org/registry/webgl/specs/latest/1.0/webgl.idl
use dom::bindings::codegen::Bindings::WebGLShaderPrecisionFormatBinding;
use dom::bindings::codegen::Bindings::WebGLShaderPrecisionFormatBinding::WebGLShaderPrecisionFormatMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{Temporary, JSRef};
use dom::bindings::utils::{Reflector,reflect_dom_object};
#[dom_struct]
pub struct WebGLShaderPrecisionFormat {
reflector_: Reflector,
range_min: i32,
range_max: i32,
precision: i32,
}
impl WebGLShaderPrecisionFormat {
fn
|
(range_min: i32, range_max: i32, precision: i32) -> WebGLShaderPrecisionFormat {
WebGLShaderPrecisionFormat {
reflector_: Reflector::new(),
range_min: range_min,
range_max: range_max,
precision: precision,
}
}
pub fn new(global: GlobalRef,
range_min: i32,
range_max: i32,
precision: i32) -> Temporary<WebGLShaderPrecisionFormat> {
reflect_dom_object(
box WebGLShaderPrecisionFormat::new_inherited(range_min, range_max, precision),
global,
WebGLShaderPrecisionFormatBinding::Wrap)
}
}
impl<'a> WebGLShaderPrecisionFormatMethods for JSRef<'a, WebGLShaderPrecisionFormat> {
fn RangeMin(self) -> i32 {
self.range_min
}
fn RangeMax(self) -> i32 {
self.range_max
}
fn Precision(self) -> i32 {
self.precision
}
}
|
new_inherited
|
identifier_name
|
webglshaderprecisionformat.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// https://www.khronos.org/registry/webgl/specs/latest/1.0/webgl.idl
use dom::bindings::codegen::Bindings::WebGLShaderPrecisionFormatBinding;
use dom::bindings::codegen::Bindings::WebGLShaderPrecisionFormatBinding::WebGLShaderPrecisionFormatMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{Temporary, JSRef};
use dom::bindings::utils::{Reflector,reflect_dom_object};
#[dom_struct]
pub struct WebGLShaderPrecisionFormat {
reflector_: Reflector,
range_min: i32,
range_max: i32,
precision: i32,
}
impl WebGLShaderPrecisionFormat {
fn new_inherited(range_min: i32, range_max: i32, precision: i32) -> WebGLShaderPrecisionFormat
|
pub fn new(global: GlobalRef,
range_min: i32,
range_max: i32,
precision: i32) -> Temporary<WebGLShaderPrecisionFormat> {
reflect_dom_object(
box WebGLShaderPrecisionFormat::new_inherited(range_min, range_max, precision),
global,
WebGLShaderPrecisionFormatBinding::Wrap)
}
}
impl<'a> WebGLShaderPrecisionFormatMethods for JSRef<'a, WebGLShaderPrecisionFormat> {
fn RangeMin(self) -> i32 {
self.range_min
}
fn RangeMax(self) -> i32 {
self.range_max
}
fn Precision(self) -> i32 {
self.precision
}
}
|
{
WebGLShaderPrecisionFormat {
reflector_: Reflector::new(),
range_min: range_min,
range_max: range_max,
precision: precision,
}
}
|
identifier_body
|
camera.rs
|
// See LICENSE file for copyright and license details.
use std::f32::consts::{PI};
use num::{Float};
use cgmath::{perspective, rad, Matrix4, Vector, Vector3, Rad};
use common::types::{Size2, ZFloat};
use common::misc::{clamp};
use zgl::{Zgl};
use types::{WorldPos};
pub struct
|
{
x_angle: Rad<ZFloat>,
z_angle: Rad<ZFloat>,
pos: WorldPos,
max_pos: WorldPos,
zoom: ZFloat,
projection_mat: Matrix4<ZFloat>,
}
fn get_projection_mat(win_size: &Size2) -> Matrix4<ZFloat> {
let fov = rad(PI / 4.0);
let ratio = win_size.w as ZFloat / win_size.h as ZFloat;
let display_range_min = 0.1;
let display_range_max = 100.0;
perspective(
fov, ratio, display_range_min, display_range_max)
}
impl Camera {
pub fn new(win_size: &Size2) -> Camera {
Camera {
x_angle: rad(PI / 4.0),
z_angle: rad(0.0),
pos: WorldPos{v: Vector::from_value(0.0)},
max_pos: WorldPos{v: Vector::from_value(0.0)},
zoom: 20.0,
projection_mat: get_projection_mat(win_size),
}
}
pub fn mat(&self, zgl: &Zgl) -> Matrix4<ZFloat> {
let mut m = self.projection_mat;
m = zgl.tr(m, &Vector3{x: 0.0, y: 0.0, z: -self.zoom});
m = zgl.rot_x(m, &-self.x_angle);
m = zgl.rot_z(m, &-self.z_angle);
m = zgl.tr(m, &self.pos.v);
m
}
pub fn add_horizontal_angle(&mut self, angle: Rad<ZFloat>) {
self.z_angle = self.z_angle + angle;
while self.z_angle < rad(0.0) {
self.z_angle = self.z_angle + rad(PI * 2.0);
}
while self.z_angle > rad(PI * 2.0) {
self.z_angle = self.z_angle - rad(PI * 2.0);
}
}
pub fn add_vertical_angle(&mut self, angle: Rad<ZFloat>) {
self.x_angle = self.x_angle + angle;
let min = rad(PI / 18.0);
let max = rad(PI / 4.0);
self.x_angle = clamp(self.x_angle, min, max);
}
fn clamp_pos(&mut self) {
self.pos.v.x = clamp(self.pos.v.x, self.max_pos.v.x, 0.0);
self.pos.v.y = clamp(self.pos.v.y, self.max_pos.v.y, 0.0);
}
pub fn set_pos(&mut self, pos: WorldPos) {
self.pos = pos;
self.clamp_pos();
}
pub fn set_max_pos(&mut self, max_pos: WorldPos) {
self.max_pos = max_pos;
}
pub fn change_zoom(&mut self, ratio: ZFloat) {
self.zoom *= ratio;
self.zoom = clamp(self.zoom, 10.0, 40.0);
}
pub fn get_z_angle(&self) -> &Rad<ZFloat> {
&self.z_angle
}
pub fn get_x_angle(&self) -> &Rad<ZFloat> {
&self.x_angle
}
// TODO: rename to'move'
pub fn move_camera(&mut self, angle: Rad<ZFloat>, speed: ZFloat) {
let diff = (self.z_angle - angle).s;
let dx = diff.sin();
let dy = diff.cos();
// TODO: handle zoom
// self.pos.v.x -= dy * speed * self.zoom;
// self.pos.v.y -= dx * speed * self.zoom;
self.pos.v.x -= dy * speed;
self.pos.v.y -= dx * speed;
self.clamp_pos();
}
pub fn regenerate_projection_mat(&mut self, win_size: &Size2) {
self.projection_mat = get_projection_mat(win_size);
}
}
// vim: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
|
Camera
|
identifier_name
|
camera.rs
|
// See LICENSE file for copyright and license details.
use std::f32::consts::{PI};
use num::{Float};
use cgmath::{perspective, rad, Matrix4, Vector, Vector3, Rad};
use common::types::{Size2, ZFloat};
use common::misc::{clamp};
use zgl::{Zgl};
use types::{WorldPos};
pub struct Camera {
x_angle: Rad<ZFloat>,
z_angle: Rad<ZFloat>,
pos: WorldPos,
max_pos: WorldPos,
zoom: ZFloat,
projection_mat: Matrix4<ZFloat>,
}
fn get_projection_mat(win_size: &Size2) -> Matrix4<ZFloat> {
let fov = rad(PI / 4.0);
let ratio = win_size.w as ZFloat / win_size.h as ZFloat;
let display_range_min = 0.1;
let display_range_max = 100.0;
perspective(
fov, ratio, display_range_min, display_range_max)
}
impl Camera {
pub fn new(win_size: &Size2) -> Camera {
Camera {
x_angle: rad(PI / 4.0),
z_angle: rad(0.0),
pos: WorldPos{v: Vector::from_value(0.0)},
max_pos: WorldPos{v: Vector::from_value(0.0)},
zoom: 20.0,
projection_mat: get_projection_mat(win_size),
}
}
pub fn mat(&self, zgl: &Zgl) -> Matrix4<ZFloat> {
let mut m = self.projection_mat;
m = zgl.tr(m, &Vector3{x: 0.0, y: 0.0, z: -self.zoom});
m = zgl.rot_x(m, &-self.x_angle);
m = zgl.rot_z(m, &-self.z_angle);
m = zgl.tr(m, &self.pos.v);
m
}
pub fn add_horizontal_angle(&mut self, angle: Rad<ZFloat>) {
self.z_angle = self.z_angle + angle;
while self.z_angle < rad(0.0) {
self.z_angle = self.z_angle + rad(PI * 2.0);
}
while self.z_angle > rad(PI * 2.0) {
self.z_angle = self.z_angle - rad(PI * 2.0);
}
}
pub fn add_vertical_angle(&mut self, angle: Rad<ZFloat>) {
self.x_angle = self.x_angle + angle;
let min = rad(PI / 18.0);
let max = rad(PI / 4.0);
self.x_angle = clamp(self.x_angle, min, max);
}
fn clamp_pos(&mut self) {
self.pos.v.x = clamp(self.pos.v.x, self.max_pos.v.x, 0.0);
self.pos.v.y = clamp(self.pos.v.y, self.max_pos.v.y, 0.0);
}
pub fn set_pos(&mut self, pos: WorldPos)
|
pub fn set_max_pos(&mut self, max_pos: WorldPos) {
self.max_pos = max_pos;
}
pub fn change_zoom(&mut self, ratio: ZFloat) {
self.zoom *= ratio;
self.zoom = clamp(self.zoom, 10.0, 40.0);
}
pub fn get_z_angle(&self) -> &Rad<ZFloat> {
&self.z_angle
}
pub fn get_x_angle(&self) -> &Rad<ZFloat> {
&self.x_angle
}
// TODO: rename to'move'
pub fn move_camera(&mut self, angle: Rad<ZFloat>, speed: ZFloat) {
let diff = (self.z_angle - angle).s;
let dx = diff.sin();
let dy = diff.cos();
// TODO: handle zoom
// self.pos.v.x -= dy * speed * self.zoom;
// self.pos.v.y -= dx * speed * self.zoom;
self.pos.v.x -= dy * speed;
self.pos.v.y -= dx * speed;
self.clamp_pos();
}
pub fn regenerate_projection_mat(&mut self, win_size: &Size2) {
self.projection_mat = get_projection_mat(win_size);
}
}
// vim: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
|
{
self.pos = pos;
self.clamp_pos();
}
|
identifier_body
|
camera.rs
|
// See LICENSE file for copyright and license details.
use std::f32::consts::{PI};
use num::{Float};
use cgmath::{perspective, rad, Matrix4, Vector, Vector3, Rad};
use common::types::{Size2, ZFloat};
use common::misc::{clamp};
use zgl::{Zgl};
use types::{WorldPos};
pub struct Camera {
x_angle: Rad<ZFloat>,
z_angle: Rad<ZFloat>,
pos: WorldPos,
max_pos: WorldPos,
zoom: ZFloat,
projection_mat: Matrix4<ZFloat>,
}
fn get_projection_mat(win_size: &Size2) -> Matrix4<ZFloat> {
let fov = rad(PI / 4.0);
let ratio = win_size.w as ZFloat / win_size.h as ZFloat;
let display_range_min = 0.1;
let display_range_max = 100.0;
perspective(
fov, ratio, display_range_min, display_range_max)
}
impl Camera {
pub fn new(win_size: &Size2) -> Camera {
Camera {
x_angle: rad(PI / 4.0),
z_angle: rad(0.0),
pos: WorldPos{v: Vector::from_value(0.0)},
max_pos: WorldPos{v: Vector::from_value(0.0)},
zoom: 20.0,
projection_mat: get_projection_mat(win_size),
}
}
pub fn mat(&self, zgl: &Zgl) -> Matrix4<ZFloat> {
let mut m = self.projection_mat;
m = zgl.tr(m, &Vector3{x: 0.0, y: 0.0, z: -self.zoom});
m = zgl.rot_x(m, &-self.x_angle);
m = zgl.rot_z(m, &-self.z_angle);
m = zgl.tr(m, &self.pos.v);
m
}
pub fn add_horizontal_angle(&mut self, angle: Rad<ZFloat>) {
self.z_angle = self.z_angle + angle;
while self.z_angle < rad(0.0) {
self.z_angle = self.z_angle + rad(PI * 2.0);
}
while self.z_angle > rad(PI * 2.0) {
self.z_angle = self.z_angle - rad(PI * 2.0);
}
}
pub fn add_vertical_angle(&mut self, angle: Rad<ZFloat>) {
self.x_angle = self.x_angle + angle;
let min = rad(PI / 18.0);
let max = rad(PI / 4.0);
|
}
fn clamp_pos(&mut self) {
self.pos.v.x = clamp(self.pos.v.x, self.max_pos.v.x, 0.0);
self.pos.v.y = clamp(self.pos.v.y, self.max_pos.v.y, 0.0);
}
pub fn set_pos(&mut self, pos: WorldPos) {
self.pos = pos;
self.clamp_pos();
}
pub fn set_max_pos(&mut self, max_pos: WorldPos) {
self.max_pos = max_pos;
}
pub fn change_zoom(&mut self, ratio: ZFloat) {
self.zoom *= ratio;
self.zoom = clamp(self.zoom, 10.0, 40.0);
}
pub fn get_z_angle(&self) -> &Rad<ZFloat> {
&self.z_angle
}
pub fn get_x_angle(&self) -> &Rad<ZFloat> {
&self.x_angle
}
// TODO: rename to'move'
pub fn move_camera(&mut self, angle: Rad<ZFloat>, speed: ZFloat) {
let diff = (self.z_angle - angle).s;
let dx = diff.sin();
let dy = diff.cos();
// TODO: handle zoom
// self.pos.v.x -= dy * speed * self.zoom;
// self.pos.v.y -= dx * speed * self.zoom;
self.pos.v.x -= dy * speed;
self.pos.v.y -= dx * speed;
self.clamp_pos();
}
pub fn regenerate_projection_mat(&mut self, win_size: &Size2) {
self.projection_mat = get_projection_mat(win_size);
}
}
// vim: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
|
self.x_angle = clamp(self.x_angle, min, max);
|
random_line_split
|
borrowable.rs
|
use std::hash::Hash;
use std::borrow::Borrow;
use std::collections::HashMap;
struct Borrowable<K, V> {
table: HashMap<K, V>,
}
impl<K, V> Borrowable<K, V>
where
K: Eq + Hash,
{
fn get<Q:?Sized>(&self, key: &Q) -> Option<&V>
|
{
self.table.get(key)
}
fn new() -> Borrowable<K, V> {
Borrowable {
table: HashMap::<K, V>::new(),
}
}
fn insert<Q:?Sized>(&mut self, key: &Q, value: V)
where
Q: ToOwned<Owned = K>,
K: Borrow<Q>,
{
self.table.insert(key.to_owned(), value);
}
}
pub fn demo_borrowable() {
let mut b = Borrowable::<String, &str>::new();
b.insert("name", "Jack");
b.insert("age", "23");
b.insert("sex", "male");
println!("{:?}", b.get("name"));
}
|
where
K: Borrow<Q>,
Q: Eq + Hash,
|
random_line_split
|
borrowable.rs
|
use std::hash::Hash;
use std::borrow::Borrow;
use std::collections::HashMap;
struct Borrowable<K, V> {
table: HashMap<K, V>,
}
impl<K, V> Borrowable<K, V>
where
K: Eq + Hash,
{
fn get<Q:?Sized>(&self, key: &Q) -> Option<&V>
where
K: Borrow<Q>,
Q: Eq + Hash,
{
self.table.get(key)
}
fn new() -> Borrowable<K, V> {
Borrowable {
table: HashMap::<K, V>::new(),
}
}
fn
|
<Q:?Sized>(&mut self, key: &Q, value: V)
where
Q: ToOwned<Owned = K>,
K: Borrow<Q>,
{
self.table.insert(key.to_owned(), value);
}
}
pub fn demo_borrowable() {
let mut b = Borrowable::<String, &str>::new();
b.insert("name", "Jack");
b.insert("age", "23");
b.insert("sex", "male");
println!("{:?}", b.get("name"));
}
|
insert
|
identifier_name
|
borrowable.rs
|
use std::hash::Hash;
use std::borrow::Borrow;
use std::collections::HashMap;
struct Borrowable<K, V> {
table: HashMap<K, V>,
}
impl<K, V> Borrowable<K, V>
where
K: Eq + Hash,
{
fn get<Q:?Sized>(&self, key: &Q) -> Option<&V>
where
K: Borrow<Q>,
Q: Eq + Hash,
{
self.table.get(key)
}
fn new() -> Borrowable<K, V>
|
fn insert<Q:?Sized>(&mut self, key: &Q, value: V)
where
Q: ToOwned<Owned = K>,
K: Borrow<Q>,
{
self.table.insert(key.to_owned(), value);
}
}
pub fn demo_borrowable() {
let mut b = Borrowable::<String, &str>::new();
b.insert("name", "Jack");
b.insert("age", "23");
b.insert("sex", "male");
println!("{:?}", b.get("name"));
}
|
{
Borrowable {
table: HashMap::<K, V>::new(),
}
}
|
identifier_body
|
utils.rs
|
//! Utility functions
use std::{
io,
time::{Duration, Instant},
};
use futures::{Async, Future, Poll};
use tokio::timer::Delay;
use tokio_io::{
io::{copy, Copy},
try_nb,
AsyncRead,
AsyncWrite,
};
use super::BUFFER_SIZE;
/// Copies all data from `r` to `w`, abort if timeout reaches
pub fn copy_timeout<R, W>(r: R, w: W, dur: Duration) -> CopyTimeout<R, W>
where
R: AsyncRead,
W: AsyncWrite,
{
CopyTimeout::new(r, w, dur)
}
/// Copies all data from `r` to `w`, abort if timeout reaches
pub struct CopyTimeout<R, W>
where
R: AsyncRead,
W: AsyncWrite,
{
r: Option<R>,
w: Option<W>,
timeout: Duration,
amt: u64,
timer: Option<Delay>,
buf: [u8; BUFFER_SIZE],
pos: usize,
cap: usize,
}
impl<R, W> CopyTimeout<R, W>
where
R: AsyncRead,
W: AsyncWrite,
{
fn new(r: R, w: W, timeout: Duration) -> CopyTimeout<R, W> {
CopyTimeout {
r: Some(r),
w: Some(w),
timeout,
amt: 0,
timer: None,
buf: [0u8; BUFFER_SIZE],
pos: 0,
cap: 0,
}
}
fn try_poll_timeout(&mut self) -> io::Result<()> {
match self.timer.as_mut() {
None => Ok(()),
Some(t) => match t.poll() {
Err(err) => panic!("Failed to poll on timer, err: {}", err),
Ok(Async::Ready(..)) => Err(io::Error::new(io::ErrorKind::TimedOut, "connection timed out")),
Ok(Async::NotReady) => Ok(()),
},
}
}
fn clear_timer(&mut self) {
let _ = self.timer.take();
}
|
fn read_or_set_timeout(&mut self) -> io::Result<usize> {
// First, return if timeout
self.try_poll_timeout()?;
// Then, unset the previous timeout
self.clear_timer();
match self.r.as_mut().unwrap().read(&mut self.buf) {
Ok(n) => Ok(n),
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.timer = Some(Delay::new(Instant::now() + self.timeout));
}
Err(e)
}
}
}
fn write_or_set_timeout(&mut self, beg: usize, end: usize) -> io::Result<usize> {
// First, return if timeout
self.try_poll_timeout()?;
// Then, unset the previous timeout
self.clear_timer();
match self.w.as_mut().unwrap().write(&self.buf[beg..end]) {
Ok(n) => Ok(n),
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.timer = Some(Delay::new(Instant::now() + self.timeout));
}
Err(e)
}
}
}
}
impl<R, W> Future for CopyTimeout<R, W>
where
R: AsyncRead,
W: AsyncWrite,
{
type Error = io::Error;
type Item = (u64, R, W);
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
if self.pos == self.cap {
let n = try_nb!(self.read_or_set_timeout());
if n == 0 {
// If we've written all the data and we've seen EOF, flush out the
// data and finish the transfer.
// done with the entire transfer.
try_nb!(self.w.as_mut().unwrap().flush());
return Ok((self.amt, self.r.take().unwrap(), self.w.take().unwrap()).into());
}
self.pos = 0;
self.cap = n;
// Clear it before write
self.clear_timer();
}
// If our buffer has some data, let's write it out!
while self.pos < self.cap {
let (pos, cap) = (self.pos, self.cap);
let i = try_nb!(self.write_or_set_timeout(pos, cap));
self.pos += i;
self.amt += i as u64;
}
// Clear it before read
self.clear_timer();
}
}
}
/// Copies all data from `r` to `w` with optional timeout param
pub fn copy_timeout_opt<R, W>(r: R, w: W, dur: Option<Duration>) -> CopyTimeoutOpt<R, W>
where
R: AsyncRead,
W: AsyncWrite,
{
match dur {
Some(d) => CopyTimeoutOpt::CopyTimeout(copy_timeout(r, w, d)),
None => CopyTimeoutOpt::Copy(copy(r, w)),
}
}
/// Copies all data from `R` to `W`
pub enum CopyTimeoutOpt<R: AsyncRead, W: AsyncWrite> {
Copy(Copy<R, W>),
CopyTimeout(CopyTimeout<R, W>),
}
impl<R: AsyncRead, W: AsyncWrite> Future for CopyTimeoutOpt<R, W> {
type Error = io::Error;
type Item = (u64, R, W);
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match *self {
CopyTimeoutOpt::CopyTimeout(ref mut c) => c.poll(),
CopyTimeoutOpt::Copy(ref mut c) => c.poll(),
}
}
}
|
random_line_split
|
|
utils.rs
|
//! Utility functions
use std::{
io,
time::{Duration, Instant},
};
use futures::{Async, Future, Poll};
use tokio::timer::Delay;
use tokio_io::{
io::{copy, Copy},
try_nb,
AsyncRead,
AsyncWrite,
};
use super::BUFFER_SIZE;
/// Copies all data from `r` to `w`, abort if timeout reaches
pub fn copy_timeout<R, W>(r: R, w: W, dur: Duration) -> CopyTimeout<R, W>
where
R: AsyncRead,
W: AsyncWrite,
{
CopyTimeout::new(r, w, dur)
}
/// Copies all data from `r` to `w`, abort if timeout reaches
pub struct CopyTimeout<R, W>
where
R: AsyncRead,
W: AsyncWrite,
{
r: Option<R>,
w: Option<W>,
timeout: Duration,
amt: u64,
timer: Option<Delay>,
buf: [u8; BUFFER_SIZE],
pos: usize,
cap: usize,
}
impl<R, W> CopyTimeout<R, W>
where
R: AsyncRead,
W: AsyncWrite,
{
fn new(r: R, w: W, timeout: Duration) -> CopyTimeout<R, W> {
CopyTimeout {
r: Some(r),
w: Some(w),
timeout,
amt: 0,
timer: None,
buf: [0u8; BUFFER_SIZE],
pos: 0,
cap: 0,
}
}
fn try_poll_timeout(&mut self) -> io::Result<()> {
match self.timer.as_mut() {
None => Ok(()),
Some(t) => match t.poll() {
Err(err) => panic!("Failed to poll on timer, err: {}", err),
Ok(Async::Ready(..)) => Err(io::Error::new(io::ErrorKind::TimedOut, "connection timed out")),
Ok(Async::NotReady) => Ok(()),
},
}
}
fn clear_timer(&mut self) {
let _ = self.timer.take();
}
fn read_or_set_timeout(&mut self) -> io::Result<usize> {
// First, return if timeout
self.try_poll_timeout()?;
// Then, unset the previous timeout
self.clear_timer();
match self.r.as_mut().unwrap().read(&mut self.buf) {
Ok(n) => Ok(n),
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.timer = Some(Delay::new(Instant::now() + self.timeout));
}
Err(e)
}
}
}
fn write_or_set_timeout(&mut self, beg: usize, end: usize) -> io::Result<usize> {
// First, return if timeout
self.try_poll_timeout()?;
// Then, unset the previous timeout
self.clear_timer();
match self.w.as_mut().unwrap().write(&self.buf[beg..end]) {
Ok(n) => Ok(n),
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.timer = Some(Delay::new(Instant::now() + self.timeout));
}
Err(e)
}
}
}
}
impl<R, W> Future for CopyTimeout<R, W>
where
R: AsyncRead,
W: AsyncWrite,
{
type Error = io::Error;
type Item = (u64, R, W);
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
if self.pos == self.cap {
let n = try_nb!(self.read_or_set_timeout());
if n == 0 {
// If we've written all the data and we've seen EOF, flush out the
// data and finish the transfer.
// done with the entire transfer.
try_nb!(self.w.as_mut().unwrap().flush());
return Ok((self.amt, self.r.take().unwrap(), self.w.take().unwrap()).into());
}
self.pos = 0;
self.cap = n;
// Clear it before write
self.clear_timer();
}
// If our buffer has some data, let's write it out!
while self.pos < self.cap {
let (pos, cap) = (self.pos, self.cap);
let i = try_nb!(self.write_or_set_timeout(pos, cap));
self.pos += i;
self.amt += i as u64;
}
// Clear it before read
self.clear_timer();
}
}
}
/// Copies all data from `r` to `w` with optional timeout param
pub fn
|
<R, W>(r: R, w: W, dur: Option<Duration>) -> CopyTimeoutOpt<R, W>
where
R: AsyncRead,
W: AsyncWrite,
{
match dur {
Some(d) => CopyTimeoutOpt::CopyTimeout(copy_timeout(r, w, d)),
None => CopyTimeoutOpt::Copy(copy(r, w)),
}
}
/// Copies all data from `R` to `W`
pub enum CopyTimeoutOpt<R: AsyncRead, W: AsyncWrite> {
Copy(Copy<R, W>),
CopyTimeout(CopyTimeout<R, W>),
}
impl<R: AsyncRead, W: AsyncWrite> Future for CopyTimeoutOpt<R, W> {
type Error = io::Error;
type Item = (u64, R, W);
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match *self {
CopyTimeoutOpt::CopyTimeout(ref mut c) => c.poll(),
CopyTimeoutOpt::Copy(ref mut c) => c.poll(),
}
}
}
|
copy_timeout_opt
|
identifier_name
|
utils.rs
|
//! Utility functions
use std::{
io,
time::{Duration, Instant},
};
use futures::{Async, Future, Poll};
use tokio::timer::Delay;
use tokio_io::{
io::{copy, Copy},
try_nb,
AsyncRead,
AsyncWrite,
};
use super::BUFFER_SIZE;
/// Copies all data from `r` to `w`, abort if timeout reaches
pub fn copy_timeout<R, W>(r: R, w: W, dur: Duration) -> CopyTimeout<R, W>
where
R: AsyncRead,
W: AsyncWrite,
{
CopyTimeout::new(r, w, dur)
}
/// Copies all data from `r` to `w`, abort if timeout reaches
pub struct CopyTimeout<R, W>
where
R: AsyncRead,
W: AsyncWrite,
{
r: Option<R>,
w: Option<W>,
timeout: Duration,
amt: u64,
timer: Option<Delay>,
buf: [u8; BUFFER_SIZE],
pos: usize,
cap: usize,
}
impl<R, W> CopyTimeout<R, W>
where
R: AsyncRead,
W: AsyncWrite,
{
fn new(r: R, w: W, timeout: Duration) -> CopyTimeout<R, W> {
CopyTimeout {
r: Some(r),
w: Some(w),
timeout,
amt: 0,
timer: None,
buf: [0u8; BUFFER_SIZE],
pos: 0,
cap: 0,
}
}
fn try_poll_timeout(&mut self) -> io::Result<()> {
match self.timer.as_mut() {
None => Ok(()),
Some(t) => match t.poll() {
Err(err) => panic!("Failed to poll on timer, err: {}", err),
Ok(Async::Ready(..)) => Err(io::Error::new(io::ErrorKind::TimedOut, "connection timed out")),
Ok(Async::NotReady) => Ok(()),
},
}
}
fn clear_timer(&mut self) {
let _ = self.timer.take();
}
fn read_or_set_timeout(&mut self) -> io::Result<usize> {
// First, return if timeout
self.try_poll_timeout()?;
// Then, unset the previous timeout
self.clear_timer();
match self.r.as_mut().unwrap().read(&mut self.buf) {
Ok(n) => Ok(n),
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.timer = Some(Delay::new(Instant::now() + self.timeout));
}
Err(e)
}
}
}
fn write_or_set_timeout(&mut self, beg: usize, end: usize) -> io::Result<usize> {
// First, return if timeout
self.try_poll_timeout()?;
// Then, unset the previous timeout
self.clear_timer();
match self.w.as_mut().unwrap().write(&self.buf[beg..end]) {
Ok(n) => Ok(n),
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.timer = Some(Delay::new(Instant::now() + self.timeout));
}
Err(e)
}
}
}
}
impl<R, W> Future for CopyTimeout<R, W>
where
R: AsyncRead,
W: AsyncWrite,
{
type Error = io::Error;
type Item = (u64, R, W);
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
if self.pos == self.cap {
let n = try_nb!(self.read_or_set_timeout());
if n == 0 {
// If we've written all the data and we've seen EOF, flush out the
// data and finish the transfer.
// done with the entire transfer.
try_nb!(self.w.as_mut().unwrap().flush());
return Ok((self.amt, self.r.take().unwrap(), self.w.take().unwrap()).into());
}
self.pos = 0;
self.cap = n;
// Clear it before write
self.clear_timer();
}
// If our buffer has some data, let's write it out!
while self.pos < self.cap {
let (pos, cap) = (self.pos, self.cap);
let i = try_nb!(self.write_or_set_timeout(pos, cap));
self.pos += i;
self.amt += i as u64;
}
// Clear it before read
self.clear_timer();
}
}
}
/// Copies all data from `r` to `w` with optional timeout param
pub fn copy_timeout_opt<R, W>(r: R, w: W, dur: Option<Duration>) -> CopyTimeoutOpt<R, W>
where
R: AsyncRead,
W: AsyncWrite,
|
/// Copies all data from `R` to `W`
pub enum CopyTimeoutOpt<R: AsyncRead, W: AsyncWrite> {
Copy(Copy<R, W>),
CopyTimeout(CopyTimeout<R, W>),
}
impl<R: AsyncRead, W: AsyncWrite> Future for CopyTimeoutOpt<R, W> {
type Error = io::Error;
type Item = (u64, R, W);
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match *self {
CopyTimeoutOpt::CopyTimeout(ref mut c) => c.poll(),
CopyTimeoutOpt::Copy(ref mut c) => c.poll(),
}
}
}
|
{
match dur {
Some(d) => CopyTimeoutOpt::CopyTimeout(copy_timeout(r, w, d)),
None => CopyTimeoutOpt::Copy(copy(r, w)),
}
}
|
identifier_body
|
multibyte.rs
|
0xC1. These 2-byte sequences are disallowed in UTF-8,
//! because they would form a duplicate encoding for the the 1-byte
//! ASCII range.
//!
//! Due to these specialties, we cannot treat Emacs strings as Rust
//! `&str`, and this module regrettably contains adapted copies of
//! stretches of `std::str` functions.
use libc::{c_char, c_int, c_uchar, c_uint, ptrdiff_t};
use std::ptr;
use std::slice;
use remacs_sys::{EmacsInt, Lisp_String, CHARACTERBITS, CHAR_CTL, CHAR_MODIFIER_MASK, CHAR_SHIFT};
use remacs_sys::emacs_abort;
use lisp::ExternalPtr;
pub type LispStringRef = ExternalPtr<Lisp_String>;
// cannot use `char`, it takes values out of its range
pub type Codepoint = u32;
/// Maximum character code
pub const MAX_CHAR: Codepoint = (1 << CHARACTERBITS) - 1;
/// Maximum character codes for several encoded lengths
pub const MAX_1_BYTE_CHAR: Codepoint = 0x7F;
pub const MAX_2_BYTE_CHAR: Codepoint = 0x7FF;
pub const MAX_3_BYTE_CHAR: Codepoint = 0xFFFF;
pub const MAX_4_BYTE_CHAR: Codepoint = 0x1F_FFFF;
pub const MAX_5_BYTE_CHAR: Codepoint = 0x3F_FF7F;
/// Maximum length of a single encoded codepoint
pub const MAX_MULTIBYTE_LENGTH: usize = 5;
impl LispStringRef {
/// Return the string's len in bytes.
pub fn len_bytes(&self) -> ptrdiff_t {
if self.size_byte < 0 {
self.size
} else {
self.size_byte
}
}
/// Return the string's length in characters. Differs from
/// `len_bytes` for multibyte strings.
pub fn len_chars(&self) -> ptrdiff_t {
self.size
}
pub fn is_multibyte(&self) -> bool {
self.size_byte >= 0
}
pub fn data_ptr(&mut self) -> *mut c_uchar {
self.data as *mut c_uchar
}
pub fn sdata_ptr(&mut self) -> *mut c_char {
self.data as *mut c_char
}
pub fn const_data_ptr(&self) -> *const c_uchar {
self.data as *const c_uchar
}
pub fn const_sdata_ptr(&self) -> *const c_char {
self.data as *const c_char
}
#[inline]
pub fn as_slice(&self) -> &[u8] {
unsafe { slice::from_raw_parts(self.data as *const u8, self.len_bytes() as usize) }
}
#[inline]
pub fn as_mut_slice(&self) -> &mut [u8] {
unsafe { slice::from_raw_parts_mut(self.data as *mut u8, self.len_bytes() as usize) }
}
#[inline]
pub fn byte_at(&self, index: ptrdiff_t) -> u8 {
unsafe { *self.const_data_ptr().offset(index) }
}
}
pub struct LispStringRefIterator<'a> {
string_ref: &'a LispStringRef,
cur: usize,
}
pub struct LispStringRefCharIterator<'a>(LispStringRefIterator<'a>);
// Substitute for FETCH_STRING_CHAR_ADVANCE
impl<'a> Iterator for LispStringRefIterator<'a> {
type Item = (usize, Codepoint);
fn next(&mut self) -> Option<(usize, Codepoint)> {
if self.cur < self.string_ref.len_bytes() as usize {
let codepoint: Codepoint;
let old_index = self.cur;
let ref_slice = self.string_ref.as_slice();
if self.string_ref.is_multibyte() {
let (cp, advance) = multibyte_char_at(&ref_slice[self.cur..]);
codepoint = cp;
self.cur += advance;
} else {
codepoint = ref_slice[self.cur] as Codepoint;
self.cur += 1;
}
Some((old_index, codepoint))
} else {
None
}
}
}
impl<'a> Iterator for LispStringRefCharIterator<'a> {
type Item = Codepoint;
fn next(&mut self) -> Option<Codepoint> {
self.0.next().map(|result| result.1)
}
}
impl LispStringRef {
pub fn char_indices(&self) -> LispStringRefIterator {
LispStringRefIterator {
string_ref: self,
cur: 0,
}
}
pub fn chars(&self) -> LispStringRefCharIterator {
LispStringRefCharIterator(self.char_indices())
}
}
fn string_overflow() ->! {
error!("Maximum string size exceeded")
}
/// Parse unibyte string at STR of LEN bytes, and return the number of
/// bytes it may occupy when converted to multibyte string by
/// `str_to_multibyte`.
#[no_mangle]
pub fn count_size_as_multibyte(ptr: *const c_uchar, len: ptrdiff_t) -> ptrdiff_t {
let slice = unsafe { slice::from_raw_parts(ptr, len as usize) };
slice.iter().fold(0, |total, &byte| {
let n = if byte < 0x80 { 1 } else { 2 };
total.checked_add(n).unwrap_or_else(|| string_overflow())
})
}
/// Same as the `BYTE8_TO_CHAR` macro.
#[inline]
pub fn raw_byte_codepoint(byte: c_uchar) -> Codepoint {
if byte >= 0x80 {
byte as Codepoint + 0x3F_FF00
} else {
byte as Codepoint
}
}
/// Same as the `CHAR_TO_BYTE8` macro.
#[inline]
pub fn raw_byte_from_codepoint(cp: Codepoint) -> c_uchar {
(cp - 0x3F_FF00) as c_uchar
}
/// Same as the `CHAR_TO_BYTE_SAFE` macro.
/// Return the raw 8-bit byte for character CP,
/// or -1 if CP doesn't correspond to a byte.
#[inline]
pub fn raw_byte_from_codepoint_safe(cp: Codepoint) -> EmacsInt {
if cp < 0x80 {
cp as EmacsInt
} else if cp > MAX_5_BYTE_CHAR {
raw_byte_from_codepoint(cp) as EmacsInt
} else {
-1
}
}
/// `UNIBYTE_TO_CHAR` macro
#[inline]
pub fn unibyte_to_char(cp: Codepoint) -> Codepoint {
if cp < 0x80 {
cp
} else {
raw_byte_codepoint(cp as c_uchar)
}
}
/// `MAKE_CHAR_MULTIBYTE` macro
#[inline]
pub fn make_char_multibyte(cp: Codepoint) -> Codepoint {
debug_assert!(cp < 256);
unibyte_to_char(cp)
}
/// Same as the `CHAR_STRING` macro.
#[inline]
fn write_codepoint(to: &mut [c_uchar], cp: Codepoint) -> usize {
if cp <= MAX_1_BYTE_CHAR {
to[0] = cp as c_uchar;
1
} else if cp <= MAX_2_BYTE_CHAR {
// note: setting later bytes first to avoid multiple bound checks
to[1] = 0x80 | (cp & 0x3F) as c_uchar;
to[0] = 0xC0 | (cp >> 6) as c_uchar;
2
} else if cp <= MAX_3_BYTE_CHAR {
to[2] = 0x80 | (cp & 0x3F) as c_uchar;
to[1] = 0x80 | ((cp >> 6) & 0x3F) as c_uchar;
to[0] = 0xE0 | (cp >> 12) as c_uchar;
3
} else if cp <= MAX_4_BYTE_CHAR {
to[3] = 0x80 | (cp & 0x3F) as c_uchar;
to[2] = 0x80 | ((cp >> 6) & 0x3F) as c_uchar;
to[1] = 0x80 | ((cp >> 12) & 0x3F) as c_uchar;
to[0] = 0xF0 | (cp >> 18) as c_uchar;
4
} else if cp <= MAX_5_BYTE_CHAR {
to[4] = 0x80 | (cp & 0x3F) as c_uchar;
to[3] = 0x80 | ((cp >> 6) & 0x3F) as c_uchar;
to[2] = 0x80 | ((cp >> 12) & 0x3F) as c_uchar;
to[1] = 0x80 | ((cp >> 18) & 0x0F) as c_uchar;
to[0] = 0xF8;
5
} else if cp <= MAX_CHAR {
let b = raw_byte_from_codepoint(cp);
to[1] = 0x80 | (b & 0x3F);
to[0] = 0xC0 | ((b >> 6) & 1);
2
} else {
error!("Invalid character: {:#x}", cp)
}
}
/// If character code C has modifier masks, reflect them to the
/// character code if possible. Return the resulting code.
#[no_mangle]
pub fn char_resolve_modifier_mask(ch: EmacsInt) -> EmacsInt {
let mut cp = ch as Codepoint;
// A non-ASCII character can't reflect modifier bits to the code.
if (cp &!CHAR_MODIFIER_MASK) >= 0x80 {
return cp as EmacsInt;
}
let ascii = (cp & 0x7F) as u8;
// For Meta, Shift, and Control modifiers, we need special care.
if cp & CHAR_SHIFT!= 0 {
let unshifted = cp &!CHAR_SHIFT;
// Shift modifier is valid only with [A-Za-z].
if ascii >= b'A' && ascii <= b'Z' {
cp = unshifted;
} else if ascii >= b'a' && ascii <= b'z' {
cp = unshifted &!0x20;
} else if ascii <= b''{
// Shift modifier for control characters and SPC is ignored.
cp = unshifted;
}
}
// Simulate the code in lread.c.
if cp & CHAR_CTL!= 0 {
// Allow `\C-'and `\C-?'.
if ascii == b''{
cp &=!0x7F &!CHAR_CTL;
} else if ascii == b'?' {
cp = 0x7F | (cp &!0x7F &!CHAR_CTL);
} else if ascii >= b'@' && ascii <= b'_' {
// ASCII control chars are made from letters (both cases),
// as well as the non-letters within 0o100...0o137.
cp &= 0x1F | (!0x7F &!CHAR_CTL);
}
}
cp as EmacsInt
}
/// Store multibyte form of character CP at TO. If CP has modifier bits,
/// handle them appropriately.
#[no_mangle]
pub fn char_string(mut cp: c_uint, to: *mut c_uchar) -> c_int {
if cp & CHAR_MODIFIER_MASK!= 0 {
cp = char_resolve_modifier_mask(cp as EmacsInt) as Codepoint;
cp &=!CHAR_MODIFIER_MASK;
}
write_codepoint(
unsafe { slice::from_raw_parts_mut(to, MAX_MULTIBYTE_LENGTH) },
cp,
) as c_int
}
/// Convert unibyte text at STR of BYTES bytes to a multibyte text
/// that contains the same single-byte characters. It actually
/// converts all 8-bit characters to multibyte forms. It is assured
/// that we can use LEN bytes at STR as a work area and that is
/// enough. Returns the byte length of the multibyte string.
#[no_mangle]
pub fn str_to_multibyte(ptr: *mut c_uchar, len: ptrdiff_t, bytes: ptrdiff_t) -> ptrdiff_t {
    // slice covers the whole work area to be able to write back
    let slice = unsafe { slice::from_raw_parts_mut(ptr, len as usize) };
    // first, search ASCII-only prefix that we can skip processing
    let mut start = 0;
    for (idx, &byte) in slice.iter().enumerate() {
        if byte >= 0x80 {
            start = idx;
            break;
        }
        // whole string is ASCII-only, done!
        if idx as ptrdiff_t == bytes - 1 {
            return bytes;
        }
    }
    // copy the rest to the end of the work area, which is guaranteed to be
    // large enough, so we can read from there while writing the output.
    // NOTE(review): source and destination may overlap; `ptr::copy` has
    // memmove semantics, which handles that correctly.
    let offset = (len - bytes) as usize;
    let slice = &mut slice[start..];
    unsafe {
        ptr::copy(
            slice.as_mut_ptr(),
            slice[offset..].as_mut_ptr(),
            bytes as usize - start,
        );
    }
    // Re-encode left-to-right behind the read cursor: ASCII bytes stay
    // 1 byte, 8-bit bytes expand to the 2-byte raw-byte form.
    let mut to = 0;
    for from in offset..slice.len() {
        let byte = slice[from];
        to += write_codepoint(&mut slice[to..], raw_byte_codepoint(byte));
    }
    // Untouched ASCII prefix plus re-encoded tail.
    (start + to) as ptrdiff_t
}
/// Same as `MULTIBYTE_LENGTH` macro in C.
/// Returns the byte length of the multibyte sequence at the start of
/// `slice`, or `None` if it is not a valid sequence. When
/// `allow_encoded_raw` is false, 0xC0/0xC1-headed sequences (the
/// internal encoding of raw 8-bit bytes) are rejected as well.
fn multibyte_length(slice: &[c_uchar], allow_encoded_raw: bool) -> Option<usize> {
    // True when the byte at `i` exists and is a continuation byte (10xxxxxx).
    let cont = |i: usize| slice.get(i).map_or(false, |&b| b & 0xC0 == 0x80);
    let head = *slice.first()?;
    if head & 0x80 == 0 {
        // ASCII: a 1-byte sequence.
        return Some(1);
    }
    if !cont(1) {
        return None;
    }
    if !allow_encoded_raw && head & 0xFE == 0xC0 {
        // 0xC0/0xC1 head: encoded raw byte, rejected on request.
        return None;
    }
    if head & 0xE0 == 0xC0 {
        return Some(2);
    }
    if !cont(2) {
        return None;
    }
    if head & 0xF0 == 0xE0 {
        return Some(3);
    }
    if !cont(3) {
        return None;
    }
    if head & 0xF8 == 0xF0 {
        return Some(4);
    }
    if !cont(4) {
        return None;
    }
    // 5-byte form: fixed 0xF8 head, first continuation byte in 0x80..=0x8F.
    if head == 0xF8 && slice[1] & 0xF0 == 0x80 {
        Some(5)
    } else {
        None
    }
}
/// Same as the `STRING_CHAR_ADVANCE` macro.
#[inline]
pub fn multibyte_char_at(slice: &[c_uchar]) -> (Codepoint, usize) {
|
if head < 0xC2 {
(cp | 0x3F_FF80, 2)
} else {
(cp, 2)
}
} else if head & 0x10 == 0 {
(
((head & 0x0F) << 12) | ((slice[1] as Codepoint & 0x3F) << 6)
| (slice[2] as Codepoint & 0x3F),
3,
)
} else if head & 0x08 == 0 {
(
((head & 0x07) << 18) | ((slice[1] as Codepoint & 0x3F) << 12)
| ((slice[2] as Codepoint & 0x3F) << 6)
| (slice[3] as Codepoint & 0x3F),
4,
)
} else {
// the relevant bytes of "head" are always zero
(
((slice[1] as Codepoint & 0x3F) << 18) | ((slice[2] as Codepoint & 0x3F) << 12)
| ((slice[3] as Codepoint & 0x3F) << 6)
| (slice[4] as Codepoint & 0x3F),
5,
)
}
}
/// Same as `BYTES_BY_CHAR_HEAD` macro in C.
/// Sequence length implied by the leading byte alone; assumes the byte
/// is a valid sequence head (garbage in, garbage out).
fn multibyte_length_by_head(byte: c_uchar) -> usize {
    match byte {
        b if b & 0x80 == 0 => 1, // 0xxxxxxx: ASCII
        b if b & 0x20 == 0 => 2, // 110xxxxx
        b if b & 0x10 == 0 => 3, // 1110xxxx
        b if b & 0x08 == 0 => 4, // 11110xxx
        _ => 5,                  // 0xF8 head: internal 5-byte form
    }
}
/// Return the number of characters in the NBYTES bytes at PTR.
/// This works by looking at the contents and checking for multibyte
/// sequences while assuming that there's no invalid sequence. It
/// ignores enable-multibyte-characters.
/// Aborts (via `emacs_abort`) if an invalid sequence is found after all.
#[no_mangle]
pub fn multibyte_chars_in_text(ptr: *const c_uchar, nbytes: ptrdiff_t) -> ptrdiff_t {
    // SAFETY: caller guarantees PTR points to at least NBYTES readable bytes.
    let slice = unsafe { slice::from_raw_parts(ptr, nbytes as usize) };
    let len = slice.len();
    let mut idx = 0;
    let mut chars = 0;
    // TODO: make this an iterator?
    while idx < len {
        // `true`: encoded raw bytes (0xC0/0xC1 heads) count as valid here.
        idx += multibyte_length(&slice[idx..], true).unwrap_or_else(|| unsafe { emacs_abort() });
        chars += 1;
    }
    chars as ptrdiff_t
}
/// Parse unibyte text at STR of LEN bytes as a multibyte text, count
/// characters and bytes in it, and store them in *NCHARS and *NBYTES
/// respectively. On counting bytes, pay attention to that 8-bit
/// characters not constructing a valid multibyte sequence are
/// represented by 2-byte in a multibyte text.
#[no_mangle]
pub fn parse_str_as_multibyte(
    ptr: *const c_uchar,
    len: ptrdiff_t,
    nchars: *mut ptrdiff_t,
    nbytes: *mut ptrdiff_t,
) {
    // SAFETY: caller guarantees PTR points to at least LEN readable bytes.
    let slice = unsafe { slice::from_raw_parts(ptr, len as usize) };
    let len = slice.len();
    let mut chars = 0;
    let mut bytes = 0;
    let mut idx = 0;
    // XXX: in the original, there is an "unchecked" version of multibyte_length
    // called while the remaining length is >= MAX_MULTIBYTE_LENGTH.
    while idx < len {
        chars += 1;
        match multibyte_length(&slice[idx..], false) {
            None => {
                // This is either an invalid multibyte sequence, or
                // one that would encode a raw 8-bit byte, which we
                // only use internally when the string is *already*
                // multibyte. Such a byte becomes a 2-byte character.
                idx += 1;
                bytes += 2;
            }
            Some(n) => {
                // A valid sequence keeps its encoded length.
                idx += n;
                bytes += n as ptrdiff_t;
            }
        }
    }
    // Write both counts through the caller-supplied out-pointers.
    // NOTE(review): unlike `str_as_multibyte`, no null check here —
    // callers must pass valid pointers.
    unsafe {
        *nchars = chars;
        *nbytes = bytes;
    }
}
/// Arrange unibyte text at STR of NBYTES bytes as a multibyte text.
/// It actually converts only such 8-bit characters that don't construct
/// a multibyte sequence to multibyte forms of Latin-1 characters. If
/// NCHARS is nonzero, set *NCHARS to the number of characters in the
/// text. It is assured that we can use LEN bytes at STR as a work
/// area and that is enough. Return the number of bytes of the
/// resulting text.
#[no_mangle]
pub fn str_as_multibyte(
    ptr: *mut c_uchar,
    len: ptrdiff_t,
    mut nbytes: ptrdiff_t,
    nchars: *mut ptrdiff_t,
) -> ptrdiff_t {
    // slice covers the whole work area to be able to write back
    let slice = unsafe { slice::from_raw_parts_mut(ptr, len as usize) };
    // first, search the prefix that is already valid multibyte and can
    // be left untouched; `start` marks the first offending offset
    let mut start = None;
    let mut chars = 0;
    let mut idx = 0;
    while idx < nbytes as usize {
        match multibyte_length(&slice[idx..], false) {
            None => {
                start = Some(idx);
                break;
            }
            Some(n) => {
                idx += n;
                chars += 1;
            }
        }
    }
    if let Some(start) = start {
        // copy the rest to the end of the work area, which is guaranteed to be
        // large enough, so we can read from there while writing the output
        // (`ptr::copy` has memmove semantics, so overlap is fine)
        let offset = (len - nbytes) as usize;
        let slice = &mut slice[start..];
        unsafe {
            ptr::copy(
                slice.as_mut_ptr(),
                slice[offset..].as_mut_ptr(),
                nbytes as usize - start,
            );
        }
        // Re-encode behind the read cursor: valid sequences are copied
        // verbatim, stray 8-bit bytes expand to 2-byte raw-byte form.
        let mut to = 0;
        let mut from = offset;
        while from < slice.len() {
            chars += 1;
            match multibyte_length(&slice[from..], false) {
                Some(n) => for _ in 0..n {
                    slice[to] = slice[from];
                    from += 1;
                    to += 1;
                },
                None => {
                    let byte = slice[from];
                    to += write_codepoint(&mut slice[to..], raw_byte_codepoint(byte));
                    from += 1;
                }
            }
        }
        // New total: untouched prefix plus re-encoded tail.
        nbytes = (start + to) as ptrdiff_t;
    }
    // NCHARS is optional (may be null).
    if!nchars.is_null() {
        unsafe {
            *nchars = chars;
        }
    }
    nbytes
}
/// Arrange multibyte text at STR of LEN bytes as a unibyte text. It
/// actually converts characters in the range 0x80..0xFF to unibyte.
#[no_mangle]
pub fn str_as_unibyte(ptr: *mut c_uchar, bytes: ptrdiff_t) -> ptrdiff_t {
let slice = unsafe { slice::from_raw_parts_mut(ptr, bytes as usize) };
let mut from = 0;
while from < bytes as usize {
let byte = slice[from];
match multibyte_length_by_head(byte) {
2 if byte & 0xFE == 0xC0 => break,
n => from += n,
}
}
let mut to = from;
while from < bytes as usize {
let byte = slice[from];
match multibyte_length_by_head(byte) {
2 if byte & 0xFE == 0xC0 => {
let newbyte = 0x80 | ((byte & 1) << 6) | (slice[from + 1] & 0x3F);
slice[to] = newbyte;
from += 2;
to += 1;
}
n => for _ in 0..n {
slice[to] = slice[from];
|
let head = slice[0] as Codepoint;
if head & 0x80 == 0 {
(head, 1)
} else if head & 0x20 == 0 {
let cp = ((head & 0x1F) << 6) | (slice[1] as Codepoint & 0x3F);
|
random_line_split
|
multibyte.rs
|
xC1. These 2-byte sequences are disallowed in UTF-8,
//! because they would form a duplicate encoding for the 1-byte
//! ASCII range.
//!
//! Due to these specialties, we cannot treat Emacs strings as Rust
//! `&str`, and this module regrettably contains adapted copies of
//! stretches of `std::str` functions.
use libc::{c_char, c_int, c_uchar, c_uint, ptrdiff_t};
use std::ptr;
use std::slice;
use remacs_sys::{EmacsInt, Lisp_String, CHARACTERBITS, CHAR_CTL, CHAR_MODIFIER_MASK, CHAR_SHIFT};
use remacs_sys::emacs_abort;
use lisp::ExternalPtr;
pub type LispStringRef = ExternalPtr<Lisp_String>;
// cannot use `char`, it takes values out of its range
pub type Codepoint = u32;
/// Maximum character code
pub const MAX_CHAR: Codepoint = (1 << CHARACTERBITS) - 1;
/// Maximum character codes for several encoded lengths
pub const MAX_1_BYTE_CHAR: Codepoint = 0x7F;
pub const MAX_2_BYTE_CHAR: Codepoint = 0x7FF;
pub const MAX_3_BYTE_CHAR: Codepoint = 0xFFFF;
pub const MAX_4_BYTE_CHAR: Codepoint = 0x1F_FFFF;
// NOTE: larger than MAX_4_BYTE_CHAR; codepoints above this value up to
// MAX_CHAR are the raw-byte codepoints (see `raw_byte_codepoint`).
pub const MAX_5_BYTE_CHAR: Codepoint = 0x3F_FF7F;
/// Maximum length of a single encoded codepoint
pub const MAX_MULTIBYTE_LENGTH: usize = 5;
impl LispStringRef {
/// Return the string's len in bytes.
pub fn len_bytes(&self) -> ptrdiff_t {
if self.size_byte < 0 {
self.size
} else {
self.size_byte
}
}
/// Return the string's length in characters. Differs from
/// `len_bytes` for multibyte strings.
pub fn len_chars(&self) -> ptrdiff_t {
self.size
}
pub fn is_multibyte(&self) -> bool {
self.size_byte >= 0
}
pub fn data_ptr(&mut self) -> *mut c_uchar {
self.data as *mut c_uchar
}
pub fn sdata_ptr(&mut self) -> *mut c_char {
self.data as *mut c_char
}
pub fn const_data_ptr(&self) -> *const c_uchar {
self.data as *const c_uchar
}
pub fn const_sdata_ptr(&self) -> *const c_char {
self.data as *const c_char
}
#[inline]
pub fn as_slice(&self) -> &[u8] {
unsafe { slice::from_raw_parts(self.data as *const u8, self.len_bytes() as usize) }
}
#[inline]
pub fn
|
(&self) -> &mut [u8] {
unsafe { slice::from_raw_parts_mut(self.data as *mut u8, self.len_bytes() as usize) }
}
#[inline]
pub fn byte_at(&self, index: ptrdiff_t) -> u8 {
unsafe { *self.const_data_ptr().offset(index) }
}
}
/// Iterator over a Lisp string yielding `(byte_index, codepoint)` pairs.
pub struct LispStringRefIterator<'a> {
    string_ref: &'a LispStringRef,
    // current byte offset into the string's data
    cur: usize,
}
/// Adapter over `LispStringRefIterator` yielding codepoints only.
pub struct LispStringRefCharIterator<'a>(LispStringRefIterator<'a>);
// Substitute for FETCH_STRING_CHAR_ADVANCE
impl<'a> Iterator for LispStringRefIterator<'a> {
    type Item = (usize, Codepoint);
    /// Yield the next `(byte_index, codepoint)` pair, advancing the
    /// cursor by the character's encoded length.
    fn next(&mut self) -> Option<(usize, Codepoint)> {
        if self.cur < self.string_ref.len_bytes() as usize {
            let codepoint: Codepoint;
            let old_index = self.cur;
            let ref_slice = self.string_ref.as_slice();
            if self.string_ref.is_multibyte() {
                // Multibyte string: decode a full multibyte sequence.
                let (cp, advance) = multibyte_char_at(&ref_slice[self.cur..]);
                codepoint = cp;
                self.cur += advance;
            } else {
                // Unibyte string: each byte is one character.
                codepoint = ref_slice[self.cur] as Codepoint;
                self.cur += 1;
            }
            Some((old_index, codepoint))
        } else {
            None
        }
    }
}
impl<'a> Iterator for LispStringRefCharIterator<'a> {
    type Item = Codepoint;

    /// Delegate to the inner iterator, dropping the byte index and
    /// keeping only the decoded codepoint.
    fn next(&mut self) -> Option<Codepoint> {
        match self.0.next() {
            Some((_, cp)) => Some(cp),
            None => None,
        }
    }
}
impl LispStringRef {
    /// Iterate over `(byte_index, codepoint)` pairs, honoring the
    /// string's unibyte/multibyte representation.
    pub fn char_indices(&self) -> LispStringRefIterator {
        LispStringRefIterator {
            string_ref: self,
            cur: 0,
        }
    }
    /// Iterate over the string's codepoints only.
    pub fn chars(&self) -> LispStringRefCharIterator {
        LispStringRefCharIterator(self.char_indices())
    }
}
/// Signal a "maximum string size exceeded" Lisp error; never returns.
fn string_overflow() ->! {
    error!("Maximum string size exceeded")
}
/// Parse unibyte string at STR of LEN bytes, and return the number of
/// bytes it may occupy when converted to multibyte string by
/// `str_to_multibyte`.
#[no_mangle]
pub fn count_size_as_multibyte(ptr: *const c_uchar, len: ptrdiff_t) -> ptrdiff_t {
    // SAFETY: caller guarantees PTR points to at least LEN readable bytes.
    let slice = unsafe { slice::from_raw_parts(ptr, len as usize) };
    let mut total: ptrdiff_t = 0;
    for &byte in slice {
        // ASCII stays 1 byte; 8-bit bytes become 2-byte raw-byte forms.
        let width = if byte < 0x80 { 1 } else { 2 };
        // The sum can exceed ptrdiff_t; signal an error instead of wrapping.
        total = total
            .checked_add(width)
            .unwrap_or_else(|| string_overflow());
    }
    total
}
/// Same as the `BYTE8_TO_CHAR` macro.
#[inline]
pub fn raw_byte_codepoint(byte: c_uchar) -> Codepoint {
if byte >= 0x80 {
byte as Codepoint + 0x3F_FF00
} else {
byte as Codepoint
}
}
/// Same as the `CHAR_TO_BYTE8` macro.
/// Inverse of `raw_byte_codepoint` for the raw-byte range.
/// NOTE(review): assumes CP is actually a raw-byte codepoint; for other
/// inputs the subtraction/truncation is meaningless — callers must check
/// first (see `raw_byte_from_codepoint_safe`).
#[inline]
pub fn raw_byte_from_codepoint(cp: Codepoint) -> c_uchar {
    (cp - 0x3F_FF00) as c_uchar
}
/// Same as the `CHAR_TO_BYTE_SAFE` macro.
/// Return the raw 8-bit byte for character CP,
/// or -1 if CP doesn't correspond to a byte.
#[inline]
pub fn raw_byte_from_codepoint_safe(cp: Codepoint) -> EmacsInt {
    if cp < 0x80 {
        // ASCII is its own byte.
        return cp as EmacsInt;
    }
    if cp > MAX_5_BYTE_CHAR {
        // Raw-byte codepoints map back to the byte they encode.
        return raw_byte_from_codepoint(cp) as EmacsInt;
    }
    // Ordinary non-ASCII characters have no single-byte form.
    -1
}
/// `UNIBYTE_TO_CHAR` macro
/// ASCII maps to itself; bytes 0x80..=0xFF map to raw-byte codepoints.
#[inline]
pub fn unibyte_to_char(cp: Codepoint) -> Codepoint {
    if cp < 0x80 {
        cp
    } else {
        raw_byte_codepoint(cp as c_uchar)
    }
}
/// `MAKE_CHAR_MULTIBYTE` macro
/// Like `unibyte_to_char`, but asserts (debug builds only) that the
/// input is a single byte value.
#[inline]
pub fn make_char_multibyte(cp: Codepoint) -> Codepoint {
    debug_assert!(cp < 256);
    unibyte_to_char(cp)
}
/// Same as the `CHAR_STRING` macro.
/// Encode CP into TO, returning the number of bytes written (1..=5,
/// never more than MAX_MULTIBYTE_LENGTH). Signals a Lisp error for
/// codepoints above MAX_CHAR.
#[inline]
fn write_codepoint(to: &mut [c_uchar], cp: Codepoint) -> usize {
    if cp <= MAX_1_BYTE_CHAR {
        // ASCII: single byte, stored as-is.
        to[0] = cp as c_uchar;
        1
    } else if cp <= MAX_2_BYTE_CHAR {
        // note: setting later bytes first to avoid multiple bound checks
        to[1] = 0x80 | (cp & 0x3F) as c_uchar;
        to[0] = 0xC0 | (cp >> 6) as c_uchar;
        2
    } else if cp <= MAX_3_BYTE_CHAR {
        to[2] = 0x80 | (cp & 0x3F) as c_uchar;
        to[1] = 0x80 | ((cp >> 6) & 0x3F) as c_uchar;
        to[0] = 0xE0 | (cp >> 12) as c_uchar;
        3
    } else if cp <= MAX_4_BYTE_CHAR {
        to[3] = 0x80 | (cp & 0x3F) as c_uchar;
        to[2] = 0x80 | ((cp >> 6) & 0x3F) as c_uchar;
        to[1] = 0x80 | ((cp >> 12) & 0x3F) as c_uchar;
        to[0] = 0xF0 | (cp >> 18) as c_uchar;
        4
    } else if cp <= MAX_5_BYTE_CHAR {
        // Internal 5-byte form with a fixed 0xF8 head; the head carries
        // no payload bits (cp <= 0x3F_FF7F fits in the 4 continuations).
        to[4] = 0x80 | (cp & 0x3F) as c_uchar;
        to[3] = 0x80 | ((cp >> 6) & 0x3F) as c_uchar;
        to[2] = 0x80 | ((cp >> 12) & 0x3F) as c_uchar;
        to[1] = 0x80 | ((cp >> 18) & 0x0F) as c_uchar;
        to[0] = 0xF8;
        5
    } else if cp <= MAX_CHAR {
        // Raw 8-bit byte: 2-byte sequence headed 0xC0/0xC1 — invalid in
        // standard UTF-8, used only inside Emacs.
        let b = raw_byte_from_codepoint(cp);
        to[1] = 0x80 | (b & 0x3F);
        to[0] = 0xC0 | ((b >> 6) & 1);
        2
    } else {
        error!("Invalid character: {:#x}", cp)
    }
}
/// If character code C has modifier masks, reflect them to the
/// character code if possible. Return the resulting code.
#[no_mangle]
pub fn char_resolve_modifier_mask(ch: EmacsInt) -> EmacsInt {
let mut cp = ch as Codepoint;
// A non-ASCII character can't reflect modifier bits to the code.
if (cp &!CHAR_MODIFIER_MASK) >= 0x80 {
return cp as EmacsInt;
}
let ascii = (cp & 0x7F) as u8;
// For Meta, Shift, and Control modifiers, we need special care.
if cp & CHAR_SHIFT!= 0 {
let unshifted = cp &!CHAR_SHIFT;
// Shift modifier is valid only with [A-Za-z].
if ascii >= b'A' && ascii <= b'Z' {
cp = unshifted;
} else if ascii >= b'a' && ascii <= b'z' {
cp = unshifted &!0x20;
} else if ascii <= b''{
// Shift modifier for control characters and SPC is ignored.
cp = unshifted;
}
}
// Simulate the code in lread.c.
if cp & CHAR_CTL!= 0 {
// Allow `\C-'and `\C-?'.
if ascii == b''{
cp &=!0x7F &!CHAR_CTL;
} else if ascii == b'?' {
cp = 0x7F | (cp &!0x7F &!CHAR_CTL);
} else if ascii >= b'@' && ascii <= b'_' {
// ASCII control chars are made from letters (both cases),
// as well as the non-letters within 0o100...0o137.
cp &= 0x1F | (!0x7F &!CHAR_CTL);
}
}
cp as EmacsInt
}
/// Store multibyte form of character CP at TO. If CP has modifier bits,
/// handle them appropriately.
#[no_mangle]
pub fn char_string(mut cp: c_uint, to: *mut c_uchar) -> c_int {
if cp & CHAR_MODIFIER_MASK!= 0 {
cp = char_resolve_modifier_mask(cp as EmacsInt) as Codepoint;
cp &=!CHAR_MODIFIER_MASK;
}
write_codepoint(
unsafe { slice::from_raw_parts_mut(to, MAX_MULTIBYTE_LENGTH) },
cp,
) as c_int
}
/// Convert unibyte text at STR of BYTES bytes to a multibyte text
/// that contains the same single-byte characters. It actually
/// converts all 8-bit characters to multibyte forms. It is assured
/// that we can use LEN bytes at STR as a work area and that is
/// enough. Returns the byte length of the multibyte string.
#[no_mangle]
pub fn str_to_multibyte(ptr: *mut c_uchar, len: ptrdiff_t, bytes: ptrdiff_t) -> ptrdiff_t {
// slice covers the whole work area to be able to write back
let slice = unsafe { slice::from_raw_parts_mut(ptr, len as usize) };
// first, search ASCII-only prefix that we can skip processing
let mut start = 0;
for (idx, &byte) in slice.iter().enumerate() {
if byte >= 0x80 {
start = idx;
break;
}
// whole string is ASCII-only, done!
if idx as ptrdiff_t == bytes - 1 {
return bytes;
}
}
// copy the rest to the end of the work area, which is guaranteed to be
// large enough, so we can read from there while writing the output
let offset = (len - bytes) as usize;
let slice = &mut slice[start..];
unsafe {
ptr::copy(
slice.as_mut_ptr(),
slice[offset..].as_mut_ptr(),
bytes as usize - start,
);
}
let mut to = 0;
for from in offset..slice.len() {
let byte = slice[from];
to += write_codepoint(&mut slice[to..], raw_byte_codepoint(byte));
}
(start + to) as ptrdiff_t
}
/// Same as `MULTIBYTE_LENGTH` macro in C.
fn multibyte_length(slice: &[c_uchar], allow_encoded_raw: bool) -> Option<usize> {
let len = slice.len();
if len < 1 {
None
} else if slice[0] & 0x80 == 0 {
Some(1)
} else if len < 2 || slice[1] & 0xC0!= 0x80 {
None
} else if!allow_encoded_raw && slice[0] & 0xFE == 0xC0 {
None
} else if slice[0] & 0xE0 == 0xC0 {
Some(2)
} else if len < 3 || slice[2] & 0xC0!= 0x80 {
None
} else if slice[0] & 0xF0 == 0xE0 {
Some(3)
} else if len < 4 || slice[3] & 0xC0!= 0x80 {
None
} else if slice[0] & 0xF8 == 0xF0 {
Some(4)
} else if len < 5 || slice[4] & 0xC0!= 0x80 {
None
} else if slice[0] == 0xF8 && slice[1] & 0xF0 == 0x80 {
Some(5)
} else {
None
}
}
/// Same as the `STRING_CHAR_ADVANCE` macro.
/// Decode the codepoint at the start of `slice`, returning it together
/// with the number of bytes consumed. Assumes a valid sequence head;
/// panics (index out of bounds) if the slice is shorter than the head
/// implies.
#[inline]
pub fn multibyte_char_at(slice: &[c_uchar]) -> (Codepoint, usize) {
    // The low 6 payload bits of the continuation byte at index `i`.
    let payload = |i: usize| slice[i] as Codepoint & 0x3F;
    let head = slice[0] as Codepoint;
    if head & 0x80 == 0 {
        // ASCII: the byte is the codepoint.
        (head, 1)
    } else if head & 0x20 == 0 {
        let cp = ((head & 0x1F) << 6) | payload(1);
        if head < 0xC2 {
            // 0xC0/0xC1 heads encode raw 8-bit bytes: lift the value
            // into the raw-byte codepoint range.
            (cp | 0x3F_FF80, 2)
        } else {
            (cp, 2)
        }
    } else if head & 0x10 == 0 {
        (((head & 0x0F) << 12) | (payload(1) << 6) | payload(2), 3)
    } else if head & 0x08 == 0 {
        (
            ((head & 0x07) << 18) | (payload(1) << 12) | (payload(2) << 6) | payload(3),
            4,
        )
    } else {
        // 5-byte form: the relevant bits of the head are always zero,
        // so only the four continuation bytes contribute.
        (
            (payload(1) << 18) | (payload(2) << 12) | (payload(3) << 6) | payload(4),
            5,
        )
    }
}
/// Same as `BYTES_BY_CHAR_HEAD` macro in C.
fn multibyte_length_by_head(byte: c_uchar) -> usize {
if byte & 0x80 == 0 {
1
} else if byte & 0x20 == 0 {
2
} else if byte & 0x10 == 0 {
3
} else if byte & 0x08 == 0 {
4
} else {
5
}
}
/// Return the number of characters in the NBYTES bytes at PTR.
/// This works by looking at the contents and checking for multibyte
/// sequences while assuming that there's no invalid sequence. It
/// ignores enable-multibyte-characters.
#[no_mangle]
pub fn multibyte_chars_in_text(ptr: *const c_uchar, nbytes: ptrdiff_t) -> ptrdiff_t {
let slice = unsafe { slice::from_raw_parts(ptr, nbytes as usize) };
let len = slice.len();
let mut idx = 0;
let mut chars = 0;
// TODO: make this an iterator?
while idx < len {
idx += multibyte_length(&slice[idx..], true).unwrap_or_else(|| unsafe { emacs_abort() });
chars += 1;
}
chars as ptrdiff_t
}
/// Parse unibyte text at STR of LEN bytes as a multibyte text, count
/// characters and bytes in it, and store them in *NCHARS and *NBYTES
/// respectively. On counting bytes, pay attention to that 8-bit
/// characters not constructing a valid multibyte sequence are
/// represented by 2-byte in a multibyte text.
#[no_mangle]
pub fn parse_str_as_multibyte(
ptr: *const c_uchar,
len: ptrdiff_t,
nchars: *mut ptrdiff_t,
nbytes: *mut ptrdiff_t,
) {
let slice = unsafe { slice::from_raw_parts(ptr, len as usize) };
let len = slice.len();
let mut chars = 0;
let mut bytes = 0;
let mut idx = 0;
// XXX: in the original, there is an "unchecked" version of multibyte_length
// called while the remaining length is >= MAX_MULTIBYTE_LENGTH.
while idx < len {
chars += 1;
match multibyte_length(&slice[idx..], false) {
None => {
// This is either an invalid multibyte sequence, or
// one that would encode a raw 8-bit byte, which we
// only use internally when the string is *already*
// multibyte.
idx += 1;
bytes += 2;
}
Some(n) => {
idx += n;
bytes += n as ptrdiff_t;
}
}
}
unsafe {
*nchars = chars;
*nbytes = bytes;
}
}
/// Arrange unibyte text at STR of NBYTES bytes as a multibyte text.
/// It actually converts only such 8-bit characters that don't construct
/// a multibyte sequence to multibyte forms of Latin-1 characters. If
/// NCHARS is nonzero, set *NCHARS to the number of characters in the
/// text. It is assured that we can use LEN bytes at STR as a work
/// area and that is enough. Return the number of bytes of the
/// resulting text.
#[no_mangle]
pub fn str_as_multibyte(
ptr: *mut c_uchar,
len: ptrdiff_t,
mut nbytes: ptrdiff_t,
nchars: *mut ptrdiff_t,
) -> ptrdiff_t {
// slice covers the whole work area to be able to write back
let slice = unsafe { slice::from_raw_parts_mut(ptr, len as usize) };
// first, search ASCII-only prefix that we can skip processing
let mut start = None;
let mut chars = 0;
let mut idx = 0;
while idx < nbytes as usize {
match multibyte_length(&slice[idx..], false) {
None => {
start = Some(idx);
break;
}
Some(n) => {
idx += n;
chars += 1;
}
}
}
if let Some(start) = start {
// copy the rest to the end of the work area, which is guaranteed to be
// large enough, so we can read from there while writing the output
let offset = (len - nbytes) as usize;
let slice = &mut slice[start..];
unsafe {
ptr::copy(
slice.as_mut_ptr(),
slice[offset..].as_mut_ptr(),
nbytes as usize - start,
);
}
let mut to = 0;
let mut from = offset;
while from < slice.len() {
chars += 1;
match multibyte_length(&slice[from..], false) {
Some(n) => for _ in 0..n {
slice[to] = slice[from];
from += 1;
to += 1;
},
None => {
let byte = slice[from];
to += write_codepoint(&mut slice[to..], raw_byte_codepoint(byte));
from += 1;
}
}
}
nbytes = (start + to) as ptrdiff_t;
}
if!nchars.is_null() {
unsafe {
*nchars = chars;
}
}
nbytes
}
/// Arrange multibyte text at STR of LEN bytes as a unibyte text. It
/// actually converts characters in the range 0x80..0xFF to unibyte.
#[no_mangle]
pub fn str_as_unibyte(ptr: *mut c_uchar, bytes: ptrdiff_t) -> ptrdiff_t {
let slice = unsafe { slice::from_raw_parts_mut(ptr, bytes as usize) };
let mut from = 0;
while from < bytes as usize {
let byte = slice[from];
match multibyte_length_by_head(byte) {
2 if byte & 0xFE == 0xC0 => break,
n => from += n,
}
}
let mut to = from;
while from < bytes as usize {
let byte = slice[from];
match multibyte_length_by_head(byte) {
2 if byte & 0xFE == 0xC0 => {
let newbyte = 0x80 | ((byte & 1) << 6) | (slice[from + 1] & 0x3F);
slice[to] = newbyte;
from += 2;
to += 1;
}
n => for _ in 0..n {
slice[to] = slice[from];
|
as_mut_slice
|
identifier_name
|
multibyte.rs
|
xC1. These 2-byte sequences are disallowed in UTF-8,
//! because they would form a duplicate encoding for the 1-byte
//! ASCII range.
//!
//! Due to these specialties, we cannot treat Emacs strings as Rust
//! `&str`, and this module regrettably contains adapted copies of
//! stretches of `std::str` functions.
use libc::{c_char, c_int, c_uchar, c_uint, ptrdiff_t};
use std::ptr;
use std::slice;
use remacs_sys::{EmacsInt, Lisp_String, CHARACTERBITS, CHAR_CTL, CHAR_MODIFIER_MASK, CHAR_SHIFT};
use remacs_sys::emacs_abort;
use lisp::ExternalPtr;
pub type LispStringRef = ExternalPtr<Lisp_String>;
// cannot use `char`, it takes values out of its range
pub type Codepoint = u32;
/// Maximum character code
pub const MAX_CHAR: Codepoint = (1 << CHARACTERBITS) - 1;
/// Maximum character codes for several encoded lengths
pub const MAX_1_BYTE_CHAR: Codepoint = 0x7F;
pub const MAX_2_BYTE_CHAR: Codepoint = 0x7FF;
pub const MAX_3_BYTE_CHAR: Codepoint = 0xFFFF;
pub const MAX_4_BYTE_CHAR: Codepoint = 0x1F_FFFF;
pub const MAX_5_BYTE_CHAR: Codepoint = 0x3F_FF7F;
/// Maximum length of a single encoded codepoint
pub const MAX_MULTIBYTE_LENGTH: usize = 5;
impl LispStringRef {
/// Return the string's len in bytes.
pub fn len_bytes(&self) -> ptrdiff_t {
if self.size_byte < 0 {
self.size
} else {
self.size_byte
}
}
/// Return the string's length in characters. Differs from
/// `len_bytes` for multibyte strings.
pub fn len_chars(&self) -> ptrdiff_t {
self.size
}
pub fn is_multibyte(&self) -> bool {
self.size_byte >= 0
}
pub fn data_ptr(&mut self) -> *mut c_uchar {
self.data as *mut c_uchar
}
pub fn sdata_ptr(&mut self) -> *mut c_char {
self.data as *mut c_char
}
pub fn const_data_ptr(&self) -> *const c_uchar {
self.data as *const c_uchar
}
pub fn const_sdata_ptr(&self) -> *const c_char {
self.data as *const c_char
}
#[inline]
pub fn as_slice(&self) -> &[u8] {
unsafe { slice::from_raw_parts(self.data as *const u8, self.len_bytes() as usize) }
}
#[inline]
pub fn as_mut_slice(&self) -> &mut [u8] {
unsafe { slice::from_raw_parts_mut(self.data as *mut u8, self.len_bytes() as usize) }
}
#[inline]
pub fn byte_at(&self, index: ptrdiff_t) -> u8 {
unsafe { *self.const_data_ptr().offset(index) }
}
}
pub struct LispStringRefIterator<'a> {
string_ref: &'a LispStringRef,
cur: usize,
}
pub struct LispStringRefCharIterator<'a>(LispStringRefIterator<'a>);
// Substitute for FETCH_STRING_CHAR_ADVANCE
impl<'a> Iterator for LispStringRefIterator<'a> {
type Item = (usize, Codepoint);
fn next(&mut self) -> Option<(usize, Codepoint)> {
if self.cur < self.string_ref.len_bytes() as usize {
let codepoint: Codepoint;
let old_index = self.cur;
let ref_slice = self.string_ref.as_slice();
if self.string_ref.is_multibyte() {
let (cp, advance) = multibyte_char_at(&ref_slice[self.cur..]);
codepoint = cp;
self.cur += advance;
} else {
codepoint = ref_slice[self.cur] as Codepoint;
self.cur += 1;
}
Some((old_index, codepoint))
} else {
None
}
}
}
impl<'a> Iterator for LispStringRefCharIterator<'a> {
type Item = Codepoint;
fn next(&mut self) -> Option<Codepoint> {
self.0.next().map(|result| result.1)
}
}
impl LispStringRef {
pub fn char_indices(&self) -> LispStringRefIterator {
LispStringRefIterator {
string_ref: self,
cur: 0,
}
}
pub fn chars(&self) -> LispStringRefCharIterator {
LispStringRefCharIterator(self.char_indices())
}
}
fn string_overflow() ->! {
error!("Maximum string size exceeded")
}
/// Parse unibyte string at STR of LEN bytes, and return the number of
/// bytes it may occupy when converted to multibyte string by
/// `str_to_multibyte`.
#[no_mangle]
pub fn count_size_as_multibyte(ptr: *const c_uchar, len: ptrdiff_t) -> ptrdiff_t
|
/// Same as the `BYTE8_TO_CHAR` macro.
#[inline]
pub fn raw_byte_codepoint(byte: c_uchar) -> Codepoint {
if byte >= 0x80 {
byte as Codepoint + 0x3F_FF00
} else {
byte as Codepoint
}
}
/// Same as the `CHAR_TO_BYTE8` macro.
#[inline]
pub fn raw_byte_from_codepoint(cp: Codepoint) -> c_uchar {
(cp - 0x3F_FF00) as c_uchar
}
/// Same as the `CHAR_TO_BYTE_SAFE` macro.
/// Return the raw 8-bit byte for character CP,
/// or -1 if CP doesn't correspond to a byte.
#[inline]
pub fn raw_byte_from_codepoint_safe(cp: Codepoint) -> EmacsInt {
if cp < 0x80 {
cp as EmacsInt
} else if cp > MAX_5_BYTE_CHAR {
raw_byte_from_codepoint(cp) as EmacsInt
} else {
-1
}
}
/// `UNIBYTE_TO_CHAR` macro
#[inline]
pub fn unibyte_to_char(cp: Codepoint) -> Codepoint {
if cp < 0x80 {
cp
} else {
raw_byte_codepoint(cp as c_uchar)
}
}
/// `MAKE_CHAR_MULTIBYTE` macro
#[inline]
pub fn make_char_multibyte(cp: Codepoint) -> Codepoint {
debug_assert!(cp < 256);
unibyte_to_char(cp)
}
/// Same as the `CHAR_STRING` macro.
#[inline]
fn write_codepoint(to: &mut [c_uchar], cp: Codepoint) -> usize {
if cp <= MAX_1_BYTE_CHAR {
to[0] = cp as c_uchar;
1
} else if cp <= MAX_2_BYTE_CHAR {
// note: setting later bytes first to avoid multiple bound checks
to[1] = 0x80 | (cp & 0x3F) as c_uchar;
to[0] = 0xC0 | (cp >> 6) as c_uchar;
2
} else if cp <= MAX_3_BYTE_CHAR {
to[2] = 0x80 | (cp & 0x3F) as c_uchar;
to[1] = 0x80 | ((cp >> 6) & 0x3F) as c_uchar;
to[0] = 0xE0 | (cp >> 12) as c_uchar;
3
} else if cp <= MAX_4_BYTE_CHAR {
to[3] = 0x80 | (cp & 0x3F) as c_uchar;
to[2] = 0x80 | ((cp >> 6) & 0x3F) as c_uchar;
to[1] = 0x80 | ((cp >> 12) & 0x3F) as c_uchar;
to[0] = 0xF0 | (cp >> 18) as c_uchar;
4
} else if cp <= MAX_5_BYTE_CHAR {
to[4] = 0x80 | (cp & 0x3F) as c_uchar;
to[3] = 0x80 | ((cp >> 6) & 0x3F) as c_uchar;
to[2] = 0x80 | ((cp >> 12) & 0x3F) as c_uchar;
to[1] = 0x80 | ((cp >> 18) & 0x0F) as c_uchar;
to[0] = 0xF8;
5
} else if cp <= MAX_CHAR {
let b = raw_byte_from_codepoint(cp);
to[1] = 0x80 | (b & 0x3F);
to[0] = 0xC0 | ((b >> 6) & 1);
2
} else {
error!("Invalid character: {:#x}", cp)
}
}
/// If character code C has modifier masks, reflect them to the
/// character code if possible. Return the resulting code.
#[no_mangle]
pub fn char_resolve_modifier_mask(ch: EmacsInt) -> EmacsInt {
let mut cp = ch as Codepoint;
// A non-ASCII character can't reflect modifier bits to the code.
if (cp &!CHAR_MODIFIER_MASK) >= 0x80 {
return cp as EmacsInt;
}
let ascii = (cp & 0x7F) as u8;
// For Meta, Shift, and Control modifiers, we need special care.
if cp & CHAR_SHIFT!= 0 {
let unshifted = cp &!CHAR_SHIFT;
// Shift modifier is valid only with [A-Za-z].
if ascii >= b'A' && ascii <= b'Z' {
cp = unshifted;
} else if ascii >= b'a' && ascii <= b'z' {
cp = unshifted &!0x20;
} else if ascii <= b''{
// Shift modifier for control characters and SPC is ignored.
cp = unshifted;
}
}
// Simulate the code in lread.c.
if cp & CHAR_CTL!= 0 {
// Allow `\C-'and `\C-?'.
if ascii == b''{
cp &=!0x7F &!CHAR_CTL;
} else if ascii == b'?' {
cp = 0x7F | (cp &!0x7F &!CHAR_CTL);
} else if ascii >= b'@' && ascii <= b'_' {
// ASCII control chars are made from letters (both cases),
// as well as the non-letters within 0o100...0o137.
cp &= 0x1F | (!0x7F &!CHAR_CTL);
}
}
cp as EmacsInt
}
/// Store multibyte form of character CP at TO. If CP has modifier bits,
/// handle them appropriately.
#[no_mangle]
pub fn char_string(mut cp: c_uint, to: *mut c_uchar) -> c_int {
if cp & CHAR_MODIFIER_MASK!= 0 {
cp = char_resolve_modifier_mask(cp as EmacsInt) as Codepoint;
cp &=!CHAR_MODIFIER_MASK;
}
write_codepoint(
unsafe { slice::from_raw_parts_mut(to, MAX_MULTIBYTE_LENGTH) },
cp,
) as c_int
}
/// Convert unibyte text at STR of BYTES bytes to a multibyte text
/// that contains the same single-byte characters. It actually
/// converts all 8-bit characters to multibyte forms. It is assured
/// that we can use LEN bytes at STR as a work area and that is
/// enough. Returns the byte length of the multibyte string.
#[no_mangle]
pub fn str_to_multibyte(ptr: *mut c_uchar, len: ptrdiff_t, bytes: ptrdiff_t) -> ptrdiff_t {
// slice covers the whole work area to be able to write back
let slice = unsafe { slice::from_raw_parts_mut(ptr, len as usize) };
// first, search ASCII-only prefix that we can skip processing
let mut start = 0;
for (idx, &byte) in slice.iter().enumerate() {
if byte >= 0x80 {
start = idx;
break;
}
// whole string is ASCII-only, done!
if idx as ptrdiff_t == bytes - 1 {
return bytes;
}
}
// copy the rest to the end of the work area, which is guaranteed to be
// large enough, so we can read from there while writing the output
let offset = (len - bytes) as usize;
let slice = &mut slice[start..];
unsafe {
ptr::copy(
slice.as_mut_ptr(),
slice[offset..].as_mut_ptr(),
bytes as usize - start,
);
}
let mut to = 0;
for from in offset..slice.len() {
let byte = slice[from];
to += write_codepoint(&mut slice[to..], raw_byte_codepoint(byte));
}
(start + to) as ptrdiff_t
}
/// Same as `MULTIBYTE_LENGTH` macro in C.
fn multibyte_length(slice: &[c_uchar], allow_encoded_raw: bool) -> Option<usize> {
let len = slice.len();
if len < 1 {
None
} else if slice[0] & 0x80 == 0 {
Some(1)
} else if len < 2 || slice[1] & 0xC0!= 0x80 {
None
} else if!allow_encoded_raw && slice[0] & 0xFE == 0xC0 {
None
} else if slice[0] & 0xE0 == 0xC0 {
Some(2)
} else if len < 3 || slice[2] & 0xC0!= 0x80 {
None
} else if slice[0] & 0xF0 == 0xE0 {
Some(3)
} else if len < 4 || slice[3] & 0xC0!= 0x80 {
None
} else if slice[0] & 0xF8 == 0xF0 {
Some(4)
} else if len < 5 || slice[4] & 0xC0!= 0x80 {
None
} else if slice[0] == 0xF8 && slice[1] & 0xF0 == 0x80 {
Some(5)
} else {
None
}
}
/// Same as the `STRING_CHAR_ADVANCE` macro.
#[inline]
pub fn multibyte_char_at(slice: &[c_uchar]) -> (Codepoint, usize) {
let head = slice[0] as Codepoint;
if head & 0x80 == 0 {
(head, 1)
} else if head & 0x20 == 0 {
let cp = ((head & 0x1F) << 6) | (slice[1] as Codepoint & 0x3F);
if head < 0xC2 {
(cp | 0x3F_FF80, 2)
} else {
(cp, 2)
}
} else if head & 0x10 == 0 {
(
((head & 0x0F) << 12) | ((slice[1] as Codepoint & 0x3F) << 6)
| (slice[2] as Codepoint & 0x3F),
3,
)
} else if head & 0x08 == 0 {
(
((head & 0x07) << 18) | ((slice[1] as Codepoint & 0x3F) << 12)
| ((slice[2] as Codepoint & 0x3F) << 6)
| (slice[3] as Codepoint & 0x3F),
4,
)
} else {
// the relevant bytes of "head" are always zero
(
((slice[1] as Codepoint & 0x3F) << 18) | ((slice[2] as Codepoint & 0x3F) << 12)
| ((slice[3] as Codepoint & 0x3F) << 6)
| (slice[4] as Codepoint & 0x3F),
5,
)
}
}
/// Same as `BYTES_BY_CHAR_HEAD` macro in C.
fn multibyte_length_by_head(byte: c_uchar) -> usize {
if byte & 0x80 == 0 {
1
} else if byte & 0x20 == 0 {
2
} else if byte & 0x10 == 0 {
3
} else if byte & 0x08 == 0 {
4
} else {
5
}
}
/// Return the number of characters in the NBYTES bytes at PTR.
/// This works by looking at the contents and checking for multibyte
/// sequences while assuming that there's no invalid sequence. It
/// ignores enable-multibyte-characters.
#[no_mangle]
pub fn multibyte_chars_in_text(ptr: *const c_uchar, nbytes: ptrdiff_t) -> ptrdiff_t {
let slice = unsafe { slice::from_raw_parts(ptr, nbytes as usize) };
let len = slice.len();
let mut idx = 0;
let mut chars = 0;
// TODO: make this an iterator?
while idx < len {
idx += multibyte_length(&slice[idx..], true).unwrap_or_else(|| unsafe { emacs_abort() });
chars += 1;
}
chars as ptrdiff_t
}
/// Parse unibyte text at STR of LEN bytes as a multibyte text, count
/// characters and bytes in it, and store them in *NCHARS and *NBYTES
/// respectively. On counting bytes, pay attention to that 8-bit
/// characters not constructing a valid multibyte sequence are
/// represented by 2-byte in a multibyte text.
#[no_mangle]
pub fn parse_str_as_multibyte(
ptr: *const c_uchar,
len: ptrdiff_t,
nchars: *mut ptrdiff_t,
nbytes: *mut ptrdiff_t,
) {
let slice = unsafe { slice::from_raw_parts(ptr, len as usize) };
let len = slice.len();
let mut chars = 0;
let mut bytes = 0;
let mut idx = 0;
// XXX: in the original, there is an "unchecked" version of multibyte_length
// called while the remaining length is >= MAX_MULTIBYTE_LENGTH.
while idx < len {
chars += 1;
match multibyte_length(&slice[idx..], false) {
None => {
// This is either an invalid multibyte sequence, or
// one that would encode a raw 8-bit byte, which we
// only use internally when the string is *already*
// multibyte.
idx += 1;
bytes += 2;
}
Some(n) => {
idx += n;
bytes += n as ptrdiff_t;
}
}
}
unsafe {
*nchars = chars;
*nbytes = bytes;
}
}
/// Arrange unibyte text at STR of NBYTES bytes as a multibyte text.
/// It actually converts only such 8-bit characters that don't construct
/// a multibyte sequence to multibyte forms of Latin-1 characters. If
/// NCHARS is nonzero, set *NCHARS to the number of characters in the
/// text. It is assured that we can use LEN bytes at STR as a work
/// area and that is enough. Return the number of bytes of the
/// resulting text.
#[no_mangle]
pub fn str_as_multibyte(
ptr: *mut c_uchar,
len: ptrdiff_t,
mut nbytes: ptrdiff_t,
nchars: *mut ptrdiff_t,
) -> ptrdiff_t {
// slice covers the whole work area to be able to write back
let slice = unsafe { slice::from_raw_parts_mut(ptr, len as usize) };
// first, search ASCII-only prefix that we can skip processing
let mut start = None;
let mut chars = 0;
let mut idx = 0;
while idx < nbytes as usize {
match multibyte_length(&slice[idx..], false) {
None => {
start = Some(idx);
break;
}
Some(n) => {
idx += n;
chars += 1;
}
}
}
if let Some(start) = start {
// copy the rest to the end of the work area, which is guaranteed to be
// large enough, so we can read from there while writing the output
let offset = (len - nbytes) as usize;
let slice = &mut slice[start..];
unsafe {
ptr::copy(
slice.as_mut_ptr(),
slice[offset..].as_mut_ptr(),
nbytes as usize - start,
);
}
let mut to = 0;
let mut from = offset;
while from < slice.len() {
chars += 1;
match multibyte_length(&slice[from..], false) {
Some(n) => for _ in 0..n {
slice[to] = slice[from];
from += 1;
to += 1;
},
None => {
let byte = slice[from];
to += write_codepoint(&mut slice[to..], raw_byte_codepoint(byte));
from += 1;
}
}
}
nbytes = (start + to) as ptrdiff_t;
}
if!nchars.is_null() {
unsafe {
*nchars = chars;
}
}
nbytes
}
/// Arrange multibyte text at STR of LEN bytes as a unibyte text. It
/// actually converts characters in the range 0x80..0xFF to unibyte.
#[no_mangle]
pub fn str_as_unibyte(ptr: *mut c_uchar, bytes: ptrdiff_t) -> ptrdiff_t {
let slice = unsafe { slice::from_raw_parts_mut(ptr, bytes as usize) };
let mut from = 0;
while from < bytes as usize {
let byte = slice[from];
match multibyte_length_by_head(byte) {
2 if byte & 0xFE == 0xC0 => break,
n => from += n,
}
}
let mut to = from;
while from < bytes as usize {
let byte = slice[from];
match multibyte_length_by_head(byte) {
2 if byte & 0xFE == 0xC0 => {
let newbyte = 0x80 | ((byte & 1) << 6) | (slice[from + 1] & 0x3F);
slice[to] = newbyte;
from += 2;
to += 1;
}
n => for _ in 0..n {
slice[to] = slice[from];
|
{
let slice = unsafe { slice::from_raw_parts(ptr, len as usize) };
slice.iter().fold(0, |total, &byte| {
let n = if byte < 0x80 { 1 } else { 2 };
total.checked_add(n).unwrap_or_else(|| string_overflow())
})
}
|
identifier_body
|
multibyte.rs
|
xC1. These 2-byte sequences are disallowed in UTF-8,
//! because they would form a duplicate encoding for the the 1-byte
//! ASCII range.
//!
//! Due to these specialties, we cannot treat Emacs strings as Rust
//! `&str`, and this module regrettably contains adapted copies of
//! stretches of `std::str` functions.
use libc::{c_char, c_int, c_uchar, c_uint, ptrdiff_t};
use std::ptr;
use std::slice;
use remacs_sys::{EmacsInt, Lisp_String, CHARACTERBITS, CHAR_CTL, CHAR_MODIFIER_MASK, CHAR_SHIFT};
use remacs_sys::emacs_abort;
use lisp::ExternalPtr;
pub type LispStringRef = ExternalPtr<Lisp_String>;
// cannot use `char`, it takes values out of its range
pub type Codepoint = u32;
/// Maximum character code
pub const MAX_CHAR: Codepoint = (1 << CHARACTERBITS) - 1;
/// Maximum character codes for several encoded lengths
pub const MAX_1_BYTE_CHAR: Codepoint = 0x7F;
pub const MAX_2_BYTE_CHAR: Codepoint = 0x7FF;
pub const MAX_3_BYTE_CHAR: Codepoint = 0xFFFF;
pub const MAX_4_BYTE_CHAR: Codepoint = 0x1F_FFFF;
pub const MAX_5_BYTE_CHAR: Codepoint = 0x3F_FF7F;
/// Maximum length of a single encoded codepoint
pub const MAX_MULTIBYTE_LENGTH: usize = 5;
impl LispStringRef {
/// Return the string's len in bytes.
pub fn len_bytes(&self) -> ptrdiff_t {
if self.size_byte < 0 {
self.size
} else {
self.size_byte
}
}
/// Return the string's length in characters. Differs from
/// `len_bytes` for multibyte strings.
pub fn len_chars(&self) -> ptrdiff_t {
self.size
}
pub fn is_multibyte(&self) -> bool {
self.size_byte >= 0
}
pub fn data_ptr(&mut self) -> *mut c_uchar {
self.data as *mut c_uchar
}
pub fn sdata_ptr(&mut self) -> *mut c_char {
self.data as *mut c_char
}
pub fn const_data_ptr(&self) -> *const c_uchar {
self.data as *const c_uchar
}
pub fn const_sdata_ptr(&self) -> *const c_char {
self.data as *const c_char
}
#[inline]
pub fn as_slice(&self) -> &[u8] {
unsafe { slice::from_raw_parts(self.data as *const u8, self.len_bytes() as usize) }
}
#[inline]
pub fn as_mut_slice(&self) -> &mut [u8] {
unsafe { slice::from_raw_parts_mut(self.data as *mut u8, self.len_bytes() as usize) }
}
#[inline]
pub fn byte_at(&self, index: ptrdiff_t) -> u8 {
unsafe { *self.const_data_ptr().offset(index) }
}
}
pub struct LispStringRefIterator<'a> {
string_ref: &'a LispStringRef,
cur: usize,
}
pub struct LispStringRefCharIterator<'a>(LispStringRefIterator<'a>);
// Substitute for FETCH_STRING_CHAR_ADVANCE
impl<'a> Iterator for LispStringRefIterator<'a> {
type Item = (usize, Codepoint);
fn next(&mut self) -> Option<(usize, Codepoint)> {
if self.cur < self.string_ref.len_bytes() as usize {
let codepoint: Codepoint;
let old_index = self.cur;
let ref_slice = self.string_ref.as_slice();
if self.string_ref.is_multibyte() {
let (cp, advance) = multibyte_char_at(&ref_slice[self.cur..]);
codepoint = cp;
self.cur += advance;
} else {
codepoint = ref_slice[self.cur] as Codepoint;
self.cur += 1;
}
Some((old_index, codepoint))
} else {
None
}
}
}
impl<'a> Iterator for LispStringRefCharIterator<'a> {
type Item = Codepoint;
fn next(&mut self) -> Option<Codepoint> {
self.0.next().map(|result| result.1)
}
}
impl LispStringRef {
pub fn char_indices(&self) -> LispStringRefIterator {
LispStringRefIterator {
string_ref: self,
cur: 0,
}
}
pub fn chars(&self) -> LispStringRefCharIterator {
LispStringRefCharIterator(self.char_indices())
}
}
fn string_overflow() ->! {
error!("Maximum string size exceeded")
}
/// Parse unibyte string at STR of LEN bytes, and return the number of
/// bytes it may occupy when converted to multibyte string by
/// `str_to_multibyte`.
#[no_mangle]
pub fn count_size_as_multibyte(ptr: *const c_uchar, len: ptrdiff_t) -> ptrdiff_t {
let slice = unsafe { slice::from_raw_parts(ptr, len as usize) };
slice.iter().fold(0, |total, &byte| {
let n = if byte < 0x80 { 1 } else { 2 };
total.checked_add(n).unwrap_or_else(|| string_overflow())
})
}
/// Same as the `BYTE8_TO_CHAR` macro.
#[inline]
pub fn raw_byte_codepoint(byte: c_uchar) -> Codepoint {
if byte >= 0x80 {
byte as Codepoint + 0x3F_FF00
} else {
byte as Codepoint
}
}
/// Same as the `CHAR_TO_BYTE8` macro.
#[inline]
pub fn raw_byte_from_codepoint(cp: Codepoint) -> c_uchar {
(cp - 0x3F_FF00) as c_uchar
}
/// Same as the `CHAR_TO_BYTE_SAFE` macro.
/// Return the raw 8-bit byte for character CP,
/// or -1 if CP doesn't correspond to a byte.
#[inline]
pub fn raw_byte_from_codepoint_safe(cp: Codepoint) -> EmacsInt {
if cp < 0x80 {
cp as EmacsInt
} else if cp > MAX_5_BYTE_CHAR {
raw_byte_from_codepoint(cp) as EmacsInt
} else {
-1
}
}
/// `UNIBYTE_TO_CHAR` macro
#[inline]
pub fn unibyte_to_char(cp: Codepoint) -> Codepoint {
if cp < 0x80 {
cp
} else {
raw_byte_codepoint(cp as c_uchar)
}
}
/// `MAKE_CHAR_MULTIBYTE` macro
#[inline]
pub fn make_char_multibyte(cp: Codepoint) -> Codepoint {
debug_assert!(cp < 256);
unibyte_to_char(cp)
}
/// Same as the `CHAR_STRING` macro.
#[inline]
fn write_codepoint(to: &mut [c_uchar], cp: Codepoint) -> usize {
if cp <= MAX_1_BYTE_CHAR {
to[0] = cp as c_uchar;
1
} else if cp <= MAX_2_BYTE_CHAR {
// note: setting later bytes first to avoid multiple bound checks
to[1] = 0x80 | (cp & 0x3F) as c_uchar;
to[0] = 0xC0 | (cp >> 6) as c_uchar;
2
} else if cp <= MAX_3_BYTE_CHAR {
to[2] = 0x80 | (cp & 0x3F) as c_uchar;
to[1] = 0x80 | ((cp >> 6) & 0x3F) as c_uchar;
to[0] = 0xE0 | (cp >> 12) as c_uchar;
3
} else if cp <= MAX_4_BYTE_CHAR {
to[3] = 0x80 | (cp & 0x3F) as c_uchar;
to[2] = 0x80 | ((cp >> 6) & 0x3F) as c_uchar;
to[1] = 0x80 | ((cp >> 12) & 0x3F) as c_uchar;
to[0] = 0xF0 | (cp >> 18) as c_uchar;
4
} else if cp <= MAX_5_BYTE_CHAR {
to[4] = 0x80 | (cp & 0x3F) as c_uchar;
to[3] = 0x80 | ((cp >> 6) & 0x3F) as c_uchar;
to[2] = 0x80 | ((cp >> 12) & 0x3F) as c_uchar;
to[1] = 0x80 | ((cp >> 18) & 0x0F) as c_uchar;
to[0] = 0xF8;
5
} else if cp <= MAX_CHAR {
let b = raw_byte_from_codepoint(cp);
to[1] = 0x80 | (b & 0x3F);
to[0] = 0xC0 | ((b >> 6) & 1);
2
} else {
error!("Invalid character: {:#x}", cp)
}
}
/// If character code C has modifier masks, reflect them to the
/// character code if possible. Return the resulting code.
#[no_mangle]
pub fn char_resolve_modifier_mask(ch: EmacsInt) -> EmacsInt {
let mut cp = ch as Codepoint;
// A non-ASCII character can't reflect modifier bits to the code.
if (cp &!CHAR_MODIFIER_MASK) >= 0x80 {
return cp as EmacsInt;
}
let ascii = (cp & 0x7F) as u8;
// For Meta, Shift, and Control modifiers, we need special care.
if cp & CHAR_SHIFT!= 0 {
let unshifted = cp &!CHAR_SHIFT;
// Shift modifier is valid only with [A-Za-z].
if ascii >= b'A' && ascii <= b'Z' {
cp = unshifted;
} else if ascii >= b'a' && ascii <= b'z' {
cp = unshifted &!0x20;
} else if ascii <= b''{
// Shift modifier for control characters and SPC is ignored.
cp = unshifted;
}
}
// Simulate the code in lread.c.
if cp & CHAR_CTL!= 0 {
// Allow `\C-'and `\C-?'.
if ascii == b''{
cp &=!0x7F &!CHAR_CTL;
} else if ascii == b'?' {
cp = 0x7F | (cp &!0x7F &!CHAR_CTL);
} else if ascii >= b'@' && ascii <= b'_' {
// ASCII control chars are made from letters (both cases),
// as well as the non-letters within 0o100...0o137.
cp &= 0x1F | (!0x7F &!CHAR_CTL);
}
}
cp as EmacsInt
}
/// Store multibyte form of character CP at TO. If CP has modifier bits,
/// handle them appropriately.
#[no_mangle]
pub fn char_string(mut cp: c_uint, to: *mut c_uchar) -> c_int {
if cp & CHAR_MODIFIER_MASK!= 0 {
cp = char_resolve_modifier_mask(cp as EmacsInt) as Codepoint;
cp &=!CHAR_MODIFIER_MASK;
}
write_codepoint(
unsafe { slice::from_raw_parts_mut(to, MAX_MULTIBYTE_LENGTH) },
cp,
) as c_int
}
/// Convert unibyte text at STR of BYTES bytes to a multibyte text
/// that contains the same single-byte characters. It actually
/// converts all 8-bit characters to multibyte forms. It is assured
/// that we can use LEN bytes at STR as a work area and that is
/// enough. Returns the byte length of the multibyte string.
#[no_mangle]
pub fn str_to_multibyte(ptr: *mut c_uchar, len: ptrdiff_t, bytes: ptrdiff_t) -> ptrdiff_t {
// slice covers the whole work area to be able to write back
let slice = unsafe { slice::from_raw_parts_mut(ptr, len as usize) };
// first, search ASCII-only prefix that we can skip processing
let mut start = 0;
for (idx, &byte) in slice.iter().enumerate() {
if byte >= 0x80 {
start = idx;
break;
}
// whole string is ASCII-only, done!
if idx as ptrdiff_t == bytes - 1 {
return bytes;
}
}
// copy the rest to the end of the work area, which is guaranteed to be
// large enough, so we can read from there while writing the output
let offset = (len - bytes) as usize;
let slice = &mut slice[start..];
unsafe {
ptr::copy(
slice.as_mut_ptr(),
slice[offset..].as_mut_ptr(),
bytes as usize - start,
);
}
let mut to = 0;
for from in offset..slice.len() {
let byte = slice[from];
to += write_codepoint(&mut slice[to..], raw_byte_codepoint(byte));
}
(start + to) as ptrdiff_t
}
/// Same as `MULTIBYTE_LENGTH` macro in C.
fn multibyte_length(slice: &[c_uchar], allow_encoded_raw: bool) -> Option<usize> {
let len = slice.len();
if len < 1 {
None
} else if slice[0] & 0x80 == 0 {
Some(1)
} else if len < 2 || slice[1] & 0xC0!= 0x80 {
None
} else if!allow_encoded_raw && slice[0] & 0xFE == 0xC0 {
None
} else if slice[0] & 0xE0 == 0xC0 {
Some(2)
} else if len < 3 || slice[2] & 0xC0!= 0x80 {
None
} else if slice[0] & 0xF0 == 0xE0 {
Some(3)
} else if len < 4 || slice[3] & 0xC0!= 0x80 {
None
} else if slice[0] & 0xF8 == 0xF0 {
Some(4)
} else if len < 5 || slice[4] & 0xC0!= 0x80
|
else if slice[0] == 0xF8 && slice[1] & 0xF0 == 0x80 {
Some(5)
} else {
None
}
}
/// Same as the `STRING_CHAR_ADVANCE` macro.
#[inline]
pub fn multibyte_char_at(slice: &[c_uchar]) -> (Codepoint, usize) {
let head = slice[0] as Codepoint;
if head & 0x80 == 0 {
(head, 1)
} else if head & 0x20 == 0 {
let cp = ((head & 0x1F) << 6) | (slice[1] as Codepoint & 0x3F);
if head < 0xC2 {
(cp | 0x3F_FF80, 2)
} else {
(cp, 2)
}
} else if head & 0x10 == 0 {
(
((head & 0x0F) << 12) | ((slice[1] as Codepoint & 0x3F) << 6)
| (slice[2] as Codepoint & 0x3F),
3,
)
} else if head & 0x08 == 0 {
(
((head & 0x07) << 18) | ((slice[1] as Codepoint & 0x3F) << 12)
| ((slice[2] as Codepoint & 0x3F) << 6)
| (slice[3] as Codepoint & 0x3F),
4,
)
} else {
// the relevant bytes of "head" are always zero
(
((slice[1] as Codepoint & 0x3F) << 18) | ((slice[2] as Codepoint & 0x3F) << 12)
| ((slice[3] as Codepoint & 0x3F) << 6)
| (slice[4] as Codepoint & 0x3F),
5,
)
}
}
/// Same as `BYTES_BY_CHAR_HEAD` macro in C.
fn multibyte_length_by_head(byte: c_uchar) -> usize {
if byte & 0x80 == 0 {
1
} else if byte & 0x20 == 0 {
2
} else if byte & 0x10 == 0 {
3
} else if byte & 0x08 == 0 {
4
} else {
5
}
}
/// Return the number of characters in the NBYTES bytes at PTR.
/// This works by looking at the contents and checking for multibyte
/// sequences while assuming that there's no invalid sequence. It
/// ignores enable-multibyte-characters.
#[no_mangle]
pub fn multibyte_chars_in_text(ptr: *const c_uchar, nbytes: ptrdiff_t) -> ptrdiff_t {
let slice = unsafe { slice::from_raw_parts(ptr, nbytes as usize) };
let len = slice.len();
let mut idx = 0;
let mut chars = 0;
// TODO: make this an iterator?
while idx < len {
idx += multibyte_length(&slice[idx..], true).unwrap_or_else(|| unsafe { emacs_abort() });
chars += 1;
}
chars as ptrdiff_t
}
/// Parse unibyte text at STR of LEN bytes as a multibyte text, count
/// characters and bytes in it, and store them in *NCHARS and *NBYTES
/// respectively. On counting bytes, pay attention to that 8-bit
/// characters not constructing a valid multibyte sequence are
/// represented by 2-byte in a multibyte text.
#[no_mangle]
pub fn parse_str_as_multibyte(
ptr: *const c_uchar,
len: ptrdiff_t,
nchars: *mut ptrdiff_t,
nbytes: *mut ptrdiff_t,
) {
let slice = unsafe { slice::from_raw_parts(ptr, len as usize) };
let len = slice.len();
let mut chars = 0;
let mut bytes = 0;
let mut idx = 0;
// XXX: in the original, there is an "unchecked" version of multibyte_length
// called while the remaining length is >= MAX_MULTIBYTE_LENGTH.
while idx < len {
chars += 1;
match multibyte_length(&slice[idx..], false) {
None => {
// This is either an invalid multibyte sequence, or
// one that would encode a raw 8-bit byte, which we
// only use internally when the string is *already*
// multibyte.
idx += 1;
bytes += 2;
}
Some(n) => {
idx += n;
bytes += n as ptrdiff_t;
}
}
}
unsafe {
*nchars = chars;
*nbytes = bytes;
}
}
/// Arrange unibyte text at STR of NBYTES bytes as a multibyte text.
/// It actually converts only such 8-bit characters that don't construct
/// a multibyte sequence to multibyte forms of Latin-1 characters. If
/// NCHARS is nonzero, set *NCHARS to the number of characters in the
/// text. It is assured that we can use LEN bytes at STR as a work
/// area and that is enough. Return the number of bytes of the
/// resulting text.
#[no_mangle]
pub fn str_as_multibyte(
ptr: *mut c_uchar,
len: ptrdiff_t,
mut nbytes: ptrdiff_t,
nchars: *mut ptrdiff_t,
) -> ptrdiff_t {
// slice covers the whole work area to be able to write back
let slice = unsafe { slice::from_raw_parts_mut(ptr, len as usize) };
// first, search ASCII-only prefix that we can skip processing
let mut start = None;
let mut chars = 0;
let mut idx = 0;
while idx < nbytes as usize {
match multibyte_length(&slice[idx..], false) {
None => {
start = Some(idx);
break;
}
Some(n) => {
idx += n;
chars += 1;
}
}
}
if let Some(start) = start {
// copy the rest to the end of the work area, which is guaranteed to be
// large enough, so we can read from there while writing the output
let offset = (len - nbytes) as usize;
let slice = &mut slice[start..];
unsafe {
ptr::copy(
slice.as_mut_ptr(),
slice[offset..].as_mut_ptr(),
nbytes as usize - start,
);
}
let mut to = 0;
let mut from = offset;
while from < slice.len() {
chars += 1;
match multibyte_length(&slice[from..], false) {
Some(n) => for _ in 0..n {
slice[to] = slice[from];
from += 1;
to += 1;
},
None => {
let byte = slice[from];
to += write_codepoint(&mut slice[to..], raw_byte_codepoint(byte));
from += 1;
}
}
}
nbytes = (start + to) as ptrdiff_t;
}
if!nchars.is_null() {
unsafe {
*nchars = chars;
}
}
nbytes
}
/// Arrange multibyte text at STR of LEN bytes as a unibyte text. It
/// actually converts characters in the range 0x80..0xFF to unibyte.
#[no_mangle]
pub fn str_as_unibyte(ptr: *mut c_uchar, bytes: ptrdiff_t) -> ptrdiff_t {
let slice = unsafe { slice::from_raw_parts_mut(ptr, bytes as usize) };
let mut from = 0;
while from < bytes as usize {
let byte = slice[from];
match multibyte_length_by_head(byte) {
2 if byte & 0xFE == 0xC0 => break,
n => from += n,
}
}
let mut to = from;
while from < bytes as usize {
let byte = slice[from];
match multibyte_length_by_head(byte) {
2 if byte & 0xFE == 0xC0 => {
let newbyte = 0x80 | ((byte & 1) << 6) | (slice[from + 1] & 0x3F);
slice[to] = newbyte;
from += 2;
to += 1;
}
n => for _ in 0..n {
slice[to] = slice[from];
|
{
None
}
|
conditional_block
|
static_mut.rs
|
/** `Internal variability` for [`Lazystatic`](https://crates.io/crates/lazy_static)
## Example:
### On Cargo.toml:
```toml
lazy_static = "^0.2.8"
stderr = "0.8.1"
```
### On Code:
```rust
#[macro_use]
extern crate lazy_static;
extern crate stderr;
use stderr::StaticMut;
lazy_static!{
static ref STRING : StaticMut<String> = StaticMut::new(String::new());
static ref USIZE : StaticMut<Usize> = StaticMut::new(Usize(0));
}
fn main() {
// Before write, You can read it Concurrent safely.
println!("{:?}", STRING.as_ref());
println!("{:?}", USIZE.as_ref());
let str = {
let mut str = "StaticMut, 雪之下".to_string();
// do some work
str
};
// But when write it, operate it Concurrent is unsafe.
{
STRING.set(str); // update by setting value
USIZE.as_mut().0 = 123; // update by modifying field
}
// After write, You can read it Concurrent safely.
println!("{:?}", STRING.as_ref());
println!("{:?}", USIZE.as_ref());
}
#[derive(Debug)]
struct Usize(usize);
```
## About safe and unsafe
If you read or write it when you write it, I don't ensure anything.
You can add `AtomicXXX` to ensure safe on above situation also.
If you need full Concurrent read and write, you maybe need `RwLock`
*/
#[derive(Debug)]
pub struct StaticMut<T>(UnsafeCell<T>);
use std::marker::Sync;
use std::cell::UnsafeCell;
unsafe impl<T> Sync for StaticMut<T> {}
impl<T> StaticMut<T> {
#[inline]
pub fn new(value: T) -> Self {
|
// read it
#[inline]
#[allow(unknown_lints,should_implement_trait)]
pub fn as_ref(&self) -> &T {
unsafe { self.0.get().as_ref().unwrap() }
}
/// write it
#[allow(unknown_lints,mut_from_ref)]
#[inline]
pub fn as_mut(&self) -> &mut T {
unsafe { self.0.get().as_mut().unwrap() }
}
/// update it
#[inline]
pub fn set(&self, value:T) {
*self.as_mut() = value
}
///Unwraps the value
#[inline]
pub fn into_inner(self)->T {
unsafe {self.0.into_inner()}
}
}
|
StaticMut(UnsafeCell::new(value))
}
/
|
identifier_body
|
static_mut.rs
|
/** `Internal variability` for [`Lazystatic`](https://crates.io/crates/lazy_static)
## Example:
### On Cargo.toml:
```toml
lazy_static = "^0.2.8"
stderr = "0.8.1"
```
### On Code:
```rust
#[macro_use]
extern crate lazy_static;
extern crate stderr;
use stderr::StaticMut;
lazy_static!{
static ref STRING : StaticMut<String> = StaticMut::new(String::new());
static ref USIZE : StaticMut<Usize> = StaticMut::new(Usize(0));
}
fn main() {
// Before write, You can read it Concurrent safely.
println!("{:?}", STRING.as_ref());
println!("{:?}", USIZE.as_ref());
let str = {
let mut str = "StaticMut, 雪之下".to_string();
// do some work
str
};
// But when write it, operate it Concurrent is unsafe.
{
STRING.set(str); // update by setting value
USIZE.as_mut().0 = 123; // update by modifying field
}
// After write, You can read it Concurrent safely.
println!("{:?}", STRING.as_ref());
println!("{:?}", USIZE.as_ref());
}
#[derive(Debug)]
struct Usize(usize);
```
## About safe and unsafe
If you read or write it when you write it, I don't ensure anything.
You can add `AtomicXXX` to ensure safe on above situation also.
If you need full Concurrent read and write, you maybe need `RwLock`
|
use std::cell::UnsafeCell;
unsafe impl<T> Sync for StaticMut<T> {}
impl<T> StaticMut<T> {
#[inline]
pub fn new(value: T) -> Self {
StaticMut(UnsafeCell::new(value))
}
/// read it
#[inline]
#[allow(unknown_lints,should_implement_trait)]
pub fn as_ref(&self) -> &T {
unsafe { self.0.get().as_ref().unwrap() }
}
/// write it
#[allow(unknown_lints,mut_from_ref)]
#[inline]
pub fn as_mut(&self) -> &mut T {
unsafe { self.0.get().as_mut().unwrap() }
}
/// update it
#[inline]
pub fn set(&self, value:T) {
*self.as_mut() = value
}
///Unwraps the value
#[inline]
pub fn into_inner(self)->T {
unsafe {self.0.into_inner()}
}
}
|
*/
#[derive(Debug)]
pub struct StaticMut<T>(UnsafeCell<T>);
use std::marker::Sync;
|
random_line_split
|
static_mut.rs
|
/** `Internal variability` for [`Lazystatic`](https://crates.io/crates/lazy_static)
## Example:
### On Cargo.toml:
```toml
lazy_static = "^0.2.8"
stderr = "0.8.1"
```
### On Code:
```rust
#[macro_use]
extern crate lazy_static;
extern crate stderr;
use stderr::StaticMut;
lazy_static!{
static ref STRING : StaticMut<String> = StaticMut::new(String::new());
static ref USIZE : StaticMut<Usize> = StaticMut::new(Usize(0));
}
fn main() {
// Before write, You can read it Concurrent safely.
println!("{:?}", STRING.as_ref());
println!("{:?}", USIZE.as_ref());
let str = {
let mut str = "StaticMut, 雪之下".to_string();
// do some work
str
};
// But when write it, operate it Concurrent is unsafe.
{
STRING.set(str); // update by setting value
USIZE.as_mut().0 = 123; // update by modifying field
}
// After write, You can read it Concurrent safely.
println!("{:?}", STRING.as_ref());
println!("{:?}", USIZE.as_ref());
}
#[derive(Debug)]
struct Usize(usize);
```
## About safe and unsafe
If you read or write it when you write it, I don't ensure anything.
You can add `AtomicXXX` to ensure safe on above situation also.
If you need full Concurrent read and write, you maybe need `RwLock`
*/
#[derive(Debug)]
pub struct Static
|
safeCell<T>);
use std::marker::Sync;
use std::cell::UnsafeCell;
unsafe impl<T> Sync for StaticMut<T> {}
impl<T> StaticMut<T> {
#[inline]
pub fn new(value: T) -> Self {
StaticMut(UnsafeCell::new(value))
}
/// read it
#[inline]
#[allow(unknown_lints,should_implement_trait)]
pub fn as_ref(&self) -> &T {
unsafe { self.0.get().as_ref().unwrap() }
}
/// write it
#[allow(unknown_lints,mut_from_ref)]
#[inline]
pub fn as_mut(&self) -> &mut T {
unsafe { self.0.get().as_mut().unwrap() }
}
/// update it
#[inline]
pub fn set(&self, value:T) {
*self.as_mut() = value
}
///Unwraps the value
#[inline]
pub fn into_inner(self)->T {
unsafe {self.0.into_inner()}
}
}
|
Mut<T>(Un
|
identifier_name
|
compare.rs
|
#![crate_id="compare#0.1"]
#![crate_type="bin"]
extern crate bancnova;
use std::os;
use bancnova::syntax::bscode;
use bancnova::syntax::tokenize;
use bancnova::syntax::tokenize::Tokenizer;
use bancnova::syntax::tree::Tree;
use std::io::IoResult;
use std::cmp::min;
mod util;
fn main() {
let args = os::args();
match compare_files(args.get(1).as_slice(), args.get(2).as_slice()) {
Err(err) => {
println!("error: [{}] {}", err.kind, err.desc);
if err.detail.is_some() {
println!("\t{}", err.detail.unwrap());
}
os::set_exit_status(-1);
},
_ => {
println!("files match");
},
}
}
fn read_tree<R: Reader>(tokenizer: &mut Tokenizer<R>) -> IoResult<Tree<bscode::Instruction>> {
match Tree::parse(tokenizer) {
Ok(tree) => Ok(tree),
Err(s) => util::make_ioerr(s, tokenizer),
}
}
struct ComparisonReporter<'a> {
filename1: &'a str,
filename2: &'a str,
count: uint,
}
impl<'a> ComparisonReporter<'a> {
fn new(filename1: &'a str, filename2: &'a str) -> ComparisonReporter<'a> {
ComparisonReporter::<'a> { filename1: filename1, filename2: filename2, count: 0 }
}
fn report_general(&mut self, line: uint, message1: &str, message2: &str) {
println!("{}:{}: {}", self.filename1, line, message1);
println!("{}:{}: {}", self.filename2, line, message2);
self.count += 1;
}
fn report(&mut self, line: uint, message: &str, inst1: &bscode::Instruction, inst2: &bscode::Instruction) {
println!("{}:{}: {} {}", self.filename1, line, message, inst1);
println!("{}:{}: {} {}", self.filename2, line, message, inst2);
self.count += 1;
}
fn end(&self) -> IoResult<()> {
if self.count == 0 {
Ok(())
} else {
util::make_ioerr_noline("files do not match")
}
}
}
fn compare(tree1: Tree<bscode::Instruction>, tree2: Tree<bscode::Instruction>, reporter: &mut ComparisonReporter) -> IoResult<()> {
let length = min(tree1.len(), tree2.len());
for i in range(0, length) {
let inst1 = tree1.get(i);
let inst2 = tree2.get(i);
if inst1!= inst2 {
reporter.report(i+1, "mismatch", inst1, inst2);
}
}
if tree1.len()!= tree2.len() {
reporter.report_general(0, format!("{} line(s)", tree1.len()).as_slice(), format!("{} line(s)", tree2.len()).as_slice() );
}
reporter.end()
}
fn
|
(filename1: &str, filename2: &str) -> IoResult<()> {
let file1 = match util::open_input(filename1) {
Ok(p) => p,
Err(e) => { return Err(e); },
};
let file2 = match util::open_input(filename2) {
Ok(p) => p,
Err(e) => { return Err(e); },
};
let mut tokenizer1 = tokenize::from_file(file1);
let mut tokenizer2 = tokenize::from_file(file2);
let tree1 = read_tree(&mut tokenizer1);
let tree2 = read_tree(&mut tokenizer2);
let mut reporter = ComparisonReporter::new(filename1, filename2);
match (tree1, tree2) {
(Ok(tree1), Ok(tree2)) => compare(tree1, tree2, &mut reporter),
(Err(e), Ok(_)) => Err(e),
(_, Err(e)) => Err(e),
}
}
|
compare_files
|
identifier_name
|
compare.rs
|
#![crate_id="compare#0.1"]
#![crate_type="bin"]
extern crate bancnova;
use std::os;
use bancnova::syntax::bscode;
use bancnova::syntax::tokenize;
use bancnova::syntax::tokenize::Tokenizer;
use bancnova::syntax::tree::Tree;
use std::io::IoResult;
use std::cmp::min;
mod util;
fn main() {
let args = os::args();
match compare_files(args.get(1).as_slice(), args.get(2).as_slice()) {
Err(err) => {
println!("error: [{}] {}", err.kind, err.desc);
if err.detail.is_some() {
println!("\t{}", err.detail.unwrap());
}
os::set_exit_status(-1);
},
_ => {
println!("files match");
},
}
}
fn read_tree<R: Reader>(tokenizer: &mut Tokenizer<R>) -> IoResult<Tree<bscode::Instruction>> {
match Tree::parse(tokenizer) {
Ok(tree) => Ok(tree),
Err(s) => util::make_ioerr(s, tokenizer),
}
}
struct ComparisonReporter<'a> {
filename1: &'a str,
|
impl<'a> ComparisonReporter<'a> {
fn new(filename1: &'a str, filename2: &'a str) -> ComparisonReporter<'a> {
ComparisonReporter::<'a> { filename1: filename1, filename2: filename2, count: 0 }
}
fn report_general(&mut self, line: uint, message1: &str, message2: &str) {
println!("{}:{}: {}", self.filename1, line, message1);
println!("{}:{}: {}", self.filename2, line, message2);
self.count += 1;
}
fn report(&mut self, line: uint, message: &str, inst1: &bscode::Instruction, inst2: &bscode::Instruction) {
println!("{}:{}: {} {}", self.filename1, line, message, inst1);
println!("{}:{}: {} {}", self.filename2, line, message, inst2);
self.count += 1;
}
fn end(&self) -> IoResult<()> {
if self.count == 0 {
Ok(())
} else {
util::make_ioerr_noline("files do not match")
}
}
}
fn compare(tree1: Tree<bscode::Instruction>, tree2: Tree<bscode::Instruction>, reporter: &mut ComparisonReporter) -> IoResult<()> {
let length = min(tree1.len(), tree2.len());
for i in range(0, length) {
let inst1 = tree1.get(i);
let inst2 = tree2.get(i);
if inst1!= inst2 {
reporter.report(i+1, "mismatch", inst1, inst2);
}
}
if tree1.len()!= tree2.len() {
reporter.report_general(0, format!("{} line(s)", tree1.len()).as_slice(), format!("{} line(s)", tree2.len()).as_slice() );
}
reporter.end()
}
fn compare_files(filename1: &str, filename2: &str) -> IoResult<()> {
let file1 = match util::open_input(filename1) {
Ok(p) => p,
Err(e) => { return Err(e); },
};
let file2 = match util::open_input(filename2) {
Ok(p) => p,
Err(e) => { return Err(e); },
};
let mut tokenizer1 = tokenize::from_file(file1);
let mut tokenizer2 = tokenize::from_file(file2);
let tree1 = read_tree(&mut tokenizer1);
let tree2 = read_tree(&mut tokenizer2);
let mut reporter = ComparisonReporter::new(filename1, filename2);
match (tree1, tree2) {
(Ok(tree1), Ok(tree2)) => compare(tree1, tree2, &mut reporter),
(Err(e), Ok(_)) => Err(e),
(_, Err(e)) => Err(e),
}
}
|
filename2: &'a str,
count: uint,
}
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.