file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
build.rs
|
extern crate cc;
extern crate bindgen;
use std::env;
use std::fs;
use std::path::PathBuf;
fn link(name: &str, bundled: bool) {
use std::env::var;
let target = var("TARGET").unwrap();
let target: Vec<_> = target.split('-').collect();
if target.get(2) == Some(&"windows") {
println!("cargo:rustc-link-lib=dylib={}", name);
if bundled && target.get(3) == Some(&"gnu") {
let dir = var("CARGO_MANIFEST_DIR").unwrap();
println!("cargo:rustc-link-search=native={}/{}", dir, target[0]);
}
}
}
fn fail_on_empty_directory(name: &str) {
if fs::read_dir(name).unwrap().count() == 0 {
println!(
"The `{}` directory is empty, did you forget to pull the submodules?",
name
);
println!("Try `git submodule update --init --recursive`");
panic!();
}
}
fn bindgen_rocksdb() {
let bindings = bindgen::Builder::default()
.header("rocksdb/include/rocksdb/c.h")
.blacklist_type("max_align_t") // https://github.com/rust-lang-nursery/rust-bindgen/issues/550
.ctypes_prefix("libc")
.generate()
.expect("unable to generate rocksdb bindings");
let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
bindings
.write_to_file(out_path.join("bindings.rs"))
.expect("unable to write rocksdb bindings");
}
fn build_rocksdb() {
let mut config = cc::Build::new();
config.include("rocksdb/include/");
config.include("rocksdb/");
config.include("rocksdb/third-party/gtest-1.7.0/fused-src/");
config.include("snappy/");
config.include(".");
config.define("NDEBUG", Some("1"));
config.define("SNAPPY", Some("1"));
let mut lib_sources = include_str!("rocksdb_lib_sources.txt")
.split(" ")
.collect::<Vec<&'static str>>();
// We have a pregenerated a version of build_version.cc in the local directory
lib_sources = lib_sources
.iter()
.cloned()
.filter(|file| *file!= "util/build_version.cc")
.collect::<Vec<&'static str>>();
if cfg!(target_os = "macos")
|
if cfg!(target_os = "linux") {
config.define("OS_LINUX", Some("1"));
config.define("ROCKSDB_PLATFORM_POSIX", Some("1"));
config.define("ROCKSDB_LIB_IO_POSIX", Some("1"));
// COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp"
}
if cfg!(target_os = "freebsd") {
config.define("OS_FREEBSD", Some("1"));
config.define("ROCKSDB_PLATFORM_POSIX", Some("1"));
config.define("ROCKSDB_LIB_IO_POSIX", Some("1"));
}
if cfg!(windows) {
link("rpcrt4", false);
link("Shlwapi", false);
config.define("OS_WIN", Some("1"));
// Remove POSIX-specific sources
lib_sources = lib_sources
.iter()
.cloned()
.filter(|file| match *file {
"port/port_posix.cc" |
"env/env_posix.cc" |
"env/io_posix.cc" => false,
_ => true,
})
.collect::<Vec<&'static str>>();
// Add Windows-specific sources
lib_sources.push("port/win/port_win.cc");
lib_sources.push("port/win/env_win.cc");
lib_sources.push("port/win/env_default.cc");
lib_sources.push("port/win/win_logger.cc");
lib_sources.push("port/win/io_win.cc");
lib_sources.push("port/win/win_thread.cc");
}
if cfg!(target_env = "msvc") {
config.flag("-EHsc");
} else {
config.flag("-std=c++11");
// this was breaking the build on travis due to
// > 4mb of warnings emitted.
config.flag("-Wno-unused-parameter");
}
for file in lib_sources {
let file = "rocksdb/".to_string() + file;
config.file(&file);
}
config.file("build_version.cc");
config.cpp(true);
config.compile("librocksdb.a");
}
fn build_snappy() {
let mut config = cc::Build::new();
config.include("snappy/");
config.include(".");
config.define("NDEBUG", Some("1"));
if cfg!(target_env = "msvc") {
config.flag("-EHsc");
} else {
config.flag("-std=c++11");
}
config.file("snappy/snappy.cc");
config.file("snappy/snappy-sinksource.cc");
config.file("snappy/snappy-c.cc");
config.cpp(true);
config.compile("libsnappy.a");
}
fn try_to_find_and_link_lib(lib_name: &str) -> bool {
if let Ok(lib_dir) = env::var(&format!("{}_LIB_DIR", lib_name)) {
println!("cargo:rustc-link-search=native={}", lib_dir);
let mode = match env::var_os(&format!("{}_STATIC", lib_name)) {
Some(_) => "static",
None => "dylib",
};
println!("cargo:rustc-link-lib={}={}", mode, lib_name.to_lowercase());
return true;
}
false
}
fn main() {
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-changed=rocksdb/");
println!("cargo:rerun-if-changed=snappy/");
fail_on_empty_directory("rocksdb");
fail_on_empty_directory("snappy");
bindgen_rocksdb();
if!try_to_find_and_link_lib("ROCKSDB") {
build_rocksdb();
}
if!try_to_find_and_link_lib("SNAPPY") {
build_snappy();
}
}
|
{
config.define("OS_MACOSX", Some("1"));
config.define("ROCKSDB_PLATFORM_POSIX", Some("1"));
config.define("ROCKSDB_LIB_IO_POSIX", Some("1"));
}
|
conditional_block
|
build.rs
|
extern crate cc;
extern crate bindgen;
use std::env;
use std::fs;
use std::path::PathBuf;
fn link(name: &str, bundled: bool) {
use std::env::var;
let target = var("TARGET").unwrap();
let target: Vec<_> = target.split('-').collect();
if target.get(2) == Some(&"windows") {
println!("cargo:rustc-link-lib=dylib={}", name);
if bundled && target.get(3) == Some(&"gnu") {
let dir = var("CARGO_MANIFEST_DIR").unwrap();
println!("cargo:rustc-link-search=native={}/{}", dir, target[0]);
}
}
}
fn fail_on_empty_directory(name: &str) {
if fs::read_dir(name).unwrap().count() == 0 {
println!(
"The `{}` directory is empty, did you forget to pull the submodules?",
name
);
println!("Try `git submodule update --init --recursive`");
panic!();
}
}
fn bindgen_rocksdb() {
let bindings = bindgen::Builder::default()
.header("rocksdb/include/rocksdb/c.h")
.blacklist_type("max_align_t") // https://github.com/rust-lang-nursery/rust-bindgen/issues/550
.ctypes_prefix("libc")
.generate()
.expect("unable to generate rocksdb bindings");
let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
bindings
.write_to_file(out_path.join("bindings.rs"))
.expect("unable to write rocksdb bindings");
}
fn build_rocksdb() {
let mut config = cc::Build::new();
config.include("rocksdb/include/");
config.include("rocksdb/");
config.include("rocksdb/third-party/gtest-1.7.0/fused-src/");
config.include("snappy/");
config.include(".");
config.define("NDEBUG", Some("1"));
config.define("SNAPPY", Some("1"));
let mut lib_sources = include_str!("rocksdb_lib_sources.txt")
.split(" ")
.collect::<Vec<&'static str>>();
// We have a pregenerated a version of build_version.cc in the local directory
lib_sources = lib_sources
.iter()
.cloned()
.filter(|file| *file!= "util/build_version.cc")
.collect::<Vec<&'static str>>();
if cfg!(target_os = "macos") {
config.define("OS_MACOSX", Some("1"));
config.define("ROCKSDB_PLATFORM_POSIX", Some("1"));
config.define("ROCKSDB_LIB_IO_POSIX", Some("1"));
}
if cfg!(target_os = "linux") {
config.define("OS_LINUX", Some("1"));
config.define("ROCKSDB_PLATFORM_POSIX", Some("1"));
config.define("ROCKSDB_LIB_IO_POSIX", Some("1"));
// COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp"
}
if cfg!(target_os = "freebsd") {
config.define("OS_FREEBSD", Some("1"));
config.define("ROCKSDB_PLATFORM_POSIX", Some("1"));
config.define("ROCKSDB_LIB_IO_POSIX", Some("1"));
}
if cfg!(windows) {
link("rpcrt4", false);
link("Shlwapi", false);
config.define("OS_WIN", Some("1"));
// Remove POSIX-specific sources
lib_sources = lib_sources
.iter()
.cloned()
.filter(|file| match *file {
"port/port_posix.cc" |
"env/env_posix.cc" |
"env/io_posix.cc" => false,
_ => true,
})
.collect::<Vec<&'static str>>();
// Add Windows-specific sources
lib_sources.push("port/win/port_win.cc");
lib_sources.push("port/win/env_win.cc");
lib_sources.push("port/win/env_default.cc");
lib_sources.push("port/win/win_logger.cc");
lib_sources.push("port/win/io_win.cc");
lib_sources.push("port/win/win_thread.cc");
}
if cfg!(target_env = "msvc") {
|
config.flag("-Wno-unused-parameter");
}
for file in lib_sources {
let file = "rocksdb/".to_string() + file;
config.file(&file);
}
config.file("build_version.cc");
config.cpp(true);
config.compile("librocksdb.a");
}
fn build_snappy() {
let mut config = cc::Build::new();
config.include("snappy/");
config.include(".");
config.define("NDEBUG", Some("1"));
if cfg!(target_env = "msvc") {
config.flag("-EHsc");
} else {
config.flag("-std=c++11");
}
config.file("snappy/snappy.cc");
config.file("snappy/snappy-sinksource.cc");
config.file("snappy/snappy-c.cc");
config.cpp(true);
config.compile("libsnappy.a");
}
fn try_to_find_and_link_lib(lib_name: &str) -> bool {
if let Ok(lib_dir) = env::var(&format!("{}_LIB_DIR", lib_name)) {
println!("cargo:rustc-link-search=native={}", lib_dir);
let mode = match env::var_os(&format!("{}_STATIC", lib_name)) {
Some(_) => "static",
None => "dylib",
};
println!("cargo:rustc-link-lib={}={}", mode, lib_name.to_lowercase());
return true;
}
false
}
fn main() {
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-changed=rocksdb/");
println!("cargo:rerun-if-changed=snappy/");
fail_on_empty_directory("rocksdb");
fail_on_empty_directory("snappy");
bindgen_rocksdb();
if!try_to_find_and_link_lib("ROCKSDB") {
build_rocksdb();
}
if!try_to_find_and_link_lib("SNAPPY") {
build_snappy();
}
}
|
config.flag("-EHsc");
} else {
config.flag("-std=c++11");
// this was breaking the build on travis due to
// > 4mb of warnings emitted.
|
random_line_split
|
build.rs
|
extern crate cc;
extern crate bindgen;
use std::env;
use std::fs;
use std::path::PathBuf;
fn link(name: &str, bundled: bool) {
use std::env::var;
let target = var("TARGET").unwrap();
let target: Vec<_> = target.split('-').collect();
if target.get(2) == Some(&"windows") {
println!("cargo:rustc-link-lib=dylib={}", name);
if bundled && target.get(3) == Some(&"gnu") {
let dir = var("CARGO_MANIFEST_DIR").unwrap();
println!("cargo:rustc-link-search=native={}/{}", dir, target[0]);
}
}
}
fn fail_on_empty_directory(name: &str) {
if fs::read_dir(name).unwrap().count() == 0 {
println!(
"The `{}` directory is empty, did you forget to pull the submodules?",
name
);
println!("Try `git submodule update --init --recursive`");
panic!();
}
}
fn bindgen_rocksdb() {
let bindings = bindgen::Builder::default()
.header("rocksdb/include/rocksdb/c.h")
.blacklist_type("max_align_t") // https://github.com/rust-lang-nursery/rust-bindgen/issues/550
.ctypes_prefix("libc")
.generate()
.expect("unable to generate rocksdb bindings");
let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
bindings
.write_to_file(out_path.join("bindings.rs"))
.expect("unable to write rocksdb bindings");
}
fn build_rocksdb() {
let mut config = cc::Build::new();
config.include("rocksdb/include/");
config.include("rocksdb/");
config.include("rocksdb/third-party/gtest-1.7.0/fused-src/");
config.include("snappy/");
config.include(".");
config.define("NDEBUG", Some("1"));
config.define("SNAPPY", Some("1"));
let mut lib_sources = include_str!("rocksdb_lib_sources.txt")
.split(" ")
.collect::<Vec<&'static str>>();
// We have a pregenerated a version of build_version.cc in the local directory
lib_sources = lib_sources
.iter()
.cloned()
.filter(|file| *file!= "util/build_version.cc")
.collect::<Vec<&'static str>>();
if cfg!(target_os = "macos") {
config.define("OS_MACOSX", Some("1"));
config.define("ROCKSDB_PLATFORM_POSIX", Some("1"));
config.define("ROCKSDB_LIB_IO_POSIX", Some("1"));
}
if cfg!(target_os = "linux") {
config.define("OS_LINUX", Some("1"));
config.define("ROCKSDB_PLATFORM_POSIX", Some("1"));
config.define("ROCKSDB_LIB_IO_POSIX", Some("1"));
// COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp"
}
if cfg!(target_os = "freebsd") {
config.define("OS_FREEBSD", Some("1"));
config.define("ROCKSDB_PLATFORM_POSIX", Some("1"));
config.define("ROCKSDB_LIB_IO_POSIX", Some("1"));
}
if cfg!(windows) {
link("rpcrt4", false);
link("Shlwapi", false);
config.define("OS_WIN", Some("1"));
// Remove POSIX-specific sources
lib_sources = lib_sources
.iter()
.cloned()
.filter(|file| match *file {
"port/port_posix.cc" |
"env/env_posix.cc" |
"env/io_posix.cc" => false,
_ => true,
})
.collect::<Vec<&'static str>>();
// Add Windows-specific sources
lib_sources.push("port/win/port_win.cc");
lib_sources.push("port/win/env_win.cc");
lib_sources.push("port/win/env_default.cc");
lib_sources.push("port/win/win_logger.cc");
lib_sources.push("port/win/io_win.cc");
lib_sources.push("port/win/win_thread.cc");
}
if cfg!(target_env = "msvc") {
config.flag("-EHsc");
} else {
config.flag("-std=c++11");
// this was breaking the build on travis due to
// > 4mb of warnings emitted.
config.flag("-Wno-unused-parameter");
}
for file in lib_sources {
let file = "rocksdb/".to_string() + file;
config.file(&file);
}
config.file("build_version.cc");
config.cpp(true);
config.compile("librocksdb.a");
}
fn
|
() {
let mut config = cc::Build::new();
config.include("snappy/");
config.include(".");
config.define("NDEBUG", Some("1"));
if cfg!(target_env = "msvc") {
config.flag("-EHsc");
} else {
config.flag("-std=c++11");
}
config.file("snappy/snappy.cc");
config.file("snappy/snappy-sinksource.cc");
config.file("snappy/snappy-c.cc");
config.cpp(true);
config.compile("libsnappy.a");
}
fn try_to_find_and_link_lib(lib_name: &str) -> bool {
if let Ok(lib_dir) = env::var(&format!("{}_LIB_DIR", lib_name)) {
println!("cargo:rustc-link-search=native={}", lib_dir);
let mode = match env::var_os(&format!("{}_STATIC", lib_name)) {
Some(_) => "static",
None => "dylib",
};
println!("cargo:rustc-link-lib={}={}", mode, lib_name.to_lowercase());
return true;
}
false
}
fn main() {
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-changed=rocksdb/");
println!("cargo:rerun-if-changed=snappy/");
fail_on_empty_directory("rocksdb");
fail_on_empty_directory("snappy");
bindgen_rocksdb();
if!try_to_find_and_link_lib("ROCKSDB") {
build_rocksdb();
}
if!try_to_find_and_link_lib("SNAPPY") {
build_snappy();
}
}
|
build_snappy
|
identifier_name
|
build.rs
|
extern crate cc;
extern crate bindgen;
use std::env;
use std::fs;
use std::path::PathBuf;
fn link(name: &str, bundled: bool) {
use std::env::var;
let target = var("TARGET").unwrap();
let target: Vec<_> = target.split('-').collect();
if target.get(2) == Some(&"windows") {
println!("cargo:rustc-link-lib=dylib={}", name);
if bundled && target.get(3) == Some(&"gnu") {
let dir = var("CARGO_MANIFEST_DIR").unwrap();
println!("cargo:rustc-link-search=native={}/{}", dir, target[0]);
}
}
}
fn fail_on_empty_directory(name: &str) {
if fs::read_dir(name).unwrap().count() == 0 {
println!(
"The `{}` directory is empty, did you forget to pull the submodules?",
name
);
println!("Try `git submodule update --init --recursive`");
panic!();
}
}
fn bindgen_rocksdb() {
let bindings = bindgen::Builder::default()
.header("rocksdb/include/rocksdb/c.h")
.blacklist_type("max_align_t") // https://github.com/rust-lang-nursery/rust-bindgen/issues/550
.ctypes_prefix("libc")
.generate()
.expect("unable to generate rocksdb bindings");
let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
bindings
.write_to_file(out_path.join("bindings.rs"))
.expect("unable to write rocksdb bindings");
}
fn build_rocksdb()
|
.collect::<Vec<&'static str>>();
if cfg!(target_os = "macos") {
config.define("OS_MACOSX", Some("1"));
config.define("ROCKSDB_PLATFORM_POSIX", Some("1"));
config.define("ROCKSDB_LIB_IO_POSIX", Some("1"));
}
if cfg!(target_os = "linux") {
config.define("OS_LINUX", Some("1"));
config.define("ROCKSDB_PLATFORM_POSIX", Some("1"));
config.define("ROCKSDB_LIB_IO_POSIX", Some("1"));
// COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp"
}
if cfg!(target_os = "freebsd") {
config.define("OS_FREEBSD", Some("1"));
config.define("ROCKSDB_PLATFORM_POSIX", Some("1"));
config.define("ROCKSDB_LIB_IO_POSIX", Some("1"));
}
if cfg!(windows) {
link("rpcrt4", false);
link("Shlwapi", false);
config.define("OS_WIN", Some("1"));
// Remove POSIX-specific sources
lib_sources = lib_sources
.iter()
.cloned()
.filter(|file| match *file {
"port/port_posix.cc" |
"env/env_posix.cc" |
"env/io_posix.cc" => false,
_ => true,
})
.collect::<Vec<&'static str>>();
// Add Windows-specific sources
lib_sources.push("port/win/port_win.cc");
lib_sources.push("port/win/env_win.cc");
lib_sources.push("port/win/env_default.cc");
lib_sources.push("port/win/win_logger.cc");
lib_sources.push("port/win/io_win.cc");
lib_sources.push("port/win/win_thread.cc");
}
if cfg!(target_env = "msvc") {
config.flag("-EHsc");
} else {
config.flag("-std=c++11");
// this was breaking the build on travis due to
// > 4mb of warnings emitted.
config.flag("-Wno-unused-parameter");
}
for file in lib_sources {
let file = "rocksdb/".to_string() + file;
config.file(&file);
}
config.file("build_version.cc");
config.cpp(true);
config.compile("librocksdb.a");
}
fn build_snappy() {
let mut config = cc::Build::new();
config.include("snappy/");
config.include(".");
config.define("NDEBUG", Some("1"));
if cfg!(target_env = "msvc") {
config.flag("-EHsc");
} else {
config.flag("-std=c++11");
}
config.file("snappy/snappy.cc");
config.file("snappy/snappy-sinksource.cc");
config.file("snappy/snappy-c.cc");
config.cpp(true);
config.compile("libsnappy.a");
}
fn try_to_find_and_link_lib(lib_name: &str) -> bool {
if let Ok(lib_dir) = env::var(&format!("{}_LIB_DIR", lib_name)) {
println!("cargo:rustc-link-search=native={}", lib_dir);
let mode = match env::var_os(&format!("{}_STATIC", lib_name)) {
Some(_) => "static",
None => "dylib",
};
println!("cargo:rustc-link-lib={}={}", mode, lib_name.to_lowercase());
return true;
}
false
}
fn main() {
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-changed=rocksdb/");
println!("cargo:rerun-if-changed=snappy/");
fail_on_empty_directory("rocksdb");
fail_on_empty_directory("snappy");
bindgen_rocksdb();
if!try_to_find_and_link_lib("ROCKSDB") {
build_rocksdb();
}
if!try_to_find_and_link_lib("SNAPPY") {
build_snappy();
}
}
|
{
let mut config = cc::Build::new();
config.include("rocksdb/include/");
config.include("rocksdb/");
config.include("rocksdb/third-party/gtest-1.7.0/fused-src/");
config.include("snappy/");
config.include(".");
config.define("NDEBUG", Some("1"));
config.define("SNAPPY", Some("1"));
let mut lib_sources = include_str!("rocksdb_lib_sources.txt")
.split(" ")
.collect::<Vec<&'static str>>();
// We have a pregenerated a version of build_version.cc in the local directory
lib_sources = lib_sources
.iter()
.cloned()
.filter(|file| *file != "util/build_version.cc")
|
identifier_body
|
tests.rs
|
use super::*;
use rustc_data_structures::sync::Lrc;
fn init_source_map() -> SourceMap {
let sm = SourceMap::new(FilePathMapping::empty());
sm.new_source_file(PathBuf::from("blork.rs").into(), "first line.\nsecond line".to_string());
sm.new_source_file(PathBuf::from("empty.rs").into(), String::new());
sm.new_source_file(PathBuf::from("blork2.rs").into(), "first line.\nsecond line".to_string());
sm
}
impl SourceMap {
/// Returns `Some(span)`, a union of the LHS and RHS span. The LHS must precede the RHS. If
/// there are gaps between LHS and RHS, the resulting union will cross these gaps.
/// For this to work,
///
/// * the syntax contexts of both spans much match,
/// * the LHS span needs to end on the same line the RHS span begins,
/// * the LHS span must start at or before the RHS span.
fn merge_spans(&self, sp_lhs: Span, sp_rhs: Span) -> Option<Span> {
// Ensure we're at the same expansion ID.
if sp_lhs.ctxt()!= sp_rhs.ctxt() {
return None;
}
let lhs_end = match self.lookup_line(sp_lhs.hi()) {
Ok(x) => x,
Err(_) => return None,
};
let rhs_begin = match self.lookup_line(sp_rhs.lo()) {
Ok(x) => x,
Err(_) => return None,
};
// If we must cross lines to merge, don't merge.
if lhs_end.line!= rhs_begin.line {
return None;
}
// Ensure these follow the expected order and that we don't overlap.
if (sp_lhs.lo() <= sp_rhs.lo()) && (sp_lhs.hi() <= sp_rhs.lo()) {
Some(sp_lhs.to(sp_rhs))
} else {
None
}
}
/// Converts an absolute `BytePos` to a `CharPos` relative to the `SourceFile`.
fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let idx = self.lookup_source_file_idx(bpos);
let sf = &(*self.files.borrow().source_files)[idx];
sf.bytepos_to_file_charpos(bpos)
}
}
/// Tests `lookup_byte_offset`.
#[test]
fn t3() {
let sm = init_source_map();
let srcfbp1 = sm.lookup_byte_offset(BytePos(23));
assert_eq!(srcfbp1.sf.name, PathBuf::from("blork.rs").into());
assert_eq!(srcfbp1.pos, BytePos(23));
let srcfbp1 = sm.lookup_byte_offset(BytePos(24));
assert_eq!(srcfbp1.sf.name, PathBuf::from("empty.rs").into());
assert_eq!(srcfbp1.pos, BytePos(0));
let srcfbp2 = sm.lookup_byte_offset(BytePos(25));
assert_eq!(srcfbp2.sf.name, PathBuf::from("blork2.rs").into());
assert_eq!(srcfbp2.pos, BytePos(0));
}
/// Tests `bytepos_to_file_charpos`.
#[test]
fn t4() {
let sm = init_source_map();
let cp1 = sm.bytepos_to_file_charpos(BytePos(22));
assert_eq!(cp1, CharPos(22));
let cp2 = sm.bytepos_to_file_charpos(BytePos(25));
assert_eq!(cp2, CharPos(0));
}
/// Tests zero-length `SourceFile`s.
#[test]
fn t5()
|
fn init_source_map_mbc() -> SourceMap {
let sm = SourceMap::new(FilePathMapping::empty());
// "€" is a three-byte UTF8 char.
sm.new_source_file(
PathBuf::from("blork.rs").into(),
"fir€st €€€€ line.\nsecond line".to_string(),
);
sm.new_source_file(
PathBuf::from("blork2.rs").into(),
"first line€€.\n€ second line".to_string(),
);
sm
}
/// Tests `bytepos_to_file_charpos` in the presence of multi-byte chars.
#[test]
fn t6() {
let sm = init_source_map_mbc();
let cp1 = sm.bytepos_to_file_charpos(BytePos(3));
assert_eq!(cp1, CharPos(3));
let cp2 = sm.bytepos_to_file_charpos(BytePos(6));
assert_eq!(cp2, CharPos(4));
let cp3 = sm.bytepos_to_file_charpos(BytePos(56));
assert_eq!(cp3, CharPos(12));
let cp4 = sm.bytepos_to_file_charpos(BytePos(61));
assert_eq!(cp4, CharPos(15));
}
/// Test `span_to_lines` for a span ending at the end of a `SourceFile`.
#[test]
fn t7() {
let sm = init_source_map();
let span = Span::with_root_ctxt(BytePos(12), BytePos(23));
let file_lines = sm.span_to_lines(span).unwrap();
assert_eq!(file_lines.file.name, PathBuf::from("blork.rs").into());
assert_eq!(file_lines.lines.len(), 1);
assert_eq!(file_lines.lines[0].line_index, 1);
}
/// Given a string like " ~~~~~~~~~~~~ ", produces a span
/// converting that range. The idea is that the string has the same
/// length as the input, and we uncover the byte positions. Note
/// that this can span lines and so on.
fn span_from_selection(input: &str, selection: &str) -> Span {
assert_eq!(input.len(), selection.len());
let left_index = selection.find('~').unwrap() as u32;
let right_index = selection.rfind('~').map_or(left_index, |x| x as u32);
Span::with_root_ctxt(BytePos(left_index), BytePos(right_index + 1))
}
/// Tests `span_to_snippet` and `span_to_lines` for a span converting 3
/// lines in the middle of a file.
#[test]
fn span_to_snippet_and_lines_spanning_multiple_lines() {
let sm = SourceMap::new(FilePathMapping::empty());
let inputtext = "aaaaa\nbbbbBB\nCCC\nDDDDDddddd\neee\n";
let selection = " \n ~~\n~~~\n~~~~~ \n \n";
sm.new_source_file(Path::new("blork.rs").to_owned().into(), inputtext.to_string());
let span = span_from_selection(inputtext, selection);
// Check that we are extracting the text we thought we were extracting.
assert_eq!(&sm.span_to_snippet(span).unwrap(), "BB\nCCC\nDDDDD");
// Check that span_to_lines gives us the complete result with the lines/cols we expected.
let lines = sm.span_to_lines(span).unwrap();
let expected = vec![
LineInfo { line_index: 1, start_col: CharPos(4), end_col: CharPos(6) },
LineInfo { line_index: 2, start_col: CharPos(0), end_col: CharPos(3) },
LineInfo { line_index: 3, start_col: CharPos(0), end_col: CharPos(5) },
];
assert_eq!(lines.lines, expected);
}
/// Test span_to_snippet for a span ending at the end of a `SourceFile`.
#[test]
fn t8() {
let sm = init_source_map();
let span = Span::with_root_ctxt(BytePos(12), BytePos(23));
let snippet = sm.span_to_snippet(span);
assert_eq!(snippet, Ok("second line".to_string()));
}
/// Test `span_to_str` for a span ending at the end of a `SourceFile`.
#[test]
fn t9() {
let sm = init_source_map();
let span = Span::with_root_ctxt(BytePos(12), BytePos(23));
let sstr = sm.span_to_diagnostic_string(span);
assert_eq!(sstr, "blork.rs:2:1: 2:12");
}
/// Tests failing to merge two spans on different lines.
#[test]
fn span_merging_fail() {
let sm = SourceMap::new(FilePathMapping::empty());
let inputtext = "bbbb BB\ncc CCC\n";
let selection1 = " ~~\n \n";
let selection2 = " \n ~~~\n";
sm.new_source_file(Path::new("blork.rs").to_owned().into(), inputtext.to_owned());
let span1 = span_from_selection(inputtext, selection1);
let span2 = span_from_selection(inputtext, selection2);
assert!(sm.merge_spans(span1, span2).is_none());
}
/// Tests loading an external source file that requires normalization.
#[test]
fn t10() {
let sm = SourceMap::new(FilePathMapping::empty());
let unnormalized = "first line.\r\nsecond line";
let normalized = "first line.\nsecond line";
let src_file = sm.new_source_file(PathBuf::from("blork.rs").into(), unnormalized.to_string());
assert_eq!(src_file.src.as_ref().unwrap().as_ref(), normalized);
assert!(
src_file.src_hash.matches(unnormalized),
"src_hash should use the source before normalization"
);
let SourceFile {
name,
src_hash,
start_pos,
end_pos,
lines,
multibyte_chars,
non_narrow_chars,
normalized_pos,
name_hash,
..
} = (*src_file).clone();
let imported_src_file = sm.new_imported_source_file(
name,
src_hash,
name_hash,
(end_pos - start_pos).to_usize(),
CrateNum::new(0),
lines,
multibyte_chars,
non_narrow_chars,
normalized_pos,
start_pos,
end_pos,
);
assert!(
imported_src_file.external_src.borrow().get_source().is_none(),
"imported source file should not have source yet"
);
imported_src_file.add_external_src(|| Some(unnormalized.to_string()));
assert_eq!(
imported_src_file.external_src.borrow().get_source().unwrap().as_ref(),
normalized,
"imported source file should be normalized"
);
}
/// Returns the span corresponding to the `n`th occurrence of `substring` in `source_text`.
trait SourceMapExtension {
fn span_substr(
&self,
file: &Lrc<SourceFile>,
source_text: &str,
substring: &str,
n: usize,
) -> Span;
}
impl SourceMapExtension for SourceMap {
fn span_substr(
&self,
file: &Lrc<SourceFile>,
source_text: &str,
substring: &str,
n: usize,
) -> Span {
eprintln!(
"span_substr(file={:?}/{:?}, substring={:?}, n={})",
file.name, file.start_pos, substring, n
);
let mut i = 0;
let mut hi = 0;
loop {
let offset = source_text[hi..].find(substring).unwrap_or_else(|| {
panic!(
"source_text `{}` does not have {} occurrences of `{}`, only {}",
source_text, n, substring, i
);
});
let lo = hi + offset;
hi = lo + substring.len();
if i == n {
let span = Span::with_root_ctxt(
BytePos(lo as u32 + file.start_pos.0),
BytePos(hi as u32 + file.start_pos.0),
);
assert_eq!(&self.span_to_snippet(span).unwrap()[..], substring);
return span;
}
i += 1;
}
}
}
|
{
let sm = init_source_map();
let loc1 = sm.lookup_char_pos(BytePos(22));
assert_eq!(loc1.file.name, PathBuf::from("blork.rs").into());
assert_eq!(loc1.line, 2);
assert_eq!(loc1.col, CharPos(10));
let loc2 = sm.lookup_char_pos(BytePos(25));
assert_eq!(loc2.file.name, PathBuf::from("blork2.rs").into());
assert_eq!(loc2.line, 1);
assert_eq!(loc2.col, CharPos(0));
}
|
identifier_body
|
tests.rs
|
use super::*;
use rustc_data_structures::sync::Lrc;
fn init_source_map() -> SourceMap {
let sm = SourceMap::new(FilePathMapping::empty());
sm.new_source_file(PathBuf::from("blork.rs").into(), "first line.\nsecond line".to_string());
sm.new_source_file(PathBuf::from("empty.rs").into(), String::new());
sm.new_source_file(PathBuf::from("blork2.rs").into(), "first line.\nsecond line".to_string());
sm
}
impl SourceMap {
/// Returns `Some(span)`, a union of the LHS and RHS span. The LHS must precede the RHS. If
/// there are gaps between LHS and RHS, the resulting union will cross these gaps.
/// For this to work,
///
/// * the syntax contexts of both spans much match,
/// * the LHS span needs to end on the same line the RHS span begins,
/// * the LHS span must start at or before the RHS span.
fn merge_spans(&self, sp_lhs: Span, sp_rhs: Span) -> Option<Span> {
// Ensure we're at the same expansion ID.
if sp_lhs.ctxt()!= sp_rhs.ctxt() {
return None;
}
let lhs_end = match self.lookup_line(sp_lhs.hi()) {
Ok(x) => x,
Err(_) => return None,
};
let rhs_begin = match self.lookup_line(sp_rhs.lo()) {
Ok(x) => x,
Err(_) => return None,
};
// If we must cross lines to merge, don't merge.
if lhs_end.line!= rhs_begin.line {
return None;
}
// Ensure these follow the expected order and that we don't overlap.
if (sp_lhs.lo() <= sp_rhs.lo()) && (sp_lhs.hi() <= sp_rhs.lo()) {
Some(sp_lhs.to(sp_rhs))
} else {
None
}
}
/// Converts an absolute `BytePos` to a `CharPos` relative to the `SourceFile`.
fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let idx = self.lookup_source_file_idx(bpos);
let sf = &(*self.files.borrow().source_files)[idx];
sf.bytepos_to_file_charpos(bpos)
}
}
/// Tests `lookup_byte_offset`.
#[test]
fn t3() {
let sm = init_source_map();
let srcfbp1 = sm.lookup_byte_offset(BytePos(23));
assert_eq!(srcfbp1.sf.name, PathBuf::from("blork.rs").into());
assert_eq!(srcfbp1.pos, BytePos(23));
let srcfbp1 = sm.lookup_byte_offset(BytePos(24));
assert_eq!(srcfbp1.sf.name, PathBuf::from("empty.rs").into());
assert_eq!(srcfbp1.pos, BytePos(0));
let srcfbp2 = sm.lookup_byte_offset(BytePos(25));
assert_eq!(srcfbp2.sf.name, PathBuf::from("blork2.rs").into());
assert_eq!(srcfbp2.pos, BytePos(0));
}
/// Tests `bytepos_to_file_charpos`.
#[test]
fn t4() {
let sm = init_source_map();
|
assert_eq!(cp1, CharPos(22));
let cp2 = sm.bytepos_to_file_charpos(BytePos(25));
assert_eq!(cp2, CharPos(0));
}
/// Tests zero-length `SourceFile`s.
#[test]
fn t5() {
let sm = init_source_map();
let loc1 = sm.lookup_char_pos(BytePos(22));
assert_eq!(loc1.file.name, PathBuf::from("blork.rs").into());
assert_eq!(loc1.line, 2);
assert_eq!(loc1.col, CharPos(10));
let loc2 = sm.lookup_char_pos(BytePos(25));
assert_eq!(loc2.file.name, PathBuf::from("blork2.rs").into());
assert_eq!(loc2.line, 1);
assert_eq!(loc2.col, CharPos(0));
}
fn init_source_map_mbc() -> SourceMap {
let sm = SourceMap::new(FilePathMapping::empty());
// "€" is a three-byte UTF8 char.
sm.new_source_file(
PathBuf::from("blork.rs").into(),
"fir€st €€€€ line.\nsecond line".to_string(),
);
sm.new_source_file(
PathBuf::from("blork2.rs").into(),
"first line€€.\n€ second line".to_string(),
);
sm
}
/// Tests `bytepos_to_file_charpos` in the presence of multi-byte chars.
#[test]
fn t6() {
let sm = init_source_map_mbc();
let cp1 = sm.bytepos_to_file_charpos(BytePos(3));
assert_eq!(cp1, CharPos(3));
let cp2 = sm.bytepos_to_file_charpos(BytePos(6));
assert_eq!(cp2, CharPos(4));
let cp3 = sm.bytepos_to_file_charpos(BytePos(56));
assert_eq!(cp3, CharPos(12));
let cp4 = sm.bytepos_to_file_charpos(BytePos(61));
assert_eq!(cp4, CharPos(15));
}
/// Test `span_to_lines` for a span ending at the end of a `SourceFile`.
#[test]
fn t7() {
let sm = init_source_map();
let span = Span::with_root_ctxt(BytePos(12), BytePos(23));
let file_lines = sm.span_to_lines(span).unwrap();
assert_eq!(file_lines.file.name, PathBuf::from("blork.rs").into());
assert_eq!(file_lines.lines.len(), 1);
assert_eq!(file_lines.lines[0].line_index, 1);
}
/// Given a string like " ~~~~~~~~~~~~ ", produces a span
/// converting that range. The idea is that the string has the same
/// length as the input, and we uncover the byte positions. Note
/// that this can span lines and so on.
fn span_from_selection(input: &str, selection: &str) -> Span {
assert_eq!(input.len(), selection.len());
let left_index = selection.find('~').unwrap() as u32;
let right_index = selection.rfind('~').map_or(left_index, |x| x as u32);
Span::with_root_ctxt(BytePos(left_index), BytePos(right_index + 1))
}
/// Tests `span_to_snippet` and `span_to_lines` for a span converting 3
/// lines in the middle of a file.
#[test]
fn span_to_snippet_and_lines_spanning_multiple_lines() {
let sm = SourceMap::new(FilePathMapping::empty());
let inputtext = "aaaaa\nbbbbBB\nCCC\nDDDDDddddd\neee\n";
let selection = " \n ~~\n~~~\n~~~~~ \n \n";
sm.new_source_file(Path::new("blork.rs").to_owned().into(), inputtext.to_string());
let span = span_from_selection(inputtext, selection);
// Check that we are extracting the text we thought we were extracting.
assert_eq!(&sm.span_to_snippet(span).unwrap(), "BB\nCCC\nDDDDD");
// Check that span_to_lines gives us the complete result with the lines/cols we expected.
let lines = sm.span_to_lines(span).unwrap();
let expected = vec![
LineInfo { line_index: 1, start_col: CharPos(4), end_col: CharPos(6) },
LineInfo { line_index: 2, start_col: CharPos(0), end_col: CharPos(3) },
LineInfo { line_index: 3, start_col: CharPos(0), end_col: CharPos(5) },
];
assert_eq!(lines.lines, expected);
}
/// Test span_to_snippet for a span ending at the end of a `SourceFile`.
#[test]
fn t8() {
let sm = init_source_map();
let span = Span::with_root_ctxt(BytePos(12), BytePos(23));
let snippet = sm.span_to_snippet(span);
assert_eq!(snippet, Ok("second line".to_string()));
}
/// Test `span_to_str` for a span ending at the end of a `SourceFile`.
#[test]
fn t9() {
let sm = init_source_map();
let span = Span::with_root_ctxt(BytePos(12), BytePos(23));
let sstr = sm.span_to_diagnostic_string(span);
assert_eq!(sstr, "blork.rs:2:1: 2:12");
}
/// Tests failing to merge two spans on different lines.
#[test]
fn span_merging_fail() {
let sm = SourceMap::new(FilePathMapping::empty());
let inputtext = "bbbb BB\ncc CCC\n";
let selection1 = " ~~\n \n";
let selection2 = " \n ~~~\n";
sm.new_source_file(Path::new("blork.rs").to_owned().into(), inputtext.to_owned());
let span1 = span_from_selection(inputtext, selection1);
let span2 = span_from_selection(inputtext, selection2);
assert!(sm.merge_spans(span1, span2).is_none());
}
/// Tests loading an external source file that requires normalization.
#[test]
fn t10() {
let sm = SourceMap::new(FilePathMapping::empty());
let unnormalized = "first line.\r\nsecond line";
let normalized = "first line.\nsecond line";
let src_file = sm.new_source_file(PathBuf::from("blork.rs").into(), unnormalized.to_string());
assert_eq!(src_file.src.as_ref().unwrap().as_ref(), normalized);
assert!(
src_file.src_hash.matches(unnormalized),
"src_hash should use the source before normalization"
);
let SourceFile {
name,
src_hash,
start_pos,
end_pos,
lines,
multibyte_chars,
non_narrow_chars,
normalized_pos,
name_hash,
..
} = (*src_file).clone();
let imported_src_file = sm.new_imported_source_file(
name,
src_hash,
name_hash,
(end_pos - start_pos).to_usize(),
CrateNum::new(0),
lines,
multibyte_chars,
non_narrow_chars,
normalized_pos,
start_pos,
end_pos,
);
assert!(
imported_src_file.external_src.borrow().get_source().is_none(),
"imported source file should not have source yet"
);
imported_src_file.add_external_src(|| Some(unnormalized.to_string()));
assert_eq!(
imported_src_file.external_src.borrow().get_source().unwrap().as_ref(),
normalized,
"imported source file should be normalized"
);
}
/// Returns the span corresponding to the `n`th occurrence of `substring` in `source_text`.
trait SourceMapExtension {
fn span_substr(
&self,
file: &Lrc<SourceFile>,
source_text: &str,
substring: &str,
n: usize,
) -> Span;
}
impl SourceMapExtension for SourceMap {
fn span_substr(
&self,
file: &Lrc<SourceFile>,
source_text: &str,
substring: &str,
n: usize,
) -> Span {
eprintln!(
"span_substr(file={:?}/{:?}, substring={:?}, n={})",
file.name, file.start_pos, substring, n
);
let mut i = 0;
let mut hi = 0;
loop {
let offset = source_text[hi..].find(substring).unwrap_or_else(|| {
panic!(
"source_text `{}` does not have {} occurrences of `{}`, only {}",
source_text, n, substring, i
);
});
let lo = hi + offset;
hi = lo + substring.len();
if i == n {
let span = Span::with_root_ctxt(
BytePos(lo as u32 + file.start_pos.0),
BytePos(hi as u32 + file.start_pos.0),
);
assert_eq!(&self.span_to_snippet(span).unwrap()[..], substring);
return span;
}
i += 1;
}
}
}
|
let cp1 = sm.bytepos_to_file_charpos(BytePos(22));
|
random_line_split
|
tests.rs
|
use super::*;
use rustc_data_structures::sync::Lrc;
fn init_source_map() -> SourceMap {
let sm = SourceMap::new(FilePathMapping::empty());
sm.new_source_file(PathBuf::from("blork.rs").into(), "first line.\nsecond line".to_string());
sm.new_source_file(PathBuf::from("empty.rs").into(), String::new());
sm.new_source_file(PathBuf::from("blork2.rs").into(), "first line.\nsecond line".to_string());
sm
}
impl SourceMap {
/// Returns `Some(span)`, a union of the LHS and RHS span. The LHS must precede the RHS. If
/// there are gaps between LHS and RHS, the resulting union will cross these gaps.
/// For this to work,
///
/// * the syntax contexts of both spans much match,
/// * the LHS span needs to end on the same line the RHS span begins,
/// * the LHS span must start at or before the RHS span.
fn merge_spans(&self, sp_lhs: Span, sp_rhs: Span) -> Option<Span> {
// Ensure we're at the same expansion ID.
if sp_lhs.ctxt()!= sp_rhs.ctxt() {
return None;
}
let lhs_end = match self.lookup_line(sp_lhs.hi()) {
Ok(x) => x,
Err(_) => return None,
};
let rhs_begin = match self.lookup_line(sp_rhs.lo()) {
Ok(x) => x,
Err(_) => return None,
};
// If we must cross lines to merge, don't merge.
if lhs_end.line!= rhs_begin.line {
return None;
}
// Ensure these follow the expected order and that we don't overlap.
if (sp_lhs.lo() <= sp_rhs.lo()) && (sp_lhs.hi() <= sp_rhs.lo()) {
Some(sp_lhs.to(sp_rhs))
} else {
None
}
}
/// Converts an absolute `BytePos` to a `CharPos` relative to the `SourceFile`.
fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let idx = self.lookup_source_file_idx(bpos);
let sf = &(*self.files.borrow().source_files)[idx];
sf.bytepos_to_file_charpos(bpos)
}
}
/// Tests `lookup_byte_offset`.
#[test]
fn t3() {
let sm = init_source_map();
let srcfbp1 = sm.lookup_byte_offset(BytePos(23));
assert_eq!(srcfbp1.sf.name, PathBuf::from("blork.rs").into());
assert_eq!(srcfbp1.pos, BytePos(23));
let srcfbp1 = sm.lookup_byte_offset(BytePos(24));
assert_eq!(srcfbp1.sf.name, PathBuf::from("empty.rs").into());
assert_eq!(srcfbp1.pos, BytePos(0));
let srcfbp2 = sm.lookup_byte_offset(BytePos(25));
assert_eq!(srcfbp2.sf.name, PathBuf::from("blork2.rs").into());
assert_eq!(srcfbp2.pos, BytePos(0));
}
/// Tests `bytepos_to_file_charpos`.
#[test]
fn t4() {
let sm = init_source_map();
let cp1 = sm.bytepos_to_file_charpos(BytePos(22));
assert_eq!(cp1, CharPos(22));
let cp2 = sm.bytepos_to_file_charpos(BytePos(25));
assert_eq!(cp2, CharPos(0));
}
/// Tests zero-length `SourceFile`s.
#[test]
fn t5() {
let sm = init_source_map();
let loc1 = sm.lookup_char_pos(BytePos(22));
assert_eq!(loc1.file.name, PathBuf::from("blork.rs").into());
assert_eq!(loc1.line, 2);
assert_eq!(loc1.col, CharPos(10));
let loc2 = sm.lookup_char_pos(BytePos(25));
assert_eq!(loc2.file.name, PathBuf::from("blork2.rs").into());
assert_eq!(loc2.line, 1);
assert_eq!(loc2.col, CharPos(0));
}
fn init_source_map_mbc() -> SourceMap {
let sm = SourceMap::new(FilePathMapping::empty());
// "€" is a three-byte UTF8 char.
sm.new_source_file(
PathBuf::from("blork.rs").into(),
"fir€st €€€€ line.\nsecond line".to_string(),
);
sm.new_source_file(
PathBuf::from("blork2.rs").into(),
"first line€€.\n€ second line".to_string(),
);
sm
}
/// Tests `bytepos_to_file_charpos` in the presence of multi-byte chars.
#[test]
fn t6() {
let sm = init_source_map_mbc();
let cp1 = sm.bytepos_to_file_charpos(BytePos(3));
assert_eq!(cp1, CharPos(3));
let cp2 = sm.bytepos_to_file_charpos(BytePos(6));
assert_eq!(cp2, CharPos(4));
let cp3 = sm.bytepos_to_file_charpos(BytePos(56));
assert_eq!(cp3, CharPos(12));
let cp4 = sm.bytepos_to_file_charpos(BytePos(61));
assert_eq!(cp4, CharPos(15));
}
/// Test `span_to_lines` for a span ending at the end of a `SourceFile`.
#[test]
fn t7() {
let sm = init_source_map();
let span = Span::with_root_ctxt(BytePos(12), BytePos(23));
let file_lines = sm.span_to_lines(span).unwrap();
assert_eq!(file_lines.file.name, PathBuf::from("blork.rs").into());
assert_eq!(file_lines.lines.len(), 1);
assert_eq!(file_lines.lines[0].line_index, 1);
}
/// Given a string like " ~~~~~~~~~~~~ ", produces a span
/// converting that range. The idea is that the string has the same
/// length as the input, and we uncover the byte positions. Note
/// that this can span lines and so on.
fn span_from_selection(input: &str, selection: &str) -> Span {
assert_eq!(input.len(), selection.len());
let left_index = selection.find('~').unwrap() as u32;
let right_index = selection.rfind('~').map_or(left_index, |x| x as u32);
Span::with_root_ctxt(BytePos(left_index), BytePos(right_index + 1))
}
/// Tests `span_to_snippet` and `span_to_lines` for a span converting 3
/// lines in the middle of a file.
#[test]
fn span_to_snippet_and_lines_spanning_multiple_lines() {
let sm = SourceMap::new(FilePathMapping::empty());
let inputtext = "aaaaa\nbbbbBB\nCCC\nDDDDDddddd\neee\n";
let selection = " \n ~~\n~~~\n~~~~~ \n \n";
sm.new_source_file(Path::new("blork.rs").to_owned().into(), inputtext.to_string());
let span = span_from_selection(inputtext, selection);
// Check that we are extracting the text we thought we were extracting.
assert_eq!(&sm.span_to_snippet(span).unwrap(), "BB\nCCC\nDDDDD");
// Check that span_to_lines gives us the complete result with the lines/cols we expected.
let lines = sm.span_to_lines(span).unwrap();
let expected = vec![
LineInfo { line_index: 1, start_col: CharPos(4), end_col: CharPos(6) },
LineInfo { line_index: 2, start_col: CharPos(0), end_col: CharPos(3) },
LineInfo { line_index: 3, start_col: CharPos(0), end_col: CharPos(5) },
];
assert_eq!(lines.lines, expected);
}
/// Test span_to_snippet for a span ending at the end of a `SourceFile`.
#[test]
fn t8() {
let sm
|
init_source_map();
let span = Span::with_root_ctxt(BytePos(12), BytePos(23));
let snippet = sm.span_to_snippet(span);
assert_eq!(snippet, Ok("second line".to_string()));
}
/// Test `span_to_str` for a span ending at the end of a `SourceFile`.
#[test]
fn t9() {
let sm = init_source_map();
let span = Span::with_root_ctxt(BytePos(12), BytePos(23));
let sstr = sm.span_to_diagnostic_string(span);
assert_eq!(sstr, "blork.rs:2:1: 2:12");
}
/// Tests failing to merge two spans on different lines.
#[test]
fn span_merging_fail() {
let sm = SourceMap::new(FilePathMapping::empty());
let inputtext = "bbbb BB\ncc CCC\n";
let selection1 = " ~~\n \n";
let selection2 = " \n ~~~\n";
sm.new_source_file(Path::new("blork.rs").to_owned().into(), inputtext.to_owned());
let span1 = span_from_selection(inputtext, selection1);
let span2 = span_from_selection(inputtext, selection2);
assert!(sm.merge_spans(span1, span2).is_none());
}
/// Tests loading an external source file that requires normalization.
#[test]
fn t10() {
let sm = SourceMap::new(FilePathMapping::empty());
let unnormalized = "first line.\r\nsecond line";
let normalized = "first line.\nsecond line";
let src_file = sm.new_source_file(PathBuf::from("blork.rs").into(), unnormalized.to_string());
assert_eq!(src_file.src.as_ref().unwrap().as_ref(), normalized);
assert!(
src_file.src_hash.matches(unnormalized),
"src_hash should use the source before normalization"
);
let SourceFile {
name,
src_hash,
start_pos,
end_pos,
lines,
multibyte_chars,
non_narrow_chars,
normalized_pos,
name_hash,
..
} = (*src_file).clone();
let imported_src_file = sm.new_imported_source_file(
name,
src_hash,
name_hash,
(end_pos - start_pos).to_usize(),
CrateNum::new(0),
lines,
multibyte_chars,
non_narrow_chars,
normalized_pos,
start_pos,
end_pos,
);
assert!(
imported_src_file.external_src.borrow().get_source().is_none(),
"imported source file should not have source yet"
);
imported_src_file.add_external_src(|| Some(unnormalized.to_string()));
assert_eq!(
imported_src_file.external_src.borrow().get_source().unwrap().as_ref(),
normalized,
"imported source file should be normalized"
);
}
/// Returns the span corresponding to the `n`th occurrence of `substring` in `source_text`.
trait SourceMapExtension {
fn span_substr(
&self,
file: &Lrc<SourceFile>,
source_text: &str,
substring: &str,
n: usize,
) -> Span;
}
impl SourceMapExtension for SourceMap {
fn span_substr(
&self,
file: &Lrc<SourceFile>,
source_text: &str,
substring: &str,
n: usize,
) -> Span {
eprintln!(
"span_substr(file={:?}/{:?}, substring={:?}, n={})",
file.name, file.start_pos, substring, n
);
let mut i = 0;
let mut hi = 0;
loop {
let offset = source_text[hi..].find(substring).unwrap_or_else(|| {
panic!(
"source_text `{}` does not have {} occurrences of `{}`, only {}",
source_text, n, substring, i
);
});
let lo = hi + offset;
hi = lo + substring.len();
if i == n {
let span = Span::with_root_ctxt(
BytePos(lo as u32 + file.start_pos.0),
BytePos(hi as u32 + file.start_pos.0),
);
assert_eq!(&self.span_to_snippet(span).unwrap()[..], substring);
return span;
}
i += 1;
}
}
}
|
=
|
identifier_name
|
result.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A type representing either success or failure
#[allow(missing_doc)];
use cmp::Eq;
use either;
use either::Either;
use kinds::Copy;
use iterator::IteratorUtil;
use option::{None, Option, Some};
use vec;
use vec::{OwnedVector, ImmutableVector};
use container::Container;
/// The result type
#[deriving(Clone, Eq)]
pub enum Result<T, U> {
/// Contains the successful result value
Ok(T),
/// Contains the error value
Err(U)
}
/**
* Get the value out of a successful result
*
* # Failure
*
* If the result is an error
*/
#[inline]
pub fn get<T:Copy,U>(res: &Result<T, U>) -> T {
match *res {
Ok(ref t) => copy *t,
Err(ref the_err) =>
fail!("get called on error result: %?", *the_err)
}
}
/**
* Get a reference to the value out of a successful result
*
* # Failure
*
* If the result is an error
*/
#[inline]
pub fn get_ref<'a, T, U>(res: &'a Result<T, U>) -> &'a T {
match *res {
Ok(ref t) => t,
Err(ref the_err) =>
fail!("get_ref called on error result: %?", *the_err)
}
}
/**
* Get the value out of an error result
*
* # Failure
*
* If the result is not an error
*/
#[inline]
pub fn get_err<T, U: Copy>(res: &Result<T, U>) -> U {
match *res {
Err(ref u) => copy *u,
Ok(_) => fail!("get_err called on ok result")
}
}
/// Returns true if the result is `ok`
#[inline]
pub fn is_ok<T, U>(res: &Result<T, U>) -> bool {
match *res {
Ok(_) => true,
Err(_) => false
}
}
/// Returns true if the result is `err`
#[inline]
pub fn is_err<T, U>(res: &Result<T, U>) -> bool {
!is_ok(res)
}
/**
* Convert to the `either` type
*
* `ok` result variants are converted to `either::right` variants, `err`
* result variants are converted to `either::left`.
*/
#[inline]
pub fn to_either<T:Copy,U:Copy>(res: &Result<U, T>)
-> Either<T, U> {
match *res {
Ok(ref res) => either::Right(copy *res),
Err(ref fail_) => either::Left(copy *fail_)
}
}
/**
* Call a function based on a previous result
*
* If `res` is `ok` then the value is extracted and passed to `op` whereupon
* `op`s result is returned. if `res` is `err` then it is immediately
* returned. This function can be used to compose the results of two
* functions.
*
* Example:
*
* let res = chain(read_file(file)) { |buf|
* ok(parse_bytes(buf))
* }
*/
#[inline]
pub fn chain<T, U, V>(res: Result<T, V>, op: &fn(T)
-> Result<U, V>) -> Result<U, V> {
match res {
Ok(t) => op(t),
Err(e) => Err(e)
}
}
/**
* Call a function based on a previous result
*
* If `res` is `err` then the value is extracted and passed to `op`
* whereupon `op`s result is returned. if `res` is `ok` then it is
* immediately returned. This function can be used to pass through a
* successful result while handling an error.
*/
#[inline]
pub fn chain_err<T, U, V>(
res: Result<T, V>,
op: &fn(t: V) -> Result<T, U>)
-> Result<T, U> {
match res {
Ok(t) => Ok(t),
Err(v) => op(v)
}
}
/**
* Call a function based on a previous result
*
* If `res` is `ok` then the value is extracted and passed to `op` whereupon
* `op`s result is returned. if `res` is `err` then it is immediately
* returned. This function can be used to compose the results of two
* functions.
*
* Example:
*
* iter(read_file(file)) { |buf|
* print_buf(buf)
* }
*/
#[inline]
pub fn iter<T, E>(res: &Result<T, E>, f: &fn(&T)) {
match *res {
Ok(ref t) => f(t),
Err(_) => ()
}
}
/**
* Call a function based on a previous result
*
* If `res` is `err` then the value is extracted and passed to `op` whereupon
* `op`s result is returned. if `res` is `ok` then it is immediately returned.
* This function can be used to pass through a successful result while
* handling an error.
*/
#[inline]
pub fn iter_err<T, E>(res: &Result<T, E>, f: &fn(&E)) {
match *res {
Ok(_) => (),
Err(ref e) => f(e)
}
}
/**
* Call a function based on a previous result
*
* If `res` is `ok` then the value is extracted and passed to `op` whereupon
* `op`s result is wrapped in `ok` and returned. if `res` is `err` then it is
* immediately returned. This function can be used to compose the results of
* two functions.
*
* Example:
*
* let res = map(read_file(file)) { |buf|
* parse_bytes(buf)
* }
*/
#[inline]
pub fn map<T, E: Copy, U: Copy>(res: &Result<T, E>, op: &fn(&T) -> U)
-> Result<U, E> {
match *res {
Ok(ref t) => Ok(op(t)),
Err(ref e) => Err(copy *e)
}
}
/**
* Call a function based on a previous result
*
* If `res` is `err` then the value is extracted and passed to `op` whereupon
* `op`s result is wrapped in an `err` and returned. if `res` is `ok` then it
* is immediately returned. This function can be used to pass through a
* successful result while handling an error.
*/
#[inline]
pub fn map_err<T:Copy,E,F:Copy>(res: &Result<T, E>, op: &fn(&E) -> F)
-> Result<T, F> {
match *res {
Ok(ref t) => Ok(copy *t),
Err(ref e) => Err(op(e))
}
}
impl<T, E> Result<T, E> {
#[inline]
pub fn get_ref<'a>(&'a self) -> &'a T { get_ref(self) }
#[inline]
pub fn is_ok(&self) -> bool { is_ok(self) }
#[inline]
pub fn is_err(&self) -> bool { is_err(self) }
#[inline]
pub fn iter(&self, f: &fn(&T)) { iter(self, f) }
#[inline]
pub fn iter_err(&self, f: &fn(&E)) { iter_err(self, f) }
#[inline]
pub fn unwrap(self) -> T { unwrap(self) }
#[inline]
pub fn unwrap_err(self) -> E { unwrap_err(self) }
#[inline]
pub fn chain<U>(self, op: &fn(T) -> Result<U,E>) -> Result<U,E> {
chain(self, op)
}
#[inline]
pub fn chain_err<F>(self, op: &fn(E) -> Result<T,F>) -> Result<T,F> {
chain_err(self, op)
}
}
impl<T:Copy,E> Result<T, E> {
#[inline]
pub fn get(&self) -> T { get(self) }
#[inline]
pub fn map_err<F:Copy>(&self, op: &fn(&E) -> F) -> Result<T,F> {
map_err(self, op)
}
}
impl<T, E: Copy> Result<T, E> {
#[inline]
pub fn get_err(&self) -> E { get_err(self) }
#[inline]
pub fn map<U:Copy>(&self, op: &fn(&T) -> U) -> Result<U,E> {
map(self, op)
}
}
/**
* Maps each element in the vector `ts` using the operation `op`. Should an
* error occur, no further mappings are performed and the error is returned.
* Should no error occur, a vector containing the result of each map is
* returned.
*
* Here is an example which increments every integer in a vector,
* checking for overflow:
*
* fn inc_conditionally(x: uint) -> result<uint,str> {
* if x == uint::max_value { return err("overflow"); }
* else { return ok(x+1u); }
* }
* map(~[1u, 2u, 3u], inc_conditionally).chain {|incd|
* assert!(incd == ~[2u, 3u, 4u]);
* }
*/
#[inline]
pub fn map_vec<T,U:Copy,V:Copy>(
ts: &[T], op: &fn(&T) -> Result<V,U>) -> Result<~[V],U> {
let mut vs: ~[V] = vec::with_capacity(ts.len());
for ts.iter().advance |t| {
match op(t) {
Ok(v) => vs.push(v),
Err(u) => return Err(u)
}
}
return Ok(vs);
}
#[inline]
#[allow(missing_doc)]
pub fn map_opt<T,U:Copy,V:Copy>(
o_t: &Option<T>, op: &fn(&T) -> Result<V,U>) -> Result<Option<V>,U> {
match *o_t {
None => Ok(None),
Some(ref t) => match op(t) {
Ok(v) => Ok(Some(v)),
Err(e) => Err(e)
}
}
}
/**
* Same as map, but it operates over two parallel vectors.
*
* A precondition is used here to ensure that the vectors are the same
* length. While we do not often use preconditions in the standard
* library, a precondition is used here because result::t is generally
* used in 'careful' code contexts where it is both appropriate and easy
* to accommodate an error like the vectors being of different lengths.
*/
#[inline]
pub fn map_vec2<S,T,U:Copy,V:Copy>(ss: &[S], ts: &[T],
op: &fn(&S,&T) -> Result<V,U>) -> Result<~[V],U> {
assert!(vec::same_length(ss, ts));
let n = ts.len();
let mut vs = vec::with_capacity(n);
let mut i = 0u;
while i < n {
match op(&ss[i],&ts[i]) {
Ok(v) => vs.push(v),
Err(u) => return Err(u)
}
i += 1u;
}
return Ok(vs);
}
/**
* Applies op to the pairwise elements from `ss` and `ts`, aborting on
* error. This could be implemented using `map_zip()` but it is more efficient
* on its own as no result vector is built.
*/
#[inline]
pub fn iter_vec2<S,T,U:Copy>(ss: &[S], ts: &[T],
op: &fn(&S,&T) -> Result<(),U>) -> Result<(),U> {
assert!(vec::same_length(ss, ts));
let n = ts.len();
let mut i = 0u;
while i < n {
match op(&ss[i],&ts[i]) {
Ok(()) => (),
Err(u) => return Err(u)
}
i += 1u;
}
return Ok(());
}
/// Unwraps a result, assuming it is an `ok(T)`
#[inline]
pub fn unwrap<T, U>(res: Result<T, U>) -> T {
match res {
Ok(t) => t,
Err(_) => fail!("unwrap called on an err result")
}
}
/// Unwraps a result, assuming it is an `err(U)`
#[inline]
pub fn unwrap_err<T, U>(res: Result<T, U>) -> U {
match res {
Err(u) => u,
Ok(_) => fail!("unwrap called on an ok result")
}
}
#[cfg(test)]
#[allow(non_implicitly_copyable_typarams)]
mod tests {
use result::{Err, Ok, Result, chain, get, get_err};
use result;
pub fn op1() -> result::Result<int, ~str> { result::Ok(666) }
pub fn op2(i: int) -> result::Result<uint, ~str> {
result::Ok(i as uint + 1u)
}
pub fn op3() -> result::Result<int, ~str> { result::Err(~"sadface") }
#[test]
pub fn chain_success() {
assert_eq!(get(&chain(op1(), op2)), 667u);
}
#[test]
pub fn chain_failure() {
assert_eq!(get_err(&chain(op3(), op2)), ~"sadface");
}
#[test]
pub fn test_impl_iter() {
let mut valid = false;
Ok::<~str, ~str>(~"a").iter(|_x| valid = true);
assert!(valid);
Err::<~str, ~str>(~"b").iter(|_x| valid = false);
assert!(valid);
}
#[test]
pub fn test_impl_iter_err() {
let mut valid = true;
Ok::<~str, ~str>(~"a").iter_err(|_x| valid = false);
assert!(valid);
valid = false;
Err::<~str, ~str>(~"b").iter_err(|_x| valid = true);
assert!(valid);
}
#[test]
pub fn test_impl_map() {
assert_eq!(Ok::<~str, ~str>(~"a").map(|_x| ~"b"), Ok(~"b"));
assert_eq!(Err::<~str, ~str>(~"a").map(|_x| ~"b"), Err(~"a"));
|
#[test]
pub fn test_impl_map_err() {
assert_eq!(Ok::<~str, ~str>(~"a").map_err(|_x| ~"b"), Ok(~"a"));
assert_eq!(Err::<~str, ~str>(~"a").map_err(|_x| ~"b"), Err(~"b"));
}
#[test]
pub fn test_get_ref_method() {
let foo: Result<int, ()> = Ok(100);
assert_eq!(*foo.get_ref(), 100);
}
}
|
}
|
random_line_split
|
result.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A type representing either success or failure
#[allow(missing_doc)];
use cmp::Eq;
use either;
use either::Either;
use kinds::Copy;
use iterator::IteratorUtil;
use option::{None, Option, Some};
use vec;
use vec::{OwnedVector, ImmutableVector};
use container::Container;
/// The result type
#[deriving(Clone, Eq)]
pub enum Result<T, U> {
/// Contains the successful result value
Ok(T),
/// Contains the error value
Err(U)
}
/**
* Get the value out of a successful result
*
* # Failure
*
* If the result is an error
*/
#[inline]
pub fn get<T:Copy,U>(res: &Result<T, U>) -> T {
match *res {
Ok(ref t) => copy *t,
Err(ref the_err) =>
fail!("get called on error result: %?", *the_err)
}
}
/**
* Get a reference to the value out of a successful result
*
* # Failure
*
* If the result is an error
*/
#[inline]
pub fn get_ref<'a, T, U>(res: &'a Result<T, U>) -> &'a T {
match *res {
Ok(ref t) => t,
Err(ref the_err) =>
fail!("get_ref called on error result: %?", *the_err)
}
}
/**
* Get the value out of an error result
*
* # Failure
*
* If the result is not an error
*/
#[inline]
pub fn get_err<T, U: Copy>(res: &Result<T, U>) -> U {
match *res {
Err(ref u) => copy *u,
Ok(_) => fail!("get_err called on ok result")
}
}
/// Returns true if the result is `ok`
#[inline]
pub fn is_ok<T, U>(res: &Result<T, U>) -> bool {
match *res {
Ok(_) => true,
Err(_) => false
}
}
/// Returns true if the result is `err`
#[inline]
pub fn is_err<T, U>(res: &Result<T, U>) -> bool {
!is_ok(res)
}
/**
* Convert to the `either` type
*
* `ok` result variants are converted to `either::right` variants, `err`
* result variants are converted to `either::left`.
*/
#[inline]
pub fn
|
<T:Copy,U:Copy>(res: &Result<U, T>)
-> Either<T, U> {
match *res {
Ok(ref res) => either::Right(copy *res),
Err(ref fail_) => either::Left(copy *fail_)
}
}
/**
* Call a function based on a previous result
*
* If `res` is `ok` then the value is extracted and passed to `op` whereupon
* `op`s result is returned. if `res` is `err` then it is immediately
* returned. This function can be used to compose the results of two
* functions.
*
* Example:
*
* let res = chain(read_file(file)) { |buf|
* ok(parse_bytes(buf))
* }
*/
#[inline]
pub fn chain<T, U, V>(res: Result<T, V>, op: &fn(T)
-> Result<U, V>) -> Result<U, V> {
match res {
Ok(t) => op(t),
Err(e) => Err(e)
}
}
/**
* Call a function based on a previous result
*
* If `res` is `err` then the value is extracted and passed to `op`
* whereupon `op`s result is returned. if `res` is `ok` then it is
* immediately returned. This function can be used to pass through a
* successful result while handling an error.
*/
#[inline]
pub fn chain_err<T, U, V>(
res: Result<T, V>,
op: &fn(t: V) -> Result<T, U>)
-> Result<T, U> {
match res {
Ok(t) => Ok(t),
Err(v) => op(v)
}
}
/**
* Call a function based on a previous result
*
* If `res` is `ok` then the value is extracted and passed to `op` whereupon
* `op`s result is returned. if `res` is `err` then it is immediately
* returned. This function can be used to compose the results of two
* functions.
*
* Example:
*
* iter(read_file(file)) { |buf|
* print_buf(buf)
* }
*/
#[inline]
pub fn iter<T, E>(res: &Result<T, E>, f: &fn(&T)) {
match *res {
Ok(ref t) => f(t),
Err(_) => ()
}
}
/**
* Call a function based on a previous result
*
* If `res` is `err` then the value is extracted and passed to `op` whereupon
* `op`s result is returned. if `res` is `ok` then it is immediately returned.
* This function can be used to pass through a successful result while
* handling an error.
*/
#[inline]
pub fn iter_err<T, E>(res: &Result<T, E>, f: &fn(&E)) {
match *res {
Ok(_) => (),
Err(ref e) => f(e)
}
}
/**
* Call a function based on a previous result
*
* If `res` is `ok` then the value is extracted and passed to `op` whereupon
* `op`s result is wrapped in `ok` and returned. if `res` is `err` then it is
* immediately returned. This function can be used to compose the results of
* two functions.
*
* Example:
*
* let res = map(read_file(file)) { |buf|
* parse_bytes(buf)
* }
*/
#[inline]
pub fn map<T, E: Copy, U: Copy>(res: &Result<T, E>, op: &fn(&T) -> U)
-> Result<U, E> {
match *res {
Ok(ref t) => Ok(op(t)),
Err(ref e) => Err(copy *e)
}
}
/**
* Call a function based on a previous result
*
* If `res` is `err` then the value is extracted and passed to `op` whereupon
* `op`s result is wrapped in an `err` and returned. if `res` is `ok` then it
* is immediately returned. This function can be used to pass through a
* successful result while handling an error.
*/
#[inline]
pub fn map_err<T:Copy,E,F:Copy>(res: &Result<T, E>, op: &fn(&E) -> F)
-> Result<T, F> {
match *res {
Ok(ref t) => Ok(copy *t),
Err(ref e) => Err(op(e))
}
}
impl<T, E> Result<T, E> {
#[inline]
pub fn get_ref<'a>(&'a self) -> &'a T { get_ref(self) }
#[inline]
pub fn is_ok(&self) -> bool { is_ok(self) }
#[inline]
pub fn is_err(&self) -> bool { is_err(self) }
#[inline]
pub fn iter(&self, f: &fn(&T)) { iter(self, f) }
#[inline]
pub fn iter_err(&self, f: &fn(&E)) { iter_err(self, f) }
#[inline]
pub fn unwrap(self) -> T { unwrap(self) }
#[inline]
pub fn unwrap_err(self) -> E { unwrap_err(self) }
#[inline]
pub fn chain<U>(self, op: &fn(T) -> Result<U,E>) -> Result<U,E> {
chain(self, op)
}
#[inline]
pub fn chain_err<F>(self, op: &fn(E) -> Result<T,F>) -> Result<T,F> {
chain_err(self, op)
}
}
impl<T:Copy,E> Result<T, E> {
#[inline]
pub fn get(&self) -> T { get(self) }
#[inline]
pub fn map_err<F:Copy>(&self, op: &fn(&E) -> F) -> Result<T,F> {
map_err(self, op)
}
}
impl<T, E: Copy> Result<T, E> {
#[inline]
pub fn get_err(&self) -> E { get_err(self) }
#[inline]
pub fn map<U:Copy>(&self, op: &fn(&T) -> U) -> Result<U,E> {
map(self, op)
}
}
/**
* Maps each element in the vector `ts` using the operation `op`. Should an
* error occur, no further mappings are performed and the error is returned.
* Should no error occur, a vector containing the result of each map is
* returned.
*
* Here is an example which increments every integer in a vector,
* checking for overflow:
*
* fn inc_conditionally(x: uint) -> result<uint,str> {
* if x == uint::max_value { return err("overflow"); }
* else { return ok(x+1u); }
* }
* map(~[1u, 2u, 3u], inc_conditionally).chain {|incd|
* assert!(incd == ~[2u, 3u, 4u]);
* }
*/
#[inline]
pub fn map_vec<T,U:Copy,V:Copy>(
ts: &[T], op: &fn(&T) -> Result<V,U>) -> Result<~[V],U> {
let mut vs: ~[V] = vec::with_capacity(ts.len());
for ts.iter().advance |t| {
match op(t) {
Ok(v) => vs.push(v),
Err(u) => return Err(u)
}
}
return Ok(vs);
}
#[inline]
#[allow(missing_doc)]
pub fn map_opt<T,U:Copy,V:Copy>(
o_t: &Option<T>, op: &fn(&T) -> Result<V,U>) -> Result<Option<V>,U> {
match *o_t {
None => Ok(None),
Some(ref t) => match op(t) {
Ok(v) => Ok(Some(v)),
Err(e) => Err(e)
}
}
}
/**
* Same as map, but it operates over two parallel vectors.
*
* A precondition is used here to ensure that the vectors are the same
* length. While we do not often use preconditions in the standard
* library, a precondition is used here because result::t is generally
* used in 'careful' code contexts where it is both appropriate and easy
* to accommodate an error like the vectors being of different lengths.
*/
#[inline]
pub fn map_vec2<S,T,U:Copy,V:Copy>(ss: &[S], ts: &[T],
op: &fn(&S,&T) -> Result<V,U>) -> Result<~[V],U> {
assert!(vec::same_length(ss, ts));
let n = ts.len();
let mut vs = vec::with_capacity(n);
let mut i = 0u;
while i < n {
match op(&ss[i],&ts[i]) {
Ok(v) => vs.push(v),
Err(u) => return Err(u)
}
i += 1u;
}
return Ok(vs);
}
/**
* Applies op to the pairwise elements from `ss` and `ts`, aborting on
* error. This could be implemented using `map_zip()` but it is more efficient
* on its own as no result vector is built.
*/
#[inline]
pub fn iter_vec2<S,T,U:Copy>(ss: &[S], ts: &[T],
op: &fn(&S,&T) -> Result<(),U>) -> Result<(),U> {
assert!(vec::same_length(ss, ts));
let n = ts.len();
let mut i = 0u;
while i < n {
match op(&ss[i],&ts[i]) {
Ok(()) => (),
Err(u) => return Err(u)
}
i += 1u;
}
return Ok(());
}
/// Unwraps a result, assuming it is an `ok(T)`
#[inline]
pub fn unwrap<T, U>(res: Result<T, U>) -> T {
match res {
Ok(t) => t,
Err(_) => fail!("unwrap called on an err result")
}
}
/// Unwraps a result, assuming it is an `err(U)`
#[inline]
pub fn unwrap_err<T, U>(res: Result<T, U>) -> U {
match res {
Err(u) => u,
Ok(_) => fail!("unwrap called on an ok result")
}
}
#[cfg(test)]
#[allow(non_implicitly_copyable_typarams)]
mod tests {
use result::{Err, Ok, Result, chain, get, get_err};
use result;
pub fn op1() -> result::Result<int, ~str> { result::Ok(666) }
pub fn op2(i: int) -> result::Result<uint, ~str> {
result::Ok(i as uint + 1u)
}
pub fn op3() -> result::Result<int, ~str> { result::Err(~"sadface") }
#[test]
pub fn chain_success() {
assert_eq!(get(&chain(op1(), op2)), 667u);
}
#[test]
pub fn chain_failure() {
assert_eq!(get_err(&chain(op3(), op2)), ~"sadface");
}
#[test]
pub fn test_impl_iter() {
let mut valid = false;
Ok::<~str, ~str>(~"a").iter(|_x| valid = true);
assert!(valid);
Err::<~str, ~str>(~"b").iter(|_x| valid = false);
assert!(valid);
}
#[test]
pub fn test_impl_iter_err() {
let mut valid = true;
Ok::<~str, ~str>(~"a").iter_err(|_x| valid = false);
assert!(valid);
valid = false;
Err::<~str, ~str>(~"b").iter_err(|_x| valid = true);
assert!(valid);
}
#[test]
pub fn test_impl_map() {
assert_eq!(Ok::<~str, ~str>(~"a").map(|_x| ~"b"), Ok(~"b"));
assert_eq!(Err::<~str, ~str>(~"a").map(|_x| ~"b"), Err(~"a"));
}
#[test]
pub fn test_impl_map_err() {
assert_eq!(Ok::<~str, ~str>(~"a").map_err(|_x| ~"b"), Ok(~"a"));
assert_eq!(Err::<~str, ~str>(~"a").map_err(|_x| ~"b"), Err(~"b"));
}
#[test]
pub fn test_get_ref_method() {
let foo: Result<int, ()> = Ok(100);
assert_eq!(*foo.get_ref(), 100);
}
}
|
to_either
|
identifier_name
|
result.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A type representing either success or failure
#[allow(missing_doc)];
use cmp::Eq;
use either;
use either::Either;
use kinds::Copy;
use iterator::IteratorUtil;
use option::{None, Option, Some};
use vec;
use vec::{OwnedVector, ImmutableVector};
use container::Container;
/// The result type
#[deriving(Clone, Eq)]
pub enum Result<T, U> {
/// Contains the successful result value
Ok(T),
/// Contains the error value
Err(U)
}
/**
* Get the value out of a successful result
*
* # Failure
*
* If the result is an error
*/
#[inline]
pub fn get<T:Copy,U>(res: &Result<T, U>) -> T {
match *res {
Ok(ref t) => copy *t,
Err(ref the_err) =>
fail!("get called on error result: %?", *the_err)
}
}
/**
* Get a reference to the value out of a successful result
*
* # Failure
*
* If the result is an error
*/
#[inline]
pub fn get_ref<'a, T, U>(res: &'a Result<T, U>) -> &'a T {
match *res {
Ok(ref t) => t,
Err(ref the_err) =>
fail!("get_ref called on error result: %?", *the_err)
}
}
/**
* Get the value out of an error result
*
* # Failure
*
* If the result is not an error
*/
#[inline]
pub fn get_err<T, U: Copy>(res: &Result<T, U>) -> U {
match *res {
Err(ref u) => copy *u,
Ok(_) => fail!("get_err called on ok result")
}
}
/// Returns true if the result is `ok`
#[inline]
pub fn is_ok<T, U>(res: &Result<T, U>) -> bool {
match *res {
Ok(_) => true,
Err(_) => false
}
}
/// Returns true if the result is `err`
#[inline]
pub fn is_err<T, U>(res: &Result<T, U>) -> bool {
!is_ok(res)
}
/**
* Convert to the `either` type
*
* `ok` result variants are converted to `either::right` variants, `err`
* result variants are converted to `either::left`.
*/
#[inline]
pub fn to_either<T:Copy,U:Copy>(res: &Result<U, T>)
-> Either<T, U> {
match *res {
Ok(ref res) => either::Right(copy *res),
Err(ref fail_) => either::Left(copy *fail_)
}
}
/**
* Call a function based on a previous result
*
* If `res` is `ok` then the value is extracted and passed to `op` whereupon
* `op`s result is returned. if `res` is `err` then it is immediately
* returned. This function can be used to compose the results of two
* functions.
*
* Example:
*
* let res = chain(read_file(file)) { |buf|
* ok(parse_bytes(buf))
* }
*/
#[inline]
pub fn chain<T, U, V>(res: Result<T, V>, op: &fn(T)
-> Result<U, V>) -> Result<U, V> {
match res {
Ok(t) => op(t),
Err(e) => Err(e)
}
}
/**
* Call a function based on a previous result
*
* If `res` is `err` then the value is extracted and passed to `op`
* whereupon `op`s result is returned. if `res` is `ok` then it is
* immediately returned. This function can be used to pass through a
* successful result while handling an error.
*/
#[inline]
pub fn chain_err<T, U, V>(
res: Result<T, V>,
op: &fn(t: V) -> Result<T, U>)
-> Result<T, U> {
match res {
Ok(t) => Ok(t),
Err(v) => op(v)
}
}
/**
* Call a function based on a previous result
*
* If `res` is `ok` then the value is extracted and passed to `op` whereupon
* `op`s result is returned. if `res` is `err` then it is immediately
* returned. This function can be used to compose the results of two
* functions.
*
* Example:
*
* iter(read_file(file)) { |buf|
* print_buf(buf)
* }
*/
#[inline]
pub fn iter<T, E>(res: &Result<T, E>, f: &fn(&T)) {
match *res {
Ok(ref t) => f(t),
Err(_) => ()
}
}
/**
* Call a function based on a previous result
*
* If `res` is `err` then the value is extracted and passed to `op` whereupon
* `op`s result is returned. if `res` is `ok` then it is immediately returned.
* This function can be used to pass through a successful result while
* handling an error.
*/
#[inline]
pub fn iter_err<T, E>(res: &Result<T, E>, f: &fn(&E)) {
match *res {
Ok(_) => (),
Err(ref e) => f(e)
}
}
/**
* Call a function based on a previous result
*
* If `res` is `ok` then the value is extracted and passed to `op` whereupon
* `op`s result is wrapped in `ok` and returned. if `res` is `err` then it is
* immediately returned. This function can be used to compose the results of
* two functions.
*
* Example:
*
* let res = map(read_file(file)) { |buf|
* parse_bytes(buf)
* }
*/
#[inline]
pub fn map<T, E: Copy, U: Copy>(res: &Result<T, E>, op: &fn(&T) -> U)
-> Result<U, E> {
match *res {
Ok(ref t) => Ok(op(t)),
Err(ref e) => Err(copy *e)
}
}
/**
* Call a function based on a previous result
*
* If `res` is `err` then the value is extracted and passed to `op` whereupon
* `op`s result is wrapped in an `err` and returned. if `res` is `ok` then it
* is immediately returned. This function can be used to pass through a
* successful result while handling an error.
*/
#[inline]
pub fn map_err<T:Copy,E,F:Copy>(res: &Result<T, E>, op: &fn(&E) -> F)
-> Result<T, F> {
match *res {
Ok(ref t) => Ok(copy *t),
Err(ref e) => Err(op(e))
}
}
impl<T, E> Result<T, E> {
#[inline]
pub fn get_ref<'a>(&'a self) -> &'a T { get_ref(self) }
#[inline]
pub fn is_ok(&self) -> bool { is_ok(self) }
#[inline]
pub fn is_err(&self) -> bool { is_err(self) }
#[inline]
pub fn iter(&self, f: &fn(&T)) { iter(self, f) }
#[inline]
pub fn iter_err(&self, f: &fn(&E)) { iter_err(self, f) }
#[inline]
pub fn unwrap(self) -> T { unwrap(self) }
#[inline]
pub fn unwrap_err(self) -> E { unwrap_err(self) }
#[inline]
pub fn chain<U>(self, op: &fn(T) -> Result<U,E>) -> Result<U,E> {
chain(self, op)
}
#[inline]
pub fn chain_err<F>(self, op: &fn(E) -> Result<T,F>) -> Result<T,F> {
chain_err(self, op)
}
}
impl<T:Copy,E> Result<T, E> {
#[inline]
pub fn get(&self) -> T { get(self) }
#[inline]
pub fn map_err<F:Copy>(&self, op: &fn(&E) -> F) -> Result<T,F> {
map_err(self, op)
}
}
impl<T, E: Copy> Result<T, E> {
#[inline]
pub fn get_err(&self) -> E { get_err(self) }
#[inline]
pub fn map<U:Copy>(&self, op: &fn(&T) -> U) -> Result<U,E> {
map(self, op)
}
}
/**
* Maps each element in the vector `ts` using the operation `op`. Should an
* error occur, no further mappings are performed and the error is returned.
* Should no error occur, a vector containing the result of each map is
* returned.
*
* Here is an example which increments every integer in a vector,
* checking for overflow:
*
* fn inc_conditionally(x: uint) -> result<uint,str> {
* if x == uint::max_value { return err("overflow"); }
* else { return ok(x+1u); }
* }
* map(~[1u, 2u, 3u], inc_conditionally).chain {|incd|
* assert!(incd == ~[2u, 3u, 4u]);
* }
*/
#[inline]
pub fn map_vec<T,U:Copy,V:Copy>(
ts: &[T], op: &fn(&T) -> Result<V,U>) -> Result<~[V],U> {
let mut vs: ~[V] = vec::with_capacity(ts.len());
for ts.iter().advance |t| {
match op(t) {
Ok(v) => vs.push(v),
Err(u) => return Err(u)
}
}
return Ok(vs);
}
#[inline]
#[allow(missing_doc)]
pub fn map_opt<T,U:Copy,V:Copy>(
o_t: &Option<T>, op: &fn(&T) -> Result<V,U>) -> Result<Option<V>,U> {
match *o_t {
None => Ok(None),
Some(ref t) => match op(t) {
Ok(v) => Ok(Some(v)),
Err(e) => Err(e)
}
}
}
/**
* Same as map, but it operates over two parallel vectors.
*
* A precondition is used here to ensure that the vectors are the same
* length. While we do not often use preconditions in the standard
* library, a precondition is used here because result::t is generally
* used in 'careful' code contexts where it is both appropriate and easy
* to accommodate an error like the vectors being of different lengths.
*/
#[inline]
pub fn map_vec2<S,T,U:Copy,V:Copy>(ss: &[S], ts: &[T],
op: &fn(&S,&T) -> Result<V,U>) -> Result<~[V],U> {
assert!(vec::same_length(ss, ts));
let n = ts.len();
let mut vs = vec::with_capacity(n);
let mut i = 0u;
while i < n {
match op(&ss[i],&ts[i]) {
Ok(v) => vs.push(v),
Err(u) => return Err(u)
}
i += 1u;
}
return Ok(vs);
}
/**
* Applies op to the pairwise elements from `ss` and `ts`, aborting on
* error. This could be implemented using `map_zip()` but it is more efficient
* on its own as no result vector is built.
*/
#[inline]
pub fn iter_vec2<S,T,U:Copy>(ss: &[S], ts: &[T],
op: &fn(&S,&T) -> Result<(),U>) -> Result<(),U> {
assert!(vec::same_length(ss, ts));
let n = ts.len();
let mut i = 0u;
while i < n {
match op(&ss[i],&ts[i]) {
Ok(()) => (),
Err(u) => return Err(u)
}
i += 1u;
}
return Ok(());
}
/// Unwraps a result, assuming it is an `ok(T)`
#[inline]
pub fn unwrap<T, U>(res: Result<T, U>) -> T {
match res {
Ok(t) => t,
Err(_) => fail!("unwrap called on an err result")
}
}
/// Unwraps a result, assuming it is an `err(U)`
#[inline]
pub fn unwrap_err<T, U>(res: Result<T, U>) -> U {
match res {
Err(u) => u,
Ok(_) => fail!("unwrap called on an ok result")
}
}
#[cfg(test)]
#[allow(non_implicitly_copyable_typarams)]
mod tests {
use result::{Err, Ok, Result, chain, get, get_err};
use result;
pub fn op1() -> result::Result<int, ~str> { result::Ok(666) }
pub fn op2(i: int) -> result::Result<uint, ~str> {
result::Ok(i as uint + 1u)
}
pub fn op3() -> result::Result<int, ~str> { result::Err(~"sadface") }
#[test]
pub fn chain_success() {
assert_eq!(get(&chain(op1(), op2)), 667u);
}
#[test]
pub fn chain_failure() {
assert_eq!(get_err(&chain(op3(), op2)), ~"sadface");
}
#[test]
pub fn test_impl_iter() {
let mut valid = false;
Ok::<~str, ~str>(~"a").iter(|_x| valid = true);
assert!(valid);
Err::<~str, ~str>(~"b").iter(|_x| valid = false);
assert!(valid);
}
#[test]
pub fn test_impl_iter_err() {
let mut valid = true;
Ok::<~str, ~str>(~"a").iter_err(|_x| valid = false);
assert!(valid);
valid = false;
Err::<~str, ~str>(~"b").iter_err(|_x| valid = true);
assert!(valid);
}
#[test]
pub fn test_impl_map() {
assert_eq!(Ok::<~str, ~str>(~"a").map(|_x| ~"b"), Ok(~"b"));
assert_eq!(Err::<~str, ~str>(~"a").map(|_x| ~"b"), Err(~"a"));
}
#[test]
pub fn test_impl_map_err() {
assert_eq!(Ok::<~str, ~str>(~"a").map_err(|_x| ~"b"), Ok(~"a"));
assert_eq!(Err::<~str, ~str>(~"a").map_err(|_x| ~"b"), Err(~"b"));
}
#[test]
pub fn test_get_ref_method()
|
}
|
{
let foo: Result<int, ()> = Ok(100);
assert_eq!(*foo.get_ref(), 100);
}
|
identifier_body
|
boxed-class-type-substitution.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Regression test that rustc doesn't recurse infinitely substituting
// the boxed type parameter
struct Tree<T> {
parent: Option<T>
}
fn empty<T>() -> Tree<T> { fail!() }
struct Box {
tree: Tree<@Box>
}
fn Box() -> Box {
Box {
tree: empty()
}
}
struct LayoutData {
box: Option<@Box>
}
pub fn
|
() { }
|
main
|
identifier_name
|
boxed-class-type-substitution.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Regression test that rustc doesn't recurse infinitely substituting
// the boxed type parameter
struct Tree<T> {
parent: Option<T>
}
fn empty<T>() -> Tree<T>
|
struct Box {
tree: Tree<@Box>
}
fn Box() -> Box {
Box {
tree: empty()
}
}
struct LayoutData {
box: Option<@Box>
}
pub fn main() { }
|
{ fail!() }
|
identifier_body
|
boxed-class-type-substitution.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Regression test that rustc doesn't recurse infinitely substituting
// the boxed type parameter
struct Tree<T> {
parent: Option<T>
}
fn empty<T>() -> Tree<T> { fail!() }
struct Box {
tree: Tree<@Box>
}
fn Box() -> Box {
Box {
tree: empty()
}
}
struct LayoutData {
box: Option<@Box>
}
pub fn main() { }
|
// http://rust-lang.org/COPYRIGHT.
|
random_line_split
|
tests.rs
|
use parking_lot;
use rand;
use std::cmp;
use std::collections::{HashMap, HashSet};
use std::sync::{mpsc, Arc};
use std::thread;
use std::time::Duration;
use boxfuture::{BoxFuture, Boxable};
use futures01::future::{self, Future};
use hashing::Digest;
use parking_lot::Mutex;
use rand::Rng;
use crate::{EntryId, Graph, InvalidationResult, Node, NodeContext, NodeError};
#[test]
fn create() {
let graph = Arc::new(Graph::new());
let context = TContext::new(graph.clone());
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
}
#[test]
fn invalidate_and_clean() {
let graph = Arc::new(Graph::new());
let context = TContext::new(graph.clone());
// Create three nodes.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
assert_eq!(context.runs(), vec![TNode(2), TNode(1), TNode(0)]);
// Clear the middle Node, which dirties the upper node.
assert_eq!(
graph.invalidate_from_roots(|&TNode(n)| n == 1),
InvalidationResult {
cleared: 1,
dirtied: 1
}
);
// Confirm that the cleared Node re-runs, and the upper node is cleaned without re-running.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
assert_eq!(context.runs(), vec![TNode(2), TNode(1), TNode(0), TNode(1)]);
}
#[test]
fn invalidate_and_rerun() {
let graph = Arc::new(Graph::new());
let context = TContext::new(graph.clone());
// Create three nodes.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
assert_eq!(context.runs(), vec![TNode(2), TNode(1), TNode(0)]);
// Clear the middle Node, which dirties the upper node.
assert_eq!(
graph.invalidate_from_roots(|&TNode(n)| n == 1),
InvalidationResult {
cleared: 1,
dirtied: 1
}
);
// Request with a different salt, which will cause both the middle and upper nodes to rerun since
// their input values have changed.
let context = context.new_session(1).with_salt(1);
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 1), T(2, 1)])
);
assert_eq!(context.runs(), vec![TNode(1), TNode(2)]);
}
#[test]
fn invalidate_with_changed_dependencies() {
let graph = Arc::new(Graph::new());
let context = TContext::new(graph.clone());
// Create three nodes.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
// Clear the middle Node, which dirties the upper node.
assert_eq!(
graph.invalidate_from_roots(|&TNode(n)| n == 1),
InvalidationResult {
cleared: 1,
dirtied: 1
}
);
// Request with a new context that truncates execution at the middle Node.
let context =
TContext::new(graph.clone()).with_dependencies(vec![(TNode(1), None)].into_iter().collect());
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(1, 0), T(2, 0)])
);
// Confirm that dirtying the bottom Node does not affect the middle/upper Nodes, which no
// longer depend on it.
assert_eq!(
graph.invalidate_from_roots(|&TNode(n)| n == 0),
InvalidationResult {
cleared: 1,
dirtied: 0,
}
);
}
#[test]
fn invalidate_randomly() {
let graph = Arc::new(Graph::new());
let invalidations = 10;
let sleep_per_invalidation = Duration::from_millis(100);
let range = 100;
// Spawn a background thread to randomly invalidate in the relevant range. Hold its handle so
// it doesn't detach.
let graph2 = graph.clone();
let (send, recv) = mpsc::channel();
let _join = thread::spawn(move || {
let mut rng = rand::thread_rng();
let mut invalidations = invalidations;
while invalidations > 0 {
invalidations -= 1;
// Invalidate a random node in the graph.
let candidate = rng.gen_range(0, range);
graph2.invalidate_from_roots(|&TNode(n)| n == candidate);
thread::sleep(sleep_per_invalidation);
}
send.send(()).unwrap();
});
// Continuously re-request the root with increasing context values, and assert that Node and
// context values are ascending.
let mut iterations = 0;
let mut max_distinct_context_values = 0;
loop {
let context = TContext::new(graph.clone()).with_salt(iterations);
// Compute the root, and validate its output.
let node_output = match graph.create(TNode(range), &context).wait() {
Ok(output) => output,
Err(TError::Invalidated) => {
// Some amnount of concurrent invalidation is expected: retry.
continue;
}
Err(e) => panic!(
"Did not expect any errors other than Invalidation. Got: {:?}",
e
),
};
max_distinct_context_values = cmp::max(
max_distinct_context_values,
TNode::validate(&node_output).unwrap(),
);
// Poll the channel to see whether the background thread has exited.
if let Ok(_) = recv.try_recv() {
break;
}
iterations += 1;
}
assert!(
max_distinct_context_values > 1,
"In {} iterations, observed a maximum of {} distinct context values.",
iterations,
max_distinct_context_values
);
}
#[test]
fn dirty_dependents_of_uncacheable_node() {
let graph = Arc::new(Graph::new());
// Create a context for which the bottommost Node is not cacheable.
let context = {
let mut uncacheable = HashSet::new();
uncacheable.insert(TNode(0));
TContext::new(graph.clone()).with_uncacheable(uncacheable)
};
// Create three nodes.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
assert_eq!(context.runs(), vec![TNode(2), TNode(1), TNode(0)]);
// Re-request the root in a new session and confirm that only the bottom node re-runs.
let context = context.new_session(1);
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
assert_eq!(context.runs(), vec![TNode(0)]);
// Re-request with a new session and different salt, and confirm that everything re-runs bottom
// up (the order of node cleaning).
let context = context.new_session(2).with_salt(1);
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 1), T(1, 1), T(2, 1)])
);
assert_eq!(context.runs(), vec![TNode(0), TNode(1), TNode(2)]);
}
#[test]
fn drain_and_resume() {
// Confirms that after draining a Graph that has running work, we are able to resume the work
// and have it complete successfully.
let graph = Arc::new(Graph::new());
let delay_before_drain = Duration::from_millis(100);
let delay_in_task = delay_before_drain * 10;
// Create a context that will sleep long enough at TNode(1) to be interrupted before
// requesting TNode(0).
let context = {
let mut delays = HashMap::new();
delays.insert(TNode(1), delay_in_task);
TContext::new(graph.clone()).with_delays(delays)
};
// Spawn a background thread that will mark the Graph draining after a short delay.
let graph2 = graph.clone();
let _join = thread::spawn(move || {
thread::sleep(delay_before_drain);
graph2
.mark_draining(true)
.expect("Should not already be draining.");
});
// Request a TNode(1) in the "delayed" context, and expect it to be interrupted by the
// drain.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Err(TError::Invalidated),
);
// Unmark the Graph draining, and try again: we expect the `Invalidated` result we saw before
// due to the draining to not have been persisted.
graph
.mark_draining(false)
.expect("Should already be draining.");
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
}
#[test]
fn cyclic_failure() {
// Confirms that an attempt to create a cycle fails.
let graph = Arc::new(Graph::new());
let top = TNode(2);
let context = TContext::new(graph.clone()).with_dependencies(
// Request creation of a cycle by sending the bottom most node to the top.
vec![(TNode(0), Some(top))].into_iter().collect(),
);
assert_eq!(graph.create(TNode(2), &context).wait(), Err(TError::Cyclic));
}
#[test]
fn cyclic_dirtying() {
// Confirms that a dirtied path between two nodes is able to reverse direction while being
// cleaned.
let graph = Arc::new(Graph::new());
let initial_top = TNode(2);
let initial_bot = TNode(0);
// Request with a context that creates a path downward.
let context_down = TContext::new(graph.clone());
assert_eq!(
graph.create(initial_top.clone(), &context_down).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
// Clear the bottom node, and then clean it with a context that causes the path to reverse.
graph.invalidate_from_roots(|n| n == &initial_bot);
let context_up = context_down.with_salt(1).with_dependencies(
// Reverse the path from bottom to top.
vec![(TNode(1), None), (TNode(0), Some(TNode(1)))]
.into_iter()
.collect(),
);
let res = graph.create(initial_bot, &context_up).wait();
assert_eq!(res, Ok(vec![T(1, 1), T(0, 1)]));
let res = graph.create(initial_top, &context_up).wait();
assert_eq!(res, Ok(vec![T(1, 1), T(2, 1)]));
}
#[test]
fn critical_path() {
use super::entry::Entry;
// First, let's describe the scenario with plain data.
//
// We label the nodes with static strings to help visualise the situation.
// The first element of each tuple is a readable label. The second element represents the
// duration for this action.
let nodes = [
("download jvm", 10),
("download a", 1),
("download b", 2),
("download c", 3),
("compile a", 3),
("compile b", 20),
("compile c", 5),
];
let deps = [
("download jvm", "compile a"),
("download jvm", "compile b"),
("download jvm", "compile c"),
("download a", "compile a"),
("download b", "compile b"),
("download c", "compile c"),
("compile a", "compile c"),
("compile b", "compile c"),
];
// Describe a few transformations to navigate between our readable data and the actual types
// needed for the graph.
let tnode = |node: &str| {
TNode(
nodes
.iter()
.map(|(k, _)| k)
.position(|label| &node == label)
.unwrap(),
)
};
let node_key = |node: &str| tnode(node);
let node_entry = |node: &str| Entry::new(node_key(node));
let node_and_duration_from_entry = |entry: &super::entry::Entry<TNode>| nodes[entry.node().0];
let node_duration =
|entry: &super::entry::Entry<TNode>| Duration::from_secs(node_and_duration_from_entry(entry).1);
// Construct a graph and populate it with the nodes and edges prettily defined above.
let graph = Graph::new();
{
let inner = &mut graph.inner.lock();
for (node, _) in &nodes {
let node_index = inner.pg.add_node(node_entry(node));
inner.nodes.insert(node_key(node), node_index);
}
for (src, dst) in &deps {
let src = inner.nodes[&node_key(src)];
let dst = inner.nodes[&node_key(dst)];
inner.pg.add_edge(src, dst, 1.0);
}
}
// Calculate the critical path and validate it.
{
// The roots are all the sources, so we're covering the entire graph
let roots = ["download jvm", "download a", "download b", "download c"]
.iter()
.map(|n| tnode(n))
.collect::<Vec<_>>();
let (expected_total_duration, expected_critical_path) = (
Duration::from_secs(35),
vec!["download jvm", "compile b", "compile c"],
);
let (total_duration, critical_path) = graph.critical_path(&roots, &node_duration);
assert_eq!(expected_total_duration, total_duration);
let critical_path = critical_path
.iter()
.map(|entry| node_and_duration_from_entry(entry).0)
.collect::<Vec<_>>();
assert_eq!(expected_critical_path, critical_path);
}
{
// The roots exclude some nodes ("download jvm", "download a") from the graph.
let roots = ["download b", "download c"]
.iter()
.map(|n| tnode(n))
.collect::<Vec<_>>();
let (expected_total_duration, expected_critical_path) = (
Duration::from_secs(27),
vec!["download b", "compile b", "compile c"],
);
let (total_duration, critical_path) = graph.critical_path(&roots, &node_duration);
assert_eq!(expected_total_duration, total_duration);
let critical_path = critical_path
.iter()
.map(|entry| node_and_duration_from_entry(entry).0)
.collect::<Vec<_>>();
assert_eq!(expected_critical_path, critical_path);
}
}
///
/// A token containing the id of a Node and the id of a Context, respectively. Has a short name
/// to minimize the verbosity of tests.
///
#[derive(Clone, Debug, Eq, PartialEq)]
struct T(usize, usize);
///
/// A node that builds a Vec of tokens by recursively requesting itself and appending its value
/// to the result.
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct TNode(usize);
impl Node for TNode {
type Context = TContext;
type Item = Vec<T>;
type Error = TError;
fn run(self, context: TContext) -> BoxFuture<Vec<T>, TError> {
context.ran(self.clone());
let token = T(self.0, context.salt());
if let Some(dep) = context.dependency_of(&self) {
context.maybe_delay(&self);
context
.get(dep)
.map(move |mut v| {
v.push(token);
v
})
.to_boxed()
} else {
future::ok(vec![token]).to_boxed()
}
}
fn digest(_result: Self::Item) -> Option<Digest> {
None
}
fn cacheable(&self, context: &Self::Context) -> bool {
!context.uncacheable.contains(self)
}
}
impl std::fmt::Display for TNode {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
write!(f, "{:?}", self)
}
}
impl TNode {
///
/// Validates the given TNode output. Both node ids and context ids should increase left to
/// right: node ids monotonically, and context ids non-monotonically.
///
/// Valid:
/// (0,0), (1,1), (2,2), (3,3)
/// (0,0), (1,0), (2,1), (3,1)
///
/// Invalid:
/// (0,0), (1,1), (2,1), (3,0)
/// (0,0), (1,0), (2,0), (1,0)
///
/// If successful, returns the count of distinct context ids in the path.
///
fn validate(output: &Vec<T>) -> Result<usize, String> {
let (node_ids, context_ids): (Vec<_>, Vec<_>) = output
.iter()
.map(|&T(node_id, context_id)| {
// We cast to isize to allow comparison to -1.
(node_id as isize, context_id)
})
.unzip();
// Confirm monotonically ordered.
let mut previous: isize = -1;
for node_id in node_ids {
if previous + 1!= node_id {
return Err(format!(
"Node ids in {:?} were not monotonically ordered.",
output
));
}
previous = node_id;
}
// Confirm ordered (non-monotonically).
let mut previous: usize = 0;
for &context_id in &context_ids {
if previous > context_id {
return Err(format!("Context ids in {:?} were not ordered.", output));
}
previous = context_id;
}
Ok(context_ids.into_iter().collect::<HashSet<_>>().len())
}
}
///
/// A context that keeps a record of Nodes that have been run.
///
#[derive(Clone)]
struct TContext {
session_id: usize,
// A value that is included in every value computed by this context. Stands in for "the state of the
// outside world". A test that wants to "change the outside world" and observe its effect on the
// graph should change the salt to do so.
salt: usize,
// A mapping from source to optional destination that drives what values each TNode depends on.
// If there is no entry in this map for a node, then TNode::run will default to requesting
// the next smallest node. Finally, if a None entry is present, a node will have no
// dependencies.
edges: Arc<HashMap<TNode, Option<TNode>>>,
delays: Arc<HashMap<TNode, Duration>>,
uncacheable: Arc<HashSet<TNode>>,
graph: Arc<Graph<TNode>>,
runs: Arc<Mutex<Vec<TNode>>>,
entry_id: Option<EntryId>,
}
impl NodeContext for TContext {
type Node = TNode;
type SessionId = usize;
fn clone_for(&self, entry_id: EntryId) -> TContext {
TContext {
session_id: self.session_id,
salt: self.salt,
edges: self.edges.clone(),
delays: self.delays.clone(),
uncacheable: self.uncacheable.clone(),
graph: self.graph.clone(),
runs: self.runs.clone(),
entry_id: Some(entry_id),
}
}
fn session_id(&self) -> &usize {
&self.session_id
}
fn graph(&self) -> &Graph<TNode> {
&self.graph
}
fn spawn<F>(&self, future: F)
where
F: Future<Item = (), Error = ()> + Send +'static,
{
// Avoids introducing a dependency on a threadpool.
thread::spawn(move || {
future.wait().unwrap();
});
}
}
impl TContext {
fn new(graph: Arc<Graph<TNode>>) -> TContext {
TContext {
session_id: 0,
salt: 0,
edges: Arc::default(),
delays: Arc::default(),
uncacheable: Arc::default(),
graph,
runs: Arc::new(Mutex::new(Vec::new())),
entry_id: None,
}
}
fn with_dependencies(mut self, edges: HashMap<TNode, Option<TNode>>) -> TContext {
self.edges = Arc::new(edges);
self
}
fn with_delays(mut self, delays: HashMap<TNode, Duration>) -> TContext {
self.delays = Arc::new(delays);
self
}
fn with_uncacheable(mut self, uncacheable: HashSet<TNode>) -> TContext
|
fn with_salt(mut self, salt: usize) -> TContext {
self.salt = salt;
self
}
fn new_session(mut self, new_session_id: usize) -> TContext {
self.session_id = new_session_id;
{
let mut runs = self.runs.lock();
runs.clear();
}
self
}
fn salt(&self) -> usize {
self.salt
}
fn get(&self, dst: TNode) -> BoxFuture<Vec<T>, TError> {
self.graph.get(self.entry_id.unwrap(), self, dst)
}
fn ran(&self, node: TNode) {
let mut runs = self.runs.lock();
runs.push(node);
}
fn maybe_delay(&self, node: &TNode) {
if let Some(delay) = self.delays.get(node) {
thread::sleep(*delay);
}
}
///
/// If the given TNode should declare a dependency on another TNode, returns that dependency.
///
fn dependency_of(&self, node: &TNode) -> Option<TNode> {
match self.edges.get(node) {
Some(Some(ref dep)) => Some(dep.clone()),
Some(None) => None,
None if node.0 > 0 => Some(TNode(node.0 - 1)),
None => None,
}
}
fn runs(&self) -> Vec<TNode> {
self.runs.lock().clone()
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
enum TError {
Cyclic,
Invalidated,
}
impl NodeError for TError {
fn invalidated() -> Self {
TError::Invalidated
}
fn cyclic(_path: Vec<String>) -> Self {
TError::Cyclic
}
}
|
{
self.uncacheable = Arc::new(uncacheable);
self
}
|
identifier_body
|
tests.rs
|
use parking_lot;
use rand;
use std::cmp;
use std::collections::{HashMap, HashSet};
use std::sync::{mpsc, Arc};
use std::thread;
use std::time::Duration;
use boxfuture::{BoxFuture, Boxable};
use futures01::future::{self, Future};
use hashing::Digest;
use parking_lot::Mutex;
use rand::Rng;
use crate::{EntryId, Graph, InvalidationResult, Node, NodeContext, NodeError};
#[test]
fn create() {
let graph = Arc::new(Graph::new());
let context = TContext::new(graph.clone());
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
}
#[test]
fn invalidate_and_clean() {
let graph = Arc::new(Graph::new());
let context = TContext::new(graph.clone());
// Create three nodes.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
assert_eq!(context.runs(), vec![TNode(2), TNode(1), TNode(0)]);
// Clear the middle Node, which dirties the upper node.
assert_eq!(
graph.invalidate_from_roots(|&TNode(n)| n == 1),
InvalidationResult {
cleared: 1,
dirtied: 1
}
);
// Confirm that the cleared Node re-runs, and the upper node is cleaned without re-running.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
assert_eq!(context.runs(), vec![TNode(2), TNode(1), TNode(0), TNode(1)]);
}
#[test]
fn invalidate_and_rerun() {
let graph = Arc::new(Graph::new());
let context = TContext::new(graph.clone());
// Create three nodes.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
assert_eq!(context.runs(), vec![TNode(2), TNode(1), TNode(0)]);
// Clear the middle Node, which dirties the upper node.
assert_eq!(
graph.invalidate_from_roots(|&TNode(n)| n == 1),
InvalidationResult {
cleared: 1,
dirtied: 1
}
);
// Request with a different salt, which will cause both the middle and upper nodes to rerun since
// their input values have changed.
let context = context.new_session(1).with_salt(1);
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 1), T(2, 1)])
);
assert_eq!(context.runs(), vec![TNode(1), TNode(2)]);
}
#[test]
fn invalidate_with_changed_dependencies() {
let graph = Arc::new(Graph::new());
let context = TContext::new(graph.clone());
// Create three nodes.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
// Clear the middle Node, which dirties the upper node.
assert_eq!(
graph.invalidate_from_roots(|&TNode(n)| n == 1),
InvalidationResult {
cleared: 1,
dirtied: 1
}
);
// Request with a new context that truncates execution at the middle Node.
let context =
TContext::new(graph.clone()).with_dependencies(vec![(TNode(1), None)].into_iter().collect());
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(1, 0), T(2, 0)])
);
// Confirm that dirtying the bottom Node does not affect the middle/upper Nodes, which no
// longer depend on it.
assert_eq!(
graph.invalidate_from_roots(|&TNode(n)| n == 0),
InvalidationResult {
cleared: 1,
dirtied: 0,
}
);
}
#[test]
fn invalidate_randomly() {
let graph = Arc::new(Graph::new());
let invalidations = 10;
let sleep_per_invalidation = Duration::from_millis(100);
let range = 100;
// Spawn a background thread to randomly invalidate in the relevant range. Hold its handle so
// it doesn't detach.
let graph2 = graph.clone();
let (send, recv) = mpsc::channel();
let _join = thread::spawn(move || {
let mut rng = rand::thread_rng();
let mut invalidations = invalidations;
while invalidations > 0 {
invalidations -= 1;
// Invalidate a random node in the graph.
let candidate = rng.gen_range(0, range);
graph2.invalidate_from_roots(|&TNode(n)| n == candidate);
thread::sleep(sleep_per_invalidation);
}
send.send(()).unwrap();
});
// Continuously re-request the root with increasing context values, and assert that Node and
// context values are ascending.
let mut iterations = 0;
let mut max_distinct_context_values = 0;
loop {
let context = TContext::new(graph.clone()).with_salt(iterations);
// Compute the root, and validate its output.
let node_output = match graph.create(TNode(range), &context).wait() {
Ok(output) => output,
Err(TError::Invalidated) => {
// Some amnount of concurrent invalidation is expected: retry.
continue;
}
Err(e) => panic!(
"Did not expect any errors other than Invalidation. Got: {:?}",
e
),
};
max_distinct_context_values = cmp::max(
max_distinct_context_values,
TNode::validate(&node_output).unwrap(),
);
// Poll the channel to see whether the background thread has exited.
if let Ok(_) = recv.try_recv() {
break;
}
iterations += 1;
}
assert!(
max_distinct_context_values > 1,
"In {} iterations, observed a maximum of {} distinct context values.",
iterations,
max_distinct_context_values
);
}
#[test]
fn dirty_dependents_of_uncacheable_node() {
let graph = Arc::new(Graph::new());
// Create a context for which the bottommost Node is not cacheable.
let context = {
let mut uncacheable = HashSet::new();
uncacheable.insert(TNode(0));
TContext::new(graph.clone()).with_uncacheable(uncacheable)
};
// Create three nodes.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
assert_eq!(context.runs(), vec![TNode(2), TNode(1), TNode(0)]);
// Re-request the root in a new session and confirm that only the bottom node re-runs.
let context = context.new_session(1);
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
assert_eq!(context.runs(), vec![TNode(0)]);
// Re-request with a new session and different salt, and confirm that everything re-runs bottom
// up (the order of node cleaning).
let context = context.new_session(2).with_salt(1);
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 1), T(1, 1), T(2, 1)])
);
assert_eq!(context.runs(), vec![TNode(0), TNode(1), TNode(2)]);
}
#[test]
fn drain_and_resume() {
// Confirms that after draining a Graph that has running work, we are able to resume the work
// and have it complete successfully.
let graph = Arc::new(Graph::new());
let delay_before_drain = Duration::from_millis(100);
let delay_in_task = delay_before_drain * 10;
// Create a context that will sleep long enough at TNode(1) to be interrupted before
// requesting TNode(0).
let context = {
let mut delays = HashMap::new();
delays.insert(TNode(1), delay_in_task);
TContext::new(graph.clone()).with_delays(delays)
};
// Spawn a background thread that will mark the Graph draining after a short delay.
let graph2 = graph.clone();
let _join = thread::spawn(move || {
thread::sleep(delay_before_drain);
graph2
.mark_draining(true)
.expect("Should not already be draining.");
});
// Request a TNode(1) in the "delayed" context, and expect it to be interrupted by the
// drain.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Err(TError::Invalidated),
);
// Unmark the Graph draining, and try again: we expect the `Invalidated` result we saw before
// due to the draining to not have been persisted.
graph
.mark_draining(false)
.expect("Should already be draining.");
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
}
#[test]
fn cyclic_failure() {
// Confirms that an attempt to create a cycle fails.
let graph = Arc::new(Graph::new());
let top = TNode(2);
let context = TContext::new(graph.clone()).with_dependencies(
// Request creation of a cycle by sending the bottom most node to the top.
vec![(TNode(0), Some(top))].into_iter().collect(),
);
assert_eq!(graph.create(TNode(2), &context).wait(), Err(TError::Cyclic));
}
#[test]
fn cyclic_dirtying() {
// Confirms that a dirtied path between two nodes is able to reverse direction while being
// cleaned.
let graph = Arc::new(Graph::new());
let initial_top = TNode(2);
let initial_bot = TNode(0);
// Request with a context that creates a path downward.
let context_down = TContext::new(graph.clone());
assert_eq!(
graph.create(initial_top.clone(), &context_down).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
// Clear the bottom node, and then clean it with a context that causes the path to reverse.
graph.invalidate_from_roots(|n| n == &initial_bot);
let context_up = context_down.with_salt(1).with_dependencies(
// Reverse the path from bottom to top.
vec![(TNode(1), None), (TNode(0), Some(TNode(1)))]
.into_iter()
.collect(),
);
let res = graph.create(initial_bot, &context_up).wait();
assert_eq!(res, Ok(vec![T(1, 1), T(0, 1)]));
let res = graph.create(initial_top, &context_up).wait();
assert_eq!(res, Ok(vec![T(1, 1), T(2, 1)]));
}
#[test]
fn critical_path() {
use super::entry::Entry;
// First, let's describe the scenario with plain data.
//
// We label the nodes with static strings to help visualise the situation.
// The first element of each tuple is a readable label. The second element represents the
// duration for this action.
let nodes = [
("download jvm", 10),
("download a", 1),
("download b", 2),
("download c", 3),
("compile a", 3),
("compile b", 20),
("compile c", 5),
];
let deps = [
("download jvm", "compile a"),
("download jvm", "compile b"),
("download jvm", "compile c"),
("download a", "compile a"),
("download b", "compile b"),
("download c", "compile c"),
("compile a", "compile c"),
("compile b", "compile c"),
];
// Describe a few transformations to navigate between our readable data and the actual types
// needed for the graph.
let tnode = |node: &str| {
TNode(
nodes
.iter()
.map(|(k, _)| k)
.position(|label| &node == label)
.unwrap(),
)
};
let node_key = |node: &str| tnode(node);
let node_entry = |node: &str| Entry::new(node_key(node));
let node_and_duration_from_entry = |entry: &super::entry::Entry<TNode>| nodes[entry.node().0];
let node_duration =
|entry: &super::entry::Entry<TNode>| Duration::from_secs(node_and_duration_from_entry(entry).1);
// Construct a graph and populate it with the nodes and edges prettily defined above.
let graph = Graph::new();
{
let inner = &mut graph.inner.lock();
for (node, _) in &nodes {
let node_index = inner.pg.add_node(node_entry(node));
inner.nodes.insert(node_key(node), node_index);
}
for (src, dst) in &deps {
let src = inner.nodes[&node_key(src)];
let dst = inner.nodes[&node_key(dst)];
inner.pg.add_edge(src, dst, 1.0);
}
}
// Calculate the critical path and validate it.
{
// The roots are all the sources, so we're covering the entire graph
let roots = ["download jvm", "download a", "download b", "download c"]
.iter()
.map(|n| tnode(n))
.collect::<Vec<_>>();
let (expected_total_duration, expected_critical_path) = (
Duration::from_secs(35),
vec!["download jvm", "compile b", "compile c"],
);
let (total_duration, critical_path) = graph.critical_path(&roots, &node_duration);
assert_eq!(expected_total_duration, total_duration);
let critical_path = critical_path
.iter()
.map(|entry| node_and_duration_from_entry(entry).0)
.collect::<Vec<_>>();
assert_eq!(expected_critical_path, critical_path);
}
{
// The roots exclude some nodes ("download jvm", "download a") from the graph.
let roots = ["download b", "download c"]
.iter()
.map(|n| tnode(n))
.collect::<Vec<_>>();
let (expected_total_duration, expected_critical_path) = (
Duration::from_secs(27),
vec!["download b", "compile b", "compile c"],
);
let (total_duration, critical_path) = graph.critical_path(&roots, &node_duration);
assert_eq!(expected_total_duration, total_duration);
let critical_path = critical_path
.iter()
.map(|entry| node_and_duration_from_entry(entry).0)
.collect::<Vec<_>>();
assert_eq!(expected_critical_path, critical_path);
}
}
///
/// A token containing the id of a Node and the id of a Context, respectively. Has a short name
/// to minimize the verbosity of tests.
///
#[derive(Clone, Debug, Eq, PartialEq)]
struct T(usize, usize);
///
/// A node that builds a Vec of tokens by recursively requesting itself and appending its value
/// to the result.
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct TNode(usize);
impl Node for TNode {
type Context = TContext;
type Item = Vec<T>;
type Error = TError;
fn run(self, context: TContext) -> BoxFuture<Vec<T>, TError> {
context.ran(self.clone());
let token = T(self.0, context.salt());
if let Some(dep) = context.dependency_of(&self) {
context.maybe_delay(&self);
context
.get(dep)
.map(move |mut v| {
v.push(token);
v
})
.to_boxed()
} else {
future::ok(vec![token]).to_boxed()
}
}
fn digest(_result: Self::Item) -> Option<Digest> {
None
}
fn cacheable(&self, context: &Self::Context) -> bool {
!context.uncacheable.contains(self)
}
}
impl std::fmt::Display for TNode {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
write!(f, "{:?}", self)
}
}
impl TNode {
///
/// Validates the given TNode output. Both node ids and context ids should increase left to
/// right: node ids monotonically, and context ids non-monotonically.
///
/// Valid:
/// (0,0), (1,1), (2,2), (3,3)
/// (0,0), (1,0), (2,1), (3,1)
///
/// Invalid:
/// (0,0), (1,1), (2,1), (3,0)
/// (0,0), (1,0), (2,0), (1,0)
///
/// If successful, returns the count of distinct context ids in the path.
///
fn validate(output: &Vec<T>) -> Result<usize, String> {
let (node_ids, context_ids): (Vec<_>, Vec<_>) = output
.iter()
.map(|&T(node_id, context_id)| {
// We cast to isize to allow comparison to -1.
(node_id as isize, context_id)
})
.unzip();
// Confirm monotonically ordered.
let mut previous: isize = -1;
for node_id in node_ids {
if previous + 1!= node_id {
return Err(format!(
"Node ids in {:?} were not monotonically ordered.",
output
));
}
previous = node_id;
}
// Confirm ordered (non-monotonically).
let mut previous: usize = 0;
for &context_id in &context_ids {
if previous > context_id {
return Err(format!("Context ids in {:?} were not ordered.", output));
}
previous = context_id;
}
Ok(context_ids.into_iter().collect::<HashSet<_>>().len())
}
}
///
/// A context that keeps a record of Nodes that have been run.
///
#[derive(Clone)]
struct TContext {
session_id: usize,
// A value that is included in every value computed by this context. Stands in for "the state of the
// outside world". A test that wants to "change the outside world" and observe its effect on the
// graph should change the salt to do so.
salt: usize,
// A mapping from source to optional destination that drives what values each TNode depends on.
// If there is no entry in this map for a node, then TNode::run will default to requesting
// the next smallest node. Finally, if a None entry is present, a node will have no
// dependencies.
edges: Arc<HashMap<TNode, Option<TNode>>>,
delays: Arc<HashMap<TNode, Duration>>,
uncacheable: Arc<HashSet<TNode>>,
graph: Arc<Graph<TNode>>,
runs: Arc<Mutex<Vec<TNode>>>,
entry_id: Option<EntryId>,
}
impl NodeContext for TContext {
type Node = TNode;
type SessionId = usize;
fn clone_for(&self, entry_id: EntryId) -> TContext {
TContext {
session_id: self.session_id,
salt: self.salt,
edges: self.edges.clone(),
delays: self.delays.clone(),
uncacheable: self.uncacheable.clone(),
graph: self.graph.clone(),
runs: self.runs.clone(),
entry_id: Some(entry_id),
}
}
fn session_id(&self) -> &usize {
&self.session_id
}
fn graph(&self) -> &Graph<TNode> {
&self.graph
}
fn spawn<F>(&self, future: F)
where
F: Future<Item = (), Error = ()> + Send +'static,
{
// Avoids introducing a dependency on a threadpool.
thread::spawn(move || {
future.wait().unwrap();
});
}
}
impl TContext {
fn new(graph: Arc<Graph<TNode>>) -> TContext {
TContext {
session_id: 0,
salt: 0,
edges: Arc::default(),
delays: Arc::default(),
uncacheable: Arc::default(),
graph,
runs: Arc::new(Mutex::new(Vec::new())),
entry_id: None,
}
}
|
}
fn with_delays(mut self, delays: HashMap<TNode, Duration>) -> TContext {
self.delays = Arc::new(delays);
self
}
fn with_uncacheable(mut self, uncacheable: HashSet<TNode>) -> TContext {
self.uncacheable = Arc::new(uncacheable);
self
}
fn with_salt(mut self, salt: usize) -> TContext {
self.salt = salt;
self
}
fn new_session(mut self, new_session_id: usize) -> TContext {
self.session_id = new_session_id;
{
let mut runs = self.runs.lock();
runs.clear();
}
self
}
fn salt(&self) -> usize {
self.salt
}
fn get(&self, dst: TNode) -> BoxFuture<Vec<T>, TError> {
self.graph.get(self.entry_id.unwrap(), self, dst)
}
fn ran(&self, node: TNode) {
let mut runs = self.runs.lock();
runs.push(node);
}
fn maybe_delay(&self, node: &TNode) {
if let Some(delay) = self.delays.get(node) {
thread::sleep(*delay);
}
}
///
/// If the given TNode should declare a dependency on another TNode, returns that dependency.
///
fn dependency_of(&self, node: &TNode) -> Option<TNode> {
match self.edges.get(node) {
Some(Some(ref dep)) => Some(dep.clone()),
Some(None) => None,
None if node.0 > 0 => Some(TNode(node.0 - 1)),
None => None,
}
}
fn runs(&self) -> Vec<TNode> {
self.runs.lock().clone()
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
enum TError {
Cyclic,
Invalidated,
}
impl NodeError for TError {
fn invalidated() -> Self {
TError::Invalidated
}
fn cyclic(_path: Vec<String>) -> Self {
TError::Cyclic
}
}
|
fn with_dependencies(mut self, edges: HashMap<TNode, Option<TNode>>) -> TContext {
self.edges = Arc::new(edges);
self
|
random_line_split
|
tests.rs
|
use parking_lot;
use rand;
use std::cmp;
use std::collections::{HashMap, HashSet};
use std::sync::{mpsc, Arc};
use std::thread;
use std::time::Duration;
use boxfuture::{BoxFuture, Boxable};
use futures01::future::{self, Future};
use hashing::Digest;
use parking_lot::Mutex;
use rand::Rng;
use crate::{EntryId, Graph, InvalidationResult, Node, NodeContext, NodeError};
#[test]
fn create() {
let graph = Arc::new(Graph::new());
let context = TContext::new(graph.clone());
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
}
#[test]
fn invalidate_and_clean() {
let graph = Arc::new(Graph::new());
let context = TContext::new(graph.clone());
// Create three nodes.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
assert_eq!(context.runs(), vec![TNode(2), TNode(1), TNode(0)]);
// Clear the middle Node, which dirties the upper node.
assert_eq!(
graph.invalidate_from_roots(|&TNode(n)| n == 1),
InvalidationResult {
cleared: 1,
dirtied: 1
}
);
// Confirm that the cleared Node re-runs, and the upper node is cleaned without re-running.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
assert_eq!(context.runs(), vec![TNode(2), TNode(1), TNode(0), TNode(1)]);
}
#[test]
fn invalidate_and_rerun() {
let graph = Arc::new(Graph::new());
let context = TContext::new(graph.clone());
// Create three nodes.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
assert_eq!(context.runs(), vec![TNode(2), TNode(1), TNode(0)]);
// Clear the middle Node, which dirties the upper node.
assert_eq!(
graph.invalidate_from_roots(|&TNode(n)| n == 1),
InvalidationResult {
cleared: 1,
dirtied: 1
}
);
// Request with a different salt, which will cause both the middle and upper nodes to rerun since
// their input values have changed.
let context = context.new_session(1).with_salt(1);
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 1), T(2, 1)])
);
assert_eq!(context.runs(), vec![TNode(1), TNode(2)]);
}
#[test]
fn invalidate_with_changed_dependencies() {
let graph = Arc::new(Graph::new());
let context = TContext::new(graph.clone());
// Create three nodes.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
// Clear the middle Node, which dirties the upper node.
assert_eq!(
graph.invalidate_from_roots(|&TNode(n)| n == 1),
InvalidationResult {
cleared: 1,
dirtied: 1
}
);
// Request with a new context that truncates execution at the middle Node.
let context =
TContext::new(graph.clone()).with_dependencies(vec![(TNode(1), None)].into_iter().collect());
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(1, 0), T(2, 0)])
);
// Confirm that dirtying the bottom Node does not affect the middle/upper Nodes, which no
// longer depend on it.
assert_eq!(
graph.invalidate_from_roots(|&TNode(n)| n == 0),
InvalidationResult {
cleared: 1,
dirtied: 0,
}
);
}
#[test]
fn invalidate_randomly() {
let graph = Arc::new(Graph::new());
let invalidations = 10;
let sleep_per_invalidation = Duration::from_millis(100);
let range = 100;
// Spawn a background thread to randomly invalidate in the relevant range. Hold its handle so
// it doesn't detach.
let graph2 = graph.clone();
let (send, recv) = mpsc::channel();
let _join = thread::spawn(move || {
let mut rng = rand::thread_rng();
let mut invalidations = invalidations;
while invalidations > 0 {
invalidations -= 1;
// Invalidate a random node in the graph.
let candidate = rng.gen_range(0, range);
graph2.invalidate_from_roots(|&TNode(n)| n == candidate);
thread::sleep(sleep_per_invalidation);
}
send.send(()).unwrap();
});
// Continuously re-request the root with increasing context values, and assert that Node and
// context values are ascending.
let mut iterations = 0;
let mut max_distinct_context_values = 0;
loop {
let context = TContext::new(graph.clone()).with_salt(iterations);
// Compute the root, and validate its output.
let node_output = match graph.create(TNode(range), &context).wait() {
Ok(output) => output,
Err(TError::Invalidated) => {
// Some amnount of concurrent invalidation is expected: retry.
continue;
}
Err(e) => panic!(
"Did not expect any errors other than Invalidation. Got: {:?}",
e
),
};
max_distinct_context_values = cmp::max(
max_distinct_context_values,
TNode::validate(&node_output).unwrap(),
);
// Poll the channel to see whether the background thread has exited.
if let Ok(_) = recv.try_recv() {
break;
}
iterations += 1;
}
assert!(
max_distinct_context_values > 1,
"In {} iterations, observed a maximum of {} distinct context values.",
iterations,
max_distinct_context_values
);
}
#[test]
fn dirty_dependents_of_uncacheable_node() {
let graph = Arc::new(Graph::new());
// Create a context for which the bottommost Node is not cacheable.
let context = {
let mut uncacheable = HashSet::new();
uncacheable.insert(TNode(0));
TContext::new(graph.clone()).with_uncacheable(uncacheable)
};
// Create three nodes.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
assert_eq!(context.runs(), vec![TNode(2), TNode(1), TNode(0)]);
// Re-request the root in a new session and confirm that only the bottom node re-runs.
let context = context.new_session(1);
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
assert_eq!(context.runs(), vec![TNode(0)]);
// Re-request with a new session and different salt, and confirm that everything re-runs bottom
// up (the order of node cleaning).
let context = context.new_session(2).with_salt(1);
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 1), T(1, 1), T(2, 1)])
);
assert_eq!(context.runs(), vec![TNode(0), TNode(1), TNode(2)]);
}
#[test]
fn drain_and_resume() {
// Confirms that after draining a Graph that has running work, we are able to resume the work
// and have it complete successfully.
let graph = Arc::new(Graph::new());
let delay_before_drain = Duration::from_millis(100);
let delay_in_task = delay_before_drain * 10;
// Create a context that will sleep long enough at TNode(1) to be interrupted before
// requesting TNode(0).
let context = {
let mut delays = HashMap::new();
delays.insert(TNode(1), delay_in_task);
TContext::new(graph.clone()).with_delays(delays)
};
// Spawn a background thread that will mark the Graph draining after a short delay.
let graph2 = graph.clone();
let _join = thread::spawn(move || {
thread::sleep(delay_before_drain);
graph2
.mark_draining(true)
.expect("Should not already be draining.");
});
// Request a TNode(1) in the "delayed" context, and expect it to be interrupted by the
// drain.
assert_eq!(
graph.create(TNode(2), &context).wait(),
Err(TError::Invalidated),
);
// Unmark the Graph draining, and try again: we expect the `Invalidated` result we saw before
// due to the draining to not have been persisted.
graph
.mark_draining(false)
.expect("Should already be draining.");
assert_eq!(
graph.create(TNode(2), &context).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
}
#[test]
fn cyclic_failure() {
// Confirms that an attempt to create a cycle fails.
let graph = Arc::new(Graph::new());
let top = TNode(2);
let context = TContext::new(graph.clone()).with_dependencies(
// Request creation of a cycle by sending the bottom most node to the top.
vec![(TNode(0), Some(top))].into_iter().collect(),
);
assert_eq!(graph.create(TNode(2), &context).wait(), Err(TError::Cyclic));
}
#[test]
fn cyclic_dirtying() {
// Confirms that a dirtied path between two nodes is able to reverse direction while being
// cleaned.
let graph = Arc::new(Graph::new());
let initial_top = TNode(2);
let initial_bot = TNode(0);
// Request with a context that creates a path downward.
let context_down = TContext::new(graph.clone());
assert_eq!(
graph.create(initial_top.clone(), &context_down).wait(),
Ok(vec![T(0, 0), T(1, 0), T(2, 0)])
);
// Clear the bottom node, and then clean it with a context that causes the path to reverse.
graph.invalidate_from_roots(|n| n == &initial_bot);
let context_up = context_down.with_salt(1).with_dependencies(
// Reverse the path from bottom to top.
vec![(TNode(1), None), (TNode(0), Some(TNode(1)))]
.into_iter()
.collect(),
);
let res = graph.create(initial_bot, &context_up).wait();
assert_eq!(res, Ok(vec![T(1, 1), T(0, 1)]));
let res = graph.create(initial_top, &context_up).wait();
assert_eq!(res, Ok(vec![T(1, 1), T(2, 1)]));
}
#[test]
fn
|
() {
use super::entry::Entry;
// First, let's describe the scenario with plain data.
//
// We label the nodes with static strings to help visualise the situation.
// The first element of each tuple is a readable label. The second element represents the
// duration for this action.
let nodes = [
("download jvm", 10),
("download a", 1),
("download b", 2),
("download c", 3),
("compile a", 3),
("compile b", 20),
("compile c", 5),
];
let deps = [
("download jvm", "compile a"),
("download jvm", "compile b"),
("download jvm", "compile c"),
("download a", "compile a"),
("download b", "compile b"),
("download c", "compile c"),
("compile a", "compile c"),
("compile b", "compile c"),
];
// Describe a few transformations to navigate between our readable data and the actual types
// needed for the graph.
let tnode = |node: &str| {
TNode(
nodes
.iter()
.map(|(k, _)| k)
.position(|label| &node == label)
.unwrap(),
)
};
let node_key = |node: &str| tnode(node);
let node_entry = |node: &str| Entry::new(node_key(node));
let node_and_duration_from_entry = |entry: &super::entry::Entry<TNode>| nodes[entry.node().0];
let node_duration =
|entry: &super::entry::Entry<TNode>| Duration::from_secs(node_and_duration_from_entry(entry).1);
// Construct a graph and populate it with the nodes and edges prettily defined above.
let graph = Graph::new();
{
let inner = &mut graph.inner.lock();
for (node, _) in &nodes {
let node_index = inner.pg.add_node(node_entry(node));
inner.nodes.insert(node_key(node), node_index);
}
for (src, dst) in &deps {
let src = inner.nodes[&node_key(src)];
let dst = inner.nodes[&node_key(dst)];
inner.pg.add_edge(src, dst, 1.0);
}
}
// Calculate the critical path and validate it.
{
// The roots are all the sources, so we're covering the entire graph
let roots = ["download jvm", "download a", "download b", "download c"]
.iter()
.map(|n| tnode(n))
.collect::<Vec<_>>();
let (expected_total_duration, expected_critical_path) = (
Duration::from_secs(35),
vec!["download jvm", "compile b", "compile c"],
);
let (total_duration, critical_path) = graph.critical_path(&roots, &node_duration);
assert_eq!(expected_total_duration, total_duration);
let critical_path = critical_path
.iter()
.map(|entry| node_and_duration_from_entry(entry).0)
.collect::<Vec<_>>();
assert_eq!(expected_critical_path, critical_path);
}
{
// The roots exclude some nodes ("download jvm", "download a") from the graph.
let roots = ["download b", "download c"]
.iter()
.map(|n| tnode(n))
.collect::<Vec<_>>();
let (expected_total_duration, expected_critical_path) = (
Duration::from_secs(27),
vec!["download b", "compile b", "compile c"],
);
let (total_duration, critical_path) = graph.critical_path(&roots, &node_duration);
assert_eq!(expected_total_duration, total_duration);
let critical_path = critical_path
.iter()
.map(|entry| node_and_duration_from_entry(entry).0)
.collect::<Vec<_>>();
assert_eq!(expected_critical_path, critical_path);
}
}
///
/// A token containing the id of a Node and the id of a Context, respectively. Has a short name
/// to minimize the verbosity of tests.
///
#[derive(Clone, Debug, Eq, PartialEq)]
struct T(usize, usize);
///
/// A node that builds a Vec of tokens by recursively requesting itself and appending its value
/// to the result.
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct TNode(usize);
impl Node for TNode {
type Context = TContext;
type Item = Vec<T>;
type Error = TError;
fn run(self, context: TContext) -> BoxFuture<Vec<T>, TError> {
context.ran(self.clone());
let token = T(self.0, context.salt());
if let Some(dep) = context.dependency_of(&self) {
context.maybe_delay(&self);
context
.get(dep)
.map(move |mut v| {
v.push(token);
v
})
.to_boxed()
} else {
future::ok(vec![token]).to_boxed()
}
}
fn digest(_result: Self::Item) -> Option<Digest> {
None
}
fn cacheable(&self, context: &Self::Context) -> bool {
!context.uncacheable.contains(self)
}
}
impl std::fmt::Display for TNode {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
write!(f, "{:?}", self)
}
}
impl TNode {
///
/// Validates the given TNode output. Both node ids and context ids should increase left to
/// right: node ids monotonically, and context ids non-monotonically.
///
/// Valid:
/// (0,0), (1,1), (2,2), (3,3)
/// (0,0), (1,0), (2,1), (3,1)
///
/// Invalid:
/// (0,0), (1,1), (2,1), (3,0)
/// (0,0), (1,0), (2,0), (1,0)
///
/// If successful, returns the count of distinct context ids in the path.
///
fn validate(output: &Vec<T>) -> Result<usize, String> {
let (node_ids, context_ids): (Vec<_>, Vec<_>) = output
.iter()
.map(|&T(node_id, context_id)| {
// We cast to isize to allow comparison to -1.
(node_id as isize, context_id)
})
.unzip();
// Confirm monotonically ordered.
let mut previous: isize = -1;
for node_id in node_ids {
if previous + 1!= node_id {
return Err(format!(
"Node ids in {:?} were not monotonically ordered.",
output
));
}
previous = node_id;
}
// Confirm ordered (non-monotonically).
let mut previous: usize = 0;
for &context_id in &context_ids {
if previous > context_id {
return Err(format!("Context ids in {:?} were not ordered.", output));
}
previous = context_id;
}
Ok(context_ids.into_iter().collect::<HashSet<_>>().len())
}
}
///
/// A context that keeps a record of Nodes that have been run.
///
#[derive(Clone)]
struct TContext {
session_id: usize,
// A value that is included in every value computed by this context. Stands in for "the state of the
// outside world". A test that wants to "change the outside world" and observe its effect on the
// graph should change the salt to do so.
salt: usize,
// A mapping from source to optional destination that drives what values each TNode depends on.
// If there is no entry in this map for a node, then TNode::run will default to requesting
// the next smallest node. Finally, if a None entry is present, a node will have no
// dependencies.
edges: Arc<HashMap<TNode, Option<TNode>>>,
delays: Arc<HashMap<TNode, Duration>>,
uncacheable: Arc<HashSet<TNode>>,
graph: Arc<Graph<TNode>>,
runs: Arc<Mutex<Vec<TNode>>>,
entry_id: Option<EntryId>,
}
impl NodeContext for TContext {
type Node = TNode;
type SessionId = usize;
fn clone_for(&self, entry_id: EntryId) -> TContext {
TContext {
session_id: self.session_id,
salt: self.salt,
edges: self.edges.clone(),
delays: self.delays.clone(),
uncacheable: self.uncacheable.clone(),
graph: self.graph.clone(),
runs: self.runs.clone(),
entry_id: Some(entry_id),
}
}
fn session_id(&self) -> &usize {
&self.session_id
}
fn graph(&self) -> &Graph<TNode> {
&self.graph
}
fn spawn<F>(&self, future: F)
where
F: Future<Item = (), Error = ()> + Send +'static,
{
// Avoids introducing a dependency on a threadpool.
thread::spawn(move || {
future.wait().unwrap();
});
}
}
impl TContext {
fn new(graph: Arc<Graph<TNode>>) -> TContext {
TContext {
session_id: 0,
salt: 0,
edges: Arc::default(),
delays: Arc::default(),
uncacheable: Arc::default(),
graph,
runs: Arc::new(Mutex::new(Vec::new())),
entry_id: None,
}
}
fn with_dependencies(mut self, edges: HashMap<TNode, Option<TNode>>) -> TContext {
self.edges = Arc::new(edges);
self
}
fn with_delays(mut self, delays: HashMap<TNode, Duration>) -> TContext {
self.delays = Arc::new(delays);
self
}
fn with_uncacheable(mut self, uncacheable: HashSet<TNode>) -> TContext {
self.uncacheable = Arc::new(uncacheable);
self
}
fn with_salt(mut self, salt: usize) -> TContext {
self.salt = salt;
self
}
fn new_session(mut self, new_session_id: usize) -> TContext {
self.session_id = new_session_id;
{
let mut runs = self.runs.lock();
runs.clear();
}
self
}
fn salt(&self) -> usize {
self.salt
}
fn get(&self, dst: TNode) -> BoxFuture<Vec<T>, TError> {
self.graph.get(self.entry_id.unwrap(), self, dst)
}
fn ran(&self, node: TNode) {
let mut runs = self.runs.lock();
runs.push(node);
}
fn maybe_delay(&self, node: &TNode) {
if let Some(delay) = self.delays.get(node) {
thread::sleep(*delay);
}
}
///
/// If the given TNode should declare a dependency on another TNode, returns that dependency.
///
fn dependency_of(&self, node: &TNode) -> Option<TNode> {
match self.edges.get(node) {
Some(Some(ref dep)) => Some(dep.clone()),
Some(None) => None,
None if node.0 > 0 => Some(TNode(node.0 - 1)),
None => None,
}
}
fn runs(&self) -> Vec<TNode> {
self.runs.lock().clone()
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
enum TError {
Cyclic,
Invalidated,
}
impl NodeError for TError {
fn invalidated() -> Self {
TError::Invalidated
}
fn cyclic(_path: Vec<String>) -> Self {
TError::Cyclic
}
}
|
critical_path
|
identifier_name
|
ast_util.rs
|
match b {
and => true,
or => true,
_ => false
}
}
pub fn is_shift_binop(b: binop) -> bool {
match b {
shl => true,
shr => true,
_ => false
}
}
pub fn unop_to_str(op: unop) -> ~str {
match op {
box(mt) => if mt == m_mutbl { ~"@mut " } else { ~"@" },
uniq(mt) => if mt == m_mutbl { ~"~mut " } else { ~"~" },
deref => ~"*",
not => ~"!",
neg => ~"-"
}
}
pub fn is_path(e: @expr) -> bool {
return match e.node { expr_path(_) => true, _ => false };
}
pub fn int_ty_to_str(t: int_ty) -> ~str {
match t {
ty_char => ~"u8", //???
ty_i => ~"",
ty_i8 => ~"i8",
ty_i16 => ~"i16",
ty_i32 => ~"i32",
ty_i64 => ~"i64"
}
}
pub fn int_ty_max(t: int_ty) -> u64 {
match t {
ty_i8 => 0x80u64,
ty_i16 => 0x8000u64,
ty_i | ty_char | ty_i32 => 0x80000000u64, // actually ni about ty_i
ty_i64 => 0x8000000000000000u64
}
}
pub fn uint_ty_to_str(t: uint_ty) -> ~str {
match t {
ty_u => ~"u",
ty_u8 => ~"u8",
ty_u16 => ~"u16",
ty_u32 => ~"u32",
ty_u64 => ~"u64"
}
}
pub fn uint_ty_max(t: uint_ty) -> u64 {
match t {
ty_u8 => 0xffu64,
ty_u16 => 0xffffu64,
ty_u | ty_u32 => 0xffffffffu64, // actually ni about ty_u
ty_u64 => 0xffffffffffffffffu64
}
}
pub fn float_ty_to_str(t: float_ty) -> ~str {
match t { ty_f => ~"f", ty_f32 => ~"f32", ty_f64 => ~"f64" }
}
pub fn is_call_expr(e: @expr) -> bool {
match e.node { expr_call(_, _, _) => true, _ => false }
}
// This makes def_id hashable
#[cfg(stage0)]
impl to_bytes::IterBytes for def_id {
#[inline(always)]
fn iter_bytes(&self, lsb0: bool, f: to_bytes::Cb) {
to_bytes::iter_bytes_2(&self.crate, &self.node, lsb0, f);
}
}
// This makes def_id hashable
#[cfg(not(stage0))]
impl to_bytes::IterBytes for def_id {
#[inline(always)]
fn
|
(&self, lsb0: bool, f: to_bytes::Cb) -> bool {
to_bytes::iter_bytes_2(&self.crate, &self.node, lsb0, f)
}
}
pub fn block_from_expr(e: @expr) -> blk {
let blk_ = default_block(~[], option::Some::<@expr>(e), e.id);
return spanned {node: blk_, span: e.span};
}
pub fn default_block(
stmts1: ~[@stmt],
expr1: Option<@expr>,
id1: node_id
) -> blk_ {
ast::blk_ {
view_items: ~[],
stmts: stmts1,
expr: expr1,
id: id1,
rules: default_blk,
}
}
pub fn ident_to_path(s: span, i: ident) -> @Path {
@ast::Path { span: s,
global: false,
idents: ~[i],
rp: None,
types: ~[] }
}
pub fn ident_to_pat(id: node_id, s: span, i: ident) -> @pat {
@ast::pat { id: id,
node: pat_ident(bind_by_copy, ident_to_path(s, i), None),
span: s }
}
pub fn is_unguarded(a: &arm) -> bool {
match a.guard {
None => true,
_ => false
}
}
pub fn unguarded_pat(a: &arm) -> Option<~[@pat]> {
if is_unguarded(a) { Some(/* FIXME (#2543) */ copy a.pats) } else { None }
}
pub fn public_methods(ms: ~[@method]) -> ~[@method] {
do ms.filtered |m| {
match m.vis {
public => true,
_ => false
}
}
}
// extract a ty_method from a trait_method. if the trait_method is
// a default, pull out the useful fields to make a ty_method
pub fn trait_method_to_ty_method(method: &trait_method) -> ty_method {
match *method {
required(ref m) => copy *m,
provided(ref m) => {
ty_method {
ident: m.ident,
attrs: copy m.attrs,
purity: m.purity,
decl: copy m.decl,
generics: copy m.generics,
explicit_self: m.explicit_self,
id: m.id,
span: m.span,
}
}
}
}
pub fn split_trait_methods(trait_methods: &[trait_method])
-> (~[ty_method], ~[@method]) {
let mut reqd = ~[], provd = ~[];
for trait_methods.each |trt_method| {
match *trt_method {
required(ref tm) => reqd.push(copy *tm),
provided(m) => provd.push(m)
}
};
(reqd, provd)
}
pub fn struct_field_visibility(field: ast::struct_field) -> visibility {
match field.node.kind {
ast::named_field(_, visibility) => visibility,
ast::unnamed_field => ast::public
}
}
pub trait inlined_item_utils {
fn ident(&self) -> ident;
fn id(&self) -> ast::node_id;
fn accept<E: Copy>(&self, e: E, v: visit::vt<E>);
}
impl inlined_item_utils for inlined_item {
fn ident(&self) -> ident {
match *self {
ii_item(i) => /* FIXME (#2543) */ copy i.ident,
ii_foreign(i) => /* FIXME (#2543) */ copy i.ident,
ii_method(_, m) => /* FIXME (#2543) */ copy m.ident,
}
}
fn id(&self) -> ast::node_id {
match *self {
ii_item(i) => i.id,
ii_foreign(i) => i.id,
ii_method(_, m) => m.id,
}
}
fn accept<E: Copy>(&self, e: E, v: visit::vt<E>) {
match *self {
ii_item(i) => (v.visit_item)(i, e, v),
ii_foreign(i) => (v.visit_foreign_item)(i, e, v),
ii_method(_, m) => visit::visit_method_helper(m, e, v),
}
}
}
/* True if d is either a def_self, or a chain of def_upvars
referring to a def_self */
pub fn is_self(d: ast::def) -> bool {
match d {
def_self(*) => true,
def_upvar(_, d, _, _) => is_self(*d),
_ => false
}
}
/// Maps a binary operator to its precedence
pub fn operator_prec(op: ast::binop) -> uint {
match op {
mul | div | rem => 12u,
// 'as' sits between here with 11
add | subtract => 10u,
shl | shr => 9u,
bitand => 8u,
bitxor => 7u,
bitor => 6u,
lt | le | ge | gt => 4u,
eq | ne => 3u,
and => 2u,
or => 1u
}
}
/// Precedence of the `as` operator, which is a binary operator
/// not appearing in the prior table.
pub static as_prec: uint = 11u;
pub fn empty_generics() -> Generics {
Generics {lifetimes: opt_vec::Empty,
ty_params: opt_vec::Empty}
}
// ______________________________________________________________________
// Enumerating the IDs which appear in an AST
#[deriving(Encodable, Decodable)]
pub struct id_range {
min: node_id,
max: node_id,
}
pub impl id_range {
fn max() -> id_range {
id_range {min: int::max_value,
max: int::min_value}
}
fn empty(&self) -> bool {
self.min >= self.max
}
fn add(&mut self, id: node_id) {
self.min = int::min(self.min, id);
self.max = int::max(self.max, id + 1);
}
}
pub fn id_visitor(vfn: @fn(node_id)) -> visit::vt<()> {
let visit_generics: @fn(&Generics) = |generics| {
for generics.ty_params.each |p| {
vfn(p.id);
}
for generics.lifetimes.each |p| {
vfn(p.id);
}
};
visit::mk_simple_visitor(@visit::SimpleVisitor {
visit_mod: |_m, _sp, id| vfn(id),
visit_view_item: |vi| {
match vi.node {
view_item_extern_mod(_, _, id) => vfn(id),
view_item_use(ref vps) => {
for vps.each |vp| {
match vp.node {
view_path_simple(_, _, id) => vfn(id),
view_path_glob(_, id) => vfn(id),
view_path_list(_, ref paths, id) => {
vfn(id);
for paths.each |p| {
vfn(p.node.id);
}
}
}
}
}
}
},
visit_foreign_item: |ni| vfn(ni.id),
visit_item: |i| {
vfn(i.id);
match i.node {
item_enum(ref enum_definition, _) =>
for (*enum_definition).variants.each |v| { vfn(v.node.id); },
_ => ()
}
},
visit_local: |l| vfn(l.node.id),
visit_block: |b| vfn(b.node.id),
visit_stmt: |s| vfn(ast_util::stmt_id(s)),
visit_arm: |_| {},
visit_pat: |p| vfn(p.id),
visit_decl: |_| {},
visit_expr: |e| {
vfn(e.callee_id);
vfn(e.id);
},
visit_expr_post: |_| {},
visit_ty: |t| {
match t.node {
ty_path(_, id) => vfn(id),
_ => { /* fall through */ }
}
},
visit_generics: visit_generics,
visit_fn: |fk, d, _, _, id| {
vfn(id);
match *fk {
visit::fk_item_fn(_, generics, _, _) => {
visit_generics(generics);
}
visit::fk_method(_, generics, m) => {
vfn(m.self_id);
visit_generics(generics);
}
visit::fk_anon(_) |
visit::fk_fn_block => {
}
}
for d.inputs.each |arg| {
vfn(arg.id)
}
},
visit_ty_method: |_| {},
visit_trait_method: |_| {},
visit_struct_def: |_, _, _, _| {},
visit_struct_field: |f| vfn(f.node.id),
visit_struct_method: |_| {}
})
}
pub fn visit_ids_for_inlined_item(item: &inlined_item, vfn: @fn(node_id)) {
item.accept((), id_visitor(vfn));
}
pub fn compute_id_range(visit_ids_fn: &fn(@fn(node_id))) -> id_range {
let result = @mut id_range::max();
do visit_ids_fn |id| {
result.add(id);
}
*result
}
pub fn compute_id_range_for_inlined_item(item: &inlined_item) -> id_range {
compute_id_range(|f| visit_ids_for_inlined_item(item, f))
}
pub fn is_item_impl(item: @ast::item) -> bool {
match item.node {
item_impl(*) => true,
_ => false
}
}
pub fn walk_pat(pat: @pat, it: &fn(@pat)) {
it(pat);
match pat.node {
pat_ident(_, _, Some(p)) => walk_pat(p, it),
pat_struct(_, ref fields, _) => {
for fields.each |f| {
walk_pat(f.pat, it)
}
}
pat_enum(_, Some(ref s)) | pat_tup(ref s) => {
for s.each |p| {
walk_pat(*p, it)
}
}
pat_box(s) | pat_uniq(s) | pat_region(s) => {
walk_pat(s, it)
}
pat_vec(ref before, ref slice, ref after) => {
for before.each |p| {
walk_pat(*p, it)
}
for slice.each |p| {
walk_pat(*p, it)
}
for after.each |p| {
walk_pat(*p, it)
}
}
pat_wild | pat_lit(_) | pat_range(_, _) | pat_ident(_, _, _) |
pat_enum(_, _) => { }
}
}
pub fn view_path_id(p: @view_path) -> node_id {
match p.node {
view_path_simple(_, _, id) |
view_path_glob(_, id) |
view_path_list(_, _, id) => id
}
}
/// Returns true if the given struct def is tuple-like; i.e. that its fields
/// are unnamed.
pub fn struct_def_is_tuple_like(struct_def: @ast::struct_def) -> bool {
struct_def.ctor_id.is_some()
}
pub fn visibility_to_privacy(visibility: visibility) -> Privacy {
match visibility {
public => Public,
inherited | private => Private
}
}
pub fn variant_visibility_to_privacy(visibility: visibility,
enclosing_is_public: bool)
-> Privacy {
if enclosing_is_public {
match visibility {
public | inherited => Public,
private => Private
}
} else {
visibility_to_privacy(visibility)
}
}
#[deriving(Eq)]
pub enum Privacy {
Private,
Public
}
// HYGIENE FUNCTIONS
/// Construct an identifier with the given repr and an empty context:
pub fn mk_ident(repr: uint) -> ident { ident {repr: repr, ctxt: 0}}
/// Extend a syntax context with a given mark
pub fn mk_mark (m:Mrk,ctxt:SyntaxContext,table:&mut SCTable)
-> SyntaxContext {
idx_push(table,Mark(m,ctxt))
}
/// Extend a syntax context with a given rename
pub fn mk_rename (id:ident, to:Name, tail:SyntaxContext, table: &mut SCTable)
-> SyntaxContext {
idx_push(table,Rename(id,to,tail))
}
/// Make a fresh syntax context table with EmptyCtxt in slot zero
pub fn mk_sctable() -> SCTable { ~[EmptyCtxt] }
/// Add a value to the end of a vec, return its index
fn idx_push<T>(vec: &mut ~[T], val: T) -> uint {
vec.push(val);
vec.len() - 1
}
/// Resolve a syntax object to a name, per MTWT.
pub fn resolve (id : ident, table : &SCTable) -> Name {
match table[id.ctxt] {
EmptyCtxt => id.repr,
// ignore marks here:
Mark(_,subctxt) => resolve (ident{repr:id.repr, ctxt: subctxt},table),
// do the rename if necessary:
Rename(ident{repr,ctxt},toname,subctxt) => {
// this could be cached or computed eagerly:
let resolvedfrom = resolve(ident{repr:repr,ctxt:ctxt},table);
let resolvedthis = resolve(ident{repr:id.repr,ctxt:subctxt},table);
if ((resolvedthis == resolvedfrom)
&& (marksof (ctxt,resolvedthis,table)
== marksof (subctxt,resolvedthis,table))) {
toname
} else {
resolvedthis
}
}
}
}
/// Compute the marks associated with a syntax context.
// it's not clear to me whether it's better to use a [] mutable
// vector or a cons-list for this.
pub fn marksof(ctxt: SyntaxContext, stopname: Name, table: &SCTable) -> ~[Mrk] {
let mut result = ~[];
let mut loopvar = ctxt;
loop {
match table[loopvar] {
EmptyCtxt => {return result;},
Mark(mark,tl) => {
xorPush(&mut result,mark);
loopvar = tl;
},
Rename(_,name,tl) => {
// see MTWT for details on the purpose of the stopname.
// short version: it prevents duplication of effort.
if (name == stopname) {
return result;
} else {
loopvar = tl;
}
}
}
}
}
/// Push a name... unless it matches the one on top, in which
/// case pop and discard (so two of the same marks cancel)
pub fn xorPush(marks: &mut ~[uint], mark: uint) {
if ((marks.len() > 0) && (getLast(marks) == mark)) {
marks.pop();
} else {
marks.push(mark);
}
}
// get the last element of a mutable array.
// FIXME #4903:, must be a separate procedure for now.
pub fn getLast(arr: &~[Mrk]) -> uint {
*arr.last()
}
#[cfg(test)]
mod test {
use ast::*;
use super::*;
use core::io;
#[test] fn xorpush_test () {
let mut s = ~[];
xorPush(&mut s,14);
assert_eq!(copy s,~[14]);
xorPush(&mut s,14);
assert_eq!(copy s,~[]);
xorPush(&mut s,14);
assert_eq!(copy s,~[14]);
xorPush(&mut s,15);
assert_eq!(copy s,~[14,15]);
xorPush (&mut s,16);
assert_eq!(copy s,~[14,15,16]);
xorPush (&mut s,16);
assert_eq!(copy s,~[14,15]);
xorPush (&mut s,15);
assert_eq!(copy s,~[14]);
}
// convert a list of uints to an @~[ident]
// (ignores the interner completely)
fn uints_to_idents (uints: &~[uint]) -> @~[ident] {
@uints.map(|u|{ ident {repr:*u, ctxt: empty_ctxt} })
}
fn id (u : uint, s: SyntaxContext) -> ident {
ident{repr:u, ctxt: s}
}
// because of the SCTable, I now need a tidy way of
// creating syntax objects. Sigh.
#[deriving(Eq)]
enum TestSC {
M(Mrk),
R(ident,Name)
}
|
iter_bytes
|
identifier_name
|
ast_util.rs
|
pub fn def_id_of_def(d: def) -> def_id {
match d {
def_fn(id, _) | def_static_method(id, _, _) | def_mod(id) |
def_foreign_mod(id) | def_const(id) |
def_variant(_, id) | def_ty(id) | def_ty_param(id, _) |
def_use(id) | def_struct(id) | def_trait(id) => {
id
}
def_arg(id, _) | def_local(id, _) | def_self(id, _) | def_self_ty(id)
| def_upvar(id, _, _, _) | def_binding(id, _) | def_region(id)
| def_typaram_binder(id) | def_label(id) => {
local_def(id)
}
def_prim_ty(_) => fail!()
}
}
pub fn binop_to_str(op: binop) -> ~str {
match op {
add => return ~"+",
subtract => return ~"-",
mul => return ~"*",
div => return ~"/",
rem => return ~"%",
and => return ~"&&",
or => return ~"||",
bitxor => return ~"^",
bitand => return ~"&",
bitor => return ~"|",
shl => return ~"<<",
shr => return ~">>",
eq => return ~"==",
lt => return ~"<",
le => return ~"<=",
ne => return ~"!=",
ge => return ~">=",
gt => return ~">"
}
}
pub fn binop_to_method_name(op: binop) -> Option<~str> {
match op {
add => return Some(~"add"),
subtract => return Some(~"sub"),
mul => return Some(~"mul"),
div => return Some(~"div"),
rem => return Some(~"rem"),
bitxor => return Some(~"bitxor"),
bitand => return Some(~"bitand"),
bitor => return Some(~"bitor"),
shl => return Some(~"shl"),
shr => return Some(~"shr"),
lt => return Some(~"lt"),
le => return Some(~"le"),
ge => return Some(~"ge"),
gt => return Some(~"gt"),
eq => return Some(~"eq"),
ne => return Some(~"ne"),
and | or => return None
}
}
pub fn lazy_binop(b: binop) -> bool {
match b {
and => true,
or => true,
_ => false
}
}
pub fn is_shift_binop(b: binop) -> bool {
match b {
shl => true,
shr => true,
_ => false
}
}
pub fn unop_to_str(op: unop) -> ~str {
match op {
box(mt) => if mt == m_mutbl { ~"@mut " } else { ~"@" },
uniq(mt) => if mt == m_mutbl { ~"~mut " } else { ~"~" },
deref => ~"*",
not => ~"!",
neg => ~"-"
}
}
pub fn is_path(e: @expr) -> bool {
return match e.node { expr_path(_) => true, _ => false };
}
pub fn int_ty_to_str(t: int_ty) -> ~str {
match t {
ty_char => ~"u8", //???
ty_i => ~"",
ty_i8 => ~"i8",
ty_i16 => ~"i16",
ty_i32 => ~"i32",
ty_i64 => ~"i64"
}
}
pub fn int_ty_max(t: int_ty) -> u64 {
match t {
ty_i8 => 0x80u64,
ty_i16 => 0x8000u64,
ty_i | ty_char | ty_i32 => 0x80000000u64, // actually ni about ty_i
ty_i64 => 0x8000000000000000u64
}
}
pub fn uint_ty_to_str(t: uint_ty) -> ~str {
match t {
ty_u => ~"u",
ty_u8 => ~"u8",
ty_u16 => ~"u16",
ty_u32 => ~"u32",
ty_u64 => ~"u64"
}
}
pub fn uint_ty_max(t: uint_ty) -> u64 {
match t {
ty_u8 => 0xffu64,
ty_u16 => 0xffffu64,
ty_u | ty_u32 => 0xffffffffu64, // actually ni about ty_u
ty_u64 => 0xffffffffffffffffu64
}
}
pub fn float_ty_to_str(t: float_ty) -> ~str {
match t { ty_f => ~"f", ty_f32 => ~"f32", ty_f64 => ~"f64" }
}
pub fn is_call_expr(e: @expr) -> bool {
match e.node { expr_call(_, _, _) => true, _ => false }
}
// This makes def_id hashable
#[cfg(stage0)]
impl to_bytes::IterBytes for def_id {
#[inline(always)]
fn iter_bytes(&self, lsb0: bool, f: to_bytes::Cb) {
to_bytes::iter_bytes_2(&self.crate, &self.node, lsb0, f);
}
}
// This makes def_id hashable
#[cfg(not(stage0))]
impl to_bytes::IterBytes for def_id {
#[inline(always)]
fn iter_bytes(&self, lsb0: bool, f: to_bytes::Cb) -> bool {
to_bytes::iter_bytes_2(&self.crate, &self.node, lsb0, f)
}
}
pub fn block_from_expr(e: @expr) -> blk {
let blk_ = default_block(~[], option::Some::<@expr>(e), e.id);
return spanned {node: blk_, span: e.span};
}
pub fn default_block(
stmts1: ~[@stmt],
expr1: Option<@expr>,
id1: node_id
) -> blk_ {
ast::blk_ {
view_items: ~[],
stmts: stmts1,
expr: expr1,
id: id1,
rules: default_blk,
}
}
pub fn ident_to_path(s: span, i: ident) -> @Path {
@ast::Path { span: s,
global: false,
idents: ~[i],
rp: None,
types: ~[] }
}
pub fn ident_to_pat(id: node_id, s: span, i: ident) -> @pat {
@ast::pat { id: id,
node: pat_ident(bind_by_copy, ident_to_path(s, i), None),
span: s }
}
pub fn is_unguarded(a: &arm) -> bool {
match a.guard {
None => true,
_ => false
}
}
pub fn unguarded_pat(a: &arm) -> Option<~[@pat]> {
if is_unguarded(a) { Some(/* FIXME (#2543) */ copy a.pats) } else { None }
}
pub fn public_methods(ms: ~[@method]) -> ~[@method] {
do ms.filtered |m| {
match m.vis {
public => true,
_ => false
}
}
}
// extract a ty_method from a trait_method. if the trait_method is
// a default, pull out the useful fields to make a ty_method
pub fn trait_method_to_ty_method(method: &trait_method) -> ty_method {
match *method {
required(ref m) => copy *m,
provided(ref m) => {
ty_method {
ident: m.ident,
attrs: copy m.attrs,
purity: m.purity,
decl: copy m.decl,
generics: copy m.generics,
explicit_self: m.explicit_self,
id: m.id,
span: m.span,
}
}
}
}
pub fn split_trait_methods(trait_methods: &[trait_method])
-> (~[ty_method], ~[@method]) {
let mut reqd = ~[], provd = ~[];
for trait_methods.each |trt_method| {
match *trt_method {
required(ref tm) => reqd.push(copy *tm),
provided(m) => provd.push(m)
}
};
(reqd, provd)
}
pub fn struct_field_visibility(field: ast::struct_field) -> visibility {
match field.node.kind {
ast::named_field(_, visibility) => visibility,
ast::unnamed_field => ast::public
}
}
pub trait inlined_item_utils {
fn ident(&self) -> ident;
fn id(&self) -> ast::node_id;
fn accept<E: Copy>(&self, e: E, v: visit::vt<E>);
}
impl inlined_item_utils for inlined_item {
fn ident(&self) -> ident {
match *self {
ii_item(i) => /* FIXME (#2543) */ copy i.ident,
ii_foreign(i) => /* FIXME (#2543) */ copy i.ident,
ii_method(_, m) => /* FIXME (#2543) */ copy m.ident,
}
}
fn id(&self) -> ast::node_id {
match *self {
ii_item(i) => i.id,
ii_foreign(i) => i.id,
ii_method(_, m) => m.id,
}
}
fn accept<E: Copy>(&self, e: E, v: visit::vt<E>) {
match *self {
ii_item(i) => (v.visit_item)(i, e, v),
ii_foreign(i) => (v.visit_foreign_item)(i, e, v),
ii_method(_, m) => visit::visit_method_helper(m, e, v),
}
}
}
/* True if d is either a def_self, or a chain of def_upvars
referring to a def_self */
pub fn is_self(d: ast::def) -> bool {
match d {
def_self(*) => true,
def_upvar(_, d, _, _) => is_self(*d),
_ => false
}
}
/// Maps a binary operator to its precedence
pub fn operator_prec(op: ast::binop) -> uint {
match op {
mul | div | rem => 12u,
// 'as' sits between here with 11
add | subtract => 10u,
shl | shr => 9u,
bitand => 8u,
bitxor => 7u,
bitor => 6u,
lt | le | ge | gt => 4u,
eq | ne => 3u,
and => 2u,
or => 1u
}
}
/// Precedence of the `as` operator, which is a binary operator
/// not appearing in the prior table.
pub static as_prec: uint = 11u;
pub fn empty_generics() -> Generics {
Generics {lifetimes: opt_vec::Empty,
ty_params: opt_vec::Empty}
}
// ______________________________________________________________________
// Enumerating the IDs which appear in an AST
#[deriving(Encodable, Decodable)]
pub struct id_range {
min: node_id,
max: node_id,
}
pub impl id_range {
fn max() -> id_range {
id_range {min: int::max_value,
max: int::min_value}
}
fn empty(&self) -> bool {
self.min >= self.max
}
fn add(&mut self, id: node_id) {
self.min = int::min(self.min, id);
self.max = int::max(self.max, id + 1);
}
}
pub fn id_visitor(vfn: @fn(node_id)) -> visit::vt<()> {
let visit_generics: @fn(&Generics) = |generics| {
for generics.ty_params.each |p| {
vfn(p.id);
}
for generics.lifetimes.each |p| {
vfn(p.id);
}
};
visit::mk_simple_visitor(@visit::SimpleVisitor {
visit_mod: |_m, _sp, id| vfn(id),
visit_view_item: |vi| {
match vi.node {
view_item_extern_mod(_, _, id) => vfn(id),
view_item_use(ref vps) => {
for vps.each |vp| {
match vp.node {
view_path_simple(_, _, id) => vfn(id),
view_path_glob(_, id) => vfn(id),
view_path_list(_, ref paths, id) => {
vfn(id);
for paths.each |p| {
vfn(p.node.id);
}
}
}
}
}
}
},
visit_foreign_item: |ni| vfn(ni.id),
visit_item: |i| {
vfn(i.id);
match i.node {
item_enum(ref enum_definition, _) =>
for (*enum_definition).variants.each |v| { vfn(v.node.id); },
_ => ()
}
},
visit_local: |l| vfn(l.node.id),
visit_block: |b| vfn(b.node.id),
visit_stmt: |s| vfn(ast_util::stmt_id(s)),
visit_arm: |_| {},
visit_pat: |p| vfn(p.id),
visit_decl: |_| {},
visit_expr: |e| {
vfn(e.callee_id);
vfn(e.id);
},
visit_expr_post: |_| {},
visit_ty: |t| {
match t.node {
ty_path(_, id) => vfn(id),
_ => { /* fall through */ }
}
},
visit_generics: visit_generics,
visit_fn: |fk, d, _, _, id| {
vfn(id);
match *fk {
visit::fk_item_fn(_, generics, _, _) => {
visit_generics(generics);
}
visit::fk_method(_, generics, m) => {
vfn(m.self_id);
visit_generics(generics);
}
visit::fk_anon(_) |
visit::fk_fn_block => {
}
}
for d.inputs.each |arg| {
vfn(arg.id)
}
},
visit_ty_method: |_| {},
visit_trait_method: |_| {},
visit_struct_def: |_, _, _, _| {},
visit_struct_field: |f| vfn(f.node.id),
visit_struct_method: |_| {}
})
}
pub fn visit_ids_for_inlined_item(item: &inlined_item, vfn: @fn(node_id)) {
item.accept((), id_visitor(vfn));
}
pub fn compute_id_range(visit_ids_fn: &fn(@fn(node_id))) -> id_range {
let result = @mut id_range::max();
do visit_ids_fn |id| {
result.add(id);
}
*result
}
pub fn compute_id_range_for_inlined_item(item: &inlined_item) -> id_range {
compute_id_range(|f| visit_ids_for_inlined_item(item, f))
}
pub fn is_item_impl(item: @ast::item) -> bool {
match item.node {
item_impl(*) => true,
_ => false
}
}
pub fn walk_pat(pat: @pat, it: &fn(@pat)) {
it(pat);
match pat.node {
pat_ident(_, _, Some(p)) => walk_pat(p, it),
pat_struct(_, ref fields, _) => {
for fields.each |f| {
walk_pat(f.pat, it)
}
}
pat_enum(_, Some(ref s)) | pat_tup(ref s) => {
for s.each |p| {
walk_pat(*p, it)
}
}
pat_box(s) | pat_uniq(s) | pat_region(s) => {
walk_pat(s, it)
}
pat_vec(ref before, ref slice, ref after) => {
for before.each |p| {
walk_pat(*p, it)
}
for slice.each |p| {
walk_pat(*p, it)
}
for after.each |p| {
walk_pat(*p, it)
}
}
pat_wild | pat_lit(_) | pat_range(_, _) | pat_ident(_, _, _) |
pat_enum(_, _) => { }
}
}
pub fn view_path_id(p: @view_path) -> node_id {
match p.node {
view_path_simple(_, _, id) |
view_path_glob(_, id) |
view_path_list(_, _, id) => id
}
}
/// Returns true if the given struct def is tuple-like; i.e. that its fields
/// are unnamed.
pub fn struct_def_is_tuple_like(struct_def: @ast::struct_def) -> bool {
struct_def.ctor_id.is_some()
}
pub fn visibility_to_privacy(visibility: visibility) -> Privacy {
match visibility {
public => Public,
inherited | private => Private
}
}
pub fn variant_visibility_to_privacy(visibility: visibility,
enclosing_is_public: bool)
-> Privacy {
if enclosing_is_public {
match visibility {
public | inherited => Public,
private => Private
}
} else {
visibility_to_privacy(visibility)
}
}
#[deriving(Eq)]
pub enum Privacy {
Private,
Public
}
// HYGIENE FUNCTIONS
/// Construct an identifier with the given repr and an empty context:
pub fn mk_ident(repr: uint) -> ident { ident {repr: repr, ctxt: 0}}
/// Extend a syntax context with a given mark
pub fn mk_mark (m:Mrk,ctxt:SyntaxContext,table:&mut SCTable)
-> SyntaxContext {
idx_push(table,Mark(m,ctxt))
}
/// Extend a syntax context with a given rename
pub fn mk_rename (id:ident, to:Name, tail:SyntaxContext, table: &mut SCTable)
-> SyntaxContext {
idx_push(table,Rename(id,to,tail))
}
/// Make a fresh syntax context table with EmptyCtxt in slot zero
pub fn mk_sctable() -> SCTable { ~[EmptyCtxt] }
/// Add a value to the end of a vec, return its index
fn idx_push<T>(vec: &mut ~[T], val: T) -> uint {
vec.push(val);
vec.len() - 1
}
/// Resolve a syntax object to a name, per MTWT.
pub fn resolve (id : ident, table : &SCTable) -> Name {
match table[id.ctxt] {
EmptyCtxt => id.repr,
// ignore marks here:
Mark(_,subctxt) => resolve (ident{repr:id.repr, ctxt: subctxt},table),
// do the rename if necessary:
Rename(ident{repr,ctxt},toname,subctxt) => {
// this could be cached or computed eagerly:
let resolvedfrom = resolve(ident{repr:repr,ctxt:ctxt},table);
let resolvedthis = resolve(ident{repr:id.repr,ctxt:subctxt},table);
if ((resolvedthis == resolvedfrom)
&& (marksof (ctxt,resolvedthis,table)
== marksof (subctxt,resolvedthis,table))) {
toname
} else {
resolvedthis
}
}
}
}
/// Compute the marks associated with a syntax context.
// it's not clear to me whether it's better to use a [] mutable
// vector or a cons-list for this.
pub fn marksof(ctxt: SyntaxContext, stopname: Name, table: &SCTable) -> ~[Mrk] {
let mut result = ~[];
let mut loopvar = ctxt;
loop {
match table[loopvar] {
EmptyCtxt => {return result;},
Mark(mark,tl) => {
xorPush(&mut result,mark);
loopvar = tl;
},
|
{
match d {
def_variant(enum_id, var_id) => {
Some((enum_id, var_id))
}
_ => None
}
}
|
identifier_body
|
|
ast_util.rs
|
match b {
and => true,
or => true,
_ => false
}
}
pub fn is_shift_binop(b: binop) -> bool {
match b {
shl => true,
shr => true,
_ => false
}
}
pub fn unop_to_str(op: unop) -> ~str {
match op {
box(mt) => if mt == m_mutbl { ~"@mut " } else { ~"@" },
uniq(mt) => if mt == m_mutbl { ~"~mut " } else { ~"~" },
deref => ~"*",
not => ~"!",
neg => ~"-"
}
}
pub fn is_path(e: @expr) -> bool {
return match e.node { expr_path(_) => true, _ => false };
}
pub fn int_ty_to_str(t: int_ty) -> ~str {
match t {
ty_char => ~"u8", //???
ty_i => ~"",
ty_i8 => ~"i8",
ty_i16 => ~"i16",
ty_i32 => ~"i32",
ty_i64 => ~"i64"
}
}
pub fn int_ty_max(t: int_ty) -> u64 {
match t {
ty_i8 => 0x80u64,
ty_i16 => 0x8000u64,
ty_i | ty_char | ty_i32 => 0x80000000u64, // actually ni about ty_i
ty_i64 => 0x8000000000000000u64
}
}
pub fn uint_ty_to_str(t: uint_ty) -> ~str {
match t {
ty_u => ~"u",
ty_u8 => ~"u8",
ty_u16 => ~"u16",
ty_u32 => ~"u32",
ty_u64 => ~"u64"
}
}
pub fn uint_ty_max(t: uint_ty) -> u64 {
match t {
ty_u8 => 0xffu64,
ty_u16 => 0xffffu64,
ty_u | ty_u32 => 0xffffffffu64, // actually ni about ty_u
ty_u64 => 0xffffffffffffffffu64
}
}
pub fn float_ty_to_str(t: float_ty) -> ~str {
match t { ty_f => ~"f", ty_f32 => ~"f32", ty_f64 => ~"f64" }
}
pub fn is_call_expr(e: @expr) -> bool {
match e.node { expr_call(_, _, _) => true, _ => false }
}
// This makes def_id hashable
#[cfg(stage0)]
impl to_bytes::IterBytes for def_id {
#[inline(always)]
fn iter_bytes(&self, lsb0: bool, f: to_bytes::Cb) {
to_bytes::iter_bytes_2(&self.crate, &self.node, lsb0, f);
}
}
// This makes def_id hashable
#[cfg(not(stage0))]
impl to_bytes::IterBytes for def_id {
#[inline(always)]
fn iter_bytes(&self, lsb0: bool, f: to_bytes::Cb) -> bool {
to_bytes::iter_bytes_2(&self.crate, &self.node, lsb0, f)
}
}
pub fn block_from_expr(e: @expr) -> blk {
let blk_ = default_block(~[], option::Some::<@expr>(e), e.id);
return spanned {node: blk_, span: e.span};
}
pub fn default_block(
stmts1: ~[@stmt],
expr1: Option<@expr>,
id1: node_id
) -> blk_ {
ast::blk_ {
view_items: ~[],
stmts: stmts1,
expr: expr1,
id: id1,
rules: default_blk,
}
}
pub fn ident_to_path(s: span, i: ident) -> @Path {
@ast::Path { span: s,
global: false,
idents: ~[i],
rp: None,
types: ~[] }
}
pub fn ident_to_pat(id: node_id, s: span, i: ident) -> @pat {
@ast::pat { id: id,
node: pat_ident(bind_by_copy, ident_to_path(s, i), None),
span: s }
}
pub fn is_unguarded(a: &arm) -> bool {
match a.guard {
None => true,
_ => false
}
}
pub fn unguarded_pat(a: &arm) -> Option<~[@pat]> {
if is_unguarded(a) { Some(/* FIXME (#2543) */ copy a.pats) } else { None }
}
pub fn public_methods(ms: ~[@method]) -> ~[@method] {
do ms.filtered |m| {
match m.vis {
public => true,
_ => false
}
}
}
// extract a ty_method from a trait_method. if the trait_method is
// a default, pull out the useful fields to make a ty_method
pub fn trait_method_to_ty_method(method: &trait_method) -> ty_method {
match *method {
required(ref m) => copy *m,
provided(ref m) => {
ty_method {
ident: m.ident,
attrs: copy m.attrs,
purity: m.purity,
decl: copy m.decl,
generics: copy m.generics,
explicit_self: m.explicit_self,
id: m.id,
span: m.span,
}
}
}
}
pub fn split_trait_methods(trait_methods: &[trait_method])
-> (~[ty_method], ~[@method]) {
let mut reqd = ~[], provd = ~[];
for trait_methods.each |trt_method| {
match *trt_method {
required(ref tm) => reqd.push(copy *tm),
provided(m) => provd.push(m)
}
};
(reqd, provd)
}
pub fn struct_field_visibility(field: ast::struct_field) -> visibility {
match field.node.kind {
ast::named_field(_, visibility) => visibility,
ast::unnamed_field => ast::public
}
}
pub trait inlined_item_utils {
fn ident(&self) -> ident;
fn id(&self) -> ast::node_id;
fn accept<E: Copy>(&self, e: E, v: visit::vt<E>);
}
impl inlined_item_utils for inlined_item {
fn ident(&self) -> ident {
match *self {
ii_item(i) => /* FIXME (#2543) */ copy i.ident,
ii_foreign(i) => /* FIXME (#2543) */ copy i.ident,
ii_method(_, m) => /* FIXME (#2543) */ copy m.ident,
}
}
fn id(&self) -> ast::node_id {
match *self {
ii_item(i) => i.id,
ii_foreign(i) => i.id,
ii_method(_, m) => m.id,
}
}
fn accept<E: Copy>(&self, e: E, v: visit::vt<E>) {
match *self {
ii_item(i) => (v.visit_item)(i, e, v),
ii_foreign(i) => (v.visit_foreign_item)(i, e, v),
ii_method(_, m) => visit::visit_method_helper(m, e, v),
}
}
}
/* True if d is either a def_self, or a chain of def_upvars
referring to a def_self */
pub fn is_self(d: ast::def) -> bool {
match d {
def_self(*) => true,
def_upvar(_, d, _, _) => is_self(*d),
_ => false
}
}
/// Maps a binary operator to its precedence
pub fn operator_prec(op: ast::binop) -> uint {
match op {
mul | div | rem => 12u,
// 'as' sits between here with 11
add | subtract => 10u,
shl | shr => 9u,
bitand => 8u,
bitxor => 7u,
bitor => 6u,
lt | le | ge | gt => 4u,
eq | ne => 3u,
and => 2u,
or => 1u
}
}
/// Precedence of the `as` operator, which is a binary operator
/// not appearing in the prior table.
pub static as_prec: uint = 11u;
pub fn empty_generics() -> Generics {
Generics {lifetimes: opt_vec::Empty,
ty_params: opt_vec::Empty}
}
// ______________________________________________________________________
// Enumerating the IDs which appear in an AST
#[deriving(Encodable, Decodable)]
pub struct id_range {
min: node_id,
max: node_id,
}
pub impl id_range {
fn max() -> id_range {
id_range {min: int::max_value,
max: int::min_value}
}
fn empty(&self) -> bool {
self.min >= self.max
}
fn add(&mut self, id: node_id) {
self.min = int::min(self.min, id);
self.max = int::max(self.max, id + 1);
}
}
pub fn id_visitor(vfn: @fn(node_id)) -> visit::vt<()> {
let visit_generics: @fn(&Generics) = |generics| {
for generics.ty_params.each |p| {
vfn(p.id);
}
for generics.lifetimes.each |p| {
vfn(p.id);
}
};
visit::mk_simple_visitor(@visit::SimpleVisitor {
visit_mod: |_m, _sp, id| vfn(id),
visit_view_item: |vi| {
match vi.node {
view_item_extern_mod(_, _, id) => vfn(id),
view_item_use(ref vps) => {
for vps.each |vp| {
match vp.node {
view_path_simple(_, _, id) => vfn(id),
view_path_glob(_, id) => vfn(id),
view_path_list(_, ref paths, id) => {
vfn(id);
for paths.each |p| {
vfn(p.node.id);
}
}
}
}
}
}
},
visit_foreign_item: |ni| vfn(ni.id),
visit_item: |i| {
vfn(i.id);
match i.node {
item_enum(ref enum_definition, _) =>
for (*enum_definition).variants.each |v| { vfn(v.node.id); },
_ => ()
}
},
visit_local: |l| vfn(l.node.id),
visit_block: |b| vfn(b.node.id),
visit_stmt: |s| vfn(ast_util::stmt_id(s)),
visit_arm: |_| {},
visit_pat: |p| vfn(p.id),
visit_decl: |_| {},
visit_expr: |e| {
vfn(e.callee_id);
vfn(e.id);
},
visit_expr_post: |_| {},
visit_ty: |t| {
match t.node {
ty_path(_, id) => vfn(id),
_ => { /* fall through */ }
}
},
visit_generics: visit_generics,
visit_fn: |fk, d, _, _, id| {
vfn(id);
match *fk {
visit::fk_item_fn(_, generics, _, _) => {
visit_generics(generics);
}
visit::fk_method(_, generics, m) => {
vfn(m.self_id);
visit_generics(generics);
}
visit::fk_anon(_) |
visit::fk_fn_block => {
}
}
for d.inputs.each |arg| {
vfn(arg.id)
}
},
visit_ty_method: |_| {},
visit_trait_method: |_| {},
visit_struct_def: |_, _, _, _| {},
visit_struct_field: |f| vfn(f.node.id),
visit_struct_method: |_| {}
})
}
pub fn visit_ids_for_inlined_item(item: &inlined_item, vfn: @fn(node_id)) {
item.accept((), id_visitor(vfn));
}
pub fn compute_id_range(visit_ids_fn: &fn(@fn(node_id))) -> id_range {
let result = @mut id_range::max();
do visit_ids_fn |id| {
result.add(id);
}
*result
}
pub fn compute_id_range_for_inlined_item(item: &inlined_item) -> id_range {
compute_id_range(|f| visit_ids_for_inlined_item(item, f))
}
pub fn is_item_impl(item: @ast::item) -> bool {
match item.node {
item_impl(*) => true,
_ => false
}
}
pub fn walk_pat(pat: @pat, it: &fn(@pat)) {
it(pat);
match pat.node {
pat_ident(_, _, Some(p)) => walk_pat(p, it),
pat_struct(_, ref fields, _) => {
for fields.each |f| {
walk_pat(f.pat, it)
}
}
pat_enum(_, Some(ref s)) | pat_tup(ref s) => {
for s.each |p| {
walk_pat(*p, it)
}
}
pat_box(s) | pat_uniq(s) | pat_region(s) => {
walk_pat(s, it)
}
pat_vec(ref before, ref slice, ref after) => {
for before.each |p| {
walk_pat(*p, it)
}
for slice.each |p| {
walk_pat(*p, it)
}
for after.each |p| {
walk_pat(*p, it)
}
}
pat_wild | pat_lit(_) | pat_range(_, _) | pat_ident(_, _, _) |
pat_enum(_, _) => { }
}
}
pub fn view_path_id(p: @view_path) -> node_id {
match p.node {
view_path_simple(_, _, id) |
view_path_glob(_, id) |
view_path_list(_, _, id) => id
}
}
/// Returns true if the given struct def is tuple-like; i.e. that its fields
/// are unnamed.
pub fn struct_def_is_tuple_like(struct_def: @ast::struct_def) -> bool {
struct_def.ctor_id.is_some()
}
pub fn visibility_to_privacy(visibility: visibility) -> Privacy {
match visibility {
public => Public,
inherited | private => Private
}
}
pub fn variant_visibility_to_privacy(visibility: visibility,
enclosing_is_public: bool)
-> Privacy {
if enclosing_is_public {
match visibility {
public | inherited => Public,
private => Private
}
} else {
visibility_to_privacy(visibility)
}
}
#[deriving(Eq)]
pub enum Privacy {
Private,
Public
}
// HYGIENE FUNCTIONS
/// Construct an identifier with the given repr and an empty context:
pub fn mk_ident(repr: uint) -> ident { ident {repr: repr, ctxt: 0}}
/// Extend a syntax context with a given mark
pub fn mk_mark (m:Mrk,ctxt:SyntaxContext,table:&mut SCTable)
-> SyntaxContext {
idx_push(table,Mark(m,ctxt))
}
/// Extend a syntax context with a given rename
pub fn mk_rename (id:ident, to:Name, tail:SyntaxContext, table: &mut SCTable)
-> SyntaxContext {
idx_push(table,Rename(id,to,tail))
}
/// Make a fresh syntax context table with EmptyCtxt in slot zero
pub fn mk_sctable() -> SCTable { ~[EmptyCtxt] }
/// Add a value to the end of a vec, return its index
fn idx_push<T>(vec: &mut ~[T], val: T) -> uint {
vec.push(val);
vec.len() - 1
}
/// Resolve a syntax object to a name, per MTWT.
pub fn resolve (id : ident, table : &SCTable) -> Name {
match table[id.ctxt] {
EmptyCtxt => id.repr,
// ignore marks here:
Mark(_,subctxt) => resolve (ident{repr:id.repr, ctxt: subctxt},table),
// do the rename if necessary:
Rename(ident{repr,ctxt},toname,subctxt) => {
// this could be cached or computed eagerly:
let resolvedfrom = resolve(ident{repr:repr,ctxt:ctxt},table);
let resolvedthis = resolve(ident{repr:id.repr,ctxt:subctxt},table);
if ((resolvedthis == resolvedfrom)
&& (marksof (ctxt,resolvedthis,table)
== marksof (subctxt,resolvedthis,table))) {
toname
} else {
resolvedthis
}
}
}
}
/// Compute the marks associated with a syntax context.
// it's not clear to me whether it's better to use a [] mutable
// vector or a cons-list for this.
pub fn marksof(ctxt: SyntaxContext, stopname: Name, table: &SCTable) -> ~[Mrk] {
let mut result = ~[];
let mut loopvar = ctxt;
loop {
match table[loopvar] {
EmptyCtxt => {return result;},
Mark(mark,tl) => {
xorPush(&mut result,mark);
loopvar = tl;
},
Rename(_,name,tl) => {
// see MTWT for details on the purpose of the stopname.
// short version: it prevents duplication of effort.
|
}
}
}
}
/// Push a name... unless it matches the one on top, in which
/// case pop and discard (so two of the same marks cancel)
pub fn xorPush(marks: &mut ~[uint], mark: uint) {
if ((marks.len() > 0) && (getLast(marks) == mark)) {
marks.pop();
} else {
marks.push(mark);
}
}
// get the last element of a mutable array.
// FIXME #4903:, must be a separate procedure for now.
pub fn getLast(arr: &~[Mrk]) -> uint {
*arr.last()
}
#[cfg(test)]
mod test {
use ast::*;
use super::*;
use core::io;
#[test] fn xorpush_test () {
let mut s = ~[];
xorPush(&mut s,14);
assert_eq!(copy s,~[14]);
xorPush(&mut s,14);
assert_eq!(copy s,~[]);
xorPush(&mut s,14);
assert_eq!(copy s,~[14]);
xorPush(&mut s,15);
assert_eq!(copy s,~[14,15]);
xorPush (&mut s,16);
assert_eq!(copy s,~[14,15,16]);
xorPush (&mut s,16);
assert_eq!(copy s,~[14,15]);
xorPush (&mut s,15);
assert_eq!(copy s,~[14]);
}
// convert a list of uints to an @~[ident]
// (ignores the interner completely)
fn uints_to_idents (uints: &~[uint]) -> @~[ident] {
@uints.map(|u|{ ident {repr:*u, ctxt: empty_ctxt} })
}
fn id (u : uint, s: SyntaxContext) -> ident {
ident{repr:u, ctxt: s}
}
// because of the SCTable, I now need a tidy way of
// creating syntax objects. Sigh.
#[deriving(Eq)]
enum TestSC {
M(Mrk),
R(ident,Name)
}
|
if (name == stopname) {
return result;
} else {
loopvar = tl;
}
|
random_line_split
|
level_bar.rs
|
// This file is part of rgtk.
//
// rgtk is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// rgtk is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with rgtk. If not, see <http://www.gnu.org/licenses/>.
//! A bar that can used as a level indicator
#![cfg_attr(not(feature = "GTK_3_8"), allow(unused_imports))]
use libc::c_double;
use glib::translate::ToGlibPtr;
use gtk::{self, ffi};
use glib::{to_bool, to_gboolean};
use gtk::{LevelBarMode};
use gtk::cast::GTK_LEVELBAR;
/// LevelBar — A bar that can used as a level indicator
/*
* # Signal availables:
* * `offset-changed` : Has Details
*/
struct_Widget!(LevelBar);
impl LevelBar {
pub fn new() -> Option<LevelBar> {
let tmp_pointer = unsafe { ffi::gtk_level_bar_new() };
check_pointer!(tmp_pointer, LevelBar)
}
pub fn new_for_interval(min: f64, max: f64) -> Option<LevelBar> {
let tmp_pointer = unsafe { ffi::gtk_level_bar_new_for_interval(min as c_double, max as c_double) };
check_pointer!(tmp_pointer, LevelBar)
}
pub fn set_value(&mut self, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_set_value(GTK_LEVELBAR(self.pointer), value as c_double);
}
}
pub fn get_value(&self) -> f64 {
unsafe {
ffi::gtk_level_bar_get_value(GTK_LEVELBAR(self.pointer)) as f64
}
}
pub fn set_mode(&mut self, mode: LevelBarMode) -> () {
unsafe {
ffi::gtk_level_bar_set_mode(GTK_LEVELBAR(self.pointer), mode);
}
}
pub fn get_mode(&self) -> LevelBarMode {
unsafe {
ffi::gtk_level_bar_get_mode(GTK_LEVELBAR(self.pointer))
}
}
pub fn set_min_value(&mut self, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_set_min_value(GTK_LEVELBAR(self.pointer), value as c_double);
}
}
pub fn get_min_value(&self) -> f64 {
unsafe {
ffi::gtk_level_bar_get_min_value(GTK_LEVELBAR(self.pointer)) as c_double
}
}
pub fn set_max_value(&mut self, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_set_max_value(GTK_LEVELBAR(self.pointer), value as c_double);
}
}
pub fn get_max_value(&self) -> f64 {
unsafe {
ffi::gtk_level_bar_get_max_value(GTK_LEVELBAR(self.pointer)) as c_double
}
}
#[cfg(feature = "GTK_3_8")]
pub fn set_inverted(&mut self, inverted: bool) -> () {
unsafe { ffi::gtk_level_bar_set_inverted(GTK_LEVELBAR(self.pointer), to_gboolean(inverted)); }
}
#[cfg(feature = "GTK_3_8")]
pub fn ge
|
self) -> bool {
unsafe { to_bool(ffi::gtk_level_bar_get_inverted(GTK_LEVELBAR(self.pointer))) }
}
pub fn add_offset_value(&mut self, name: &str, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_add_offset_value(
GTK_LEVELBAR(self.pointer),
name.borrow_to_glib().0,
value as c_double)
}
}
pub fn remove_offset_value(&mut self, name: &str) -> () {
unsafe {
ffi::gtk_level_bar_remove_offset_value(
GTK_LEVELBAR(self.pointer),
name.borrow_to_glib().0);
}
}
pub fn get_offset_value(&self, name: &str) -> Option<f64> {
unsafe {
let mut value = 0.;
let res = to_bool(
ffi::gtk_level_bar_get_offset_value(
GTK_LEVELBAR(self.pointer),
name.borrow_to_glib().0,
&mut value));
if res {
Some(value)
}
else {
None
}
}
}
}
impl_drop!(LevelBar);
impl_TraitWidget!(LevelBar);
impl gtk::OrientableTrait for LevelBar {}
impl_widget_events!(LevelBar);
|
t_inverted(&
|
identifier_name
|
level_bar.rs
|
// This file is part of rgtk.
//
// rgtk is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// rgtk is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with rgtk. If not, see <http://www.gnu.org/licenses/>.
//! A bar that can used as a level indicator
#![cfg_attr(not(feature = "GTK_3_8"), allow(unused_imports))]
use libc::c_double;
use glib::translate::ToGlibPtr;
use gtk::{self, ffi};
use glib::{to_bool, to_gboolean};
use gtk::{LevelBarMode};
use gtk::cast::GTK_LEVELBAR;
/// LevelBar — A bar that can used as a level indicator
/*
* # Signal availables:
* * `offset-changed` : Has Details
*/
struct_Widget!(LevelBar);
impl LevelBar {
pub fn new() -> Option<LevelBar> {
let tmp_pointer = unsafe { ffi::gtk_level_bar_new() };
check_pointer!(tmp_pointer, LevelBar)
}
pub fn new_for_interval(min: f64, max: f64) -> Option<LevelBar> {
let tmp_pointer = unsafe { ffi::gtk_level_bar_new_for_interval(min as c_double, max as c_double) };
check_pointer!(tmp_pointer, LevelBar)
}
pub fn set_value(&mut self, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_set_value(GTK_LEVELBAR(self.pointer), value as c_double);
}
}
pub fn get_value(&self) -> f64 {
|
pub fn set_mode(&mut self, mode: LevelBarMode) -> () {
unsafe {
ffi::gtk_level_bar_set_mode(GTK_LEVELBAR(self.pointer), mode);
}
}
pub fn get_mode(&self) -> LevelBarMode {
unsafe {
ffi::gtk_level_bar_get_mode(GTK_LEVELBAR(self.pointer))
}
}
pub fn set_min_value(&mut self, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_set_min_value(GTK_LEVELBAR(self.pointer), value as c_double);
}
}
pub fn get_min_value(&self) -> f64 {
unsafe {
ffi::gtk_level_bar_get_min_value(GTK_LEVELBAR(self.pointer)) as c_double
}
}
pub fn set_max_value(&mut self, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_set_max_value(GTK_LEVELBAR(self.pointer), value as c_double);
}
}
pub fn get_max_value(&self) -> f64 {
unsafe {
ffi::gtk_level_bar_get_max_value(GTK_LEVELBAR(self.pointer)) as c_double
}
}
#[cfg(feature = "GTK_3_8")]
pub fn set_inverted(&mut self, inverted: bool) -> () {
unsafe { ffi::gtk_level_bar_set_inverted(GTK_LEVELBAR(self.pointer), to_gboolean(inverted)); }
}
#[cfg(feature = "GTK_3_8")]
pub fn get_inverted(&self) -> bool {
unsafe { to_bool(ffi::gtk_level_bar_get_inverted(GTK_LEVELBAR(self.pointer))) }
}
pub fn add_offset_value(&mut self, name: &str, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_add_offset_value(
GTK_LEVELBAR(self.pointer),
name.borrow_to_glib().0,
value as c_double)
}
}
pub fn remove_offset_value(&mut self, name: &str) -> () {
unsafe {
ffi::gtk_level_bar_remove_offset_value(
GTK_LEVELBAR(self.pointer),
name.borrow_to_glib().0);
}
}
pub fn get_offset_value(&self, name: &str) -> Option<f64> {
unsafe {
let mut value = 0.;
let res = to_bool(
ffi::gtk_level_bar_get_offset_value(
GTK_LEVELBAR(self.pointer),
name.borrow_to_glib().0,
&mut value));
if res {
Some(value)
}
else {
None
}
}
}
}
impl_drop!(LevelBar);
impl_TraitWidget!(LevelBar);
impl gtk::OrientableTrait for LevelBar {}
impl_widget_events!(LevelBar);
|
unsafe {
ffi::gtk_level_bar_get_value(GTK_LEVELBAR(self.pointer)) as f64
}
}
|
identifier_body
|
level_bar.rs
|
// This file is part of rgtk.
//
// rgtk is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// rgtk is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with rgtk. If not, see <http://www.gnu.org/licenses/>.
|
use libc::c_double;
use glib::translate::ToGlibPtr;
use gtk::{self, ffi};
use glib::{to_bool, to_gboolean};
use gtk::{LevelBarMode};
use gtk::cast::GTK_LEVELBAR;
/// LevelBar — A bar that can used as a level indicator
/*
* # Signal availables:
* * `offset-changed` : Has Details
*/
struct_Widget!(LevelBar);
impl LevelBar {
pub fn new() -> Option<LevelBar> {
let tmp_pointer = unsafe { ffi::gtk_level_bar_new() };
check_pointer!(tmp_pointer, LevelBar)
}
pub fn new_for_interval(min: f64, max: f64) -> Option<LevelBar> {
let tmp_pointer = unsafe { ffi::gtk_level_bar_new_for_interval(min as c_double, max as c_double) };
check_pointer!(tmp_pointer, LevelBar)
}
pub fn set_value(&mut self, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_set_value(GTK_LEVELBAR(self.pointer), value as c_double);
}
}
pub fn get_value(&self) -> f64 {
unsafe {
ffi::gtk_level_bar_get_value(GTK_LEVELBAR(self.pointer)) as f64
}
}
pub fn set_mode(&mut self, mode: LevelBarMode) -> () {
unsafe {
ffi::gtk_level_bar_set_mode(GTK_LEVELBAR(self.pointer), mode);
}
}
pub fn get_mode(&self) -> LevelBarMode {
unsafe {
ffi::gtk_level_bar_get_mode(GTK_LEVELBAR(self.pointer))
}
}
pub fn set_min_value(&mut self, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_set_min_value(GTK_LEVELBAR(self.pointer), value as c_double);
}
}
pub fn get_min_value(&self) -> f64 {
unsafe {
ffi::gtk_level_bar_get_min_value(GTK_LEVELBAR(self.pointer)) as c_double
}
}
pub fn set_max_value(&mut self, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_set_max_value(GTK_LEVELBAR(self.pointer), value as c_double);
}
}
pub fn get_max_value(&self) -> f64 {
unsafe {
ffi::gtk_level_bar_get_max_value(GTK_LEVELBAR(self.pointer)) as c_double
}
}
#[cfg(feature = "GTK_3_8")]
pub fn set_inverted(&mut self, inverted: bool) -> () {
unsafe { ffi::gtk_level_bar_set_inverted(GTK_LEVELBAR(self.pointer), to_gboolean(inverted)); }
}
#[cfg(feature = "GTK_3_8")]
pub fn get_inverted(&self) -> bool {
unsafe { to_bool(ffi::gtk_level_bar_get_inverted(GTK_LEVELBAR(self.pointer))) }
}
pub fn add_offset_value(&mut self, name: &str, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_add_offset_value(
GTK_LEVELBAR(self.pointer),
name.borrow_to_glib().0,
value as c_double)
}
}
pub fn remove_offset_value(&mut self, name: &str) -> () {
unsafe {
ffi::gtk_level_bar_remove_offset_value(
GTK_LEVELBAR(self.pointer),
name.borrow_to_glib().0);
}
}
pub fn get_offset_value(&self, name: &str) -> Option<f64> {
unsafe {
let mut value = 0.;
let res = to_bool(
ffi::gtk_level_bar_get_offset_value(
GTK_LEVELBAR(self.pointer),
name.borrow_to_glib().0,
&mut value));
if res {
Some(value)
}
else {
None
}
}
}
}
impl_drop!(LevelBar);
impl_TraitWidget!(LevelBar);
impl gtk::OrientableTrait for LevelBar {}
impl_widget_events!(LevelBar);
|
//! A bar that can used as a level indicator
#![cfg_attr(not(feature = "GTK_3_8"), allow(unused_imports))]
|
random_line_split
|
level_bar.rs
|
// This file is part of rgtk.
//
// rgtk is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// rgtk is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with rgtk. If not, see <http://www.gnu.org/licenses/>.
//! A bar that can used as a level indicator
#![cfg_attr(not(feature = "GTK_3_8"), allow(unused_imports))]
use libc::c_double;
use glib::translate::ToGlibPtr;
use gtk::{self, ffi};
use glib::{to_bool, to_gboolean};
use gtk::{LevelBarMode};
use gtk::cast::GTK_LEVELBAR;
/// LevelBar — A bar that can used as a level indicator
/*
* # Signal availables:
* * `offset-changed` : Has Details
*/
struct_Widget!(LevelBar);
impl LevelBar {
pub fn new() -> Option<LevelBar> {
let tmp_pointer = unsafe { ffi::gtk_level_bar_new() };
check_pointer!(tmp_pointer, LevelBar)
}
pub fn new_for_interval(min: f64, max: f64) -> Option<LevelBar> {
let tmp_pointer = unsafe { ffi::gtk_level_bar_new_for_interval(min as c_double, max as c_double) };
check_pointer!(tmp_pointer, LevelBar)
}
pub fn set_value(&mut self, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_set_value(GTK_LEVELBAR(self.pointer), value as c_double);
}
}
pub fn get_value(&self) -> f64 {
unsafe {
ffi::gtk_level_bar_get_value(GTK_LEVELBAR(self.pointer)) as f64
}
}
pub fn set_mode(&mut self, mode: LevelBarMode) -> () {
unsafe {
ffi::gtk_level_bar_set_mode(GTK_LEVELBAR(self.pointer), mode);
}
}
pub fn get_mode(&self) -> LevelBarMode {
unsafe {
ffi::gtk_level_bar_get_mode(GTK_LEVELBAR(self.pointer))
}
}
pub fn set_min_value(&mut self, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_set_min_value(GTK_LEVELBAR(self.pointer), value as c_double);
}
}
pub fn get_min_value(&self) -> f64 {
unsafe {
ffi::gtk_level_bar_get_min_value(GTK_LEVELBAR(self.pointer)) as c_double
}
}
pub fn set_max_value(&mut self, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_set_max_value(GTK_LEVELBAR(self.pointer), value as c_double);
}
}
pub fn get_max_value(&self) -> f64 {
unsafe {
ffi::gtk_level_bar_get_max_value(GTK_LEVELBAR(self.pointer)) as c_double
}
}
#[cfg(feature = "GTK_3_8")]
pub fn set_inverted(&mut self, inverted: bool) -> () {
unsafe { ffi::gtk_level_bar_set_inverted(GTK_LEVELBAR(self.pointer), to_gboolean(inverted)); }
}
#[cfg(feature = "GTK_3_8")]
pub fn get_inverted(&self) -> bool {
unsafe { to_bool(ffi::gtk_level_bar_get_inverted(GTK_LEVELBAR(self.pointer))) }
}
pub fn add_offset_value(&mut self, name: &str, value: f64) -> () {
unsafe {
ffi::gtk_level_bar_add_offset_value(
GTK_LEVELBAR(self.pointer),
name.borrow_to_glib().0,
value as c_double)
}
}
pub fn remove_offset_value(&mut self, name: &str) -> () {
unsafe {
ffi::gtk_level_bar_remove_offset_value(
GTK_LEVELBAR(self.pointer),
name.borrow_to_glib().0);
}
}
pub fn get_offset_value(&self, name: &str) -> Option<f64> {
unsafe {
let mut value = 0.;
let res = to_bool(
ffi::gtk_level_bar_get_offset_value(
GTK_LEVELBAR(self.pointer),
name.borrow_to_glib().0,
&mut value));
if res {
|
else {
None
}
}
}
}
impl_drop!(LevelBar);
impl_TraitWidget!(LevelBar);
impl gtk::OrientableTrait for LevelBar {}
impl_widget_events!(LevelBar);
|
Some(value)
}
|
conditional_block
|
helpers.rs
|
#[derive(Debug)]
pub enum Entry {
// either a unique page and its link data
Page {
title: String,
children: Vec<u32>,
parents: Vec<u32>,
},
// or the redirect page and its address
// these will eventually be taken out of db::entries
Redirect {
title: String,
target: Option<u32>,
}
}
//what phase the database is in
//TODO should I get rid of the numbers? They don't matter except that without
// them it might not be clear that the order of the values is what determines
|
#[derive(PartialEq, PartialOrd, Debug)]
pub enum State {
Begin = 0,
AddPages = 1,
AddRedirects = 2,
TidyEntries = 3,
AddLinks = 4,
Done = 5,
}
|
// their inequality; here concrete values make it clearer
|
random_line_split
|
helpers.rs
|
#[derive(Debug)]
pub enum
|
{
// either a unique page and its link data
Page {
title: String,
children: Vec<u32>,
parents: Vec<u32>,
},
// or the redirect page and its address
// these will eventually be taken out of db::entries
Redirect {
title: String,
target: Option<u32>,
}
}
//what phase the database is in
//TODO should I get rid of the numbers? They don't matter except that without
// them it might not be clear that the order of the values is what determines
// their inequality; here concrete values make it clearer
#[derive(PartialEq, PartialOrd, Debug)]
pub enum State {
Begin = 0,
AddPages = 1,
AddRedirects = 2,
TidyEntries = 3,
AddLinks = 4,
Done = 5,
}
|
Entry
|
identifier_name
|
counter.rs
|
use sfml::graphics::*;
use sfml::system::{Vector2f, Vector2u};
use resize_handler::ResizeHandler;
|
const OFFSET: Vector2f = Vector2f { x: 32., y: 32. };
pub struct Counter<'s> {
icon: Sprite<'s>,
text: Text<'s>,
rel_pos: Vector2f,
}
impl<'s> Counter<'s> {
pub fn new(tex: &'s TextureRef, font: &'s Font, rel_pos: &Vector2f) -> Counter<'s> {
Counter {
icon: Sprite::with_texture(tex),
text: Text::new_init(" 0", font, 32),
rel_pos: *rel_pos,
}
}
pub fn set_value(&mut self, value: u8) {
let string = if value <= 9 {
format!(" {}", value)
} else if value <= 99 {
format!(" {}", value)
} else {
format!("{}", value)
};
self.text.set_string(&string);
}
fn recalculate(&mut self, win_width: u32, win_height: u32) {
self.icon.set_position2f(win_width as f32 * self.rel_pos.x,
win_height as f32 * self.rel_pos.y);
let bounds = self.icon.global_bounds();
self.text.set_position2f(bounds.left, bounds.top);
self.text.move_(&OFFSET);
}
}
impl<'s> ResizeHandler for Counter<'s> {
fn on_resize(&mut self, width: u32, height: u32) {
self.recalculate(width, height);
}
}
impl<'s> UiDrawable for Counter<'s> {
fn draw(&self, target: &mut RenderTarget) {
target.draw(&self.icon);
target.draw(&self.text);
}
}
impl<'s> Element for Counter<'s> {
fn set_position_relative(&mut self, pos: &Vector2f, win_size: &Vector2u) {
self.rel_pos = *pos;
self.recalculate(win_size.x, win_size.y);
}
}
|
use super::element::*;
|
random_line_split
|
counter.rs
|
use sfml::graphics::*;
use sfml::system::{Vector2f, Vector2u};
use resize_handler::ResizeHandler;
use super::element::*;
const OFFSET: Vector2f = Vector2f { x: 32., y: 32. };
pub struct Counter<'s> {
icon: Sprite<'s>,
text: Text<'s>,
rel_pos: Vector2f,
}
impl<'s> Counter<'s> {
pub fn new(tex: &'s TextureRef, font: &'s Font, rel_pos: &Vector2f) -> Counter<'s> {
Counter {
icon: Sprite::with_texture(tex),
text: Text::new_init(" 0", font, 32),
rel_pos: *rel_pos,
}
}
pub fn set_value(&mut self, value: u8) {
let string = if value <= 9 {
format!(" {}", value)
} else if value <= 99 {
format!(" {}", value)
} else {
format!("{}", value)
};
self.text.set_string(&string);
}
fn recalculate(&mut self, win_width: u32, win_height: u32) {
self.icon.set_position2f(win_width as f32 * self.rel_pos.x,
win_height as f32 * self.rel_pos.y);
let bounds = self.icon.global_bounds();
self.text.set_position2f(bounds.left, bounds.top);
self.text.move_(&OFFSET);
}
}
impl<'s> ResizeHandler for Counter<'s> {
fn on_resize(&mut self, width: u32, height: u32) {
self.recalculate(width, height);
}
}
impl<'s> UiDrawable for Counter<'s> {
fn draw(&self, target: &mut RenderTarget) {
target.draw(&self.icon);
target.draw(&self.text);
}
}
impl<'s> Element for Counter<'s> {
fn set_position_relative(&mut self, pos: &Vector2f, win_size: &Vector2u)
|
}
|
{
self.rel_pos = *pos;
self.recalculate(win_size.x, win_size.y);
}
|
identifier_body
|
counter.rs
|
use sfml::graphics::*;
use sfml::system::{Vector2f, Vector2u};
use resize_handler::ResizeHandler;
use super::element::*;
const OFFSET: Vector2f = Vector2f { x: 32., y: 32. };
pub struct Counter<'s> {
icon: Sprite<'s>,
text: Text<'s>,
rel_pos: Vector2f,
}
impl<'s> Counter<'s> {
pub fn new(tex: &'s TextureRef, font: &'s Font, rel_pos: &Vector2f) -> Counter<'s> {
Counter {
icon: Sprite::with_texture(tex),
text: Text::new_init(" 0", font, 32),
rel_pos: *rel_pos,
}
}
pub fn set_value(&mut self, value: u8) {
let string = if value <= 9 {
format!(" {}", value)
} else if value <= 99 {
format!(" {}", value)
} else {
format!("{}", value)
};
self.text.set_string(&string);
}
fn
|
(&mut self, win_width: u32, win_height: u32) {
self.icon.set_position2f(win_width as f32 * self.rel_pos.x,
win_height as f32 * self.rel_pos.y);
let bounds = self.icon.global_bounds();
self.text.set_position2f(bounds.left, bounds.top);
self.text.move_(&OFFSET);
}
}
impl<'s> ResizeHandler for Counter<'s> {
fn on_resize(&mut self, width: u32, height: u32) {
self.recalculate(width, height);
}
}
impl<'s> UiDrawable for Counter<'s> {
fn draw(&self, target: &mut RenderTarget) {
target.draw(&self.icon);
target.draw(&self.text);
}
}
impl<'s> Element for Counter<'s> {
fn set_position_relative(&mut self, pos: &Vector2f, win_size: &Vector2u) {
self.rel_pos = *pos;
self.recalculate(win_size.x, win_size.y);
}
}
|
recalculate
|
identifier_name
|
polygon.rs
|
//! Draw polygon
use crate::{
math::{Matrix2d, Scalar},
triangulation, types,
types::Color,
DrawState, Graphics,
};
/// A polygon
#[derive(Copy, Clone)]
pub struct Polygon {
/// The color of the polygon
pub color: Color,
}
impl Polygon {
/// Creates new polygon
pub fn new(color: Color) -> Polygon {
Polygon { color }
}
/// Sets color.
pub fn color(mut self, color: Color) -> Self {
self.color = color;
self
}
/// Draws polygon using the default method.
#[inline(always)]
pub fn draw<G>(
&self,
polygon: types::Polygon<'_>,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
g.polygon(self, polygon, draw_state, transform);
}
/// Draws polygon using triangulation.
pub fn draw_tri<G>(
&self,
polygon: types::Polygon<'_>,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
g.tri_list(draw_state, &self.color, |f| {
triangulation::with_polygon_tri_list(transform, polygon, |vertices| f(vertices))
});
}
/// Draws tweened polygon with linear interpolation, using default method.
#[inline(always)]
pub fn draw_tween_lerp<G>(
&self,
polygons: types::Polygons<'_>,
tween_factor: Scalar,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
g.polygon_tween_lerp(self, polygons, tween_factor, draw_state, transform);
}
/// Draws tweened polygon with linear interpolation, using triangulation.
pub fn draw_tween_lerp_tri<G>(
&self,
polygons: types::Polygons<'_>,
tween_factor: Scalar,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
if self.color[3] == 0.0
|
g.tri_list(draw_state, &self.color, |f| {
triangulation::with_lerp_polygons_tri_list(
transform,
polygons,
tween_factor,
|vertices| f(vertices),
)
});
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_polygon() {
let _polygon = Polygon::new([1.0; 4]).color([0.0; 4]);
}
}
|
{
return;
}
|
conditional_block
|
polygon.rs
|
//! Draw polygon
use crate::{
math::{Matrix2d, Scalar},
triangulation, types,
types::Color,
DrawState, Graphics,
};
/// A polygon
#[derive(Copy, Clone)]
pub struct Polygon {
/// The color of the polygon
pub color: Color,
}
impl Polygon {
/// Creates new polygon
pub fn new(color: Color) -> Polygon {
Polygon { color }
}
/// Sets color.
pub fn color(mut self, color: Color) -> Self {
self.color = color;
self
}
/// Draws polygon using the default method.
#[inline(always)]
pub fn draw<G>(
&self,
polygon: types::Polygon<'_>,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
g.polygon(self, polygon, draw_state, transform);
}
/// Draws polygon using triangulation.
pub fn
|
<G>(
&self,
polygon: types::Polygon<'_>,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
g.tri_list(draw_state, &self.color, |f| {
triangulation::with_polygon_tri_list(transform, polygon, |vertices| f(vertices))
});
}
/// Draws tweened polygon with linear interpolation, using default method.
#[inline(always)]
pub fn draw_tween_lerp<G>(
&self,
polygons: types::Polygons<'_>,
tween_factor: Scalar,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
g.polygon_tween_lerp(self, polygons, tween_factor, draw_state, transform);
}
/// Draws tweened polygon with linear interpolation, using triangulation.
pub fn draw_tween_lerp_tri<G>(
&self,
polygons: types::Polygons<'_>,
tween_factor: Scalar,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
if self.color[3] == 0.0 {
return;
}
g.tri_list(draw_state, &self.color, |f| {
triangulation::with_lerp_polygons_tri_list(
transform,
polygons,
tween_factor,
|vertices| f(vertices),
)
});
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_polygon() {
let _polygon = Polygon::new([1.0; 4]).color([0.0; 4]);
}
}
|
draw_tri
|
identifier_name
|
polygon.rs
|
//! Draw polygon
use crate::{
math::{Matrix2d, Scalar},
triangulation, types,
types::Color,
DrawState, Graphics,
};
/// A polygon
#[derive(Copy, Clone)]
pub struct Polygon {
/// The color of the polygon
pub color: Color,
}
impl Polygon {
/// Creates new polygon
pub fn new(color: Color) -> Polygon
|
/// Sets color.
pub fn color(mut self, color: Color) -> Self {
self.color = color;
self
}
/// Draws polygon using the default method.
#[inline(always)]
pub fn draw<G>(
&self,
polygon: types::Polygon<'_>,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
g.polygon(self, polygon, draw_state, transform);
}
/// Draws polygon using triangulation.
pub fn draw_tri<G>(
&self,
polygon: types::Polygon<'_>,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
g.tri_list(draw_state, &self.color, |f| {
triangulation::with_polygon_tri_list(transform, polygon, |vertices| f(vertices))
});
}
/// Draws tweened polygon with linear interpolation, using default method.
#[inline(always)]
pub fn draw_tween_lerp<G>(
&self,
polygons: types::Polygons<'_>,
tween_factor: Scalar,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
g.polygon_tween_lerp(self, polygons, tween_factor, draw_state, transform);
}
/// Draws tweened polygon with linear interpolation, using triangulation.
pub fn draw_tween_lerp_tri<G>(
&self,
polygons: types::Polygons<'_>,
tween_factor: Scalar,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
if self.color[3] == 0.0 {
return;
}
g.tri_list(draw_state, &self.color, |f| {
triangulation::with_lerp_polygons_tri_list(
transform,
polygons,
tween_factor,
|vertices| f(vertices),
)
});
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_polygon() {
let _polygon = Polygon::new([1.0; 4]).color([0.0; 4]);
}
}
|
{
Polygon { color }
}
|
identifier_body
|
polygon.rs
|
//! Draw polygon
use crate::{
math::{Matrix2d, Scalar},
triangulation, types,
types::Color,
DrawState, Graphics,
};
/// A polygon
#[derive(Copy, Clone)]
pub struct Polygon {
/// The color of the polygon
pub color: Color,
}
impl Polygon {
/// Creates new polygon
pub fn new(color: Color) -> Polygon {
Polygon { color }
}
/// Sets color.
pub fn color(mut self, color: Color) -> Self {
self.color = color;
self
}
/// Draws polygon using the default method.
#[inline(always)]
pub fn draw<G>(
&self,
polygon: types::Polygon<'_>,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
g.polygon(self, polygon, draw_state, transform);
}
/// Draws polygon using triangulation.
pub fn draw_tri<G>(
&self,
polygon: types::Polygon<'_>,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
g.tri_list(draw_state, &self.color, |f| {
triangulation::with_polygon_tri_list(transform, polygon, |vertices| f(vertices))
});
}
/// Draws tweened polygon with linear interpolation, using default method.
#[inline(always)]
pub fn draw_tween_lerp<G>(
&self,
polygons: types::Polygons<'_>,
tween_factor: Scalar,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
g.polygon_tween_lerp(self, polygons, tween_factor, draw_state, transform);
}
/// Draws tweened polygon with linear interpolation, using triangulation.
pub fn draw_tween_lerp_tri<G>(
&self,
polygons: types::Polygons<'_>,
tween_factor: Scalar,
draw_state: &DrawState,
transform: Matrix2d,
g: &mut G,
) where
G: Graphics,
{
if self.color[3] == 0.0 {
return;
}
g.tri_list(draw_state, &self.color, |f| {
triangulation::with_lerp_polygons_tri_list(
transform,
polygons,
tween_factor,
|
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_polygon() {
let _polygon = Polygon::new([1.0; 4]).color([0.0; 4]);
}
}
|
|vertices| f(vertices),
)
});
}
|
random_line_split
|
TranslationCache.rs
|
extern crate libc;
use std::mem;
use std::marker;
use std::ops::{Index, IndexMut};
extern{
fn memset(s: *mut libc::c_void, c: libc::uint32_t, n: libc::size_t) -> *mut libc::c_void;
}
pub struct TranslationCache {
pub page : *mut u8
}
unsafe impl Send for TranslationCache {}
unsafe impl Sync for TranslationCache {}
const PAGE_SIZE: usize = 4096;
impl Index<usize> for TranslationCache {
type Output = u8;
fn index(&self, _index: usize) -> &u8 {
unsafe {&*self.page.offset(_index as isize) }
}
}
impl IndexMut<usize> for TranslationCache {
fn index_mut(&mut self, _index: usize) -> &mut u8 {
unsafe {&mut *self.page.offset(_index as isize) }
}
}
impl TranslationCache {
pub fn
|
(num_pages: usize) -> TranslationCache {
let page : *mut u8;
unsafe {
let cache_size = num_pages * PAGE_SIZE;
let mut _page : *mut libc::c_void = mem::uninitialized();
libc::posix_memalign(&mut _page, PAGE_SIZE, cache_size);
libc::mprotect(_page, cache_size, libc::PROT_EXEC | libc::PROT_READ | libc::PROT_WRITE);
memset(_page, 0xC3, cache_size);
page = mem::transmute(_page);
}
TranslationCache { page: page }
}
}
|
new
|
identifier_name
|
TranslationCache.rs
|
extern crate libc;
use std::mem;
use std::marker;
use std::ops::{Index, IndexMut};
extern{
fn memset(s: *mut libc::c_void, c: libc::uint32_t, n: libc::size_t) -> *mut libc::c_void;
}
pub struct TranslationCache {
pub page : *mut u8
}
unsafe impl Send for TranslationCache {}
unsafe impl Sync for TranslationCache {}
const PAGE_SIZE: usize = 4096;
impl Index<usize> for TranslationCache {
type Output = u8;
fn index(&self, _index: usize) -> &u8 {
unsafe {&*self.page.offset(_index as isize) }
|
impl IndexMut<usize> for TranslationCache {
fn index_mut(&mut self, _index: usize) -> &mut u8 {
unsafe {&mut *self.page.offset(_index as isize) }
}
}
impl TranslationCache {
pub fn new(num_pages: usize) -> TranslationCache {
let page : *mut u8;
unsafe {
let cache_size = num_pages * PAGE_SIZE;
let mut _page : *mut libc::c_void = mem::uninitialized();
libc::posix_memalign(&mut _page, PAGE_SIZE, cache_size);
libc::mprotect(_page, cache_size, libc::PROT_EXEC | libc::PROT_READ | libc::PROT_WRITE);
memset(_page, 0xC3, cache_size);
page = mem::transmute(_page);
}
TranslationCache { page: page }
}
}
|
}
}
|
random_line_split
|
main.rs
|
fn main() {
{
// 数组实现了 Copy trait,在栈上分配的数组可以被复制
let mut arr = [1,2,3,4];
let mut arr2 = arr; // arr2 是 arr 的拷贝并不是原有数据
arr[0] = 100;
arr2[0] = 200;
println!("arr: {:?}, arr2: {:?}", arr, arr2);
}
{
// 堆上分配的数组不能被复制
let arr = Box::new([1,2,3,4]);
let arr2 = arr; // arr 所有权转移给 arr2
// println!("arr: {:?}", arr); // 失败
println!("arr2: {:?}", arr2);
}
{
let arr = Box::new([1,2,3,4]);
{
let arr2 = arr;
println!("arr2: {:?}", arr2);
}
// println!("arr: {:?}", arr);
}
{
let mut arr = Box::new([1,2,3,4]);
arr = print_arr(arr);
println!("arr: {:?}", arr);
}
{
let arr = Box::new([1,2,3,4]);
print_arr1(&arr);
println!("arr: {:?}", arr);
}
{
let arr = Box::new([1,2,3,4]);
print_arr2(&arr);
println!("arr: {:?}", arr);
}
}
fn print_arr(arr: Box<[i32;4]>) -> Box<[i32;4]> {
println!("arr: {:?}", arr);
arr
}
fn print_arr1(arr: &[i32;4]) {
println!("arr: {:?}", arr);
}
fn print_arr2(arr: &[i32;4]) {
pr
|
r: {:?}", arr);
}
|
intln!("ar
|
identifier_name
|
main.rs
|
fn main() {
{
// 数组实现了 Copy trait,在栈上分配的数组可以被复制
let mut arr = [1,2,3,4];
let mut arr2 = arr; // arr2 是 arr 的拷贝并不是原有数据
arr[0] = 100;
arr2[0] = 200;
println!("arr: {:?}, arr2: {:?}", arr, arr2);
}
{
// 堆上分配的数组不能被复制
let arr = Box::new([1,2,3,4]);
let arr2 = arr; // arr 所有权转移给 arr2
// println!("arr: {:?}", arr); // 失败
println!("arr2: {:?}", arr2);
}
{
let arr = Box::new([1,2,3,4]);
{
let arr2 = arr;
println!("arr2: {:?}", arr2);
}
// println!("arr: {:?}", arr);
}
{
let mut arr = Box::new([1,2,3,4]);
arr = print_arr(arr);
println!("arr: {:?}", arr);
}
{
let arr = Box::new([1,2,3,4]);
print_arr1(&arr);
println!("arr: {:?}", arr);
}
{
let arr = Box::new([1,2,3,4]);
print_arr2(&arr);
println!("arr: {:?}", arr);
}
}
fn print_arr(arr: Box<[i32;4]>) -> Box<[i32;4]> {
println!("arr: {:?}", arr);
arr
}
fn print_arr1(arr: &[i32;4]) {
println!("arr: {:?}", arr);
}
fn print_arr2(arr: &[i32;4]) {
println!("arr: {:?}", arr);
|
}
|
identifier_body
|
|
main.rs
|
fn main() {
{
|
println!("arr: {:?}, arr2: {:?}", arr, arr2);
}
{
// 堆上分配的数组不能被复制
let arr = Box::new([1,2,3,4]);
let arr2 = arr; // arr 所有权转移给 arr2
// println!("arr: {:?}", arr); // 失败
println!("arr2: {:?}", arr2);
}
{
let arr = Box::new([1,2,3,4]);
{
let arr2 = arr;
println!("arr2: {:?}", arr2);
}
// println!("arr: {:?}", arr);
}
{
let mut arr = Box::new([1,2,3,4]);
arr = print_arr(arr);
println!("arr: {:?}", arr);
}
{
let arr = Box::new([1,2,3,4]);
print_arr1(&arr);
println!("arr: {:?}", arr);
}
{
let arr = Box::new([1,2,3,4]);
print_arr2(&arr);
println!("arr: {:?}", arr);
}
}
fn print_arr(arr: Box<[i32;4]>) -> Box<[i32;4]> {
println!("arr: {:?}", arr);
arr
}
fn print_arr1(arr: &[i32;4]) {
println!("arr: {:?}", arr);
}
fn print_arr2(arr: &[i32;4]) {
println!("arr: {:?}", arr);
}
|
// 数组实现了 Copy trait,在栈上分配的数组可以被复制
let mut arr = [1,2,3,4];
let mut arr2 = arr; // arr2 是 arr 的拷贝并不是原有数据
arr[0] = 100;
arr2[0] = 200;
|
random_line_split
|
signalargs.rs
|
use arg;
use {Message, MessageType, BusName, Path, Interface, Member};
/// Helper methods for structs representing a Signal
///
/// # Example
///
/// Listen to InterfacesRemoved signal from org.bluez.obex.
///
/// ```rust,no_run
/// use dbus::{Connection, ConnectionItem, BusType, SignalArgs};
/// use dbus::stdintf::OrgFreedesktopDBusObjectManagerInterfacesRemoved as IR;
///
/// let c = Connection::get_private(BusType::Session).unwrap();
/// // Add a match for this signal
/// let mstr = IR::match_str(Some(&"org.bluez.obex".into()), None);
/// c.add_match(&mstr).unwrap();
///
/// // Wait for the signal to arrive.
/// for n in c.iter(1000) {
/// if let ConnectionItem::Signal(msg) = n {
/// if let Some(ir) = IR::from_message(&msg) {
/// println!("Interfaces {:?} have been removed from bluez on path {}.", ir.interfaces, ir.object);
/// }
/// }
/// }
///
pub trait SignalArgs: Default {
/// D-Bus name of signal
const NAME: &'static str;
/// D-Bus name of interface this signal belongs to
const INTERFACE: &'static str;
/// Low-level method for appending this struct to a message.
///
/// You're more likely to use one of the more high level functions.
fn append(&self, i: &mut arg::IterAppend);
/// Low-level method for getting arguments from a message.
///
/// You're more likely to use one of the more high level functions.
fn get(&mut self, i: &mut arg::Iter) -> Result<(), arg::TypeMismatchError>;
/// Returns a message that emits the signal.
fn to_emit_message(&self, path: &Path) -> Message {
let mut m = Message::signal(path, &Interface::from(Self::INTERFACE), &Member::from(Self::NAME));
self.append(&mut arg::IterAppend::new(&mut m));
m
}
/// If the message is a signal of the correct type, return its arguments, otherwise return None.
///
/// This does not check sender and path of the message, which is likely relevant to you as well.
fn from_message(m: &Message) -> Option<Self> {
|
if m.msg_type()!= MessageType::Signal { None }
else if m.interface().as_ref().map(|x| &**x)!= Some(Self::INTERFACE) { None }
else if m.member().as_ref().map(|x| &**x)!= Some(Self::NAME) { None }
else {
let mut z: Self = Default::default();
z.get(&mut m.iter_init()).ok().map(|_| z)
}
}
/// Returns a string that can be sent to `Connection::add_match`.
///
/// If sender and/or path is None, matches all senders and/or paths.
fn match_str(sender: Option<&BusName>, path: Option<&Path>) -> String {
let mut r = format!("type='signal',interface='{}',member='{}'", Self::INTERFACE, Self::NAME);
sender.map(|s| r += &format!(",sender='{}'", s));
path.map(|s| r += &format!(",path='{}'", s));
r
}
}
#[test]
fn intf_removed() {
use {Connection, ConnectionItem, BusType};
use stdintf::OrgFreedesktopDBusObjectManagerInterfacesRemoved as IR;
let c = Connection::get_private(BusType::Session).unwrap();
let mstr = IR::match_str(Some(&c.unique_name().into()), Some(&"/hello".into()));
println!("Match str: {}", mstr);
c.add_match(&mstr).unwrap();
let ir = IR { object: "/hello".into(), interfaces: vec!("ABC.DEF".into(), "GHI.JKL".into()) };
let cp = c.with_path("dbus.dummy", "/hello", 2000);
cp.emit(&ir).unwrap();
for n in c.iter(1000) {
if let ConnectionItem::Signal(msg) = n {
if let Some(ir2) = IR::from_message(&msg) {
assert_eq!(ir2.object, ir.object);
assert_eq!(ir2.interfaces, ir.interfaces);
break;
}
}
}
}
|
random_line_split
|
|
signalargs.rs
|
use arg;
use {Message, MessageType, BusName, Path, Interface, Member};
/// Helper methods for structs representing a Signal
///
/// # Example
///
/// Listen to InterfacesRemoved signal from org.bluez.obex.
///
/// ```rust,no_run
/// use dbus::{Connection, ConnectionItem, BusType, SignalArgs};
/// use dbus::stdintf::OrgFreedesktopDBusObjectManagerInterfacesRemoved as IR;
///
/// let c = Connection::get_private(BusType::Session).unwrap();
/// // Add a match for this signal
/// let mstr = IR::match_str(Some(&"org.bluez.obex".into()), None);
/// c.add_match(&mstr).unwrap();
///
/// // Wait for the signal to arrive.
/// for n in c.iter(1000) {
/// if let ConnectionItem::Signal(msg) = n {
/// if let Some(ir) = IR::from_message(&msg) {
/// println!("Interfaces {:?} have been removed from bluez on path {}.", ir.interfaces, ir.object);
/// }
/// }
/// }
///
pub trait SignalArgs: Default {
/// D-Bus name of signal
const NAME: &'static str;
/// D-Bus name of interface this signal belongs to
const INTERFACE: &'static str;
/// Low-level method for appending this struct to a message.
///
/// You're more likely to use one of the more high level functions.
fn append(&self, i: &mut arg::IterAppend);
/// Low-level method for getting arguments from a message.
///
/// You're more likely to use one of the more high level functions.
fn get(&mut self, i: &mut arg::Iter) -> Result<(), arg::TypeMismatchError>;
/// Returns a message that emits the signal.
fn
|
(&self, path: &Path) -> Message {
let mut m = Message::signal(path, &Interface::from(Self::INTERFACE), &Member::from(Self::NAME));
self.append(&mut arg::IterAppend::new(&mut m));
m
}
/// If the message is a signal of the correct type, return its arguments, otherwise return None.
///
/// This does not check sender and path of the message, which is likely relevant to you as well.
fn from_message(m: &Message) -> Option<Self> {
if m.msg_type()!= MessageType::Signal { None }
else if m.interface().as_ref().map(|x| &**x)!= Some(Self::INTERFACE) { None }
else if m.member().as_ref().map(|x| &**x)!= Some(Self::NAME) { None }
else {
let mut z: Self = Default::default();
z.get(&mut m.iter_init()).ok().map(|_| z)
}
}
/// Returns a string that can be sent to `Connection::add_match`.
///
/// If sender and/or path is None, matches all senders and/or paths.
fn match_str(sender: Option<&BusName>, path: Option<&Path>) -> String {
let mut r = format!("type='signal',interface='{}',member='{}'", Self::INTERFACE, Self::NAME);
sender.map(|s| r += &format!(",sender='{}'", s));
path.map(|s| r += &format!(",path='{}'", s));
r
}
}
#[test]
fn intf_removed() {
use {Connection, ConnectionItem, BusType};
use stdintf::OrgFreedesktopDBusObjectManagerInterfacesRemoved as IR;
let c = Connection::get_private(BusType::Session).unwrap();
let mstr = IR::match_str(Some(&c.unique_name().into()), Some(&"/hello".into()));
println!("Match str: {}", mstr);
c.add_match(&mstr).unwrap();
let ir = IR { object: "/hello".into(), interfaces: vec!("ABC.DEF".into(), "GHI.JKL".into()) };
let cp = c.with_path("dbus.dummy", "/hello", 2000);
cp.emit(&ir).unwrap();
for n in c.iter(1000) {
if let ConnectionItem::Signal(msg) = n {
if let Some(ir2) = IR::from_message(&msg) {
assert_eq!(ir2.object, ir.object);
assert_eq!(ir2.interfaces, ir.interfaces);
break;
}
}
}
}
|
to_emit_message
|
identifier_name
|
signalargs.rs
|
use arg;
use {Message, MessageType, BusName, Path, Interface, Member};
/// Helper methods for structs representing a Signal
///
/// # Example
///
/// Listen to InterfacesRemoved signal from org.bluez.obex.
///
/// ```rust,no_run
/// use dbus::{Connection, ConnectionItem, BusType, SignalArgs};
/// use dbus::stdintf::OrgFreedesktopDBusObjectManagerInterfacesRemoved as IR;
///
/// let c = Connection::get_private(BusType::Session).unwrap();
/// // Add a match for this signal
/// let mstr = IR::match_str(Some(&"org.bluez.obex".into()), None);
/// c.add_match(&mstr).unwrap();
///
/// // Wait for the signal to arrive.
/// for n in c.iter(1000) {
/// if let ConnectionItem::Signal(msg) = n {
/// if let Some(ir) = IR::from_message(&msg) {
/// println!("Interfaces {:?} have been removed from bluez on path {}.", ir.interfaces, ir.object);
/// }
/// }
/// }
///
pub trait SignalArgs: Default {
/// D-Bus name of signal
const NAME: &'static str;
/// D-Bus name of interface this signal belongs to
const INTERFACE: &'static str;
/// Low-level method for appending this struct to a message.
///
/// You're more likely to use one of the more high level functions.
fn append(&self, i: &mut arg::IterAppend);
/// Low-level method for getting arguments from a message.
///
/// You're more likely to use one of the more high level functions.
fn get(&mut self, i: &mut arg::Iter) -> Result<(), arg::TypeMismatchError>;
/// Returns a message that emits the signal.
fn to_emit_message(&self, path: &Path) -> Message
|
/// If the message is a signal of the correct type, return its arguments, otherwise return None.
///
/// This does not check sender and path of the message, which is likely relevant to you as well.
fn from_message(m: &Message) -> Option<Self> {
if m.msg_type()!= MessageType::Signal { None }
else if m.interface().as_ref().map(|x| &**x)!= Some(Self::INTERFACE) { None }
else if m.member().as_ref().map(|x| &**x)!= Some(Self::NAME) { None }
else {
let mut z: Self = Default::default();
z.get(&mut m.iter_init()).ok().map(|_| z)
}
}
/// Returns a string that can be sent to `Connection::add_match`.
///
/// If sender and/or path is None, matches all senders and/or paths.
fn match_str(sender: Option<&BusName>, path: Option<&Path>) -> String {
let mut r = format!("type='signal',interface='{}',member='{}'", Self::INTERFACE, Self::NAME);
sender.map(|s| r += &format!(",sender='{}'", s));
path.map(|s| r += &format!(",path='{}'", s));
r
}
}
#[test]
fn intf_removed() {
use {Connection, ConnectionItem, BusType};
use stdintf::OrgFreedesktopDBusObjectManagerInterfacesRemoved as IR;
let c = Connection::get_private(BusType::Session).unwrap();
let mstr = IR::match_str(Some(&c.unique_name().into()), Some(&"/hello".into()));
println!("Match str: {}", mstr);
c.add_match(&mstr).unwrap();
let ir = IR { object: "/hello".into(), interfaces: vec!("ABC.DEF".into(), "GHI.JKL".into()) };
let cp = c.with_path("dbus.dummy", "/hello", 2000);
cp.emit(&ir).unwrap();
for n in c.iter(1000) {
if let ConnectionItem::Signal(msg) = n {
if let Some(ir2) = IR::from_message(&msg) {
assert_eq!(ir2.object, ir.object);
assert_eq!(ir2.interfaces, ir.interfaces);
break;
}
}
}
}
|
{
let mut m = Message::signal(path, &Interface::from(Self::INTERFACE), &Member::from(Self::NAME));
self.append(&mut arg::IterAppend::new(&mut m));
m
}
|
identifier_body
|
log.rs
|
/*
Copyright 2013 Jesse 'Jeaye' Wilkerson
See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: shared/log/log.rs
Author: Jesse 'Jeaye' Wilkerson
Description:
A general logging collection
with differen verbosity levels.
Use Q3_LOG=num where num matches:
0 => Disable all logging
1 => Enable only error logging
2 => Enable info logging (default)
3 => Enable debug logging (very verbose)
Usage of logging utilities is generally
by means of the macros: log_debug!() and
log_error!(). Logs can be owned/indented
using a log_push!(), which must have an
associated log_pop!(). Assertions and
program halting can be accomplished with
log_assert!() and log_fail!().
*/
use std::{ local_data, os };
use std::rt::io;
use extra::term;
use extra::term::color;
use listener;
/* Default is error. */
pub type Verbosity = u8;
pub static VERBOSITY_NONE: Verbosity = 0;
pub static VERBOSITY_ERROR: Verbosity = 1;
pub static VERBOSITY_INFO: Verbosity = 2;
pub static VERBOSITY_DEBUG: Verbosity = 3;
static tls_key: local_data::Key<@mut Log> = &local_data::Key;
#[macro_escape]
pub mod macros;
pub struct Log
{
verbosity: Verbosity,
push_level: u8, /* The indentation level, for nested logs. */
terminal: term::Terminal,
listener: Option<@mut listener::Listener>,
}
impl Log
{
pub fn initialize()
{
let logger = @mut Log
{
verbosity: match os::getenv("Q3_LOG").take()
{
Some(val) => match val
{
~"0" => VERBOSITY_NONE,
~"1" => VERBOSITY_ERROR,
~"2" => VERBOSITY_INFO,
~"3" => VERBOSITY_DEBUG,
_ => VERBOSITY_INFO, /* default */
},
None => VERBOSITY_INFO /* default */
},
push_level: 0,
terminal: term::Terminal::new((@mut io::stdout()) as @mut io::Writer).unwrap(),
listener: None,
};
local_data::set(tls_key, logger);
log_debug!("Logging system initialized");
}
fn get() -> @mut Log
{
local_data::get(tls_key,
|opt|
{
match opt
{
Some(x) => *x,
None => log_fail!("Singleton not available")
}
})
}
pub fn set_listener(listener: @mut listener::Listener)
{
let logger = Log::get();
logger.listener = Some(listener);
}
pub fn debug(module: &str, message: &str)
{
let logger = Log::get();
if logger.verbosity >= VERBOSITY_DEBUG
{ Log::log(module, message, VERBOSITY_DEBUG); }
}
pub fn info(module: &str, message: &str)
{
let logger = Log::get();
if logger.verbosity >= VERBOSITY_INFO
{ Log::log(module, message, VERBOSITY_INFO); }
}
pub fn error(module: &str, message: &str)
{
let logger = Log::get();
if logger.verbosity >= VERBOSITY_ERROR
{ Log::log(module, message, VERBOSITY_ERROR); }
}
pub fn get_module(file: &str) -> ~str
{
let i = file.find_str("src").unwrap();
file.slice_from(i + 4).replace(".rs", "") /* 4 is strlen("src/") */
}
pub fn push()
{
let logger = Log::get();
logger.push_level += 1;
}
pub fn pop()
|
{
let logger = Log::get();
log_assert!(logger.push_level > 0);
logger.push_level -= 1;
}
fn log(module: &str, message: &str, verbosity: Verbosity)
{
let logger = Log::get();
/* Allow the listener to intervene. */
let log_to_stdout = match logger.listener
{
Some(ref mut listener) =>
{ (*listener).log(module, message, verbosity) }
None => { true },
};
if!log_to_stdout
{ return; }
/* Display the current module. */
logger.terminal.fg(color::BRIGHT_WHITE);
print(module);
logger.terminal.reset();
/* Indent as per the push level. */
for _ in range(0, logger.push_level)
{ print(" "); }
match verbosity
{
VERBOSITY_DEBUG =>
{
logger.terminal.fg(color::BRIGHT_GREEN);
print(" debug => ");
},
VERBOSITY_INFO =>
{
logger.terminal.fg(color::BRIGHT_YELLOW);
print(" info => ");
},
VERBOSITY_ERROR =>
{
logger.terminal.fg(color::BRIGHT_RED);
print(" error => ");
},
val => log_fail!("Invalid verbosity for logging: {}", val)
}
logger.terminal.reset();
println(message);
}
}
|
random_line_split
|
|
log.rs
|
/*
Copyright 2013 Jesse 'Jeaye' Wilkerson
See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: shared/log/log.rs
Author: Jesse 'Jeaye' Wilkerson
Description:
A general logging collection
with differen verbosity levels.
Use Q3_LOG=num where num matches:
0 => Disable all logging
1 => Enable only error logging
2 => Enable info logging (default)
3 => Enable debug logging (very verbose)
Usage of logging utilities is generally
by means of the macros: log_debug!() and
log_error!(). Logs can be owned/indented
using a log_push!(), which must have an
associated log_pop!(). Assertions and
program halting can be accomplished with
log_assert!() and log_fail!().
*/
use std::{ local_data, os };
use std::rt::io;
use extra::term;
use extra::term::color;
use listener;
/* Default is error. */
pub type Verbosity = u8;
pub static VERBOSITY_NONE: Verbosity = 0;
pub static VERBOSITY_ERROR: Verbosity = 1;
pub static VERBOSITY_INFO: Verbosity = 2;
pub static VERBOSITY_DEBUG: Verbosity = 3;
static tls_key: local_data::Key<@mut Log> = &local_data::Key;
#[macro_escape]
pub mod macros;
pub struct Log
{
verbosity: Verbosity,
push_level: u8, /* The indentation level, for nested logs. */
terminal: term::Terminal,
listener: Option<@mut listener::Listener>,
}
impl Log
{
pub fn initialize()
{
let logger = @mut Log
{
verbosity: match os::getenv("Q3_LOG").take()
{
Some(val) => match val
{
~"0" => VERBOSITY_NONE,
~"1" => VERBOSITY_ERROR,
~"2" => VERBOSITY_INFO,
~"3" => VERBOSITY_DEBUG,
_ => VERBOSITY_INFO, /* default */
},
None => VERBOSITY_INFO /* default */
},
push_level: 0,
terminal: term::Terminal::new((@mut io::stdout()) as @mut io::Writer).unwrap(),
listener: None,
};
local_data::set(tls_key, logger);
log_debug!("Logging system initialized");
}
fn get() -> @mut Log
{
local_data::get(tls_key,
|opt|
{
match opt
{
Some(x) => *x,
None => log_fail!("Singleton not available")
}
})
}
pub fn set_listener(listener: @mut listener::Listener)
{
let logger = Log::get();
logger.listener = Some(listener);
}
pub fn debug(module: &str, message: &str)
{
let logger = Log::get();
if logger.verbosity >= VERBOSITY_DEBUG
{ Log::log(module, message, VERBOSITY_DEBUG); }
}
pub fn
|
(module: &str, message: &str)
{
let logger = Log::get();
if logger.verbosity >= VERBOSITY_INFO
{ Log::log(module, message, VERBOSITY_INFO); }
}
pub fn error(module: &str, message: &str)
{
let logger = Log::get();
if logger.verbosity >= VERBOSITY_ERROR
{ Log::log(module, message, VERBOSITY_ERROR); }
}
pub fn get_module(file: &str) -> ~str
{
let i = file.find_str("src").unwrap();
file.slice_from(i + 4).replace(".rs", "") /* 4 is strlen("src/") */
}
pub fn push()
{
let logger = Log::get();
logger.push_level += 1;
}
pub fn pop()
{
let logger = Log::get();
log_assert!(logger.push_level > 0);
logger.push_level -= 1;
}
fn log(module: &str, message: &str, verbosity: Verbosity)
{
let logger = Log::get();
/* Allow the listener to intervene. */
let log_to_stdout = match logger.listener
{
Some(ref mut listener) =>
{ (*listener).log(module, message, verbosity) }
None => { true },
};
if!log_to_stdout
{ return; }
/* Display the current module. */
logger.terminal.fg(color::BRIGHT_WHITE);
print(module);
logger.terminal.reset();
/* Indent as per the push level. */
for _ in range(0, logger.push_level)
{ print(" "); }
match verbosity
{
VERBOSITY_DEBUG =>
{
logger.terminal.fg(color::BRIGHT_GREEN);
print(" debug => ");
},
VERBOSITY_INFO =>
{
logger.terminal.fg(color::BRIGHT_YELLOW);
print(" info => ");
},
VERBOSITY_ERROR =>
{
logger.terminal.fg(color::BRIGHT_RED);
print(" error => ");
},
val => log_fail!("Invalid verbosity for logging: {}", val)
}
logger.terminal.reset();
println(message);
}
}
|
info
|
identifier_name
|
log.rs
|
/*
Copyright 2013 Jesse 'Jeaye' Wilkerson
See licensing in LICENSE file, or at:
http://www.opensource.org/licenses/BSD-3-Clause
File: shared/log/log.rs
Author: Jesse 'Jeaye' Wilkerson
Description:
A general logging collection
with differen verbosity levels.
Use Q3_LOG=num where num matches:
0 => Disable all logging
1 => Enable only error logging
2 => Enable info logging (default)
3 => Enable debug logging (very verbose)
Usage of logging utilities is generally
by means of the macros: log_debug!() and
log_error!(). Logs can be owned/indented
using a log_push!(), which must have an
associated log_pop!(). Assertions and
program halting can be accomplished with
log_assert!() and log_fail!().
*/
use std::{ local_data, os };
use std::rt::io;
use extra::term;
use extra::term::color;
use listener;
/* Default is error. */
pub type Verbosity = u8;
pub static VERBOSITY_NONE: Verbosity = 0;
pub static VERBOSITY_ERROR: Verbosity = 1;
pub static VERBOSITY_INFO: Verbosity = 2;
pub static VERBOSITY_DEBUG: Verbosity = 3;
static tls_key: local_data::Key<@mut Log> = &local_data::Key;
#[macro_escape]
pub mod macros;
pub struct Log
{
verbosity: Verbosity,
push_level: u8, /* The indentation level, for nested logs. */
terminal: term::Terminal,
listener: Option<@mut listener::Listener>,
}
impl Log
{
pub fn initialize()
{
let logger = @mut Log
{
verbosity: match os::getenv("Q3_LOG").take()
{
Some(val) => match val
{
~"0" => VERBOSITY_NONE,
~"1" => VERBOSITY_ERROR,
~"2" => VERBOSITY_INFO,
~"3" => VERBOSITY_DEBUG,
_ => VERBOSITY_INFO, /* default */
},
None => VERBOSITY_INFO /* default */
},
push_level: 0,
terminal: term::Terminal::new((@mut io::stdout()) as @mut io::Writer).unwrap(),
listener: None,
};
local_data::set(tls_key, logger);
log_debug!("Logging system initialized");
}
fn get() -> @mut Log
{
local_data::get(tls_key,
|opt|
{
match opt
{
Some(x) => *x,
None => log_fail!("Singleton not available")
}
})
}
pub fn set_listener(listener: @mut listener::Listener)
{
let logger = Log::get();
logger.listener = Some(listener);
}
pub fn debug(module: &str, message: &str)
{
let logger = Log::get();
if logger.verbosity >= VERBOSITY_DEBUG
{ Log::log(module, message, VERBOSITY_DEBUG); }
}
pub fn info(module: &str, message: &str)
{
let logger = Log::get();
if logger.verbosity >= VERBOSITY_INFO
{ Log::log(module, message, VERBOSITY_INFO); }
}
pub fn error(module: &str, message: &str)
{
let logger = Log::get();
if logger.verbosity >= VERBOSITY_ERROR
{ Log::log(module, message, VERBOSITY_ERROR); }
}
pub fn get_module(file: &str) -> ~str
{
let i = file.find_str("src").unwrap();
file.slice_from(i + 4).replace(".rs", "") /* 4 is strlen("src/") */
}
pub fn push()
{
let logger = Log::get();
logger.push_level += 1;
}
pub fn pop()
|
fn log(module: &str, message: &str, verbosity: Verbosity)
{
let logger = Log::get();
/* Allow the listener to intervene. */
let log_to_stdout = match logger.listener
{
Some(ref mut listener) =>
{ (*listener).log(module, message, verbosity) }
None => { true },
};
if!log_to_stdout
{ return; }
/* Display the current module. */
logger.terminal.fg(color::BRIGHT_WHITE);
print(module);
logger.terminal.reset();
/* Indent as per the push level. */
for _ in range(0, logger.push_level)
{ print(" "); }
match verbosity
{
VERBOSITY_DEBUG =>
{
logger.terminal.fg(color::BRIGHT_GREEN);
print(" debug => ");
},
VERBOSITY_INFO =>
{
logger.terminal.fg(color::BRIGHT_YELLOW);
print(" info => ");
},
VERBOSITY_ERROR =>
{
logger.terminal.fg(color::BRIGHT_RED);
print(" error => ");
},
val => log_fail!("Invalid verbosity for logging: {}", val)
}
logger.terminal.reset();
println(message);
}
}
|
{
let logger = Log::get();
log_assert!(logger.push_level > 0);
logger.push_level -= 1;
}
|
identifier_body
|
extensions.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use canvas_traits::webgl::WebGLError;
use core::iter::FromIterator;
use core::nonzero::NonZero;
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::OESTextureHalfFloatBinding::OESTextureHalfFloatConstants;
use dom::bindings::codegen::Bindings::WebGLRenderingContextBinding::WebGLRenderingContextConstants as constants;
use dom::bindings::js::Root;
use dom::bindings::trace::JSTraceable;
use dom::webglrenderingcontext::WebGLRenderingContext;
use gleam::gl::GLenum;
use heapsize::HeapSizeOf;
use js::jsapi::{JSContext, JSObject};
use js::jsval::JSVal;
use ref_filter_map::ref_filter_map;
use std::cell::Ref;
use std::collections::{HashMap, HashSet};
use super::{ext, WebGLExtension};
use super::wrapper::{WebGLExtensionWrapper, TypedWebGLExtensionWrapper};
// Data types that are implemented for texImage2D and texSubImage2D in WebGLRenderingContext
// but must trigger a InvalidValue error until the related WebGL Extensions are enabled.
// Example: https://www.khronos.org/registry/webgl/extensions/OES_texture_float/
const DEFAULT_DISABLED_TEX_TYPES: [GLenum; 2] = [
constants::FLOAT, OESTextureHalfFloatConstants::HALF_FLOAT_OES
];
// Data types that are implemented for textures in WebGLRenderingContext
// but not allowed to use with linear filtering until the related WebGL Extensions are enabled.
// Example: https://www.khronos.org/registry/webgl/extensions/OES_texture_float_linear/
const DEFAULT_NOT_FILTERABLE_TEX_TYPES: [GLenum; 2] = [
constants::FLOAT, OESTextureHalfFloatConstants::HALF_FLOAT_OES
];
/// WebGL features that are enabled/disabled by WebGL Extensions.
#[derive(JSTraceable, HeapSizeOf)]
struct WebGLExtensionFeatures {
gl_extensions: HashSet<String>,
disabled_tex_types: HashSet<GLenum>,
not_filterable_tex_types: HashSet<GLenum>,
effective_tex_internal_formats: HashMap<TexFormatType, u32>,
query_parameter_handlers: HashMap<GLenum, WebGLQueryParameterHandler>
}
impl Default for WebGLExtensionFeatures {
fn default() -> WebGLExtensionFeatures {
WebGLExtensionFeatures {
gl_extensions: HashSet::new(),
disabled_tex_types: DEFAULT_DISABLED_TEX_TYPES.iter().cloned().collect(),
not_filterable_tex_types: DEFAULT_NOT_FILTERABLE_TEX_TYPES.iter().cloned().collect(),
effective_tex_internal_formats: HashMap::new(),
query_parameter_handlers: HashMap::new()
}
}
}
/// Handles the list of implemented, supported and enabled WebGL extensions.
#[must_root]
#[derive(JSTraceable, HeapSizeOf)]
pub struct WebGLExtensions {
extensions: DOMRefCell<HashMap<String, Box<WebGLExtensionWrapper>>>,
features: DOMRefCell<WebGLExtensionFeatures>,
}
impl WebGLExtensions {
pub fn new() -> WebGLExtensions {
Self {
extensions: DOMRefCell::new(HashMap::new()),
features: DOMRefCell::new(Default::default())
}
}
pub fn init_once<F>(&self, cb: F) where F: FnOnce() -> String {
if self.extensions.borrow().len() == 0 {
let gl_str = cb();
self.features.borrow_mut().gl_extensions = HashSet::from_iter(gl_str.split(&[',',''][..])
.map(|s| s.into()));
self.register_all_extensions();
}
}
pub fn register<T:'static + WebGLExtension + JSTraceable + HeapSizeOf>(&self) {
let name = T::name().to_uppercase();
self.extensions.borrow_mut().insert(name, box TypedWebGLExtensionWrapper::<T>::new());
}
pub fn get_suported_extensions(&self) -> Vec<&'static str> {
self.extensions.borrow().iter()
.filter(|ref v| v.1.is_supported(&self))
.map(|ref v| v.1.name())
.collect()
}
pub fn get_or_init_extension(&self, name: &str, ctx: &WebGLRenderingContext) -> Option<NonZero<*mut JSObject>> {
let name = name.to_uppercase();
self.extensions.borrow().get(&name).and_then(|extension| {
if extension.is_supported(self)
|
else {
None
}
})
}
pub fn get_dom_object<T>(&self) -> Option<Root<T::Extension>>
where T:'static + WebGLExtension + JSTraceable + HeapSizeOf {
let name = T::name().to_uppercase();
self.extensions.borrow().get(&name).and_then(|extension| {
extension.as_any().downcast_ref::<TypedWebGLExtensionWrapper<T>>().and_then(|extension| {
extension.dom_object()
})
})
}
pub fn supports_gl_extension(&self, name: &str) -> bool {
self.features.borrow().gl_extensions.contains(name)
}
pub fn supports_any_gl_extension(&self, names: &[&str]) -> bool {
let features = self.features.borrow();
names.iter().any(|name| features.gl_extensions.contains(*name))
}
pub fn enable_tex_type(&self, data_type: GLenum) {
self.features.borrow_mut().disabled_tex_types.remove(&data_type);
}
pub fn is_tex_type_enabled(&self, data_type: GLenum) -> bool {
self.features.borrow().disabled_tex_types.get(&data_type).is_none()
}
pub fn add_effective_tex_internal_format(&self,
source_internal_format: u32,
source_data_type: u32,
effective_internal_format: u32)
{
let format = TexFormatType(source_internal_format, source_data_type);
self.features.borrow_mut().effective_tex_internal_formats.insert(format,
effective_internal_format);
}
pub fn get_effective_tex_internal_format(&self,
source_internal_format: u32,
source_data_type: u32) -> u32 {
let format = TexFormatType(source_internal_format, source_data_type);
*(self.features.borrow().effective_tex_internal_formats.get(&format)
.unwrap_or(&source_internal_format))
}
pub fn enable_filterable_tex_type(&self, text_data_type: GLenum) {
self.features.borrow_mut().not_filterable_tex_types.remove(&text_data_type);
}
pub fn is_filterable(&self, text_data_type: u32) -> bool {
self.features.borrow().not_filterable_tex_types.get(&text_data_type).is_none()
}
pub fn add_query_parameter_handler(&self, name: GLenum, f: Box<WebGLQueryParameterFunc>) {
let handler = WebGLQueryParameterHandler {
func: f
};
self.features.borrow_mut().query_parameter_handlers.insert(name, handler);
}
pub fn get_query_parameter_handler(&self, name: GLenum) -> Option<Ref<Box<WebGLQueryParameterFunc>>> {
ref_filter_map(self.features.borrow(), |features| {
features.query_parameter_handlers.get(&name).map(|item| &item.func)
})
}
fn register_all_extensions(&self) {
self.register::<ext::oestexturefloat::OESTextureFloat>();
self.register::<ext::oestexturefloatlinear::OESTextureFloatLinear>();
self.register::<ext::oestexturehalffloat::OESTextureHalfFloat>();
self.register::<ext::oestexturehalffloatlinear::OESTextureHalfFloatLinear>();
self.register::<ext::oesvertexarrayobject::OESVertexArrayObject>();
}
}
// Helper structs
#[derive(JSTraceable, HeapSizeOf, PartialEq, Eq, Hash)]
struct TexFormatType(u32, u32);
type WebGLQueryParameterFunc = Fn(*mut JSContext, &WebGLRenderingContext)
-> Result<JSVal, WebGLError>;
#[derive(HeapSizeOf)]
struct WebGLQueryParameterHandler {
#[ignore_heap_size_of = "Closures are hard"]
func: Box<WebGLQueryParameterFunc>
}
unsafe_no_jsmanaged_fields!(WebGLQueryParameterHandler);
|
{
Some(extension.instance_or_init(ctx, self))
}
|
conditional_block
|
extensions.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use canvas_traits::webgl::WebGLError;
use core::iter::FromIterator;
use core::nonzero::NonZero;
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::OESTextureHalfFloatBinding::OESTextureHalfFloatConstants;
use dom::bindings::codegen::Bindings::WebGLRenderingContextBinding::WebGLRenderingContextConstants as constants;
use dom::bindings::js::Root;
use dom::bindings::trace::JSTraceable;
use dom::webglrenderingcontext::WebGLRenderingContext;
use gleam::gl::GLenum;
use heapsize::HeapSizeOf;
use js::jsapi::{JSContext, JSObject};
use js::jsval::JSVal;
use ref_filter_map::ref_filter_map;
use std::cell::Ref;
use std::collections::{HashMap, HashSet};
use super::{ext, WebGLExtension};
use super::wrapper::{WebGLExtensionWrapper, TypedWebGLExtensionWrapper};
// Data types that are implemented for texImage2D and texSubImage2D in WebGLRenderingContext
// but must trigger a InvalidValue error until the related WebGL Extensions are enabled.
// Example: https://www.khronos.org/registry/webgl/extensions/OES_texture_float/
const DEFAULT_DISABLED_TEX_TYPES: [GLenum; 2] = [
constants::FLOAT, OESTextureHalfFloatConstants::HALF_FLOAT_OES
];
// Data types that are implemented for textures in WebGLRenderingContext
// but not allowed to use with linear filtering until the related WebGL Extensions are enabled.
// Example: https://www.khronos.org/registry/webgl/extensions/OES_texture_float_linear/
const DEFAULT_NOT_FILTERABLE_TEX_TYPES: [GLenum; 2] = [
constants::FLOAT, OESTextureHalfFloatConstants::HALF_FLOAT_OES
];
/// WebGL features that are enabled/disabled by WebGL Extensions.
#[derive(JSTraceable, HeapSizeOf)]
struct WebGLExtensionFeatures {
gl_extensions: HashSet<String>,
disabled_tex_types: HashSet<GLenum>,
not_filterable_tex_types: HashSet<GLenum>,
effective_tex_internal_formats: HashMap<TexFormatType, u32>,
query_parameter_handlers: HashMap<GLenum, WebGLQueryParameterHandler>
}
impl Default for WebGLExtensionFeatures {
fn default() -> WebGLExtensionFeatures {
WebGLExtensionFeatures {
gl_extensions: HashSet::new(),
disabled_tex_types: DEFAULT_DISABLED_TEX_TYPES.iter().cloned().collect(),
not_filterable_tex_types: DEFAULT_NOT_FILTERABLE_TEX_TYPES.iter().cloned().collect(),
effective_tex_internal_formats: HashMap::new(),
query_parameter_handlers: HashMap::new()
}
}
}
/// Handles the list of implemented, supported and enabled WebGL extensions.
#[must_root]
#[derive(JSTraceable, HeapSizeOf)]
pub struct WebGLExtensions {
extensions: DOMRefCell<HashMap<String, Box<WebGLExtensionWrapper>>>,
features: DOMRefCell<WebGLExtensionFeatures>,
}
impl WebGLExtensions {
pub fn new() -> WebGLExtensions {
Self {
extensions: DOMRefCell::new(HashMap::new()),
features: DOMRefCell::new(Default::default())
}
}
pub fn init_once<F>(&self, cb: F) where F: FnOnce() -> String {
if self.extensions.borrow().len() == 0 {
let gl_str = cb();
self.features.borrow_mut().gl_extensions = HashSet::from_iter(gl_str.split(&[',',''][..])
.map(|s| s.into()));
self.register_all_extensions();
}
}
pub fn register<T:'static + WebGLExtension + JSTraceable + HeapSizeOf>(&self) {
let name = T::name().to_uppercase();
self.extensions.borrow_mut().insert(name, box TypedWebGLExtensionWrapper::<T>::new());
}
pub fn get_suported_extensions(&self) -> Vec<&'static str> {
self.extensions.borrow().iter()
.filter(|ref v| v.1.is_supported(&self))
.map(|ref v| v.1.name())
.collect()
}
pub fn get_or_init_extension(&self, name: &str, ctx: &WebGLRenderingContext) -> Option<NonZero<*mut JSObject>> {
let name = name.to_uppercase();
self.extensions.borrow().get(&name).and_then(|extension| {
if extension.is_supported(self) {
Some(extension.instance_or_init(ctx, self))
} else {
None
}
})
}
pub fn get_dom_object<T>(&self) -> Option<Root<T::Extension>>
where T:'static + WebGLExtension + JSTraceable + HeapSizeOf {
let name = T::name().to_uppercase();
self.extensions.borrow().get(&name).and_then(|extension| {
extension.as_any().downcast_ref::<TypedWebGLExtensionWrapper<T>>().and_then(|extension| {
extension.dom_object()
})
})
}
pub fn supports_gl_extension(&self, name: &str) -> bool {
self.features.borrow().gl_extensions.contains(name)
}
pub fn supports_any_gl_extension(&self, names: &[&str]) -> bool {
let features = self.features.borrow();
names.iter().any(|name| features.gl_extensions.contains(*name))
}
pub fn enable_tex_type(&self, data_type: GLenum) {
self.features.borrow_mut().disabled_tex_types.remove(&data_type);
}
pub fn is_tex_type_enabled(&self, data_type: GLenum) -> bool {
self.features.borrow().disabled_tex_types.get(&data_type).is_none()
}
pub fn add_effective_tex_internal_format(&self,
source_internal_format: u32,
source_data_type: u32,
effective_internal_format: u32)
{
let format = TexFormatType(source_internal_format, source_data_type);
self.features.borrow_mut().effective_tex_internal_formats.insert(format,
effective_internal_format);
}
pub fn
|
(&self,
source_internal_format: u32,
source_data_type: u32) -> u32 {
let format = TexFormatType(source_internal_format, source_data_type);
*(self.features.borrow().effective_tex_internal_formats.get(&format)
.unwrap_or(&source_internal_format))
}
pub fn enable_filterable_tex_type(&self, text_data_type: GLenum) {
self.features.borrow_mut().not_filterable_tex_types.remove(&text_data_type);
}
pub fn is_filterable(&self, text_data_type: u32) -> bool {
self.features.borrow().not_filterable_tex_types.get(&text_data_type).is_none()
}
pub fn add_query_parameter_handler(&self, name: GLenum, f: Box<WebGLQueryParameterFunc>) {
let handler = WebGLQueryParameterHandler {
func: f
};
self.features.borrow_mut().query_parameter_handlers.insert(name, handler);
}
pub fn get_query_parameter_handler(&self, name: GLenum) -> Option<Ref<Box<WebGLQueryParameterFunc>>> {
ref_filter_map(self.features.borrow(), |features| {
features.query_parameter_handlers.get(&name).map(|item| &item.func)
})
}
fn register_all_extensions(&self) {
self.register::<ext::oestexturefloat::OESTextureFloat>();
self.register::<ext::oestexturefloatlinear::OESTextureFloatLinear>();
self.register::<ext::oestexturehalffloat::OESTextureHalfFloat>();
self.register::<ext::oestexturehalffloatlinear::OESTextureHalfFloatLinear>();
self.register::<ext::oesvertexarrayobject::OESVertexArrayObject>();
}
}
// Helper structs
#[derive(JSTraceable, HeapSizeOf, PartialEq, Eq, Hash)]
struct TexFormatType(u32, u32);
type WebGLQueryParameterFunc = Fn(*mut JSContext, &WebGLRenderingContext)
-> Result<JSVal, WebGLError>;
#[derive(HeapSizeOf)]
struct WebGLQueryParameterHandler {
#[ignore_heap_size_of = "Closures are hard"]
func: Box<WebGLQueryParameterFunc>
}
unsafe_no_jsmanaged_fields!(WebGLQueryParameterHandler);
|
get_effective_tex_internal_format
|
identifier_name
|
extensions.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use canvas_traits::webgl::WebGLError;
use core::iter::FromIterator;
use core::nonzero::NonZero;
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::OESTextureHalfFloatBinding::OESTextureHalfFloatConstants;
use dom::bindings::codegen::Bindings::WebGLRenderingContextBinding::WebGLRenderingContextConstants as constants;
use dom::bindings::js::Root;
use dom::bindings::trace::JSTraceable;
use dom::webglrenderingcontext::WebGLRenderingContext;
use gleam::gl::GLenum;
use heapsize::HeapSizeOf;
use js::jsapi::{JSContext, JSObject};
use js::jsval::JSVal;
use ref_filter_map::ref_filter_map;
use std::cell::Ref;
use std::collections::{HashMap, HashSet};
use super::{ext, WebGLExtension};
use super::wrapper::{WebGLExtensionWrapper, TypedWebGLExtensionWrapper};
// Data types that are implemented for texImage2D and texSubImage2D in WebGLRenderingContext
// but must trigger a InvalidValue error until the related WebGL Extensions are enabled.
// Example: https://www.khronos.org/registry/webgl/extensions/OES_texture_float/
const DEFAULT_DISABLED_TEX_TYPES: [GLenum; 2] = [
constants::FLOAT, OESTextureHalfFloatConstants::HALF_FLOAT_OES
];
// Data types that are implemented for textures in WebGLRenderingContext
// but not allowed to use with linear filtering until the related WebGL Extensions are enabled.
// Example: https://www.khronos.org/registry/webgl/extensions/OES_texture_float_linear/
const DEFAULT_NOT_FILTERABLE_TEX_TYPES: [GLenum; 2] = [
constants::FLOAT, OESTextureHalfFloatConstants::HALF_FLOAT_OES
];
/// WebGL features that are enabled/disabled by WebGL Extensions.
#[derive(JSTraceable, HeapSizeOf)]
struct WebGLExtensionFeatures {
gl_extensions: HashSet<String>,
disabled_tex_types: HashSet<GLenum>,
not_filterable_tex_types: HashSet<GLenum>,
effective_tex_internal_formats: HashMap<TexFormatType, u32>,
query_parameter_handlers: HashMap<GLenum, WebGLQueryParameterHandler>
}
impl Default for WebGLExtensionFeatures {
fn default() -> WebGLExtensionFeatures {
WebGLExtensionFeatures {
gl_extensions: HashSet::new(),
disabled_tex_types: DEFAULT_DISABLED_TEX_TYPES.iter().cloned().collect(),
not_filterable_tex_types: DEFAULT_NOT_FILTERABLE_TEX_TYPES.iter().cloned().collect(),
effective_tex_internal_formats: HashMap::new(),
query_parameter_handlers: HashMap::new()
}
}
}
/// Handles the list of implemented, supported and enabled WebGL extensions.
#[must_root]
#[derive(JSTraceable, HeapSizeOf)]
pub struct WebGLExtensions {
extensions: DOMRefCell<HashMap<String, Box<WebGLExtensionWrapper>>>,
features: DOMRefCell<WebGLExtensionFeatures>,
}
impl WebGLExtensions {
pub fn new() -> WebGLExtensions {
Self {
extensions: DOMRefCell::new(HashMap::new()),
features: DOMRefCell::new(Default::default())
}
}
pub fn init_once<F>(&self, cb: F) where F: FnOnce() -> String {
if self.extensions.borrow().len() == 0 {
let gl_str = cb();
self.features.borrow_mut().gl_extensions = HashSet::from_iter(gl_str.split(&[',',''][..])
.map(|s| s.into()));
self.register_all_extensions();
}
}
pub fn register<T:'static + WebGLExtension + JSTraceable + HeapSizeOf>(&self) {
let name = T::name().to_uppercase();
self.extensions.borrow_mut().insert(name, box TypedWebGLExtensionWrapper::<T>::new());
}
pub fn get_suported_extensions(&self) -> Vec<&'static str> {
self.extensions.borrow().iter()
.filter(|ref v| v.1.is_supported(&self))
.map(|ref v| v.1.name())
.collect()
}
pub fn get_or_init_extension(&self, name: &str, ctx: &WebGLRenderingContext) -> Option<NonZero<*mut JSObject>> {
let name = name.to_uppercase();
self.extensions.borrow().get(&name).and_then(|extension| {
if extension.is_supported(self) {
Some(extension.instance_or_init(ctx, self))
} else {
None
}
})
}
pub fn get_dom_object<T>(&self) -> Option<Root<T::Extension>>
where T:'static + WebGLExtension + JSTraceable + HeapSizeOf {
let name = T::name().to_uppercase();
self.extensions.borrow().get(&name).and_then(|extension| {
extension.as_any().downcast_ref::<TypedWebGLExtensionWrapper<T>>().and_then(|extension| {
extension.dom_object()
})
})
}
pub fn supports_gl_extension(&self, name: &str) -> bool {
self.features.borrow().gl_extensions.contains(name)
}
pub fn supports_any_gl_extension(&self, names: &[&str]) -> bool {
let features = self.features.borrow();
names.iter().any(|name| features.gl_extensions.contains(*name))
}
pub fn enable_tex_type(&self, data_type: GLenum) {
self.features.borrow_mut().disabled_tex_types.remove(&data_type);
}
|
pub fn add_effective_tex_internal_format(&self,
source_internal_format: u32,
source_data_type: u32,
effective_internal_format: u32)
{
let format = TexFormatType(source_internal_format, source_data_type);
self.features.borrow_mut().effective_tex_internal_formats.insert(format,
effective_internal_format);
}
pub fn get_effective_tex_internal_format(&self,
source_internal_format: u32,
source_data_type: u32) -> u32 {
let format = TexFormatType(source_internal_format, source_data_type);
*(self.features.borrow().effective_tex_internal_formats.get(&format)
.unwrap_or(&source_internal_format))
}
pub fn enable_filterable_tex_type(&self, text_data_type: GLenum) {
self.features.borrow_mut().not_filterable_tex_types.remove(&text_data_type);
}
pub fn is_filterable(&self, text_data_type: u32) -> bool {
self.features.borrow().not_filterable_tex_types.get(&text_data_type).is_none()
}
pub fn add_query_parameter_handler(&self, name: GLenum, f: Box<WebGLQueryParameterFunc>) {
let handler = WebGLQueryParameterHandler {
func: f
};
self.features.borrow_mut().query_parameter_handlers.insert(name, handler);
}
pub fn get_query_parameter_handler(&self, name: GLenum) -> Option<Ref<Box<WebGLQueryParameterFunc>>> {
ref_filter_map(self.features.borrow(), |features| {
features.query_parameter_handlers.get(&name).map(|item| &item.func)
})
}
fn register_all_extensions(&self) {
self.register::<ext::oestexturefloat::OESTextureFloat>();
self.register::<ext::oestexturefloatlinear::OESTextureFloatLinear>();
self.register::<ext::oestexturehalffloat::OESTextureHalfFloat>();
self.register::<ext::oestexturehalffloatlinear::OESTextureHalfFloatLinear>();
self.register::<ext::oesvertexarrayobject::OESVertexArrayObject>();
}
}
// Helper structs
#[derive(JSTraceable, HeapSizeOf, PartialEq, Eq, Hash)]
struct TexFormatType(u32, u32);
type WebGLQueryParameterFunc = Fn(*mut JSContext, &WebGLRenderingContext)
-> Result<JSVal, WebGLError>;
#[derive(HeapSizeOf)]
struct WebGLQueryParameterHandler {
#[ignore_heap_size_of = "Closures are hard"]
func: Box<WebGLQueryParameterFunc>
}
unsafe_no_jsmanaged_fields!(WebGLQueryParameterHandler);
|
pub fn is_tex_type_enabled(&self, data_type: GLenum) -> bool {
self.features.borrow().disabled_tex_types.get(&data_type).is_none()
}
|
random_line_split
|
char.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Character manipulation (`char` type, Unicode Scalar Value)
//!
//! This module provides the `Char` trait, as well as its implementation
//! for the primitive `char` type, in order to allow basic character manipulation.
//!
//! A `char` actually represents a
//! *[Unicode Scalar Value](http://www.unicode.org/glossary/#unicode_scalar_value)*,
//! as it can contain any Unicode code point except high-surrogate and
//! low-surrogate code points.
//!
//! As such, only values in the ranges \[0x0,0xD7FF\] and \[0xE000,0x10FFFF\]
//! (inclusive) are allowed. A `char` can always be safely cast to a `u32`;
//! however the converse is not always true due to the above range limits
//! and, as such, should be performed via the `from_u32` function..
#![allow(non_snake_case_functions)]
#![doc(primitive = "char")]
use mem::transmute;
use option::{None, Option, Some};
use iter::{Iterator, range_step};
use unicode::{derived_property, property, general_category, conversions};
/// Returns the canonical decomposition of a character.
pub use unicode::normalization::decompose_canonical;
/// Returns the compatibility decomposition of a character.
pub use unicode::normalization::decompose_compatible;
// UTF-8 ranges and tags for encoding characters
static TAG_CONT: u8 = 0b1000_0000u8;
static TAG_TWO_B: u8 = 0b1100_0000u8;
static TAG_THREE_B: u8 = 0b1110_0000u8;
static TAG_FOUR_B: u8 = 0b1111_0000u8;
static MAX_ONE_B: u32 = 0x80u32;
static MAX_TWO_B: u32 = 0x800u32;
static MAX_THREE_B: u32 = 0x10000u32;
static MAX_FOUR_B: u32 = 0x200000u32;
/*
Lu Uppercase_Letter an uppercase letter
Ll Lowercase_Letter a lowercase letter
Lt Titlecase_Letter a digraphic character, with first part uppercase
Lm Modifier_Letter a modifier letter
Lo Other_Letter other letters, including syllables and ideographs
Mn Nonspacing_Mark a nonspacing combining mark (zero advance width)
Mc Spacing_Mark a spacing combining mark (positive advance width)
Me Enclosing_Mark an enclosing combining mark
Nd Decimal_Number a decimal digit
Nl Letter_Number a letterlike numeric character
No Other_Number a numeric character of other type
Pc Connector_Punctuation a connecting punctuation mark, like a tie
Pd Dash_Punctuation a dash or hyphen punctuation mark
Ps Open_Punctuation an opening punctuation mark (of a pair)
Pe Close_Punctuation a closing punctuation mark (of a pair)
Pi Initial_Punctuation an initial quotation mark
Pf Final_Punctuation a final quotation mark
Po Other_Punctuation a punctuation mark of other type
Sm Math_Symbol a symbol of primarily mathematical use
Sc Currency_Symbol a currency sign
Sk Modifier_Symbol a non-letterlike modifier symbol
So Other_Symbol a symbol of other type
Zs Space_Separator a space character (of various non-zero widths)
Zl Line_Separator U+2028 LINE SEPARATOR only
Zp Paragraph_Separator U+2029 PARAGRAPH SEPARATOR only
Cc Control a C0 or C1 control code
Cf Format a format control character
Cs Surrogate a surrogate code point
Co Private_Use a private-use character
Cn Unassigned a reserved unassigned code point or a noncharacter
*/
/// The highest valid code point
pub static MAX: char = '\U0010ffff';
/// Converts from `u32` to a `char`
#[inline]
pub fn from_u32(i: u32) -> Option<char> {
// catch out-of-bounds and surrogates
if (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF) {
None
} else {
Some(unsafe { transmute(i) })
}
}
/// Returns whether the specified `char` is considered a Unicode alphabetic
/// code point
pub fn is_alphabetic(c: char) -> bool { derived_property::Alphabetic(c) }
/// Returns whether the specified `char` satisfies the 'XID_Start' Unicode property
///
/// 'XID_Start' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to ID_Start but modified for closure under NFKx.
pub fn is_XID_start(c: char) -> bool { derived_property::XID_Start(c) }
/// Returns whether the specified `char` satisfies the 'XID_Continue' Unicode property
///
/// 'XID_Continue' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to 'ID_Continue' but modified for closure under NFKx.
pub fn is_XID_continue(c: char) -> bool { derived_property::XID_Continue(c) }
///
/// Indicates whether a `char` is in lower case
///
/// This is defined according to the terms of the Unicode Derived Core Property 'Lowercase'.
///
#[inline]
pub fn is_lowercase(c: char) -> bool { derived_property::Lowercase(c) }
///
/// Indicates whether a `char` is in upper case
///
/// This is defined according to the terms of the Unicode Derived Core Property 'Uppercase'.
///
#[inline]
pub fn is_uppercase(c: char) -> bool { derived_property::Uppercase(c) }
///
/// Indicates whether a `char` is whitespace
///
/// Whitespace is defined in terms of the Unicode Property 'White_Space'.
///
#[inline]
pub fn is_whitespace(c: char) -> bool {
// As an optimization ASCII whitespace characters are checked separately
c ==''
|| ('\x09' <= c && c <= '\x0d')
|| property::White_Space(c)
}
///
/// Indicates whether a `char` is alphanumeric
///
/// Alphanumericness is defined in terms of the Unicode General Categories
/// 'Nd', 'Nl', 'No' and the Derived Core Property 'Alphabetic'.
///
#[inline]
pub fn is_alphanumeric(c: char) -> bool {
derived_property::Alphabetic(c)
|| general_category::Nd(c)
|| general_category::Nl(c)
|| general_category::No(c)
}
///
/// Indicates whether a `char` is a control code point
///
/// Control code points are defined in terms of the Unicode General Category
/// 'Cc'.
///
#[inline]
pub fn is_control(c: char) -> bool { general_category::Cc(c) }
/// Indicates whether the `char` is numeric (Nd, Nl, or No)
#[inline]
pub fn is_digit(c: char) -> bool {
general_category::Nd(c)
|| general_category::Nl(c)
|| general_category::No(c)
}
///
/// Checks if a `char` parses as a numeric digit in the given radix
///
/// Compared to `is_digit()`, this function only recognizes the
/// characters `0-9`, `a-z` and `A-Z`.
///
/// # Return value
///
/// Returns `true` if `c` is a valid digit under `radix`, and `false`
/// otherwise.
///
/// # Failure
///
/// Fails if given a `radix` > 36.
///
/// # Note
///
/// This just wraps `to_digit()`.
///
#[inline]
pub fn is_digit_radix(c: char, radix: uint) -> bool {
match to_digit(c, radix) {
Some(_) => true,
None => false,
}
}
///
/// Converts a `char` to the corresponding digit
///
/// # Return value
///
/// If `c` is between '0' and '9', the corresponding value
/// between 0 and 9. If `c` is 'a' or 'A', 10. If `c` is
/// 'b' or 'B', 11, etc. Returns none if the `char` does not
/// refer to a digit in the given radix.
///
/// # Failure
///
/// Fails if given a `radix` outside the range `[0..36]`.
///
#[inline]
pub fn to_digit(c: char, radix: uint) -> Option<uint> {
if radix > 36 {
fail!("to_digit: radix is too high (maximum 36)");
}
let val = match c {
'0'.. '9' => c as uint - ('0' as uint),
'a'.. 'z' => c as uint + 10u - ('a' as uint),
'A'.. 'Z' => c as uint + 10u - ('A' as uint),
_ => return None,
};
if val < radix { Some(val) }
else { None }
}
/// Convert a char to its uppercase equivalent
///
/// The case-folding performed is the common or simple mapping:
/// it maps one unicode codepoint (one char in Rust) to its uppercase equivalent according
/// to the Unicode database at ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
/// The additional SpecialCasing.txt is not considered here, as it expands to multiple
/// codepoints in some cases.
///
/// A full reference can be found here
/// http://www.unicode.org/versions/Unicode4.0.0/ch03.pdf#G33992
///
/// # Return value
///
/// Returns the char itself if no conversion was made
#[inline]
pub fn to_uppercase(c: char) -> char {
conversions::to_upper(c)
}
/// Convert a char to its lowercase equivalent
///
/// The case-folding performed is the common or simple mapping
/// see `to_uppercase` for references and more information
///
/// # Return value
///
/// Returns the char itself if no conversion if possible
#[inline]
pub fn to_lowercase(c: char) -> char {
conversions::to_lower(c)
}
///
/// Converts a number to the character representing it
///
/// # Return value
///
/// Returns `Some(char)` if `num` represents one digit under `radix`,
/// using one character of `0-9` or `a-z`, or `None` if it doesn't.
///
/// # Failure
///
/// Fails if given an `radix` > 36.
///
#[inline]
pub fn from_digit(num: uint, radix: uint) -> Option<char> {
if radix > 36 {
fail!("from_digit: radix is to high (maximum 36)");
}
if num < radix {
unsafe {
if num < 10 {
Some(transmute(('0' as uint + num) as u32))
} else {
Some(transmute(('a' as uint + num - 10u) as u32))
}
}
} else {
None
}
}
///
/// Returns the hexadecimal Unicode escape of a `char`
///
/// The rules are as follows:
///
/// - chars in [0,0xff] get 2-digit escapes: `\\xNN`
/// - chars in [0x100,0xffff] get 4-digit escapes: `\\uNNNN`
/// - chars above 0x10000 get 8-digit escapes: `\\UNNNNNNNN`
///
pub fn escape_unicode(c: char, f: |char|) {
// avoid calling str::to_str_radix because we don't really need to allocate
// here.
f('\\');
let pad = match () {
_ if c <= '\xff' => { f('x'); 2 }
_ if c <= '\uffff' => { f('u'); 4 }
_ => { f('U'); 8 }
};
for offset in range_step::<i32>(4 * (pad - 1), -1, -4) {
let offset = offset as uint;
unsafe {
match ((c as i32) >> offset) & 0xf {
i @ 0.. 9 => { f(transmute('0' as i32 + i)); }
i => { f(transmute('a' as i32 + (i - 10))); }
}
}
}
}
///
/// Returns a 'default' ASCII and C++11-like literal escape of a `char`
///
/// The default is chosen with a bias toward producing literals that are
/// legal in a variety of languages, including C++11 and similar C-family
/// languages. The exact rules are:
///
/// - Tab, CR and LF are escaped as '\t', '\r' and '\n' respectively.
/// - Single-quote, double-quote and backslash chars are backslash-escaped.
/// - Any other chars in the range [0x20,0x7e] are not escaped.
/// - Any other chars are given hex unicode escapes; see `escape_unicode`.
///
pub fn escape_default(c: char, f: |char|) {
match c {
'\t' => { f('\\'); f('t'); }
'\r' => { f('\\'); f('r'); }
'\n' => { f('\\'); f('n'); }
'\\' => { f('\\'); f('\\'); }
'\'' => { f('\\'); f('\''); }
'"' => { f('\\'); f('"'); }
'\x20'.. '\x7e' => { f(c); }
_ => c.escape_unicode(f),
}
}
/// Returns the amount of bytes this `char` would need if encoded in UTF-8
pub fn len_utf8_bytes(c: char) -> uint {
let code = c as u32;
match () {
_ if code < MAX_ONE_B => 1u,
_ if code < MAX_TWO_B => 2u,
_ if code < MAX_THREE_B => 3u,
_ if code < MAX_FOUR_B => 4u,
_ => fail!("invalid character!"),
}
}
/// Useful functions for Unicode characters.
pub trait Char {
/// Returns whether the specified character is considered a Unicode
/// alphabetic code point.
fn is_alphabetic(&self) -> bool;
/// Returns whether the specified character satisfies the 'XID_Start'
/// Unicode property.
///
/// 'XID_Start' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to ID_Start but modified for closure under NFKx.
fn is_XID_start(&self) -> bool;
/// Returns whether the specified `char` satisfies the 'XID_Continue'
/// Unicode property.
///
/// 'XID_Continue' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to 'ID_Continue' but modified for closure under NFKx.
fn is_XID_continue(&self) -> bool;
/// Indicates whether a character is in lowercase.
///
/// This is defined according to the terms of the Unicode Derived Core
/// Property `Lowercase`.
fn is_lowercase(&self) -> bool;
/// Indicates whether a character is in uppercase.
///
/// This is defined according to the terms of the Unicode Derived Core
/// Property `Uppercase`.
fn is_uppercase(&self) -> bool;
/// Indicates whether a character is whitespace.
///
/// Whitespace is defined in terms of the Unicode Property `White_Space`.
fn is_whitespace(&self) -> bool;
/// Indicates whether a character is alphanumeric.
///
/// Alphanumericness is defined in terms of the Unicode General Categories
/// 'Nd', 'Nl', 'No' and the Derived Core Property 'Alphabetic'.
fn is_alphanumeric(&self) -> bool;
/// Indicates whether a character is a control code point.
///
/// Control code points are defined in terms of the Unicode General
/// Category `Cc`.
fn is_control(&self) -> bool;
/// Indicates whether the character is numeric (Nd, Nl, or No).
fn is_digit(&self) -> bool;
/// Checks if a `char` parses as a numeric digit in the given radix.
///
/// Compared to `is_digit()`, this function only recognizes the characters
/// `0-9`, `a-z` and `A-Z`.
///
/// # Return value
///
/// Returns `true` if `c` is a valid digit under `radix`, and `false`
/// otherwise.
///
/// # Failure
///
/// Fails if given a radix > 36.
fn is_digit_radix(&self, radix: uint) -> bool;
/// Converts a character to the corresponding digit.
///
/// # Return value
///
/// If `c` is between '0' and '9', the corresponding value between 0 and
/// 9. If `c` is 'a' or 'A', 10. If `c` is 'b' or 'B', 11, etc. Returns
/// none if the character does not refer to a digit in the given radix.
///
/// # Failure
///
/// Fails if given a radix outside the range [0..36].
fn to_digit(&self, radix: uint) -> Option<uint>;
/// Converts a character to its lowercase equivalent.
///
/// The case-folding performed is the common or simple mapping. See
/// `to_uppercase()` for references and more information.
///
/// # Return value
///
/// Returns the lowercase equivalent of the character, or the character
/// itself if no conversion is possible.
fn to_lowercase(&self) -> char;
/// Converts a character to its uppercase equivalent.
///
/// The case-folding performed is the common or simple mapping: it maps
/// one unicode codepoint (one character in Rust) to its uppercase
/// equivalent according to the Unicode database [1]. The additional
/// `SpecialCasing.txt` is not considered here, as it expands to multiple
/// codepoints in some cases.
///
/// A full reference can be found here [2].
///
/// # Return value
///
/// Returns the uppercase equivalent of the character, or the character
/// itself if no conversion was made.
///
/// [1]: ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
///
/// [2]: http://www.unicode.org/versions/Unicode4.0.0/ch03.pdf#G33992
fn to_uppercase(&self) -> char;
/// Converts a number to the character representing it.
///
/// # Return value
///
/// Returns `Some(char)` if `num` represents one digit under `radix`,
/// using one character of `0-9` or `a-z`, or `None` if it doesn't.
///
/// # Failure
///
/// Fails if given a radix > 36.
fn from_digit(num: uint, radix: uint) -> Option<char>;
/// Returns the hexadecimal Unicode escape of a character.
///
/// The rules are as follows:
///
/// * Characters in [0,0xff] get 2-digit escapes: `\\xNN`
/// * Characters in [0x100,0xffff] get 4-digit escapes: `\\uNNNN`.
/// * Characters above 0x10000 get 8-digit escapes: `\\UNNNNNNNN`.
fn escape_unicode(&self, f: |char|);
/// Returns a 'default' ASCII and C++11-like literal escape of a
/// character.
///
/// The default is chosen with a bias toward producing literals that are
/// legal in a variety of languages, including C++11 and similar C-family
/// languages. The exact rules are:
///
/// * Tab, CR and LF are escaped as '\t', '\r' and '\n' respectively.
/// * Single-quote, double-quote and backslash chars are backslash-
/// escaped.
/// * Any other chars in the range [0x20,0x7e] are not escaped.
/// * Any other chars are given hex unicode escapes; see `escape_unicode`.
fn escape_default(&self, f: |char|);
/// Returns the amount of bytes this character would need if encoded in
/// UTF-8.
fn len_utf8_bytes(&self) -> uint;
/// Encodes this character as UTF-8 into the provided byte buffer.
///
/// The buffer must be at least 4 bytes long or a runtime failure may
/// occur.
///
/// This will then return the number of bytes written to the slice.
fn encode_utf8(&self, dst: &mut [u8]) -> uint;
/// Encodes this character as UTF-16 into the provided `u16` buffer.
///
/// The buffer must be at least 2 elements long or a runtime failure may
/// occur.
///
/// This will then return the number of `u16`s written to the slice.
fn encode_utf16(&self, dst: &mut [u16]) -> uint;
}
impl Char for char {
fn is_alphabetic(&self) -> bool { is_alphabetic(*self) }
fn is_XID_start(&self) -> bool { is_XID_start(*self) }
fn is_XID_continue(&self) -> bool { is_XID_continue(*self) }
fn is_lowercase(&self) -> bool { is_lowercase(*self) }
fn is_uppercase(&self) -> bool { is_uppercase(*self) }
fn is_whitespace(&self) -> bool { is_whitespace(*self) }
fn is_alphanumeric(&self) -> bool { is_alphanumeric(*self) }
fn is_control(&self) -> bool { is_control(*self) }
fn is_digit(&self) -> bool { is_digit(*self) }
fn is_digit_radix(&self, radix: uint) -> bool { is_digit_radix(*self, radix) }
fn
|
(&self, radix: uint) -> Option<uint> { to_digit(*self, radix) }
fn to_lowercase(&self) -> char { to_lowercase(*self) }
fn to_uppercase(&self) -> char { to_uppercase(*self) }
fn from_digit(num: uint, radix: uint) -> Option<char> { from_digit(num, radix) }
fn escape_unicode(&self, f: |char|) { escape_unicode(*self, f) }
fn escape_default(&self, f: |char|) { escape_default(*self, f) }
fn len_utf8_bytes(&self) -> uint { len_utf8_bytes(*self) }
fn encode_utf8<'a>(&self, dst: &'a mut [u8]) -> uint {
let code = *self as u32;
if code < MAX_ONE_B {
dst[0] = code as u8;
1
} else if code < MAX_TWO_B {
dst[0] = (code >> 6u & 0x1F_u32) as u8 | TAG_TWO_B;
dst[1] = (code & 0x3F_u32) as u8 | TAG_CONT;
2
} else if code < MAX_THREE_B {
dst[0] = (code >> 12u & 0x0F_u32) as u8 | TAG_THREE_B;
dst[1] = (code >> 6u & 0x3F_u32) as u8 | TAG_CONT;
dst[2] = (code & 0x3F_u32) as u8 | TAG_CONT;
3
} else {
dst[0] = (code >> 18u & 0x07_u32) as u8 | TAG_FOUR_B;
dst[1] = (code >> 12u & 0x3F_u32) as u8 | TAG_CONT;
dst[2] = (code >> 6u & 0x3F_u32) as u8 | TAG_CONT;
dst[3] = (code & 0x3F_u32) as u8 | TAG_CONT;
4
}
}
fn encode_utf16(&self, dst: &mut [u16]) -> uint {
let mut ch = *self as u32;
if (ch & 0xFFFF_u32) == ch {
// The BMP falls through (assuming non-surrogate, as it should)
assert!(ch <= 0xD7FF_u32 || ch >= 0xE000_u32);
dst[0] = ch as u16;
1
} else {
// Supplementary planes break into surrogates.
assert!(ch >= 0x1_0000_u32 && ch <= 0x10_FFFF_u32);
ch -= 0x1_0000_u32;
dst[0] = 0xD800_u16 | ((ch >> 10) as u16);
dst[1] = 0xDC00_u16 | ((ch as u16) & 0x3FF_u16);
2
}
}
}
|
to_digit
|
identifier_name
|
char.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Character manipulation (`char` type, Unicode Scalar Value)
//!
//! This module provides the `Char` trait, as well as its implementation
//! for the primitive `char` type, in order to allow basic character manipulation.
//!
//! A `char` actually represents a
//! *[Unicode Scalar Value](http://www.unicode.org/glossary/#unicode_scalar_value)*,
//! as it can contain any Unicode code point except high-surrogate and
//! low-surrogate code points.
//!
//! As such, only values in the ranges \[0x0,0xD7FF\] and \[0xE000,0x10FFFF\]
//! (inclusive) are allowed. A `char` can always be safely cast to a `u32`;
//! however the converse is not always true due to the above range limits
//! and, as such, should be performed via the `from_u32` function..
#![allow(non_snake_case_functions)]
#![doc(primitive = "char")]
use mem::transmute;
use option::{None, Option, Some};
use iter::{Iterator, range_step};
use unicode::{derived_property, property, general_category, conversions};
/// Returns the canonical decomposition of a character.
pub use unicode::normalization::decompose_canonical;
/// Returns the compatibility decomposition of a character.
pub use unicode::normalization::decompose_compatible;
// UTF-8 ranges and tags for encoding characters
static TAG_CONT: u8 = 0b1000_0000u8;
static TAG_TWO_B: u8 = 0b1100_0000u8;
static TAG_THREE_B: u8 = 0b1110_0000u8;
static TAG_FOUR_B: u8 = 0b1111_0000u8;
static MAX_ONE_B: u32 = 0x80u32;
static MAX_TWO_B: u32 = 0x800u32;
static MAX_THREE_B: u32 = 0x10000u32;
static MAX_FOUR_B: u32 = 0x200000u32;
/*
Lu Uppercase_Letter an uppercase letter
Ll Lowercase_Letter a lowercase letter
Lt Titlecase_Letter a digraphic character, with first part uppercase
Lm Modifier_Letter a modifier letter
Lo Other_Letter other letters, including syllables and ideographs
Mn Nonspacing_Mark a nonspacing combining mark (zero advance width)
Mc Spacing_Mark a spacing combining mark (positive advance width)
Me Enclosing_Mark an enclosing combining mark
Nd Decimal_Number a decimal digit
Nl Letter_Number a letterlike numeric character
No Other_Number a numeric character of other type
Pc Connector_Punctuation a connecting punctuation mark, like a tie
Pd Dash_Punctuation a dash or hyphen punctuation mark
Ps Open_Punctuation an opening punctuation mark (of a pair)
Pe Close_Punctuation a closing punctuation mark (of a pair)
Pi Initial_Punctuation an initial quotation mark
Pf Final_Punctuation a final quotation mark
Po Other_Punctuation a punctuation mark of other type
Sm Math_Symbol a symbol of primarily mathematical use
Sc Currency_Symbol a currency sign
Sk Modifier_Symbol a non-letterlike modifier symbol
So Other_Symbol a symbol of other type
Zs Space_Separator a space character (of various non-zero widths)
Zl Line_Separator U+2028 LINE SEPARATOR only
Zp Paragraph_Separator U+2029 PARAGRAPH SEPARATOR only
Cc Control a C0 or C1 control code
Cf Format a format control character
Cs Surrogate a surrogate code point
Co Private_Use a private-use character
Cn Unassigned a reserved unassigned code point or a noncharacter
*/
/// The highest valid code point
pub static MAX: char = '\U0010ffff';
/// Converts from `u32` to a `char`
#[inline]
pub fn from_u32(i: u32) -> Option<char> {
// catch out-of-bounds and surrogates
if (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF) {
None
} else {
Some(unsafe { transmute(i) })
}
}
/// Returns whether the specified `char` is considered a Unicode alphabetic
/// code point
pub fn is_alphabetic(c: char) -> bool { derived_property::Alphabetic(c) }
/// Returns whether the specified `char` satisfies the 'XID_Start' Unicode property
///
/// 'XID_Start' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to ID_Start but modified for closure under NFKx.
pub fn is_XID_start(c: char) -> bool { derived_property::XID_Start(c) }
/// Returns whether the specified `char` satisfies the 'XID_Continue' Unicode property
///
/// 'XID_Continue' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to 'ID_Continue' but modified for closure under NFKx.
pub fn is_XID_continue(c: char) -> bool { derived_property::XID_Continue(c) }
///
/// Indicates whether a `char` is in lower case
///
/// This is defined according to the terms of the Unicode Derived Core Property 'Lowercase'.
///
#[inline]
pub fn is_lowercase(c: char) -> bool { derived_property::Lowercase(c) }
///
/// Indicates whether a `char` is in upper case
///
/// This is defined according to the terms of the Unicode Derived Core Property 'Uppercase'.
///
#[inline]
pub fn is_uppercase(c: char) -> bool { derived_property::Uppercase(c) }
///
/// Indicates whether a `char` is whitespace
///
/// Whitespace is defined in terms of the Unicode Property 'White_Space'.
///
#[inline]
pub fn is_whitespace(c: char) -> bool {
// As an optimization ASCII whitespace characters are checked separately
c ==''
|| ('\x09' <= c && c <= '\x0d')
|| property::White_Space(c)
}
///
/// Indicates whether a `char` is alphanumeric
///
/// Alphanumericness is defined in terms of the Unicode General Categories
/// 'Nd', 'Nl', 'No' and the Derived Core Property 'Alphabetic'.
///
#[inline]
pub fn is_alphanumeric(c: char) -> bool {
derived_property::Alphabetic(c)
|| general_category::Nd(c)
|| general_category::Nl(c)
|| general_category::No(c)
}
///
/// Indicates whether a `char` is a control code point
///
/// Control code points are defined in terms of the Unicode General Category
/// 'Cc'.
///
#[inline]
pub fn is_control(c: char) -> bool { general_category::Cc(c) }
/// Indicates whether the `char` is numeric (Nd, Nl, or No)
#[inline]
pub fn is_digit(c: char) -> bool {
general_category::Nd(c)
|| general_category::Nl(c)
|| general_category::No(c)
}
///
/// Checks if a `char` parses as a numeric digit in the given radix
///
/// Compared to `is_digit()`, this function only recognizes the
/// characters `0-9`, `a-z` and `A-Z`.
///
/// # Return value
///
/// Returns `true` if `c` is a valid digit under `radix`, and `false`
/// otherwise.
///
/// # Failure
///
/// Fails if given a `radix` > 36.
///
/// # Note
///
/// This just wraps `to_digit()`.
///
#[inline]
pub fn is_digit_radix(c: char, radix: uint) -> bool {
match to_digit(c, radix) {
Some(_) => true,
None => false,
}
}
///
/// Converts a `char` to the corresponding digit
///
/// # Return value
///
/// If `c` is between '0' and '9', the corresponding value
/// between 0 and 9. If `c` is 'a' or 'A', 10. If `c` is
/// 'b' or 'B', 11, etc. Returns none if the `char` does not
/// refer to a digit in the given radix.
///
/// # Failure
///
/// Fails if given a `radix` outside the range `[0..36]`.
///
#[inline]
pub fn to_digit(c: char, radix: uint) -> Option<uint> {
if radix > 36 {
fail!("to_digit: radix is too high (maximum 36)");
}
let val = match c {
'0'.. '9' => c as uint - ('0' as uint),
'a'.. 'z' => c as uint + 10u - ('a' as uint),
'A'.. 'Z' => c as uint + 10u - ('A' as uint),
_ => return None,
};
if val < radix { Some(val) }
else { None }
}
/// Convert a char to its uppercase equivalent
///
/// The case-folding performed is the common or simple mapping:
/// it maps one unicode codepoint (one char in Rust) to its uppercase equivalent according
/// to the Unicode database at ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
/// The additional SpecialCasing.txt is not considered here, as it expands to multiple
/// codepoints in some cases.
///
/// A full reference can be found here
/// http://www.unicode.org/versions/Unicode4.0.0/ch03.pdf#G33992
///
/// # Return value
///
/// Returns the char itself if no conversion was made
#[inline]
pub fn to_uppercase(c: char) -> char {
conversions::to_upper(c)
}
/// Convert a char to its lowercase equivalent
///
/// The case-folding performed is the common or simple mapping
/// see `to_uppercase` for references and more information
///
/// # Return value
///
/// Returns the char itself if no conversion if possible
#[inline]
pub fn to_lowercase(c: char) -> char {
conversions::to_lower(c)
}
///
/// Converts a number to the character representing it
///
/// # Return value
///
/// Returns `Some(char)` if `num` represents one digit under `radix`,
/// using one character of `0-9` or `a-z`, or `None` if it doesn't.
///
/// # Failure
///
/// Fails if given an `radix` > 36.
///
#[inline]
pub fn from_digit(num: uint, radix: uint) -> Option<char> {
if radix > 36 {
fail!("from_digit: radix is to high (maximum 36)");
}
if num < radix {
unsafe {
if num < 10 {
Some(transmute(('0' as uint + num) as u32))
} else {
Some(transmute(('a' as uint + num - 10u) as u32))
}
}
} else {
None
}
}
///
/// Returns the hexadecimal Unicode escape of a `char`
///
/// The rules are as follows:
///
/// - chars in [0,0xff] get 2-digit escapes: `\\xNN`
/// - chars in [0x100,0xffff] get 4-digit escapes: `\\uNNNN`
/// - chars above 0x10000 get 8-digit escapes: `\\UNNNNNNNN`
///
pub fn escape_unicode(c: char, f: |char|) {
// avoid calling str::to_str_radix because we don't really need to allocate
// here.
f('\\');
let pad = match () {
_ if c <= '\xff' => { f('x'); 2 }
_ if c <= '\uffff' => { f('u'); 4 }
_ => { f('U'); 8 }
};
for offset in range_step::<i32>(4 * (pad - 1), -1, -4) {
let offset = offset as uint;
unsafe {
match ((c as i32) >> offset) & 0xf {
i @ 0.. 9 => { f(transmute('0' as i32 + i)); }
i => { f(transmute('a' as i32 + (i - 10))); }
}
}
}
}
///
/// Returns a 'default' ASCII and C++11-like literal escape of a `char`
///
/// The default is chosen with a bias toward producing literals that are
/// legal in a variety of languages, including C++11 and similar C-family
/// languages. The exact rules are:
///
/// - Tab, CR and LF are escaped as '\t', '\r' and '\n' respectively.
/// - Single-quote, double-quote and backslash chars are backslash-escaped.
/// - Any other chars in the range [0x20,0x7e] are not escaped.
/// - Any other chars are given hex unicode escapes; see `escape_unicode`.
///
pub fn escape_default(c: char, f: |char|) {
match c {
'\t' => { f('\\'); f('t'); }
'\r' => { f('\\'); f('r'); }
'\n' => { f('\\'); f('n'); }
'\\' => { f('\\'); f('\\'); }
'\'' => { f('\\'); f('\''); }
'"' => { f('\\'); f('"'); }
'\x20'.. '\x7e' => { f(c); }
_ => c.escape_unicode(f),
}
}
/// Returns the amount of bytes this `char` would need if encoded in UTF-8
pub fn len_utf8_bytes(c: char) -> uint {
let code = c as u32;
match () {
_ if code < MAX_ONE_B => 1u,
_ if code < MAX_TWO_B => 2u,
_ if code < MAX_THREE_B => 3u,
_ if code < MAX_FOUR_B => 4u,
_ => fail!("invalid character!"),
}
}
/// Useful functions for Unicode characters.
pub trait Char {
/// Returns whether the specified character is considered a Unicode
/// alphabetic code point.
fn is_alphabetic(&self) -> bool;
/// Returns whether the specified character satisfies the 'XID_Start'
/// Unicode property.
///
/// 'XID_Start' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to ID_Start but modified for closure under NFKx.
fn is_XID_start(&self) -> bool;
/// Returns whether the specified `char` satisfies the 'XID_Continue'
/// Unicode property.
///
/// 'XID_Continue' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to 'ID_Continue' but modified for closure under NFKx.
fn is_XID_continue(&self) -> bool;
/// Indicates whether a character is in lowercase.
///
/// This is defined according to the terms of the Unicode Derived Core
/// Property `Lowercase`.
fn is_lowercase(&self) -> bool;
/// Indicates whether a character is in uppercase.
///
/// This is defined according to the terms of the Unicode Derived Core
/// Property `Uppercase`.
fn is_uppercase(&self) -> bool;
/// Indicates whether a character is whitespace.
///
/// Whitespace is defined in terms of the Unicode Property `White_Space`.
fn is_whitespace(&self) -> bool;
/// Indicates whether a character is alphanumeric.
///
/// Alphanumericness is defined in terms of the Unicode General Categories
/// 'Nd', 'Nl', 'No' and the Derived Core Property 'Alphabetic'.
fn is_alphanumeric(&self) -> bool;
/// Indicates whether a character is a control code point.
///
/// Control code points are defined in terms of the Unicode General
/// Category `Cc`.
fn is_control(&self) -> bool;
/// Indicates whether the character is numeric (Nd, Nl, or No).
fn is_digit(&self) -> bool;
/// Checks if a `char` parses as a numeric digit in the given radix.
///
/// Compared to `is_digit()`, this function only recognizes the characters
/// `0-9`, `a-z` and `A-Z`.
///
/// # Return value
///
/// Returns `true` if `c` is a valid digit under `radix`, and `false`
/// otherwise.
///
/// # Failure
///
/// Fails if given a radix > 36.
fn is_digit_radix(&self, radix: uint) -> bool;
/// Converts a character to the corresponding digit.
///
/// # Return value
///
/// If `c` is between '0' and '9', the corresponding value between 0 and
/// 9. If `c` is 'a' or 'A', 10. If `c` is 'b' or 'B', 11, etc. Returns
/// none if the character does not refer to a digit in the given radix.
///
/// # Failure
///
/// Fails if given a radix outside the range [0..36].
fn to_digit(&self, radix: uint) -> Option<uint>;
/// Converts a character to its lowercase equivalent.
///
/// The case-folding performed is the common or simple mapping. See
/// `to_uppercase()` for references and more information.
///
/// # Return value
///
/// Returns the lowercase equivalent of the character, or the character
/// itself if no conversion is possible.
fn to_lowercase(&self) -> char;
/// Converts a character to its uppercase equivalent.
///
/// The case-folding performed is the common or simple mapping: it maps
/// one unicode codepoint (one character in Rust) to its uppercase
/// equivalent according to the Unicode database [1]. The additional
/// `SpecialCasing.txt` is not considered here, as it expands to multiple
/// codepoints in some cases.
///
/// A full reference can be found here [2].
///
/// # Return value
///
/// Returns the uppercase equivalent of the character, or the character
/// itself if no conversion was made.
///
/// [1]: ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
///
/// [2]: http://www.unicode.org/versions/Unicode4.0.0/ch03.pdf#G33992
fn to_uppercase(&self) -> char;
/// Converts a number to the character representing it.
///
/// # Return value
///
/// Returns `Some(char)` if `num` represents one digit under `radix`,
/// using one character of `0-9` or `a-z`, or `None` if it doesn't.
///
/// # Failure
///
/// Fails if given a radix > 36.
fn from_digit(num: uint, radix: uint) -> Option<char>;
/// Returns the hexadecimal Unicode escape of a character.
///
/// The rules are as follows:
///
/// * Characters in [0,0xff] get 2-digit escapes: `\\xNN`
/// * Characters in [0x100,0xffff] get 4-digit escapes: `\\uNNNN`.
/// * Characters above 0x10000 get 8-digit escapes: `\\UNNNNNNNN`.
fn escape_unicode(&self, f: |char|);
/// Returns a 'default' ASCII and C++11-like literal escape of a
/// character.
///
/// The default is chosen with a bias toward producing literals that are
/// legal in a variety of languages, including C++11 and similar C-family
/// languages. The exact rules are:
///
/// * Tab, CR and LF are escaped as '\t', '\r' and '\n' respectively.
/// * Single-quote, double-quote and backslash chars are backslash-
/// escaped.
/// * Any other chars in the range [0x20,0x7e] are not escaped.
/// * Any other chars are given hex unicode escapes; see `escape_unicode`.
fn escape_default(&self, f: |char|);
/// Returns the amount of bytes this character would need if encoded in
/// UTF-8.
fn len_utf8_bytes(&self) -> uint;
/// Encodes this character as UTF-8 into the provided byte buffer.
///
/// The buffer must be at least 4 bytes long or a runtime failure may
/// occur.
///
/// This will then return the number of bytes written to the slice.
fn encode_utf8(&self, dst: &mut [u8]) -> uint;
/// Encodes this character as UTF-16 into the provided `u16` buffer.
///
/// The buffer must be at least 2 elements long or a runtime failure may
/// occur.
///
/// This will then return the number of `u16`s written to the slice.
fn encode_utf16(&self, dst: &mut [u16]) -> uint;
}
impl Char for char {
fn is_alphabetic(&self) -> bool { is_alphabetic(*self) }
fn is_XID_start(&self) -> bool { is_XID_start(*self) }
fn is_XID_continue(&self) -> bool { is_XID_continue(*self) }
fn is_lowercase(&self) -> bool { is_lowercase(*self) }
fn is_uppercase(&self) -> bool { is_uppercase(*self) }
fn is_whitespace(&self) -> bool { is_whitespace(*self) }
fn is_alphanumeric(&self) -> bool { is_alphanumeric(*self) }
fn is_control(&self) -> bool { is_control(*self) }
fn is_digit(&self) -> bool { is_digit(*self) }
fn is_digit_radix(&self, radix: uint) -> bool { is_digit_radix(*self, radix) }
fn to_digit(&self, radix: uint) -> Option<uint> { to_digit(*self, radix) }
fn to_lowercase(&self) -> char { to_lowercase(*self) }
fn to_uppercase(&self) -> char { to_uppercase(*self) }
fn from_digit(num: uint, radix: uint) -> Option<char> { from_digit(num, radix) }
fn escape_unicode(&self, f: |char|) { escape_unicode(*self, f) }
fn escape_default(&self, f: |char|) { escape_default(*self, f) }
fn len_utf8_bytes(&self) -> uint { len_utf8_bytes(*self) }
fn encode_utf8<'a>(&self, dst: &'a mut [u8]) -> uint {
let code = *self as u32;
if code < MAX_ONE_B {
dst[0] = code as u8;
1
} else if code < MAX_TWO_B {
dst[0] = (code >> 6u & 0x1F_u32) as u8 | TAG_TWO_B;
dst[1] = (code & 0x3F_u32) as u8 | TAG_CONT;
2
} else if code < MAX_THREE_B {
dst[0] = (code >> 12u & 0x0F_u32) as u8 | TAG_THREE_B;
dst[1] = (code >> 6u & 0x3F_u32) as u8 | TAG_CONT;
dst[2] = (code & 0x3F_u32) as u8 | TAG_CONT;
3
} else {
dst[0] = (code >> 18u & 0x07_u32) as u8 | TAG_FOUR_B;
dst[1] = (code >> 12u & 0x3F_u32) as u8 | TAG_CONT;
dst[2] = (code >> 6u & 0x3F_u32) as u8 | TAG_CONT;
dst[3] = (code & 0x3F_u32) as u8 | TAG_CONT;
4
}
}
fn encode_utf16(&self, dst: &mut [u16]) -> uint {
let mut ch = *self as u32;
if (ch & 0xFFFF_u32) == ch {
// The BMP falls through (assuming non-surrogate, as it should)
assert!(ch <= 0xD7FF_u32 || ch >= 0xE000_u32);
dst[0] = ch as u16;
1
} else {
|
2
}
}
}
|
// Supplementary planes break into surrogates.
assert!(ch >= 0x1_0000_u32 && ch <= 0x10_FFFF_u32);
ch -= 0x1_0000_u32;
dst[0] = 0xD800_u16 | ((ch >> 10) as u16);
dst[1] = 0xDC00_u16 | ((ch as u16) & 0x3FF_u16);
|
random_line_split
|
char.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Character manipulation (`char` type, Unicode Scalar Value)
//!
//! This module provides the `Char` trait, as well as its implementation
//! for the primitive `char` type, in order to allow basic character manipulation.
//!
//! A `char` actually represents a
//! *[Unicode Scalar Value](http://www.unicode.org/glossary/#unicode_scalar_value)*,
//! as it can contain any Unicode code point except high-surrogate and
//! low-surrogate code points.
//!
//! As such, only values in the ranges \[0x0,0xD7FF\] and \[0xE000,0x10FFFF\]
//! (inclusive) are allowed. A `char` can always be safely cast to a `u32`;
//! however the converse is not always true due to the above range limits
//! and, as such, should be performed via the `from_u32` function..
#![allow(non_snake_case_functions)]
#![doc(primitive = "char")]
use mem::transmute;
use option::{None, Option, Some};
use iter::{Iterator, range_step};
use unicode::{derived_property, property, general_category, conversions};
/// Returns the canonical decomposition of a character.
pub use unicode::normalization::decompose_canonical;
/// Returns the compatibility decomposition of a character.
pub use unicode::normalization::decompose_compatible;
// UTF-8 ranges and tags for encoding characters
static TAG_CONT: u8 = 0b1000_0000u8;
static TAG_TWO_B: u8 = 0b1100_0000u8;
static TAG_THREE_B: u8 = 0b1110_0000u8;
static TAG_FOUR_B: u8 = 0b1111_0000u8;
static MAX_ONE_B: u32 = 0x80u32;
static MAX_TWO_B: u32 = 0x800u32;
static MAX_THREE_B: u32 = 0x10000u32;
static MAX_FOUR_B: u32 = 0x200000u32;
/*
Lu Uppercase_Letter an uppercase letter
Ll Lowercase_Letter a lowercase letter
Lt Titlecase_Letter a digraphic character, with first part uppercase
Lm Modifier_Letter a modifier letter
Lo Other_Letter other letters, including syllables and ideographs
Mn Nonspacing_Mark a nonspacing combining mark (zero advance width)
Mc Spacing_Mark a spacing combining mark (positive advance width)
Me Enclosing_Mark an enclosing combining mark
Nd Decimal_Number a decimal digit
Nl Letter_Number a letterlike numeric character
No Other_Number a numeric character of other type
Pc Connector_Punctuation a connecting punctuation mark, like a tie
Pd Dash_Punctuation a dash or hyphen punctuation mark
Ps Open_Punctuation an opening punctuation mark (of a pair)
Pe Close_Punctuation a closing punctuation mark (of a pair)
Pi Initial_Punctuation an initial quotation mark
Pf Final_Punctuation a final quotation mark
Po Other_Punctuation a punctuation mark of other type
Sm Math_Symbol a symbol of primarily mathematical use
Sc Currency_Symbol a currency sign
Sk Modifier_Symbol a non-letterlike modifier symbol
So Other_Symbol a symbol of other type
Zs Space_Separator a space character (of various non-zero widths)
Zl Line_Separator U+2028 LINE SEPARATOR only
Zp Paragraph_Separator U+2029 PARAGRAPH SEPARATOR only
Cc Control a C0 or C1 control code
Cf Format a format control character
Cs Surrogate a surrogate code point
Co Private_Use a private-use character
Cn Unassigned a reserved unassigned code point or a noncharacter
*/
/// The highest valid code point
pub static MAX: char = '\U0010ffff';
/// Converts from `u32` to a `char`
#[inline]
pub fn from_u32(i: u32) -> Option<char> {
// catch out-of-bounds and surrogates
if (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF) {
None
} else {
Some(unsafe { transmute(i) })
}
}
/// Returns whether the specified `char` is considered a Unicode alphabetic
/// code point
pub fn is_alphabetic(c: char) -> bool { derived_property::Alphabetic(c) }
/// Returns whether the specified `char` satisfies the 'XID_Start' Unicode property
///
/// 'XID_Start' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to ID_Start but modified for closure under NFKx.
pub fn is_XID_start(c: char) -> bool { derived_property::XID_Start(c) }
/// Returns whether the specified `char` satisfies the 'XID_Continue' Unicode property
///
/// 'XID_Continue' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to 'ID_Continue' but modified for closure under NFKx.
pub fn is_XID_continue(c: char) -> bool { derived_property::XID_Continue(c) }
///
/// Indicates whether a `char` is in lower case
///
/// This is defined according to the terms of the Unicode Derived Core Property 'Lowercase'.
///
#[inline]
pub fn is_lowercase(c: char) -> bool { derived_property::Lowercase(c) }
///
/// Indicates whether a `char` is in upper case
///
/// This is defined according to the terms of the Unicode Derived Core Property 'Uppercase'.
///
#[inline]
pub fn is_uppercase(c: char) -> bool { derived_property::Uppercase(c) }
///
/// Indicates whether a `char` is whitespace
///
/// Whitespace is defined in terms of the Unicode Property 'White_Space'.
///
#[inline]
pub fn is_whitespace(c: char) -> bool {
// As an optimization ASCII whitespace characters are checked separately
c ==''
|| ('\x09' <= c && c <= '\x0d')
|| property::White_Space(c)
}
///
/// Indicates whether a `char` is alphanumeric
///
/// Alphanumericness is defined in terms of the Unicode General Categories
/// 'Nd', 'Nl', 'No' and the Derived Core Property 'Alphabetic'.
///
#[inline]
pub fn is_alphanumeric(c: char) -> bool {
derived_property::Alphabetic(c)
|| general_category::Nd(c)
|| general_category::Nl(c)
|| general_category::No(c)
}
///
/// Indicates whether a `char` is a control code point
///
/// Control code points are defined in terms of the Unicode General Category
/// 'Cc'.
///
#[inline]
pub fn is_control(c: char) -> bool { general_category::Cc(c) }
/// Indicates whether the `char` is numeric (Nd, Nl, or No)
#[inline]
pub fn is_digit(c: char) -> bool {
general_category::Nd(c)
|| general_category::Nl(c)
|| general_category::No(c)
}
///
/// Checks if a `char` parses as a numeric digit in the given radix
///
/// Compared to `is_digit()`, this function only recognizes the
/// characters `0-9`, `a-z` and `A-Z`.
///
/// # Return value
///
/// Returns `true` if `c` is a valid digit under `radix`, and `false`
/// otherwise.
///
/// # Failure
///
/// Fails if given a `radix` > 36.
///
/// # Note
///
/// This just wraps `to_digit()`.
///
#[inline]
pub fn is_digit_radix(c: char, radix: uint) -> bool {
match to_digit(c, radix) {
Some(_) => true,
None => false,
}
}
///
/// Converts a `char` to the corresponding digit
///
/// # Return value
///
/// If `c` is between '0' and '9', the corresponding value
/// between 0 and 9. If `c` is 'a' or 'A', 10. If `c` is
/// 'b' or 'B', 11, etc. Returns none if the `char` does not
/// refer to a digit in the given radix.
///
/// # Failure
///
/// Fails if given a `radix` outside the range `[0..36]`.
///
#[inline]
pub fn to_digit(c: char, radix: uint) -> Option<uint> {
if radix > 36 {
fail!("to_digit: radix is too high (maximum 36)");
}
let val = match c {
'0'.. '9' => c as uint - ('0' as uint),
'a'.. 'z' => c as uint + 10u - ('a' as uint),
'A'.. 'Z' => c as uint + 10u - ('A' as uint),
_ => return None,
};
if val < radix { Some(val) }
else { None }
}
/// Convert a char to its uppercase equivalent
///
/// The case-folding performed is the common or simple mapping:
/// it maps one unicode codepoint (one char in Rust) to its uppercase equivalent according
/// to the Unicode database at ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
/// The additional SpecialCasing.txt is not considered here, as it expands to multiple
/// codepoints in some cases.
///
/// A full reference can be found here
/// http://www.unicode.org/versions/Unicode4.0.0/ch03.pdf#G33992
///
/// # Return value
///
/// Returns the char itself if no conversion was made
#[inline]
pub fn to_uppercase(c: char) -> char {
conversions::to_upper(c)
}
/// Convert a char to its lowercase equivalent
///
/// The case-folding performed is the common or simple mapping
/// see `to_uppercase` for references and more information
///
/// # Return value
///
/// Returns the char itself if no conversion if possible
#[inline]
pub fn to_lowercase(c: char) -> char {
conversions::to_lower(c)
}
///
/// Converts a number to the character representing it
///
/// # Return value
///
/// Returns `Some(char)` if `num` represents one digit under `radix`,
/// using one character of `0-9` or `a-z`, or `None` if it doesn't.
///
/// # Failure
///
/// Fails if given an `radix` > 36.
///
#[inline]
pub fn from_digit(num: uint, radix: uint) -> Option<char> {
if radix > 36 {
fail!("from_digit: radix is to high (maximum 36)");
}
if num < radix {
unsafe {
if num < 10 {
Some(transmute(('0' as uint + num) as u32))
} else {
Some(transmute(('a' as uint + num - 10u) as u32))
}
}
} else {
None
}
}
///
/// Returns the hexadecimal Unicode escape of a `char`
///
/// The rules are as follows:
///
/// - chars in [0,0xff] get 2-digit escapes: `\\xNN`
/// - chars in [0x100,0xffff] get 4-digit escapes: `\\uNNNN`
/// - chars above 0x10000 get 8-digit escapes: `\\UNNNNNNNN`
///
pub fn escape_unicode(c: char, f: |char|) {
// avoid calling str::to_str_radix because we don't really need to allocate
// here.
f('\\');
let pad = match () {
_ if c <= '\xff' => { f('x'); 2 }
_ if c <= '\uffff' => { f('u'); 4 }
_ => { f('U'); 8 }
};
for offset in range_step::<i32>(4 * (pad - 1), -1, -4) {
let offset = offset as uint;
unsafe {
match ((c as i32) >> offset) & 0xf {
i @ 0.. 9 => { f(transmute('0' as i32 + i)); }
i => { f(transmute('a' as i32 + (i - 10))); }
}
}
}
}
///
/// Returns a 'default' ASCII and C++11-like literal escape of a `char`
///
/// The default is chosen with a bias toward producing literals that are
/// legal in a variety of languages, including C++11 and similar C-family
/// languages. The exact rules are:
///
/// - Tab, CR and LF are escaped as '\t', '\r' and '\n' respectively.
/// - Single-quote, double-quote and backslash chars are backslash-escaped.
/// - Any other chars in the range [0x20,0x7e] are not escaped.
/// - Any other chars are given hex unicode escapes; see `escape_unicode`.
///
pub fn escape_default(c: char, f: |char|) {
match c {
'\t' => { f('\\'); f('t'); }
'\r' =>
|
'\n' => { f('\\'); f('n'); }
'\\' => { f('\\'); f('\\'); }
'\'' => { f('\\'); f('\''); }
'"' => { f('\\'); f('"'); }
'\x20'.. '\x7e' => { f(c); }
_ => c.escape_unicode(f),
}
}
/// Returns the amount of bytes this `char` would need if encoded in UTF-8
pub fn len_utf8_bytes(c: char) -> uint {
let code = c as u32;
match () {
_ if code < MAX_ONE_B => 1u,
_ if code < MAX_TWO_B => 2u,
_ if code < MAX_THREE_B => 3u,
_ if code < MAX_FOUR_B => 4u,
_ => fail!("invalid character!"),
}
}
/// Useful functions for Unicode characters.
pub trait Char {
/// Returns whether the specified character is considered a Unicode
/// alphabetic code point.
fn is_alphabetic(&self) -> bool;
/// Returns whether the specified character satisfies the 'XID_Start'
/// Unicode property.
///
/// 'XID_Start' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to ID_Start but modified for closure under NFKx.
fn is_XID_start(&self) -> bool;
/// Returns whether the specified `char` satisfies the 'XID_Continue'
/// Unicode property.
///
/// 'XID_Continue' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to 'ID_Continue' but modified for closure under NFKx.
fn is_XID_continue(&self) -> bool;
/// Indicates whether a character is in lowercase.
///
/// This is defined according to the terms of the Unicode Derived Core
/// Property `Lowercase`.
fn is_lowercase(&self) -> bool;
/// Indicates whether a character is in uppercase.
///
/// This is defined according to the terms of the Unicode Derived Core
/// Property `Uppercase`.
fn is_uppercase(&self) -> bool;
/// Indicates whether a character is whitespace.
///
/// Whitespace is defined in terms of the Unicode Property `White_Space`.
fn is_whitespace(&self) -> bool;
/// Indicates whether a character is alphanumeric.
///
/// Alphanumericness is defined in terms of the Unicode General Categories
/// 'Nd', 'Nl', 'No' and the Derived Core Property 'Alphabetic'.
fn is_alphanumeric(&self) -> bool;
/// Indicates whether a character is a control code point.
///
/// Control code points are defined in terms of the Unicode General
/// Category `Cc`.
fn is_control(&self) -> bool;
/// Indicates whether the character is numeric (Nd, Nl, or No).
fn is_digit(&self) -> bool;
/// Checks if a `char` parses as a numeric digit in the given radix.
///
/// Compared to `is_digit()`, this function only recognizes the characters
/// `0-9`, `a-z` and `A-Z`.
///
/// # Return value
///
/// Returns `true` if `c` is a valid digit under `radix`, and `false`
/// otherwise.
///
/// # Failure
///
/// Fails if given a radix > 36.
fn is_digit_radix(&self, radix: uint) -> bool;
/// Converts a character to the corresponding digit.
///
/// # Return value
///
/// If `c` is between '0' and '9', the corresponding value between 0 and
/// 9. If `c` is 'a' or 'A', 10. If `c` is 'b' or 'B', 11, etc. Returns
/// none if the character does not refer to a digit in the given radix.
///
/// # Failure
///
/// Fails if given a radix outside the range [0..36].
fn to_digit(&self, radix: uint) -> Option<uint>;
/// Converts a character to its lowercase equivalent.
///
/// The case-folding performed is the common or simple mapping. See
/// `to_uppercase()` for references and more information.
///
/// # Return value
///
/// Returns the lowercase equivalent of the character, or the character
/// itself if no conversion is possible.
fn to_lowercase(&self) -> char;
/// Converts a character to its uppercase equivalent.
///
/// The case-folding performed is the common or simple mapping: it maps
/// one unicode codepoint (one character in Rust) to its uppercase
/// equivalent according to the Unicode database [1]. The additional
/// `SpecialCasing.txt` is not considered here, as it expands to multiple
/// codepoints in some cases.
///
/// A full reference can be found here [2].
///
/// # Return value
///
/// Returns the uppercase equivalent of the character, or the character
/// itself if no conversion was made.
///
/// [1]: ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
///
/// [2]: http://www.unicode.org/versions/Unicode4.0.0/ch03.pdf#G33992
fn to_uppercase(&self) -> char;
/// Converts a number to the character representing it.
///
/// # Return value
///
/// Returns `Some(char)` if `num` represents one digit under `radix`,
/// using one character of `0-9` or `a-z`, or `None` if it doesn't.
///
/// # Failure
///
/// Fails if given a radix > 36.
fn from_digit(num: uint, radix: uint) -> Option<char>;
/// Returns the hexadecimal Unicode escape of a character.
///
/// The rules are as follows:
///
/// * Characters in [0,0xff] get 2-digit escapes: `\\xNN`
/// * Characters in [0x100,0xffff] get 4-digit escapes: `\\uNNNN`.
/// * Characters above 0x10000 get 8-digit escapes: `\\UNNNNNNNN`.
fn escape_unicode(&self, f: |char|);
/// Returns a 'default' ASCII and C++11-like literal escape of a
/// character.
///
/// The default is chosen with a bias toward producing literals that are
/// legal in a variety of languages, including C++11 and similar C-family
/// languages. The exact rules are:
///
/// * Tab, CR and LF are escaped as '\t', '\r' and '\n' respectively.
/// * Single-quote, double-quote and backslash chars are backslash-
/// escaped.
/// * Any other chars in the range [0x20,0x7e] are not escaped.
/// * Any other chars are given hex unicode escapes; see `escape_unicode`.
fn escape_default(&self, f: |char|);
/// Returns the amount of bytes this character would need if encoded in
/// UTF-8.
fn len_utf8_bytes(&self) -> uint;
/// Encodes this character as UTF-8 into the provided byte buffer.
///
/// The buffer must be at least 4 bytes long or a runtime failure may
/// occur.
///
/// This will then return the number of bytes written to the slice.
fn encode_utf8(&self, dst: &mut [u8]) -> uint;
/// Encodes this character as UTF-16 into the provided `u16` buffer.
///
/// The buffer must be at least 2 elements long or a runtime failure may
/// occur.
///
/// This will then return the number of `u16`s written to the slice.
fn encode_utf16(&self, dst: &mut [u16]) -> uint;
}
impl Char for char {
fn is_alphabetic(&self) -> bool { is_alphabetic(*self) }
fn is_XID_start(&self) -> bool { is_XID_start(*self) }
fn is_XID_continue(&self) -> bool { is_XID_continue(*self) }
fn is_lowercase(&self) -> bool { is_lowercase(*self) }
fn is_uppercase(&self) -> bool { is_uppercase(*self) }
fn is_whitespace(&self) -> bool { is_whitespace(*self) }
fn is_alphanumeric(&self) -> bool { is_alphanumeric(*self) }
fn is_control(&self) -> bool { is_control(*self) }
fn is_digit(&self) -> bool { is_digit(*self) }
fn is_digit_radix(&self, radix: uint) -> bool { is_digit_radix(*self, radix) }
fn to_digit(&self, radix: uint) -> Option<uint> { to_digit(*self, radix) }
fn to_lowercase(&self) -> char { to_lowercase(*self) }
fn to_uppercase(&self) -> char { to_uppercase(*self) }
fn from_digit(num: uint, radix: uint) -> Option<char> { from_digit(num, radix) }
fn escape_unicode(&self, f: |char|) { escape_unicode(*self, f) }
fn escape_default(&self, f: |char|) { escape_default(*self, f) }
fn len_utf8_bytes(&self) -> uint { len_utf8_bytes(*self) }
fn encode_utf8<'a>(&self, dst: &'a mut [u8]) -> uint {
let code = *self as u32;
if code < MAX_ONE_B {
dst[0] = code as u8;
1
} else if code < MAX_TWO_B {
dst[0] = (code >> 6u & 0x1F_u32) as u8 | TAG_TWO_B;
dst[1] = (code & 0x3F_u32) as u8 | TAG_CONT;
2
} else if code < MAX_THREE_B {
dst[0] = (code >> 12u & 0x0F_u32) as u8 | TAG_THREE_B;
dst[1] = (code >> 6u & 0x3F_u32) as u8 | TAG_CONT;
dst[2] = (code & 0x3F_u32) as u8 | TAG_CONT;
3
} else {
dst[0] = (code >> 18u & 0x07_u32) as u8 | TAG_FOUR_B;
dst[1] = (code >> 12u & 0x3F_u32) as u8 | TAG_CONT;
dst[2] = (code >> 6u & 0x3F_u32) as u8 | TAG_CONT;
dst[3] = (code & 0x3F_u32) as u8 | TAG_CONT;
4
}
}
fn encode_utf16(&self, dst: &mut [u16]) -> uint {
let mut ch = *self as u32;
if (ch & 0xFFFF_u32) == ch {
// The BMP falls through (assuming non-surrogate, as it should)
assert!(ch <= 0xD7FF_u32 || ch >= 0xE000_u32);
dst[0] = ch as u16;
1
} else {
// Supplementary planes break into surrogates.
assert!(ch >= 0x1_0000_u32 && ch <= 0x10_FFFF_u32);
ch -= 0x1_0000_u32;
dst[0] = 0xD800_u16 | ((ch >> 10) as u16);
dst[1] = 0xDC00_u16 | ((ch as u16) & 0x3FF_u16);
2
}
}
}
|
{ f('\\'); f('r'); }
|
conditional_block
|
char.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Character manipulation (`char` type, Unicode Scalar Value)
//!
//! This module provides the `Char` trait, as well as its implementation
//! for the primitive `char` type, in order to allow basic character manipulation.
//!
//! A `char` actually represents a
//! *[Unicode Scalar Value](http://www.unicode.org/glossary/#unicode_scalar_value)*,
//! as it can contain any Unicode code point except high-surrogate and
//! low-surrogate code points.
//!
//! As such, only values in the ranges \[0x0,0xD7FF\] and \[0xE000,0x10FFFF\]
//! (inclusive) are allowed. A `char` can always be safely cast to a `u32`;
//! however the converse is not always true due to the above range limits
//! and, as such, should be performed via the `from_u32` function..
#![allow(non_snake_case_functions)]
#![doc(primitive = "char")]
use mem::transmute;
use option::{None, Option, Some};
use iter::{Iterator, range_step};
use unicode::{derived_property, property, general_category, conversions};
/// Returns the canonical decomposition of a character.
pub use unicode::normalization::decompose_canonical;
/// Returns the compatibility decomposition of a character.
pub use unicode::normalization::decompose_compatible;
// UTF-8 ranges and tags for encoding characters
static TAG_CONT: u8 = 0b1000_0000u8;
static TAG_TWO_B: u8 = 0b1100_0000u8;
static TAG_THREE_B: u8 = 0b1110_0000u8;
static TAG_FOUR_B: u8 = 0b1111_0000u8;
static MAX_ONE_B: u32 = 0x80u32;
static MAX_TWO_B: u32 = 0x800u32;
static MAX_THREE_B: u32 = 0x10000u32;
static MAX_FOUR_B: u32 = 0x200000u32;
/*
Lu Uppercase_Letter an uppercase letter
Ll Lowercase_Letter a lowercase letter
Lt Titlecase_Letter a digraphic character, with first part uppercase
Lm Modifier_Letter a modifier letter
Lo Other_Letter other letters, including syllables and ideographs
Mn Nonspacing_Mark a nonspacing combining mark (zero advance width)
Mc Spacing_Mark a spacing combining mark (positive advance width)
Me Enclosing_Mark an enclosing combining mark
Nd Decimal_Number a decimal digit
Nl Letter_Number a letterlike numeric character
No Other_Number a numeric character of other type
Pc Connector_Punctuation a connecting punctuation mark, like a tie
Pd Dash_Punctuation a dash or hyphen punctuation mark
Ps Open_Punctuation an opening punctuation mark (of a pair)
Pe Close_Punctuation a closing punctuation mark (of a pair)
Pi Initial_Punctuation an initial quotation mark
Pf Final_Punctuation a final quotation mark
Po Other_Punctuation a punctuation mark of other type
Sm Math_Symbol a symbol of primarily mathematical use
Sc Currency_Symbol a currency sign
Sk Modifier_Symbol a non-letterlike modifier symbol
So Other_Symbol a symbol of other type
Zs Space_Separator a space character (of various non-zero widths)
Zl Line_Separator U+2028 LINE SEPARATOR only
Zp Paragraph_Separator U+2029 PARAGRAPH SEPARATOR only
Cc Control a C0 or C1 control code
Cf Format a format control character
Cs Surrogate a surrogate code point
Co Private_Use a private-use character
Cn Unassigned a reserved unassigned code point or a noncharacter
*/
/// The highest valid code point
pub static MAX: char = '\U0010ffff';
/// Converts from `u32` to a `char`
#[inline]
pub fn from_u32(i: u32) -> Option<char> {
// catch out-of-bounds and surrogates
if (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF) {
None
} else {
Some(unsafe { transmute(i) })
}
}
/// Returns whether the specified `char` is considered a Unicode alphabetic
/// code point
pub fn is_alphabetic(c: char) -> bool { derived_property::Alphabetic(c) }
/// Returns whether the specified `char` satisfies the 'XID_Start' Unicode property
///
/// 'XID_Start' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to ID_Start but modified for closure under NFKx.
pub fn is_XID_start(c: char) -> bool { derived_property::XID_Start(c) }
/// Returns whether the specified `char` satisfies the 'XID_Continue' Unicode property
///
/// 'XID_Continue' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to 'ID_Continue' but modified for closure under NFKx.
pub fn is_XID_continue(c: char) -> bool { derived_property::XID_Continue(c) }
///
/// Indicates whether a `char` is in lower case
///
/// This is defined according to the terms of the Unicode Derived Core Property 'Lowercase'.
///
#[inline]
pub fn is_lowercase(c: char) -> bool { derived_property::Lowercase(c) }
///
/// Indicates whether a `char` is in upper case
///
/// This is defined according to the terms of the Unicode Derived Core Property 'Uppercase'.
///
#[inline]
pub fn is_uppercase(c: char) -> bool { derived_property::Uppercase(c) }
///
/// Indicates whether a `char` is whitespace
///
/// Whitespace is defined in terms of the Unicode Property 'White_Space'.
///
#[inline]
pub fn is_whitespace(c: char) -> bool
|
///
/// Indicates whether a `char` is alphanumeric
///
/// Alphanumericness is defined in terms of the Unicode General Categories
/// 'Nd', 'Nl', 'No' and the Derived Core Property 'Alphabetic'.
///
#[inline]
pub fn is_alphanumeric(c: char) -> bool {
derived_property::Alphabetic(c)
|| general_category::Nd(c)
|| general_category::Nl(c)
|| general_category::No(c)
}
///
/// Indicates whether a `char` is a control code point
///
/// Control code points are defined in terms of the Unicode General Category
/// 'Cc'.
///
#[inline]
pub fn is_control(c: char) -> bool { general_category::Cc(c) }
/// Indicates whether the `char` is numeric (Nd, Nl, or No)
#[inline]
pub fn is_digit(c: char) -> bool {
general_category::Nd(c)
|| general_category::Nl(c)
|| general_category::No(c)
}
///
/// Checks if a `char` parses as a numeric digit in the given radix
///
/// Compared to `is_digit()`, this function only recognizes the
/// characters `0-9`, `a-z` and `A-Z`.
///
/// # Return value
///
/// Returns `true` if `c` is a valid digit under `radix`, and `false`
/// otherwise.
///
/// # Failure
///
/// Fails if given a `radix` > 36.
///
/// # Note
///
/// This just wraps `to_digit()`.
///
#[inline]
pub fn is_digit_radix(c: char, radix: uint) -> bool {
match to_digit(c, radix) {
Some(_) => true,
None => false,
}
}
///
/// Converts a `char` to the corresponding digit
///
/// # Return value
///
/// If `c` is between '0' and '9', the corresponding value
/// between 0 and 9. If `c` is 'a' or 'A', 10. If `c` is
/// 'b' or 'B', 11, etc. Returns none if the `char` does not
/// refer to a digit in the given radix.
///
/// # Failure
///
/// Fails if given a `radix` outside the range `[0..36]`.
///
#[inline]
pub fn to_digit(c: char, radix: uint) -> Option<uint> {
if radix > 36 {
fail!("to_digit: radix is too high (maximum 36)");
}
let val = match c {
'0'.. '9' => c as uint - ('0' as uint),
'a'.. 'z' => c as uint + 10u - ('a' as uint),
'A'.. 'Z' => c as uint + 10u - ('A' as uint),
_ => return None,
};
if val < radix { Some(val) }
else { None }
}
/// Convert a char to its uppercase equivalent
///
/// The case-folding performed is the common or simple mapping:
/// it maps one unicode codepoint (one char in Rust) to its uppercase equivalent according
/// to the Unicode database at ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
/// The additional SpecialCasing.txt is not considered here, as it expands to multiple
/// codepoints in some cases.
///
/// A full reference can be found here
/// http://www.unicode.org/versions/Unicode4.0.0/ch03.pdf#G33992
///
/// # Return value
///
/// Returns the char itself if no conversion was made
#[inline]
pub fn to_uppercase(c: char) -> char {
conversions::to_upper(c)
}
/// Convert a char to its lowercase equivalent
///
/// The case-folding performed is the common or simple mapping
/// see `to_uppercase` for references and more information
///
/// # Return value
///
/// Returns the char itself if no conversion if possible
#[inline]
pub fn to_lowercase(c: char) -> char {
conversions::to_lower(c)
}
///
/// Converts a number to the character representing it
///
/// # Return value
///
/// Returns `Some(char)` if `num` represents one digit under `radix`,
/// using one character of `0-9` or `a-z`, or `None` if it doesn't.
///
/// # Failure
///
/// Fails if given an `radix` > 36.
///
#[inline]
pub fn from_digit(num: uint, radix: uint) -> Option<char> {
if radix > 36 {
fail!("from_digit: radix is to high (maximum 36)");
}
if num < radix {
unsafe {
if num < 10 {
Some(transmute(('0' as uint + num) as u32))
} else {
Some(transmute(('a' as uint + num - 10u) as u32))
}
}
} else {
None
}
}
///
/// Returns the hexadecimal Unicode escape of a `char`
///
/// The rules are as follows:
///
/// - chars in [0,0xff] get 2-digit escapes: `\\xNN`
/// - chars in [0x100,0xffff] get 4-digit escapes: `\\uNNNN`
/// - chars above 0x10000 get 8-digit escapes: `\\UNNNNNNNN`
///
pub fn escape_unicode(c: char, f: |char|) {
// avoid calling str::to_str_radix because we don't really need to allocate
// here.
f('\\');
let pad = match () {
_ if c <= '\xff' => { f('x'); 2 }
_ if c <= '\uffff' => { f('u'); 4 }
_ => { f('U'); 8 }
};
for offset in range_step::<i32>(4 * (pad - 1), -1, -4) {
let offset = offset as uint;
unsafe {
match ((c as i32) >> offset) & 0xf {
i @ 0.. 9 => { f(transmute('0' as i32 + i)); }
i => { f(transmute('a' as i32 + (i - 10))); }
}
}
}
}
///
/// Returns a 'default' ASCII and C++11-like literal escape of a `char`
///
/// The default is chosen with a bias toward producing literals that are
/// legal in a variety of languages, including C++11 and similar C-family
/// languages. The exact rules are:
///
/// - Tab, CR and LF are escaped as '\t', '\r' and '\n' respectively.
/// - Single-quote, double-quote and backslash chars are backslash-escaped.
/// - Any other chars in the range [0x20,0x7e] are not escaped.
/// - Any other chars are given hex unicode escapes; see `escape_unicode`.
///
pub fn escape_default(c: char, f: |char|) {
match c {
'\t' => { f('\\'); f('t'); }
'\r' => { f('\\'); f('r'); }
'\n' => { f('\\'); f('n'); }
'\\' => { f('\\'); f('\\'); }
'\'' => { f('\\'); f('\''); }
'"' => { f('\\'); f('"'); }
'\x20'.. '\x7e' => { f(c); }
_ => c.escape_unicode(f),
}
}
/// Returns the amount of bytes this `char` would need if encoded in UTF-8
pub fn len_utf8_bytes(c: char) -> uint {
let code = c as u32;
match () {
_ if code < MAX_ONE_B => 1u,
_ if code < MAX_TWO_B => 2u,
_ if code < MAX_THREE_B => 3u,
_ if code < MAX_FOUR_B => 4u,
_ => fail!("invalid character!"),
}
}
/// Useful functions for Unicode characters.
pub trait Char {
/// Returns whether the specified character is considered a Unicode
/// alphabetic code point.
fn is_alphabetic(&self) -> bool;
/// Returns whether the specified character satisfies the 'XID_Start'
/// Unicode property.
///
/// 'XID_Start' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to ID_Start but modified for closure under NFKx.
fn is_XID_start(&self) -> bool;
/// Returns whether the specified `char` satisfies the 'XID_Continue'
/// Unicode property.
///
/// 'XID_Continue' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to 'ID_Continue' but modified for closure under NFKx.
fn is_XID_continue(&self) -> bool;
/// Indicates whether a character is in lowercase.
///
/// This is defined according to the terms of the Unicode Derived Core
/// Property `Lowercase`.
fn is_lowercase(&self) -> bool;
/// Indicates whether a character is in uppercase.
///
/// This is defined according to the terms of the Unicode Derived Core
/// Property `Uppercase`.
fn is_uppercase(&self) -> bool;
/// Indicates whether a character is whitespace.
///
/// Whitespace is defined in terms of the Unicode Property `White_Space`.
fn is_whitespace(&self) -> bool;
/// Indicates whether a character is alphanumeric.
///
/// Alphanumericness is defined in terms of the Unicode General Categories
/// 'Nd', 'Nl', 'No' and the Derived Core Property 'Alphabetic'.
fn is_alphanumeric(&self) -> bool;
/// Indicates whether a character is a control code point.
///
/// Control code points are defined in terms of the Unicode General
/// Category `Cc`.
fn is_control(&self) -> bool;
/// Indicates whether the character is numeric (Nd, Nl, or No).
fn is_digit(&self) -> bool;
/// Checks if a `char` parses as a numeric digit in the given radix.
///
/// Compared to `is_digit()`, this function only recognizes the characters
/// `0-9`, `a-z` and `A-Z`.
///
/// # Return value
///
/// Returns `true` if `c` is a valid digit under `radix`, and `false`
/// otherwise.
///
/// # Failure
///
/// Fails if given a radix > 36.
fn is_digit_radix(&self, radix: uint) -> bool;
/// Converts a character to the corresponding digit.
///
/// # Return value
///
/// If `c` is between '0' and '9', the corresponding value between 0 and
/// 9. If `c` is 'a' or 'A', 10. If `c` is 'b' or 'B', 11, etc. Returns
/// none if the character does not refer to a digit in the given radix.
///
/// # Failure
///
/// Fails if given a radix outside the range [0..36].
fn to_digit(&self, radix: uint) -> Option<uint>;
/// Converts a character to its lowercase equivalent.
///
/// The case-folding performed is the common or simple mapping. See
/// `to_uppercase()` for references and more information.
///
/// # Return value
///
/// Returns the lowercase equivalent of the character, or the character
/// itself if no conversion is possible.
fn to_lowercase(&self) -> char;
/// Converts a character to its uppercase equivalent.
///
/// The case-folding performed is the common or simple mapping: it maps
/// one unicode codepoint (one character in Rust) to its uppercase
/// equivalent according to the Unicode database [1]. The additional
/// `SpecialCasing.txt` is not considered here, as it expands to multiple
/// codepoints in some cases.
///
/// A full reference can be found here [2].
///
/// # Return value
///
/// Returns the uppercase equivalent of the character, or the character
/// itself if no conversion was made.
///
/// [1]: ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
///
/// [2]: http://www.unicode.org/versions/Unicode4.0.0/ch03.pdf#G33992
fn to_uppercase(&self) -> char;
/// Converts a number to the character representing it.
///
/// # Return value
///
/// Returns `Some(char)` if `num` represents one digit under `radix`,
/// using one character of `0-9` or `a-z`, or `None` if it doesn't.
///
/// # Failure
///
/// Fails if given a radix > 36.
fn from_digit(num: uint, radix: uint) -> Option<char>;
/// Returns the hexadecimal Unicode escape of a character.
///
/// The rules are as follows:
///
/// * Characters in [0,0xff] get 2-digit escapes: `\\xNN`
/// * Characters in [0x100,0xffff] get 4-digit escapes: `\\uNNNN`.
/// * Characters above 0x10000 get 8-digit escapes: `\\UNNNNNNNN`.
fn escape_unicode(&self, f: |char|);
/// Returns a 'default' ASCII and C++11-like literal escape of a
/// character.
///
/// The default is chosen with a bias toward producing literals that are
/// legal in a variety of languages, including C++11 and similar C-family
/// languages. The exact rules are:
///
/// * Tab, CR and LF are escaped as '\t', '\r' and '\n' respectively.
/// * Single-quote, double-quote and backslash chars are backslash-
/// escaped.
/// * Any other chars in the range [0x20,0x7e] are not escaped.
/// * Any other chars are given hex unicode escapes; see `escape_unicode`.
fn escape_default(&self, f: |char|);
/// Returns the amount of bytes this character would need if encoded in
/// UTF-8.
fn len_utf8_bytes(&self) -> uint;
/// Encodes this character as UTF-8 into the provided byte buffer.
///
/// The buffer must be at least 4 bytes long or a runtime failure may
/// occur.
///
/// This will then return the number of bytes written to the slice.
fn encode_utf8(&self, dst: &mut [u8]) -> uint;
/// Encodes this character as UTF-16 into the provided `u16` buffer.
///
/// The buffer must be at least 2 elements long or a runtime failure may
/// occur.
///
/// This will then return the number of `u16`s written to the slice.
fn encode_utf16(&self, dst: &mut [u16]) -> uint;
}
impl Char for char {
fn is_alphabetic(&self) -> bool { is_alphabetic(*self) }
fn is_XID_start(&self) -> bool { is_XID_start(*self) }
fn is_XID_continue(&self) -> bool { is_XID_continue(*self) }
fn is_lowercase(&self) -> bool { is_lowercase(*self) }
fn is_uppercase(&self) -> bool { is_uppercase(*self) }
fn is_whitespace(&self) -> bool { is_whitespace(*self) }
fn is_alphanumeric(&self) -> bool { is_alphanumeric(*self) }
fn is_control(&self) -> bool { is_control(*self) }
fn is_digit(&self) -> bool { is_digit(*self) }
fn is_digit_radix(&self, radix: uint) -> bool { is_digit_radix(*self, radix) }
fn to_digit(&self, radix: uint) -> Option<uint> { to_digit(*self, radix) }
fn to_lowercase(&self) -> char { to_lowercase(*self) }
fn to_uppercase(&self) -> char { to_uppercase(*self) }
fn from_digit(num: uint, radix: uint) -> Option<char> { from_digit(num, radix) }
fn escape_unicode(&self, f: |char|) { escape_unicode(*self, f) }
fn escape_default(&self, f: |char|) { escape_default(*self, f) }
fn len_utf8_bytes(&self) -> uint { len_utf8_bytes(*self) }
fn encode_utf8<'a>(&self, dst: &'a mut [u8]) -> uint {
let code = *self as u32;
if code < MAX_ONE_B {
dst[0] = code as u8;
1
} else if code < MAX_TWO_B {
dst[0] = (code >> 6u & 0x1F_u32) as u8 | TAG_TWO_B;
dst[1] = (code & 0x3F_u32) as u8 | TAG_CONT;
2
} else if code < MAX_THREE_B {
dst[0] = (code >> 12u & 0x0F_u32) as u8 | TAG_THREE_B;
dst[1] = (code >> 6u & 0x3F_u32) as u8 | TAG_CONT;
dst[2] = (code & 0x3F_u32) as u8 | TAG_CONT;
3
} else {
dst[0] = (code >> 18u & 0x07_u32) as u8 | TAG_FOUR_B;
dst[1] = (code >> 12u & 0x3F_u32) as u8 | TAG_CONT;
dst[2] = (code >> 6u & 0x3F_u32) as u8 | TAG_CONT;
dst[3] = (code & 0x3F_u32) as u8 | TAG_CONT;
4
}
}
fn encode_utf16(&self, dst: &mut [u16]) -> uint {
let mut ch = *self as u32;
if (ch & 0xFFFF_u32) == ch {
// The BMP falls through (assuming non-surrogate, as it should)
assert!(ch <= 0xD7FF_u32 || ch >= 0xE000_u32);
dst[0] = ch as u16;
1
} else {
// Supplementary planes break into surrogates.
assert!(ch >= 0x1_0000_u32 && ch <= 0x10_FFFF_u32);
ch -= 0x1_0000_u32;
dst[0] = 0xD800_u16 | ((ch >> 10) as u16);
dst[1] = 0xDC00_u16 | ((ch as u16) & 0x3FF_u16);
2
}
}
}
|
{
// As an optimization ASCII whitespace characters are checked separately
c == ' '
|| ('\x09' <= c && c <= '\x0d')
|| property::White_Space(c)
}
|
identifier_body
|
sccache-plus-cl.rs
|
use std::env;
use std::process::{self, Command};
fn main() {
let target = env::var("SCCACHE_TARGET").unwrap();
// Locate the actual compiler that we're invoking
env::set_var("CC", env::var_os("SCCACHE_CC").unwrap());
env::set_var("CXX", env::var_os("SCCACHE_CXX").unwrap());
let mut cfg = cc::Build::new();
cfg.cargo_metadata(false)
.out_dir("/")
.target(&target)
.host(&target)
.opt_level(0)
.warnings(false)
.debug(false);
let compiler = cfg.get_compiler();
// Invoke sccache with said compiler
let sccache_path = env::var_os("SCCACHE_PATH").unwrap();
let mut cmd = Command::new(&sccache_path);
cmd.arg(compiler.path());
for &(ref k, ref v) in compiler.env() {
cmd.env(k, v);
}
for arg in env::args().skip(1) {
cmd.arg(arg);
}
if let Ok(s) = env::var("SCCACHE_EXTRA_ARGS") {
for s in s.split_whitespace() {
cmd.arg(s);
}
}
|
process::exit(status.code().unwrap_or(2))
}
|
let status = cmd.status().expect("failed to spawn");
|
random_line_split
|
sccache-plus-cl.rs
|
use std::env;
use std::process::{self, Command};
fn
|
() {
let target = env::var("SCCACHE_TARGET").unwrap();
// Locate the actual compiler that we're invoking
env::set_var("CC", env::var_os("SCCACHE_CC").unwrap());
env::set_var("CXX", env::var_os("SCCACHE_CXX").unwrap());
let mut cfg = cc::Build::new();
cfg.cargo_metadata(false)
.out_dir("/")
.target(&target)
.host(&target)
.opt_level(0)
.warnings(false)
.debug(false);
let compiler = cfg.get_compiler();
// Invoke sccache with said compiler
let sccache_path = env::var_os("SCCACHE_PATH").unwrap();
let mut cmd = Command::new(&sccache_path);
cmd.arg(compiler.path());
for &(ref k, ref v) in compiler.env() {
cmd.env(k, v);
}
for arg in env::args().skip(1) {
cmd.arg(arg);
}
if let Ok(s) = env::var("SCCACHE_EXTRA_ARGS") {
for s in s.split_whitespace() {
cmd.arg(s);
}
}
let status = cmd.status().expect("failed to spawn");
process::exit(status.code().unwrap_or(2))
}
|
main
|
identifier_name
|
sccache-plus-cl.rs
|
use std::env;
use std::process::{self, Command};
fn main() {
let target = env::var("SCCACHE_TARGET").unwrap();
// Locate the actual compiler that we're invoking
env::set_var("CC", env::var_os("SCCACHE_CC").unwrap());
env::set_var("CXX", env::var_os("SCCACHE_CXX").unwrap());
let mut cfg = cc::Build::new();
cfg.cargo_metadata(false)
.out_dir("/")
.target(&target)
.host(&target)
.opt_level(0)
.warnings(false)
.debug(false);
let compiler = cfg.get_compiler();
// Invoke sccache with said compiler
let sccache_path = env::var_os("SCCACHE_PATH").unwrap();
let mut cmd = Command::new(&sccache_path);
cmd.arg(compiler.path());
for &(ref k, ref v) in compiler.env() {
cmd.env(k, v);
}
for arg in env::args().skip(1) {
cmd.arg(arg);
}
if let Ok(s) = env::var("SCCACHE_EXTRA_ARGS")
|
let status = cmd.status().expect("failed to spawn");
process::exit(status.code().unwrap_or(2))
}
|
{
for s in s.split_whitespace() {
cmd.arg(s);
}
}
|
conditional_block
|
sccache-plus-cl.rs
|
use std::env;
use std::process::{self, Command};
fn main()
|
cmd.env(k, v);
}
for arg in env::args().skip(1) {
cmd.arg(arg);
}
if let Ok(s) = env::var("SCCACHE_EXTRA_ARGS") {
for s in s.split_whitespace() {
cmd.arg(s);
}
}
let status = cmd.status().expect("failed to spawn");
process::exit(status.code().unwrap_or(2))
}
|
{
let target = env::var("SCCACHE_TARGET").unwrap();
// Locate the actual compiler that we're invoking
env::set_var("CC", env::var_os("SCCACHE_CC").unwrap());
env::set_var("CXX", env::var_os("SCCACHE_CXX").unwrap());
let mut cfg = cc::Build::new();
cfg.cargo_metadata(false)
.out_dir("/")
.target(&target)
.host(&target)
.opt_level(0)
.warnings(false)
.debug(false);
let compiler = cfg.get_compiler();
// Invoke sccache with said compiler
let sccache_path = env::var_os("SCCACHE_PATH").unwrap();
let mut cmd = Command::new(&sccache_path);
cmd.arg(compiler.path());
for &(ref k, ref v) in compiler.env() {
|
identifier_body
|
dynamic_lib.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use os::windows::prelude::*;
use ffi::{CString, OsStr};
use io;
use sys::c;
pub struct DynamicLibrary {
handle: c::HMODULE,
}
impl DynamicLibrary {
pub fn open(filename: &str) -> io::Result<DynamicLibrary> {
let filename = OsStr::new(filename)
.encode_wide()
.chain(Some(0))
.collect::<Vec<_>>();
let result = unsafe {
c::LoadLibraryW(filename.as_ptr())
};
if result.is_null() {
Err(io::Error::last_os_error())
} else {
Ok(DynamicLibrary { handle: result })
}
}
pub fn symbol(&self, symbol: &str) -> io::Result<usize> {
let symbol = CString::new(symbol)?;
unsafe {
match c::GetProcAddress(self.handle, symbol.as_ptr()) as usize {
|
}
}
impl Drop for DynamicLibrary {
fn drop(&mut self) {
unsafe {
c::FreeLibrary(self.handle);
}
}
}
|
0 => Err(io::Error::last_os_error()),
n => Ok(n),
}
}
|
random_line_split
|
dynamic_lib.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use os::windows::prelude::*;
use ffi::{CString, OsStr};
use io;
use sys::c;
pub struct DynamicLibrary {
handle: c::HMODULE,
}
impl DynamicLibrary {
pub fn open(filename: &str) -> io::Result<DynamicLibrary> {
let filename = OsStr::new(filename)
.encode_wide()
.chain(Some(0))
.collect::<Vec<_>>();
let result = unsafe {
c::LoadLibraryW(filename.as_ptr())
};
if result.is_null() {
Err(io::Error::last_os_error())
} else {
Ok(DynamicLibrary { handle: result })
}
}
pub fn
|
(&self, symbol: &str) -> io::Result<usize> {
let symbol = CString::new(symbol)?;
unsafe {
match c::GetProcAddress(self.handle, symbol.as_ptr()) as usize {
0 => Err(io::Error::last_os_error()),
n => Ok(n),
}
}
}
}
impl Drop for DynamicLibrary {
fn drop(&mut self) {
unsafe {
c::FreeLibrary(self.handle);
}
}
}
|
symbol
|
identifier_name
|
dynamic_lib.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use os::windows::prelude::*;
use ffi::{CString, OsStr};
use io;
use sys::c;
pub struct DynamicLibrary {
handle: c::HMODULE,
}
impl DynamicLibrary {
pub fn open(filename: &str) -> io::Result<DynamicLibrary> {
let filename = OsStr::new(filename)
.encode_wide()
.chain(Some(0))
.collect::<Vec<_>>();
let result = unsafe {
c::LoadLibraryW(filename.as_ptr())
};
if result.is_null() {
Err(io::Error::last_os_error())
} else {
Ok(DynamicLibrary { handle: result })
}
}
pub fn symbol(&self, symbol: &str) -> io::Result<usize> {
let symbol = CString::new(symbol)?;
unsafe {
match c::GetProcAddress(self.handle, symbol.as_ptr()) as usize {
0 => Err(io::Error::last_os_error()),
n => Ok(n),
}
}
}
}
impl Drop for DynamicLibrary {
fn drop(&mut self)
|
}
|
{
unsafe {
c::FreeLibrary(self.handle);
}
}
|
identifier_body
|
run.rs
|
use std::old_io::process::ExitStatus;
use cargo::ops;
use cargo::core::manifest::TargetKind;
use cargo::util::{CliResult, CliError, human, Config};
use cargo::util::important_paths::{find_root_manifest_for_cwd};
#[derive(RustcDecodable)]
struct Options {
flag_bin: Option<String>,
flag_example: Option<String>,
flag_jobs: Option<u32>,
flag_features: Vec<String>,
flag_no_default_features: bool,
flag_target: Option<String>,
flag_manifest_path: Option<String>,
flag_verbose: bool,
flag_release: bool,
arg_args: Vec<String>,
}
pub const USAGE: &'static str = "
Run the main binary of the local package (src/main.rs)
Usage:
cargo run [options] [--] [<args>...]
Options:
-h, --help Print this message
--bin NAME Name of the bin target to run
--example NAME Name of the example target to run
-j N, --jobs N The number of jobs to run in parallel
--release Build artifacts in release mode, with optimizations
--features FEATURES Space-separated list of features to also build
--no-default-features Do not build the `default` feature
--target TRIPLE Build for the target triple
--manifest-path PATH Path to the manifest to execute
-v, --verbose Use verbose output
If neither `--bin` or `--example` are given, then if the project only has one
bin target it will be run. Otherwise `--bin` specifies the bin target to run,
and `--example` specifies the example target to run. At most one of `--bin` or
`--example` can be provided.
All of the trailing arguments are passed as to the binary to run.
";
pub fn
|
(options: Options, config: &Config) -> CliResult<Option<()>> {
config.shell().set_verbose(options.flag_verbose);
let root = try!(find_root_manifest_for_cwd(options.flag_manifest_path));
let env = match (options.flag_release, options.flag_example.is_some()) {
(true, _) => "release",
(false, true) => "test",
(false, false) => "compile"
};
let compile_opts = ops::CompileOptions {
env: env,
config: config,
jobs: options.flag_jobs,
target: options.flag_target.as_ref().map(|t| t.as_slice()),
dev_deps: true,
features: &options.flag_features,
no_default_features: options.flag_no_default_features,
spec: None,
lib_only: false,
exec_engine: None,
};
let (target_kind, name) = match (options.flag_bin, options.flag_example) {
(Some(bin), None) => (TargetKind::Bin, Some(bin)),
(None, Some(example)) => (TargetKind::Example, Some(example)),
(None, None) => (TargetKind::Bin, None),
(Some(_), Some(_)) => return Err(CliError::from_boxed(
human("specify either `--bin` or `--example`, not both"), 1)),
};
let err = try!(ops::run(&root,
target_kind,
name,
&compile_opts,
&options.arg_args).map_err(|err| {
CliError::from_boxed(err, 101)
}));
match err {
None => Ok(None),
Some(err) => {
Err(match err.exit {
Some(ExitStatus(i)) => CliError::from_boxed(box err, i as i32),
_ => CliError::from_boxed(box err, 101),
})
}
}
}
|
execute
|
identifier_name
|
run.rs
|
use std::old_io::process::ExitStatus;
use cargo::ops;
use cargo::core::manifest::TargetKind;
use cargo::util::{CliResult, CliError, human, Config};
use cargo::util::important_paths::{find_root_manifest_for_cwd};
#[derive(RustcDecodable)]
struct Options {
flag_bin: Option<String>,
flag_example: Option<String>,
flag_jobs: Option<u32>,
flag_features: Vec<String>,
flag_no_default_features: bool,
flag_target: Option<String>,
flag_manifest_path: Option<String>,
flag_verbose: bool,
flag_release: bool,
arg_args: Vec<String>,
}
pub const USAGE: &'static str = "
Run the main binary of the local package (src/main.rs)
Usage:
cargo run [options] [--] [<args>...]
Options:
-h, --help Print this message
|
--example NAME Name of the example target to run
-j N, --jobs N The number of jobs to run in parallel
--release Build artifacts in release mode, with optimizations
--features FEATURES Space-separated list of features to also build
--no-default-features Do not build the `default` feature
--target TRIPLE Build for the target triple
--manifest-path PATH Path to the manifest to execute
-v, --verbose Use verbose output
If neither `--bin` or `--example` are given, then if the project only has one
bin target it will be run. Otherwise `--bin` specifies the bin target to run,
and `--example` specifies the example target to run. At most one of `--bin` or
`--example` can be provided.
All of the trailing arguments are passed as to the binary to run.
";
pub fn execute(options: Options, config: &Config) -> CliResult<Option<()>> {
config.shell().set_verbose(options.flag_verbose);
let root = try!(find_root_manifest_for_cwd(options.flag_manifest_path));
let env = match (options.flag_release, options.flag_example.is_some()) {
(true, _) => "release",
(false, true) => "test",
(false, false) => "compile"
};
let compile_opts = ops::CompileOptions {
env: env,
config: config,
jobs: options.flag_jobs,
target: options.flag_target.as_ref().map(|t| t.as_slice()),
dev_deps: true,
features: &options.flag_features,
no_default_features: options.flag_no_default_features,
spec: None,
lib_only: false,
exec_engine: None,
};
let (target_kind, name) = match (options.flag_bin, options.flag_example) {
(Some(bin), None) => (TargetKind::Bin, Some(bin)),
(None, Some(example)) => (TargetKind::Example, Some(example)),
(None, None) => (TargetKind::Bin, None),
(Some(_), Some(_)) => return Err(CliError::from_boxed(
human("specify either `--bin` or `--example`, not both"), 1)),
};
let err = try!(ops::run(&root,
target_kind,
name,
&compile_opts,
&options.arg_args).map_err(|err| {
CliError::from_boxed(err, 101)
}));
match err {
None => Ok(None),
Some(err) => {
Err(match err.exit {
Some(ExitStatus(i)) => CliError::from_boxed(box err, i as i32),
_ => CliError::from_boxed(box err, 101),
})
}
}
}
|
--bin NAME Name of the bin target to run
|
random_line_split
|
issue-3389.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
|
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct trie_node {
content: ~[~str],
children: ~[trie_node],
}
fn print_str_vector(vector: ~[~str]) {
for vector.iter().advance |string| {
println(*string);
}
}
pub fn main() {
let mut node: trie_node = trie_node {
content: ~[],
children: ~[]
};
let v = ~[~"123", ~"abc"];
node.content = ~[~"123", ~"abc"];
print_str_vector(v);
print_str_vector(node.content.clone());
}
|
random_line_split
|
|
issue-3389.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct
|
{
content: ~[~str],
children: ~[trie_node],
}
fn print_str_vector(vector: ~[~str]) {
for vector.iter().advance |string| {
println(*string);
}
}
pub fn main() {
let mut node: trie_node = trie_node {
content: ~[],
children: ~[]
};
let v = ~[~"123", ~"abc"];
node.content = ~[~"123", ~"abc"];
print_str_vector(v);
print_str_vector(node.content.clone());
}
|
trie_node
|
identifier_name
|
set_value.rs
|
#![allow(dead_code)]
#![allow(unused_imports)]
extern crate libc;
extern crate sysctl;
// Import the trait
use sysctl::Sysctl;
#[cfg(target_os = "freebsd")]
const CTLNAME: &str = "net.inet.ip.forwarding";
#[cfg(target_os = "macos")]
const CTLNAME: &str = "net.inet.ip.forwarding";
#[cfg(any(target_os = "linux", target_os = "android"))]
const CTLNAME: &str = "net.ipv4.ip_forward";
fn
|
() {
assert_eq!(
unsafe { libc::geteuid() },
0,
"This example must be run as root"
);
let ctl = sysctl::Ctl::new(CTLNAME).expect(&format!("could not get sysctl '{}'", CTLNAME));
let name = ctl.name().expect("could not get sysctl name");
println!("\nFlipping value of sysctl '{}'", name);
let old_value = ctl.value_string().expect("could not get sysctl value");
println!("Current value is '{}'", old_value);
let target_value = match old_value.as_ref() {
"0" => "1",
_ => "0",
};
println!("Setting value to '{}'...", target_value);
let new_value = ctl.set_value_string(target_value).unwrap_or_else(|e| {
panic!("Could not set value. Error: {:?}", e);
});
assert_eq!(new_value, target_value, "could not set value");
println!("OK. Now restoring old value '{}'...", old_value);
let ret = ctl
.set_value_string(&old_value)
.expect("could not restore old value");
println!("OK. Value restored to {}.", ret);
}
|
main
|
identifier_name
|
set_value.rs
|
#![allow(dead_code)]
#![allow(unused_imports)]
extern crate libc;
extern crate sysctl;
// Import the trait
use sysctl::Sysctl;
#[cfg(target_os = "freebsd")]
const CTLNAME: &str = "net.inet.ip.forwarding";
#[cfg(target_os = "macos")]
const CTLNAME: &str = "net.inet.ip.forwarding";
#[cfg(any(target_os = "linux", target_os = "android"))]
const CTLNAME: &str = "net.ipv4.ip_forward";
fn main()
|
println!("Setting value to '{}'...", target_value);
let new_value = ctl.set_value_string(target_value).unwrap_or_else(|e| {
panic!("Could not set value. Error: {:?}", e);
});
assert_eq!(new_value, target_value, "could not set value");
println!("OK. Now restoring old value '{}'...", old_value);
let ret = ctl
.set_value_string(&old_value)
.expect("could not restore old value");
println!("OK. Value restored to {}.", ret);
}
|
{
assert_eq!(
unsafe { libc::geteuid() },
0,
"This example must be run as root"
);
let ctl = sysctl::Ctl::new(CTLNAME).expect(&format!("could not get sysctl '{}'", CTLNAME));
let name = ctl.name().expect("could not get sysctl name");
println!("\nFlipping value of sysctl '{}'", name);
let old_value = ctl.value_string().expect("could not get sysctl value");
println!("Current value is '{}'", old_value);
let target_value = match old_value.as_ref() {
"0" => "1",
_ => "0",
};
|
identifier_body
|
set_value.rs
|
#![allow(dead_code)]
#![allow(unused_imports)]
extern crate libc;
extern crate sysctl;
// Import the trait
use sysctl::Sysctl;
#[cfg(target_os = "freebsd")]
const CTLNAME: &str = "net.inet.ip.forwarding";
#[cfg(target_os = "macos")]
const CTLNAME: &str = "net.inet.ip.forwarding";
#[cfg(any(target_os = "linux", target_os = "android"))]
const CTLNAME: &str = "net.ipv4.ip_forward";
fn main() {
assert_eq!(
unsafe { libc::geteuid() },
0,
"This example must be run as root"
);
let ctl = sysctl::Ctl::new(CTLNAME).expect(&format!("could not get sysctl '{}'", CTLNAME));
let name = ctl.name().expect("could not get sysctl name");
println!("\nFlipping value of sysctl '{}'", name);
let old_value = ctl.value_string().expect("could not get sysctl value");
println!("Current value is '{}'", old_value);
let target_value = match old_value.as_ref() {
|
let new_value = ctl.set_value_string(target_value).unwrap_or_else(|e| {
panic!("Could not set value. Error: {:?}", e);
});
assert_eq!(new_value, target_value, "could not set value");
println!("OK. Now restoring old value '{}'...", old_value);
let ret = ctl
.set_value_string(&old_value)
.expect("could not restore old value");
println!("OK. Value restored to {}.", ret);
}
|
"0" => "1",
_ => "0",
};
println!("Setting value to '{}'...", target_value);
|
random_line_split
|
issue-7911.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-pretty
// (Closes #7911) Test that we can use the same self expression
// with different mutability in macro in two methods
#![allow(unused_variable)] // unused foobar_immut + foobar_mut
trait FooBar {}
struct Bar(i32);
struct
|
{ bar: Bar }
impl FooBar for Bar {}
trait Test {
fn get_immut(&self) -> &FooBar;
fn get_mut(&mut self) -> &mut FooBar;
}
macro_rules! generate_test { ($type_:path, $slf:ident, $field:expr) => (
impl Test for $type_ {
fn get_immut(&$slf) -> &FooBar {
&$field as &FooBar
}
fn get_mut(&mut $slf) -> &mut FooBar {
&mut $field as &mut FooBar
}
}
)}
generate_test!(Foo, self, self.bar);
pub fn main() {
let mut foo: Foo = Foo { bar: Bar(42) };
{ let foobar_immut = foo.get_immut(); }
{ let foobar_mut = foo.get_mut(); }
}
|
Foo
|
identifier_name
|
issue-7911.rs
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-pretty
// (Closes #7911) Test that we can use the same self expression
// with different mutability in macro in two methods
#![allow(unused_variable)] // unused foobar_immut + foobar_mut
trait FooBar {}
struct Bar(i32);
struct Foo { bar: Bar }
impl FooBar for Bar {}
trait Test {
fn get_immut(&self) -> &FooBar;
fn get_mut(&mut self) -> &mut FooBar;
}
macro_rules! generate_test { ($type_:path, $slf:ident, $field:expr) => (
impl Test for $type_ {
fn get_immut(&$slf) -> &FooBar {
&$field as &FooBar
}
fn get_mut(&mut $slf) -> &mut FooBar {
&mut $field as &mut FooBar
}
}
)}
generate_test!(Foo, self, self.bar);
pub fn main() {
let mut foo: Foo = Foo { bar: Bar(42) };
{ let foobar_immut = foo.get_immut(); }
{ let foobar_mut = foo.get_mut(); }
}
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
|
random_line_split
|
|
issue-7911.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-pretty
// (Closes #7911) Test that we can use the same self expression
// with different mutability in macro in two methods
#![allow(unused_variable)] // unused foobar_immut + foobar_mut
trait FooBar {}
struct Bar(i32);
struct Foo { bar: Bar }
impl FooBar for Bar {}
trait Test {
fn get_immut(&self) -> &FooBar;
fn get_mut(&mut self) -> &mut FooBar;
}
macro_rules! generate_test { ($type_:path, $slf:ident, $field:expr) => (
impl Test for $type_ {
fn get_immut(&$slf) -> &FooBar {
&$field as &FooBar
}
fn get_mut(&mut $slf) -> &mut FooBar {
&mut $field as &mut FooBar
}
}
)}
generate_test!(Foo, self, self.bar);
pub fn main()
|
{
let mut foo: Foo = Foo { bar: Bar(42) };
{ let foobar_immut = foo.get_immut(); }
{ let foobar_mut = foo.get_mut(); }
}
|
identifier_body
|
|
lib.rs
|
#[cfg(test)]
mod test;
use smallvec::{Array, SmallVec};
use std::convert::{From, Into};
use std::fmt::{self, Debug, Formatter};
use std::iter::FromIterator;
/// A `NibbleVec` backed by a `SmallVec` with 64 inline element slots.
/// This will not allocate until more than 64 elements are added.
pub type Nibblet = NibbleVec<[u8; 64]>;
/// A data-structure for storing a sequence of 4-bit values.
///
/// Values are stored in a `Vec<u8>`, with two values per byte.
///
/// Values at even indices are stored in the most-significant half of their byte,
/// while values at odd indices are stored in the least-significant half.
///
/// Imagine a vector of [MSB][msb-wiki] first bytes, and you'll be right.
///
/// n = [_ _ | _ _ | _ _]
///
/// [msb-wiki]: http://en.wikipedia.org/wiki/Most_significant_bit
#[derive(Clone, Default)]
pub struct NibbleVec<A: Array<Item = u8>> {
length: usize,
data: SmallVec<A>,
}
impl<A: Array<Item = u8>> NibbleVec<A> {
/// Create an empty nibble vector.
pub fn new() -> NibbleVec<A> {
NibbleVec {
length: 0,
data: SmallVec::new(),
}
}
/// Create a nibble vector from a vector of bytes.
///
/// Each byte is split into two 4-bit entries (MSB, LSB).
#[inline]
pub fn from_byte_vec(vec: Vec<u8>) -> NibbleVec<A> {
let length = 2 * vec.len();
NibbleVec {
length,
data: SmallVec::from_iter(vec),
}
}
/// Returns a byte slice of the nibble vector's contents.
#[inline]
pub fn as_bytes(&self) -> &[u8] {
&self.data[..]
}
/// Converts a nibble vector into a byte vector.
///
/// This consumes the nibble vector, so we do not need to copy its contents.
#[inline]
pub fn into_bytes(self) -> Vec<u8> {
self.data.to_vec()
}
/// Get the number of elements stored in the vector.
#[inline]
pub fn len(&self) -> usize {
self.length
}
/// Returns `true` if the nibble vector has a length of 0.
#[inline]
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
/// Fetch a single entry from the vector.
///
/// Guaranteed to be a value in the interval [0, 15].
///
/// **Panics** if `idx >= self.len()`.
#[inline]
pub fn get(&self, idx: usize) -> u8 {
if idx >= self.length {
panic!(
"NibbleVec index out of bounds: len is {}, index is {}",
self.length, idx
);
}
let vec_idx = idx / 2;
match idx % 2 {
// If the index is even, take the first (most significant) half of the stored byte.
0 => self.data[vec_idx] >> 4,
// If the index is odd, take the second (least significant) half.
_ => self.data[vec_idx] & 0x0F,
}
}
/// Add a single nibble to the vector.
///
/// Only the 4 least-significant bits of the value are used.
#[inline]
pub fn push(&mut self, val: u8) {
if self.length % 2 == 0 {
self.data.push(val << 4);
} else {
let vec_len = self.data.len();
// Zero the second half of the last byte just to be safe.
self.data[vec_len - 1] &= 0xF0;
// Write the new value.
self.data[vec_len - 1] |= val & 0x0F;
}
self.length += 1;
}
/// Split the vector into two parts.
///
/// All elements at or following the given index are returned in a new `NibbleVec`,
/// with exactly `idx` elements remaining in this vector.
///
/// **Panics** if `idx > self.len()`.
pub fn split(&mut self, idx: usize) -> NibbleVec<A> {
// assert! is a few percent slower surprisingly
if idx > self.length {
panic!(
"attempted to split past vector end. len is {}, index is {}",
self.length, idx
);
} else if idx == self.length {
NibbleVec::new()
} else if idx % 2 == 0 {
self.split_even(idx)
} else {
self.split_odd(idx)
}
}
/// Split function for odd *indices*.
#[inline]
fn split_odd(&mut self, idx: usize) -> NibbleVec<A> {
let mut tail = NibbleVec::new();
// Perform an overlap copy, copying the last nibble of the original vector only if
// the length of the new tail is *odd*.
let tail_length = self.length - idx;
let take_last = tail_length % 2 == 1;
self.overlap_copy(
idx / 2,
self.data.len(),
&mut tail.data,
&mut tail.length,
take_last,
);
// Remove the copied bytes, being careful to skip the idx byte.
for _ in (idx / 2 + 1)..self.data.len() {
self.data.pop();
}
// Zero the second half of the index byte so as to maintain the last-nibble invariant.
self.data[idx / 2] &= 0xF0;
// Update the length of the first NibbleVec.
self.length = idx;
tail
}
/// Split function for even *indices*.
#[inline]
fn split_even(&mut self, idx: usize) -> NibbleVec<A> {
// Avoid allocating a temporary vector by copying all the bytes in order, then popping them.
// Possible to prove: l_d - ⌊i / 2⌋ = ⌊(l_v - i + 1) / 2⌋
// where l_d = self.data.len()
// l_v = self.length
let half_idx = idx / 2;
let mut tail = NibbleVec::new();
// Copy the bytes.
for i in half_idx..self.data.len() {
tail.data.push(self.data[i]);
}
// Pop the same bytes.
for _ in half_idx..self.data.len() {
self.data.pop();
}
// Update lengths.
tail.length = self.length - idx;
self.length = idx;
tail
}
/// Copy data between the second half of self.data[start] and
/// self.data[end - 1]. The second half of the last entry is included
/// if include_last is true.
#[inline]
fn overlap_copy(
&self,
start: usize,
end: usize,
vec: &mut SmallVec<A>,
length: &mut usize,
include_last: bool,
) {
// Copy up to the first half of the last byte.
for i in start..(end - 1) {
// The first half is the second half of the old entry.
let first_half = self.data[i] & 0x0f;
// The second half is the first half of the next entry.
let second_half = self.data[i + 1] >> 4;
vec.push((first_half << 4) | second_half);
*length += 2;
}
if include_last {
let last = self.data[end - 1] & 0x0f;
vec.push(last << 4);
*length += 1;
}
}
/// Append another nibble vector whilst consuming this vector.
#[inline]
pub fn join(mut self, other: &NibbleVec<A>) -> NibbleVec<A> {
// If the length is even, we can append directly.
if self.length % 2 == 0 {
self.length += other.length;
self.data.extend_from_slice(&other.data);
return self;
}
// If the other vector is empty, bail out.
if other.is_empty() {
return self;
}
// If the length is odd, we have to perform an overlap copy.
// Copy the first half of the first element, to make the vector an even length.
self.push(other.get(0));
// Copy the rest of the vector using an overlap copy.
let take_last = other.len() % 2 == 0;
other.overlap_copy(
0,
other.data.len(),
&mut self.data,
&mut self.length,
take_last,
);
self
}
}
impl<A: Array<Item = u8>> PartialEq<NibbleVec<A>> for NibbleVec<A> {
#[inline]
fn eq(&self, other: &NibbleVec<A>) -> bool {
self.length == other.length && self.data == other.data
}
}
impl<A: Array<Item = u8>> Eq for NibbleVec<A> {}
/// Compare a `NibbleVec` and a slice of bytes *element-by-element*.
/// Bytes are **not** interpreted as two `NibbleVec` entries.
impl<A: Array<Item = u8>> PartialEq<[u8]> for NibbleVec<A> {
#[inline]
fn eq(&self, other: &[u8]) -> bool {
if other.len()!= self.len() {
return false;
}
for (i, x) in other.iter().enumerate() {
if self.get(i)!= *x {
return false;
}
}
true
}
}
impl<A: Array<Item = u8>> Debug for NibbleVec<A> {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
write!(fmt, "NibbleVec [")?;
if!self.is_empty() {
write!(fmt, "{}", self.get(0))?;
}
for i in 1..self.len() {
write!(fmt, ", {}", self.get(i))?;
}
write!(fmt, "]")
}
}
impl<A: Array<Item = u8>> From<Vec<u8>> for NibbleVec<A> {
#[inline]
fn from(v:
|
u8>) -> NibbleVec<A> {
NibbleVec::from_byte_vec(v)
}
}
impl<'a, A: Array<Item = u8>> From<&'a [u8]> for NibbleVec<A> {
#[inline]
fn from(v: &[u8]) -> NibbleVec<A> {
NibbleVec::from_byte_vec(v.into())
}
}
impl<A: Array<Item = u8>> Into<Vec<u8>> for NibbleVec<A> {
#[inline]
fn into(self) -> Vec<u8> {
self.data.to_vec()
}
}
impl<'a, A: Array<Item = u8>> Into<Vec<u8>> for &'a NibbleVec<A> {
#[inline]
fn into(self) -> Vec<u8> {
self.data.to_vec()
}
}
|
Vec<
|
identifier_name
|
lib.rs
|
#[cfg(test)]
mod test;
use smallvec::{Array, SmallVec};
use std::convert::{From, Into};
use std::fmt::{self, Debug, Formatter};
use std::iter::FromIterator;
/// A `NibbleVec` backed by a `SmallVec` with 64 inline element slots.
/// This will not allocate until more than 64 elements are added.
pub type Nibblet = NibbleVec<[u8; 64]>;
/// A data-structure for storing a sequence of 4-bit values.
///
/// Values are stored in a `Vec<u8>`, with two values per byte.
///
/// Values at even indices are stored in the most-significant half of their byte,
/// while values at odd indices are stored in the least-significant half.
///
/// Imagine a vector of [MSB][msb-wiki] first bytes, and you'll be right.
///
/// n = [_ _ | _ _ | _ _]
///
/// [msb-wiki]: http://en.wikipedia.org/wiki/Most_significant_bit
#[derive(Clone, Default)]
pub struct NibbleVec<A: Array<Item = u8>> {
length: usize,
data: SmallVec<A>,
}
impl<A: Array<Item = u8>> NibbleVec<A> {
/// Create an empty nibble vector.
pub fn new() -> NibbleVec<A> {
NibbleVec {
length: 0,
data: SmallVec::new(),
}
}
/// Create a nibble vector from a vector of bytes.
///
/// Each byte is split into two 4-bit entries (MSB, LSB).
#[inline]
pub fn from_byte_vec(vec: Vec<u8>) -> NibbleVec<A> {
let length = 2 * vec.len();
NibbleVec {
length,
data: SmallVec::from_iter(vec),
}
}
/// Returns a byte slice of the nibble vector's contents.
#[inline]
pub fn as_bytes(&self) -> &[u8] {
&self.data[..]
}
/// Converts a nibble vector into a byte vector.
///
/// This consumes the nibble vector, so we do not need to copy its contents.
|
pub fn into_bytes(self) -> Vec<u8> {
self.data.to_vec()
}
/// Get the number of elements stored in the vector.
#[inline]
pub fn len(&self) -> usize {
self.length
}
/// Returns `true` if the nibble vector has a length of 0.
#[inline]
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
/// Fetch a single entry from the vector.
///
/// Guaranteed to be a value in the interval [0, 15].
///
/// **Panics** if `idx >= self.len()`.
#[inline]
pub fn get(&self, idx: usize) -> u8 {
if idx >= self.length {
panic!(
"NibbleVec index out of bounds: len is {}, index is {}",
self.length, idx
);
}
let vec_idx = idx / 2;
match idx % 2 {
// If the index is even, take the first (most significant) half of the stored byte.
0 => self.data[vec_idx] >> 4,
// If the index is odd, take the second (least significant) half.
_ => self.data[vec_idx] & 0x0F,
}
}
/// Add a single nibble to the vector.
///
/// Only the 4 least-significant bits of the value are used.
#[inline]
pub fn push(&mut self, val: u8) {
if self.length % 2 == 0 {
self.data.push(val << 4);
} else {
let vec_len = self.data.len();
// Zero the second half of the last byte just to be safe.
self.data[vec_len - 1] &= 0xF0;
// Write the new value.
self.data[vec_len - 1] |= val & 0x0F;
}
self.length += 1;
}
/// Split the vector into two parts.
///
/// All elements at or following the given index are returned in a new `NibbleVec`,
/// with exactly `idx` elements remaining in this vector.
///
/// **Panics** if `idx > self.len()`.
pub fn split(&mut self, idx: usize) -> NibbleVec<A> {
// assert! is a few percent slower surprisingly
if idx > self.length {
panic!(
"attempted to split past vector end. len is {}, index is {}",
self.length, idx
);
} else if idx == self.length {
NibbleVec::new()
} else if idx % 2 == 0 {
self.split_even(idx)
} else {
self.split_odd(idx)
}
}
/// Split function for odd *indices*.
#[inline]
fn split_odd(&mut self, idx: usize) -> NibbleVec<A> {
let mut tail = NibbleVec::new();
// Perform an overlap copy, copying the last nibble of the original vector only if
// the length of the new tail is *odd*.
let tail_length = self.length - idx;
let take_last = tail_length % 2 == 1;
self.overlap_copy(
idx / 2,
self.data.len(),
&mut tail.data,
&mut tail.length,
take_last,
);
// Remove the copied bytes, being careful to skip the idx byte.
for _ in (idx / 2 + 1)..self.data.len() {
self.data.pop();
}
// Zero the second half of the index byte so as to maintain the last-nibble invariant.
self.data[idx / 2] &= 0xF0;
// Update the length of the first NibbleVec.
self.length = idx;
tail
}
/// Split function for even *indices*.
#[inline]
fn split_even(&mut self, idx: usize) -> NibbleVec<A> {
// Avoid allocating a temporary vector by copying all the bytes in order, then popping them.
// Possible to prove: l_d - ⌊i / 2⌋ = ⌊(l_v - i + 1) / 2⌋
// where l_d = self.data.len()
// l_v = self.length
let half_idx = idx / 2;
let mut tail = NibbleVec::new();
// Copy the bytes.
for i in half_idx..self.data.len() {
tail.data.push(self.data[i]);
}
// Pop the same bytes.
for _ in half_idx..self.data.len() {
self.data.pop();
}
// Update lengths.
tail.length = self.length - idx;
self.length = idx;
tail
}
/// Copy data between the second half of self.data[start] and
/// self.data[end - 1]. The second half of the last entry is included
/// if include_last is true.
#[inline]
fn overlap_copy(
&self,
start: usize,
end: usize,
vec: &mut SmallVec<A>,
length: &mut usize,
include_last: bool,
) {
// Copy up to the first half of the last byte.
for i in start..(end - 1) {
// The first half is the second half of the old entry.
let first_half = self.data[i] & 0x0f;
// The second half is the first half of the next entry.
let second_half = self.data[i + 1] >> 4;
vec.push((first_half << 4) | second_half);
*length += 2;
}
if include_last {
let last = self.data[end - 1] & 0x0f;
vec.push(last << 4);
*length += 1;
}
}
/// Append another nibble vector whilst consuming this vector.
#[inline]
pub fn join(mut self, other: &NibbleVec<A>) -> NibbleVec<A> {
// If the length is even, we can append directly.
if self.length % 2 == 0 {
self.length += other.length;
self.data.extend_from_slice(&other.data);
return self;
}
// If the other vector is empty, bail out.
if other.is_empty() {
return self;
}
// If the length is odd, we have to perform an overlap copy.
// Copy the first half of the first element, to make the vector an even length.
self.push(other.get(0));
// Copy the rest of the vector using an overlap copy.
let take_last = other.len() % 2 == 0;
other.overlap_copy(
0,
other.data.len(),
&mut self.data,
&mut self.length,
take_last,
);
self
}
}
impl<A: Array<Item = u8>> PartialEq<NibbleVec<A>> for NibbleVec<A> {
#[inline]
fn eq(&self, other: &NibbleVec<A>) -> bool {
self.length == other.length && self.data == other.data
}
}
impl<A: Array<Item = u8>> Eq for NibbleVec<A> {}
/// Compare a `NibbleVec` and a slice of bytes *element-by-element*.
/// Bytes are **not** interpreted as two `NibbleVec` entries.
impl<A: Array<Item = u8>> PartialEq<[u8]> for NibbleVec<A> {
#[inline]
fn eq(&self, other: &[u8]) -> bool {
if other.len()!= self.len() {
return false;
}
for (i, x) in other.iter().enumerate() {
if self.get(i)!= *x {
return false;
}
}
true
}
}
impl<A: Array<Item = u8>> Debug for NibbleVec<A> {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
write!(fmt, "NibbleVec [")?;
if!self.is_empty() {
write!(fmt, "{}", self.get(0))?;
}
for i in 1..self.len() {
write!(fmt, ", {}", self.get(i))?;
}
write!(fmt, "]")
}
}
impl<A: Array<Item = u8>> From<Vec<u8>> for NibbleVec<A> {
#[inline]
fn from(v: Vec<u8>) -> NibbleVec<A> {
NibbleVec::from_byte_vec(v)
}
}
impl<'a, A: Array<Item = u8>> From<&'a [u8]> for NibbleVec<A> {
#[inline]
fn from(v: &[u8]) -> NibbleVec<A> {
NibbleVec::from_byte_vec(v.into())
}
}
impl<A: Array<Item = u8>> Into<Vec<u8>> for NibbleVec<A> {
#[inline]
fn into(self) -> Vec<u8> {
self.data.to_vec()
}
}
impl<'a, A: Array<Item = u8>> Into<Vec<u8>> for &'a NibbleVec<A> {
#[inline]
fn into(self) -> Vec<u8> {
self.data.to_vec()
}
}
|
#[inline]
|
random_line_split
|
lib.rs
|
#[cfg(test)]
mod test;
use smallvec::{Array, SmallVec};
use std::convert::{From, Into};
use std::fmt::{self, Debug, Formatter};
use std::iter::FromIterator;
/// A `NibbleVec` backed by a `SmallVec` with 64 inline element slots.
/// This will not allocate until more than 64 elements are added.
pub type Nibblet = NibbleVec<[u8; 64]>;
/// A data-structure for storing a sequence of 4-bit values.
///
/// Values are stored in a `Vec<u8>`, with two values per byte.
///
/// Values at even indices are stored in the most-significant half of their byte,
/// while values at odd indices are stored in the least-significant half.
///
/// Imagine a vector of [MSB][msb-wiki] first bytes, and you'll be right.
///
/// n = [_ _ | _ _ | _ _]
///
/// [msb-wiki]: http://en.wikipedia.org/wiki/Most_significant_bit
#[derive(Clone, Default)]
pub struct NibbleVec<A: Array<Item = u8>> {
length: usize,
data: SmallVec<A>,
}
impl<A: Array<Item = u8>> NibbleVec<A> {
/// Create an empty nibble vector.
pub fn new() -> NibbleVec<A> {
NibbleVec {
length: 0,
data: SmallVec::new(),
}
}
/// Create a nibble vector from a vector of bytes.
///
/// Each byte is split into two 4-bit entries (MSB, LSB).
#[inline]
pub fn from_byte_vec(vec: Vec<u8>) -> NibbleVec<A> {
let length = 2 * vec.len();
NibbleVec {
length,
data: SmallVec::from_iter(vec),
}
}
/// Returns a byte slice of the nibble vector's contents.
#[inline]
pub fn as_bytes(&self) -> &[u8] {
&self.data[..]
}
/// Converts a nibble vector into a byte vector.
///
/// This consumes the nibble vector, so we do not need to copy its contents.
#[inline]
pub fn into_bytes(self) -> Vec<u8> {
self.data.to_vec()
}
/// Get the number of elements stored in the vector.
#[inline]
pub fn len(&self) -> usize {
self.length
}
/// Returns `true` if the nibble vector has a length of 0.
#[inline]
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
/// Fetch a single entry from the vector.
///
/// Guaranteed to be a value in the interval [0, 15].
///
/// **Panics** if `idx >= self.len()`.
#[inline]
pub fn get(&self, idx: usize) -> u8 {
if idx >= self.length {
panic!(
"NibbleVec index out of bounds: len is {}, index is {}",
self.length, idx
);
}
let vec_idx = idx / 2;
match idx % 2 {
// If the index is even, take the first (most significant) half of the stored byte.
0 => self.data[vec_idx] >> 4,
// If the index is odd, take the second (least significant) half.
_ => self.data[vec_idx] & 0x0F,
}
}
/// Add a single nibble to the vector.
///
/// Only the 4 least-significant bits of the value are used.
#[inline]
pub fn push(&mut self, val: u8) {
if self.length % 2 == 0 {
self.data.push(val << 4);
} else {
let vec_len = self.data.len();
// Zero the second half of the last byte just to be safe.
self.data[vec_len - 1] &= 0xF0;
// Write the new value.
self.data[vec_len - 1] |= val & 0x0F;
}
self.length += 1;
}
/// Split the vector into two parts.
///
/// All elements at or following the given index are returned in a new `NibbleVec`,
/// with exactly `idx` elements remaining in this vector.
///
/// **Panics** if `idx > self.len()`.
pub fn split(&mut self, idx: usize) -> NibbleVec<A> {
// assert! is a few percent slower surprisingly
if idx > self.length {
panic!(
"attempted to split past vector end. len is {}, index is {}",
self.length, idx
);
} else if idx == self.length {
NibbleVec::new()
} else if idx % 2 == 0 {
self.split_even(idx)
} else {
self.split_odd(idx)
}
}
/// Split function for odd *indices*.
#[inline]
fn split_odd(&mut self, idx: usize) -> NibbleVec<A> {
let mut tail = NibbleVec::new();
// Perform an overlap copy, copying the last nibble of the original vector only if
// the length of the new tail is *odd*.
let tail_length = self.length - idx;
let take_last = tail_length % 2 == 1;
self.overlap_copy(
idx / 2,
self.data.len(),
&mut tail.data,
&mut tail.length,
take_last,
);
// Remove the copied bytes, being careful to skip the idx byte.
for _ in (idx / 2 + 1)..self.data.len() {
self.data.pop();
}
// Zero the second half of the index byte so as to maintain the last-nibble invariant.
self.data[idx / 2] &= 0xF0;
// Update the length of the first NibbleVec.
self.length = idx;
tail
}
/// Split function for even *indices*.
#[inline]
fn split_even(&mut self, idx: usize) -> NibbleVec<A> {
// Avoid allocating a temporary vector by copying all the bytes in order, then popping them.
// Possible to prove: l_d - ⌊i / 2⌋ = ⌊(l_v - i + 1) / 2⌋
// where l_d = self.data.len()
// l_v = self.length
let half_idx = idx / 2;
let mut tail = NibbleVec::new();
// Copy the bytes.
for i in half_idx..self.data.len() {
tail.data.push(self.data[i]);
}
// Pop the same bytes.
for _ in half_idx..self.data.len() {
self.data.pop();
}
// Update lengths.
tail.length = self.length - idx;
self.length = idx;
tail
}
/// Copy data between the second half of self.data[start] and
/// self.data[end - 1]. The second half of the last entry is included
/// if include_last is true.
#[inline]
fn overlap_copy(
&self,
start: usize,
end: usize,
vec: &mut SmallVec<A>,
length: &mut usize,
include_last: bool,
) {
|
/ Append another nibble vector whilst consuming this vector.
#[inline]
pub fn join(mut self, other: &NibbleVec<A>) -> NibbleVec<A> {
// If the length is even, we can append directly.
if self.length % 2 == 0 {
self.length += other.length;
self.data.extend_from_slice(&other.data);
return self;
}
// If the other vector is empty, bail out.
if other.is_empty() {
return self;
}
// If the length is odd, we have to perform an overlap copy.
// Copy the first half of the first element, to make the vector an even length.
self.push(other.get(0));
// Copy the rest of the vector using an overlap copy.
let take_last = other.len() % 2 == 0;
other.overlap_copy(
0,
other.data.len(),
&mut self.data,
&mut self.length,
take_last,
);
self
}
}
impl<A: Array<Item = u8>> PartialEq<NibbleVec<A>> for NibbleVec<A> {
#[inline]
fn eq(&self, other: &NibbleVec<A>) -> bool {
self.length == other.length && self.data == other.data
}
}
impl<A: Array<Item = u8>> Eq for NibbleVec<A> {}
/// Compare a `NibbleVec` and a slice of bytes *element-by-element*.
/// Bytes are **not** interpreted as two `NibbleVec` entries.
impl<A: Array<Item = u8>> PartialEq<[u8]> for NibbleVec<A> {
#[inline]
fn eq(&self, other: &[u8]) -> bool {
if other.len()!= self.len() {
return false;
}
for (i, x) in other.iter().enumerate() {
if self.get(i)!= *x {
return false;
}
}
true
}
}
impl<A: Array<Item = u8>> Debug for NibbleVec<A> {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
write!(fmt, "NibbleVec [")?;
if!self.is_empty() {
write!(fmt, "{}", self.get(0))?;
}
for i in 1..self.len() {
write!(fmt, ", {}", self.get(i))?;
}
write!(fmt, "]")
}
}
impl<A: Array<Item = u8>> From<Vec<u8>> for NibbleVec<A> {
#[inline]
fn from(v: Vec<u8>) -> NibbleVec<A> {
NibbleVec::from_byte_vec(v)
}
}
impl<'a, A: Array<Item = u8>> From<&'a [u8]> for NibbleVec<A> {
#[inline]
fn from(v: &[u8]) -> NibbleVec<A> {
NibbleVec::from_byte_vec(v.into())
}
}
impl<A: Array<Item = u8>> Into<Vec<u8>> for NibbleVec<A> {
#[inline]
fn into(self) -> Vec<u8> {
self.data.to_vec()
}
}
impl<'a, A: Array<Item = u8>> Into<Vec<u8>> for &'a NibbleVec<A> {
#[inline]
fn into(self) -> Vec<u8> {
self.data.to_vec()
}
}
|
// Copy up to the first half of the last byte.
for i in start..(end - 1) {
// The first half is the second half of the old entry.
let first_half = self.data[i] & 0x0f;
// The second half is the first half of the next entry.
let second_half = self.data[i + 1] >> 4;
vec.push((first_half << 4) | second_half);
*length += 2;
}
if include_last {
let last = self.data[end - 1] & 0x0f;
vec.push(last << 4);
*length += 1;
}
}
//
|
identifier_body
|
lib.rs
|
#[cfg(test)]
mod test;
use smallvec::{Array, SmallVec};
use std::convert::{From, Into};
use std::fmt::{self, Debug, Formatter};
use std::iter::FromIterator;
/// A `NibbleVec` backed by a `SmallVec` with 64 inline element slots.
/// This will not allocate until more than 64 elements are added.
pub type Nibblet = NibbleVec<[u8; 64]>;
/// A data-structure for storing a sequence of 4-bit values.
///
/// Values are stored in a `Vec<u8>`, with two values per byte.
///
/// Values at even indices are stored in the most-significant half of their byte,
/// while values at odd indices are stored in the least-significant half.
///
/// Imagine a vector of [MSB][msb-wiki] first bytes, and you'll be right.
///
/// n = [_ _ | _ _ | _ _]
///
/// [msb-wiki]: http://en.wikipedia.org/wiki/Most_significant_bit
#[derive(Clone, Default)]
pub struct NibbleVec<A: Array<Item = u8>> {
length: usize,
data: SmallVec<A>,
}
impl<A: Array<Item = u8>> NibbleVec<A> {
/// Create an empty nibble vector.
pub fn new() -> NibbleVec<A> {
NibbleVec {
length: 0,
data: SmallVec::new(),
}
}
/// Create a nibble vector from a vector of bytes.
///
/// Each byte is split into two 4-bit entries (MSB, LSB).
#[inline]
pub fn from_byte_vec(vec: Vec<u8>) -> NibbleVec<A> {
let length = 2 * vec.len();
NibbleVec {
length,
data: SmallVec::from_iter(vec),
}
}
/// Returns a byte slice of the nibble vector's contents.
#[inline]
pub fn as_bytes(&self) -> &[u8] {
&self.data[..]
}
/// Converts a nibble vector into a byte vector.
///
/// This consumes the nibble vector, so we do not need to copy its contents.
#[inline]
pub fn into_bytes(self) -> Vec<u8> {
self.data.to_vec()
}
/// Get the number of elements stored in the vector.
#[inline]
pub fn len(&self) -> usize {
self.length
}
/// Returns `true` if the nibble vector has a length of 0.
#[inline]
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
/// Fetch a single entry from the vector.
///
/// Guaranteed to be a value in the interval [0, 15].
///
/// **Panics** if `idx >= self.len()`.
#[inline]
pub fn get(&self, idx: usize) -> u8 {
if idx >= self.length {
panic!(
"NibbleVec index out of bounds: len is {}, index is {}",
self.length, idx
);
}
let vec_idx = idx / 2;
match idx % 2 {
// If the index is even, take the first (most significant) half of the stored byte.
0 => self.data[vec_idx] >> 4,
// If the index is odd, take the second (least significant) half.
_ => self.data[vec_idx] & 0x0F,
}
}
/// Add a single nibble to the vector.
///
/// Only the 4 least-significant bits of the value are used.
#[inline]
pub fn push(&mut self, val: u8) {
if self.length % 2 == 0 {
self.data.push(val << 4);
} else {
let vec_len = self.data.len();
// Zero the second half of the last byte just to be safe.
self.data[vec_len - 1] &= 0xF0;
// Write the new value.
self.data[vec_len - 1] |= val & 0x0F;
}
self.length += 1;
}
/// Split the vector into two parts.
///
/// All elements at or following the given index are returned in a new `NibbleVec`,
/// with exactly `idx` elements remaining in this vector.
///
/// **Panics** if `idx > self.len()`.
pub fn split(&mut self, idx: usize) -> NibbleVec<A> {
// assert! is a few percent slower surprisingly
if idx > self.length {
panic!(
"attempted to split past vector end. len is {}, index is {}",
self.length, idx
);
} else if idx == self.length {
NibbleVec::new()
} else if idx % 2 == 0 {
self.split_even(idx)
} else
|
}
/// Split function for odd *indices*.
#[inline]
fn split_odd(&mut self, idx: usize) -> NibbleVec<A> {
let mut tail = NibbleVec::new();
// Perform an overlap copy, copying the last nibble of the original vector only if
// the length of the new tail is *odd*.
let tail_length = self.length - idx;
let take_last = tail_length % 2 == 1;
self.overlap_copy(
idx / 2,
self.data.len(),
&mut tail.data,
&mut tail.length,
take_last,
);
// Remove the copied bytes, being careful to skip the idx byte.
for _ in (idx / 2 + 1)..self.data.len() {
self.data.pop();
}
// Zero the second half of the index byte so as to maintain the last-nibble invariant.
self.data[idx / 2] &= 0xF0;
// Update the length of the first NibbleVec.
self.length = idx;
tail
}
/// Split function for even *indices*.
#[inline]
fn split_even(&mut self, idx: usize) -> NibbleVec<A> {
// Avoid allocating a temporary vector by copying all the bytes in order, then popping them.
// Possible to prove: l_d - ⌊i / 2⌋ = ⌊(l_v - i + 1) / 2⌋
// where l_d = self.data.len()
// l_v = self.length
let half_idx = idx / 2;
let mut tail = NibbleVec::new();
// Copy the bytes.
for i in half_idx..self.data.len() {
tail.data.push(self.data[i]);
}
// Pop the same bytes.
for _ in half_idx..self.data.len() {
self.data.pop();
}
// Update lengths.
tail.length = self.length - idx;
self.length = idx;
tail
}
/// Copy data between the second half of self.data[start] and
/// self.data[end - 1]. The second half of the last entry is included
/// if include_last is true.
#[inline]
fn overlap_copy(
&self,
start: usize,
end: usize,
vec: &mut SmallVec<A>,
length: &mut usize,
include_last: bool,
) {
// Copy up to the first half of the last byte.
for i in start..(end - 1) {
// The first half is the second half of the old entry.
let first_half = self.data[i] & 0x0f;
// The second half is the first half of the next entry.
let second_half = self.data[i + 1] >> 4;
vec.push((first_half << 4) | second_half);
*length += 2;
}
if include_last {
let last = self.data[end - 1] & 0x0f;
vec.push(last << 4);
*length += 1;
}
}
/// Append another nibble vector whilst consuming this vector.
#[inline]
pub fn join(mut self, other: &NibbleVec<A>) -> NibbleVec<A> {
// If the length is even, we can append directly.
if self.length % 2 == 0 {
self.length += other.length;
self.data.extend_from_slice(&other.data);
return self;
}
// If the other vector is empty, bail out.
if other.is_empty() {
return self;
}
// If the length is odd, we have to perform an overlap copy.
// Copy the first half of the first element, to make the vector an even length.
self.push(other.get(0));
// Copy the rest of the vector using an overlap copy.
let take_last = other.len() % 2 == 0;
other.overlap_copy(
0,
other.data.len(),
&mut self.data,
&mut self.length,
take_last,
);
self
}
}
impl<A: Array<Item = u8>> PartialEq<NibbleVec<A>> for NibbleVec<A> {
#[inline]
fn eq(&self, other: &NibbleVec<A>) -> bool {
self.length == other.length && self.data == other.data
}
}
impl<A: Array<Item = u8>> Eq for NibbleVec<A> {}
/// Compare a `NibbleVec` and a slice of bytes *element-by-element*.
/// Bytes are **not** interpreted as two `NibbleVec` entries.
impl<A: Array<Item = u8>> PartialEq<[u8]> for NibbleVec<A> {
#[inline]
fn eq(&self, other: &[u8]) -> bool {
if other.len()!= self.len() {
return false;
}
for (i, x) in other.iter().enumerate() {
if self.get(i)!= *x {
return false;
}
}
true
}
}
impl<A: Array<Item = u8>> Debug for NibbleVec<A> {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
write!(fmt, "NibbleVec [")?;
if!self.is_empty() {
write!(fmt, "{}", self.get(0))?;
}
for i in 1..self.len() {
write!(fmt, ", {}", self.get(i))?;
}
write!(fmt, "]")
}
}
impl<A: Array<Item = u8>> From<Vec<u8>> for NibbleVec<A> {
#[inline]
fn from(v: Vec<u8>) -> NibbleVec<A> {
NibbleVec::from_byte_vec(v)
}
}
impl<'a, A: Array<Item = u8>> From<&'a [u8]> for NibbleVec<A> {
#[inline]
fn from(v: &[u8]) -> NibbleVec<A> {
NibbleVec::from_byte_vec(v.into())
}
}
impl<A: Array<Item = u8>> Into<Vec<u8>> for NibbleVec<A> {
#[inline]
fn into(self) -> Vec<u8> {
self.data.to_vec()
}
}
impl<'a, A: Array<Item = u8>> Into<Vec<u8>> for &'a NibbleVec<A> {
#[inline]
fn into(self) -> Vec<u8> {
self.data.to_vec()
}
}
|
{
self.split_odd(idx)
}
|
conditional_block
|
shootout-binarytrees.rs
|
// xfail-test
// Broken due to arena API problems.
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod std;
use std::arena;
enum tree<'self> {
nil,
node(&'self tree<'self>, &'self tree<'self>, int),
}
fn
|
(t: &tree) -> int {
match *t {
nil => { return 0; }
node(left, right, item) => {
return item + item_check(left) - item_check(right);
}
}
}
fn bottom_up_tree<'r>(arena: &'r mut arena::Arena, item: int, depth: int)
-> &'r tree<'r> {
if depth > 0 {
return arena.alloc(
|| node(bottom_up_tree(arena, 2 * item - 1, depth - 1),
bottom_up_tree(arena, 2 * item, depth - 1),
item));
}
return arena.alloc(|| nil);
}
fn main() {
let args = os::args();
let args = if os::getenv(~"RUST_BENCH").is_some() {
~[~"", ~"17"]
} else if args.len() <= 1u {
~[~"", ~"8"]
} else {
args
};
let n = int::from_str(args[1]).get();
let min_depth = 4;
let mut max_depth;
if min_depth + 2 > n {
max_depth = min_depth + 2;
} else {
max_depth = n;
}
let mut stretch_arena = arena::Arena();
let stretch_depth = max_depth + 1;
let stretch_tree = bottom_up_tree(&mut stretch_arena, 0, stretch_depth);
io::println(fmt!("stretch tree of depth %d\t check: %d",
stretch_depth,
item_check(stretch_tree)));
let mut long_lived_arena = arena::Arena();
let long_lived_tree = bottom_up_tree(&mut long_lived_arena, 0, max_depth);
let mut depth = min_depth;
while depth <= max_depth {
let iterations = int::pow(2, (max_depth - depth + min_depth) as uint);
let mut chk = 0;
let mut i = 1;
while i <= iterations {
let mut temp_tree = bottom_up_tree(&mut long_lived_arena, i, depth);
chk += item_check(temp_tree);
temp_tree = bottom_up_tree(&mut long_lived_arena, -i, depth);
chk += item_check(temp_tree);
i += 1;
}
io::println(fmt!("%d\t trees of depth %d\t check: %d",
iterations * 2, depth,
chk));
depth += 2;
}
io::println(fmt!("long lived trees of depth %d\t check: %d",
max_depth,
item_check(long_lived_tree)));
}
|
item_check
|
identifier_name
|
shootout-binarytrees.rs
|
// xfail-test
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod std;
use std::arena;
enum tree<'self> {
nil,
node(&'self tree<'self>, &'self tree<'self>, int),
}
fn item_check(t: &tree) -> int {
match *t {
nil => { return 0; }
node(left, right, item) => {
return item + item_check(left) - item_check(right);
}
}
}
fn bottom_up_tree<'r>(arena: &'r mut arena::Arena, item: int, depth: int)
-> &'r tree<'r> {
if depth > 0 {
return arena.alloc(
|| node(bottom_up_tree(arena, 2 * item - 1, depth - 1),
bottom_up_tree(arena, 2 * item, depth - 1),
item));
}
return arena.alloc(|| nil);
}
fn main() {
let args = os::args();
let args = if os::getenv(~"RUST_BENCH").is_some() {
~[~"", ~"17"]
} else if args.len() <= 1u {
~[~"", ~"8"]
} else {
args
};
let n = int::from_str(args[1]).get();
let min_depth = 4;
let mut max_depth;
if min_depth + 2 > n {
max_depth = min_depth + 2;
} else {
max_depth = n;
}
let mut stretch_arena = arena::Arena();
let stretch_depth = max_depth + 1;
let stretch_tree = bottom_up_tree(&mut stretch_arena, 0, stretch_depth);
io::println(fmt!("stretch tree of depth %d\t check: %d",
stretch_depth,
item_check(stretch_tree)));
let mut long_lived_arena = arena::Arena();
let long_lived_tree = bottom_up_tree(&mut long_lived_arena, 0, max_depth);
let mut depth = min_depth;
while depth <= max_depth {
let iterations = int::pow(2, (max_depth - depth + min_depth) as uint);
let mut chk = 0;
let mut i = 1;
while i <= iterations {
let mut temp_tree = bottom_up_tree(&mut long_lived_arena, i, depth);
chk += item_check(temp_tree);
temp_tree = bottom_up_tree(&mut long_lived_arena, -i, depth);
chk += item_check(temp_tree);
i += 1;
}
io::println(fmt!("%d\t trees of depth %d\t check: %d",
iterations * 2, depth,
chk));
depth += 2;
}
io::println(fmt!("long lived trees of depth %d\t check: %d",
max_depth,
item_check(long_lived_tree)));
}
|
// Broken due to arena API problems.
|
random_line_split
|
shootout-binarytrees.rs
|
// xfail-test
// Broken due to arena API problems.
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod std;
use std::arena;
enum tree<'self> {
nil,
node(&'self tree<'self>, &'self tree<'self>, int),
}
fn item_check(t: &tree) -> int {
match *t {
nil => { return 0; }
node(left, right, item) => {
return item + item_check(left) - item_check(right);
}
}
}
fn bottom_up_tree<'r>(arena: &'r mut arena::Arena, item: int, depth: int)
-> &'r tree<'r> {
if depth > 0 {
return arena.alloc(
|| node(bottom_up_tree(arena, 2 * item - 1, depth - 1),
bottom_up_tree(arena, 2 * item, depth - 1),
item));
}
return arena.alloc(|| nil);
}
fn main() {
let args = os::args();
let args = if os::getenv(~"RUST_BENCH").is_some()
|
else if args.len() <= 1u {
~[~"", ~"8"]
} else {
args
};
let n = int::from_str(args[1]).get();
let min_depth = 4;
let mut max_depth;
if min_depth + 2 > n {
max_depth = min_depth + 2;
} else {
max_depth = n;
}
let mut stretch_arena = arena::Arena();
let stretch_depth = max_depth + 1;
let stretch_tree = bottom_up_tree(&mut stretch_arena, 0, stretch_depth);
io::println(fmt!("stretch tree of depth %d\t check: %d",
stretch_depth,
item_check(stretch_tree)));
let mut long_lived_arena = arena::Arena();
let long_lived_tree = bottom_up_tree(&mut long_lived_arena, 0, max_depth);
let mut depth = min_depth;
while depth <= max_depth {
let iterations = int::pow(2, (max_depth - depth + min_depth) as uint);
let mut chk = 0;
let mut i = 1;
while i <= iterations {
let mut temp_tree = bottom_up_tree(&mut long_lived_arena, i, depth);
chk += item_check(temp_tree);
temp_tree = bottom_up_tree(&mut long_lived_arena, -i, depth);
chk += item_check(temp_tree);
i += 1;
}
io::println(fmt!("%d\t trees of depth %d\t check: %d",
iterations * 2, depth,
chk));
depth += 2;
}
io::println(fmt!("long lived trees of depth %d\t check: %d",
max_depth,
item_check(long_lived_tree)));
}
|
{
~[~"", ~"17"]
}
|
conditional_block
|
shootout-binarytrees.rs
|
// xfail-test
// Broken due to arena API problems.
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod std;
use std::arena;
enum tree<'self> {
nil,
node(&'self tree<'self>, &'self tree<'self>, int),
}
fn item_check(t: &tree) -> int {
match *t {
nil => { return 0; }
node(left, right, item) => {
return item + item_check(left) - item_check(right);
}
}
}
fn bottom_up_tree<'r>(arena: &'r mut arena::Arena, item: int, depth: int)
-> &'r tree<'r>
|
fn main() {
let args = os::args();
let args = if os::getenv(~"RUST_BENCH").is_some() {
~[~"", ~"17"]
} else if args.len() <= 1u {
~[~"", ~"8"]
} else {
args
};
let n = int::from_str(args[1]).get();
let min_depth = 4;
let mut max_depth;
if min_depth + 2 > n {
max_depth = min_depth + 2;
} else {
max_depth = n;
}
let mut stretch_arena = arena::Arena();
let stretch_depth = max_depth + 1;
let stretch_tree = bottom_up_tree(&mut stretch_arena, 0, stretch_depth);
io::println(fmt!("stretch tree of depth %d\t check: %d",
stretch_depth,
item_check(stretch_tree)));
let mut long_lived_arena = arena::Arena();
let long_lived_tree = bottom_up_tree(&mut long_lived_arena, 0, max_depth);
let mut depth = min_depth;
while depth <= max_depth {
let iterations = int::pow(2, (max_depth - depth + min_depth) as uint);
let mut chk = 0;
let mut i = 1;
while i <= iterations {
let mut temp_tree = bottom_up_tree(&mut long_lived_arena, i, depth);
chk += item_check(temp_tree);
temp_tree = bottom_up_tree(&mut long_lived_arena, -i, depth);
chk += item_check(temp_tree);
i += 1;
}
io::println(fmt!("%d\t trees of depth %d\t check: %d",
iterations * 2, depth,
chk));
depth += 2;
}
io::println(fmt!("long lived trees of depth %d\t check: %d",
max_depth,
item_check(long_lived_tree)));
}
|
{
if depth > 0 {
return arena.alloc(
|| node(bottom_up_tree(arena, 2 * item - 1, depth - 1),
bottom_up_tree(arena, 2 * item, depth - 1),
item));
}
return arena.alloc(|| nil);
}
|
identifier_body
|
issue-11577.rs
|
// pretty-expanded FIXME #23616
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Destructuring struct variants would ICE where regular structs wouldn't
enum Foo {
VBar { num: isize }
}
struct
|
{ num: isize }
pub fn main() {
let vbar = Foo::VBar { num: 1 };
let Foo::VBar { num } = vbar;
assert_eq!(num, 1);
let sbar = SBar { num: 2 };
let SBar { num } = sbar;
assert_eq!(num, 2);
}
|
SBar
|
identifier_name
|
issue-11577.rs
|
// pretty-expanded FIXME #23616
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Destructuring struct variants would ICE where regular structs wouldn't
|
struct SBar { num: isize }
pub fn main() {
let vbar = Foo::VBar { num: 1 };
let Foo::VBar { num } = vbar;
assert_eq!(num, 1);
let sbar = SBar { num: 2 };
let SBar { num } = sbar;
assert_eq!(num, 2);
}
|
enum Foo {
VBar { num: isize }
}
|
random_line_split
|
issue-11577.rs
|
// pretty-expanded FIXME #23616
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Destructuring struct variants would ICE where regular structs wouldn't
enum Foo {
VBar { num: isize }
}
struct SBar { num: isize }
pub fn main()
|
{
let vbar = Foo::VBar { num: 1 };
let Foo::VBar { num } = vbar;
assert_eq!(num, 1);
let sbar = SBar { num: 2 };
let SBar { num } = sbar;
assert_eq!(num, 2);
}
|
identifier_body
|
|
client.rs
|
use std::fmt;
use std::sync::Mutex;
use std::cell::{RefCell, UnsafeCell};
use std::ptr;
use wrust_io::mio;
use wrust_io::mio::tcp::*;
use wrust_io::mio::unix::*;
use wrust_types::Result;
use wrust_types::net::Protocol;
use wrust_types::net::connection::{State, Descriptor};
use wrust_module::stream::{Intention, Flush};
pub type ClientProtocol = Protocol<TcpStream, (), UnixStream>;
pub struct LeftData {
data: Vec<u8>,
intention: Intention,
flush: Flush,
}
unsafe impl Send for LeftData {}
unsafe impl Sync for LeftData {}
impl LeftData {
pub fn new(data: Vec<u8>, intention: Intention, flush: Flush) -> LeftData {
LeftData {
data: data,
intention: intention,
flush: flush,
}
}
pub fn data(&self) -> &Vec<u8> {
&self.data
}
pub fn intention(&self) -> &Intention {
&self.intention
}
pub fn flush(&self) -> &Flush {
&self.flush
}
pub fn consume(self) -> (Vec<u8>, (Intention, Flush)) {
(self.data, (self.intention, self.flush))
}
}
pub struct Client {
server_token: mio::Token,
token: mio::Token,
socket: Mutex<RefCell<ClientProtocol>>,
state: Mutex<RefCell<State>>,
descriptor: Descriptor,
left_data: Mutex<UnsafeCell<Option<LeftData>>>,
}
impl Client {
pub fn new(server_token: mio::Token, token: mio::Token, socket: ClientProtocol) -> Client {
let descriptor = Descriptor::new(
token.as_usize() as u32,
match socket {
Protocol::Tcp(ref s) => Some(s.peer_addr().unwrap()),
Protocol::Udp(_) => None,
Protocol::Unix(_) => None,
}
);
Client {
server_token: server_token,
token: token,
socket: Mutex::new(RefCell::new(socket)),
state: Mutex::new(RefCell::new(State::Opened)),
descriptor: descriptor,
left_data: Mutex::new(UnsafeCell::new(None)),
}
}
pub fn server_token(&self) -> &mio::Token {
&self.server_token
}
pub fn token(&self) -> &mio::Token {
&self.token
}
pub fn state(&self) -> State {
let guard = self.state.lock().unwrap();
let cell = guard.borrow();
cell.clone()
}
pub fn set_state(&self, state: State) {
let guard = self.state.lock().unwrap();
let mut cell = guard.borrow_mut();
*cell = state;
}
pub fn descriptor(&self) -> &Descriptor {
&self.descriptor
}
pub fn then_on_socket<F, T>(&self, mut func: F) -> Result<T>
where F: FnMut(&mut ClientProtocol) -> Result<T> {
let guard = self.socket.lock().unwrap();
let mut cell = guard.borrow_mut();
func(&mut *cell)
}
pub fn left_data(&self) -> Option<LeftData> {
let cell = self.left_data.lock().unwrap();
let mut left_data: Option<LeftData> = None;
unsafe { ptr::swap(&mut left_data, cell.get()); }
left_data
}
pub fn
|
(&self, data: Option<LeftData>) {
let cell = self.left_data.lock().unwrap();
let mut left_data = data;
unsafe { ptr::swap(&mut left_data, cell.get()); }
}
}
impl fmt::Debug for Client {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Client #{:?}", self.token)
}
}
|
set_left_data
|
identifier_name
|
client.rs
|
use std::fmt;
use std::sync::Mutex;
use std::cell::{RefCell, UnsafeCell};
use std::ptr;
use wrust_io::mio;
use wrust_io::mio::tcp::*;
use wrust_io::mio::unix::*;
use wrust_types::Result;
use wrust_types::net::Protocol;
use wrust_types::net::connection::{State, Descriptor};
use wrust_module::stream::{Intention, Flush};
pub type ClientProtocol = Protocol<TcpStream, (), UnixStream>;
pub struct LeftData {
data: Vec<u8>,
intention: Intention,
flush: Flush,
}
unsafe impl Send for LeftData {}
unsafe impl Sync for LeftData {}
impl LeftData {
pub fn new(data: Vec<u8>, intention: Intention, flush: Flush) -> LeftData {
LeftData {
data: data,
intention: intention,
flush: flush,
}
}
pub fn data(&self) -> &Vec<u8>
|
pub fn intention(&self) -> &Intention {
&self.intention
}
pub fn flush(&self) -> &Flush {
&self.flush
}
pub fn consume(self) -> (Vec<u8>, (Intention, Flush)) {
(self.data, (self.intention, self.flush))
}
}
pub struct Client {
server_token: mio::Token,
token: mio::Token,
socket: Mutex<RefCell<ClientProtocol>>,
state: Mutex<RefCell<State>>,
descriptor: Descriptor,
left_data: Mutex<UnsafeCell<Option<LeftData>>>,
}
impl Client {
pub fn new(server_token: mio::Token, token: mio::Token, socket: ClientProtocol) -> Client {
let descriptor = Descriptor::new(
token.as_usize() as u32,
match socket {
Protocol::Tcp(ref s) => Some(s.peer_addr().unwrap()),
Protocol::Udp(_) => None,
Protocol::Unix(_) => None,
}
);
Client {
server_token: server_token,
token: token,
socket: Mutex::new(RefCell::new(socket)),
state: Mutex::new(RefCell::new(State::Opened)),
descriptor: descriptor,
left_data: Mutex::new(UnsafeCell::new(None)),
}
}
pub fn server_token(&self) -> &mio::Token {
&self.server_token
}
pub fn token(&self) -> &mio::Token {
&self.token
}
pub fn state(&self) -> State {
let guard = self.state.lock().unwrap();
let cell = guard.borrow();
cell.clone()
}
pub fn set_state(&self, state: State) {
let guard = self.state.lock().unwrap();
let mut cell = guard.borrow_mut();
*cell = state;
}
pub fn descriptor(&self) -> &Descriptor {
&self.descriptor
}
pub fn then_on_socket<F, T>(&self, mut func: F) -> Result<T>
where F: FnMut(&mut ClientProtocol) -> Result<T> {
let guard = self.socket.lock().unwrap();
let mut cell = guard.borrow_mut();
func(&mut *cell)
}
pub fn left_data(&self) -> Option<LeftData> {
let cell = self.left_data.lock().unwrap();
let mut left_data: Option<LeftData> = None;
unsafe { ptr::swap(&mut left_data, cell.get()); }
left_data
}
pub fn set_left_data(&self, data: Option<LeftData>) {
let cell = self.left_data.lock().unwrap();
let mut left_data = data;
unsafe { ptr::swap(&mut left_data, cell.get()); }
}
}
impl fmt::Debug for Client {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Client #{:?}", self.token)
}
}
|
{
&self.data
}
|
identifier_body
|
client.rs
|
use std::fmt;
use std::sync::Mutex;
use std::cell::{RefCell, UnsafeCell};
use std::ptr;
use wrust_io::mio;
use wrust_io::mio::tcp::*;
use wrust_io::mio::unix::*;
use wrust_types::Result;
use wrust_types::net::Protocol;
use wrust_types::net::connection::{State, Descriptor};
use wrust_module::stream::{Intention, Flush};
pub type ClientProtocol = Protocol<TcpStream, (), UnixStream>;
pub struct LeftData {
data: Vec<u8>,
intention: Intention,
flush: Flush,
}
unsafe impl Send for LeftData {}
unsafe impl Sync for LeftData {}
impl LeftData {
pub fn new(data: Vec<u8>, intention: Intention, flush: Flush) -> LeftData {
LeftData {
data: data,
intention: intention,
flush: flush,
}
}
pub fn data(&self) -> &Vec<u8> {
&self.data
}
pub fn intention(&self) -> &Intention {
&self.intention
}
pub fn flush(&self) -> &Flush {
&self.flush
}
pub fn consume(self) -> (Vec<u8>, (Intention, Flush)) {
(self.data, (self.intention, self.flush))
}
}
pub struct Client {
server_token: mio::Token,
token: mio::Token,
socket: Mutex<RefCell<ClientProtocol>>,
state: Mutex<RefCell<State>>,
descriptor: Descriptor,
left_data: Mutex<UnsafeCell<Option<LeftData>>>,
}
impl Client {
pub fn new(server_token: mio::Token, token: mio::Token, socket: ClientProtocol) -> Client {
let descriptor = Descriptor::new(
token.as_usize() as u32,
match socket {
Protocol::Tcp(ref s) => Some(s.peer_addr().unwrap()),
Protocol::Udp(_) => None,
Protocol::Unix(_) => None,
}
);
Client {
server_token: server_token,
token: token,
socket: Mutex::new(RefCell::new(socket)),
state: Mutex::new(RefCell::new(State::Opened)),
descriptor: descriptor,
left_data: Mutex::new(UnsafeCell::new(None)),
}
}
pub fn server_token(&self) -> &mio::Token {
&self.server_token
}
pub fn token(&self) -> &mio::Token {
&self.token
}
pub fn state(&self) -> State {
let guard = self.state.lock().unwrap();
let cell = guard.borrow();
cell.clone()
}
pub fn set_state(&self, state: State) {
let guard = self.state.lock().unwrap();
let mut cell = guard.borrow_mut();
*cell = state;
}
pub fn descriptor(&self) -> &Descriptor {
|
pub fn then_on_socket<F, T>(&self, mut func: F) -> Result<T>
where F: FnMut(&mut ClientProtocol) -> Result<T> {
let guard = self.socket.lock().unwrap();
let mut cell = guard.borrow_mut();
func(&mut *cell)
}
pub fn left_data(&self) -> Option<LeftData> {
let cell = self.left_data.lock().unwrap();
let mut left_data: Option<LeftData> = None;
unsafe { ptr::swap(&mut left_data, cell.get()); }
left_data
}
pub fn set_left_data(&self, data: Option<LeftData>) {
let cell = self.left_data.lock().unwrap();
let mut left_data = data;
unsafe { ptr::swap(&mut left_data, cell.get()); }
}
}
impl fmt::Debug for Client {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Client #{:?}", self.token)
}
}
|
&self.descriptor
}
|
random_line_split
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Tests for parsing and serialization of values/properties
use cssparser::Parser;
use media_queries::CSSErrorReporterTest;
use style::parser::ParserContext;
use style::stylesheets::Origin;
fn parse<T, F: Fn(&ParserContext, &mut Parser) -> Result<T, ()>>(f: F, s: &str) -> Result<T, ()>
|
// This is a macro so that the file/line information
// is preserved in the panic
macro_rules! assert_roundtrip_with_context {
($fun:expr, $string:expr) => {
assert_roundtrip_with_context!($fun, $string, $string);
};
($fun:expr,$input:expr, $output:expr) => {
let url = ::servo_url::ServoUrl::parse("http://localhost").unwrap();
let context = ParserContext::new(Origin::Author, &url, Box::new(CSSErrorReporterTest));
let mut parser = Parser::new($input);
let parsed = $fun(&context, &mut parser)
.expect(&format!("Failed to parse {}", $input));
let serialized = ToCss::to_css_string(&parsed);
assert_eq!(serialized, $output);
let mut parser = Parser::new(&serialized);
let re_parsed = $fun(&context, &mut parser)
.expect(&format!("Failed to parse serialization {}", $input));
let re_serialized = ToCss::to_css_string(&re_parsed);
assert_eq!(serialized, re_serialized);
}
}
macro_rules! parse_longhand {
($name:ident, $s:expr) => {{
let url = ::servo_url::ServoUrl::parse("http://localhost").unwrap();
let context = ParserContext::new(Origin::Author, &url, Box::new(CSSErrorReporterTest));
$name::parse(&context, &mut Parser::new($s)).unwrap()
}};
}
mod background;
mod basic_shape;
mod border;
mod font;
mod image;
mod inherited_box;
mod inherited_text;
mod mask;
mod position;
mod selectors;
|
{
let url = ::servo_url::ServoUrl::parse("http://localhost").unwrap();
let context = ParserContext::new(Origin::Author, &url, Box::new(CSSErrorReporterTest));
let mut parser = Parser::new(s);
f(&context, &mut parser)
}
|
identifier_body
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Tests for parsing and serialization of values/properties
use cssparser::Parser;
use media_queries::CSSErrorReporterTest;
use style::parser::ParserContext;
use style::stylesheets::Origin;
fn
|
<T, F: Fn(&ParserContext, &mut Parser) -> Result<T, ()>>(f: F, s: &str) -> Result<T, ()> {
let url = ::servo_url::ServoUrl::parse("http://localhost").unwrap();
let context = ParserContext::new(Origin::Author, &url, Box::new(CSSErrorReporterTest));
let mut parser = Parser::new(s);
f(&context, &mut parser)
}
// This is a macro so that the file/line information
// is preserved in the panic
macro_rules! assert_roundtrip_with_context {
($fun:expr, $string:expr) => {
assert_roundtrip_with_context!($fun, $string, $string);
};
($fun:expr,$input:expr, $output:expr) => {
let url = ::servo_url::ServoUrl::parse("http://localhost").unwrap();
let context = ParserContext::new(Origin::Author, &url, Box::new(CSSErrorReporterTest));
let mut parser = Parser::new($input);
let parsed = $fun(&context, &mut parser)
.expect(&format!("Failed to parse {}", $input));
let serialized = ToCss::to_css_string(&parsed);
assert_eq!(serialized, $output);
let mut parser = Parser::new(&serialized);
let re_parsed = $fun(&context, &mut parser)
.expect(&format!("Failed to parse serialization {}", $input));
let re_serialized = ToCss::to_css_string(&re_parsed);
assert_eq!(serialized, re_serialized);
}
}
macro_rules! parse_longhand {
($name:ident, $s:expr) => {{
let url = ::servo_url::ServoUrl::parse("http://localhost").unwrap();
let context = ParserContext::new(Origin::Author, &url, Box::new(CSSErrorReporterTest));
$name::parse(&context, &mut Parser::new($s)).unwrap()
}};
}
mod background;
mod basic_shape;
mod border;
mod font;
mod image;
mod inherited_box;
mod inherited_text;
mod mask;
mod position;
mod selectors;
|
parse
|
identifier_name
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Tests for parsing and serialization of values/properties
use cssparser::Parser;
use media_queries::CSSErrorReporterTest;
use style::parser::ParserContext;
use style::stylesheets::Origin;
fn parse<T, F: Fn(&ParserContext, &mut Parser) -> Result<T, ()>>(f: F, s: &str) -> Result<T, ()> {
let url = ::servo_url::ServoUrl::parse("http://localhost").unwrap();
let context = ParserContext::new(Origin::Author, &url, Box::new(CSSErrorReporterTest));
let mut parser = Parser::new(s);
f(&context, &mut parser)
}
// This is a macro so that the file/line information
// is preserved in the panic
macro_rules! assert_roundtrip_with_context {
($fun:expr, $string:expr) => {
assert_roundtrip_with_context!($fun, $string, $string);
};
($fun:expr,$input:expr, $output:expr) => {
let url = ::servo_url::ServoUrl::parse("http://localhost").unwrap();
let context = ParserContext::new(Origin::Author, &url, Box::new(CSSErrorReporterTest));
let mut parser = Parser::new($input);
let parsed = $fun(&context, &mut parser)
.expect(&format!("Failed to parse {}", $input));
let serialized = ToCss::to_css_string(&parsed);
assert_eq!(serialized, $output);
let mut parser = Parser::new(&serialized);
let re_parsed = $fun(&context, &mut parser)
.expect(&format!("Failed to parse serialization {}", $input));
let re_serialized = ToCss::to_css_string(&re_parsed);
assert_eq!(serialized, re_serialized);
}
}
macro_rules! parse_longhand {
($name:ident, $s:expr) => {{
let url = ::servo_url::ServoUrl::parse("http://localhost").unwrap();
let context = ParserContext::new(Origin::Author, &url, Box::new(CSSErrorReporterTest));
$name::parse(&context, &mut Parser::new($s)).unwrap()
}};
}
mod background;
mod basic_shape;
mod border;
|
mod image;
mod inherited_box;
mod inherited_text;
mod mask;
mod position;
mod selectors;
|
mod font;
|
random_line_split
|
buffer.rs
|
use buffer::{BufferView, BufferViewAny, BufferType, BufferCreationError};
use uniforms::{AsUniformValue, UniformValue, UniformBlock, UniformType};
use std::ops::{Deref, DerefMut};
use backend::Facade;
/// Buffer that contains a uniform block.
#[derive(Debug)]
pub struct UniformBuffer<T> where T: Copy {
buffer: BufferView<T>,
}
/// Same as `UniformBuffer` but doesn't contain any information about the type.
#[derive(Debug)]
pub struct TypelessUniformBuffer {
buffer: BufferViewAny,
}
impl<T> UniformBuffer<T> where T: Copy {
/// Uploads data in the uniforms buffer.
///
/// # Features
///
/// Only available if the `gl_uniform_blocks` feature is enabled.
#[cfg(feature = "gl_uniform_blocks")]
pub fn new<F>(facade: &F, data: T) -> UniformBuffer<T> where F: Facade {
UniformBuffer::new_if_supported(facade, data).unwrap()
}
/// Uploads data in the uniforms buffer.
pub fn new_if_supported<F>(facade: &F, data: T) -> Option<UniformBuffer<T>> where F: Facade {
let buffer = match BufferView::new(facade, &data, BufferType::UniformBuffer, true) {
Ok(b) => b,
Err(BufferCreationError::BufferTypeNotSupported) => return None,
e @ Err(_) => e.unwrap(),
};
Some(UniformBuffer {
buffer: buffer,
})
}
/// Creates an empty buffer.
///
/// # Features
///
/// Only available if the `gl_uniform_blocks` feature is enabled.
#[cfg(feature = "gl_uniform_blocks")]
pub fn empty<F>(facade: &F) -> UniformBuffer<T> where F: Facade {
UniformBuffer::empty_if_supported(facade).unwrap()
}
/// Creates an empty buffer.
pub fn empty_if_supported<F>(facade: &F) -> Option<UniformBuffer<T>> where F: Facade {
let buffer = match BufferView::empty(facade, BufferType::UniformBuffer, true) {
Ok(b) => b,
Err(BufferCreationError::BufferTypeNotSupported) => return None,
e @ Err(_) => e.unwrap(),
};
Some(UniformBuffer {
buffer: buffer,
})
}
}
impl<T> Deref for UniformBuffer<T> where T: Copy {
type Target = BufferView<T>;
fn
|
(&self) -> &BufferView<T> {
&self.buffer
}
}
impl<T> DerefMut for UniformBuffer<T> where T: Copy {
fn deref_mut(&mut self) -> &mut BufferView<T> {
&mut self.buffer
}
}
impl<'a, T> AsUniformValue for &'a UniformBuffer<T> where T: UniformBlock + Copy {
fn as_uniform_value(&self) -> UniformValue {
UniformValue::Block(self.buffer.as_slice_any(), <T as UniformBlock>::matches)
}
fn matches(_: &UniformType) -> bool {
false
}
}
|
deref
|
identifier_name
|
buffer.rs
|
use buffer::{BufferView, BufferViewAny, BufferType, BufferCreationError};
use uniforms::{AsUniformValue, UniformValue, UniformBlock, UniformType};
use std::ops::{Deref, DerefMut};
use backend::Facade;
/// Buffer that contains a uniform block.
#[derive(Debug)]
pub struct UniformBuffer<T> where T: Copy {
buffer: BufferView<T>,
}
/// Same as `UniformBuffer` but doesn't contain any information about the type.
#[derive(Debug)]
pub struct TypelessUniformBuffer {
buffer: BufferViewAny,
}
impl<T> UniformBuffer<T> where T: Copy {
/// Uploads data in the uniforms buffer.
///
/// # Features
///
/// Only available if the `gl_uniform_blocks` feature is enabled.
#[cfg(feature = "gl_uniform_blocks")]
pub fn new<F>(facade: &F, data: T) -> UniformBuffer<T> where F: Facade {
UniformBuffer::new_if_supported(facade, data).unwrap()
}
/// Uploads data in the uniforms buffer.
|
};
Some(UniformBuffer {
buffer: buffer,
})
}
/// Creates an empty buffer.
///
/// # Features
///
/// Only available if the `gl_uniform_blocks` feature is enabled.
#[cfg(feature = "gl_uniform_blocks")]
pub fn empty<F>(facade: &F) -> UniformBuffer<T> where F: Facade {
UniformBuffer::empty_if_supported(facade).unwrap()
}
/// Creates an empty buffer.
pub fn empty_if_supported<F>(facade: &F) -> Option<UniformBuffer<T>> where F: Facade {
let buffer = match BufferView::empty(facade, BufferType::UniformBuffer, true) {
Ok(b) => b,
Err(BufferCreationError::BufferTypeNotSupported) => return None,
e @ Err(_) => e.unwrap(),
};
Some(UniformBuffer {
buffer: buffer,
})
}
}
impl<T> Deref for UniformBuffer<T> where T: Copy {
type Target = BufferView<T>;
fn deref(&self) -> &BufferView<T> {
&self.buffer
}
}
impl<T> DerefMut for UniformBuffer<T> where T: Copy {
fn deref_mut(&mut self) -> &mut BufferView<T> {
&mut self.buffer
}
}
impl<'a, T> AsUniformValue for &'a UniformBuffer<T> where T: UniformBlock + Copy {
fn as_uniform_value(&self) -> UniformValue {
UniformValue::Block(self.buffer.as_slice_any(), <T as UniformBlock>::matches)
}
fn matches(_: &UniformType) -> bool {
false
}
}
|
pub fn new_if_supported<F>(facade: &F, data: T) -> Option<UniformBuffer<T>> where F: Facade {
let buffer = match BufferView::new(facade, &data, BufferType::UniformBuffer, true) {
Ok(b) => b,
Err(BufferCreationError::BufferTypeNotSupported) => return None,
e @ Err(_) => e.unwrap(),
|
random_line_split
|
buffer.rs
|
use buffer::{BufferView, BufferViewAny, BufferType, BufferCreationError};
use uniforms::{AsUniformValue, UniformValue, UniformBlock, UniformType};
use std::ops::{Deref, DerefMut};
use backend::Facade;
/// Buffer that contains a uniform block.
#[derive(Debug)]
pub struct UniformBuffer<T> where T: Copy {
buffer: BufferView<T>,
}
/// Same as `UniformBuffer` but doesn't contain any information about the type.
#[derive(Debug)]
pub struct TypelessUniformBuffer {
buffer: BufferViewAny,
}
impl<T> UniformBuffer<T> where T: Copy {
/// Uploads data in the uniforms buffer.
///
/// # Features
///
/// Only available if the `gl_uniform_blocks` feature is enabled.
#[cfg(feature = "gl_uniform_blocks")]
pub fn new<F>(facade: &F, data: T) -> UniformBuffer<T> where F: Facade {
UniformBuffer::new_if_supported(facade, data).unwrap()
}
/// Uploads data in the uniforms buffer.
pub fn new_if_supported<F>(facade: &F, data: T) -> Option<UniformBuffer<T>> where F: Facade
|
/// Creates an empty buffer.
///
/// # Features
///
/// Only available if the `gl_uniform_blocks` feature is enabled.
#[cfg(feature = "gl_uniform_blocks")]
pub fn empty<F>(facade: &F) -> UniformBuffer<T> where F: Facade {
UniformBuffer::empty_if_supported(facade).unwrap()
}
/// Creates an empty buffer.
pub fn empty_if_supported<F>(facade: &F) -> Option<UniformBuffer<T>> where F: Facade {
let buffer = match BufferView::empty(facade, BufferType::UniformBuffer, true) {
Ok(b) => b,
Err(BufferCreationError::BufferTypeNotSupported) => return None,
e @ Err(_) => e.unwrap(),
};
Some(UniformBuffer {
buffer: buffer,
})
}
}
impl<T> Deref for UniformBuffer<T> where T: Copy {
type Target = BufferView<T>;
fn deref(&self) -> &BufferView<T> {
&self.buffer
}
}
impl<T> DerefMut for UniformBuffer<T> where T: Copy {
fn deref_mut(&mut self) -> &mut BufferView<T> {
&mut self.buffer
}
}
impl<'a, T> AsUniformValue for &'a UniformBuffer<T> where T: UniformBlock + Copy {
fn as_uniform_value(&self) -> UniformValue {
UniformValue::Block(self.buffer.as_slice_any(), <T as UniformBlock>::matches)
}
fn matches(_: &UniformType) -> bool {
false
}
}
|
{
let buffer = match BufferView::new(facade, &data, BufferType::UniformBuffer, true) {
Ok(b) => b,
Err(BufferCreationError::BufferTypeNotSupported) => return None,
e @ Err(_) => e.unwrap(),
};
Some(UniformBuffer {
buffer: buffer,
})
}
|
identifier_body
|
rotate_point.rs
|
// Copyright 2016 The Noise-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate noise;
use noise::{Cylinders, RotatePoint};
mod debug;
fn main()
|
{
let cylinders = Cylinders::new();
let rotate_point = RotatePoint::new(cylinders).set_x_angle(60.0);
debug::render_noise_module3("rotate_point.png", &rotate_point, 1024, 1024, 50);
}
|
identifier_body
|
|
rotate_point.rs
|
// Copyright 2016 The Noise-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate noise;
use noise::{Cylinders, RotatePoint};
mod debug;
fn main() {
|
let cylinders = Cylinders::new();
let rotate_point = RotatePoint::new(cylinders).set_x_angle(60.0);
debug::render_noise_module3("rotate_point.png", &rotate_point, 1024, 1024, 50);
}
|
random_line_split
|
|
rotate_point.rs
|
// Copyright 2016 The Noise-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate noise;
use noise::{Cylinders, RotatePoint};
mod debug;
fn
|
() {
let cylinders = Cylinders::new();
let rotate_point = RotatePoint::new(cylinders).set_x_angle(60.0);
debug::render_noise_module3("rotate_point.png", &rotate_point, 1024, 1024, 50);
}
|
main
|
identifier_name
|
shared_lock.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Different objects protected by the same lock
#[cfg(feature = "gecko")]
use atomic_refcell::{AtomicRefCell, AtomicRef, AtomicRefMut};
#[cfg(feature = "servo")]
use parking_lot::RwLock;
use servo_arc::Arc;
use std::cell::UnsafeCell;
use std::fmt;
#[cfg(feature = "gecko")]
use std::ptr;
use str::{CssString, CssStringWriter};
use stylesheets::Origin;
/// A shared read/write lock that can protect multiple objects.
///
/// In Gecko builds, we don't need the blocking behavior, just the safety. As
/// such we implement this with an AtomicRefCell instead in Gecko builds,
/// which is ~2x as fast, and panics (rather than deadlocking) when things go
/// wrong (which is much easier to debug on CI).
///
/// Servo needs the blocking behavior for its unsynchronized animation setup,
/// but that may not be web-compatible and may need to be changed (at which
/// point Servo could use AtomicRefCell too).
#[derive(Clone)]
#[cfg_attr(feature = "servo", derive(MallocSizeOf))]
pub struct SharedRwLock {
#[cfg(feature = "servo")]
#[cfg_attr(feature = "servo", ignore_malloc_size_of = "Arc")]
arc: Arc<RwLock<()>>,
#[cfg(feature = "gecko")]
cell: Arc<AtomicRefCell<SomethingZeroSizedButTyped>>,
}
#[cfg(feature = "gecko")]
struct SomethingZeroSizedButTyped;
impl fmt::Debug for SharedRwLock {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("SharedRwLock")
}
}
impl SharedRwLock {
/// Create a new shared lock (servo).
#[cfg(feature = "servo")]
pub fn new() -> Self {
SharedRwLock {
arc: Arc::new(RwLock::new(()))
}
}
/// Create a new shared lock (gecko).
#[cfg(feature = "gecko")]
pub fn new() -> Self {
SharedRwLock {
cell: Arc::new(AtomicRefCell::new(SomethingZeroSizedButTyped))
}
}
/// Wrap the given data to make its access protected by this lock.
pub fn wrap<T>(&self, data: T) -> Locked<T> {
Locked {
shared_lock: self.clone(),
data: UnsafeCell::new(data),
}
}
/// Obtain the lock for reading (servo).
#[cfg(feature = "servo")]
pub fn read(&self) -> SharedRwLockReadGuard {
self.arc.raw_read();
SharedRwLockReadGuard(self)
}
/// Obtain the lock for reading (gecko).
#[cfg(feature = "gecko")]
pub fn read(&self) -> SharedRwLockReadGuard
|
/// Obtain the lock for writing (servo).
#[cfg(feature = "servo")]
pub fn write(&self) -> SharedRwLockWriteGuard {
self.arc.raw_write();
SharedRwLockWriteGuard(self)
}
/// Obtain the lock for writing (gecko).
#[cfg(feature = "gecko")]
pub fn write(&self) -> SharedRwLockWriteGuard {
SharedRwLockWriteGuard(self.cell.borrow_mut())
}
}
/// Proof that a shared lock was obtained for reading (servo).
#[cfg(feature = "servo")]
pub struct SharedRwLockReadGuard<'a>(&'a SharedRwLock);
/// Proof that a shared lock was obtained for writing (gecko).
#[cfg(feature = "gecko")]
pub struct SharedRwLockReadGuard<'a>(AtomicRef<'a, SomethingZeroSizedButTyped>);
#[cfg(feature = "servo")]
impl<'a> Drop for SharedRwLockReadGuard<'a> {
fn drop(&mut self) {
// Unsafe: self.lock is private to this module, only ever set after `raw_read()`,
// and never copied or cloned (see `compile_time_assert` below).
unsafe {
self.0.arc.raw_unlock_read()
}
}
}
/// Proof that a shared lock was obtained for writing (servo).
#[cfg(feature = "servo")]
pub struct SharedRwLockWriteGuard<'a>(&'a SharedRwLock);
/// Proof that a shared lock was obtained for writing (gecko).
#[cfg(feature = "gecko")]
pub struct SharedRwLockWriteGuard<'a>(AtomicRefMut<'a, SomethingZeroSizedButTyped>);
#[cfg(feature = "servo")]
impl<'a> Drop for SharedRwLockWriteGuard<'a> {
fn drop(&mut self) {
// Unsafe: self.lock is private to this module, only ever set after `raw_write()`,
// and never copied or cloned (see `compile_time_assert` below).
unsafe {
self.0.arc.raw_unlock_write()
}
}
}
/// Data protect by a shared lock.
pub struct Locked<T> {
shared_lock: SharedRwLock,
data: UnsafeCell<T>,
}
// Unsafe: the data inside `UnsafeCell` is only accessed in `read_with` and `write_with`,
// where guards ensure synchronization.
unsafe impl<T: Send> Send for Locked<T> {}
unsafe impl<T: Send + Sync> Sync for Locked<T> {}
impl<T: fmt::Debug> fmt::Debug for Locked<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let guard = self.shared_lock.read();
self.read_with(&guard).fmt(f)
}
}
impl<T> Locked<T> {
#[cfg(feature = "servo")]
fn same_lock_as(&self, lock: &SharedRwLock) -> bool {
Arc::ptr_eq(&self.shared_lock.arc, &lock.arc)
}
#[cfg(feature = "gecko")]
fn same_lock_as(&self, derefed_guard: &SomethingZeroSizedButTyped) -> bool {
ptr::eq(self.shared_lock.cell.as_ptr(), derefed_guard)
}
/// Access the data for reading.
pub fn read_with<'a>(&'a self, guard: &'a SharedRwLockReadGuard) -> &'a T {
assert!(self.same_lock_as(&guard.0),
"Locked::read_with called with a guard from an unrelated SharedRwLock");
let ptr = self.data.get();
// Unsafe:
//
// * The guard guarantees that the lock is taken for reading,
// and we’ve checked that it’s the correct lock.
// * The returned reference borrows *both* the data and the guard,
// so that it can outlive neither.
unsafe {
&*ptr
}
}
/// Access the data for reading without verifying the lock. Use with caution.
#[cfg(feature = "gecko")]
pub unsafe fn read_unchecked<'a>(&'a self) -> &'a T {
let ptr = self.data.get();
&*ptr
}
/// Access the data for writing.
pub fn write_with<'a>(&'a self, guard: &'a mut SharedRwLockWriteGuard) -> &'a mut T {
assert!(self.same_lock_as(&guard.0),
"Locked::write_with called with a guard from an unrelated SharedRwLock");
let ptr = self.data.get();
// Unsafe:
//
// * The guard guarantees that the lock is taken for writing,
// and we’ve checked that it’s the correct lock.
// * The returned reference borrows *both* the data and the guard,
// so that it can outlive neither.
// * We require a mutable borrow of the guard,
// so that one write guard can only be used once at a time.
unsafe {
&mut *ptr
}
}
}
#[allow(dead_code)]
mod compile_time_assert {
use super::{SharedRwLockReadGuard, SharedRwLockWriteGuard};
trait Marker1 {}
impl<T: Clone> Marker1 for T {}
impl<'a> Marker1 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard:!Clone
impl<'a> Marker1 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard:!Clone
trait Marker2 {}
impl<T: Copy> Marker2 for T {}
impl<'a> Marker2 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard:!Copy
impl<'a> Marker2 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard:!Copy
}
/// Like ToCss, but with a lock guard given by the caller, and with the writer specified
/// concretely rather than with a parameter.
pub trait ToCssWithGuard {
/// Serialize `self` in CSS syntax, writing to `dest`, using the given lock guard.
fn to_css(&self, guard: &SharedRwLockReadGuard, dest: &mut CssStringWriter) -> fmt::Result;
/// Serialize `self` in CSS syntax using the given lock guard and return a string.
///
/// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
#[inline]
fn to_css_string(&self, guard: &SharedRwLockReadGuard) -> CssString {
let mut s = CssString::new();
self.to_css(guard, &mut s).unwrap();
s
}
}
/// Parameters needed for deep clones.
#[cfg(feature = "gecko")]
pub struct DeepCloneParams {
/// The new sheet we're cloning rules into.
pub reference_sheet: *const ::gecko_bindings::structs::ServoStyleSheet,
}
/// Parameters needed for deep clones.
#[cfg(feature = "servo")]
pub struct DeepCloneParams;
/// A trait to do a deep clone of a given CSS type. Gets a lock and a read
/// guard, in order to be able to read and clone nested structures.
pub trait DeepCloneWithLock : Sized {
/// Deep clones this object.
fn deep_clone_with_lock(
&self,
lock: &SharedRwLock,
guard: &SharedRwLockReadGuard,
params: &DeepCloneParams,
) -> Self;
}
/// Guards for a document
#[derive(Clone)]
pub struct StylesheetGuards<'a> {
/// For author-origin stylesheets.
pub author: &'a SharedRwLockReadGuard<'a>,
/// For user-agent-origin and user-origin stylesheets
pub ua_or_user: &'a SharedRwLockReadGuard<'a>,
}
impl<'a> StylesheetGuards<'a> {
/// Get the guard for a given stylesheet origin.
pub fn for_origin(&self, origin: Origin) -> &SharedRwLockReadGuard<'a> {
match origin {
Origin::Author => &self.author,
_ => &self.ua_or_user,
}
}
/// Same guard for all origins
pub fn same(guard: &'a SharedRwLockReadGuard<'a>) -> Self {
StylesheetGuards {
author: guard,
ua_or_user: guard,
}
}
}
|
{
SharedRwLockReadGuard(self.cell.borrow())
}
|
identifier_body
|
shared_lock.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Different objects protected by the same lock
#[cfg(feature = "gecko")]
use atomic_refcell::{AtomicRefCell, AtomicRef, AtomicRefMut};
#[cfg(feature = "servo")]
use parking_lot::RwLock;
use servo_arc::Arc;
use std::cell::UnsafeCell;
use std::fmt;
#[cfg(feature = "gecko")]
use std::ptr;
use str::{CssString, CssStringWriter};
use stylesheets::Origin;
/// A shared read/write lock that can protect multiple objects.
///
/// In Gecko builds, we don't need the blocking behavior, just the safety. As
/// such we implement this with an AtomicRefCell instead in Gecko builds,
/// which is ~2x as fast, and panics (rather than deadlocking) when things go
/// wrong (which is much easier to debug on CI).
///
/// Servo needs the blocking behavior for its unsynchronized animation setup,
/// but that may not be web-compatible and may need to be changed (at which
/// point Servo could use AtomicRefCell too).
#[derive(Clone)]
#[cfg_attr(feature = "servo", derive(MallocSizeOf))]
pub struct SharedRwLock {
#[cfg(feature = "servo")]
#[cfg_attr(feature = "servo", ignore_malloc_size_of = "Arc")]
arc: Arc<RwLock<()>>,
#[cfg(feature = "gecko")]
cell: Arc<AtomicRefCell<SomethingZeroSizedButTyped>>,
}
#[cfg(feature = "gecko")]
struct SomethingZeroSizedButTyped;
impl fmt::Debug for SharedRwLock {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("SharedRwLock")
}
}
impl SharedRwLock {
/// Create a new shared lock (servo).
#[cfg(feature = "servo")]
pub fn
|
() -> Self {
SharedRwLock {
arc: Arc::new(RwLock::new(()))
}
}
/// Create a new shared lock (gecko).
#[cfg(feature = "gecko")]
pub fn new() -> Self {
SharedRwLock {
cell: Arc::new(AtomicRefCell::new(SomethingZeroSizedButTyped))
}
}
/// Wrap the given data to make its access protected by this lock.
pub fn wrap<T>(&self, data: T) -> Locked<T> {
Locked {
shared_lock: self.clone(),
data: UnsafeCell::new(data),
}
}
/// Obtain the lock for reading (servo).
#[cfg(feature = "servo")]
pub fn read(&self) -> SharedRwLockReadGuard {
self.arc.raw_read();
SharedRwLockReadGuard(self)
}
/// Obtain the lock for reading (gecko).
#[cfg(feature = "gecko")]
pub fn read(&self) -> SharedRwLockReadGuard {
SharedRwLockReadGuard(self.cell.borrow())
}
/// Obtain the lock for writing (servo).
#[cfg(feature = "servo")]
pub fn write(&self) -> SharedRwLockWriteGuard {
self.arc.raw_write();
SharedRwLockWriteGuard(self)
}
/// Obtain the lock for writing (gecko).
#[cfg(feature = "gecko")]
pub fn write(&self) -> SharedRwLockWriteGuard {
SharedRwLockWriteGuard(self.cell.borrow_mut())
}
}
/// Proof that a shared lock was obtained for reading (servo).
#[cfg(feature = "servo")]
pub struct SharedRwLockReadGuard<'a>(&'a SharedRwLock);
/// Proof that a shared lock was obtained for writing (gecko).
#[cfg(feature = "gecko")]
pub struct SharedRwLockReadGuard<'a>(AtomicRef<'a, SomethingZeroSizedButTyped>);
#[cfg(feature = "servo")]
impl<'a> Drop for SharedRwLockReadGuard<'a> {
fn drop(&mut self) {
// Unsafe: self.lock is private to this module, only ever set after `raw_read()`,
// and never copied or cloned (see `compile_time_assert` below).
unsafe {
self.0.arc.raw_unlock_read()
}
}
}
/// Proof that a shared lock was obtained for writing (servo).
#[cfg(feature = "servo")]
pub struct SharedRwLockWriteGuard<'a>(&'a SharedRwLock);
/// Proof that a shared lock was obtained for writing (gecko).
#[cfg(feature = "gecko")]
pub struct SharedRwLockWriteGuard<'a>(AtomicRefMut<'a, SomethingZeroSizedButTyped>);
#[cfg(feature = "servo")]
impl<'a> Drop for SharedRwLockWriteGuard<'a> {
fn drop(&mut self) {
// Unsafe: self.lock is private to this module, only ever set after `raw_write()`,
// and never copied or cloned (see `compile_time_assert` below).
unsafe {
self.0.arc.raw_unlock_write()
}
}
}
/// Data protect by a shared lock.
pub struct Locked<T> {
shared_lock: SharedRwLock,
data: UnsafeCell<T>,
}
// Unsafe: the data inside `UnsafeCell` is only accessed in `read_with` and `write_with`,
// where guards ensure synchronization.
unsafe impl<T: Send> Send for Locked<T> {}
unsafe impl<T: Send + Sync> Sync for Locked<T> {}
impl<T: fmt::Debug> fmt::Debug for Locked<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let guard = self.shared_lock.read();
self.read_with(&guard).fmt(f)
}
}
impl<T> Locked<T> {
#[cfg(feature = "servo")]
fn same_lock_as(&self, lock: &SharedRwLock) -> bool {
Arc::ptr_eq(&self.shared_lock.arc, &lock.arc)
}
#[cfg(feature = "gecko")]
fn same_lock_as(&self, derefed_guard: &SomethingZeroSizedButTyped) -> bool {
ptr::eq(self.shared_lock.cell.as_ptr(), derefed_guard)
}
/// Access the data for reading.
pub fn read_with<'a>(&'a self, guard: &'a SharedRwLockReadGuard) -> &'a T {
assert!(self.same_lock_as(&guard.0),
"Locked::read_with called with a guard from an unrelated SharedRwLock");
let ptr = self.data.get();
// Unsafe:
//
// * The guard guarantees that the lock is taken for reading,
// and we’ve checked that it’s the correct lock.
// * The returned reference borrows *both* the data and the guard,
// so that it can outlive neither.
unsafe {
&*ptr
}
}
/// Access the data for reading without verifying the lock. Use with caution.
#[cfg(feature = "gecko")]
pub unsafe fn read_unchecked<'a>(&'a self) -> &'a T {
let ptr = self.data.get();
&*ptr
}
/// Access the data for writing.
pub fn write_with<'a>(&'a self, guard: &'a mut SharedRwLockWriteGuard) -> &'a mut T {
assert!(self.same_lock_as(&guard.0),
"Locked::write_with called with a guard from an unrelated SharedRwLock");
let ptr = self.data.get();
// Unsafe:
//
// * The guard guarantees that the lock is taken for writing,
// and we’ve checked that it’s the correct lock.
// * The returned reference borrows *both* the data and the guard,
// so that it can outlive neither.
// * We require a mutable borrow of the guard,
// so that one write guard can only be used once at a time.
unsafe {
&mut *ptr
}
}
}
#[allow(dead_code)]
mod compile_time_assert {
use super::{SharedRwLockReadGuard, SharedRwLockWriteGuard};
trait Marker1 {}
impl<T: Clone> Marker1 for T {}
impl<'a> Marker1 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard:!Clone
impl<'a> Marker1 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard:!Clone
trait Marker2 {}
impl<T: Copy> Marker2 for T {}
impl<'a> Marker2 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard:!Copy
impl<'a> Marker2 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard:!Copy
}
/// Like ToCss, but with a lock guard given by the caller, and with the writer specified
/// concretely rather than with a parameter.
pub trait ToCssWithGuard {
/// Serialize `self` in CSS syntax, writing to `dest`, using the given lock guard.
fn to_css(&self, guard: &SharedRwLockReadGuard, dest: &mut CssStringWriter) -> fmt::Result;
/// Serialize `self` in CSS syntax using the given lock guard and return a string.
///
/// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
#[inline]
fn to_css_string(&self, guard: &SharedRwLockReadGuard) -> CssString {
let mut s = CssString::new();
self.to_css(guard, &mut s).unwrap();
s
}
}
/// Parameters needed for deep clones.
#[cfg(feature = "gecko")]
pub struct DeepCloneParams {
/// The new sheet we're cloning rules into.
pub reference_sheet: *const ::gecko_bindings::structs::ServoStyleSheet,
}
/// Parameters needed for deep clones.
#[cfg(feature = "servo")]
pub struct DeepCloneParams;
/// A trait to do a deep clone of a given CSS type. Gets a lock and a read
/// guard, in order to be able to read and clone nested structures.
pub trait DeepCloneWithLock : Sized {
/// Deep clones this object.
fn deep_clone_with_lock(
&self,
lock: &SharedRwLock,
guard: &SharedRwLockReadGuard,
params: &DeepCloneParams,
) -> Self;
}
/// Guards for a document
#[derive(Clone)]
pub struct StylesheetGuards<'a> {
/// For author-origin stylesheets.
pub author: &'a SharedRwLockReadGuard<'a>,
/// For user-agent-origin and user-origin stylesheets
pub ua_or_user: &'a SharedRwLockReadGuard<'a>,
}
impl<'a> StylesheetGuards<'a> {
/// Get the guard for a given stylesheet origin.
pub fn for_origin(&self, origin: Origin) -> &SharedRwLockReadGuard<'a> {
match origin {
Origin::Author => &self.author,
_ => &self.ua_or_user,
}
}
/// Same guard for all origins
pub fn same(guard: &'a SharedRwLockReadGuard<'a>) -> Self {
StylesheetGuards {
author: guard,
ua_or_user: guard,
}
}
}
|
new
|
identifier_name
|
shared_lock.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Different objects protected by the same lock
#[cfg(feature = "gecko")]
use atomic_refcell::{AtomicRefCell, AtomicRef, AtomicRefMut};
#[cfg(feature = "servo")]
use parking_lot::RwLock;
use servo_arc::Arc;
use std::cell::UnsafeCell;
use std::fmt;
#[cfg(feature = "gecko")]
use std::ptr;
use str::{CssString, CssStringWriter};
use stylesheets::Origin;
/// A shared read/write lock that can protect multiple objects.
///
/// In Gecko builds, we don't need the blocking behavior, just the safety. As
/// such we implement this with an AtomicRefCell instead in Gecko builds,
/// which is ~2x as fast, and panics (rather than deadlocking) when things go
/// wrong (which is much easier to debug on CI).
///
/// Servo needs the blocking behavior for its unsynchronized animation setup,
/// but that may not be web-compatible and may need to be changed (at which
/// point Servo could use AtomicRefCell too).
#[derive(Clone)]
#[cfg_attr(feature = "servo", derive(MallocSizeOf))]
pub struct SharedRwLock {
#[cfg(feature = "servo")]
#[cfg_attr(feature = "servo", ignore_malloc_size_of = "Arc")]
arc: Arc<RwLock<()>>,
#[cfg(feature = "gecko")]
cell: Arc<AtomicRefCell<SomethingZeroSizedButTyped>>,
}
#[cfg(feature = "gecko")]
struct SomethingZeroSizedButTyped;
impl fmt::Debug for SharedRwLock {
    /// Every lock renders identically; there is no inner state worth showing.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "SharedRwLock")
    }
}
impl SharedRwLock {
/// Create a new shared lock (servo).
///
/// The lock protects no data of its own: it wraps a unit `RwLock` and only
/// the locking behavior is used; protected values are attached via `wrap`.
#[cfg(feature = "servo")]
pub fn new() -> Self {
    SharedRwLock {
        arc: Arc::new(RwLock::new(()))
    }
}
/// Create a new shared lock (gecko).
///
/// Backed by an `AtomicRefCell`, so conflicting access panics instead of
/// blocking (see the type-level comment on `SharedRwLock`).
#[cfg(feature = "gecko")]
pub fn new() -> Self {
    SharedRwLock {
        cell: Arc::new(AtomicRefCell::new(SomethingZeroSizedButTyped))
    }
}
/// Associate `data` with this lock, so that it can only be reached through
/// a guard obtained from this same `SharedRwLock`.
pub fn wrap<T>(&self, data: T) -> Locked<T> {
    let shared_lock = self.clone();
    let data = UnsafeCell::new(data);
    Locked { shared_lock, data }
}
/// Obtain the lock for reading (servo).
|
self.arc.raw_read();
SharedRwLockReadGuard(self)
}
/// Obtain the lock for reading (gecko).
///
/// Panics if the cell is currently mutably borrowed (AtomicRefCell
/// semantics; see the type-level comment on `SharedRwLock`).
#[cfg(feature = "gecko")]
pub fn read(&self) -> SharedRwLockReadGuard {
    SharedRwLockReadGuard(self.cell.borrow())
}
/// Obtain the lock for writing (servo).
///
/// Takes the raw write lock here; release happens in
/// `SharedRwLockWriteGuard::drop`, which calls `raw_unlock_write`.
#[cfg(feature = "servo")]
pub fn write(&self) -> SharedRwLockWriteGuard {
    self.arc.raw_write();
    SharedRwLockWriteGuard(self)
}
/// Obtain the lock for writing (gecko).
///
/// Panics if the cell is already borrowed, mutably or not.
#[cfg(feature = "gecko")]
pub fn write(&self) -> SharedRwLockWriteGuard {
    SharedRwLockWriteGuard(self.cell.borrow_mut())
}
}
/// Proof that a shared lock was obtained for reading (servo).
///
/// Holds a reference back to the lock so `Drop` can release the raw lock.
#[cfg(feature = "servo")]
pub struct SharedRwLockReadGuard<'a>(&'a SharedRwLock);
/// Proof that a shared lock was obtained for reading (gecko).
/// (The `AtomicRef` releases the borrow automatically when dropped.)
#[cfg(feature = "gecko")]
pub struct SharedRwLockReadGuard<'a>(AtomicRef<'a, SomethingZeroSizedButTyped>);
#[cfg(feature = "servo")]
impl<'a> Drop for SharedRwLockReadGuard<'a> {
    fn drop(&mut self) {
        // Unsafe: self.lock is private to this module, only ever set after `raw_read()`,
        // and never copied or cloned (see `compile_time_assert` below).
        unsafe {
            self.0.arc.raw_unlock_read()
        }
    }
}
/// Proof that a shared lock was obtained for writing (servo).
///
/// Holds a reference back to the lock so `Drop` can release the raw lock.
#[cfg(feature = "servo")]
pub struct SharedRwLockWriteGuard<'a>(&'a SharedRwLock);
/// Proof that a shared lock was obtained for writing (gecko).
/// (The `AtomicRefMut` releases the mutable borrow automatically on drop.)
#[cfg(feature = "gecko")]
pub struct SharedRwLockWriteGuard<'a>(AtomicRefMut<'a, SomethingZeroSizedButTyped>);
#[cfg(feature = "servo")]
impl<'a> Drop for SharedRwLockWriteGuard<'a> {
    fn drop(&mut self) {
        // Unsafe: self.lock is private to this module, only ever set after `raw_write()`,
        // and never copied or cloned (see `compile_time_assert` below).
        unsafe {
            self.0.arc.raw_unlock_write()
        }
    }
}
/// Data protected by a shared lock.
pub struct Locked<T> {
    // Handle to the owning lock; used by `same_lock_as` to verify that a
    // guard presented to `read_with`/`write_with` belongs to this lock.
    shared_lock: SharedRwLock,
    // The payload itself; only dereferenced once a matching guard is shown.
    data: UnsafeCell<T>,
}
// Unsafe: the data inside `UnsafeCell` is only accessed in `read_with` and `write_with`,
// where guards ensure synchronization.
unsafe impl<T: Send> Send for Locked<T> {}
unsafe impl<T: Send + Sync> Sync for Locked<T> {}
impl<T: fmt::Debug> fmt::Debug for Locked<T> {
    // Takes the read lock itself for the duration of formatting.
    // NOTE(review): under gecko's AtomicRefCell this will panic if the data
    // is currently borrowed for writing — acceptable for a Debug impl.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let guard = self.shared_lock.read();
        self.read_with(&guard).fmt(f)
    }
}
impl<T> Locked<T> {
    /// Whether `lock` is the very same lock this data was wrapped with
    /// (pointer identity of the shared `Arc`).
    #[cfg(feature = "servo")]
    fn same_lock_as(&self, lock: &SharedRwLock) -> bool {
        Arc::ptr_eq(&self.shared_lock.arc, &lock.arc)
    }
    /// Whether the given guard's borrow points into this data's own
    /// `AtomicRefCell` (pointer identity of the zero-sized payload).
    #[cfg(feature = "gecko")]
    fn same_lock_as(&self, derefed_guard: &SomethingZeroSizedButTyped) -> bool {
        ptr::eq(self.shared_lock.cell.as_ptr(), derefed_guard)
    }
    /// Access the data for reading.
    ///
    /// Panics if `guard` was obtained from a different `SharedRwLock`.
    pub fn read_with<'a>(&'a self, guard: &'a SharedRwLockReadGuard) -> &'a T {
        assert!(self.same_lock_as(&guard.0),
                "Locked::read_with called with a guard from an unrelated SharedRwLock");
        let ptr = self.data.get();
        // Unsafe:
        //
        // * The guard guarantees that the lock is taken for reading,
        //   and we’ve checked that it’s the correct lock.
        // * The returned reference borrows *both* the data and the guard,
        //   so that it can outlive neither.
        unsafe {
            &*ptr
        }
    }
    /// Access the data for reading without verifying the lock. Use with caution.
    ///
    /// Caller must guarantee no concurrent writer exists for the lifetime of
    /// the returned reference; nothing here checks or takes the lock.
    #[cfg(feature = "gecko")]
    pub unsafe fn read_unchecked<'a>(&'a self) -> &'a T {
        let ptr = self.data.get();
        &*ptr
    }
    /// Access the data for writing.
    ///
    /// Panics if `guard` was obtained from a different `SharedRwLock`.
    pub fn write_with<'a>(&'a self, guard: &'a mut SharedRwLockWriteGuard) -> &'a mut T {
        assert!(self.same_lock_as(&guard.0),
                "Locked::write_with called with a guard from an unrelated SharedRwLock");
        let ptr = self.data.get();
        // Unsafe:
        //
        // * The guard guarantees that the lock is taken for writing,
        //   and we’ve checked that it’s the correct lock.
        // * The returned reference borrows *both* the data and the guard,
        //   so that it can outlive neither.
        // * We require a mutable borrow of the guard,
        //   so that one write guard can only be used once at a time.
        unsafe {
            &mut *ptr
        }
    }
}
/// Compile-time proof that the guard types are neither `Clone` nor `Copy`:
/// each `Marker*` trait has a blanket impl for all `Clone`/`Copy` types, so
/// the explicit impls below would be overlapping (a compile error) if a
/// guard ever gained those traits. The `Drop`-based unlock protocol above
/// relies on guards being unique.
#[allow(dead_code)]
mod compile_time_assert {
    use super::{SharedRwLockReadGuard, SharedRwLockWriteGuard};
    trait Marker1 {}
    impl<T: Clone> Marker1 for T {}
    impl<'a> Marker1 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard:!Clone
    impl<'a> Marker1 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard:!Clone
    trait Marker2 {}
    impl<T: Copy> Marker2 for T {}
    impl<'a> Marker2 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard:!Copy
    impl<'a> Marker2 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard:!Copy
}
/// Like ToCss, but with a lock guard given by the caller, and with the writer specified
/// concretely rather than with a parameter.
pub trait ToCssWithGuard {
    /// Serialize `self` in CSS syntax, writing to `dest`, using the given lock guard.
    fn to_css(&self, guard: &SharedRwLockReadGuard, dest: &mut CssStringWriter) -> fmt::Result;
    /// Serialize `self` in CSS syntax using the given lock guard and return a string.
    ///
    /// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
    ///
    /// Panics if the underlying `to_css` implementation returns an error.
    #[inline]
    fn to_css_string(&self, guard: &SharedRwLockReadGuard) -> CssString {
        let mut s = CssString::new();
        self.to_css(guard, &mut s).unwrap();
        s
    }
}
/// Parameters needed for deep clones.
#[cfg(feature = "gecko")]
pub struct DeepCloneParams {
    /// The new sheet we're cloning rules into.
    /// Raw pointer; not dereferenced in this module — consumers of the
    /// clone use it (presumably on the gecko side; confirm at call sites).
    pub reference_sheet: *const ::gecko_bindings::structs::ServoStyleSheet,
}
/// Parameters needed for deep clones.
/// (Servo needs no extra context, hence the unit struct.)
#[cfg(feature = "servo")]
pub struct DeepCloneParams;
/// A trait to do a deep clone of a given CSS type. Gets a lock and a read
/// guard, in order to be able to read and clone nested structures.
pub trait DeepCloneWithLock : Sized {
    /// Deep clones this object.
    ///
    /// `guard` must belong to the lock protecting `self`'s nested data;
    /// `lock` is the lock the cloned structures will be wrapped with.
    fn deep_clone_with_lock(
        &self,
        lock: &SharedRwLock,
        guard: &SharedRwLockReadGuard,
        params: &DeepCloneParams,
    ) -> Self;
}
/// Guards for a document
///
/// `Clone` is cheap here: the struct only holds two references.
#[derive(Clone)]
pub struct StylesheetGuards<'a> {
    /// For author-origin stylesheets.
    pub author: &'a SharedRwLockReadGuard<'a>,
    /// For user-agent-origin and user-origin stylesheets
    pub ua_or_user: &'a SharedRwLockReadGuard<'a>,
}
impl<'a> StylesheetGuards<'a> {
    /// Return the read guard appropriate for stylesheets of `origin`.
    pub fn for_origin(&self, origin: Origin) -> &SharedRwLockReadGuard<'a> {
        if let Origin::Author = origin {
            self.author
        } else {
            self.ua_or_user
        }
    }

    /// Build a guard set that reuses one guard for every origin.
    pub fn same(guard: &'a SharedRwLockReadGuard<'a>) -> Self {
        StylesheetGuards { author: guard, ua_or_user: guard }
    }
}
|
#[cfg(feature = "servo")]
pub fn read(&self) -> SharedRwLockReadGuard {
|
random_line_split
|
redirect.rs
|
use response::{Response, Responder};
use http::hyper::header;
use http::Status;
/// An empty redirect response to a given URL.
///
/// This type simplifies returning a redirect response to the client.
#[derive(Debug)]
// Fields: the status code to send, and the target URL that the `Responder`
// impl places in the `Location` header.
pub struct Redirect(Status, String);
impl Redirect {
/// Construct a temporary "see other" (303) redirect response. This is the
/// typical response when redirecting a user to another page. This type of
/// redirect indicates that the client should look elsewhere, but always via
/// a `GET` request, for a given resource.
///
/// # Examples
///
/// ```rust
/// use rocket::response::Redirect;
///
/// let redirect = Redirect::to("/other_url");
/// ```
pub fn to(uri: &str) -> Redirect {
Redirect(Status::SeeOther, String::from(uri))
}
/// Construct a "temporary" (307) redirect response. This response instructs
/// the client to reissue the current request to a different URL,
/// maintaining the contents of the request identically. This means that,
/// for example, a `POST` request will be resent, contents included, to the
/// requested URL.
///
/// # Examples
///
/// ```rust
/// use rocket::response::Redirect;
///
/// let redirect = Redirect::temporary("/other_url");
/// ```
pub fn temporary(uri: &str) -> Redirect {
Redirect(Status::TemporaryRedirect, String::from(uri))
}
/// Construct a "permanent" (308) redirect response. This redirect must only
/// be used for permanent redirects as it is cached by clients. This
/// response instructs the client to reissue requests for the current URL to
/// a different URL, now and in the future, maintaining the contents of the
/// request identically. This means that, for example, a `POST` request will
/// be resent, contents included, to the requested URL.
///
/// # Examples
///
/// ```rust
/// use rocket::response::Redirect;
///
/// let redirect = Redirect::permanent("/other_url");
/// ```
pub fn permanent(uri: &str) -> Redirect {
Redirect(Status::PermanentRedirect, String::from(uri))
}
/// Construct a temporary "found" (302) redirect response. This response
/// instructs the client to reissue the current request to a different URL,
/// ideally maintaining the contents of the request identically.
/// Unfortunately, different clients may respond differently to this type of
/// redirect, so `303` or `307` redirects, which disambiguate, are
/// preferred.
///
/// # Examples
///
/// ```rust
/// use rocket::response::Redirect;
///
/// let redirect = Redirect::found("/other_url");
/// ```
pub fn found(uri: &str) -> Redirect {
Redirect(Status::Found, String::from(uri))
}
|
/// Construct a permanent "moved" (301) redirect response. This response
/// should only be used for permanent redirects as it can be cached by
/// browsers. Because different clients may respond differently to this type
/// of redirect, a `308` redirect, which disambiguates, is preferred.
///
/// # Examples
///
/// ```rust
/// use rocket::response::Redirect;
///
/// let redirect = Redirect::moved("/other_url");
/// ```
pub fn moved(uri: &str) -> Redirect {
Redirect(Status::MovedPermanently, String::from(uri))
}
}
/// Constructs a response with the appropriate status code and the given URL in
/// the `Location` header field. The body of the response is empty. This
/// responder does not fail.
impl Responder<'static> for Redirect {
fn respond(self) -> Result<Response<'static>, Status> {
Response::build()
.status(self.0)
.header(header::Location(self.1))
.ok()
}
}
|
random_line_split
|
|
redirect.rs
|
use response::{Response, Responder};
use http::hyper::header;
use http::Status;
/// An empty redirect response to a given URL.
///
/// This type simplifies returning a redirect response to the client.
#[derive(Debug)]
pub struct Redirect(Status, String);
impl Redirect {
/// Construct a temporary "see other" (303) redirect response. This is the
/// typical response when redirecting a user to another page. This type of
/// redirect indicates that the client should look elsewhere, but always via
/// a `GET` request, for a given resource.
///
/// # Examples
///
/// ```rust
/// use rocket::response::Redirect;
///
/// let redirect = Redirect::to("/other_url");
/// ```
pub fn
|
(uri: &str) -> Redirect {
Redirect(Status::SeeOther, String::from(uri))
}
/// Construct a "temporary" (307) redirect response. This response instructs
/// the client to reissue the current request to a different URL,
/// maintaining the contents of the request identically. This means that,
/// for example, a `POST` request will be resent, contents included, to the
/// requested URL.
///
/// # Examples
///
/// ```rust
/// use rocket::response::Redirect;
///
/// let redirect = Redirect::temporary("/other_url");
/// ```
pub fn temporary(uri: &str) -> Redirect {
Redirect(Status::TemporaryRedirect, String::from(uri))
}
/// Construct a "permanent" (308) redirect response. This redirect must only
/// be used for permanent redirects as it is cached by clients. This
/// response instructs the client to reissue requests for the current URL to
/// a different URL, now and in the future, maintaining the contents of the
/// request identically. This means that, for example, a `POST` request will
/// be resent, contents included, to the requested URL.
///
/// # Examples
///
/// ```rust
/// use rocket::response::Redirect;
///
/// let redirect = Redirect::permanent("/other_url");
/// ```
pub fn permanent(uri: &str) -> Redirect {
Redirect(Status::PermanentRedirect, String::from(uri))
}
/// Construct a temporary "found" (302) redirect response. This response
/// instructs the client to reissue the current request to a different URL,
/// ideally maintaining the contents of the request identically.
/// Unfortunately, different clients may respond differently to this type of
/// redirect, so `303` or `307` redirects, which disambiguate, are
/// preferred.
///
/// # Examples
///
/// ```rust
/// use rocket::response::Redirect;
///
/// let redirect = Redirect::found("/other_url");
/// ```
pub fn found(uri: &str) -> Redirect {
Redirect(Status::Found, String::from(uri))
}
/// Construct a permanent "moved" (301) redirect response. This response
/// should only be used for permanent redirects as it can be cached by
/// browsers. Because different clients may respond differently to this type
/// of redirect, a `308` redirect, which disambiguates, is preferred.
///
/// # Examples
///
/// ```rust
/// use rocket::response::Redirect;
///
/// let redirect = Redirect::moved("/other_url");
/// ```
pub fn moved(uri: &str) -> Redirect {
Redirect(Status::MovedPermanently, String::from(uri))
}
}
/// Constructs a response with the appropriate status code and the given URL in
/// the `Location` header field. The body of the response is empty. This
/// responder does not fail.
impl Responder<'static> for Redirect {
fn respond(self) -> Result<Response<'static>, Status> {
Response::build()
.status(self.0)
.header(header::Location(self.1))
.ok()
}
}
|
to
|
identifier_name
|
main.rs
|
/*
*Red, a line editor derived from ed editor
*Copyright (C) 2017 Walter Bruschi
*
*This program is free software: you can redistribute it and/or modify
*it under the terms of the GNU General Public License as published by
*the Free Software Foundation, either version 3 of the License, or
*(at your option) any later version.
*
*This program is distributed in the hope that it will be useful,
*but WITHOUT ANY WARRANTY; without even the implied warranty of
*MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*GNU General Public License for more details.
*
*You should have received a copy of the GNU General Public License
*along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*CREDITS
*This program is derived from Free Software Foundation version of ed.
*This program has been modified to be written in Rust insead of C.
*Any credit to the original work is theirs.
*Copyright (C) 1993, 1994, 2006-2017 Free Software Foundation, Inc.
*The original work can be found at the following website:
*https://www.gnu.org/software/ed/
*/
extern crate getopts;
extern crate regex;
mod parse;
mod iofs;
mod command;
mod regexp;
use std::io::{self, Write};
use std::env;
use getopts::Options;
use parse::*;
use iofs::*;
fn print_usage(program: &str, opts: Options)
|
/// Entry point: parse the command line, load the requested file into a line
/// buffer, then run the interactive editor loop until the user quits.
fn main() {
    let args: Vec<String> = env::args().collect();
    let program = args[0].clone();
    let mut opts = Options::new();
    // optional option that does not take an argument
    opts.optflag("h", "help", "print this help menu");
    // optional option that takes an argument
    opts.optopt("p", "prompt", "use STRING as an interactive prompt", "STRING");
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(e) => {
            // getopts errors implement Display; print and bail out.
            println!("{}", e);
            return;
        }
    };
    if matches.opt_present("h") {
        print_usage(&program, opts);
        return;
    }
    // `unwrap_or_else` defers the allocation of the default prompt to the
    // case where no `-p` argument was supplied.
    let prompt = matches.opt_str("p").unwrap_or_else(|| "*".to_string());
    // The first free argument is the file to edit; without one, show usage.
    let filename = if !matches.free.is_empty() {
        matches.free[0].clone()
    } else {
        print_usage(&program, opts);
        return;
    };
    let file = read_file(&filename);
    let reader = create_reader(file);
    let mut vector = store_buffer(reader);
    let mut yank_vector: Vec<String> = vec![];
    let file_size: i32 = vector.len() as i32;
    let mut red = Red::new();
    red.filename = filename;
    red.set_last_addr(file_size);
    // set current line to last line
    red.set_current_addr(file_size);
    red.prompt_on = matches.opt_present("p");
    loop {
        if red.prompt_on { print!("{}", prompt); }
        // print the prompt immediately (stdout is line-buffered)
        io::stdout().flush()
            .expect("cannot flush the stdout");
        let mut input = match get_input() {
            Ok(i) => i,
            Err(e) => {
                println!("{}", e);
                continue;
            }
        };
        // Dispatch one editor command; "?" is ed's traditional error report.
        match exec_command(&mut input, &mut vector,
                           &mut yank_vector, &mut red) {
            ReturnCommand::Quit => return,
            ReturnCommand::Error => println!("?"),
            _ => continue,
        };
    }
}
|
{
let brief = format!("Usage: {} FILE", program);
print!("{}", opts.usage(&brief));
}
|
identifier_body
|
main.rs
|
/*
*Red, a line editor derived from ed editor
*Copyright (C) 2017 Walter Bruschi
*
*This program is free software: you can redistribute it and/or modify
*it under the terms of the GNU General Public License as published by
*the Free Software Foundation, either version 3 of the License, or
*(at your option) any later version.
*
*This program is distributed in the hope that it will be useful,
*but WITHOUT ANY WARRANTY; without even the implied warranty of
*MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*GNU General Public License for more details.
*
*You should have received a copy of the GNU General Public License
*along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*CREDITS
*This program is derived from Free Software Foundation version of ed.
*This program has been modified to be written in Rust insead of C.
*Any credit to the original work is theirs.
*Copyright (C) 1993, 1994, 2006-2017 Free Software Foundation, Inc.
*The original work can be found at the following website:
*https://www.gnu.org/software/ed/
*/
extern crate getopts;
extern crate regex;
mod parse;
mod iofs;
mod command;
mod regexp;
use std::io::{self, Write};
use std::env;
use getopts::Options;
use parse::*;
use iofs::*;
/// Print the usage banner for `program`, built from the declared options.
fn print_usage(program: &str, opts: Options) {
    print!("{}", opts.usage(&format!("Usage: {} FILE", program)));
}
fn main() {
let args: Vec<String> = env::args().collect();
let program = args[0].clone();
let mut opts = Options::new();
// optional option that does not take an argument
opts.optflag("h", "help", "print this help menu");
// optional option that takes an argument
opts.optopt("p", "prompt", "use STRING as an interactive prompt", "STRING");
let matches = match opts.parse(&args[1..]) {
Ok(m) =>
|
,
Err(e) => {
println!("{}", e.to_string());
return;
}
};
if matches.opt_present("h") {
print_usage(&program, opts);
return;
}
let prompt = matches.opt_str("p").unwrap_or("*".to_string());
let filename = if!matches.free.is_empty() {
matches.free[0].clone()
} else {
print_usage(&program, opts);
return;
};
let file = read_file(&filename);
let reader = create_reader(file);
let mut vector = store_buffer(reader);
let mut yank_vector: Vec<String> = vec![];
let file_size: i32 = vector.len() as i32;
let mut red = Red::new();
red.filename = filename;
red.set_last_addr(file_size);
// set current line to last line
red.set_current_addr(file_size);
red.prompt_on = matches.opt_present("p");
loop {
if red.prompt_on {print!("{}", prompt);}
// print line immediately
io::stdout().flush()
.expect("cannot flush the stdout");
let mut input = match get_input() {
Ok(i) => i,
Err(e) => {
println!("{}", e);
continue;
}
};
match exec_command(&mut input, &mut vector,
&mut yank_vector, &mut red) {
ReturnCommand::Quit => return,
ReturnCommand::Error => println!("?"),
_ => {
//red.print();
continue
},
};
}
}
|
{ m }
|
conditional_block
|
main.rs
|
/*
*Red, a line editor derived from ed editor
*Copyright (C) 2017 Walter Bruschi
*
*This program is free software: you can redistribute it and/or modify
*it under the terms of the GNU General Public License as published by
*the Free Software Foundation, either version 3 of the License, or
*(at your option) any later version.
*
*This program is distributed in the hope that it will be useful,
*but WITHOUT ANY WARRANTY; without even the implied warranty of
*MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*GNU General Public License for more details.
*
*You should have received a copy of the GNU General Public License
*along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*CREDITS
*This program is derived from Free Software Foundation version of ed.
*This program has been modified to be written in Rust insead of C.
*Any credit to the original work is theirs.
*Copyright (C) 1993, 1994, 2006-2017 Free Software Foundation, Inc.
*The original work can be found at the following website:
*https://www.gnu.org/software/ed/
*/
extern crate getopts;
extern crate regex;
mod parse;
mod iofs;
mod command;
mod regexp;
use std::io::{self, Write};
use std::env;
use getopts::Options;
use parse::*;
use iofs::*;
/// Print the usage banner for `program` (getopts-generated option summary).
fn print_usage(program: &str, opts: Options) {
    // `usage` takes the brief line and appends one line per declared option.
    let brief = format!("Usage: {} FILE", program);
    print!("{}", opts.usage(&brief));
}
fn
|
() {
let args: Vec<String> = env::args().collect();
let program = args[0].clone();
let mut opts = Options::new();
// optional option that does not take an argument
opts.optflag("h", "help", "print this help menu");
// optional option that takes an argument
opts.optopt("p", "prompt", "use STRING as an interactive prompt", "STRING");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(e) => {
println!("{}", e.to_string());
return;
}
};
if matches.opt_present("h") {
print_usage(&program, opts);
return;
}
let prompt = matches.opt_str("p").unwrap_or("*".to_string());
let filename = if!matches.free.is_empty() {
matches.free[0].clone()
} else {
print_usage(&program, opts);
return;
};
let file = read_file(&filename);
let reader = create_reader(file);
let mut vector = store_buffer(reader);
let mut yank_vector: Vec<String> = vec![];
let file_size: i32 = vector.len() as i32;
let mut red = Red::new();
red.filename = filename;
red.set_last_addr(file_size);
// set current line to last line
red.set_current_addr(file_size);
red.prompt_on = matches.opt_present("p");
loop {
if red.prompt_on {print!("{}", prompt);}
// print line immediately
io::stdout().flush()
.expect("cannot flush the stdout");
let mut input = match get_input() {
Ok(i) => i,
Err(e) => {
println!("{}", e);
continue;
}
};
match exec_command(&mut input, &mut vector,
&mut yank_vector, &mut red) {
ReturnCommand::Quit => return,
ReturnCommand::Error => println!("?"),
_ => {
//red.print();
continue
},
};
}
}
|
main
|
identifier_name
|
main.rs
|
/*
*Red, a line editor derived from ed editor
*Copyright (C) 2017 Walter Bruschi
*
*This program is free software: you can redistribute it and/or modify
*it under the terms of the GNU General Public License as published by
*the Free Software Foundation, either version 3 of the License, or
*(at your option) any later version.
*
*This program is distributed in the hope that it will be useful,
*but WITHOUT ANY WARRANTY; without even the implied warranty of
*MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*GNU General Public License for more details.
*
|
*This program is derived from Free Software Foundation version of ed.
*This program has been modified to be written in Rust insead of C.
*Any credit to the original work is theirs.
*Copyright (C) 1993, 1994, 2006-2017 Free Software Foundation, Inc.
*The original work can be found at the following website:
*https://www.gnu.org/software/ed/
*/
extern crate getopts;
extern crate regex;
mod parse;
mod iofs;
mod command;
mod regexp;
use std::io::{self, Write};
use std::env;
use getopts::Options;
use parse::*;
use iofs::*;
fn print_usage(program: &str, opts: Options) {
let brief = format!("Usage: {} FILE", program);
print!("{}", opts.usage(&brief));
}
fn main() {
let args: Vec<String> = env::args().collect();
let program = args[0].clone();
let mut opts = Options::new();
// optional option that does not take an argument
opts.optflag("h", "help", "print this help menu");
// optional option that takes an argument
opts.optopt("p", "prompt", "use STRING as an interactive prompt", "STRING");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(e) => {
println!("{}", e.to_string());
return;
}
};
if matches.opt_present("h") {
print_usage(&program, opts);
return;
}
let prompt = matches.opt_str("p").unwrap_or("*".to_string());
let filename = if!matches.free.is_empty() {
matches.free[0].clone()
} else {
print_usage(&program, opts);
return;
};
let file = read_file(&filename);
let reader = create_reader(file);
let mut vector = store_buffer(reader);
let mut yank_vector: Vec<String> = vec![];
let file_size: i32 = vector.len() as i32;
let mut red = Red::new();
red.filename = filename;
red.set_last_addr(file_size);
// set current line to last line
red.set_current_addr(file_size);
red.prompt_on = matches.opt_present("p");
loop {
if red.prompt_on {print!("{}", prompt);}
// print line immediately
io::stdout().flush()
.expect("cannot flush the stdout");
let mut input = match get_input() {
Ok(i) => i,
Err(e) => {
println!("{}", e);
continue;
}
};
match exec_command(&mut input, &mut vector,
&mut yank_vector, &mut red) {
ReturnCommand::Quit => return,
ReturnCommand::Error => println!("?"),
_ => {
//red.print();
continue
},
};
}
}
|
*You should have received a copy of the GNU General Public License
*along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*CREDITS
|
random_line_split
|
computations.rs
|
// Lumol, an extensible molecular simulation engine
// Copyright (C) 2015-2016 Lumol's contributors — BSD license
use energy::{Potential, PairPotential};
/// Alternative energy and forces computation.
///
/// The `Computation` trait represent an alternative way to compute a given
/// potential. For example using interpolation on a table or on a grid, from a
/// Fourier decomposition, *etc.*
///
/// # Examples
///
/// ```
/// # use lumol::energy::Potential;
/// use lumol::energy::Computation;
/// use lumol::energy::Harmonic;
///
/// /// This is just a thin wrapper logging every time the `energy/force`
/// /// methods are called.
/// #[derive(Clone)]
/// struct LoggingComputation<T: Potential>(T);
///
/// impl<T: Potential> Computation for LoggingComputation<T> {
/// fn compute_energy(&self, r: f64) -> f64 {
/// println!("Called energy");
/// self.0.energy(r)
/// }
///
/// fn compute_force(&self, r: f64) -> f64 {
/// println!("Called force");
/// self.0.force(r)
/// }
/// }
///
/// let potential = Harmonic{x0: 0.5, k: 4.2};
/// let computation = LoggingComputation(potential.clone());
///
/// assert_eq!(computation.energy(1.0), potential.energy(1.0));
/// assert_eq!(computation.force(2.0), potential.force(2.0));
/// ```
pub trait Computation: Sync + Send {
/// Compute the energy value at `r`
fn compute_energy(&self, r: f64) -> f64;
/// Compute the force value at `r`
fn compute_force(&self, r: f64) -> f64;
}
impl<P: Computation + Clone +'static> Potential for P {
#[inline] fn energy(&self, r:f64) -> f64 {
self.compute_energy(r)
}
#[inline] fn force(&self, r:f64) -> f64 {
self.compute_force(r)
}
}
/// Computation of a potential using tabulated values.
///
/// This can be faster than direct computation for smooth potentials, but will
/// uses more memory and be less precise than direct computation. Values are
/// tabulated in the `[0, max)` range, and a cutoff is applied after `max`.
#[derive(Clone)]
pub struct TableComputation {
/// Step for tabulated value. `energy_table[i]`/`force_table[i]` contains
/// energy/force at `r = i * delta`
delta: f64,
/// Cutoff distance
cutoff: f64,
/// Tabulated potential
energy_table: Vec<f64>,
/// Tabulated compute_force
force_table: Vec<f64>,
/// Initial potential, kept around for tail corrections
potential: Box<PairPotential>,
}
impl TableComputation {
/// Create a new `TableComputation` for `potential`, with `size` points and
/// a maximum value of `max`.
///
/// # Examples
///
/// ```
/// # use lumol::energy::Potential;
/// use lumol::energy::TableComputation;
/// use lumol::energy::Harmonic;
///
/// let potential = Box::new(Harmonic{x0: 0.5, k: 4.2});
/// let table = TableComputation::new(potential, 1000, 2.0);
///
/// assert_eq!(table.energy(1.0), 0.525);
/// assert_eq!(table.energy(3.0), 0.0);
/// ```
pub fn new(potential: Box<PairPotential>, size: usize, max: f64) -> TableComputation {
let delta = max / (size as f64);
let mut energy_table = Vec::with_capacity(size);
let mut force_table = Vec::with_capacity(size);
for i in 0..size {
let r = i as f64 * delta;
energy_table.push(potential.energy(r));
force_table.push(potential.force(r));
}
TableComputation {
delta: delta,
cutoff: max,
energy_table: energy_table,
force_table: force_table,
potential: potential,
}
}
}
impl Computation for TableComputation {
fn co
|
self, r: f64) -> f64 {
debug_assert_eq!(self.energy_table.len(), self.force_table.len());
let bin = f64::floor(r / self.delta) as usize;
if bin < self.energy_table.len() - 1 {
let dx = r - (bin as f64) * self.delta;
let slope = (self.energy_table[bin + 1] - self.energy_table[bin]) / self.delta;
return self.energy_table[bin] + dx * slope;
} else {
return 0.0;
}
}
fn compute_force(&self, r: f64) -> f64 {
debug_assert_eq!(self.energy_table.len(), self.force_table.len());
let bin = f64::floor(r / self.delta) as usize;
if bin < self.force_table.len() - 1 {
let dx = r - (bin as f64) * self.delta;
let slope = (self.force_table[bin + 1] - self.force_table[bin]) / self.delta;
return self.force_table[bin] + dx * slope;
} else {
return 0.0;
}
}
}
impl PairPotential for TableComputation {
fn tail_energy(&self, cutoff: f64) -> f64 {
if cutoff > self.cutoff {
warn_once!("Cutoff in table computation ({}) is smaller than the \
pair interaction cutoff ({}) when computing tail correction. This \
may lead to wrong values for energy.", cutoff, self.cutoff);
}
return self.potential.tail_energy(cutoff);
}
fn tail_virial(&self, cutoff: f64) -> f64 {
if cutoff > self.cutoff {
warn_once!("Cutoff in table computation ({}) is smaller than the \
pair interaction cutoff ({}) when computing tail correction. This \
may lead to wrong values for pressure.", cutoff, self.cutoff);
}
return self.potential.tail_virial(cutoff);
}
}
#[cfg(test)]
mod test {
use super::*;
use energy::{Harmonic, LennardJones};
use energy::PairPotential;
#[test]
fn table() {
let table = TableComputation::new(
Box::new(Harmonic{k: 50.0, x0: 2.0}), 1000, 4.0
);
assert_eq!(table.compute_energy(2.5), 6.25);
assert_eq!(table.compute_force(2.5), -25.0);
// Check that the table is defined up to the cutoff value
let delta = 4.0 / 1000.0;
assert_eq!(table.compute_energy(4.0 - 2.0 * delta), 99.2016);
assert_eq!(table.compute_force(4.0 - 2.0 * delta), -99.6);
assert_eq!(table.compute_energy(4.0 - delta), 0.0);
assert_eq!(table.compute_force(4.0 - delta), 0.0);
assert_eq!(table.compute_energy(4.0), 0.0);
assert_eq!(table.compute_force(4.0), 0.0);
assert_eq!(table.compute_energy(4.1), 0.0);
assert_eq!(table.compute_force(4.1), 0.0);
let lj = LennardJones{epsilon: 50.0, sigma: 2.0};
let table = TableComputation::new(Box::new(lj.clone()), 1000, 4.0);
assert_eq!(table.tail_energy(5.0), lj.tail_energy(5.0));
assert_eq!(table.tail_virial(5.0), lj.tail_virial(5.0));
}
}
|
mpute_energy(&
|
identifier_name
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.