file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
parsing.rs | //! Utility functions for Header implementations.
use std::str;
use std::fmt::{self, Display};
/// Reads a single raw string when parsing a header
pub fn | <T: str::FromStr>(raw: &[Vec<u8>]) -> Option<T> {
if raw.len()!= 1 {
return None;
}
// we JUST checked that raw.len() == 1, so raw[0] WILL exist.
if let Ok(s) = str::from_utf8(& unsafe { raw.get_unchecked(0) }[..]) {
if s!= "" {
return str::FromStr::from_str(s).ok();
}
}
None
}
/// Reads a comma-delimited raw header into a Vec.
#[inline]
pub fn from_comma_delimited<T: str::FromStr>(raw: &[Vec<u8>]) -> Option<Vec<T>> {
if raw.len()!= 1 {
return None;
}
// we JUST checked that raw.len() == 1, so raw[0] WILL exist.
from_one_comma_delimited(& unsafe { raw.get_unchecked(0) }[..])
}
/// Reads a comma-delimited raw string into a Vec.
pub fn from_one_comma_delimited<T: str::FromStr>(raw: &[u8]) -> Option<Vec<T>> {
match str::from_utf8(raw) {
Ok(s) => {
Some(s
.split(',')
.filter_map(|x| match x.trim() {
"" => None,
y => Some(y)
})
.filter_map(|x| x.parse().ok())
.collect())
}
Err(_) => None
}
}
/// Format an array into a comma-delimited string.
pub fn fmt_comma_delimited<T: Display>(f: &mut fmt::Formatter, parts: &[T]) -> fmt::Result {
for (i, part) in parts.iter().enumerate() {
if i!= 0 {
try!(f.write_str(", "));
}
try!(Display::fmt(part, f));
}
Ok(())
}
| from_one_raw_str | identifier_name |
no_0977_squares_of_a_sorted_array.rs | struct Solution;
impl Solution {
// 官方题解中的思路
pub fn sorted_squares(a: Vec<i32>) -> Vec<i32> {
let mut ans = vec![0; a.len()];
if a.is_empty() {
return ans;
}
// 这个减少了一半的时间。
if a[0] >= 0 {
return a.into_iter().map(|v| v * v).collect();
}
let (mut l, mut r) = (0, a.len() - 1);
// 选择 l 和 r 中较大的那个,逆序放入 ans 中。
for pos in (0..a.len()).rev() {
if a[l].abs() > a[r].abs() {
ans[pos] = a[l].pow(2);
l += 1;
} else {
ans[pos] = a[r].pow(2);
if r == 0 {
break;
}
r -= 1; | pub fn sorted_squares1(a: Vec<i32>) -> Vec<i32> {
if a.is_empty() {
return Vec::new();
}
if a[0] >= 0 {
return a.into_iter().map(|v| v * v).collect();
}
let n = a.len();
// 第一个 >= 0 的索引,或者 a.len()
let r = a.iter().position(|&v| v >= 0);
let mut r = r.unwrap_or(n);
let mut l = r as isize - 1;
let mut ans = Vec::with_capacity(n);
while l >= 0 && r < a.len() {
if a[l as usize].abs() < a[r] {
ans.push(a[l as usize].pow(2));
l -= 1;
} else {
ans.push(a[r].pow(2));
r += 1;
}
}
if l >= 0 {
for i in (0..=l as usize).rev() {
ans.push(a[i].pow(2));
}
}
for i in r..n {
ans.push(a[i].pow(2));
}
ans
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_sorted_squares() {
assert_eq!(
Solution::sorted_squares(vec![-4, -1, 0, 3, 10]),
vec![0, 1, 9, 16, 100]
);
assert_eq!(
Solution::sorted_squares(vec![-7, -3, 2, 3, 11]),
vec![4, 9, 9, 49, 121]
);
}
}
|
}
}
ans
}
| conditional_block |
no_0977_squares_of_a_sorted_array.rs | struct Solution;
impl Solution {
// 官方题解中的思路
pub fn sorted_squares(a | Vec<i32> {
let mut ans = vec![0; a.len()];
if a.is_empty() {
return ans;
}
// 这个减少了一半的时间。
if a[0] >= 0 {
return a.into_iter().map(|v| v * v).collect();
}
let (mut l, mut r) = (0, a.len() - 1);
// 选择 l 和 r 中较大的那个,逆序放入 ans 中。
for pos in (0..a.len()).rev() {
if a[l].abs() > a[r].abs() {
ans[pos] = a[l].pow(2);
l += 1;
} else {
ans[pos] = a[r].pow(2);
if r == 0 {
break;
}
r -= 1;
}
}
ans
}
pub fn sorted_squares1(a: Vec<i32>) -> Vec<i32> {
if a.is_empty() {
return Vec::new();
}
if a[0] >= 0 {
return a.into_iter().map(|v| v * v).collect();
}
let n = a.len();
// 第一个 >= 0 的索引,或者 a.len()
let r = a.iter().position(|&v| v >= 0);
let mut r = r.unwrap_or(n);
let mut l = r as isize - 1;
let mut ans = Vec::with_capacity(n);
while l >= 0 && r < a.len() {
if a[l as usize].abs() < a[r] {
ans.push(a[l as usize].pow(2));
l -= 1;
} else {
ans.push(a[r].pow(2));
r += 1;
}
}
if l >= 0 {
for i in (0..=l as usize).rev() {
ans.push(a[i].pow(2));
}
}
for i in r..n {
ans.push(a[i].pow(2));
}
ans
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_sorted_squares() {
assert_eq!(
Solution::sorted_squares(vec![-4, -1, 0, 3, 10]),
vec![0, 1, 9, 16, 100]
);
assert_eq!(
Solution::sorted_squares(vec![-7, -3, 2, 3, 11]),
vec![4, 9, 9, 49, 121]
);
}
}
| : Vec<i32>) -> | identifier_name |
no_0977_squares_of_a_sorted_array.rs | struct Solution;
impl Solution {
// 官方题解中的思路
pub fn sorted_squares(a: Vec<i32>) -> Vec<i32> {
let mut ans = vec![0; a.len()];
if a.is_empty() {
return ans;
}
// 这个减少了一半的时间。
if a[0] >= 0 {
return a.into_iter().map(|v| v * v).collect();
}
let (mut l, mut r) = (0, a.len() - 1);
// 选择 l 和 r 中较大的那个,逆序放入 ans 中。
for pos in (0..a.len()).rev() {
if a[l].abs() > a[r].abs() {
ans[pos] = a[l].pow(2);
l += 1;
} else {
ans[pos] = a[r].pow(2);
if r == 0 {
break;
}
r -= 1;
}
}
ans
} | pub fn sorted_squares1(a: Vec<i32>) -> Vec<i32> {
if a.is_empty() {
return Vec::new();
}
if a[0] >= 0 {
return a.into_iter().map(|v| v * v).collect();
}
let n = a.len();
// 第一个 >= 0 的索引,或者 a.len()
let r = a.iter().position(|&v| v >= 0);
let mut r = r.unwrap_or(n);
let mut l = r as isize - 1;
let mut ans = Vec::with_capacity(n);
while l >= 0 && r < a.len() {
if a[l as usize].abs() < a[r] {
ans.push(a[l as usize].pow(2));
l -= 1;
} else {
ans.push(a[r].pow(2));
r += 1;
}
}
if l >= 0 {
for i in (0..=l as usize).rev() {
ans.push(a[i].pow(2));
}
}
for i in r..n {
ans.push(a[i].pow(2));
}
ans
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_sorted_squares() {
assert_eq!(
Solution::sorted_squares(vec![-4, -1, 0, 3, 10]),
vec![0, 1, 9, 16, 100]
);
assert_eq!(
Solution::sorted_squares(vec![-7, -3, 2, 3, 11]),
vec![4, 9, 9, 49, 121]
);
}
} | random_line_split |
|
logger.rs | //! This module is a trimmed-down copy of rtx_core::util::logger,
//! which is still waiting to get released as a crate...
//! maybe there is a simple logger crate that achieves this exact behavior?
use ansi_term::Colour::{Green, Red, White, Yellow};
use ansi_term::Style;
use chrono::Local;
use log::max_level;
use log::{Level, LevelFilter, Metadata, Record, SetLoggerError};
struct RtxLogger;
static LOGGER: RtxLogger = RtxLogger;
/// Convenient printing to STDERR (with \n)
#[macro_export]
macro_rules! println_stderr(
($($arg:tt)*) => ({
use std::io::Write;
match writeln!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
})
);
/// Convenient printing to STDERR
#[macro_export]
macro_rules! print_stderr(
($($arg:tt)*) => ({
use std::io::Write;
match write!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
})
);
impl log::Log for RtxLogger {
fn | (&self, metadata: &Metadata) -> bool {
metadata.level() <= max_level()
}
fn log(&self, record: &Record) {
if self.enabled(record.metadata()) {
let record_target = record.target();
let details = record.args();
let category_object = if record_target.is_empty() {
"" // "unknown:unknown"???
} else {
record_target
};
// Following the reporting syntax at: http://dlmf.nist.gov/LaTeXML/manual/errorcodes/
// let severity = if category_object.starts_with("Fatal:") {
// ""
// } else {
// match record.level() {
// Level::Info => "Info",
// Level::Warn => "Warn",
// Level::Error => "Error",
// Level::Debug => "Debug",
// Level::Trace => "Trace",
// }
// };
let message = format!("{}\t", category_object);
let painted_message = match record.level() {
Level::Info => Green.paint(message),
Level::Warn => Yellow.paint(message),
Level::Error => Red.paint(message),
Level::Debug => Style::default().paint(message),
_ => White.paint(message),
}
.to_string()
+ &details.to_string();
println_stderr!(
"\r[{}] {}",
Local::now().format("%Y-%m-%d %H:%M:%S"),
painted_message
);
}
}
fn flush(&self) {}
}
/// Initialize the logger with an appropriate level of verbosity
pub fn init(level: LevelFilter) -> Result<(), SetLoggerError> {
log::set_logger(&LOGGER).unwrap();
log::set_max_level(level);
Ok(())
}
| enabled | identifier_name |
logger.rs | //! This module is a trimmed-down copy of rtx_core::util::logger,
//! which is still waiting to get released as a crate...
//! maybe there is a simple logger crate that achieves this exact behavior?
use ansi_term::Colour::{Green, Red, White, Yellow};
use ansi_term::Style;
use chrono::Local;
use log::max_level;
use log::{Level, LevelFilter, Metadata, Record, SetLoggerError};
struct RtxLogger;
static LOGGER: RtxLogger = RtxLogger;
/// Convenient printing to STDERR (with \n)
#[macro_export]
macro_rules! println_stderr(
($($arg:tt)*) => ({
use std::io::Write;
match writeln!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
})
);
/// Convenient printing to STDERR
#[macro_export]
macro_rules! print_stderr(
($($arg:tt)*) => ({
use std::io::Write;
match write!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
})
);
impl log::Log for RtxLogger {
fn enabled(&self, metadata: &Metadata) -> bool {
metadata.level() <= max_level()
}
fn log(&self, record: &Record) {
if self.enabled(record.metadata()) {
let record_target = record.target();
let details = record.args();
let category_object = if record_target.is_empty() {
"" // "unknown:unknown"???
} else {
record_target
};
// Following the reporting syntax at: http://dlmf.nist.gov/LaTeXML/manual/errorcodes/
// let severity = if category_object.starts_with("Fatal:") {
// ""
// } else {
// match record.level() {
// Level::Info => "Info",
// Level::Warn => "Warn",
// Level::Error => "Error",
// Level::Debug => "Debug",
// Level::Trace => "Trace",
// }
// };
let message = format!("{}\t", category_object); |
let painted_message = match record.level() {
Level::Info => Green.paint(message),
Level::Warn => Yellow.paint(message),
Level::Error => Red.paint(message),
Level::Debug => Style::default().paint(message),
_ => White.paint(message),
}
.to_string()
+ &details.to_string();
println_stderr!(
"\r[{}] {}",
Local::now().format("%Y-%m-%d %H:%M:%S"),
painted_message
);
}
}
fn flush(&self) {}
}
/// Initialize the logger with an appropriate level of verbosity
pub fn init(level: LevelFilter) -> Result<(), SetLoggerError> {
log::set_logger(&LOGGER).unwrap();
log::set_max_level(level);
Ok(())
} | random_line_split |
|
logger.rs | //! This module is a trimmed-down copy of rtx_core::util::logger,
//! which is still waiting to get released as a crate...
//! maybe there is a simple logger crate that achieves this exact behavior?
use ansi_term::Colour::{Green, Red, White, Yellow};
use ansi_term::Style;
use chrono::Local;
use log::max_level;
use log::{Level, LevelFilter, Metadata, Record, SetLoggerError};
struct RtxLogger;
static LOGGER: RtxLogger = RtxLogger;
/// Convenient printing to STDERR (with \n)
#[macro_export]
macro_rules! println_stderr(
($($arg:tt)*) => ({
use std::io::Write;
match writeln!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
})
);
/// Convenient printing to STDERR
#[macro_export]
macro_rules! print_stderr(
($($arg:tt)*) => ({
use std::io::Write;
match write!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
})
);
impl log::Log for RtxLogger {
fn enabled(&self, metadata: &Metadata) -> bool |
fn log(&self, record: &Record) {
if self.enabled(record.metadata()) {
let record_target = record.target();
let details = record.args();
let category_object = if record_target.is_empty() {
"" // "unknown:unknown"???
} else {
record_target
};
// Following the reporting syntax at: http://dlmf.nist.gov/LaTeXML/manual/errorcodes/
// let severity = if category_object.starts_with("Fatal:") {
// ""
// } else {
// match record.level() {
// Level::Info => "Info",
// Level::Warn => "Warn",
// Level::Error => "Error",
// Level::Debug => "Debug",
// Level::Trace => "Trace",
// }
// };
let message = format!("{}\t", category_object);
let painted_message = match record.level() {
Level::Info => Green.paint(message),
Level::Warn => Yellow.paint(message),
Level::Error => Red.paint(message),
Level::Debug => Style::default().paint(message),
_ => White.paint(message),
}
.to_string()
+ &details.to_string();
println_stderr!(
"\r[{}] {}",
Local::now().format("%Y-%m-%d %H:%M:%S"),
painted_message
);
}
}
fn flush(&self) {}
}
/// Initialize the logger with an appropriate level of verbosity
pub fn init(level: LevelFilter) -> Result<(), SetLoggerError> {
log::set_logger(&LOGGER).unwrap();
log::set_max_level(level);
Ok(())
}
| {
metadata.level() <= max_level()
} | identifier_body |
main.rs | //! See <https://github.com/matklad/cargo-xtask/>.
//!
//! This binary is integrated into the `cargo` command line by using an alias in
//! `.cargo/config`. Run commands as `cargo xtask [command]`.
#![allow(clippy::exhaustive_structs)]
use std::path::PathBuf; | mod cargo;
mod ci;
mod flags;
#[cfg(feature = "default")]
mod release;
#[cfg(feature = "default")]
mod util;
use ci::CiTask;
#[cfg(feature = "default")]
use release::ReleaseTask;
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
fn main() {
if let Err(e) = try_main() {
eprintln!("{}", e);
std::process::exit(-1);
}
}
fn try_main() -> Result<()> {
let flags = flags::Xtask::from_env()?;
match flags.subcommand {
flags::XtaskCmd::Help(_) => {
println!("{}", flags::Xtask::HELP);
Ok(())
}
flags::XtaskCmd::Ci(ci) => {
let task = CiTask::new(ci.version)?;
task.run()
}
#[cfg(feature = "default")]
flags::XtaskCmd::Release(cmd) => {
let mut task = ReleaseTask::new(cmd.name, cmd.version)?;
task.run()
}
#[cfg(feature = "default")]
flags::XtaskCmd::Publish(cmd) => {
let mut task = ReleaseTask::new(cmd.name, cmd.version)?;
task.run()
}
#[cfg(not(feature = "default"))]
_ => {
Err("This command is only available when xtask is built with default features.".into())
}
}
}
/// The metadata of a cargo workspace.
#[derive(Clone, Debug, Deserialize)]
struct Metadata {
pub workspace_root: PathBuf,
#[cfg(feature = "default")]
pub packages: Vec<cargo::Package>,
}
impl Metadata {
/// Load a new `Metadata` from the command line.
pub fn load() -> Result<Metadata> {
let metadata_json = cmd!("cargo metadata --no-deps --format-version 1").read()?;
Ok(from_json_str(&metadata_json)?)
}
}
#[cfg(feature = "default")]
#[derive(Debug, Deserialize)]
struct Config {
/// Credentials to authenticate to GitHub.
github: GithubConfig,
}
#[cfg(feature = "default")]
impl Config {
/// Load a new `Config` from `config.toml`.
fn load() -> Result<Self> {
use std::{env, path::Path};
let path = Path::new(&env!("CARGO_MANIFEST_DIR")).join("config.toml");
let config = xshell::read_file(path)?;
Ok(toml::from_str(&config)?)
}
}
#[cfg(feature = "default")]
#[derive(Debug, Deserialize)]
struct GithubConfig {
/// The username to use for authentication.
user: String,
/// The personal access token to use for authentication.
token: String,
}
#[macro_export]
macro_rules! cmd {
($cmd:tt) => {
xshell::cmd!($cmd).echo_cmd(false)
};
} |
use serde::Deserialize;
use serde_json::from_str as from_json_str;
#[cfg(feature = "default")] | random_line_split |
main.rs | //! See <https://github.com/matklad/cargo-xtask/>.
//!
//! This binary is integrated into the `cargo` command line by using an alias in
//! `.cargo/config`. Run commands as `cargo xtask [command]`.
#![allow(clippy::exhaustive_structs)]
use std::path::PathBuf;
use serde::Deserialize;
use serde_json::from_str as from_json_str;
#[cfg(feature = "default")]
mod cargo;
mod ci;
mod flags;
#[cfg(feature = "default")]
mod release;
#[cfg(feature = "default")]
mod util;
use ci::CiTask;
#[cfg(feature = "default")]
use release::ReleaseTask;
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
fn main() {
if let Err(e) = try_main() {
eprintln!("{}", e);
std::process::exit(-1);
}
}
fn try_main() -> Result<()> {
let flags = flags::Xtask::from_env()?;
match flags.subcommand {
flags::XtaskCmd::Help(_) => {
println!("{}", flags::Xtask::HELP);
Ok(())
}
flags::XtaskCmd::Ci(ci) => {
let task = CiTask::new(ci.version)?;
task.run()
}
#[cfg(feature = "default")]
flags::XtaskCmd::Release(cmd) => {
let mut task = ReleaseTask::new(cmd.name, cmd.version)?;
task.run()
}
#[cfg(feature = "default")]
flags::XtaskCmd::Publish(cmd) => {
let mut task = ReleaseTask::new(cmd.name, cmd.version)?;
task.run()
}
#[cfg(not(feature = "default"))]
_ => {
Err("This command is only available when xtask is built with default features.".into())
}
}
}
/// The metadata of a cargo workspace.
#[derive(Clone, Debug, Deserialize)]
struct Metadata {
pub workspace_root: PathBuf,
#[cfg(feature = "default")]
pub packages: Vec<cargo::Package>,
}
impl Metadata {
/// Load a new `Metadata` from the command line.
pub fn load() -> Result<Metadata> {
let metadata_json = cmd!("cargo metadata --no-deps --format-version 1").read()?;
Ok(from_json_str(&metadata_json)?)
}
}
#[cfg(feature = "default")]
#[derive(Debug, Deserialize)]
struct Config {
/// Credentials to authenticate to GitHub.
github: GithubConfig,
}
#[cfg(feature = "default")]
impl Config {
/// Load a new `Config` from `config.toml`.
fn | () -> Result<Self> {
use std::{env, path::Path};
let path = Path::new(&env!("CARGO_MANIFEST_DIR")).join("config.toml");
let config = xshell::read_file(path)?;
Ok(toml::from_str(&config)?)
}
}
#[cfg(feature = "default")]
#[derive(Debug, Deserialize)]
struct GithubConfig {
/// The username to use for authentication.
user: String,
/// The personal access token to use for authentication.
token: String,
}
#[macro_export]
macro_rules! cmd {
($cmd:tt) => {
xshell::cmd!($cmd).echo_cmd(false)
};
}
| load | identifier_name |
main.rs | //! See <https://github.com/matklad/cargo-xtask/>.
//!
//! This binary is integrated into the `cargo` command line by using an alias in
//! `.cargo/config`. Run commands as `cargo xtask [command]`.
#![allow(clippy::exhaustive_structs)]
use std::path::PathBuf;
use serde::Deserialize;
use serde_json::from_str as from_json_str;
#[cfg(feature = "default")]
mod cargo;
mod ci;
mod flags;
#[cfg(feature = "default")]
mod release;
#[cfg(feature = "default")]
mod util;
use ci::CiTask;
#[cfg(feature = "default")]
use release::ReleaseTask;
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
fn main() {
if let Err(e) = try_main() |
}
fn try_main() -> Result<()> {
let flags = flags::Xtask::from_env()?;
match flags.subcommand {
flags::XtaskCmd::Help(_) => {
println!("{}", flags::Xtask::HELP);
Ok(())
}
flags::XtaskCmd::Ci(ci) => {
let task = CiTask::new(ci.version)?;
task.run()
}
#[cfg(feature = "default")]
flags::XtaskCmd::Release(cmd) => {
let mut task = ReleaseTask::new(cmd.name, cmd.version)?;
task.run()
}
#[cfg(feature = "default")]
flags::XtaskCmd::Publish(cmd) => {
let mut task = ReleaseTask::new(cmd.name, cmd.version)?;
task.run()
}
#[cfg(not(feature = "default"))]
_ => {
Err("This command is only available when xtask is built with default features.".into())
}
}
}
/// The metadata of a cargo workspace.
#[derive(Clone, Debug, Deserialize)]
struct Metadata {
pub workspace_root: PathBuf,
#[cfg(feature = "default")]
pub packages: Vec<cargo::Package>,
}
impl Metadata {
/// Load a new `Metadata` from the command line.
pub fn load() -> Result<Metadata> {
let metadata_json = cmd!("cargo metadata --no-deps --format-version 1").read()?;
Ok(from_json_str(&metadata_json)?)
}
}
#[cfg(feature = "default")]
#[derive(Debug, Deserialize)]
struct Config {
/// Credentials to authenticate to GitHub.
github: GithubConfig,
}
#[cfg(feature = "default")]
impl Config {
/// Load a new `Config` from `config.toml`.
fn load() -> Result<Self> {
use std::{env, path::Path};
let path = Path::new(&env!("CARGO_MANIFEST_DIR")).join("config.toml");
let config = xshell::read_file(path)?;
Ok(toml::from_str(&config)?)
}
}
#[cfg(feature = "default")]
#[derive(Debug, Deserialize)]
struct GithubConfig {
/// The username to use for authentication.
user: String,
/// The personal access token to use for authentication.
token: String,
}
#[macro_export]
macro_rules! cmd {
($cmd:tt) => {
xshell::cmd!($cmd).echo_cmd(false)
};
}
| {
eprintln!("{}", e);
std::process::exit(-1);
} | conditional_block |
process.rs | (arr: &[u8]) -> i32 {
let a = arr[0] as u32;
let b = arr[1] as u32;
let c = arr[2] as u32;
let d = arr[3] as u32;
((a << 24) | (b << 16) | (c << 8) | (d << 0)) as i32
}
let p = Process{ pid: pid };
drop(output);
let mut bytes = [0; 8];
return match input.read(&mut bytes) {
Ok(8) => {
assert!(combine(CLOEXEC_MSG_FOOTER) == combine(&bytes[4.. 8]),
"Validation on the CLOEXEC pipe failed: {:?}", bytes);
let errno = combine(&bytes[0.. 4]);
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
Err(super::decode_error(errno))
}
Err(ref e) if e.kind == EndOfFile => Ok(p),
Err(e) => {
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
panic!("the CLOEXEC pipe failed: {:?}", e)
},
Ok(..) => { // pipe I/O up to PIPE_BUF bytes should be atomic
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
panic!("short read on the CLOEXEC pipe")
}
};
}
// And at this point we've reached a special time in the life of the
// child. The child must now be considered hamstrung and unable to
// do anything other than syscalls really. Consider the following
// scenario:
//
// 1. Thread A of process 1 grabs the malloc() mutex
// 2. Thread B of process 1 forks(), creating thread C
// 3. Thread C of process 2 then attempts to malloc()
// 4. The memory of process 2 is the same as the memory of
// process 1, so the mutex is locked.
//
// This situation looks a lot like deadlock, right? It turns out
// that this is what pthread_atfork() takes care of, which is
// presumably implemented across platforms. The first thing that
// threads to *before* forking is to do things like grab the malloc
// mutex, and then after the fork they unlock it.
//
// Despite this information, libnative's spawn has been witnessed to
// deadlock on both OSX and FreeBSD. I'm not entirely sure why, but
// all collected backtraces point at malloc/free traffic in the
// child spawned process.
//
// For this reason, the block of code below should contain 0
// invocations of either malloc of free (or their related friends).
//
// As an example of not having malloc/free traffic, we don't close
// this file descriptor by dropping the FileDesc (which contains an
// allocation). Instead we just close it manually. This will never
// have the drop glue anyway because this code never returns (the
// child will either exec() or invoke libc::exit)
let _ = libc::close(input.fd());
fn fail(output: &mut FileDesc) ->! {
let errno = sys::os::errno() as u32;
let bytes = [
(errno >> 24) as u8,
(errno >> 16) as u8,
(errno >> 8) as u8,
(errno >> 0) as u8,
CLOEXEC_MSG_FOOTER[0], CLOEXEC_MSG_FOOTER[1],
CLOEXEC_MSG_FOOTER[2], CLOEXEC_MSG_FOOTER[3]
];
// pipe I/O up to PIPE_BUF bytes should be atomic
assert!(output.write(&bytes).is_ok());
unsafe { libc::_exit(1) }
}
rustrt::rust_unset_sigprocmask();
// If a stdio file descriptor is set to be ignored (via a -1 file
// descriptor), then we don't actually close it, but rather open
// up /dev/null into that file descriptor. Otherwise, the first file
// descriptor opened up in the child would be numbered as one of the
// stdio file descriptors, which is likely to wreak havoc.
let setup = |src: Option<P>, dst: c_int| {
let src = match src {
None => {
let flags = if dst == libc::STDIN_FILENO {
libc::O_RDONLY
} else {
libc::O_RDWR
};
libc::open(devnull.as_ptr() as *const _, flags, 0)
}
Some(obj) => {
let fd = obj.as_inner().fd();
// Leak the memory and the file descriptor. We're in the
// child now an all our resources are going to be
// cleaned up very soon
mem::forget(obj);
fd
}
};
src!= -1 && retry(|| dup2(src, dst))!= -1
};
if!setup(in_fd, libc::STDIN_FILENO) { fail(&mut output) }
if!setup(out_fd, libc::STDOUT_FILENO) { fail(&mut output) }
if!setup(err_fd, libc::STDERR_FILENO) { fail(&mut output) }
// close all other fds
for fd in (3..getdtablesize()).rev() {
if fd!= output.fd() {
let _ = close(fd as c_int);
}
}
match cfg.gid() {
Some(u) => {
if libc::setgid(u as libc::gid_t)!= 0 {
fail(&mut output);
}
}
None => {}
}
match cfg.uid() {
Some(u) => {
// When dropping privileges from root, the `setgroups` call
// will remove any extraneous groups. If we don't call this,
// then even though our uid has dropped, we may still have
// groups that enable us to do super-user things. This will
// fail if we aren't root, so don't bother checking the
// return value, this is just done as an optimistic
// privilege dropping function.
extern {
fn setgroups(ngroups: libc::c_int,
ptr: *const libc::c_void) -> libc::c_int;
}
let _ = setgroups(0, ptr::null());
if libc::setuid(u as libc::uid_t)!= 0 {
fail(&mut output);
}
}
None => {}
}
if cfg.detach() {
// Don't check the error of setsid because it fails if we're the
// process leader already. We just forked so it shouldn't return
// error, but ignore it anyway.
let _ = libc::setsid();
}
if!dirp.is_null() && chdir(dirp) == -1 {
fail(&mut output);
}
if!envp.is_null() {
*sys::os::environ() = envp as *const _;
}
let _ = execvp(*argv, argv as *mut _);
fail(&mut output);
})
})
}
pub fn wait(&self, deadline: u64) -> IoResult<ProcessExit> {
use cmp;
use sync::mpsc::TryRecvError;
static mut WRITE_FD: libc::c_int = 0;
let mut status = 0 as c_int;
if deadline == 0 {
return match retry(|| unsafe { c::waitpid(self.pid, &mut status, 0) }) {
-1 => panic!("unknown waitpid error: {:?}", super::last_error()),
_ => Ok(translate_status(status)),
}
}
// On unix, wait() and its friends have no timeout parameters, so there is
// no way to time out a thread in wait(). From some googling and some
// thinking, it appears that there are a few ways to handle timeouts in
// wait(), but the only real reasonable one for a multi-threaded program is
// to listen for SIGCHLD.
//
// With this in mind, the waiting mechanism with a timeout barely uses
// waitpid() at all. There are a few times that waitpid() is invoked with
// WNOHANG, but otherwise all the necessary blocking is done by waiting for
// a SIGCHLD to arrive (and that blocking has a timeout). Note, however,
// that waitpid() is still used to actually reap the child.
//
// Signal handling is super tricky in general, and this is no exception. Due
// to the async nature of SIGCHLD, we use the self-pipe trick to transmit
// data out of the signal handler to the rest of the application. The first
// idea would be to have each thread waiting with a timeout to read this
// output file descriptor, but a write() is akin to a signal(), not a
// broadcast(), so it would only wake up one thread, and possibly the wrong
// thread. Hence a helper thread is used.
//
// The helper thread here is responsible for farming requests for a
// waitpid() with a timeout, and then processing all of the wait requests.
// By guaranteeing that only this helper thread is reading half of the
// self-pipe, we're sure that we'll never lose a SIGCHLD. This helper thread
// is also responsible for select() to wait for incoming messages or
// incoming SIGCHLD messages, along with passing an appropriate timeout to
// select() to wake things up as necessary.
//
// The ordering of the following statements is also very purposeful. First,
// we must be guaranteed that the helper thread is booted and available to
// receive SIGCHLD signals, and then we must also ensure that we do a
// nonblocking waitpid() at least once before we go ask the sigchld helper.
// This prevents the race where the child exits, we boot the helper, and
// then we ask for the child's exit status (never seeing a sigchld).
//
// The actual communication between the helper thread and this thread is
// quite simple, just a channel moving data around.
HELPER.boot(register_sigchld, waitpid_helper);
match self.try_wait() {
Some(ret) => return Ok(ret),
None => {}
}
let (tx, rx) = channel();
HELPER.send(NewChild(self.pid, tx, deadline));
return match rx.recv() {
Ok(e) => Ok(e),
Err(..) => Err(timeout("wait timed out")),
};
// Register a new SIGCHLD handler, returning the reading half of the
// self-pipe plus the old handler registered (return value of sigaction).
//
// Be sure to set up the self-pipe first because as soon as we register a
// handler we're going to start receiving signals.
fn register_sigchld() -> (libc::c_int, c::sigaction) {
unsafe {
let mut pipes = [0; 2];
assert_eq!(libc::pipe(pipes.as_mut_ptr()), 0);
set_nonblocking(pipes[0], true);
set_nonblocking(pipes[1], true);
WRITE_FD = pipes[1];
let mut old: c::sigaction = mem::zeroed();
let mut new: c::sigaction = mem::zeroed();
new.sa_handler = sigchld_handler;
new.sa_flags = c::SA_NOCLDSTOP;
assert_eq!(c::sigaction(c::SIGCHLD, &new, &mut old), 0);
(pipes[0], old)
}
}
// Helper thread for processing SIGCHLD messages
fn waitpid_helper(input: libc::c_int,
messages: Receiver<Req>,
(read_fd, old): (libc::c_int, c::sigaction)) {
set_nonblocking(input, true);
let mut set: c::fd_set = unsafe { mem::zeroed() };
let mut tv: libc::timeval;
let mut active = Vec::<(libc::pid_t, Sender<ProcessExit>, u64)>::new();
let max = cmp::max(input, read_fd) + 1;
'outer: loop {
// Figure out the timeout of our syscall-to-happen. If we're waiting
// for some processes, then they'll have a timeout, otherwise we
// wait indefinitely for a message to arrive.
//
// FIXME: sure would be nice to not have to scan the entire array
let min = active.iter().map(|a| a.2).enumerate().min_by(|p| {
p.1
});
let (p, idx) = match min {
Some((idx, deadline)) => {
let now = sys::timer::now();
let ms = if now < deadline {deadline - now} else {0};
tv = ms_to_timeval(ms);
(&mut tv as *mut _, idx)
}
None => (ptr::null_mut(), -1),
};
// Wait for something to happen
c::fd_set(&mut set, input);
c::fd_set(&mut set, read_fd);
match unsafe { c::select(max, &mut set, ptr::null_mut(),
ptr::null_mut(), p) } {
// interrupted, retry
-1 if os::errno() == libc::EINTR as i32 => continue,
// We read something, break out and process
1 | 2 => {}
// Timeout, the pending request is removed
0 => {
drop(active.remove(idx));
continue
}
n => panic!("error in select {:?} ({:?})", os::errno(), n),
}
// Process any pending messages
if drain(input) {
loop {
match messages.try_recv() {
Ok(NewChild(pid, tx, deadline)) => {
active.push((pid, tx, deadline));
}
// Once we've been disconnected it means the main
// thread is exiting (at_exit has run). We could
// still have active waiter for other threads, so
// we're just going to drop them all on the floor.
// This means that they won't receive a "you're
// done" message in which case they'll be considered
// as timed out, but more generally errors will
// start propagating.
Err(TryRecvError::Disconnected) => {
| combine | identifier_name |
|
process.rs |
pub fn spawn<K, V, C, P>(cfg: &C, in_fd: Option<P>,
out_fd: Option<P>, err_fd: Option<P>)
-> IoResult<Process>
where C: ProcessConfig<K, V>, P: AsInner<FileDesc>,
K: BytesContainer + Eq + Hash, V: BytesContainer
{
use libc::funcs::posix88::unistd::{fork, dup2, close, chdir, execvp};
mod rustrt {
extern {
pub fn rust_unset_sigprocmask();
}
}
unsafe fn set_cloexec(fd: c_int) {
let ret = c::ioctl(fd, c::FIOCLEX);
assert_eq!(ret, 0);
}
#[cfg(all(target_os = "android", target_arch = "aarch64"))]
unsafe fn getdtablesize() -> c_int {
libc::sysconf(libc::consts::os::sysconf::_SC_OPEN_MAX) as c_int
}
#[cfg(not(all(target_os = "android", target_arch = "aarch64")))]
unsafe fn getdtablesize() -> c_int {
libc::funcs::bsd44::getdtablesize()
}
let dirp = cfg.cwd().map(|c| c.as_ptr()).unwrap_or(ptr::null());
// temporary until unboxed closures land
let cfg = unsafe {
mem::transmute::<&ProcessConfig<K,V>,&'static ProcessConfig<K,V>>(cfg)
};
with_envp(cfg.env(), move|envp: *const c_void| {
with_argv(cfg.program(), cfg.args(), move|argv: *const *const libc::c_char| unsafe {
let (input, mut output) = try!(sys::os::pipe());
// We may use this in the child, so perform allocations before the
// fork
let devnull = b"/dev/null\0";
set_cloexec(output.fd());
let pid = fork();
if pid < 0 {
return Err(super::last_error())
} else if pid > 0 {
#[inline]
fn combine(arr: &[u8]) -> i32 {
let a = arr[0] as u32;
let b = arr[1] as u32;
let c = arr[2] as u32;
let d = arr[3] as u32;
((a << 24) | (b << 16) | (c << 8) | (d << 0)) as i32
}
let p = Process{ pid: pid };
drop(output);
let mut bytes = [0; 8];
return match input.read(&mut bytes) {
Ok(8) => {
assert!(combine(CLOEXEC_MSG_FOOTER) == combine(&bytes[4.. 8]),
"Validation on the CLOEXEC pipe failed: {:?}", bytes);
let errno = combine(&bytes[0.. 4]);
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
Err(super::decode_error(errno))
}
Err(ref e) if e.kind == EndOfFile => Ok(p),
Err(e) => {
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
panic!("the CLOEXEC pipe failed: {:?}", e)
},
Ok(..) => { // pipe I/O up to PIPE_BUF bytes should be atomic
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
panic!("short read on the CLOEXEC pipe")
}
};
}
// And at this point we've reached a special time in the life of the
// child. The child must now be considered hamstrung and unable to
// do anything other than syscalls really. Consider the following
// scenario:
//
// 1. Thread A of process 1 grabs the malloc() mutex
// 2. Thread B of process 1 forks(), creating thread C
// 3. Thread C of process 2 then attempts to malloc()
// 4. The memory of process 2 is the same as the memory of
// process 1, so the mutex is locked.
//
// This situation looks a lot like deadlock, right? It turns out
// that this is what pthread_atfork() takes care of, which is
// presumably implemented across platforms. The first thing that
// threads to *before* forking is to do things like grab the malloc
// mutex, and then after the fork they unlock it.
//
// Despite this information, libnative's spawn has been witnessed to
// deadlock on both OSX and FreeBSD. I'm not entirely sure why, but
// all collected backtraces point at malloc/free traffic in the
// child spawned process.
//
// For this reason, the block of code below should contain 0
// invocations of either malloc of free (or their related friends).
//
// As an example of not having malloc/free traffic, we don't close
// this file descriptor by dropping the FileDesc (which contains an
// allocation). Instead we just close it manually. This will never
// have the drop glue anyway because this code never returns (the
// child will either exec() or invoke libc::exit)
let _ = libc::close(input.fd());
fn fail(output: &mut FileDesc) ->! {
let errno = sys::os::errno() as u32;
let bytes = [
(errno >> 24) as u8,
(errno >> 16) as u8,
(errno >> 8) as u8,
(errno >> 0) as u8,
CLOEXEC_MSG_FOOTER[0], CLOEXEC_MSG_FOOTER[1],
CLOEXEC_MSG_FOOTER[2], CLOEXEC_MSG_FOOTER[3]
];
// pipe I/O up to PIPE_BUF bytes should be atomic
assert!(output.write(&bytes).is_ok());
unsafe { libc::_exit(1) }
}
rustrt::rust_unset_sigprocmask();
// If a stdio file descriptor is set to be ignored (via a -1 file
// descriptor), then we don't actually close it, but rather open
// up /dev/null into that file descriptor. Otherwise, the first file
// descriptor opened up in the child would be numbered as one of the
// stdio file descriptors, which is likely to wreak havoc.
let setup = |src: Option<P>, dst: c_int| {
let src = match src {
None => {
let flags = if dst == libc::STDIN_FILENO {
libc::O_RDONLY
} else {
libc::O_RDWR
};
libc::open(devnull.as_ptr() as *const _, flags, 0)
}
Some(obj) => {
let fd = obj.as_inner().fd();
// Leak the memory and the file descriptor. We're in the
// child now an all our resources are going to be
// cleaned up very soon
mem::forget(obj);
fd
}
};
src!= -1 && retry(|| dup2(src, dst))!= -1
};
if!setup(in_fd, libc::STDIN_FILENO) { fail(&mut output) }
if!setup(out_fd, libc::STDOUT_FILENO) { fail(&mut output) }
if!setup(err_fd, libc::STDERR_FILENO) { fail(&mut output) }
// close all other fds
for fd in (3..getdtablesize()).rev() {
if fd!= output.fd() {
let _ = close(fd as c_int);
}
}
match cfg.gid() {
Some(u) => {
if libc::setgid(u as libc::gid_t)!= 0 {
fail(&mut output);
}
}
None => {}
}
match cfg.uid() {
Some(u) => {
// When dropping privileges from root, the `setgroups` call
// will remove any extraneous groups. If we don't call this,
// then even though our uid has dropped, we may still have
// groups that enable us to do super-user things. This will
// fail if we aren't root, so don't bother checking the
// return value, this is just done as an optimistic
// privilege dropping function.
extern {
fn setgroups(ngroups: libc::c_int,
ptr: *const libc::c_void) -> libc::c_int;
}
let _ = setgroups(0, ptr::null());
if libc::setuid(u as libc::uid_t)!= 0 {
fail(&mut output);
}
}
None => {}
}
if cfg.detach() {
// Don't check the error of setsid because it fails if we're the
// process leader already. We just forked so it shouldn't return
// error, but ignore it anyway.
let _ = libc::setsid();
}
if!dirp.is_null() && chdir(dirp) == -1 {
fail(&mut output);
}
if!envp.is_null() {
*sys::os::environ() = envp as *const _;
}
let _ = execvp(*argv, argv as *mut _);
fail(&mut output);
})
})
}
pub fn wait(&self, deadline: u64) -> IoResult<ProcessExit> {
use cmp;
use sync::mpsc::TryRecvError;
static mut WRITE_FD: libc::c_int = 0;
let mut status = 0 as c_int;
if deadline == 0 {
return match retry(|| unsafe { c::waitpid(self.pid, &mut status, 0) }) {
-1 => panic!("unknown waitpid error: {:?}", super::last_error()),
_ => Ok(translate_status(status)),
}
}
// On unix, wait() and its friends have no timeout parameters, so there is
// no way to time out a thread in wait(). From some googling and some
// thinking, it appears that there are a few ways to handle timeouts in
// wait(), but the only real reasonable one for a multi-threaded program is
// to listen for SIGCHLD.
//
// With this in mind, the waiting mechanism with a timeout barely uses
// waitpid() at all. There are a few times that waitpid() is invoked with
// WNOHANG, but otherwise all the necessary blocking is done by waiting for
// a SIGCHLD to arrive (and that blocking has a timeout). Note, however,
// that waitpid() is still used to actually reap the child.
//
// Signal handling is super tricky in general, and this is no exception. Due
// to the async nature of SIGCHLD, we use the self-pipe trick to transmit
// data out of the signal handler to the rest of the application. The first
// idea would be to have each thread waiting with a timeout to read this
// output file descriptor, but a write() is akin to a signal(), not a
// broadcast(), so it would only wake up one thread, and possibly the wrong
// thread. Hence a helper thread is used.
//
// The helper thread here is responsible for farming requests for a
// waitpid() with a timeout, and then processing all of the wait requests.
// By guaranteeing that only this helper thread is reading half of the
// self-pipe, we're sure that we'll never lose a SIGCHLD. This helper thread
// is also responsible for select() to wait for incoming messages or
// incoming SIGCHLD messages, along with passing an appropriate timeout to
// select() to wake things up as necessary.
//
// The ordering of the following statements is also very purposeful. First,
// we must be guaranteed that the helper thread is booted and available to
// receive SIGCHLD signals, and then we must also ensure that we do a
// nonblocking waitpid() at least once before we go ask the sigchld helper.
// This prevents the race where the child exits, we boot the helper, and
// then we ask for the child's exit status (never seeing a sigchld).
//
// The actual communication between the helper thread and this thread is
// quite simple, just a channel moving data around.
HELPER.boot(register_sigchld, waitpid_helper);
match self.try_wait() {
Some(ret) => return Ok(ret),
None => {}
}
let (tx, rx) = channel();
HELPER.send(NewChild(self.pid, tx, deadline));
return match rx.recv() {
Ok(e) => Ok(e),
Err(..) => Err(timeout("wait timed out")),
};
// Register a new SIGCHLD handler, returning the reading half of the
// self-pipe plus the old handler registered (return value of sigaction).
//
// Be sure to set up the self-pipe first because as soon as we register a
// handler we're going to start receiving signals.
fn register_sigchld() -> (libc::c_int, c::sigaction) {
unsafe {
let mut pipes = [0; 2];
assert_eq!(libc::pipe(pipes.as_mut_ptr()), 0);
set_nonblocking(pipes[0], true);
set_nonblocking(pipes[1], true);
WRITE_FD = pipes[1];
let mut old: c::sigaction = mem::zeroed();
let mut new: c::sigaction = mem::zeroed();
new.sa_handler = sigchld_handler;
new.sa_flags = c::SA_NOCLDSTOP;
assert_eq!(c::sigaction(c::SIGCHLD, &new, &mut old), 0);
(pipes[0], old)
}
}
// Helper thread for processing SIGCHLD messages
fn waitpid_helper(input: libc::c_int,
messages: Receiver<Req>,
(read_fd, old): (libc::c_int, c::sigaction)) {
set_nonblocking(input, true);
let mut set: c::fd_set = unsafe { mem::zeroed() };
let mut tv: libc::timeval;
let mut active = Vec::<(libc::pid_t, Sender<ProcessExit>, u64)>::new();
let max = cmp::max(input, read_fd) + 1;
'outer: loop {
// Figure out the timeout of our syscall-to-happen. If we're waiting
// for some processes, then they'll have a timeout, otherwise we
// wait indefinitely for a message to arrive.
//
// FIXME: sure would be nice to not have to scan the entire array
let min = active.iter().map(|a| a.2).enumerate().min_by(|p| {
p.1
});
let (p, idx) = match min {
Some((idx, deadline)) => {
let now = sys::timer::now();
let ms = if now < deadline {deadline - now} else {0};
tv = ms_to_timeval(ms);
(&mut tv as *mut _, idx)
}
None => (ptr | {
let r = libc::funcs::posix88::signal::kill(pid, signal as c_int);
mkerr_libc(r)
} | identifier_body |
|
process.rs | _ok(), "wait(0) should either return Ok or panic");
panic!("short read on the CLOEXEC pipe")
}
};
}
// And at this point we've reached a special time in the life of the
// child. The child must now be considered hamstrung and unable to
// do anything other than syscalls really. Consider the following
// scenario:
//
// 1. Thread A of process 1 grabs the malloc() mutex
// 2. Thread B of process 1 forks(), creating thread C
// 3. Thread C of process 2 then attempts to malloc()
// 4. The memory of process 2 is the same as the memory of
// process 1, so the mutex is locked.
//
// This situation looks a lot like deadlock, right? It turns out
// that this is what pthread_atfork() takes care of, which is
// presumably implemented across platforms. The first thing that
// threads to *before* forking is to do things like grab the malloc
// mutex, and then after the fork they unlock it.
//
// Despite this information, libnative's spawn has been witnessed to
// deadlock on both OSX and FreeBSD. I'm not entirely sure why, but
// all collected backtraces point at malloc/free traffic in the
// child spawned process.
//
// For this reason, the block of code below should contain 0
// invocations of either malloc of free (or their related friends).
//
// As an example of not having malloc/free traffic, we don't close
// this file descriptor by dropping the FileDesc (which contains an
// allocation). Instead we just close it manually. This will never
// have the drop glue anyway because this code never returns (the
// child will either exec() or invoke libc::exit)
let _ = libc::close(input.fd());
fn fail(output: &mut FileDesc) ->! {
let errno = sys::os::errno() as u32;
let bytes = [
(errno >> 24) as u8,
(errno >> 16) as u8,
(errno >> 8) as u8,
(errno >> 0) as u8,
CLOEXEC_MSG_FOOTER[0], CLOEXEC_MSG_FOOTER[1],
CLOEXEC_MSG_FOOTER[2], CLOEXEC_MSG_FOOTER[3]
];
// pipe I/O up to PIPE_BUF bytes should be atomic
assert!(output.write(&bytes).is_ok());
unsafe { libc::_exit(1) }
}
rustrt::rust_unset_sigprocmask();
// If a stdio file descriptor is set to be ignored (via a -1 file
// descriptor), then we don't actually close it, but rather open
// up /dev/null into that file descriptor. Otherwise, the first file
// descriptor opened up in the child would be numbered as one of the
// stdio file descriptors, which is likely to wreak havoc.
let setup = |src: Option<P>, dst: c_int| {
let src = match src {
None => {
let flags = if dst == libc::STDIN_FILENO {
libc::O_RDONLY
} else {
libc::O_RDWR
};
libc::open(devnull.as_ptr() as *const _, flags, 0)
}
Some(obj) => {
let fd = obj.as_inner().fd();
// Leak the memory and the file descriptor. We're in the
// child now an all our resources are going to be
// cleaned up very soon
mem::forget(obj);
fd
}
};
src!= -1 && retry(|| dup2(src, dst))!= -1
};
if!setup(in_fd, libc::STDIN_FILENO) { fail(&mut output) }
if!setup(out_fd, libc::STDOUT_FILENO) { fail(&mut output) }
if!setup(err_fd, libc::STDERR_FILENO) { fail(&mut output) }
// close all other fds
for fd in (3..getdtablesize()).rev() {
if fd!= output.fd() {
let _ = close(fd as c_int);
}
}
match cfg.gid() {
Some(u) => {
if libc::setgid(u as libc::gid_t)!= 0 {
fail(&mut output);
}
}
None => {}
}
match cfg.uid() {
Some(u) => {
// When dropping privileges from root, the `setgroups` call
// will remove any extraneous groups. If we don't call this,
// then even though our uid has dropped, we may still have
// groups that enable us to do super-user things. This will
// fail if we aren't root, so don't bother checking the
// return value, this is just done as an optimistic
// privilege dropping function.
extern {
fn setgroups(ngroups: libc::c_int,
ptr: *const libc::c_void) -> libc::c_int;
}
let _ = setgroups(0, ptr::null());
if libc::setuid(u as libc::uid_t)!= 0 {
fail(&mut output);
}
}
None => {}
}
if cfg.detach() {
// Don't check the error of setsid because it fails if we're the
// process leader already. We just forked so it shouldn't return
// error, but ignore it anyway.
let _ = libc::setsid();
}
if!dirp.is_null() && chdir(dirp) == -1 {
fail(&mut output);
}
if!envp.is_null() {
*sys::os::environ() = envp as *const _;
}
let _ = execvp(*argv, argv as *mut _);
fail(&mut output);
})
})
}
pub fn wait(&self, deadline: u64) -> IoResult<ProcessExit> {
use cmp;
use sync::mpsc::TryRecvError;
static mut WRITE_FD: libc::c_int = 0;
let mut status = 0 as c_int;
if deadline == 0 {
return match retry(|| unsafe { c::waitpid(self.pid, &mut status, 0) }) {
-1 => panic!("unknown waitpid error: {:?}", super::last_error()),
_ => Ok(translate_status(status)),
}
}
// On unix, wait() and its friends have no timeout parameters, so there is
// no way to time out a thread in wait(). From some googling and some
// thinking, it appears that there are a few ways to handle timeouts in
// wait(), but the only real reasonable one for a multi-threaded program is
// to listen for SIGCHLD.
//
// With this in mind, the waiting mechanism with a timeout barely uses
// waitpid() at all. There are a few times that waitpid() is invoked with
// WNOHANG, but otherwise all the necessary blocking is done by waiting for
// a SIGCHLD to arrive (and that blocking has a timeout). Note, however,
// that waitpid() is still used to actually reap the child.
//
// Signal handling is super tricky in general, and this is no exception. Due
// to the async nature of SIGCHLD, we use the self-pipe trick to transmit
// data out of the signal handler to the rest of the application. The first
// idea would be to have each thread waiting with a timeout to read this
// output file descriptor, but a write() is akin to a signal(), not a
// broadcast(), so it would only wake up one thread, and possibly the wrong
// thread. Hence a helper thread is used.
//
// The helper thread here is responsible for farming requests for a
// waitpid() with a timeout, and then processing all of the wait requests.
// By guaranteeing that only this helper thread is reading half of the
// self-pipe, we're sure that we'll never lose a SIGCHLD. This helper thread
// is also responsible for select() to wait for incoming messages or
// incoming SIGCHLD messages, along with passing an appropriate timeout to
// select() to wake things up as necessary.
//
// The ordering of the following statements is also very purposeful. First,
// we must be guaranteed that the helper thread is booted and available to
// receive SIGCHLD signals, and then we must also ensure that we do a
// nonblocking waitpid() at least once before we go ask the sigchld helper.
// This prevents the race where the child exits, we boot the helper, and
// then we ask for the child's exit status (never seeing a sigchld).
//
// The actual communication between the helper thread and this thread is
// quite simple, just a channel moving data around.
HELPER.boot(register_sigchld, waitpid_helper);
match self.try_wait() {
Some(ret) => return Ok(ret),
None => {}
}
let (tx, rx) = channel();
HELPER.send(NewChild(self.pid, tx, deadline));
return match rx.recv() {
Ok(e) => Ok(e),
Err(..) => Err(timeout("wait timed out")),
};
// Register a new SIGCHLD handler, returning the reading half of the
// self-pipe plus the old handler registered (return value of sigaction).
//
// Be sure to set up the self-pipe first because as soon as we register a
// handler we're going to start receiving signals.
fn register_sigchld() -> (libc::c_int, c::sigaction) {
unsafe {
let mut pipes = [0; 2];
assert_eq!(libc::pipe(pipes.as_mut_ptr()), 0);
set_nonblocking(pipes[0], true);
set_nonblocking(pipes[1], true);
WRITE_FD = pipes[1];
let mut old: c::sigaction = mem::zeroed();
let mut new: c::sigaction = mem::zeroed();
new.sa_handler = sigchld_handler;
new.sa_flags = c::SA_NOCLDSTOP;
assert_eq!(c::sigaction(c::SIGCHLD, &new, &mut old), 0);
(pipes[0], old)
}
}
// Helper thread for processing SIGCHLD messages
fn waitpid_helper(input: libc::c_int,
messages: Receiver<Req>,
(read_fd, old): (libc::c_int, c::sigaction)) {
set_nonblocking(input, true);
let mut set: c::fd_set = unsafe { mem::zeroed() };
let mut tv: libc::timeval;
let mut active = Vec::<(libc::pid_t, Sender<ProcessExit>, u64)>::new();
let max = cmp::max(input, read_fd) + 1;
'outer: loop {
// Figure out the timeout of our syscall-to-happen. If we're waiting
// for some processes, then they'll have a timeout, otherwise we
// wait indefinitely for a message to arrive.
//
// FIXME: sure would be nice to not have to scan the entire array
let min = active.iter().map(|a| a.2).enumerate().min_by(|p| {
p.1
});
let (p, idx) = match min {
Some((idx, deadline)) => {
let now = sys::timer::now();
let ms = if now < deadline {deadline - now} else {0};
tv = ms_to_timeval(ms);
(&mut tv as *mut _, idx)
}
None => (ptr::null_mut(), -1),
};
// Wait for something to happen
c::fd_set(&mut set, input);
c::fd_set(&mut set, read_fd);
match unsafe { c::select(max, &mut set, ptr::null_mut(),
ptr::null_mut(), p) } {
// interrupted, retry
-1 if os::errno() == libc::EINTR as i32 => continue,
// We read something, break out and process
1 | 2 => {}
// Timeout, the pending request is removed
0 => {
drop(active.remove(idx));
continue
}
n => panic!("error in select {:?} ({:?})", os::errno(), n),
}
// Process any pending messages
if drain(input) {
loop {
match messages.try_recv() {
Ok(NewChild(pid, tx, deadline)) => {
active.push((pid, tx, deadline));
}
// Once we've been disconnected it means the main
// thread is exiting (at_exit has run). We could
// still have active waiter for other threads, so
// we're just going to drop them all on the floor.
// This means that they won't receive a "you're
// done" message in which case they'll be considered
// as timed out, but more generally errors will
// start propagating.
Err(TryRecvError::Disconnected) => {
break 'outer;
}
Err(TryRecvError::Empty) => break,
}
}
}
// If a child exited (somehow received SIGCHLD), then poll all
// children to see if any of them exited.
//
// We also attempt to be responsible netizens when dealing with
// SIGCHLD by invoking any previous SIGCHLD handler instead of just
// ignoring any previous SIGCHLD handler. Note that we don't provide
// a 1:1 mapping of our handler invocations to the previous handler
// invocations because we drain the `read_fd` entirely. This is
// probably OK because the kernel is already allowed to coalesce
// simultaneous signals, we're just doing some extra coalescing.
//
// Another point of note is that this likely runs the signal handler
// on a different thread than the one that received the signal. I
// *think* this is ok at this time.
//
// The main reason for doing this is to allow stdtest to run native
// tests as well. Both libgreen and libnative are running around
// with process timeouts, but libgreen should get there first
// (currently libuv doesn't handle old signal handlers).
if drain(read_fd) {
let i: uint = unsafe { mem::transmute(old.sa_handler) };
if i!= 0 {
assert!(old.sa_flags & c::SA_SIGINFO == 0);
(old.sa_handler)(c::SIGCHLD);
}
| random_line_split |
||
process.rs | let b = arr[1] as u32;
let c = arr[2] as u32;
let d = arr[3] as u32;
((a << 24) | (b << 16) | (c << 8) | (d << 0)) as i32
}
let p = Process{ pid: pid };
drop(output);
let mut bytes = [0; 8];
return match input.read(&mut bytes) {
Ok(8) => {
assert!(combine(CLOEXEC_MSG_FOOTER) == combine(&bytes[4.. 8]),
"Validation on the CLOEXEC pipe failed: {:?}", bytes);
let errno = combine(&bytes[0.. 4]);
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
Err(super::decode_error(errno))
}
Err(ref e) if e.kind == EndOfFile => Ok(p),
Err(e) => {
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
panic!("the CLOEXEC pipe failed: {:?}", e)
},
Ok(..) => { // pipe I/O up to PIPE_BUF bytes should be atomic
assert!(p.wait(0).is_ok(), "wait(0) should either return Ok or panic");
panic!("short read on the CLOEXEC pipe")
}
};
}
// And at this point we've reached a special time in the life of the
// child. The child must now be considered hamstrung and unable to
// do anything other than syscalls really. Consider the following
// scenario:
//
// 1. Thread A of process 1 grabs the malloc() mutex
// 2. Thread B of process 1 forks(), creating thread C
// 3. Thread C of process 2 then attempts to malloc()
// 4. The memory of process 2 is the same as the memory of
// process 1, so the mutex is locked.
//
// This situation looks a lot like deadlock, right? It turns out
// that this is what pthread_atfork() takes care of, which is
// presumably implemented across platforms. The first thing that
// threads to *before* forking is to do things like grab the malloc
// mutex, and then after the fork they unlock it.
//
// Despite this information, libnative's spawn has been witnessed to
// deadlock on both OSX and FreeBSD. I'm not entirely sure why, but
// all collected backtraces point at malloc/free traffic in the
// child spawned process.
//
// For this reason, the block of code below should contain 0
// invocations of either malloc of free (or their related friends).
//
// As an example of not having malloc/free traffic, we don't close
// this file descriptor by dropping the FileDesc (which contains an
// allocation). Instead we just close it manually. This will never
// have the drop glue anyway because this code never returns (the
// child will either exec() or invoke libc::exit)
let _ = libc::close(input.fd());
fn fail(output: &mut FileDesc) ->! {
let errno = sys::os::errno() as u32;
let bytes = [
(errno >> 24) as u8,
(errno >> 16) as u8,
(errno >> 8) as u8,
(errno >> 0) as u8,
CLOEXEC_MSG_FOOTER[0], CLOEXEC_MSG_FOOTER[1],
CLOEXEC_MSG_FOOTER[2], CLOEXEC_MSG_FOOTER[3]
];
// pipe I/O up to PIPE_BUF bytes should be atomic
assert!(output.write(&bytes).is_ok());
unsafe { libc::_exit(1) }
}
rustrt::rust_unset_sigprocmask();
// If a stdio file descriptor is set to be ignored (via a -1 file
// descriptor), then we don't actually close it, but rather open
// up /dev/null into that file descriptor. Otherwise, the first file
// descriptor opened up in the child would be numbered as one of the
// stdio file descriptors, which is likely to wreak havoc.
let setup = |src: Option<P>, dst: c_int| {
let src = match src {
None => {
let flags = if dst == libc::STDIN_FILENO {
libc::O_RDONLY
} else {
libc::O_RDWR
};
libc::open(devnull.as_ptr() as *const _, flags, 0)
}
Some(obj) => {
let fd = obj.as_inner().fd();
// Leak the memory and the file descriptor. We're in the
// child now an all our resources are going to be
// cleaned up very soon
mem::forget(obj);
fd
}
};
src!= -1 && retry(|| dup2(src, dst))!= -1
};
if!setup(in_fd, libc::STDIN_FILENO) { fail(&mut output) }
if!setup(out_fd, libc::STDOUT_FILENO) { fail(&mut output) }
if!setup(err_fd, libc::STDERR_FILENO) { fail(&mut output) }
// close all other fds
for fd in (3..getdtablesize()).rev() {
if fd!= output.fd() {
let _ = close(fd as c_int);
}
}
match cfg.gid() {
Some(u) => |
None => {}
}
match cfg.uid() {
Some(u) => {
// When dropping privileges from root, the `setgroups` call
// will remove any extraneous groups. If we don't call this,
// then even though our uid has dropped, we may still have
// groups that enable us to do super-user things. This will
// fail if we aren't root, so don't bother checking the
// return value, this is just done as an optimistic
// privilege dropping function.
extern {
fn setgroups(ngroups: libc::c_int,
ptr: *const libc::c_void) -> libc::c_int;
}
let _ = setgroups(0, ptr::null());
if libc::setuid(u as libc::uid_t)!= 0 {
fail(&mut output);
}
}
None => {}
}
if cfg.detach() {
// Don't check the error of setsid because it fails if we're the
// process leader already. We just forked so it shouldn't return
// error, but ignore it anyway.
let _ = libc::setsid();
}
if!dirp.is_null() && chdir(dirp) == -1 {
fail(&mut output);
}
if!envp.is_null() {
*sys::os::environ() = envp as *const _;
}
let _ = execvp(*argv, argv as *mut _);
fail(&mut output);
})
})
}
pub fn wait(&self, deadline: u64) -> IoResult<ProcessExit> {
use cmp;
use sync::mpsc::TryRecvError;
static mut WRITE_FD: libc::c_int = 0;
let mut status = 0 as c_int;
if deadline == 0 {
return match retry(|| unsafe { c::waitpid(self.pid, &mut status, 0) }) {
-1 => panic!("unknown waitpid error: {:?}", super::last_error()),
_ => Ok(translate_status(status)),
}
}
// On unix, wait() and its friends have no timeout parameters, so there is
// no way to time out a thread in wait(). From some googling and some
// thinking, it appears that there are a few ways to handle timeouts in
// wait(), but the only real reasonable one for a multi-threaded program is
// to listen for SIGCHLD.
//
// With this in mind, the waiting mechanism with a timeout barely uses
// waitpid() at all. There are a few times that waitpid() is invoked with
// WNOHANG, but otherwise all the necessary blocking is done by waiting for
// a SIGCHLD to arrive (and that blocking has a timeout). Note, however,
// that waitpid() is still used to actually reap the child.
//
// Signal handling is super tricky in general, and this is no exception. Due
// to the async nature of SIGCHLD, we use the self-pipe trick to transmit
// data out of the signal handler to the rest of the application. The first
// idea would be to have each thread waiting with a timeout to read this
// output file descriptor, but a write() is akin to a signal(), not a
// broadcast(), so it would only wake up one thread, and possibly the wrong
// thread. Hence a helper thread is used.
//
// The helper thread here is responsible for farming requests for a
// waitpid() with a timeout, and then processing all of the wait requests.
// By guaranteeing that only this helper thread is reading half of the
// self-pipe, we're sure that we'll never lose a SIGCHLD. This helper thread
// is also responsible for select() to wait for incoming messages or
// incoming SIGCHLD messages, along with passing an appropriate timeout to
// select() to wake things up as necessary.
//
// The ordering of the following statements is also very purposeful. First,
// we must be guaranteed that the helper thread is booted and available to
// receive SIGCHLD signals, and then we must also ensure that we do a
// nonblocking waitpid() at least once before we go ask the sigchld helper.
// This prevents the race where the child exits, we boot the helper, and
// then we ask for the child's exit status (never seeing a sigchld).
//
// The actual communication between the helper thread and this thread is
// quite simple, just a channel moving data around.
HELPER.boot(register_sigchld, waitpid_helper);
match self.try_wait() {
Some(ret) => return Ok(ret),
None => {}
}
let (tx, rx) = channel();
HELPER.send(NewChild(self.pid, tx, deadline));
return match rx.recv() {
Ok(e) => Ok(e),
Err(..) => Err(timeout("wait timed out")),
};
// Register a new SIGCHLD handler, returning the reading half of the
// self-pipe plus the old handler registered (return value of sigaction).
//
// Be sure to set up the self-pipe first because as soon as we register a
// handler we're going to start receiving signals.
fn register_sigchld() -> (libc::c_int, c::sigaction) {
unsafe {
let mut pipes = [0; 2];
assert_eq!(libc::pipe(pipes.as_mut_ptr()), 0);
set_nonblocking(pipes[0], true);
set_nonblocking(pipes[1], true);
WRITE_FD = pipes[1];
let mut old: c::sigaction = mem::zeroed();
let mut new: c::sigaction = mem::zeroed();
new.sa_handler = sigchld_handler;
new.sa_flags = c::SA_NOCLDSTOP;
assert_eq!(c::sigaction(c::SIGCHLD, &new, &mut old), 0);
(pipes[0], old)
}
}
// Helper thread for processing SIGCHLD messages
fn waitpid_helper(input: libc::c_int,
messages: Receiver<Req>,
(read_fd, old): (libc::c_int, c::sigaction)) {
set_nonblocking(input, true);
let mut set: c::fd_set = unsafe { mem::zeroed() };
let mut tv: libc::timeval;
let mut active = Vec::<(libc::pid_t, Sender<ProcessExit>, u64)>::new();
let max = cmp::max(input, read_fd) + 1;
'outer: loop {
// Figure out the timeout of our syscall-to-happen. If we're waiting
// for some processes, then they'll have a timeout, otherwise we
// wait indefinitely for a message to arrive.
//
// FIXME: sure would be nice to not have to scan the entire array
let min = active.iter().map(|a| a.2).enumerate().min_by(|p| {
p.1
});
let (p, idx) = match min {
Some((idx, deadline)) => {
let now = sys::timer::now();
let ms = if now < deadline {deadline - now} else {0};
tv = ms_to_timeval(ms);
(&mut tv as *mut _, idx)
}
None => (ptr::null_mut(), -1),
};
// Wait for something to happen
c::fd_set(&mut set, input);
c::fd_set(&mut set, read_fd);
match unsafe { c::select(max, &mut set, ptr::null_mut(),
ptr::null_mut(), p) } {
// interrupted, retry
-1 if os::errno() == libc::EINTR as i32 => continue,
// We read something, break out and process
1 | 2 => {}
// Timeout, the pending request is removed
0 => {
drop(active.remove(idx));
continue
}
n => panic!("error in select {:?} ({:?})", os::errno(), n),
}
// Process any pending messages
if drain(input) {
loop {
match messages.try_recv() {
Ok(NewChild(pid, tx, deadline)) => {
active.push((pid, tx, deadline));
}
// Once we've been disconnected it means the main
// thread is exiting (at_exit has run). We could
// still have active waiter for other threads, so
// we're just going to drop them all on the floor.
// This means that they won't receive a "you're
// done" message in which case they'll be considered
// as timed out, but more generally errors will
// start propagating.
Err(TryRecvError::Disconnected) => {
break 'outer;
}
| {
if libc::setgid(u as libc::gid_t) != 0 {
fail(&mut output);
}
} | conditional_block |
fs.rs | use std::{iter};
use std::collections::{HashMap, HashSet, VecDeque};
use prelude::*;
pub struct Filesystem {
pub volume: Box<Volume>,
pub superblock: Superblock,
pub superblock_bytes: Vec<u8>,
pub superblock_dirty: bool,
pub groups: Vec<Group>,
pub inode_cache: HashMap<u64, Inode>,
pub dirty_inos: HashSet<u64>,
pub reused_inos: HashSet<u64>,
pub cache_queue: VecDeque<u64>,
}
pub struct Group {
pub idx: u64,
pub desc: GroupDesc,
pub block_bitmap: Vec<u8>,
pub inode_bitmap: Vec<u8>,
pub dirty: bool,
}
pub const ROOT_INO: u64 = 2;
impl Filesystem {
pub fn block_size(&self) -> u64 {
1024 << self.superblock.log_block_size
}
pub fn | (&self) -> u64 {
let a = self.superblock.blocks_count as u64;
let b = self.superblock.blocks_per_group as u64;
(a + b - 1) / b
}
}
pub fn mount_fs(mut volume: Box<Volume>) -> Result<Filesystem> {
let mut superblock_bytes = make_buffer(1024);
try!(volume.read(1024, &mut superblock_bytes[..]));
let superblock = try!(decode_superblock(&superblock_bytes[..], true));
let mut fs = Filesystem {
volume: volume,
superblock: superblock,
superblock_bytes: superblock_bytes,
superblock_dirty: false,
groups: Vec::new(),
inode_cache: HashMap::new(),
dirty_inos: HashSet::new(),
reused_inos: HashSet::new(),
cache_queue: VecDeque::new(),
};
for group_idx in 0..fs.group_count() {
let group = try!(read_group(&mut fs, group_idx));
fs.groups.push(group);
}
try!(flush_superblock(&mut fs, false));
Ok(fs)
}
pub fn flush_fs(fs: &mut Filesystem) -> Result<()> {
let dirty_inos = fs.dirty_inos.clone();
for dirty_ino in dirty_inos {
try!(flush_ino(fs, dirty_ino));
}
for group_idx in 0..fs.group_count() {
try!(flush_group(fs, group_idx));
}
flush_superblock(fs, true)
}
fn flush_superblock(fs: &mut Filesystem, clean: bool) -> Result<()> {
let state = if clean { 1 } else { 2 };
fs.superblock_dirty = fs.superblock_dirty || fs.superblock.state!= state;
fs.superblock.state = state;
if fs.superblock_dirty {
try!(encode_superblock(&fs.superblock, &mut fs.superblock_bytes[..]));
try!(fs.volume.write(1024, &fs.superblock_bytes[..]));
fs.superblock_dirty = false;
}
Ok(())
}
pub fn make_buffer(size: u64) -> Vec<u8> {
iter::repeat(0).take(size as usize).collect()
}
| group_count | identifier_name |
fs.rs | use std::{iter};
use std::collections::{HashMap, HashSet, VecDeque};
use prelude::*;
pub struct Filesystem {
pub volume: Box<Volume>,
pub superblock: Superblock,
pub superblock_bytes: Vec<u8>,
pub superblock_dirty: bool,
pub groups: Vec<Group>,
pub inode_cache: HashMap<u64, Inode>,
pub dirty_inos: HashSet<u64>,
pub reused_inos: HashSet<u64>,
pub cache_queue: VecDeque<u64>,
}
pub struct Group {
pub idx: u64,
pub desc: GroupDesc,
pub block_bitmap: Vec<u8>,
pub inode_bitmap: Vec<u8>,
pub dirty: bool,
}
pub const ROOT_INO: u64 = 2;
impl Filesystem {
pub fn block_size(&self) -> u64 {
1024 << self.superblock.log_block_size
}
pub fn group_count(&self) -> u64 {
let a = self.superblock.blocks_count as u64;
let b = self.superblock.blocks_per_group as u64;
(a + b - 1) / b
}
}
pub fn mount_fs(mut volume: Box<Volume>) -> Result<Filesystem> {
let mut superblock_bytes = make_buffer(1024);
try!(volume.read(1024, &mut superblock_bytes[..]));
let superblock = try!(decode_superblock(&superblock_bytes[..], true));
let mut fs = Filesystem {
volume: volume,
superblock: superblock,
superblock_bytes: superblock_bytes,
superblock_dirty: false,
groups: Vec::new(),
inode_cache: HashMap::new(),
dirty_inos: HashSet::new(),
reused_inos: HashSet::new(),
cache_queue: VecDeque::new(),
};
for group_idx in 0..fs.group_count() {
let group = try!(read_group(&mut fs, group_idx));
fs.groups.push(group);
}
try!(flush_superblock(&mut fs, false));
Ok(fs)
}
pub fn flush_fs(fs: &mut Filesystem) -> Result<()> {
let dirty_inos = fs.dirty_inos.clone();
for dirty_ino in dirty_inos {
try!(flush_ino(fs, dirty_ino));
}
for group_idx in 0..fs.group_count() {
try!(flush_group(fs, group_idx));
}
flush_superblock(fs, true)
}
fn flush_superblock(fs: &mut Filesystem, clean: bool) -> Result<()> {
let state = if clean { 1 } else { 2 };
fs.superblock_dirty = fs.superblock_dirty || fs.superblock.state!= state;
fs.superblock.state = state;
if fs.superblock_dirty |
Ok(())
}
pub fn make_buffer(size: u64) -> Vec<u8> {
iter::repeat(0).take(size as usize).collect()
}
| {
try!(encode_superblock(&fs.superblock, &mut fs.superblock_bytes[..]));
try!(fs.volume.write(1024, &fs.superblock_bytes[..]));
fs.superblock_dirty = false;
} | conditional_block |
fs.rs | use std::{iter};
use std::collections::{HashMap, HashSet, VecDeque};
use prelude::*;
pub struct Filesystem {
pub volume: Box<Volume>,
pub superblock: Superblock,
pub superblock_bytes: Vec<u8>,
pub superblock_dirty: bool,
pub groups: Vec<Group>,
pub inode_cache: HashMap<u64, Inode>,
pub dirty_inos: HashSet<u64>,
pub reused_inos: HashSet<u64>,
pub cache_queue: VecDeque<u64>,
}
pub struct Group {
pub idx: u64,
pub desc: GroupDesc,
pub block_bitmap: Vec<u8>,
pub inode_bitmap: Vec<u8>,
pub dirty: bool,
}
pub const ROOT_INO: u64 = 2;
impl Filesystem {
pub fn block_size(&self) -> u64 {
1024 << self.superblock.log_block_size
}
pub fn group_count(&self) -> u64 {
let a = self.superblock.blocks_count as u64;
let b = self.superblock.blocks_per_group as u64;
(a + b - 1) / b
}
}
pub fn mount_fs(mut volume: Box<Volume>) -> Result<Filesystem> | }
try!(flush_superblock(&mut fs, false));
Ok(fs)
}
pub fn flush_fs(fs: &mut Filesystem) -> Result<()> {
let dirty_inos = fs.dirty_inos.clone();
for dirty_ino in dirty_inos {
try!(flush_ino(fs, dirty_ino));
}
for group_idx in 0..fs.group_count() {
try!(flush_group(fs, group_idx));
}
flush_superblock(fs, true)
}
fn flush_superblock(fs: &mut Filesystem, clean: bool) -> Result<()> {
let state = if clean { 1 } else { 2 };
fs.superblock_dirty = fs.superblock_dirty || fs.superblock.state!= state;
fs.superblock.state = state;
if fs.superblock_dirty {
try!(encode_superblock(&fs.superblock, &mut fs.superblock_bytes[..]));
try!(fs.volume.write(1024, &fs.superblock_bytes[..]));
fs.superblock_dirty = false;
}
Ok(())
}
pub fn make_buffer(size: u64) -> Vec<u8> {
iter::repeat(0).take(size as usize).collect()
}
| {
let mut superblock_bytes = make_buffer(1024);
try!(volume.read(1024, &mut superblock_bytes[..]));
let superblock = try!(decode_superblock(&superblock_bytes[..], true));
let mut fs = Filesystem {
volume: volume,
superblock: superblock,
superblock_bytes: superblock_bytes,
superblock_dirty: false,
groups: Vec::new(),
inode_cache: HashMap::new(),
dirty_inos: HashSet::new(),
reused_inos: HashSet::new(),
cache_queue: VecDeque::new(),
};
for group_idx in 0..fs.group_count() {
let group = try!(read_group(&mut fs, group_idx));
fs.groups.push(group); | identifier_body |
fs.rs | use std::{iter};
use std::collections::{HashMap, HashSet, VecDeque};
use prelude::*;
pub struct Filesystem {
pub volume: Box<Volume>,
pub superblock: Superblock,
pub superblock_bytes: Vec<u8>,
pub superblock_dirty: bool,
pub groups: Vec<Group>,
pub inode_cache: HashMap<u64, Inode>,
pub dirty_inos: HashSet<u64>,
pub reused_inos: HashSet<u64>,
pub cache_queue: VecDeque<u64>,
}
pub struct Group {
pub idx: u64,
pub desc: GroupDesc,
pub block_bitmap: Vec<u8>,
pub inode_bitmap: Vec<u8>,
pub dirty: bool,
}
pub const ROOT_INO: u64 = 2;
impl Filesystem {
pub fn block_size(&self) -> u64 {
1024 << self.superblock.log_block_size
}
pub fn group_count(&self) -> u64 {
let a = self.superblock.blocks_count as u64;
let b = self.superblock.blocks_per_group as u64;
(a + b - 1) / b
}
}
pub fn mount_fs(mut volume: Box<Volume>) -> Result<Filesystem> {
let mut superblock_bytes = make_buffer(1024);
try!(volume.read(1024, &mut superblock_bytes[..]));
let superblock = try!(decode_superblock(&superblock_bytes[..], true));
let mut fs = Filesystem {
volume: volume,
superblock: superblock,
superblock_bytes: superblock_bytes,
superblock_dirty: false,
groups: Vec::new(),
inode_cache: HashMap::new(),
dirty_inos: HashSet::new(),
reused_inos: HashSet::new(),
cache_queue: VecDeque::new(),
};
for group_idx in 0..fs.group_count() {
let group = try!(read_group(&mut fs, group_idx));
fs.groups.push(group);
}
try!(flush_superblock(&mut fs, false));
Ok(fs)
}
pub fn flush_fs(fs: &mut Filesystem) -> Result<()> {
let dirty_inos = fs.dirty_inos.clone();
for dirty_ino in dirty_inos { | try!(flush_group(fs, group_idx));
}
flush_superblock(fs, true)
}
fn flush_superblock(fs: &mut Filesystem, clean: bool) -> Result<()> {
let state = if clean { 1 } else { 2 };
fs.superblock_dirty = fs.superblock_dirty || fs.superblock.state!= state;
fs.superblock.state = state;
if fs.superblock_dirty {
try!(encode_superblock(&fs.superblock, &mut fs.superblock_bytes[..]));
try!(fs.volume.write(1024, &fs.superblock_bytes[..]));
fs.superblock_dirty = false;
}
Ok(())
}
pub fn make_buffer(size: u64) -> Vec<u8> {
iter::repeat(0).take(size as usize).collect()
} | try!(flush_ino(fs, dirty_ino));
}
for group_idx in 0..fs.group_count() { | random_line_split |
misc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Diff misc.
use Bytes;
use rlp::RlpStream;
use target_info::Target;
include!(concat!(env!("OUT_DIR"), "/version.rs"));
include!(concat!(env!("OUT_DIR"), "/rustc_version.rs"));
#[cfg(feature = "final")]
const THIS_TRACK: &'static str = "nightly";
// ^^^ should be reset to "stable" or "beta" according to the release branch.
#[cfg(not(feature = "final"))]
const THIS_TRACK: &'static str = "unstable";
// ^^^ This gets used when we're not building a final release; should stay as "unstable".
/// Boolean type for clean/dirty status.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum Filth {
/// Data has not been changed.
Clean,
/// Data has been changed.
Dirty,
}
/// Get the platform identifier.
pub fn platform() -> String {
let env = Target::env();
let env_dash = if env.is_empty() { "" } else { "-" };
format!("{}-{}{}{}", Target::arch(), Target::os(), env_dash, env)
}
/// Get the standard version string for this software.
pub fn version() -> String {
let sha3 = short_sha();
let sha3_dash = if sha3.is_empty() { "" } else { "-" };
let commit_date = commit_date().replace("-", "");
let date_dash = if commit_date.is_empty() { "" } else { "-" };
format!("Parity/v{}-{}{}{}{}{}/{}/rustc{}", env!("CARGO_PKG_VERSION"), THIS_TRACK, sha3_dash, sha3, date_dash, commit_date, platform(), rustc_version())
}
/// Get the standard version data for this software.
pub fn version_data() -> Bytes {
let mut s = RlpStream::new_list(4);
let v = (env!("CARGO_PKG_VERSION_MAJOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 16) +
(env!("CARGO_PKG_VERSION_MINOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 8) +
env!("CARGO_PKG_VERSION_PATCH")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed");
s.append(&v);
s.append(&"Parity");
s.append(&rustc_version());
s.append(&&Target::os()[0..2]);
s.out()
}
/// Provide raw information on the package.
pub fn raw_package_info() -> (&'static str, &'static str, &'static str) | {
(THIS_TRACK, env!["CARGO_PKG_VERSION"], sha())
} | identifier_body |
|
misc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Diff misc.
use Bytes;
use rlp::RlpStream;
use target_info::Target;
include!(concat!(env!("OUT_DIR"), "/version.rs"));
include!(concat!(env!("OUT_DIR"), "/rustc_version.rs"));
#[cfg(feature = "final")]
const THIS_TRACK: &'static str = "nightly";
// ^^^ should be reset to "stable" or "beta" according to the release branch.
#[cfg(not(feature = "final"))]
const THIS_TRACK: &'static str = "unstable";
// ^^^ This gets used when we're not building a final release; should stay as "unstable".
/// Boolean type for clean/dirty status.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum Filth {
/// Data has not been changed.
Clean,
/// Data has been changed.
Dirty,
}
/// Get the platform identifier.
pub fn platform() -> String {
let env = Target::env();
let env_dash = if env.is_empty() { "" } else { "-" };
format!("{}-{}{}{}", Target::arch(), Target::os(), env_dash, env)
}
/// Get the standard version string for this software.
pub fn version() -> String {
let sha3 = short_sha();
let sha3_dash = if sha3.is_empty() { "" } else { "-" };
let commit_date = commit_date().replace("-", "");
let date_dash = if commit_date.is_empty() { "" } else | ;
format!("Parity/v{}-{}{}{}{}{}/{}/rustc{}", env!("CARGO_PKG_VERSION"), THIS_TRACK, sha3_dash, sha3, date_dash, commit_date, platform(), rustc_version())
}
/// Get the standard version data for this software.
pub fn version_data() -> Bytes {
let mut s = RlpStream::new_list(4);
let v = (env!("CARGO_PKG_VERSION_MAJOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 16) +
(env!("CARGO_PKG_VERSION_MINOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 8) +
env!("CARGO_PKG_VERSION_PATCH")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed");
s.append(&v);
s.append(&"Parity");
s.append(&rustc_version());
s.append(&&Target::os()[0..2]);
s.out()
}
/// Provide raw information on the package.
pub fn raw_package_info() -> (&'static str, &'static str, &'static str) {
(THIS_TRACK, env!["CARGO_PKG_VERSION"], sha())
}
| { "-" } | conditional_block |
misc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful, |
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Diff misc.
use Bytes;
use rlp::RlpStream;
use target_info::Target;
include!(concat!(env!("OUT_DIR"), "/version.rs"));
include!(concat!(env!("OUT_DIR"), "/rustc_version.rs"));
#[cfg(feature = "final")]
const THIS_TRACK: &'static str = "nightly";
// ^^^ should be reset to "stable" or "beta" according to the release branch.
#[cfg(not(feature = "final"))]
const THIS_TRACK: &'static str = "unstable";
// ^^^ This gets used when we're not building a final release; should stay as "unstable".
/// Boolean type for clean/dirty status.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum Filth {
/// Data has not been changed.
Clean,
/// Data has been changed.
Dirty,
}
/// Get the platform identifier.
pub fn platform() -> String {
let env = Target::env();
let env_dash = if env.is_empty() { "" } else { "-" };
format!("{}-{}{}{}", Target::arch(), Target::os(), env_dash, env)
}
/// Get the standard version string for this software.
pub fn version() -> String {
let sha3 = short_sha();
let sha3_dash = if sha3.is_empty() { "" } else { "-" };
let commit_date = commit_date().replace("-", "");
let date_dash = if commit_date.is_empty() { "" } else { "-" };
format!("Parity/v{}-{}{}{}{}{}/{}/rustc{}", env!("CARGO_PKG_VERSION"), THIS_TRACK, sha3_dash, sha3, date_dash, commit_date, platform(), rustc_version())
}
/// Get the standard version data for this software.
pub fn version_data() -> Bytes {
let mut s = RlpStream::new_list(4);
let v = (env!("CARGO_PKG_VERSION_MAJOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 16) +
(env!("CARGO_PKG_VERSION_MINOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 8) +
env!("CARGO_PKG_VERSION_PATCH")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed");
s.append(&v);
s.append(&"Parity");
s.append(&rustc_version());
s.append(&&Target::os()[0..2]);
s.out()
}
/// Provide raw information on the package.
pub fn raw_package_info() -> (&'static str, &'static str, &'static str) {
(THIS_TRACK, env!["CARGO_PKG_VERSION"], sha())
} | // but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details. | random_line_split |
misc.rs | // Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Diff misc.
use Bytes;
use rlp::RlpStream;
use target_info::Target;
include!(concat!(env!("OUT_DIR"), "/version.rs"));
include!(concat!(env!("OUT_DIR"), "/rustc_version.rs"));
#[cfg(feature = "final")]
const THIS_TRACK: &'static str = "nightly";
// ^^^ should be reset to "stable" or "beta" according to the release branch.
#[cfg(not(feature = "final"))]
const THIS_TRACK: &'static str = "unstable";
// ^^^ This gets used when we're not building a final release; should stay as "unstable".
/// Boolean type for clean/dirty status.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum Filth {
/// Data has not been changed.
Clean,
/// Data has been changed.
Dirty,
}
/// Get the platform identifier.
pub fn platform() -> String {
let env = Target::env();
let env_dash = if env.is_empty() { "" } else { "-" };
format!("{}-{}{}{}", Target::arch(), Target::os(), env_dash, env)
}
/// Get the standard version string for this software.
pub fn version() -> String {
let sha3 = short_sha();
let sha3_dash = if sha3.is_empty() { "" } else { "-" };
let commit_date = commit_date().replace("-", "");
let date_dash = if commit_date.is_empty() { "" } else { "-" };
format!("Parity/v{}-{}{}{}{}{}/{}/rustc{}", env!("CARGO_PKG_VERSION"), THIS_TRACK, sha3_dash, sha3, date_dash, commit_date, platform(), rustc_version())
}
/// Get the standard version data for this software.
pub fn version_data() -> Bytes {
let mut s = RlpStream::new_list(4);
let v = (env!("CARGO_PKG_VERSION_MAJOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 16) +
(env!("CARGO_PKG_VERSION_MINOR")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed") << 8) +
env!("CARGO_PKG_VERSION_PATCH")
.parse::<u32>()
.expect("Environment variables are known to be valid; qed");
s.append(&v);
s.append(&"Parity");
s.append(&rustc_version());
s.append(&&Target::os()[0..2]);
s.out()
}
/// Provide raw information on the package.
pub fn | () -> (&'static str, &'static str, &'static str) {
(THIS_TRACK, env!["CARGO_PKG_VERSION"], sha())
}
| raw_package_info | identifier_name |
macro_parser.rs | exactly like the last step)
//!
//! Remaining input: `a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b]
//!
//! - - - Advance over a `b`. - - -
//!
//! Remaining input: ``
//! eof: [a $( a )* a b ·]
pub use self::NamedMatch::*;
pub use self::ParseResult::*;
use self::TokenTreeOrTokenTreeVec::*;
use ast;
use ast::{TokenTree, Ident};
use ast::{TtDelimited, TtSequence, TtToken};
use codemap::{BytePos, mk_sp, Span};
use codemap;
use parse::lexer::*; //resolve bug?
use parse::ParseSess;
use parse::attr::ParserAttr;
use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
use parse::token::{Eof, DocComment, MatchNt, SubstNt};
use parse::token::{Token, Nonterminal};
use parse::token;
use print::pprust;
use ptr::P;
use std::mem;
use std::rc::Rc;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Vacant, Occupied};
// To avoid costly uniqueness checks, we require that `MatchSeq` always has
// a nonempty body.
#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
Tt(ast::TokenTree),
TtSeq(Rc<Vec<ast::TokenTree>>),
}
impl TokenTreeOrTokenTreeVec {
fn len(&self) -> usize {
match self {
&TtSeq(ref v) => v.len(),
&Tt(ref tt) => tt.len(),
}
}
fn get_tt(&self, index: usize) -> TokenTree {
match self {
&TtSeq(ref v) => v[index].clone(),
&Tt(ref tt) => tt.get_tt(index),
}
}
}
/// an unzipping of `TokenTree`s
#[derive(Clone)]
struct MatcherTtFrame {
elts: TokenTreeOrTokenTreeVec,
idx: usize,
}
#[derive(Clone)]
pub struct MatcherPos {
stack: | erTtFrame>,
top_elts: TokenTreeOrTokenTreeVec,
sep: Option<Token>,
idx: usize,
up: Option<Box<MatcherPos>>,
matches: Vec<Vec<Rc<NamedMatch>>>,
match_lo: usize,
match_cur: usize,
match_hi: usize,
sp_lo: BytePos,
}
pub fn count_names(ms: &[TokenTree]) -> usize {
ms.iter().fold(0, |count, elt| {
count + match elt {
&TtSequence(_, ref seq) => {
seq.num_captures
}
&TtDelimited(_, ref delim) => {
count_names(&delim.tts)
}
&TtToken(_, MatchNt(..)) => {
1
}
&TtToken(_, _) => 0,
}
})
}
pub fn initial_matcher_pos(ms: Rc<Vec<TokenTree>>, sep: Option<Token>, lo: BytePos)
-> Box<MatcherPos> {
let match_idx_hi = count_names(&ms[..]);
let matches: Vec<_> = (0..match_idx_hi).map(|_| Vec::new()).collect();
Box::new(MatcherPos {
stack: vec![],
top_elts: TtSeq(ms),
sep: sep,
idx: 0,
up: None,
matches: matches,
match_lo: 0,
match_cur: 0,
match_hi: match_idx_hi,
sp_lo: lo
})
}
/// NamedMatch is a pattern-match result for a single token::MATCH_NONTERMINAL:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the NamedMatch have the same nonterminal type
/// (expr, item, etc). Each leaf in a single NamedMatch corresponds to a
/// single token::MATCH_NONTERMINAL in the TokenTree that produced it.
///
/// The in-memory structure of a particular NamedMatch represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each MatchedSeq in the NamedMatch, and the identity of the
/// `MatchedNonterminal`s, will depend on the token tree it was applied to:
/// each MatchedSeq corresponds to a single TTSeq in the originating
/// token tree. The depth of the NamedMatch structure will therefore depend
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.
pub enum NamedMatch {
MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),
MatchedNonterminal(Nonterminal)
}
pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>])
-> HashMap<Ident, Rc<NamedMatch>> {
fn n_rec(p_s: &ParseSess, m: &TokenTree, res: &[Rc<NamedMatch>],
ret_val: &mut HashMap<Ident, Rc<NamedMatch>>, idx: &mut usize) {
match m {
&TtSequence(_, ref seq) => {
for next_m in &seq.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtDelimited(_, ref delim) => {
for next_m in &delim.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtToken(sp, MatchNt(bind_name, _, _, _)) => {
match ret_val.entry(bind_name) {
Vacant(spot) => {
spot.insert(res[*idx].clone());
*idx += 1;
}
Occupied(..) => {
panic!(p_s.span_diagnostic
.span_fatal(sp,
&format!("duplicated bind name: {}",
bind_name)))
}
}
}
&TtToken(_, SubstNt(..)) => panic!("Cannot fill in a NT"),
&TtToken(_, _) => (),
}
}
let mut ret_val = HashMap::new();
let mut idx = 0;
for m in ms { n_rec(p_s, m, res, &mut ret_val, &mut idx) }
ret_val
}
pub enum ParseResult<T> {
Success(T),
Failure(codemap::Span, String),
Error(codemap::Span, String)
}
pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;
pub type PositionalParseResult = ParseResult<Vec<Rc<NamedMatch>>>;
pub fn parse_or_else(sess: &ParseSess,
cfg: ast::CrateConfig,
rdr: TtReader,
ms: Vec<TokenTree> )
-> HashMap<Ident, Rc<NamedMatch>> {
match parse(sess, cfg, rdr, &ms[..]) {
Success(m) => m,
Failure(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
Error(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
}
}
/// Perform a token equality check, ignoring syntax context (that is, an
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&token::Ident(id1,_),&token::Ident(id2,_))
| (&token::Lifetime(id1),&token::Lifetime(id2)) =>
id1.name == id2.name,
_ => *t1 == *t2
}
}
pub fn parse(sess: &ParseSess,
cfg: ast::CrateConfig,
mut rdr: TtReader,
ms: &[TokenTree])
-> NamedParseResult {
let mut cur_eis = Vec::new();
cur_eis.push(initial_matcher_pos(Rc::new(ms.iter()
.cloned()
.collect()),
None,
rdr.peek().sp.lo));
loop {
let mut bb_eis = Vec::new(); // black-box parsed by parser.rs
let mut next_eis = Vec::new(); // or proceed normally
let mut eof_eis = Vec::new();
let TokenAndSpan { tok, sp } = rdr.peek();
/* we append new items to this while we go */
loop {
let mut ei = match cur_eis.pop() {
None => break, /* for each Earley Item */
Some(ei) => ei,
};
// When unzipped trees end, remove them
while ei.idx >= ei.top_elts.len() {
match ei.stack.pop() {
Some(MatcherTtFrame { elts, idx }) => {
ei.top_elts = elts;
ei.idx = idx + 1;
}
None => break
}
}
let idx = ei.idx;
let len = ei.top_elts.len();
/* at end of sequence */
if idx >= len {
// can't move out of `match`es, so:
if ei.up.is_some() {
// hack: a matcher sequence is repeating iff it has a
// parent (the top level is just a container)
// disregard separator, try to go up
// (remove this condition to make trailing seps ok)
if idx == len {
// pop from the matcher position
let mut new_pos = ei.up.clone().unwrap();
// update matches (the MBE "parse tree") by appending
// each tree as a subtree.
// I bet this is a perf problem: we're preemptively
// doing a lot of array work that will get thrown away
// most of the time.
// Only touch the binders we have actually bound
for idx in ei.match_lo..ei.match_hi {
let sub = (ei.matches[idx]).clone();
(&mut new_pos.matches[idx])
.push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
sp.hi))));
}
new_pos.match_cur = ei.match_hi;
new_pos.idx += 1;
cur_eis.push(new_pos);
}
// can we go around again?
// the *_t vars are workarounds for the lack of unary move
match ei.sep {
Some(ref t) if idx == len => { // we need a separator
// i'm conflicted about whether this should be hygienic....
// though in this case, if the separators are never legal
// idents, it shouldn't matter.
if token_name_eq(&tok, t) { //pass the separator
let mut ei_t = ei.clone();
// ei_t.match_cur = ei_t.match_lo;
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
_ => { // we don't need a separator
let mut ei_t = ei;
ei_t.match_cur = ei_t.match_lo;
ei_t.idx = 0;
cur_eis.push(ei_t);
}
}
} else {
eof_eis.push(ei);
}
} else {
match ei.top_elts.get_tt(idx) {
/* need to descend into sequence */
TtSequence(sp, seq) => {
if seq.op == ast::ZeroOrMore {
let mut new_ei = ei.clone();
new_ei.match_cur += seq.num_captures;
new_ei.idx += 1;
//we specifically matched zero repeats.
for idx in ei.match_cur..ei.match_cur + seq.num_captures {
(&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp)));
}
cur_eis.push(new_ei);
}
let matches: Vec<_> = (0..ei.matches.len())
.map(|_| Vec::new()).collect();
let ei_t = ei;
cur_eis.push(Box::new(MatcherPos {
stack: vec![],
sep: seq.separator.clone(),
idx: 0,
matches: matches,
match_lo: ei_t.match_cur,
match_cur: ei_t.match_cur,
match_hi: ei_t.match_cur + seq.num_captures,
up: Some(ei_t),
sp_lo: sp.lo,
top_elts: Tt(TtSequence(sp, seq)),
}));
}
TtToken(_, MatchNt(..)) => {
// Built-in nonterminals never start with these tokens,
// so we can eliminate them from consideration.
match tok {
token::CloseDelim(_) => {},
_ => bb_eis.push(ei),
}
}
TtToken(sp, SubstNt(..)) => {
return Error(sp, "Cannot transcribe in macro LHS".to_string())
}
seq @ TtDelimited(..) | seq @ TtToken(_, DocComment(..)) => {
let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq));
let idx = ei.idx;
ei.stack.push(MatcherTtFrame {
elts: lower_elts,
idx: idx,
});
ei.idx = 0;
cur_eis.push(ei);
}
TtToken(_, ref t) => {
let mut ei_t = ei.clone();
if token_name_eq(t,&tok) {
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
}
}
}
/* error messages here could be improved with links to orig. rules */
if token_name_eq(&tok, &token::Eof) {
if eof_eis.len() == 1 {
let mut v = Vec::new();
for dv in &mut (&mut eof_eis[0]).matches {
v.push(dv.pop().unwrap());
}
return Success(nameize(sess, ms, &v[..]));
} else if eof_eis.len() > 1 {
return Error(sp, "ambiguity: multiple successful parses".to_string());
} else {
return Failure(sp, "unexpected end of macro invocation".to_string());
}
} else {
if (!bb_eis.is_empty() &&!next_eis.is_empty())
|| bb_eis.len() > 1 {
let nts = bb_eis.iter().map(|ei| {
match ei.top_elts.get_tt(ei.idx) {
TtToken(_, MatchNt(bind, name, _, _)) => {
format!("{} ('{}')", name, bind)
} | Vec<Match | identifier_name |
macro_parser.rs | exactly like the last step)
//!
//! Remaining input: `a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b]
//!
//! - - - Advance over a `b`. - - -
//!
//! Remaining input: ``
//! eof: [a $( a )* a b ·]
pub use self::NamedMatch::*;
pub use self::ParseResult::*;
use self::TokenTreeOrTokenTreeVec::*;
use ast;
use ast::{TokenTree, Ident};
use ast::{TtDelimited, TtSequence, TtToken};
use codemap::{BytePos, mk_sp, Span};
use codemap;
use parse::lexer::*; //resolve bug?
use parse::ParseSess;
use parse::attr::ParserAttr;
use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
use parse::token::{Eof, DocComment, MatchNt, SubstNt};
use parse::token::{Token, Nonterminal};
use parse::token;
use print::pprust;
use ptr::P;
use std::mem;
use std::rc::Rc;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Vacant, Occupied};
// To avoid costly uniqueness checks, we require that `MatchSeq` always has
// a nonempty body.
#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
Tt(ast::TokenTree),
TtSeq(Rc<Vec<ast::TokenTree>>),
}
impl TokenTreeOrTokenTreeVec {
fn len(&self) -> usize {
match self {
&TtSeq(ref v) => v.len(),
&Tt(ref tt) => tt.len(),
}
}
fn get_tt(&self, index: usize) -> TokenTree {
match self {
&TtSeq(ref v) => v[index].clone(),
&Tt(ref tt) => tt.get_tt(index),
}
}
}
/// an unzipping of `TokenTree`s
#[derive(Clone)]
struct MatcherTtFrame {
elts: TokenTreeOrTokenTreeVec,
idx: usize,
}
#[derive(Clone)]
pub struct MatcherPos {
stack: Vec<MatcherTtFrame>,
top_elts: TokenTreeOrTokenTreeVec,
sep: Option<Token>,
idx: usize,
up: Option<Box<MatcherPos>>,
matches: Vec<Vec<Rc<NamedMatch>>>,
match_lo: usize,
match_cur: usize,
match_hi: usize,
sp_lo: BytePos,
}
pub fn count_names(ms: &[TokenTree]) -> usize {
ms.iter().fold(0, |count, elt| {
count + match elt {
&TtSequence(_, ref seq) => {
seq.num_captures
}
&TtDelimited(_, ref delim) => {
count_names(&delim.tts)
}
&TtToken(_, MatchNt(..)) => {
1
}
&TtToken(_, _) => 0,
}
})
}
pub fn initial_matcher_pos(ms: Rc<Vec<TokenTree>>, sep: Option<Token>, lo: BytePos)
-> Box<MatcherPos> {
let match_idx_hi = count_names(&ms[..]);
let matches: Vec<_> = (0..match_idx_hi).map(|_| Vec::new()).collect();
Box::new(MatcherPos {
stack: vec![],
top_elts: TtSeq(ms),
sep: sep,
idx: 0,
up: None,
matches: matches,
match_lo: 0,
match_cur: 0,
match_hi: match_idx_hi,
sp_lo: lo
})
}
/// NamedMatch is a pattern-match result for a single token::MATCH_NONTERMINAL:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the NamedMatch have the same nonterminal type
/// (expr, item, etc). Each leaf in a single NamedMatch corresponds to a
/// single token::MATCH_NONTERMINAL in the TokenTree that produced it.
///
/// The in-memory structure of a particular NamedMatch represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each MatchedSeq in the NamedMatch, and the identity of the
/// `MatchedNonterminal`s, will depend on the token tree it was applied to:
/// each MatchedSeq corresponds to a single TTSeq in the originating
/// token tree. The depth of the NamedMatch structure will therefore depend
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.
pub enum NamedMatch {
MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),
MatchedNonterminal(Nonterminal)
}
pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>])
-> HashMap<Ident, Rc<NamedMatch>> {
fn n_rec(p_s: &ParseSess, m: &TokenTree, res: &[Rc<NamedMatch>],
ret_val: &mut HashMap<Ident, Rc<NamedMatch>>, idx: &mut usize) {
match m {
| bind_name)))
}
}
}
&TtToken(_, SubstNt(..)) => panic!("Cannot fill in a NT"),
&TtToken(_, _) => (),
}
}
let mut ret_val =
HashMap::new();
let mut idx = 0;
for m in ms { n_rec(p_s, m, res, &mut ret_val, &mut idx) }
ret_val
}
pub enum ParseResult<T> {
Success(T),
Failure(codemap::Span, String),
Error(codemap::Span, String)
}
pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;
pub type PositionalParseResult = ParseResult<Vec<Rc<NamedMatch>>>;
pub fn parse_or_else(sess: &ParseSess,
cfg: ast::CrateConfig,
rdr: TtReader,
ms: Vec<TokenTree> )
-> HashMap<Ident, Rc<NamedMatch>> {
match parse(sess, cfg, rdr, &ms[..]) {
Success(m) => m,
Failure(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
Error(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
}
}
/// Perform a token equality check, ignoring syntax context (that is, an
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&token::Ident(id1,_),&token::Ident(id2,_))
| (&token::Lifetime(id1),&token::Lifetime(id2)) =>
id1.name == id2.name,
_ => *t1 == *t2
}
}
pub fn parse(sess: &ParseSess,
cfg: ast::CrateConfig,
mut rdr: TtReader,
ms: &[TokenTree])
-> NamedParseResult {
let mut cur_eis = Vec::new();
cur_eis.push(initial_matcher_pos(Rc::new(ms.iter()
.cloned()
.collect()),
None,
rdr.peek().sp.lo));
loop {
let mut bb_eis = Vec::new(); // black-box parsed by parser.rs
let mut next_eis = Vec::new(); // or proceed normally
let mut eof_eis = Vec::new();
let TokenAndSpan { tok, sp } = rdr.peek();
/* we append new items to this while we go */
loop {
let mut ei = match cur_eis.pop() {
None => break, /* for each Earley Item */
Some(ei) => ei,
};
// When unzipped trees end, remove them
while ei.idx >= ei.top_elts.len() {
match ei.stack.pop() {
Some(MatcherTtFrame { elts, idx }) => {
ei.top_elts = elts;
ei.idx = idx + 1;
}
None => break
}
}
let idx = ei.idx;
let len = ei.top_elts.len();
/* at end of sequence */
if idx >= len {
// can't move out of `match`es, so:
if ei.up.is_some() {
// hack: a matcher sequence is repeating iff it has a
// parent (the top level is just a container)
// disregard separator, try to go up
// (remove this condition to make trailing seps ok)
if idx == len {
// pop from the matcher position
let mut new_pos = ei.up.clone().unwrap();
// update matches (the MBE "parse tree") by appending
// each tree as a subtree.
// I bet this is a perf problem: we're preemptively
// doing a lot of array work that will get thrown away
// most of the time.
// Only touch the binders we have actually bound
for idx in ei.match_lo..ei.match_hi {
let sub = (ei.matches[idx]).clone();
(&mut new_pos.matches[idx])
.push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
sp.hi))));
}
new_pos.match_cur = ei.match_hi;
new_pos.idx += 1;
cur_eis.push(new_pos);
}
// can we go around again?
// the *_t vars are workarounds for the lack of unary move
match ei.sep {
Some(ref t) if idx == len => { // we need a separator
// i'm conflicted about whether this should be hygienic....
// though in this case, if the separators are never legal
// idents, it shouldn't matter.
if token_name_eq(&tok, t) { //pass the separator
let mut ei_t = ei.clone();
// ei_t.match_cur = ei_t.match_lo;
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
_ => { // we don't need a separator
let mut ei_t = ei;
ei_t.match_cur = ei_t.match_lo;
ei_t.idx = 0;
cur_eis.push(ei_t);
}
}
} else {
eof_eis.push(ei);
}
} else {
match ei.top_elts.get_tt(idx) {
/* need to descend into sequence */
TtSequence(sp, seq) => {
if seq.op == ast::ZeroOrMore {
let mut new_ei = ei.clone();
new_ei.match_cur += seq.num_captures;
new_ei.idx += 1;
//we specifically matched zero repeats.
for idx in ei.match_cur..ei.match_cur + seq.num_captures {
(&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp)));
}
cur_eis.push(new_ei);
}
let matches: Vec<_> = (0..ei.matches.len())
.map(|_| Vec::new()).collect();
let ei_t = ei;
cur_eis.push(Box::new(MatcherPos {
stack: vec![],
sep: seq.separator.clone(),
idx: 0,
matches: matches,
match_lo: ei_t.match_cur,
match_cur: ei_t.match_cur,
match_hi: ei_t.match_cur + seq.num_captures,
up: Some(ei_t),
sp_lo: sp.lo,
top_elts: Tt(TtSequence(sp, seq)),
}));
}
TtToken(_, MatchNt(..)) => {
// Built-in nonterminals never start with these tokens,
// so we can eliminate them from consideration.
match tok {
token::CloseDelim(_) => {},
_ => bb_eis.push(ei),
}
}
TtToken(sp, SubstNt(..)) => {
return Error(sp, "Cannot transcribe in macro LHS".to_string())
}
seq @ TtDelimited(..) | seq @ TtToken(_, DocComment(..)) => {
let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq));
let idx = ei.idx;
ei.stack.push(MatcherTtFrame {
elts: lower_elts,
idx: idx,
});
ei.idx = 0;
cur_eis.push(ei);
}
TtToken(_, ref t) => {
let mut ei_t = ei.clone();
if token_name_eq(t,&tok) {
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
}
}
}
/* error messages here could be improved with links to orig. rules */
if token_name_eq(&tok, &token::Eof) {
if eof_eis.len() == 1 {
let mut v = Vec::new();
for dv in &mut (&mut eof_eis[0]).matches {
v.push(dv.pop().unwrap());
}
return Success(nameize(sess, ms, &v[..]));
} else if eof_eis.len() > 1 {
return Error(sp, "ambiguity: multiple successful parses".to_string());
} else {
return Failure(sp, "unexpected end of macro invocation".to_string());
}
} else {
if (!bb_eis.is_empty() &&!next_eis.is_empty())
|| bb_eis.len() > 1 {
let nts = bb_eis.iter().map(|ei| {
match ei.top_elts.get_tt(ei.idx) {
TtToken(_, MatchNt(bind, name, _, _)) => {
format!("{} ('{}')", name, bind)
}
| &TtSequence(_, ref seq) => {
for next_m in &seq.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtDelimited(_, ref delim) => {
for next_m in &delim.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtToken(sp, MatchNt(bind_name, _, _, _)) => {
match ret_val.entry(bind_name) {
Vacant(spot) => {
spot.insert(res[*idx].clone());
*idx += 1;
}
Occupied(..) => {
panic!(p_s.span_diagnostic
.span_fatal(sp,
&format!("duplicated bind name: {}", | identifier_body |
macro_parser.rs | &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>])
-> HashMap<Ident, Rc<NamedMatch>> {
fn n_rec(p_s: &ParseSess, m: &TokenTree, res: &[Rc<NamedMatch>],
ret_val: &mut HashMap<Ident, Rc<NamedMatch>>, idx: &mut usize) {
match m {
&TtSequence(_, ref seq) => {
for next_m in &seq.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtDelimited(_, ref delim) => {
for next_m in &delim.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtToken(sp, MatchNt(bind_name, _, _, _)) => {
match ret_val.entry(bind_name) {
Vacant(spot) => {
spot.insert(res[*idx].clone());
*idx += 1;
}
Occupied(..) => {
panic!(p_s.span_diagnostic
.span_fatal(sp,
&format!("duplicated bind name: {}",
bind_name)))
}
}
}
&TtToken(_, SubstNt(..)) => panic!("Cannot fill in a NT"),
&TtToken(_, _) => (),
}
}
let mut ret_val = HashMap::new();
let mut idx = 0;
for m in ms { n_rec(p_s, m, res, &mut ret_val, &mut idx) }
ret_val
}
pub enum ParseResult<T> {
Success(T),
Failure(codemap::Span, String),
Error(codemap::Span, String)
}
pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;
pub type PositionalParseResult = ParseResult<Vec<Rc<NamedMatch>>>;
pub fn parse_or_else(sess: &ParseSess,
cfg: ast::CrateConfig,
rdr: TtReader,
ms: Vec<TokenTree> )
-> HashMap<Ident, Rc<NamedMatch>> {
match parse(sess, cfg, rdr, &ms[..]) {
Success(m) => m,
Failure(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
Error(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
}
}
/// Perform a token equality check, ignoring syntax context (that is, an
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&token::Ident(id1,_),&token::Ident(id2,_))
| (&token::Lifetime(id1),&token::Lifetime(id2)) =>
id1.name == id2.name,
_ => *t1 == *t2
}
}
pub fn parse(sess: &ParseSess,
cfg: ast::CrateConfig,
mut rdr: TtReader,
ms: &[TokenTree])
-> NamedParseResult {
let mut cur_eis = Vec::new();
cur_eis.push(initial_matcher_pos(Rc::new(ms.iter()
.cloned()
.collect()),
None,
rdr.peek().sp.lo));
loop {
let mut bb_eis = Vec::new(); // black-box parsed by parser.rs
let mut next_eis = Vec::new(); // or proceed normally
let mut eof_eis = Vec::new();
let TokenAndSpan { tok, sp } = rdr.peek();
/* we append new items to this while we go */
loop {
let mut ei = match cur_eis.pop() {
None => break, /* for each Earley Item */
Some(ei) => ei,
};
// When unzipped trees end, remove them
while ei.idx >= ei.top_elts.len() {
match ei.stack.pop() {
Some(MatcherTtFrame { elts, idx }) => {
ei.top_elts = elts;
ei.idx = idx + 1;
}
None => break
}
}
let idx = ei.idx;
let len = ei.top_elts.len();
/* at end of sequence */
if idx >= len {
// can't move out of `match`es, so:
if ei.up.is_some() {
// hack: a matcher sequence is repeating iff it has a
// parent (the top level is just a container)
// disregard separator, try to go up
// (remove this condition to make trailing seps ok)
if idx == len {
// pop from the matcher position
let mut new_pos = ei.up.clone().unwrap();
// update matches (the MBE "parse tree") by appending
// each tree as a subtree.
// I bet this is a perf problem: we're preemptively
// doing a lot of array work that will get thrown away
// most of the time.
// Only touch the binders we have actually bound
for idx in ei.match_lo..ei.match_hi {
let sub = (ei.matches[idx]).clone();
(&mut new_pos.matches[idx])
.push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
sp.hi))));
}
new_pos.match_cur = ei.match_hi;
new_pos.idx += 1;
cur_eis.push(new_pos);
}
// can we go around again?
// the *_t vars are workarounds for the lack of unary move
match ei.sep {
Some(ref t) if idx == len => { // we need a separator
// i'm conflicted about whether this should be hygienic....
// though in this case, if the separators are never legal
// idents, it shouldn't matter.
if token_name_eq(&tok, t) { //pass the separator
let mut ei_t = ei.clone();
// ei_t.match_cur = ei_t.match_lo;
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
_ => { // we don't need a separator
let mut ei_t = ei;
ei_t.match_cur = ei_t.match_lo;
ei_t.idx = 0;
cur_eis.push(ei_t);
}
}
} else {
eof_eis.push(ei);
}
} else {
match ei.top_elts.get_tt(idx) {
/* need to descend into sequence */
TtSequence(sp, seq) => {
if seq.op == ast::ZeroOrMore {
let mut new_ei = ei.clone();
new_ei.match_cur += seq.num_captures;
new_ei.idx += 1;
//we specifically matched zero repeats.
for idx in ei.match_cur..ei.match_cur + seq.num_captures {
(&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp)));
}
cur_eis.push(new_ei);
}
let matches: Vec<_> = (0..ei.matches.len())
.map(|_| Vec::new()).collect();
let ei_t = ei;
cur_eis.push(Box::new(MatcherPos {
stack: vec![],
sep: seq.separator.clone(),
idx: 0,
matches: matches,
match_lo: ei_t.match_cur,
match_cur: ei_t.match_cur,
match_hi: ei_t.match_cur + seq.num_captures,
up: Some(ei_t),
sp_lo: sp.lo,
top_elts: Tt(TtSequence(sp, seq)),
}));
}
TtToken(_, MatchNt(..)) => {
// Built-in nonterminals never start with these tokens,
// so we can eliminate them from consideration.
match tok {
token::CloseDelim(_) => {},
_ => bb_eis.push(ei),
}
}
TtToken(sp, SubstNt(..)) => {
return Error(sp, "Cannot transcribe in macro LHS".to_string())
}
seq @ TtDelimited(..) | seq @ TtToken(_, DocComment(..)) => {
let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq));
let idx = ei.idx;
ei.stack.push(MatcherTtFrame {
elts: lower_elts,
idx: idx,
});
ei.idx = 0;
cur_eis.push(ei);
}
TtToken(_, ref t) => {
let mut ei_t = ei.clone();
if token_name_eq(t,&tok) {
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
}
}
}
/* error messages here could be improved with links to orig. rules */
if token_name_eq(&tok, &token::Eof) {
if eof_eis.len() == 1 {
let mut v = Vec::new();
for dv in &mut (&mut eof_eis[0]).matches {
v.push(dv.pop().unwrap());
}
return Success(nameize(sess, ms, &v[..]));
} else if eof_eis.len() > 1 {
return Error(sp, "ambiguity: multiple successful parses".to_string());
} else {
return Failure(sp, "unexpected end of macro invocation".to_string());
}
} else {
if (!bb_eis.is_empty() &&!next_eis.is_empty())
|| bb_eis.len() > 1 {
let nts = bb_eis.iter().map(|ei| {
match ei.top_elts.get_tt(ei.idx) {
TtToken(_, MatchNt(bind, name, _, _)) => {
format!("{} ('{}')", name, bind)
}
_ => panic!()
} }).collect::<Vec<String>>().join(" or ");
return Error(sp, format!(
"local ambiguity: multiple parsing options: \
built-in NTs {} or {} other options.",
nts, next_eis.len()).to_string());
} else if bb_eis.is_empty() && next_eis.is_empty() {
return Failure(sp, format!("no rules expected the token `{}`",
pprust::token_to_string(&tok)).to_string());
} else if!next_eis.is_empty() {
/* Now process the next token */
while!next_eis.is_empty() {
cur_eis.push(next_eis.pop().unwrap());
}
rdr.next_token();
} else /* bb_eis.len() == 1 */ {
let mut rust_parser = Parser::new(sess, cfg.clone(), Box::new(rdr.clone()));
let mut ei = bb_eis.pop().unwrap();
match ei.top_elts.get_tt(ei.idx) {
TtToken(span, MatchNt(_, ident, _, _)) => {
let match_cur = ei.match_cur;
(&mut ei.matches[match_cur]).push(Rc::new(MatchedNonterminal(
parse_nt(&mut rust_parser, span, &ident.name.as_str()))));
ei.idx += 1;
ei.match_cur += 1;
}
_ => panic!()
}
cur_eis.push(ei);
for _ in 0..rust_parser.tokens_consumed {
let _ = rdr.next_token();
}
}
}
assert!(!cur_eis.is_empty());
}
}
pub fn parse_nt(p: &mut Parser, sp: Span, name: &str) -> Nonterminal {
match name {
"tt" => {
p.quote_depth += 1; //but in theory, non-quoted tts might be useful
let res = token::NtTT(P(panictry!(p.parse_token_tree())));
p.quote_depth -= 1;
return res;
}
_ => {}
}
// check at the beginning and the parser checks after each bump
panictry!(p.check_unknown_macro_variable());
match name {
"item" => match p.parse_item() {
Some(i) => token::NtItem(i),
None => panic!(p.fatal("expected an item keyword"))
},
"block" => token::NtBlock(panictry!(p.parse_block())),
"stmt" => match p.parse_stmt() {
Some(s) => token::NtStmt(s),
None => panic!(p.fatal("expected a statement"))
},
"pat" => token::NtPat(p.parse_pat()),
"expr" => token::NtExpr(p.parse_expr()),
"ty" => token::NtTy(p.parse_ty()),
// this could be handled like a token, since it is one
"ident" => match p.token {
token::Ident(sn,b) => { panictry!(p.bump()); token::NtIdent(Box::new(sn),b) }
_ => {
let token_str = pprust::token_to_string(&p.token);
panic!(p.fatal(&format!("expected ident, found {}",
&token_str[..])))
}
},
"path" => {
token::NtPath | (Box::new(panictry!(p.parse_path(LifetimeAndTypesWithoutColons))))
}
"meta" => token: | conditional_block |
|
macro_parser.rs | looks exactly like the last step)
//!
//! Remaining input: `a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b]
//!
//! - - - Advance over a `b`. - - -
//!
//! Remaining input: ``
//! eof: [a $( a )* a b ·]
pub use self::NamedMatch::*;
pub use self::ParseResult::*;
use self::TokenTreeOrTokenTreeVec::*;
use ast;
use ast::{TokenTree, Ident};
use ast::{TtDelimited, TtSequence, TtToken};
use codemap::{BytePos, mk_sp, Span};
use codemap;
use parse::lexer::*; //resolve bug?
use parse::ParseSess;
use parse::attr::ParserAttr;
use parse::parser::{LifetimeAndTypesWithoutColons, Parser};
use parse::token::{Eof, DocComment, MatchNt, SubstNt};
use parse::token::{Token, Nonterminal};
use parse::token;
use print::pprust;
use ptr::P;
use std::mem;
use std::rc::Rc;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Vacant, Occupied};
// To avoid costly uniqueness checks, we require that `MatchSeq` always has
// a nonempty body.
#[derive(Clone)]
enum TokenTreeOrTokenTreeVec {
Tt(ast::TokenTree),
TtSeq(Rc<Vec<ast::TokenTree>>),
}
impl TokenTreeOrTokenTreeVec {
fn len(&self) -> usize {
match self {
&TtSeq(ref v) => v.len(),
&Tt(ref tt) => tt.len(),
}
}
fn get_tt(&self, index: usize) -> TokenTree {
match self {
&TtSeq(ref v) => v[index].clone(),
&Tt(ref tt) => tt.get_tt(index),
}
}
}
/// an unzipping of `TokenTree`s
#[derive(Clone)]
struct MatcherTtFrame {
elts: TokenTreeOrTokenTreeVec,
idx: usize,
}
#[derive(Clone)]
pub struct MatcherPos {
stack: Vec<MatcherTtFrame>,
top_elts: TokenTreeOrTokenTreeVec,
sep: Option<Token>,
idx: usize,
up: Option<Box<MatcherPos>>,
matches: Vec<Vec<Rc<NamedMatch>>>,
match_lo: usize,
match_cur: usize,
match_hi: usize,
sp_lo: BytePos,
}
pub fn count_names(ms: &[TokenTree]) -> usize {
ms.iter().fold(0, |count, elt| {
count + match elt {
&TtSequence(_, ref seq) => {
seq.num_captures
}
&TtDelimited(_, ref delim) => {
count_names(&delim.tts)
}
&TtToken(_, MatchNt(..)) => {
1
}
&TtToken(_, _) => 0,
}
})
}
pub fn initial_matcher_pos(ms: Rc<Vec<TokenTree>>, sep: Option<Token>, lo: BytePos)
-> Box<MatcherPos> {
let match_idx_hi = count_names(&ms[..]);
let matches: Vec<_> = (0..match_idx_hi).map(|_| Vec::new()).collect();
Box::new(MatcherPos {
stack: vec![],
top_elts: TtSeq(ms),
sep: sep,
idx: 0,
up: None,
matches: matches,
match_lo: 0,
match_cur: 0,
match_hi: match_idx_hi,
sp_lo: lo
})
}
/// NamedMatch is a pattern-match result for a single token::MATCH_NONTERMINAL:
/// so it is associated with a single ident in a parse, and all
/// `MatchedNonterminal`s in the NamedMatch have the same nonterminal type
/// (expr, item, etc). Each leaf in a single NamedMatch corresponds to a
/// single token::MATCH_NONTERMINAL in the TokenTree that produced it.
///
/// The in-memory structure of a particular NamedMatch represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each MatchedSeq in the NamedMatch, and the identity of the
/// `MatchedNonterminal`s, will depend on the token tree it was applied to:
/// each MatchedSeq corresponds to a single TTSeq in the originating
/// token tree. The depth of the NamedMatch structure will therefore depend
/// only on the nesting depth of `ast::TTSeq`s in the originating
/// token tree it was derived from.
pub enum NamedMatch {
MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),
MatchedNonterminal(Nonterminal)
}
pub fn nameize(p_s: &ParseSess, ms: &[TokenTree], res: &[Rc<NamedMatch>])
-> HashMap<Ident, Rc<NamedMatch>> {
fn n_rec(p_s: &ParseSess, m: &TokenTree, res: &[Rc<NamedMatch>],
ret_val: &mut HashMap<Ident, Rc<NamedMatch>>, idx: &mut usize) {
match m {
&TtSequence(_, ref seq) => {
for next_m in &seq.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtDelimited(_, ref delim) => {
for next_m in &delim.tts {
n_rec(p_s, next_m, res, ret_val, idx)
}
}
&TtToken(sp, MatchNt(bind_name, _, _, _)) => {
match ret_val.entry(bind_name) {
Vacant(spot) => {
spot.insert(res[*idx].clone());
*idx += 1;
}
Occupied(..) => {
panic!(p_s.span_diagnostic
.span_fatal(sp,
&format!("duplicated bind name: {}",
bind_name)))
}
}
}
&TtToken(_, SubstNt(..)) => panic!("Cannot fill in a NT"),
&TtToken(_, _) => (),
}
}
let mut ret_val = HashMap::new();
let mut idx = 0;
for m in ms { n_rec(p_s, m, res, &mut ret_val, &mut idx) }
ret_val
}
pub enum ParseResult<T> {
Success(T),
Failure(codemap::Span, String),
Error(codemap::Span, String)
}
pub type NamedParseResult = ParseResult<HashMap<Ident, Rc<NamedMatch>>>;
pub type PositionalParseResult = ParseResult<Vec<Rc<NamedMatch>>>;
pub fn parse_or_else(sess: &ParseSess,
cfg: ast::CrateConfig,
rdr: TtReader,
ms: Vec<TokenTree> )
-> HashMap<Ident, Rc<NamedMatch>> {
match parse(sess, cfg, rdr, &ms[..]) {
Success(m) => m,
Failure(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
Error(sp, str) => {
panic!(sess.span_diagnostic.span_fatal(sp, &str[..]))
}
}
}
/// Perform a token equality check, ignoring syntax context (that is, an
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&token::Ident(id1,_),&token::Ident(id2,_))
| (&token::Lifetime(id1),&token::Lifetime(id2)) =>
id1.name == id2.name,
_ => *t1 == *t2
}
}
pub fn parse(sess: &ParseSess,
cfg: ast::CrateConfig,
mut rdr: TtReader,
ms: &[TokenTree])
-> NamedParseResult {
let mut cur_eis = Vec::new();
cur_eis.push(initial_matcher_pos(Rc::new(ms.iter()
.cloned()
.collect()),
None,
rdr.peek().sp.lo));
loop {
let mut bb_eis = Vec::new(); // black-box parsed by parser.rs
let mut next_eis = Vec::new(); // or proceed normally
let mut eof_eis = Vec::new();
let TokenAndSpan { tok, sp } = rdr.peek();
/* we append new items to this while we go */
loop {
let mut ei = match cur_eis.pop() {
None => break, /* for each Earley Item */
Some(ei) => ei,
};
// When unzipped trees end, remove them
while ei.idx >= ei.top_elts.len() {
match ei.stack.pop() {
Some(MatcherTtFrame { elts, idx }) => {
ei.top_elts = elts;
ei.idx = idx + 1;
}
None => break
}
}
let idx = ei.idx;
let len = ei.top_elts.len();
/* at end of sequence */
if idx >= len {
// can't move out of `match`es, so:
if ei.up.is_some() {
// hack: a matcher sequence is repeating iff it has a
// parent (the top level is just a container)
// disregard separator, try to go up
// (remove this condition to make trailing seps ok)
if idx == len {
// pop from the matcher position
let mut new_pos = ei.up.clone().unwrap();
// update matches (the MBE "parse tree") by appending
// each tree as a subtree.
// I bet this is a perf problem: we're preemptively
// doing a lot of array work that will get thrown away
// most of the time.
// Only touch the binders we have actually bound
for idx in ei.match_lo..ei.match_hi {
let sub = (ei.matches[idx]).clone();
(&mut new_pos.matches[idx])
.push(Rc::new(MatchedSeq(sub, mk_sp(ei.sp_lo,
sp.hi))));
}
new_pos.match_cur = ei.match_hi;
new_pos.idx += 1;
cur_eis.push(new_pos);
}
// can we go around again?
// the *_t vars are workarounds for the lack of unary move
match ei.sep {
Some(ref t) if idx == len => { // we need a separator
// i'm conflicted about whether this should be hygienic....
// though in this case, if the separators are never legal
// idents, it shouldn't matter.
if token_name_eq(&tok, t) { //pass the separator
let mut ei_t = ei.clone();
// ei_t.match_cur = ei_t.match_lo;
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
_ => { // we don't need a separator
let mut ei_t = ei;
ei_t.match_cur = ei_t.match_lo;
ei_t.idx = 0;
cur_eis.push(ei_t);
}
}
} else {
eof_eis.push(ei);
}
} else {
match ei.top_elts.get_tt(idx) {
/* need to descend into sequence */
TtSequence(sp, seq) => {
if seq.op == ast::ZeroOrMore {
let mut new_ei = ei.clone();
new_ei.match_cur += seq.num_captures;
new_ei.idx += 1;
//we specifically matched zero repeats.
for idx in ei.match_cur..ei.match_cur + seq.num_captures {
(&mut new_ei.matches[idx]).push(Rc::new(MatchedSeq(vec![], sp)));
}
cur_eis.push(new_ei);
}
let matches: Vec<_> = (0..ei.matches.len())
.map(|_| Vec::new()).collect();
let ei_t = ei;
cur_eis.push(Box::new(MatcherPos {
stack: vec![],
sep: seq.separator.clone(),
idx: 0,
matches: matches,
match_lo: ei_t.match_cur,
match_cur: ei_t.match_cur,
match_hi: ei_t.match_cur + seq.num_captures,
up: Some(ei_t),
sp_lo: sp.lo,
top_elts: Tt(TtSequence(sp, seq)),
}));
} | TtToken(_, MatchNt(..)) => {
// Built-in nonterminals never start with these tokens,
// so we can eliminate them from consideration.
match tok {
token::CloseDelim(_) => {},
_ => bb_eis.push(ei),
}
}
TtToken(sp, SubstNt(..)) => {
return Error(sp, "Cannot transcribe in macro LHS".to_string())
}
seq @ TtDelimited(..) | seq @ TtToken(_, DocComment(..)) => {
let lower_elts = mem::replace(&mut ei.top_elts, Tt(seq));
let idx = ei.idx;
ei.stack.push(MatcherTtFrame {
elts: lower_elts,
idx: idx,
});
ei.idx = 0;
cur_eis.push(ei);
}
TtToken(_, ref t) => {
let mut ei_t = ei.clone();
if token_name_eq(t,&tok) {
ei_t.idx += 1;
next_eis.push(ei_t);
}
}
}
}
}
/* error messages here could be improved with links to orig. rules */
if token_name_eq(&tok, &token::Eof) {
if eof_eis.len() == 1 {
let mut v = Vec::new();
for dv in &mut (&mut eof_eis[0]).matches {
v.push(dv.pop().unwrap());
}
return Success(nameize(sess, ms, &v[..]));
} else if eof_eis.len() > 1 {
return Error(sp, "ambiguity: multiple successful parses".to_string());
} else {
return Failure(sp, "unexpected end of macro invocation".to_string());
}
} else {
if (!bb_eis.is_empty() &&!next_eis.is_empty())
|| bb_eis.len() > 1 {
let nts = bb_eis.iter().map(|ei| {
match ei.top_elts.get_tt(ei.idx) {
TtToken(_, MatchNt(bind, name, _, _)) => {
format!("{} ('{}')", name, bind)
}
| random_line_split |
|
commands.rs | use std::process::{Command, Child, ExitStatus, Output, Stdio};
use std::io::{Read, Write, Error as IOError};
use std::collections::BTreeSet;
use branches::Branches;
use error::Error;
use options::Options;
pub fn spawn_piped(args: &[&str]) -> Child {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap_or_else(|e| panic!("Error with child process: {}", e))
}
pub fn run_command_with_no_output(args: &[&str]) {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.output()
.unwrap_or_else(|e| panic!("Error with command: {}", e));
}
pub fn output(args: &[&str]) -> String {
let result = run_command(args);
String::from_utf8(result.stdout).unwrap().trim().to_owned()
}
pub fn run_command(args: &[&str]) -> Output {
run_command_with_result(args).unwrap_or_else(|e| panic!("Error with command: {}", e))
}
pub fn run_command_with_result(args: &[&str]) -> Result<Output, IOError> {
Command::new(&args[0])
.args(&args[1..])
.output()
}
pub fn run_command_with_status(args: &[&str]) -> Result<ExitStatus, IOError> {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.status()
}
pub fn validate_git_installation() -> Result<(), Error> {
match Command::new("git").output() {
Ok(_) => Ok(()),
Err(_) => Err(Error::GitInstallationError),
}
}
pub fn delete_local_branches(branches: &Branches) -> String {
let xargs = spawn_piped(&["xargs", "git", "branch", "-D"]);
{
xargs.stdin.unwrap().write_all(branches.string.as_bytes()).unwrap()
}
let mut branches_delete_result = String::new();
xargs.stdout.unwrap().read_to_string(&mut branches_delete_result).unwrap();
branches_delete_result
}
pub fn delete_remote_branches(branches: &Branches, options: &Options) -> String {
let xargs = spawn_piped(&["xargs", "git", "push", &options.remote, "--delete"]);
let remote_branches_cmd = run_command(&["git", "branch", "-r"]);
let s = String::from_utf8(remote_branches_cmd.stdout).unwrap();
let all_remote_branches = s.split('\n').collect::<Vec<&str>>();
let origin_for_trim = &format!("{}/", &options.remote)[..];
let b_tree_remotes = all_remote_branches.iter()
.map(|b| b.trim().trim_start_matches(origin_for_trim).to_owned())
.collect::<BTreeSet<String>>(); | }
let intersection: Vec<_> = b_tree_remotes.intersection(&b_tree_branches).cloned().collect();
{
xargs.stdin.unwrap().write_all(intersection.join("\n").as_bytes()).unwrap()
}
let mut stderr = String::new();
xargs.stderr.unwrap().read_to_string(&mut stderr).unwrap();
// Everything is written to stderr, so we need to process that
let split = stderr.split('\n');
let vec: Vec<&str> = split.collect();
let mut output = vec![];
for s in vec {
if s.contains("error: unable to delete '") {
let branch = s.trim_start_matches("error: unable to delete '")
.trim_end_matches("': remote ref does not exist");
output.push(branch.to_owned() + " was already deleted in the remote.");
} else if s.contains(" - [deleted]") {
output.push(s.to_owned());
}
}
output.join("\n")
}
#[cfg(test)]
mod test {
use super::spawn_piped;
use std::io::{Read, Write};
#[test]
fn test_spawn_piped() {
let echo = spawn_piped(&["grep", "foo"]);
{
echo.stdin.unwrap().write_all("foo\nbar\nbaz".as_bytes()).unwrap()
}
let mut stdout = String::new();
echo.stdout.unwrap().read_to_string(&mut stdout).unwrap();
assert_eq!(stdout, "foo\n");
}
} |
let mut b_tree_branches = BTreeSet::new();
for branch in branches.vec.clone() {
b_tree_branches.insert(branch); | random_line_split |
commands.rs | use std::process::{Command, Child, ExitStatus, Output, Stdio};
use std::io::{Read, Write, Error as IOError};
use std::collections::BTreeSet;
use branches::Branches;
use error::Error;
use options::Options;
pub fn spawn_piped(args: &[&str]) -> Child {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap_or_else(|e| panic!("Error with child process: {}", e))
}
pub fn run_command_with_no_output(args: &[&str]) {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.output()
.unwrap_or_else(|e| panic!("Error with command: {}", e));
}
pub fn output(args: &[&str]) -> String {
let result = run_command(args);
String::from_utf8(result.stdout).unwrap().trim().to_owned()
}
pub fn run_command(args: &[&str]) -> Output {
run_command_with_result(args).unwrap_or_else(|e| panic!("Error with command: {}", e))
}
pub fn run_command_with_result(args: &[&str]) -> Result<Output, IOError> {
Command::new(&args[0])
.args(&args[1..])
.output()
}
pub fn run_command_with_status(args: &[&str]) -> Result<ExitStatus, IOError> {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.status()
}
pub fn validate_git_installation() -> Result<(), Error> {
match Command::new("git").output() {
Ok(_) => Ok(()),
Err(_) => Err(Error::GitInstallationError),
}
}
pub fn delete_local_branches(branches: &Branches) -> String {
let xargs = spawn_piped(&["xargs", "git", "branch", "-D"]);
{
xargs.stdin.unwrap().write_all(branches.string.as_bytes()).unwrap()
}
let mut branches_delete_result = String::new();
xargs.stdout.unwrap().read_to_string(&mut branches_delete_result).unwrap();
branches_delete_result
}
pub fn delete_remote_branches(branches: &Branches, options: &Options) -> String {
let xargs = spawn_piped(&["xargs", "git", "push", &options.remote, "--delete"]);
let remote_branches_cmd = run_command(&["git", "branch", "-r"]);
let s = String::from_utf8(remote_branches_cmd.stdout).unwrap();
let all_remote_branches = s.split('\n').collect::<Vec<&str>>();
let origin_for_trim = &format!("{}/", &options.remote)[..];
let b_tree_remotes = all_remote_branches.iter()
.map(|b| b.trim().trim_start_matches(origin_for_trim).to_owned())
.collect::<BTreeSet<String>>();
let mut b_tree_branches = BTreeSet::new();
for branch in branches.vec.clone() {
b_tree_branches.insert(branch);
}
let intersection: Vec<_> = b_tree_remotes.intersection(&b_tree_branches).cloned().collect();
{
xargs.stdin.unwrap().write_all(intersection.join("\n").as_bytes()).unwrap()
}
let mut stderr = String::new();
xargs.stderr.unwrap().read_to_string(&mut stderr).unwrap();
// Everything is written to stderr, so we need to process that
let split = stderr.split('\n');
let vec: Vec<&str> = split.collect();
let mut output = vec![];
for s in vec {
if s.contains("error: unable to delete '") {
let branch = s.trim_start_matches("error: unable to delete '")
.trim_end_matches("': remote ref does not exist");
output.push(branch.to_owned() + " was already deleted in the remote.");
} else if s.contains(" - [deleted]") |
}
output.join("\n")
}
#[cfg(test)]
mod test {
use super::spawn_piped;
use std::io::{Read, Write};
#[test]
fn test_spawn_piped() {
let echo = spawn_piped(&["grep", "foo"]);
{
echo.stdin.unwrap().write_all("foo\nbar\nbaz".as_bytes()).unwrap()
}
let mut stdout = String::new();
echo.stdout.unwrap().read_to_string(&mut stdout).unwrap();
assert_eq!(stdout, "foo\n");
}
}
| {
output.push(s.to_owned());
} | conditional_block |
commands.rs | use std::process::{Command, Child, ExitStatus, Output, Stdio};
use std::io::{Read, Write, Error as IOError};
use std::collections::BTreeSet;
use branches::Branches;
use error::Error;
use options::Options;
pub fn spawn_piped(args: &[&str]) -> Child {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap_or_else(|e| panic!("Error with child process: {}", e))
}
pub fn run_command_with_no_output(args: &[&str]) {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.output()
.unwrap_or_else(|e| panic!("Error with command: {}", e));
}
pub fn output(args: &[&str]) -> String {
let result = run_command(args);
String::from_utf8(result.stdout).unwrap().trim().to_owned()
}
pub fn run_command(args: &[&str]) -> Output {
run_command_with_result(args).unwrap_or_else(|e| panic!("Error with command: {}", e))
}
pub fn run_command_with_result(args: &[&str]) -> Result<Output, IOError> {
Command::new(&args[0])
.args(&args[1..])
.output()
}
pub fn run_command_with_status(args: &[&str]) -> Result<ExitStatus, IOError> {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.status()
}
pub fn validate_git_installation() -> Result<(), Error> {
match Command::new("git").output() {
Ok(_) => Ok(()),
Err(_) => Err(Error::GitInstallationError),
}
}
pub fn delete_local_branches(branches: &Branches) -> String {
let xargs = spawn_piped(&["xargs", "git", "branch", "-D"]);
{
xargs.stdin.unwrap().write_all(branches.string.as_bytes()).unwrap()
}
let mut branches_delete_result = String::new();
xargs.stdout.unwrap().read_to_string(&mut branches_delete_result).unwrap();
branches_delete_result
}
pub fn delete_remote_branches(branches: &Branches, options: &Options) -> String {
let xargs = spawn_piped(&["xargs", "git", "push", &options.remote, "--delete"]);
let remote_branches_cmd = run_command(&["git", "branch", "-r"]);
let s = String::from_utf8(remote_branches_cmd.stdout).unwrap();
let all_remote_branches = s.split('\n').collect::<Vec<&str>>();
let origin_for_trim = &format!("{}/", &options.remote)[..];
let b_tree_remotes = all_remote_branches.iter()
.map(|b| b.trim().trim_start_matches(origin_for_trim).to_owned())
.collect::<BTreeSet<String>>();
let mut b_tree_branches = BTreeSet::new();
for branch in branches.vec.clone() {
b_tree_branches.insert(branch);
}
let intersection: Vec<_> = b_tree_remotes.intersection(&b_tree_branches).cloned().collect();
{
xargs.stdin.unwrap().write_all(intersection.join("\n").as_bytes()).unwrap()
}
let mut stderr = String::new();
xargs.stderr.unwrap().read_to_string(&mut stderr).unwrap();
// Everything is written to stderr, so we need to process that
let split = stderr.split('\n');
let vec: Vec<&str> = split.collect();
let mut output = vec![];
for s in vec {
if s.contains("error: unable to delete '") {
let branch = s.trim_start_matches("error: unable to delete '")
.trim_end_matches("': remote ref does not exist");
output.push(branch.to_owned() + " was already deleted in the remote.");
} else if s.contains(" - [deleted]") {
output.push(s.to_owned());
}
}
output.join("\n")
}
#[cfg(test)]
mod test {
use super::spawn_piped;
use std::io::{Read, Write};
#[test]
fn test_spawn_piped() |
}
| {
let echo = spawn_piped(&["grep", "foo"]);
{
echo.stdin.unwrap().write_all("foo\nbar\nbaz".as_bytes()).unwrap()
}
let mut stdout = String::new();
echo.stdout.unwrap().read_to_string(&mut stdout).unwrap();
assert_eq!(stdout, "foo\n");
} | identifier_body |
commands.rs | use std::process::{Command, Child, ExitStatus, Output, Stdio};
use std::io::{Read, Write, Error as IOError};
use std::collections::BTreeSet;
use branches::Branches;
use error::Error;
use options::Options;
pub fn | (args: &[&str]) -> Child {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap_or_else(|e| panic!("Error with child process: {}", e))
}
pub fn run_command_with_no_output(args: &[&str]) {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.output()
.unwrap_or_else(|e| panic!("Error with command: {}", e));
}
pub fn output(args: &[&str]) -> String {
let result = run_command(args);
String::from_utf8(result.stdout).unwrap().trim().to_owned()
}
pub fn run_command(args: &[&str]) -> Output {
run_command_with_result(args).unwrap_or_else(|e| panic!("Error with command: {}", e))
}
pub fn run_command_with_result(args: &[&str]) -> Result<Output, IOError> {
Command::new(&args[0])
.args(&args[1..])
.output()
}
pub fn run_command_with_status(args: &[&str]) -> Result<ExitStatus, IOError> {
Command::new(&args[0])
.args(&args[1..])
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.status()
}
pub fn validate_git_installation() -> Result<(), Error> {
match Command::new("git").output() {
Ok(_) => Ok(()),
Err(_) => Err(Error::GitInstallationError),
}
}
pub fn delete_local_branches(branches: &Branches) -> String {
let xargs = spawn_piped(&["xargs", "git", "branch", "-D"]);
{
xargs.stdin.unwrap().write_all(branches.string.as_bytes()).unwrap()
}
let mut branches_delete_result = String::new();
xargs.stdout.unwrap().read_to_string(&mut branches_delete_result).unwrap();
branches_delete_result
}
pub fn delete_remote_branches(branches: &Branches, options: &Options) -> String {
let xargs = spawn_piped(&["xargs", "git", "push", &options.remote, "--delete"]);
let remote_branches_cmd = run_command(&["git", "branch", "-r"]);
let s = String::from_utf8(remote_branches_cmd.stdout).unwrap();
let all_remote_branches = s.split('\n').collect::<Vec<&str>>();
let origin_for_trim = &format!("{}/", &options.remote)[..];
let b_tree_remotes = all_remote_branches.iter()
.map(|b| b.trim().trim_start_matches(origin_for_trim).to_owned())
.collect::<BTreeSet<String>>();
let mut b_tree_branches = BTreeSet::new();
for branch in branches.vec.clone() {
b_tree_branches.insert(branch);
}
let intersection: Vec<_> = b_tree_remotes.intersection(&b_tree_branches).cloned().collect();
{
xargs.stdin.unwrap().write_all(intersection.join("\n").as_bytes()).unwrap()
}
let mut stderr = String::new();
xargs.stderr.unwrap().read_to_string(&mut stderr).unwrap();
// Everything is written to stderr, so we need to process that
let split = stderr.split('\n');
let vec: Vec<&str> = split.collect();
let mut output = vec![];
for s in vec {
if s.contains("error: unable to delete '") {
let branch = s.trim_start_matches("error: unable to delete '")
.trim_end_matches("': remote ref does not exist");
output.push(branch.to_owned() + " was already deleted in the remote.");
} else if s.contains(" - [deleted]") {
output.push(s.to_owned());
}
}
output.join("\n")
}
#[cfg(test)]
mod test {
use super::spawn_piped;
use std::io::{Read, Write};
#[test]
fn test_spawn_piped() {
let echo = spawn_piped(&["grep", "foo"]);
{
echo.stdin.unwrap().write_all("foo\nbar\nbaz".as_bytes()).unwrap()
}
let mut stdout = String::new();
echo.stdout.unwrap().read_to_string(&mut stdout).unwrap();
assert_eq!(stdout, "foo\n");
}
}
| spawn_piped | identifier_name |
test.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Code that generates a test runner to run all the tests in a crate
use driver::session;
use front::config;
use std::vec;
use syntax::ast_util::*;
use syntax::attr::AttrMetaMethods;
use syntax::attr;
use syntax::codemap::{dummy_sp, Span, ExpnInfo, NameAndSpan, MacroAttribute};
use syntax::codemap;
use syntax::ext::base::ExtCtxt;
use syntax::fold::ast_fold;
use syntax::fold;
use syntax::opt_vec;
use syntax::print::pprust;
use syntax::{ast, ast_util};
use syntax::util::small_vector::SmallVector;
struct Test {
span: Span,
path: ~[ast::Ident],
bench: bool,
ignore: bool,
should_fail: bool
}
struct TestCtxt {
sess: session::Session,
path: ~[ast::Ident],
ext_cx: @ExtCtxt,
testfns: ~[Test],
is_extra: bool,
config: ast::CrateConfig,
}
// Traverse the crate, collecting all the test functions, eliding any
// existing main functions, and synthesizing a main test harness
pub fn modify_for_testing(sess: session::Session,
crate: ast::Crate) -> ast::Crate {
// We generate the test harness when building in the 'test'
// configuration, either with the '--test' or '--cfg test'
// command line options.
let should_test = attr::contains_name(crate.config, "test");
if should_test {
generate_test_harness(sess, crate)
} else {
strip_test_functions(crate)
}
}
struct TestHarnessGenerator {
cx: @mut TestCtxt,
}
impl fold::ast_fold for TestHarnessGenerator {
fn fold_crate(&self, c: ast::Crate) -> ast::Crate {
let folded = fold::noop_fold_crate(c, self);
// Add a special __test module to the crate that will contain code
// generated for the test harness
ast::Crate {
module: add_test_module(self.cx, &folded.module),
.. folded
}
}
fn fold_item(&self, i: @ast::item) -> SmallVector<@ast::item> {
self.cx.path.push(i.ident);
debug!("current path: {}",
ast_util::path_name_i(self.cx.path.clone()));
if is_test_fn(self.cx, i) || is_bench_fn(i) {
match i.node {
ast::item_fn(_, purity, _, _, _)
if purity == ast::unsafe_fn => |
_ => {
debug!("this is a test function");
let test = Test {
span: i.span,
path: self.cx.path.clone(),
bench: is_bench_fn(i),
ignore: is_ignored(self.cx, i),
should_fail: should_fail(i)
};
self.cx.testfns.push(test);
// debug!("have {} test/bench functions",
// cx.testfns.len());
}
}
}
let res = fold::noop_fold_item(i, self);
self.cx.path.pop();
res
}
fn fold_mod(&self, m: &ast::_mod) -> ast::_mod {
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
fn nomain(cx: @mut TestCtxt, item: @ast::item) -> @ast::item {
if!*cx.sess.building_library {
@ast::item {
attrs: item.attrs.iter().filter_map(|attr| {
if "main"!= attr.name() {
Some(*attr)
} else {
None
}
}).collect(),
.. (*item).clone()
}
} else {
item
}
}
let mod_nomain = ast::_mod {
view_items: m.view_items.clone(),
items: m.items.iter().map(|i| nomain(self.cx, *i)).collect(),
};
fold::noop_fold_mod(&mod_nomain, self)
}
}
fn generate_test_harness(sess: session::Session, crate: ast::Crate)
-> ast::Crate {
let cx: @mut TestCtxt = @mut TestCtxt {
sess: sess,
ext_cx: ExtCtxt::new(sess.parse_sess, sess.opts.cfg.clone()),
path: ~[],
testfns: ~[],
is_extra: is_extra(&crate),
config: crate.config.clone(),
};
let ext_cx = cx.ext_cx;
ext_cx.bt_push(ExpnInfo {
call_site: dummy_sp(),
callee: NameAndSpan {
name: @"test",
format: MacroAttribute,
span: None
}
});
let fold = TestHarnessGenerator {
cx: cx
};
let res = fold.fold_crate(crate);
ext_cx.bt_pop();
return res;
}
fn strip_test_functions(crate: ast::Crate) -> ast::Crate {
// When not compiling with --test we should not compile the
// #[test] functions
config::strip_items(crate, |attrs| {
!attr::contains_name(attrs, "test") &&
!attr::contains_name(attrs, "bench")
})
}
fn is_test_fn(cx: @mut TestCtxt, i: @ast::item) -> bool {
let has_test_attr = attr::contains_name(i.attrs, "test");
fn has_test_signature(i: @ast::item) -> bool {
match &i.node {
&ast::item_fn(ref decl, _, _, ref generics, _) => {
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
decl.inputs.is_empty()
&& no_output
&&!generics.is_parameterized()
}
_ => false
}
}
if has_test_attr &&!has_test_signature(i) {
let sess = cx.sess;
sess.span_err(
i.span,
"functions used as tests must have signature fn() -> ()."
);
}
return has_test_attr && has_test_signature(i);
}
fn is_bench_fn(i: @ast::item) -> bool {
let has_bench_attr = attr::contains_name(i.attrs, "bench");
fn has_test_signature(i: @ast::item) -> bool {
match i.node {
ast::item_fn(ref decl, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
let tparm_cnt = generics.ty_params.len();
// NB: inadequate check, but we're running
// well before resolve, can't get too deep.
input_cnt == 1u
&& no_output && tparm_cnt == 0u
}
_ => false
}
}
return has_bench_attr && has_test_signature(i);
}
fn is_ignored(cx: @mut TestCtxt, i: @ast::item) -> bool {
i.attrs.iter().any(|attr| {
// check ignore(cfg(foo, bar))
"ignore" == attr.name() && match attr.meta_item_list() {
Some(ref cfgs) => attr::test_cfg(cx.config, cfgs.iter().map(|x| *x)),
None => true
}
})
}
fn should_fail(i: @ast::item) -> bool {
attr::contains_name(i.attrs, "should_fail")
}
fn add_test_module(cx: &TestCtxt, m: &ast::_mod) -> ast::_mod {
let testmod = mk_test_module(cx);
ast::_mod {
items: vec::append_one(m.items.clone(), testmod),
..(*m).clone()
}
}
/*
We're going to be building a module that looks more or less like:
mod __test {
#[!resolve_unexported]
extern mod extra (name = "extra", vers = "...");
fn main() {
#[main];
extra::test::test_main_static(::os::args(), tests)
}
static tests : &'static [extra::test::TestDescAndFn] = &[
... the list of tests in the crate...
];
}
*/
fn mk_std(cx: &TestCtxt) -> ast::view_item {
let id_extra = cx.sess.ident_of("extra");
let vi = if cx.is_extra {
ast::view_item_use(
~[@nospan(ast::view_path_simple(id_extra,
path_node(~[id_extra]),
ast::DUMMY_NODE_ID))])
} else {
let mi = attr::mk_name_value_item_str(@"vers", @"0.9-pre");
ast::view_item_extern_mod(id_extra, None, ~[mi], ast::DUMMY_NODE_ID)
};
ast::view_item {
node: vi,
attrs: ~[],
vis: ast::public,
span: dummy_sp()
}
}
fn mk_test_module(cx: &TestCtxt) -> @ast::item {
// Link to extra
let view_items = ~[mk_std(cx)];
// A constant vector of test descriptors.
let tests = mk_tests(cx);
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = (quote_item!(cx.ext_cx,
pub fn main() {
#[main];
extra::test::test_main_static(::std::os::args(), TESTS);
}
)).unwrap();
let testmod = ast::_mod {
view_items: view_items,
items: ~[mainfn, tests],
};
let item_ = ast::item_mod(testmod);
// This attribute tells resolve to let us call unexported functions
let resolve_unexported_attr =
attr::mk_attr(attr::mk_word_item(@"!resolve_unexported"));
let item = ast::item {
ident: cx.sess.ident_of("__test"),
attrs: ~[resolve_unexported_attr],
id: ast::DUMMY_NODE_ID,
node: item_,
vis: ast::public,
span: dummy_sp(),
};
debug!("Synthetic test module:\n{}\n",
pprust::item_to_str(&item, cx.sess.intr()));
return @item;
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
codemap::Spanned { node: t, span: dummy_sp() }
}
fn path_node(ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: false,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn path_node_global(ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: true,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn mk_tests(cx: &TestCtxt) -> @ast::item {
// The vector of test_descs for this crate
let test_descs = mk_test_descs(cx);
(quote_item!(cx.ext_cx,
pub static TESTS : &'static [self::extra::test::TestDescAndFn] =
$test_descs
;
)).unwrap()
}
fn is_extra(crate: &ast::Crate) -> bool {
let items = attr::find_linkage_metas(crate.attrs);
match attr::last_meta_item_value_str_by_name(items, "name") {
Some(s) if "extra" == s => true,
_ => false
}
}
fn mk_test_descs(cx: &TestCtxt) -> @ast::Expr {
debug!("building test vector from {} tests", cx.testfns.len());
let mut descs = ~[];
for test in cx.testfns.iter() {
descs.push(mk_test_desc_and_fn_rec(cx, test));
}
let inner_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVec(descs, ast::MutImmutable),
span: dummy_sp(),
};
@ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVstore(inner_expr, ast::ExprVstoreSlice),
span: dummy_sp(),
}
}
fn mk_test_desc_and_fn_rec(cx: &TestCtxt, test: &Test) -> @ast::Expr {
let span = test.span;
let path = test.path.clone();
debug!("encoding {}", ast_util::path_name_i(path));
let name_lit: ast::lit =
nospan(ast::lit_str(ast_util::path_name_i(path).to_managed(), ast::CookedStr));
let name_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprLit(@name_lit),
span: span
};
let fn_path = path_node_global(path);
let fn_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprPath(fn_path),
span: span,
};
let t_expr = if test.bench {
quote_expr!(cx.ext_cx, self::extra::test::StaticBenchFn($fn_expr) )
} else {
quote_expr!(cx.ext_cx, self::extra::test::StaticTestFn($fn_expr) )
};
let ignore_expr = if test.ignore {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let fail_expr = if test.should_fail {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let e = quote_expr!(cx.ext_cx,
self::extra::test::TestDescAndFn {
desc: self::extra::test::TestDesc {
name: self::extra::test::StaticTestName($name_expr),
ignore: $ignore_expr,
should_fail: $fail_expr
},
testfn: $t_expr,
}
);
e
}
| {
let sess = self.cx.sess;
sess.span_fatal(i.span,
"unsafe functions cannot be used for \
tests");
} | conditional_block |
test.rs | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Code that generates a test runner to run all the tests in a crate
use driver::session;
use front::config;
use std::vec;
use syntax::ast_util::*;
use syntax::attr::AttrMetaMethods;
use syntax::attr;
use syntax::codemap::{dummy_sp, Span, ExpnInfo, NameAndSpan, MacroAttribute};
use syntax::codemap;
use syntax::ext::base::ExtCtxt;
use syntax::fold::ast_fold;
use syntax::fold;
use syntax::opt_vec;
use syntax::print::pprust;
use syntax::{ast, ast_util};
use syntax::util::small_vector::SmallVector;
struct Test {
span: Span,
path: ~[ast::Ident],
bench: bool,
ignore: bool,
should_fail: bool
}
struct TestCtxt {
sess: session::Session,
path: ~[ast::Ident],
ext_cx: @ExtCtxt,
testfns: ~[Test],
is_extra: bool,
config: ast::CrateConfig,
}
// Traverse the crate, collecting all the test functions, eliding any
// existing main functions, and synthesizing a main test harness
pub fn modify_for_testing(sess: session::Session,
crate: ast::Crate) -> ast::Crate {
// We generate the test harness when building in the 'test'
// configuration, either with the '--test' or '--cfg test'
// command line options.
let should_test = attr::contains_name(crate.config, "test");
if should_test {
generate_test_harness(sess, crate)
} else {
strip_test_functions(crate)
}
}
struct TestHarnessGenerator {
cx: @mut TestCtxt,
}
impl fold::ast_fold for TestHarnessGenerator {
fn fold_crate(&self, c: ast::Crate) -> ast::Crate {
let folded = fold::noop_fold_crate(c, self);
// Add a special __test module to the crate that will contain code
// generated for the test harness
ast::Crate {
module: add_test_module(self.cx, &folded.module),
.. folded
}
}
fn fold_item(&self, i: @ast::item) -> SmallVector<@ast::item> {
self.cx.path.push(i.ident);
debug!("current path: {}",
ast_util::path_name_i(self.cx.path.clone()));
if is_test_fn(self.cx, i) || is_bench_fn(i) {
match i.node {
ast::item_fn(_, purity, _, _, _)
if purity == ast::unsafe_fn => {
let sess = self.cx.sess;
sess.span_fatal(i.span,
"unsafe functions cannot be used for \
tests");
}
_ => {
debug!("this is a test function");
let test = Test {
span: i.span,
path: self.cx.path.clone(),
bench: is_bench_fn(i),
ignore: is_ignored(self.cx, i),
should_fail: should_fail(i)
};
self.cx.testfns.push(test);
// debug!("have {} test/bench functions",
// cx.testfns.len());
}
}
}
let res = fold::noop_fold_item(i, self);
self.cx.path.pop();
res
}
fn fold_mod(&self, m: &ast::_mod) -> ast::_mod {
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
fn nomain(cx: @mut TestCtxt, item: @ast::item) -> @ast::item {
if!*cx.sess.building_library {
@ast::item {
attrs: item.attrs.iter().filter_map(|attr| {
if "main"!= attr.name() {
Some(*attr)
} else {
None
}
}).collect(),
.. (*item).clone()
}
} else {
item
}
}
let mod_nomain = ast::_mod {
view_items: m.view_items.clone(),
items: m.items.iter().map(|i| nomain(self.cx, *i)).collect(),
};
fold::noop_fold_mod(&mod_nomain, self)
}
}
fn generate_test_harness(sess: session::Session, crate: ast::Crate)
-> ast::Crate {
let cx: @mut TestCtxt = @mut TestCtxt {
sess: sess,
ext_cx: ExtCtxt::new(sess.parse_sess, sess.opts.cfg.clone()),
path: ~[],
testfns: ~[],
is_extra: is_extra(&crate),
config: crate.config.clone(),
};
let ext_cx = cx.ext_cx;
ext_cx.bt_push(ExpnInfo {
call_site: dummy_sp(),
callee: NameAndSpan {
name: @"test",
format: MacroAttribute,
span: None
}
});
let fold = TestHarnessGenerator {
cx: cx
};
let res = fold.fold_crate(crate);
ext_cx.bt_pop();
return res;
}
fn strip_test_functions(crate: ast::Crate) -> ast::Crate {
// When not compiling with --test we should not compile the
// #[test] functions
config::strip_items(crate, |attrs| {
!attr::contains_name(attrs, "test") &&
!attr::contains_name(attrs, "bench")
})
}
fn is_test_fn(cx: @mut TestCtxt, i: @ast::item) -> bool {
let has_test_attr = attr::contains_name(i.attrs, "test");
fn has_test_signature(i: @ast::item) -> bool {
match &i.node {
&ast::item_fn(ref decl, _, _, ref generics, _) => {
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
decl.inputs.is_empty()
&& no_output
&&!generics.is_parameterized()
}
_ => false
}
}
if has_test_attr &&!has_test_signature(i) {
let sess = cx.sess;
sess.span_err(
i.span,
"functions used as tests must have signature fn() -> ()."
);
}
return has_test_attr && has_test_signature(i);
}
fn is_bench_fn(i: @ast::item) -> bool {
let has_bench_attr = attr::contains_name(i.attrs, "bench");
fn has_test_signature(i: @ast::item) -> bool {
match i.node {
ast::item_fn(ref decl, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
let tparm_cnt = generics.ty_params.len();
// NB: inadequate check, but we're running
// well before resolve, can't get too deep.
input_cnt == 1u
&& no_output && tparm_cnt == 0u
}
_ => false
}
}
return has_bench_attr && has_test_signature(i);
}
fn is_ignored(cx: @mut TestCtxt, i: @ast::item) -> bool {
i.attrs.iter().any(|attr| {
// check ignore(cfg(foo, bar))
"ignore" == attr.name() && match attr.meta_item_list() {
Some(ref cfgs) => attr::test_cfg(cx.config, cfgs.iter().map(|x| *x)),
None => true
}
})
}
fn should_fail(i: @ast::item) -> bool {
attr::contains_name(i.attrs, "should_fail")
}
fn add_test_module(cx: &TestCtxt, m: &ast::_mod) -> ast::_mod {
let testmod = mk_test_module(cx);
ast::_mod {
items: vec::append_one(m.items.clone(), testmod),
..(*m).clone()
}
}
/*
We're going to be building a module that looks more or less like:
mod __test {
#[!resolve_unexported]
extern mod extra (name = "extra", vers = "...");
fn main() {
#[main];
extra::test::test_main_static(::os::args(), tests)
}
static tests : &'static [extra::test::TestDescAndFn] = &[
... the list of tests in the crate...
];
}
*/
fn mk_std(cx: &TestCtxt) -> ast::view_item {
let id_extra = cx.sess.ident_of("extra");
let vi = if cx.is_extra {
ast::view_item_use(
~[@nospan(ast::view_path_simple(id_extra,
path_node(~[id_extra]),
ast::DUMMY_NODE_ID))])
} else {
let mi = attr::mk_name_value_item_str(@"vers", @"0.9-pre");
ast::view_item_extern_mod(id_extra, None, ~[mi], ast::DUMMY_NODE_ID)
};
ast::view_item {
node: vi,
attrs: ~[],
vis: ast::public,
span: dummy_sp()
}
}
fn mk_test_module(cx: &TestCtxt) -> @ast::item {
// Link to extra
let view_items = ~[mk_std(cx)];
// A constant vector of test descriptors.
let tests = mk_tests(cx);
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = (quote_item!(cx.ext_cx,
pub fn main() {
#[main];
extra::test::test_main_static(::std::os::args(), TESTS);
}
)).unwrap();
let testmod = ast::_mod {
view_items: view_items,
items: ~[mainfn, tests],
};
let item_ = ast::item_mod(testmod);
// This attribute tells resolve to let us call unexported functions
let resolve_unexported_attr =
attr::mk_attr(attr::mk_word_item(@"!resolve_unexported"));
let item = ast::item {
ident: cx.sess.ident_of("__test"),
attrs: ~[resolve_unexported_attr],
id: ast::DUMMY_NODE_ID,
node: item_,
vis: ast::public,
span: dummy_sp(),
};
debug!("Synthetic test module:\n{}\n",
pprust::item_to_str(&item, cx.sess.intr()));
return @item;
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
codemap::Spanned { node: t, span: dummy_sp() }
}
fn path_node(ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: false,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn path_node_global(ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: true,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn mk_tests(cx: &TestCtxt) -> @ast::item {
// The vector of test_descs for this crate
let test_descs = mk_test_descs(cx);
(quote_item!(cx.ext_cx,
pub static TESTS : &'static [self::extra::test::TestDescAndFn] =
$test_descs
;
)).unwrap()
}
fn is_extra(crate: &ast::Crate) -> bool {
let items = attr::find_linkage_metas(crate.attrs);
match attr::last_meta_item_value_str_by_name(items, "name") {
Some(s) if "extra" == s => true,
_ => false
}
}
fn mk_test_descs(cx: &TestCtxt) -> @ast::Expr {
debug!("building test vector from {} tests", cx.testfns.len());
let mut descs = ~[];
for test in cx.testfns.iter() {
descs.push(mk_test_desc_and_fn_rec(cx, test));
}
let inner_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVec(descs, ast::MutImmutable),
span: dummy_sp(),
};
@ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVstore(inner_expr, ast::ExprVstoreSlice),
span: dummy_sp(),
}
}
fn mk_test_desc_and_fn_rec(cx: &TestCtxt, test: &Test) -> @ast::Expr {
let span = test.span;
let path = test.path.clone();
debug!("encoding {}", ast_util::path_name_i(path));
let name_lit: ast::lit =
nospan(ast::lit_str(ast_util::path_name_i(path).to_managed(), ast::CookedStr));
let name_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprLit(@name_lit),
span: span
};
let fn_path = path_node_global(path);
let fn_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprPath(fn_path),
span: span,
};
let t_expr = if test.bench {
quote_expr!(cx.ext_cx, self::extra::test::StaticBenchFn($fn_expr) )
} else {
quote_expr!(cx.ext_cx, self::extra::test::StaticTestFn($fn_expr) )
};
let ignore_expr = if test.ignore {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let fail_expr = if test.should_fail {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let e = quote_expr!(cx.ext_cx,
self::extra::test::TestDescAndFn {
desc: self::extra::test::TestDesc {
name: self::extra::test::StaticTestName($name_expr),
ignore: $ignore_expr,
should_fail: $fail_expr
},
testfn: $t_expr,
}
);
e
} | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
// | random_line_split |
|
test.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Code that generates a test runner to run all the tests in a crate
use driver::session;
use front::config;
use std::vec;
use syntax::ast_util::*;
use syntax::attr::AttrMetaMethods;
use syntax::attr;
use syntax::codemap::{dummy_sp, Span, ExpnInfo, NameAndSpan, MacroAttribute};
use syntax::codemap;
use syntax::ext::base::ExtCtxt;
use syntax::fold::ast_fold;
use syntax::fold;
use syntax::opt_vec;
use syntax::print::pprust;
use syntax::{ast, ast_util};
use syntax::util::small_vector::SmallVector;
struct Test {
span: Span,
path: ~[ast::Ident],
bench: bool,
ignore: bool,
should_fail: bool
}
struct TestCtxt {
sess: session::Session,
path: ~[ast::Ident],
ext_cx: @ExtCtxt,
testfns: ~[Test],
is_extra: bool,
config: ast::CrateConfig,
}
// Traverse the crate, collecting all the test functions, eliding any
// existing main functions, and synthesizing a main test harness
pub fn modify_for_testing(sess: session::Session,
crate: ast::Crate) -> ast::Crate {
// We generate the test harness when building in the 'test'
// configuration, either with the '--test' or '--cfg test'
// command line options.
let should_test = attr::contains_name(crate.config, "test");
if should_test {
generate_test_harness(sess, crate)
} else {
strip_test_functions(crate)
}
}
struct TestHarnessGenerator {
cx: @mut TestCtxt,
}
impl fold::ast_fold for TestHarnessGenerator {
fn fold_crate(&self, c: ast::Crate) -> ast::Crate {
let folded = fold::noop_fold_crate(c, self);
// Add a special __test module to the crate that will contain code
// generated for the test harness
ast::Crate {
module: add_test_module(self.cx, &folded.module),
.. folded
}
}
fn fold_item(&self, i: @ast::item) -> SmallVector<@ast::item> {
self.cx.path.push(i.ident);
debug!("current path: {}",
ast_util::path_name_i(self.cx.path.clone()));
if is_test_fn(self.cx, i) || is_bench_fn(i) {
match i.node {
ast::item_fn(_, purity, _, _, _)
if purity == ast::unsafe_fn => {
let sess = self.cx.sess;
sess.span_fatal(i.span,
"unsafe functions cannot be used for \
tests");
}
_ => {
debug!("this is a test function");
let test = Test {
span: i.span,
path: self.cx.path.clone(),
bench: is_bench_fn(i),
ignore: is_ignored(self.cx, i),
should_fail: should_fail(i)
};
self.cx.testfns.push(test);
// debug!("have {} test/bench functions",
// cx.testfns.len());
}
}
}
let res = fold::noop_fold_item(i, self);
self.cx.path.pop();
res
}
fn fold_mod(&self, m: &ast::_mod) -> ast::_mod {
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
fn nomain(cx: @mut TestCtxt, item: @ast::item) -> @ast::item {
if!*cx.sess.building_library {
@ast::item {
attrs: item.attrs.iter().filter_map(|attr| {
if "main"!= attr.name() {
Some(*attr)
} else {
None
}
}).collect(),
.. (*item).clone()
}
} else {
item
}
}
let mod_nomain = ast::_mod {
view_items: m.view_items.clone(),
items: m.items.iter().map(|i| nomain(self.cx, *i)).collect(),
};
fold::noop_fold_mod(&mod_nomain, self)
}
}
fn generate_test_harness(sess: session::Session, crate: ast::Crate)
-> ast::Crate {
let cx: @mut TestCtxt = @mut TestCtxt {
sess: sess,
ext_cx: ExtCtxt::new(sess.parse_sess, sess.opts.cfg.clone()),
path: ~[],
testfns: ~[],
is_extra: is_extra(&crate),
config: crate.config.clone(),
};
let ext_cx = cx.ext_cx;
ext_cx.bt_push(ExpnInfo {
call_site: dummy_sp(),
callee: NameAndSpan {
name: @"test",
format: MacroAttribute,
span: None
}
});
let fold = TestHarnessGenerator {
cx: cx
};
let res = fold.fold_crate(crate);
ext_cx.bt_pop();
return res;
}
fn strip_test_functions(crate: ast::Crate) -> ast::Crate {
// When not compiling with --test we should not compile the
// #[test] functions
config::strip_items(crate, |attrs| {
!attr::contains_name(attrs, "test") &&
!attr::contains_name(attrs, "bench")
})
}
fn is_test_fn(cx: @mut TestCtxt, i: @ast::item) -> bool {
let has_test_attr = attr::contains_name(i.attrs, "test");
fn has_test_signature(i: @ast::item) -> bool {
match &i.node {
&ast::item_fn(ref decl, _, _, ref generics, _) => {
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
decl.inputs.is_empty()
&& no_output
&&!generics.is_parameterized()
}
_ => false
}
}
if has_test_attr &&!has_test_signature(i) {
let sess = cx.sess;
sess.span_err(
i.span,
"functions used as tests must have signature fn() -> ()."
);
}
return has_test_attr && has_test_signature(i);
}
fn is_bench_fn(i: @ast::item) -> bool {
let has_bench_attr = attr::contains_name(i.attrs, "bench");
fn has_test_signature(i: @ast::item) -> bool |
return has_bench_attr && has_test_signature(i);
}
fn is_ignored(cx: @mut TestCtxt, i: @ast::item) -> bool {
i.attrs.iter().any(|attr| {
// check ignore(cfg(foo, bar))
"ignore" == attr.name() && match attr.meta_item_list() {
Some(ref cfgs) => attr::test_cfg(cx.config, cfgs.iter().map(|x| *x)),
None => true
}
})
}
fn should_fail(i: @ast::item) -> bool {
attr::contains_name(i.attrs, "should_fail")
}
fn add_test_module(cx: &TestCtxt, m: &ast::_mod) -> ast::_mod {
let testmod = mk_test_module(cx);
ast::_mod {
items: vec::append_one(m.items.clone(), testmod),
..(*m).clone()
}
}
/*
We're going to be building a module that looks more or less like:
mod __test {
#[!resolve_unexported]
extern mod extra (name = "extra", vers = "...");
fn main() {
#[main];
extra::test::test_main_static(::os::args(), tests)
}
static tests : &'static [extra::test::TestDescAndFn] = &[
... the list of tests in the crate...
];
}
*/
fn mk_std(cx: &TestCtxt) -> ast::view_item {
let id_extra = cx.sess.ident_of("extra");
let vi = if cx.is_extra {
ast::view_item_use(
~[@nospan(ast::view_path_simple(id_extra,
path_node(~[id_extra]),
ast::DUMMY_NODE_ID))])
} else {
let mi = attr::mk_name_value_item_str(@"vers", @"0.9-pre");
ast::view_item_extern_mod(id_extra, None, ~[mi], ast::DUMMY_NODE_ID)
};
ast::view_item {
node: vi,
attrs: ~[],
vis: ast::public,
span: dummy_sp()
}
}
fn mk_test_module(cx: &TestCtxt) -> @ast::item {
// Link to extra
let view_items = ~[mk_std(cx)];
// A constant vector of test descriptors.
let tests = mk_tests(cx);
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = (quote_item!(cx.ext_cx,
pub fn main() {
#[main];
extra::test::test_main_static(::std::os::args(), TESTS);
}
)).unwrap();
let testmod = ast::_mod {
view_items: view_items,
items: ~[mainfn, tests],
};
let item_ = ast::item_mod(testmod);
// This attribute tells resolve to let us call unexported functions
let resolve_unexported_attr =
attr::mk_attr(attr::mk_word_item(@"!resolve_unexported"));
let item = ast::item {
ident: cx.sess.ident_of("__test"),
attrs: ~[resolve_unexported_attr],
id: ast::DUMMY_NODE_ID,
node: item_,
vis: ast::public,
span: dummy_sp(),
};
debug!("Synthetic test module:\n{}\n",
pprust::item_to_str(&item, cx.sess.intr()));
return @item;
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
codemap::Spanned { node: t, span: dummy_sp() }
}
fn path_node(ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: false,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn path_node_global(ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: true,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn mk_tests(cx: &TestCtxt) -> @ast::item {
// The vector of test_descs for this crate
let test_descs = mk_test_descs(cx);
(quote_item!(cx.ext_cx,
pub static TESTS : &'static [self::extra::test::TestDescAndFn] =
$test_descs
;
)).unwrap()
}
fn is_extra(crate: &ast::Crate) -> bool {
let items = attr::find_linkage_metas(crate.attrs);
match attr::last_meta_item_value_str_by_name(items, "name") {
Some(s) if "extra" == s => true,
_ => false
}
}
fn mk_test_descs(cx: &TestCtxt) -> @ast::Expr {
debug!("building test vector from {} tests", cx.testfns.len());
let mut descs = ~[];
for test in cx.testfns.iter() {
descs.push(mk_test_desc_and_fn_rec(cx, test));
}
let inner_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVec(descs, ast::MutImmutable),
span: dummy_sp(),
};
@ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVstore(inner_expr, ast::ExprVstoreSlice),
span: dummy_sp(),
}
}
fn mk_test_desc_and_fn_rec(cx: &TestCtxt, test: &Test) -> @ast::Expr {
let span = test.span;
let path = test.path.clone();
debug!("encoding {}", ast_util::path_name_i(path));
let name_lit: ast::lit =
nospan(ast::lit_str(ast_util::path_name_i(path).to_managed(), ast::CookedStr));
let name_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprLit(@name_lit),
span: span
};
let fn_path = path_node_global(path);
let fn_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprPath(fn_path),
span: span,
};
let t_expr = if test.bench {
quote_expr!(cx.ext_cx, self::extra::test::StaticBenchFn($fn_expr) )
} else {
quote_expr!(cx.ext_cx, self::extra::test::StaticTestFn($fn_expr) )
};
let ignore_expr = if test.ignore {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let fail_expr = if test.should_fail {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let e = quote_expr!(cx.ext_cx,
self::extra::test::TestDescAndFn {
desc: self::extra::test::TestDesc {
name: self::extra::test::StaticTestName($name_expr),
ignore: $ignore_expr,
should_fail: $fail_expr
},
testfn: $t_expr,
}
);
e
}
| {
match i.node {
ast::item_fn(ref decl, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
let tparm_cnt = generics.ty_params.len();
// NB: inadequate check, but we're running
// well before resolve, can't get too deep.
input_cnt == 1u
&& no_output && tparm_cnt == 0u
}
_ => false
}
} | identifier_body |
test.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Code that generates a test runner to run all the tests in a crate
use driver::session;
use front::config;
use std::vec;
use syntax::ast_util::*;
use syntax::attr::AttrMetaMethods;
use syntax::attr;
use syntax::codemap::{dummy_sp, Span, ExpnInfo, NameAndSpan, MacroAttribute};
use syntax::codemap;
use syntax::ext::base::ExtCtxt;
use syntax::fold::ast_fold;
use syntax::fold;
use syntax::opt_vec;
use syntax::print::pprust;
use syntax::{ast, ast_util};
use syntax::util::small_vector::SmallVector;
struct Test {
span: Span,
path: ~[ast::Ident],
bench: bool,
ignore: bool,
should_fail: bool
}
struct TestCtxt {
sess: session::Session,
path: ~[ast::Ident],
ext_cx: @ExtCtxt,
testfns: ~[Test],
is_extra: bool,
config: ast::CrateConfig,
}
// Traverse the crate, collecting all the test functions, eliding any
// existing main functions, and synthesizing a main test harness
pub fn modify_for_testing(sess: session::Session,
crate: ast::Crate) -> ast::Crate {
// We generate the test harness when building in the 'test'
// configuration, either with the '--test' or '--cfg test'
// command line options.
let should_test = attr::contains_name(crate.config, "test");
if should_test {
generate_test_harness(sess, crate)
} else {
strip_test_functions(crate)
}
}
struct TestHarnessGenerator {
cx: @mut TestCtxt,
}
impl fold::ast_fold for TestHarnessGenerator {
fn fold_crate(&self, c: ast::Crate) -> ast::Crate {
let folded = fold::noop_fold_crate(c, self);
// Add a special __test module to the crate that will contain code
// generated for the test harness
ast::Crate {
module: add_test_module(self.cx, &folded.module),
.. folded
}
}
fn fold_item(&self, i: @ast::item) -> SmallVector<@ast::item> {
self.cx.path.push(i.ident);
debug!("current path: {}",
ast_util::path_name_i(self.cx.path.clone()));
if is_test_fn(self.cx, i) || is_bench_fn(i) {
match i.node {
ast::item_fn(_, purity, _, _, _)
if purity == ast::unsafe_fn => {
let sess = self.cx.sess;
sess.span_fatal(i.span,
"unsafe functions cannot be used for \
tests");
}
_ => {
debug!("this is a test function");
let test = Test {
span: i.span,
path: self.cx.path.clone(),
bench: is_bench_fn(i),
ignore: is_ignored(self.cx, i),
should_fail: should_fail(i)
};
self.cx.testfns.push(test);
// debug!("have {} test/bench functions",
// cx.testfns.len());
}
}
}
let res = fold::noop_fold_item(i, self);
self.cx.path.pop();
res
}
fn fold_mod(&self, m: &ast::_mod) -> ast::_mod {
// Remove any #[main] from the AST so it doesn't clash with
// the one we're going to add. Only if compiling an executable.
fn nomain(cx: @mut TestCtxt, item: @ast::item) -> @ast::item {
if!*cx.sess.building_library {
@ast::item {
attrs: item.attrs.iter().filter_map(|attr| {
if "main"!= attr.name() {
Some(*attr)
} else {
None
}
}).collect(),
.. (*item).clone()
}
} else {
item
}
}
let mod_nomain = ast::_mod {
view_items: m.view_items.clone(),
items: m.items.iter().map(|i| nomain(self.cx, *i)).collect(),
};
fold::noop_fold_mod(&mod_nomain, self)
}
}
fn generate_test_harness(sess: session::Session, crate: ast::Crate)
-> ast::Crate {
let cx: @mut TestCtxt = @mut TestCtxt {
sess: sess,
ext_cx: ExtCtxt::new(sess.parse_sess, sess.opts.cfg.clone()),
path: ~[],
testfns: ~[],
is_extra: is_extra(&crate),
config: crate.config.clone(),
};
let ext_cx = cx.ext_cx;
ext_cx.bt_push(ExpnInfo {
call_site: dummy_sp(),
callee: NameAndSpan {
name: @"test",
format: MacroAttribute,
span: None
}
});
let fold = TestHarnessGenerator {
cx: cx
};
let res = fold.fold_crate(crate);
ext_cx.bt_pop();
return res;
}
fn strip_test_functions(crate: ast::Crate) -> ast::Crate {
// When not compiling with --test we should not compile the
// #[test] functions
config::strip_items(crate, |attrs| {
!attr::contains_name(attrs, "test") &&
!attr::contains_name(attrs, "bench")
})
}
fn is_test_fn(cx: @mut TestCtxt, i: @ast::item) -> bool {
let has_test_attr = attr::contains_name(i.attrs, "test");
fn has_test_signature(i: @ast::item) -> bool {
match &i.node {
&ast::item_fn(ref decl, _, _, ref generics, _) => {
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
decl.inputs.is_empty()
&& no_output
&&!generics.is_parameterized()
}
_ => false
}
}
if has_test_attr &&!has_test_signature(i) {
let sess = cx.sess;
sess.span_err(
i.span,
"functions used as tests must have signature fn() -> ()."
);
}
return has_test_attr && has_test_signature(i);
}
fn is_bench_fn(i: @ast::item) -> bool {
let has_bench_attr = attr::contains_name(i.attrs, "bench");
fn has_test_signature(i: @ast::item) -> bool {
match i.node {
ast::item_fn(ref decl, _, _, ref generics, _) => {
let input_cnt = decl.inputs.len();
let no_output = match decl.output.node {
ast::ty_nil => true,
_ => false
};
let tparm_cnt = generics.ty_params.len();
// NB: inadequate check, but we're running
// well before resolve, can't get too deep.
input_cnt == 1u
&& no_output && tparm_cnt == 0u
}
_ => false
}
}
return has_bench_attr && has_test_signature(i);
}
fn is_ignored(cx: @mut TestCtxt, i: @ast::item) -> bool {
i.attrs.iter().any(|attr| {
// check ignore(cfg(foo, bar))
"ignore" == attr.name() && match attr.meta_item_list() {
Some(ref cfgs) => attr::test_cfg(cx.config, cfgs.iter().map(|x| *x)),
None => true
}
})
}
fn should_fail(i: @ast::item) -> bool {
attr::contains_name(i.attrs, "should_fail")
}
fn add_test_module(cx: &TestCtxt, m: &ast::_mod) -> ast::_mod {
let testmod = mk_test_module(cx);
ast::_mod {
items: vec::append_one(m.items.clone(), testmod),
..(*m).clone()
}
}
/*
We're going to be building a module that looks more or less like:
mod __test {
#[!resolve_unexported]
extern mod extra (name = "extra", vers = "...");
fn main() {
#[main];
extra::test::test_main_static(::os::args(), tests)
}
static tests : &'static [extra::test::TestDescAndFn] = &[
... the list of tests in the crate...
];
}
*/
fn mk_std(cx: &TestCtxt) -> ast::view_item {
let id_extra = cx.sess.ident_of("extra");
let vi = if cx.is_extra {
ast::view_item_use(
~[@nospan(ast::view_path_simple(id_extra,
path_node(~[id_extra]),
ast::DUMMY_NODE_ID))])
} else {
let mi = attr::mk_name_value_item_str(@"vers", @"0.9-pre");
ast::view_item_extern_mod(id_extra, None, ~[mi], ast::DUMMY_NODE_ID)
};
ast::view_item {
node: vi,
attrs: ~[],
vis: ast::public,
span: dummy_sp()
}
}
fn mk_test_module(cx: &TestCtxt) -> @ast::item {
// Link to extra
let view_items = ~[mk_std(cx)];
// A constant vector of test descriptors.
let tests = mk_tests(cx);
// The synthesized main function which will call the console test runner
// with our list of tests
let mainfn = (quote_item!(cx.ext_cx,
pub fn main() {
#[main];
extra::test::test_main_static(::std::os::args(), TESTS);
}
)).unwrap();
let testmod = ast::_mod {
view_items: view_items,
items: ~[mainfn, tests],
};
let item_ = ast::item_mod(testmod);
// This attribute tells resolve to let us call unexported functions
let resolve_unexported_attr =
attr::mk_attr(attr::mk_word_item(@"!resolve_unexported"));
let item = ast::item {
ident: cx.sess.ident_of("__test"),
attrs: ~[resolve_unexported_attr],
id: ast::DUMMY_NODE_ID,
node: item_,
vis: ast::public,
span: dummy_sp(),
};
debug!("Synthetic test module:\n{}\n",
pprust::item_to_str(&item, cx.sess.intr()));
return @item;
}
fn nospan<T>(t: T) -> codemap::Spanned<T> {
codemap::Spanned { node: t, span: dummy_sp() }
}
fn | (ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: false,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn path_node_global(ids: ~[ast::Ident]) -> ast::Path {
ast::Path {
span: dummy_sp(),
global: true,
segments: ids.move_iter().map(|identifier| ast::PathSegment {
identifier: identifier,
lifetimes: opt_vec::Empty,
types: opt_vec::Empty,
}).collect()
}
}
fn mk_tests(cx: &TestCtxt) -> @ast::item {
// The vector of test_descs for this crate
let test_descs = mk_test_descs(cx);
(quote_item!(cx.ext_cx,
pub static TESTS : &'static [self::extra::test::TestDescAndFn] =
$test_descs
;
)).unwrap()
}
fn is_extra(crate: &ast::Crate) -> bool {
let items = attr::find_linkage_metas(crate.attrs);
match attr::last_meta_item_value_str_by_name(items, "name") {
Some(s) if "extra" == s => true,
_ => false
}
}
fn mk_test_descs(cx: &TestCtxt) -> @ast::Expr {
debug!("building test vector from {} tests", cx.testfns.len());
let mut descs = ~[];
for test in cx.testfns.iter() {
descs.push(mk_test_desc_and_fn_rec(cx, test));
}
let inner_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVec(descs, ast::MutImmutable),
span: dummy_sp(),
};
@ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprVstore(inner_expr, ast::ExprVstoreSlice),
span: dummy_sp(),
}
}
fn mk_test_desc_and_fn_rec(cx: &TestCtxt, test: &Test) -> @ast::Expr {
let span = test.span;
let path = test.path.clone();
debug!("encoding {}", ast_util::path_name_i(path));
let name_lit: ast::lit =
nospan(ast::lit_str(ast_util::path_name_i(path).to_managed(), ast::CookedStr));
let name_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprLit(@name_lit),
span: span
};
let fn_path = path_node_global(path);
let fn_expr = @ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprPath(fn_path),
span: span,
};
let t_expr = if test.bench {
quote_expr!(cx.ext_cx, self::extra::test::StaticBenchFn($fn_expr) )
} else {
quote_expr!(cx.ext_cx, self::extra::test::StaticTestFn($fn_expr) )
};
let ignore_expr = if test.ignore {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let fail_expr = if test.should_fail {
quote_expr!(cx.ext_cx, true )
} else {
quote_expr!(cx.ext_cx, false )
};
let e = quote_expr!(cx.ext_cx,
self::extra::test::TestDescAndFn {
desc: self::extra::test::TestDesc {
name: self::extra::test::StaticTestName($name_expr),
ignore: $ignore_expr,
should_fail: $fail_expr
},
testfn: $t_expr,
}
);
e
}
| path_node | identifier_name |
ethertype.rs | use core::convert::TryFrom;
use num_enum::TryFromPrimitive;
use serde::{Deserialize, Serialize};
/// https://en.wikipedia.org/wiki/EtherType#Examples
#[derive(
Debug,
Clone,
Copy,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
TryFromPrimitive,
Deserialize,
Serialize,
)]
#[repr(u16)]
pub enum EtherType {
Ipv4 = 0x0800,
ARP = 0x0806,
WakeOnLan = 0x0842,
SLPP = 0x8102,
Ipv6 = 0x86dd, | pub fn from_bytes(bytes: &[u8]) -> Self {
let n = u16::from_be_bytes([bytes[0], bytes[1]]);
Self::try_from(n).unwrap_or_else(|_| panic!("Unknwn EtherType {:04x}", n))
}
pub fn to_bytes(self) -> [u8; 2] {
u16::to_be_bytes(self as u16)
}
} | EthernetFlowControl = 0x8808,
EthernetSlowProtocol = 0x8809,
}
impl EtherType { | random_line_split |
ethertype.rs | use core::convert::TryFrom;
use num_enum::TryFromPrimitive;
use serde::{Deserialize, Serialize};
/// https://en.wikipedia.org/wiki/EtherType#Examples
#[derive(
Debug,
Clone,
Copy,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
TryFromPrimitive,
Deserialize,
Serialize,
)]
#[repr(u16)]
pub enum EtherType {
Ipv4 = 0x0800,
ARP = 0x0806,
WakeOnLan = 0x0842,
SLPP = 0x8102,
Ipv6 = 0x86dd,
EthernetFlowControl = 0x8808,
EthernetSlowProtocol = 0x8809,
}
impl EtherType {
pub fn | (bytes: &[u8]) -> Self {
let n = u16::from_be_bytes([bytes[0], bytes[1]]);
Self::try_from(n).unwrap_or_else(|_| panic!("Unknwn EtherType {:04x}", n))
}
pub fn to_bytes(self) -> [u8; 2] {
u16::to_be_bytes(self as u16)
}
}
| from_bytes | identifier_name |
ethertype.rs | use core::convert::TryFrom;
use num_enum::TryFromPrimitive;
use serde::{Deserialize, Serialize};
/// https://en.wikipedia.org/wiki/EtherType#Examples
#[derive(
Debug,
Clone,
Copy,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
TryFromPrimitive,
Deserialize,
Serialize,
)]
#[repr(u16)]
pub enum EtherType {
Ipv4 = 0x0800,
ARP = 0x0806,
WakeOnLan = 0x0842,
SLPP = 0x8102,
Ipv6 = 0x86dd,
EthernetFlowControl = 0x8808,
EthernetSlowProtocol = 0x8809,
}
impl EtherType {
pub fn from_bytes(bytes: &[u8]) -> Self {
let n = u16::from_be_bytes([bytes[0], bytes[1]]);
Self::try_from(n).unwrap_or_else(|_| panic!("Unknwn EtherType {:04x}", n))
}
pub fn to_bytes(self) -> [u8; 2] |
}
| {
u16::to_be_bytes(self as u16)
} | identifier_body |
bytes.rs | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::types::{Intern, RawInternKey};
use fnv::FnvHashMap;
use lazy_static::lazy_static;
use parking_lot::RwLock;
use serde::{Deserialize, Deserializer};
use std::fmt;
use std::sync::Arc;
/// Slices of bytes intern as BytesKey
impl Intern for &[u8] {
type Key = BytesKey;
fn intern(self) -> Self::Key {
BytesKey(BYTES_TABLE.intern(self))
}
}
/// Owned strings intern as StringKey, with the interning
/// based on the raw bytes of the string
impl Intern for String {
type Key = StringKey;
fn intern(self) -> Self::Key {
StringKey(BYTES_TABLE.intern(self.as_bytes()))
}
}
/// Str (slices) intern as StringKey, with the interning
/// based on the raw bytes of the str.
impl Intern for &str {
type Key = StringKey;
fn intern(self) -> Self::Key {
StringKey(BYTES_TABLE.intern(self.as_bytes()))
}
}
/// Interned bytes
#[derive(Copy, Clone, Eq, Ord, Hash, PartialEq, PartialOrd)]
pub struct BytesKey(RawInternKey);
impl BytesKey {
pub fn lookup(self) -> &'static [u8] {
BYTES_TABLE.lookup(self.0)
}
}
impl fmt::Debug for BytesKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let bytes_value = self.lookup();
write!(f, "{:?}", bytes_value)
}
}
/// An interned string
#[derive(Copy, Clone, Eq, Ord, Hash, PartialEq, PartialOrd)]
pub struct StringKey(RawInternKey);
impl StringKey {
/// Get a reference to the original str.
pub fn lookup(self) -> &'static str {
let bytes = BYTES_TABLE.lookup(self.0);
// This is safe because the bytes we are converting originally came
// from a str when we interned it: the only way to get a StringKey is
// to intern an (already valid) string, so if we have a StringKey then
// its bytes must be valid UTF-8.
unsafe { std::str::from_utf8_unchecked(bytes) }
}
/// Convert the interned string key into an interned bytes key. Because
/// strings intern as their raw bytes, this is an O(1) operation.
/// Note the reverse (BytesKey.as_str) is a fallible operation since
/// the bytes may not be valid UTF-8.
pub fn as_bytes(self) -> BytesKey {
BytesKey(self.0)
}
}
impl fmt::Debug for StringKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let str_value = self.lookup();
write!(f, "{:?}", str_value)
}
}
impl fmt::Display for StringKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let str_value = self.lookup();
write!(f, "{}", str_value)
}
}
impl<'de> Deserialize<'de> for StringKey {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Deserialize::deserialize(deserializer).map(|s: String| s.intern())
}
}
// Static table used in the bytes/str Intern implementations
lazy_static! {
static ref BYTES_TABLE: BytesTable = BytesTable::new();
}
/// Similar to the generic `InternTable` but customized for sequences of raw bytes (and strings).
pub struct BytesTable {
data: Arc<RwLock<BytesTableData>>,
}
impl BytesTable {
pub fn new() -> Self {
Self {
data: Arc::new(RwLock::new(BytesTableData::new())),
}
}
pub fn intern(&self, value: &[u8]) -> RawInternKey {
if let Some(prev) = self.data.read().get(&value) {
return prev;
}
let mut writer = self.data.write();
writer.intern(value)
}
pub fn lookup(&self, key: RawInternKey) -> &'static [u8] {
self.data.read().lookup(key)
}
}
/// BytesTableData is similar to InternTableData but customized for sequences
/// of raw bytes (and notably, strings).
struct BytesTableData {
// Raw data storage, allocated in large chunks
buffer: Option<&'static mut [u8]>,
// Reverse mapping of index=>value, used to convert an
// interned key back to (a reference to) its value
items: Vec<&'static [u8]>,
// Mapping of values to their interned indices
table: FnvHashMap<&'static [u8], RawInternKey>,
}
impl BytesTableData {
const BUFFER_SIZE: usize = 4096;
pub fn new() -> Self {
Self {
buffer: Some(Self::new_buffer()),
items: Default::default(),
table: Default::default(), | }
}
fn new_buffer() -> &'static mut [u8] {
Box::leak(Box::new([0; Self::BUFFER_SIZE]))
}
pub fn get(&self, value: &[u8]) -> Option<RawInternKey> {
self.table.get(value).cloned()
}
// Copy the byte slice into'static memory by appending it to a buffer, if there is room.
// If the buffer fills up and the value is small, start over with a new buffer.
// If the value is large, just give it its own memory.
fn alloc(&mut self, value: &[u8]) -> &'static [u8] {
let len = value.len();
let mut buffer = self.buffer.take().unwrap();
if len > buffer.len() {
if len >= Self::BUFFER_SIZE / 16 {
// This byte slice is so big it can just have its own memory.
self.buffer = Some(buffer);
return Box::leak(value.into());
} else {
buffer = Self::new_buffer()
}
}
let (mem, remaining) = buffer.split_at_mut(len);
mem.copy_from_slice(value);
self.buffer = Some(remaining);
mem
}
pub fn intern(&mut self, value: &[u8]) -> RawInternKey {
// If there's an existing value return it
if let Some(prev) = self.get(&value) {
return prev;
}
// Otherwise intern
let key = RawInternKey::new(self.items.len());
let static_bytes = self.alloc(value);
self.items.push(static_bytes);
self.table.insert(static_bytes, key);
key
}
pub fn lookup(&self, key: RawInternKey) -> &'static [u8] {
let index = key.as_usize();
self.items[index]
}
} | random_line_split |
|
bytes.rs | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::types::{Intern, RawInternKey};
use fnv::FnvHashMap;
use lazy_static::lazy_static;
use parking_lot::RwLock;
use serde::{Deserialize, Deserializer};
use std::fmt;
use std::sync::Arc;
/// Slices of bytes intern as BytesKey
impl Intern for &[u8] {
type Key = BytesKey;
fn intern(self) -> Self::Key {
BytesKey(BYTES_TABLE.intern(self))
}
}
/// Owned strings intern as StringKey, with the interning
/// based on the raw bytes of the string
impl Intern for String {
type Key = StringKey;
fn intern(self) -> Self::Key {
StringKey(BYTES_TABLE.intern(self.as_bytes()))
}
}
/// Str (slices) intern as StringKey, with the interning
/// based on the raw bytes of the str.
impl Intern for &str {
type Key = StringKey;
fn intern(self) -> Self::Key {
StringKey(BYTES_TABLE.intern(self.as_bytes()))
}
}
/// Interned bytes
#[derive(Copy, Clone, Eq, Ord, Hash, PartialEq, PartialOrd)]
pub struct BytesKey(RawInternKey);
impl BytesKey {
pub fn lookup(self) -> &'static [u8] {
BYTES_TABLE.lookup(self.0)
}
}
impl fmt::Debug for BytesKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let bytes_value = self.lookup();
write!(f, "{:?}", bytes_value)
}
}
/// An interned string
#[derive(Copy, Clone, Eq, Ord, Hash, PartialEq, PartialOrd)]
pub struct StringKey(RawInternKey);
impl StringKey {
/// Get a reference to the original str.
pub fn lookup(self) -> &'static str {
let bytes = BYTES_TABLE.lookup(self.0);
// This is safe because the bytes we are converting originally came
// from a str when we interned it: the only way to get a StringKey is
// to intern an (already valid) string, so if we have a StringKey then
// its bytes must be valid UTF-8.
unsafe { std::str::from_utf8_unchecked(bytes) }
}
/// Convert the interned string key into an interned bytes key. Because
/// strings intern as their raw bytes, this is an O(1) operation.
/// Note the reverse (BytesKey.as_str) is a fallible operation since
/// the bytes may not be valid UTF-8.
pub fn as_bytes(self) -> BytesKey {
BytesKey(self.0)
}
}
impl fmt::Debug for StringKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let str_value = self.lookup();
write!(f, "{:?}", str_value)
}
}
impl fmt::Display for StringKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let str_value = self.lookup();
write!(f, "{}", str_value)
}
}
impl<'de> Deserialize<'de> for StringKey {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Deserialize::deserialize(deserializer).map(|s: String| s.intern())
}
}
// Static table used in the bytes/str Intern implementations
lazy_static! {
static ref BYTES_TABLE: BytesTable = BytesTable::new();
}
/// Similar to the generic `InternTable` but customized for sequences of raw bytes (and strings).
pub struct BytesTable {
data: Arc<RwLock<BytesTableData>>,
}
impl BytesTable {
pub fn new() -> Self {
Self {
data: Arc::new(RwLock::new(BytesTableData::new())),
}
}
pub fn intern(&self, value: &[u8]) -> RawInternKey {
if let Some(prev) = self.data.read().get(&value) {
return prev;
}
let mut writer = self.data.write();
writer.intern(value)
}
pub fn lookup(&self, key: RawInternKey) -> &'static [u8] {
self.data.read().lookup(key)
}
}
/// BytesTableData is similar to InternTableData but customized for sequences
/// of raw bytes (and notably, strings).
struct BytesTableData {
// Raw data storage, allocated in large chunks
buffer: Option<&'static mut [u8]>,
// Reverse mapping of index=>value, used to convert an
// interned key back to (a reference to) its value
items: Vec<&'static [u8]>,
// Mapping of values to their interned indices
table: FnvHashMap<&'static [u8], RawInternKey>,
}
impl BytesTableData {
const BUFFER_SIZE: usize = 4096;
pub fn | () -> Self {
Self {
buffer: Some(Self::new_buffer()),
items: Default::default(),
table: Default::default(),
}
}
fn new_buffer() -> &'static mut [u8] {
Box::leak(Box::new([0; Self::BUFFER_SIZE]))
}
pub fn get(&self, value: &[u8]) -> Option<RawInternKey> {
self.table.get(value).cloned()
}
// Copy the byte slice into'static memory by appending it to a buffer, if there is room.
// If the buffer fills up and the value is small, start over with a new buffer.
// If the value is large, just give it its own memory.
fn alloc(&mut self, value: &[u8]) -> &'static [u8] {
let len = value.len();
let mut buffer = self.buffer.take().unwrap();
if len > buffer.len() {
if len >= Self::BUFFER_SIZE / 16 {
// This byte slice is so big it can just have its own memory.
self.buffer = Some(buffer);
return Box::leak(value.into());
} else {
buffer = Self::new_buffer()
}
}
let (mem, remaining) = buffer.split_at_mut(len);
mem.copy_from_slice(value);
self.buffer = Some(remaining);
mem
}
pub fn intern(&mut self, value: &[u8]) -> RawInternKey {
// If there's an existing value return it
if let Some(prev) = self.get(&value) {
return prev;
}
// Otherwise intern
let key = RawInternKey::new(self.items.len());
let static_bytes = self.alloc(value);
self.items.push(static_bytes);
self.table.insert(static_bytes, key);
key
}
pub fn lookup(&self, key: RawInternKey) -> &'static [u8] {
let index = key.as_usize();
self.items[index]
}
}
| new | identifier_name |
bytes.rs | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::types::{Intern, RawInternKey};
use fnv::FnvHashMap;
use lazy_static::lazy_static;
use parking_lot::RwLock;
use serde::{Deserialize, Deserializer};
use std::fmt;
use std::sync::Arc;
/// Slices of bytes intern as BytesKey
impl Intern for &[u8] {
type Key = BytesKey;
fn intern(self) -> Self::Key {
BytesKey(BYTES_TABLE.intern(self))
}
}
/// Owned strings intern as StringKey, with the interning
/// based on the raw bytes of the string
impl Intern for String {
type Key = StringKey;
fn intern(self) -> Self::Key {
StringKey(BYTES_TABLE.intern(self.as_bytes()))
}
}
/// Str (slices) intern as StringKey, with the interning
/// based on the raw bytes of the str.
impl Intern for &str {
type Key = StringKey;
fn intern(self) -> Self::Key {
StringKey(BYTES_TABLE.intern(self.as_bytes()))
}
}
/// Interned bytes
#[derive(Copy, Clone, Eq, Ord, Hash, PartialEq, PartialOrd)]
pub struct BytesKey(RawInternKey);
impl BytesKey {
pub fn lookup(self) -> &'static [u8] {
BYTES_TABLE.lookup(self.0)
}
}
impl fmt::Debug for BytesKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let bytes_value = self.lookup();
write!(f, "{:?}", bytes_value)
}
}
/// An interned string
#[derive(Copy, Clone, Eq, Ord, Hash, PartialEq, PartialOrd)]
pub struct StringKey(RawInternKey);
impl StringKey {
/// Get a reference to the original str.
pub fn lookup(self) -> &'static str {
let bytes = BYTES_TABLE.lookup(self.0);
// This is safe because the bytes we are converting originally came
// from a str when we interned it: the only way to get a StringKey is
// to intern an (already valid) string, so if we have a StringKey then
// its bytes must be valid UTF-8.
unsafe { std::str::from_utf8_unchecked(bytes) }
}
/// Convert the interned string key into an interned bytes key. Because
/// strings intern as their raw bytes, this is an O(1) operation.
/// Note the reverse (BytesKey.as_str) is a fallible operation since
/// the bytes may not be valid UTF-8.
pub fn as_bytes(self) -> BytesKey {
BytesKey(self.0)
}
}
impl fmt::Debug for StringKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let str_value = self.lookup();
write!(f, "{:?}", str_value)
}
}
impl fmt::Display for StringKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let str_value = self.lookup();
write!(f, "{}", str_value)
}
}
impl<'de> Deserialize<'de> for StringKey {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Deserialize::deserialize(deserializer).map(|s: String| s.intern())
}
}
// Static table used in the bytes/str Intern implementations
lazy_static! {
static ref BYTES_TABLE: BytesTable = BytesTable::new();
}
/// Similar to the generic `InternTable` but customized for sequences of raw bytes (and strings).
pub struct BytesTable {
data: Arc<RwLock<BytesTableData>>,
}
impl BytesTable {
pub fn new() -> Self {
Self {
data: Arc::new(RwLock::new(BytesTableData::new())),
}
}
pub fn intern(&self, value: &[u8]) -> RawInternKey {
if let Some(prev) = self.data.read().get(&value) {
return prev;
}
let mut writer = self.data.write();
writer.intern(value)
}
pub fn lookup(&self, key: RawInternKey) -> &'static [u8] {
self.data.read().lookup(key)
}
}
/// BytesTableData is similar to InternTableData but customized for sequences
/// of raw bytes (and notably, strings).
struct BytesTableData {
// Raw data storage, allocated in large chunks
buffer: Option<&'static mut [u8]>,
// Reverse mapping of index=>value, used to convert an
// interned key back to (a reference to) its value
items: Vec<&'static [u8]>,
// Mapping of values to their interned indices
table: FnvHashMap<&'static [u8], RawInternKey>,
}
impl BytesTableData {
const BUFFER_SIZE: usize = 4096;
pub fn new() -> Self {
Self {
buffer: Some(Self::new_buffer()),
items: Default::default(),
table: Default::default(),
}
}
fn new_buffer() -> &'static mut [u8] {
Box::leak(Box::new([0; Self::BUFFER_SIZE]))
}
pub fn get(&self, value: &[u8]) -> Option<RawInternKey> |
// Copy the byte slice into'static memory by appending it to a buffer, if there is room.
// If the buffer fills up and the value is small, start over with a new buffer.
// If the value is large, just give it its own memory.
fn alloc(&mut self, value: &[u8]) -> &'static [u8] {
let len = value.len();
let mut buffer = self.buffer.take().unwrap();
if len > buffer.len() {
if len >= Self::BUFFER_SIZE / 16 {
// This byte slice is so big it can just have its own memory.
self.buffer = Some(buffer);
return Box::leak(value.into());
} else {
buffer = Self::new_buffer()
}
}
let (mem, remaining) = buffer.split_at_mut(len);
mem.copy_from_slice(value);
self.buffer = Some(remaining);
mem
}
pub fn intern(&mut self, value: &[u8]) -> RawInternKey {
// If there's an existing value return it
if let Some(prev) = self.get(&value) {
return prev;
}
// Otherwise intern
let key = RawInternKey::new(self.items.len());
let static_bytes = self.alloc(value);
self.items.push(static_bytes);
self.table.insert(static_bytes, key);
key
}
pub fn lookup(&self, key: RawInternKey) -> &'static [u8] {
let index = key.as_usize();
self.items[index]
}
}
| {
self.table.get(value).cloned()
} | identifier_body |
bytes.rs | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use crate::types::{Intern, RawInternKey};
use fnv::FnvHashMap;
use lazy_static::lazy_static;
use parking_lot::RwLock;
use serde::{Deserialize, Deserializer};
use std::fmt;
use std::sync::Arc;
/// Slices of bytes intern as BytesKey
impl Intern for &[u8] {
type Key = BytesKey;
fn intern(self) -> Self::Key {
BytesKey(BYTES_TABLE.intern(self))
}
}
/// Owned strings intern as StringKey, with the interning
/// based on the raw bytes of the string
impl Intern for String {
type Key = StringKey;
fn intern(self) -> Self::Key {
StringKey(BYTES_TABLE.intern(self.as_bytes()))
}
}
/// Str (slices) intern as StringKey, with the interning
/// based on the raw bytes of the str.
impl Intern for &str {
type Key = StringKey;
fn intern(self) -> Self::Key {
StringKey(BYTES_TABLE.intern(self.as_bytes()))
}
}
/// Interned bytes
#[derive(Copy, Clone, Eq, Ord, Hash, PartialEq, PartialOrd)]
pub struct BytesKey(RawInternKey);
impl BytesKey {
pub fn lookup(self) -> &'static [u8] {
BYTES_TABLE.lookup(self.0)
}
}
impl fmt::Debug for BytesKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let bytes_value = self.lookup();
write!(f, "{:?}", bytes_value)
}
}
/// An interned string
#[derive(Copy, Clone, Eq, Ord, Hash, PartialEq, PartialOrd)]
pub struct StringKey(RawInternKey);
impl StringKey {
/// Get a reference to the original str.
pub fn lookup(self) -> &'static str {
let bytes = BYTES_TABLE.lookup(self.0);
// This is safe because the bytes we are converting originally came
// from a str when we interned it: the only way to get a StringKey is
// to intern an (already valid) string, so if we have a StringKey then
// its bytes must be valid UTF-8.
unsafe { std::str::from_utf8_unchecked(bytes) }
}
/// Convert the interned string key into an interned bytes key. Because
/// strings intern as their raw bytes, this is an O(1) operation.
/// Note the reverse (BytesKey.as_str) is a fallible operation since
/// the bytes may not be valid UTF-8.
pub fn as_bytes(self) -> BytesKey {
BytesKey(self.0)
}
}
impl fmt::Debug for StringKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let str_value = self.lookup();
write!(f, "{:?}", str_value)
}
}
impl fmt::Display for StringKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let str_value = self.lookup();
write!(f, "{}", str_value)
}
}
impl<'de> Deserialize<'de> for StringKey {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Deserialize::deserialize(deserializer).map(|s: String| s.intern())
}
}
// Static table used in the bytes/str Intern implementations
lazy_static! {
static ref BYTES_TABLE: BytesTable = BytesTable::new();
}
/// Similar to the generic `InternTable` but customized for sequences of raw bytes (and strings).
pub struct BytesTable {
data: Arc<RwLock<BytesTableData>>,
}
impl BytesTable {
pub fn new() -> Self {
Self {
data: Arc::new(RwLock::new(BytesTableData::new())),
}
}
pub fn intern(&self, value: &[u8]) -> RawInternKey {
if let Some(prev) = self.data.read().get(&value) {
return prev;
}
let mut writer = self.data.write();
writer.intern(value)
}
pub fn lookup(&self, key: RawInternKey) -> &'static [u8] {
self.data.read().lookup(key)
}
}
/// BytesTableData is similar to InternTableData but customized for sequences
/// of raw bytes (and notably, strings).
struct BytesTableData {
// Raw data storage, allocated in large chunks
buffer: Option<&'static mut [u8]>,
// Reverse mapping of index=>value, used to convert an
// interned key back to (a reference to) its value
items: Vec<&'static [u8]>,
// Mapping of values to their interned indices
table: FnvHashMap<&'static [u8], RawInternKey>,
}
impl BytesTableData {
const BUFFER_SIZE: usize = 4096;
pub fn new() -> Self {
Self {
buffer: Some(Self::new_buffer()),
items: Default::default(),
table: Default::default(),
}
}
fn new_buffer() -> &'static mut [u8] {
Box::leak(Box::new([0; Self::BUFFER_SIZE]))
}
pub fn get(&self, value: &[u8]) -> Option<RawInternKey> {
self.table.get(value).cloned()
}
// Copy the byte slice into'static memory by appending it to a buffer, if there is room.
// If the buffer fills up and the value is small, start over with a new buffer.
// If the value is large, just give it its own memory.
fn alloc(&mut self, value: &[u8]) -> &'static [u8] {
let len = value.len();
let mut buffer = self.buffer.take().unwrap();
if len > buffer.len() {
if len >= Self::BUFFER_SIZE / 16 {
// This byte slice is so big it can just have its own memory.
self.buffer = Some(buffer);
return Box::leak(value.into());
} else |
}
let (mem, remaining) = buffer.split_at_mut(len);
mem.copy_from_slice(value);
self.buffer = Some(remaining);
mem
}
pub fn intern(&mut self, value: &[u8]) -> RawInternKey {
// If there's an existing value return it
if let Some(prev) = self.get(&value) {
return prev;
}
// Otherwise intern
let key = RawInternKey::new(self.items.len());
let static_bytes = self.alloc(value);
self.items.push(static_bytes);
self.table.insert(static_bytes, key);
key
}
pub fn lookup(&self, key: RawInternKey) -> &'static [u8] {
let index = key.as_usize();
self.items[index]
}
}
| {
buffer = Self::new_buffer()
} | conditional_block |
init-res-into-things.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unknown_features)]
#![feature(box_syntax)]
#![feature(unsafe_destructor)]
use std::cell::Cell;
// Resources can't be copied, but storing into data structures counts
// as a move unless the stored thing is used afterwards.
struct r<'a> {
i: &'a Cell<int>,
}
struct BoxR<'a> { x: r<'a> }
#[unsafe_destructor]
impl<'a> Drop for r<'a> {
fn drop(&mut self) {
self.i.set(self.i.get() + 1)
}
}
fn r(i: &Cell<int>) -> r {
r {
i: i
}
}
fn test_rec() {
let i = &Cell::new(0);
{
let _a = BoxR {x: r(i)};
}
assert_eq!(i.get(), 1);
}
fn test_tag() {
enum t<'a> {
t0(r<'a>),
}
| }
fn test_tup() {
let i = &Cell::new(0);
{
let _a = (r(i), 0);
}
assert_eq!(i.get(), 1);
}
fn test_unique() {
let i = &Cell::new(0);
{
let _a = box r(i);
}
assert_eq!(i.get(), 1);
}
fn test_unique_rec() {
let i = &Cell::new(0);
{
let _a = box BoxR {
x: r(i)
};
}
assert_eq!(i.get(), 1);
}
pub fn main() {
test_rec();
test_tag();
test_tup();
test_unique();
test_unique_rec();
} | let i = &Cell::new(0);
{
let _a = t::t0(r(i));
}
assert_eq!(i.get(), 1); | random_line_split |
init-res-into-things.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unknown_features)]
#![feature(box_syntax)]
#![feature(unsafe_destructor)]
use std::cell::Cell;
// Resources can't be copied, but storing into data structures counts
// as a move unless the stored thing is used afterwards.
struct | <'a> {
i: &'a Cell<int>,
}
struct BoxR<'a> { x: r<'a> }
#[unsafe_destructor]
impl<'a> Drop for r<'a> {
fn drop(&mut self) {
self.i.set(self.i.get() + 1)
}
}
fn r(i: &Cell<int>) -> r {
r {
i: i
}
}
fn test_rec() {
let i = &Cell::new(0);
{
let _a = BoxR {x: r(i)};
}
assert_eq!(i.get(), 1);
}
fn test_tag() {
enum t<'a> {
t0(r<'a>),
}
let i = &Cell::new(0);
{
let _a = t::t0(r(i));
}
assert_eq!(i.get(), 1);
}
fn test_tup() {
let i = &Cell::new(0);
{
let _a = (r(i), 0);
}
assert_eq!(i.get(), 1);
}
fn test_unique() {
let i = &Cell::new(0);
{
let _a = box r(i);
}
assert_eq!(i.get(), 1);
}
fn test_unique_rec() {
let i = &Cell::new(0);
{
let _a = box BoxR {
x: r(i)
};
}
assert_eq!(i.get(), 1);
}
pub fn main() {
test_rec();
test_tag();
test_tup();
test_unique();
test_unique_rec();
}
| r | identifier_name |
kindck-inherited-copy-bound.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that Copy bounds inherited by trait are checked.
#![feature(box_syntax)]
use std::any::Any;
trait Foo : Copy {
}
impl<T:Copy> Foo for T {
}
fn take_param<T:Foo>(foo: &T) { }
fn | () {
let x = box 3is;
take_param(&x); //~ ERROR `core::marker::Copy` is not implemented
}
fn b() {
let x = box 3is;
let y = &x;
let z = &x as &Foo; //~ ERROR `core::marker::Copy` is not implemented
}
fn main() { }
| a | identifier_name |
kindck-inherited-copy-bound.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT. | //
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that Copy bounds inherited by trait are checked.
#![feature(box_syntax)]
use std::any::Any;
trait Foo : Copy {
}
impl<T:Copy> Foo for T {
}
fn take_param<T:Foo>(foo: &T) { }
fn a() {
let x = box 3is;
take_param(&x); //~ ERROR `core::marker::Copy` is not implemented
}
fn b() {
let x = box 3is;
let y = &x;
let z = &x as &Foo; //~ ERROR `core::marker::Copy` is not implemented
}
fn main() { } | random_line_split |
|
kindck-inherited-copy-bound.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that Copy bounds inherited by trait are checked.
#![feature(box_syntax)]
use std::any::Any;
trait Foo : Copy {
}
impl<T:Copy> Foo for T {
}
fn take_param<T:Foo>(foo: &T) |
fn a() {
let x = box 3is;
take_param(&x); //~ ERROR `core::marker::Copy` is not implemented
}
fn b() {
let x = box 3is;
let y = &x;
let z = &x as &Foo; //~ ERROR `core::marker::Copy` is not implemented
}
fn main() { }
| { } | identifier_body |
opt_name.rs | //! Test optional prefix.
extern crate flame;
extern crate flamer;
use flamer::{flame, noflame};
#[flame("top")]
fn a() {
let l = Lower {};
l.a();
}
#[flame]
fn b() |
#[noflame]
fn c() {
b()
}
pub struct Lower;
impl Lower {
#[flame("lower")]
pub fn a(self) {
// nothing to do here
}
}
#[test]
fn main() {
c();
let spans = flame::spans();
assert_eq!(1, spans.len());
let roots = &spans[0];
println!("{:?}",roots);
// if more than 2 roots, a() was flamed twice or c was flamed
// main is missing because main isn't closed here
assert_eq!("b", roots.name);
assert_eq!(1, roots.children.len());
assert_eq!("top::a", roots.children[0].name);
assert_eq!(1, roots.children[0].children.len());
assert_eq!("lower::a", roots.children[0].children[0].name);
}
| {
a()
} | identifier_body |
opt_name.rs | //! Test optional prefix.
extern crate flame; | fn a() {
let l = Lower {};
l.a();
}
#[flame]
fn b() {
a()
}
#[noflame]
fn c() {
b()
}
pub struct Lower;
impl Lower {
#[flame("lower")]
pub fn a(self) {
// nothing to do here
}
}
#[test]
fn main() {
c();
let spans = flame::spans();
assert_eq!(1, spans.len());
let roots = &spans[0];
println!("{:?}",roots);
// if more than 2 roots, a() was flamed twice or c was flamed
// main is missing because main isn't closed here
assert_eq!("b", roots.name);
assert_eq!(1, roots.children.len());
assert_eq!("top::a", roots.children[0].name);
assert_eq!(1, roots.children[0].children.len());
assert_eq!("lower::a", roots.children[0].children[0].name);
} | extern crate flamer;
use flamer::{flame, noflame};
#[flame("top")] | random_line_split |
opt_name.rs | //! Test optional prefix.
extern crate flame;
extern crate flamer;
use flamer::{flame, noflame};
#[flame("top")]
fn a() {
let l = Lower {};
l.a();
}
#[flame]
fn b() {
a()
}
#[noflame]
fn | () {
b()
}
pub struct Lower;
impl Lower {
#[flame("lower")]
pub fn a(self) {
// nothing to do here
}
}
#[test]
fn main() {
c();
let spans = flame::spans();
assert_eq!(1, spans.len());
let roots = &spans[0];
println!("{:?}",roots);
// if more than 2 roots, a() was flamed twice or c was flamed
// main is missing because main isn't closed here
assert_eq!("b", roots.name);
assert_eq!(1, roots.children.len());
assert_eq!("top::a", roots.children[0].name);
assert_eq!(1, roots.children[0].children.len());
assert_eq!("lower::a", roots.children[0].children[0].name);
}
| c | identifier_name |
builtin.rs | use env::UserEnv;
use getopts::Options;
use std::collections::HashMap;
use std::env;
use std::io;
use std::io::prelude::*;
use std::path::{Component, Path, PathBuf};
use job;
const SUCCESS: io::Result<i32> = Ok(0);
pub trait Builtin {
fn run(&mut self, args: &[String], env: &mut UserEnv) -> io::Result<i32>;
fn dup(&self) -> Box<Builtin>;
}
pub struct SimpleBuiltin(fn(&[String], &mut UserEnv) -> io::Result<i32>);
impl Clone for SimpleBuiltin {
fn clone(&self) -> Self {
SimpleBuiltin(self.0)
}
}
impl Builtin for SimpleBuiltin where {
fn run(&mut self, args: &[String], env: &mut UserEnv) -> io::Result<i32> {
self.0(args, env)
}
fn dup(&self) -> Box<Builtin> {
Box::new(self.clone())
}
}
pub type BuiltinMap = HashMap<&'static str, Box<Builtin>>;
#[derive(Clone)]
struct Cd {
prev_dir: String,
}
impl Cd {
fn new() -> Cd {
let pwd = env::var("PWD").unwrap_or(String::new());
Cd { prev_dir: pwd }
}
fn change_to<P: AsRef<Path>>(&mut self, p: &P, env: &mut UserEnv) -> io::Result<()> {
let pwd = env.get("PWD");
self.prev_dir = pwd;
let new_pwd_buf = normalize_logical_path(&p);
env::set_current_dir(&new_pwd_buf)?;
let path_str = new_pwd_buf.to_str().ok_or(io::Error::new(
io::ErrorKind::Other,
"Invalid characters in path",
))?;
env.set("PWD", path_str);
Ok(())
}
}
fn normalize_logical_path<P: AsRef<Path>>(path: &P) -> PathBuf {
let path = path.as_ref();
let mut normalized_path = PathBuf::new();
for c in path.components() {
match c {
Component::ParentDir => {
normalized_path.pop();
}
Component::CurDir => continue,
_ => normalized_path.push(c.as_os_str()),
};
}
normalized_path
}
impl Builtin for Cd {
fn run(&mut self, args: &[String], env: &mut UserEnv) -> io::Result<i32> {
if args.len() == 0 {
let home = env.get("HOME");
if home.len()!= 0 {
return self.change_to(&PathBuf::from(&home), env).and(SUCCESS);
}
return SUCCESS;
}
if args[0] == "-" {
let prev_dir = self.prev_dir.clone();
return self.change_to(&prev_dir, env).and(SUCCESS);
}
let pwd = env.get("PWD");
let mut pwd_buf = if pwd == "" {
env::current_dir()?
} else {
PathBuf::from(pwd)
};
pwd_buf.push(&args[0]);
self.change_to(&pwd_buf, env).and(SUCCESS)
}
fn dup(&self) -> Box<Builtin> {
Box::new(self.clone())
}
}
fn pwd(_args: &[String], env: &mut UserEnv) -> io::Result<i32> {
println!("{}", env.get("PWD"));
SUCCESS
}
fn echo(args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
let mut opts = Options::new();
opts.optflag("n", "", "Suppress new lines");
let matches = match opts.parse(args) {
Ok(m) => m,
Err(_) => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Unable to parse arguments.",
))
}
};
let remaining_args = matches.free.join(" ");
if matches.opt_present("n") {
print!("{}", remaining_args);
try!(io::stdout().flush());
} else {
println!("{}", remaining_args);
}
SUCCESS
}
fn fg(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
let res = job::start_job(true)?;
Ok(res)
}
fn bg(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
let res = job::start_job(false)?;
Ok(res)
}
fn jobs(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
job::print_jobs();
Ok(0)
}
macro_rules! add_builtin_fns {
($map:ident, [ $( ($n:expr, $cmd:expr) ),* ] ) => {{
$($map.insert(
$n,
Box::new(SimpleBuiltin($cmd)) as Box<Builtin>
);)*
}}
}
fn builtin_true(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
SUCCESS
}
pub fn init_builtins() -> BuiltinMap {
let mut builtins: BuiltinMap = HashMap::new();
builtins.insert("cd", Box::new(Cd::new()));
add_builtin_fns!(
builtins,
[
("echo", echo),
("pwd", pwd),
("fg", fg),
("bg", bg),
("jobs", jobs),
("true", builtin_true),
("false", |_args: &[String], _env: &mut UserEnv| Ok(1)),
(":", builtin_true)
]
);
builtins
}
pub fn clone_builtins(builtins: &BuiltinMap) -> BuiltinMap {
let mut builtins_clone: BuiltinMap = HashMap::new();
for (cmd, func) in builtins.iter() {
builtins_clone.insert(cmd, func.dup());
}
builtins_clone
}
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
use std::{env, fs};
use test_fixture::*;
struct BuiltinTests {
pwd: PathBuf,
}
impl TestFixture for BuiltinTests {
fn setup(&mut self) {
let mut pwd = env::temp_dir();
pwd.push("pwd");
fs::create_dir(&pwd).unwrap();
self.pwd = pwd;
env::set_current_dir(&self.pwd).unwrap();
env::set_var("PWD", &self.pwd);
}
fn teardown(&mut self) {
fs::remove_dir(&self.pwd).unwrap();
}
fn | (&self) -> TestList<Self> {
vec![
test!("cd, no args", cd_with_no_args),
test!("cd, absolute arg", cd_with_absolute_arg),
test!("cd, relative arg", cd_with_relative_arg),
test!("cd, previous dir", cd_previous_directory),
]
}
}
impl BuiltinTests {
fn new() -> BuiltinTests {
BuiltinTests {
pwd: PathBuf::new(),
}
}
fn cd_with_no_args(&mut self) {
let home = String::from("/");
let mut user_env = UserEnv::new();
user_env.set("HOME", &home);
let mut cd = Cd::new();
cd.run(&[], &mut user_env).unwrap();
assert_eq!(user_env.get("PWD"), home);
}
fn cd_with_absolute_arg(&mut self) {
let dir = String::from("/");
let mut user_env = UserEnv::new();
user_env.set("PWD", &pathbuf_to_string(&self.pwd));
let mut cd = Cd::new();
cd.run(&[dir.clone()], &mut user_env).unwrap();
assert_eq!(user_env.get("PWD"), dir);
}
fn cd_with_relative_arg(&mut self) {
let mut pwd = self.pwd.clone();
pwd.pop();
let mut user_env = UserEnv::new();
user_env.set("PWD", &pathbuf_to_string(&pwd));
env::set_current_dir("..").unwrap();
let mut cd = Cd::new();
cd.run(&[String::from("pwd")], &mut user_env).unwrap();
assert_eq!(env::var("PWD"), Ok(pathbuf_to_string(&self.pwd)));
}
fn cd_previous_directory(&mut self) {
let mut user_env = UserEnv::new();
let mut cd = Cd::new();
cd.run(&[String::from("..")], &mut user_env).unwrap();
cd.run(&[String::from("-")], &mut user_env).unwrap();
assert_eq!(user_env.get("PWD"), pathbuf_to_string(&self.pwd));
}
}
fn pathbuf_to_string(p: &PathBuf) -> String {
String::from((*p).to_str().unwrap())
}
#[test]
fn builtin_tests() {
let fixture = BuiltinTests::new();
test_fixture_runner(fixture);
}
}
| tests | identifier_name |
builtin.rs | use env::UserEnv;
use getopts::Options;
use std::collections::HashMap;
use std::env;
use std::io;
use std::io::prelude::*;
use std::path::{Component, Path, PathBuf};
use job;
const SUCCESS: io::Result<i32> = Ok(0);
pub trait Builtin {
fn run(&mut self, args: &[String], env: &mut UserEnv) -> io::Result<i32>;
fn dup(&self) -> Box<Builtin>;
}
pub struct SimpleBuiltin(fn(&[String], &mut UserEnv) -> io::Result<i32>);
impl Clone for SimpleBuiltin {
fn clone(&self) -> Self {
SimpleBuiltin(self.0)
}
}
impl Builtin for SimpleBuiltin where {
fn run(&mut self, args: &[String], env: &mut UserEnv) -> io::Result<i32> {
self.0(args, env)
}
fn dup(&self) -> Box<Builtin> {
Box::new(self.clone())
}
}
pub type BuiltinMap = HashMap<&'static str, Box<Builtin>>;
#[derive(Clone)]
struct Cd {
prev_dir: String,
}
impl Cd {
fn new() -> Cd {
let pwd = env::var("PWD").unwrap_or(String::new());
Cd { prev_dir: pwd }
}
fn change_to<P: AsRef<Path>>(&mut self, p: &P, env: &mut UserEnv) -> io::Result<()> {
let pwd = env.get("PWD");
self.prev_dir = pwd;
let new_pwd_buf = normalize_logical_path(&p);
env::set_current_dir(&new_pwd_buf)?;
let path_str = new_pwd_buf.to_str().ok_or(io::Error::new(
io::ErrorKind::Other,
"Invalid characters in path",
))?;
env.set("PWD", path_str);
Ok(())
}
}
fn normalize_logical_path<P: AsRef<Path>>(path: &P) -> PathBuf {
let path = path.as_ref();
let mut normalized_path = PathBuf::new();
for c in path.components() {
match c {
Component::ParentDir => |
Component::CurDir => continue,
_ => normalized_path.push(c.as_os_str()),
};
}
normalized_path
}
impl Builtin for Cd {
fn run(&mut self, args: &[String], env: &mut UserEnv) -> io::Result<i32> {
if args.len() == 0 {
let home = env.get("HOME");
if home.len()!= 0 {
return self.change_to(&PathBuf::from(&home), env).and(SUCCESS);
}
return SUCCESS;
}
if args[0] == "-" {
let prev_dir = self.prev_dir.clone();
return self.change_to(&prev_dir, env).and(SUCCESS);
}
let pwd = env.get("PWD");
let mut pwd_buf = if pwd == "" {
env::current_dir()?
} else {
PathBuf::from(pwd)
};
pwd_buf.push(&args[0]);
self.change_to(&pwd_buf, env).and(SUCCESS)
}
fn dup(&self) -> Box<Builtin> {
Box::new(self.clone())
}
}
fn pwd(_args: &[String], env: &mut UserEnv) -> io::Result<i32> {
println!("{}", env.get("PWD"));
SUCCESS
}
fn echo(args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
let mut opts = Options::new();
opts.optflag("n", "", "Suppress new lines");
let matches = match opts.parse(args) {
Ok(m) => m,
Err(_) => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Unable to parse arguments.",
))
}
};
let remaining_args = matches.free.join(" ");
if matches.opt_present("n") {
print!("{}", remaining_args);
try!(io::stdout().flush());
} else {
println!("{}", remaining_args);
}
SUCCESS
}
fn fg(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
let res = job::start_job(true)?;
Ok(res)
}
fn bg(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
let res = job::start_job(false)?;
Ok(res)
}
fn jobs(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
job::print_jobs();
Ok(0)
}
macro_rules! add_builtin_fns {
($map:ident, [ $( ($n:expr, $cmd:expr) ),* ] ) => {{
$($map.insert(
$n,
Box::new(SimpleBuiltin($cmd)) as Box<Builtin>
);)*
}}
}
fn builtin_true(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
SUCCESS
}
pub fn init_builtins() -> BuiltinMap {
let mut builtins: BuiltinMap = HashMap::new();
builtins.insert("cd", Box::new(Cd::new()));
add_builtin_fns!(
builtins,
[
("echo", echo),
("pwd", pwd),
("fg", fg),
("bg", bg),
("jobs", jobs),
("true", builtin_true),
("false", |_args: &[String], _env: &mut UserEnv| Ok(1)),
(":", builtin_true)
]
);
builtins
}
pub fn clone_builtins(builtins: &BuiltinMap) -> BuiltinMap {
let mut builtins_clone: BuiltinMap = HashMap::new();
for (cmd, func) in builtins.iter() {
builtins_clone.insert(cmd, func.dup());
}
builtins_clone
}
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
use std::{env, fs};
use test_fixture::*;
struct BuiltinTests {
pwd: PathBuf,
}
impl TestFixture for BuiltinTests {
fn setup(&mut self) {
let mut pwd = env::temp_dir();
pwd.push("pwd");
fs::create_dir(&pwd).unwrap();
self.pwd = pwd;
env::set_current_dir(&self.pwd).unwrap();
env::set_var("PWD", &self.pwd);
}
fn teardown(&mut self) {
fs::remove_dir(&self.pwd).unwrap();
}
fn tests(&self) -> TestList<Self> {
vec![
test!("cd, no args", cd_with_no_args),
test!("cd, absolute arg", cd_with_absolute_arg),
test!("cd, relative arg", cd_with_relative_arg),
test!("cd, previous dir", cd_previous_directory),
]
}
}
impl BuiltinTests {
fn new() -> BuiltinTests {
BuiltinTests {
pwd: PathBuf::new(),
}
}
fn cd_with_no_args(&mut self) {
let home = String::from("/");
let mut user_env = UserEnv::new();
user_env.set("HOME", &home);
let mut cd = Cd::new();
cd.run(&[], &mut user_env).unwrap();
assert_eq!(user_env.get("PWD"), home);
}
fn cd_with_absolute_arg(&mut self) {
let dir = String::from("/");
let mut user_env = UserEnv::new();
user_env.set("PWD", &pathbuf_to_string(&self.pwd));
let mut cd = Cd::new();
cd.run(&[dir.clone()], &mut user_env).unwrap();
assert_eq!(user_env.get("PWD"), dir);
}
fn cd_with_relative_arg(&mut self) {
let mut pwd = self.pwd.clone();
pwd.pop();
let mut user_env = UserEnv::new();
user_env.set("PWD", &pathbuf_to_string(&pwd));
env::set_current_dir("..").unwrap();
let mut cd = Cd::new();
cd.run(&[String::from("pwd")], &mut user_env).unwrap();
assert_eq!(env::var("PWD"), Ok(pathbuf_to_string(&self.pwd)));
}
fn cd_previous_directory(&mut self) {
let mut user_env = UserEnv::new();
let mut cd = Cd::new();
cd.run(&[String::from("..")], &mut user_env).unwrap();
cd.run(&[String::from("-")], &mut user_env).unwrap();
assert_eq!(user_env.get("PWD"), pathbuf_to_string(&self.pwd));
}
}
fn pathbuf_to_string(p: &PathBuf) -> String {
String::from((*p).to_str().unwrap())
}
#[test]
fn builtin_tests() {
let fixture = BuiltinTests::new();
test_fixture_runner(fixture);
}
}
| {
normalized_path.pop();
} | conditional_block |
builtin.rs | use env::UserEnv;
use getopts::Options;
use std::collections::HashMap;
use std::env;
use std::io;
use std::io::prelude::*;
use std::path::{Component, Path, PathBuf};
use job;
const SUCCESS: io::Result<i32> = Ok(0);
pub trait Builtin {
fn run(&mut self, args: &[String], env: &mut UserEnv) -> io::Result<i32>;
fn dup(&self) -> Box<Builtin>;
}
pub struct SimpleBuiltin(fn(&[String], &mut UserEnv) -> io::Result<i32>);
impl Clone for SimpleBuiltin {
fn clone(&self) -> Self {
SimpleBuiltin(self.0)
}
}
impl Builtin for SimpleBuiltin where {
fn run(&mut self, args: &[String], env: &mut UserEnv) -> io::Result<i32> {
self.0(args, env)
}
fn dup(&self) -> Box<Builtin> {
Box::new(self.clone())
}
}
pub type BuiltinMap = HashMap<&'static str, Box<Builtin>>;
#[derive(Clone)]
struct Cd {
prev_dir: String,
}
impl Cd {
fn new() -> Cd {
let pwd = env::var("PWD").unwrap_or(String::new());
Cd { prev_dir: pwd }
}
fn change_to<P: AsRef<Path>>(&mut self, p: &P, env: &mut UserEnv) -> io::Result<()> {
let pwd = env.get("PWD");
self.prev_dir = pwd;
let new_pwd_buf = normalize_logical_path(&p);
env::set_current_dir(&new_pwd_buf)?;
let path_str = new_pwd_buf.to_str().ok_or(io::Error::new(
io::ErrorKind::Other,
"Invalid characters in path",
))?;
env.set("PWD", path_str);
Ok(())
}
}
fn normalize_logical_path<P: AsRef<Path>>(path: &P) -> PathBuf {
let path = path.as_ref();
let mut normalized_path = PathBuf::new();
for c in path.components() {
match c {
Component::ParentDir => {
normalized_path.pop();
}
Component::CurDir => continue,
_ => normalized_path.push(c.as_os_str()),
};
}
normalized_path
}
impl Builtin for Cd {
fn run(&mut self, args: &[String], env: &mut UserEnv) -> io::Result<i32> {
if args.len() == 0 {
let home = env.get("HOME");
if home.len()!= 0 {
return self.change_to(&PathBuf::from(&home), env).and(SUCCESS);
}
return SUCCESS;
}
if args[0] == "-" {
let prev_dir = self.prev_dir.clone();
return self.change_to(&prev_dir, env).and(SUCCESS);
}
let pwd = env.get("PWD");
let mut pwd_buf = if pwd == "" {
env::current_dir()?
} else {
PathBuf::from(pwd)
};
pwd_buf.push(&args[0]);
self.change_to(&pwd_buf, env).and(SUCCESS)
}
fn dup(&self) -> Box<Builtin> |
}
fn pwd(_args: &[String], env: &mut UserEnv) -> io::Result<i32> {
println!("{}", env.get("PWD"));
SUCCESS
}
fn echo(args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
let mut opts = Options::new();
opts.optflag("n", "", "Suppress new lines");
let matches = match opts.parse(args) {
Ok(m) => m,
Err(_) => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Unable to parse arguments.",
))
}
};
let remaining_args = matches.free.join(" ");
if matches.opt_present("n") {
print!("{}", remaining_args);
try!(io::stdout().flush());
} else {
println!("{}", remaining_args);
}
SUCCESS
}
fn fg(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
let res = job::start_job(true)?;
Ok(res)
}
fn bg(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
let res = job::start_job(false)?;
Ok(res)
}
fn jobs(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
job::print_jobs();
Ok(0)
}
macro_rules! add_builtin_fns {
($map:ident, [ $( ($n:expr, $cmd:expr) ),* ] ) => {{
$($map.insert(
$n,
Box::new(SimpleBuiltin($cmd)) as Box<Builtin>
);)*
}}
}
fn builtin_true(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
SUCCESS
}
pub fn init_builtins() -> BuiltinMap {
let mut builtins: BuiltinMap = HashMap::new();
builtins.insert("cd", Box::new(Cd::new()));
add_builtin_fns!(
builtins,
[
("echo", echo),
("pwd", pwd),
("fg", fg),
("bg", bg),
("jobs", jobs),
("true", builtin_true),
("false", |_args: &[String], _env: &mut UserEnv| Ok(1)),
(":", builtin_true)
]
);
builtins
}
pub fn clone_builtins(builtins: &BuiltinMap) -> BuiltinMap {
let mut builtins_clone: BuiltinMap = HashMap::new();
for (cmd, func) in builtins.iter() {
builtins_clone.insert(cmd, func.dup());
}
builtins_clone
}
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
use std::{env, fs};
use test_fixture::*;
struct BuiltinTests {
pwd: PathBuf,
}
impl TestFixture for BuiltinTests {
fn setup(&mut self) {
let mut pwd = env::temp_dir();
pwd.push("pwd");
fs::create_dir(&pwd).unwrap();
self.pwd = pwd;
env::set_current_dir(&self.pwd).unwrap();
env::set_var("PWD", &self.pwd);
}
fn teardown(&mut self) {
fs::remove_dir(&self.pwd).unwrap();
}
fn tests(&self) -> TestList<Self> {
vec![
test!("cd, no args", cd_with_no_args),
test!("cd, absolute arg", cd_with_absolute_arg),
test!("cd, relative arg", cd_with_relative_arg),
test!("cd, previous dir", cd_previous_directory),
]
}
}
impl BuiltinTests {
fn new() -> BuiltinTests {
BuiltinTests {
pwd: PathBuf::new(),
}
}
fn cd_with_no_args(&mut self) {
let home = String::from("/");
let mut user_env = UserEnv::new();
user_env.set("HOME", &home);
let mut cd = Cd::new();
cd.run(&[], &mut user_env).unwrap();
assert_eq!(user_env.get("PWD"), home);
}
fn cd_with_absolute_arg(&mut self) {
let dir = String::from("/");
let mut user_env = UserEnv::new();
user_env.set("PWD", &pathbuf_to_string(&self.pwd));
let mut cd = Cd::new();
cd.run(&[dir.clone()], &mut user_env).unwrap();
assert_eq!(user_env.get("PWD"), dir);
}
fn cd_with_relative_arg(&mut self) {
let mut pwd = self.pwd.clone();
pwd.pop();
let mut user_env = UserEnv::new();
user_env.set("PWD", &pathbuf_to_string(&pwd));
env::set_current_dir("..").unwrap();
let mut cd = Cd::new();
cd.run(&[String::from("pwd")], &mut user_env).unwrap();
assert_eq!(env::var("PWD"), Ok(pathbuf_to_string(&self.pwd)));
}
fn cd_previous_directory(&mut self) {
let mut user_env = UserEnv::new();
let mut cd = Cd::new();
cd.run(&[String::from("..")], &mut user_env).unwrap();
cd.run(&[String::from("-")], &mut user_env).unwrap();
assert_eq!(user_env.get("PWD"), pathbuf_to_string(&self.pwd));
}
}
fn pathbuf_to_string(p: &PathBuf) -> String {
String::from((*p).to_str().unwrap())
}
#[test]
fn builtin_tests() {
let fixture = BuiltinTests::new();
test_fixture_runner(fixture);
}
}
| {
Box::new(self.clone())
} | identifier_body |
builtin.rs | use env::UserEnv;
use getopts::Options;
use std::collections::HashMap;
use std::env;
use std::io;
use std::io::prelude::*;
use std::path::{Component, Path, PathBuf};
use job;
const SUCCESS: io::Result<i32> = Ok(0);
pub trait Builtin {
fn run(&mut self, args: &[String], env: &mut UserEnv) -> io::Result<i32>;
fn dup(&self) -> Box<Builtin>;
}
pub struct SimpleBuiltin(fn(&[String], &mut UserEnv) -> io::Result<i32>);
impl Clone for SimpleBuiltin {
fn clone(&self) -> Self {
SimpleBuiltin(self.0)
}
}
impl Builtin for SimpleBuiltin where {
fn run(&mut self, args: &[String], env: &mut UserEnv) -> io::Result<i32> {
self.0(args, env)
}
fn dup(&self) -> Box<Builtin> {
Box::new(self.clone())
}
}
pub type BuiltinMap = HashMap<&'static str, Box<Builtin>>;
#[derive(Clone)]
struct Cd {
prev_dir: String,
}
impl Cd {
fn new() -> Cd {
let pwd = env::var("PWD").unwrap_or(String::new());
Cd { prev_dir: pwd }
}
fn change_to<P: AsRef<Path>>(&mut self, p: &P, env: &mut UserEnv) -> io::Result<()> {
let pwd = env.get("PWD");
self.prev_dir = pwd;
let new_pwd_buf = normalize_logical_path(&p);
env::set_current_dir(&new_pwd_buf)?;
let path_str = new_pwd_buf.to_str().ok_or(io::Error::new(
io::ErrorKind::Other,
"Invalid characters in path",
))?;
env.set("PWD", path_str);
Ok(())
}
}
fn normalize_logical_path<P: AsRef<Path>>(path: &P) -> PathBuf {
let path = path.as_ref();
let mut normalized_path = PathBuf::new();
for c in path.components() {
match c {
Component::ParentDir => {
normalized_path.pop();
}
Component::CurDir => continue,
_ => normalized_path.push(c.as_os_str()),
};
}
normalized_path
}
impl Builtin for Cd {
fn run(&mut self, args: &[String], env: &mut UserEnv) -> io::Result<i32> {
if args.len() == 0 {
let home = env.get("HOME");
if home.len()!= 0 {
return self.change_to(&PathBuf::from(&home), env).and(SUCCESS);
}
return SUCCESS;
}
if args[0] == "-" {
let prev_dir = self.prev_dir.clone();
return self.change_to(&prev_dir, env).and(SUCCESS);
}
let pwd = env.get("PWD");
let mut pwd_buf = if pwd == "" {
env::current_dir()?
} else {
PathBuf::from(pwd)
};
pwd_buf.push(&args[0]);
self.change_to(&pwd_buf, env).and(SUCCESS)
}
fn dup(&self) -> Box<Builtin> {
Box::new(self.clone())
}
}
fn pwd(_args: &[String], env: &mut UserEnv) -> io::Result<i32> {
println!("{}", env.get("PWD"));
SUCCESS
}
fn echo(args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
let mut opts = Options::new();
opts.optflag("n", "", "Suppress new lines");
let matches = match opts.parse(args) {
Ok(m) => m,
Err(_) => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Unable to parse arguments.",
))
}
};
let remaining_args = matches.free.join(" ");
if matches.opt_present("n") {
print!("{}", remaining_args);
try!(io::stdout().flush());
} else {
println!("{}", remaining_args);
}
SUCCESS
}
fn fg(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
let res = job::start_job(true)?;
Ok(res)
}
fn bg(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
let res = job::start_job(false)?;
Ok(res)
}
fn jobs(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
job::print_jobs();
Ok(0)
}
macro_rules! add_builtin_fns {
($map:ident, [ $( ($n:expr, $cmd:expr) ),* ] ) => {{
$($map.insert(
$n,
Box::new(SimpleBuiltin($cmd)) as Box<Builtin>
);)*
}}
}
fn builtin_true(_args: &[String], _env: &mut UserEnv) -> io::Result<i32> {
SUCCESS
}
pub fn init_builtins() -> BuiltinMap {
let mut builtins: BuiltinMap = HashMap::new();
builtins.insert("cd", Box::new(Cd::new()));
add_builtin_fns!(
builtins,
[
("echo", echo),
("pwd", pwd),
("fg", fg),
("bg", bg),
("jobs", jobs),
("true", builtin_true),
("false", |_args: &[String], _env: &mut UserEnv| Ok(1)),
(":", builtin_true)
]
);
builtins
}
pub fn clone_builtins(builtins: &BuiltinMap) -> BuiltinMap {
let mut builtins_clone: BuiltinMap = HashMap::new();
for (cmd, func) in builtins.iter() {
builtins_clone.insert(cmd, func.dup());
}
builtins_clone
}
#[cfg(test)]
mod tests {
use super::*;
use std::path::PathBuf;
use std::{env, fs};
use test_fixture::*;
struct BuiltinTests {
pwd: PathBuf,
}
impl TestFixture for BuiltinTests {
fn setup(&mut self) {
let mut pwd = env::temp_dir();
pwd.push("pwd");
fs::create_dir(&pwd).unwrap();
self.pwd = pwd;
env::set_current_dir(&self.pwd).unwrap();
env::set_var("PWD", &self.pwd);
}
fn teardown(&mut self) {
fs::remove_dir(&self.pwd).unwrap();
}
fn tests(&self) -> TestList<Self> {
vec![
test!("cd, no args", cd_with_no_args),
test!("cd, absolute arg", cd_with_absolute_arg),
test!("cd, relative arg", cd_with_relative_arg),
test!("cd, previous dir", cd_previous_directory),
]
}
}
impl BuiltinTests {
fn new() -> BuiltinTests {
BuiltinTests {
pwd: PathBuf::new(),
}
}
fn cd_with_no_args(&mut self) {
let home = String::from("/");
let mut user_env = UserEnv::new();
user_env.set("HOME", &home);
let mut cd = Cd::new();
cd.run(&[], &mut user_env).unwrap();
assert_eq!(user_env.get("PWD"), home);
}
fn cd_with_absolute_arg(&mut self) {
let dir = String::from("/");
let mut user_env = UserEnv::new();
user_env.set("PWD", &pathbuf_to_string(&self.pwd));
let mut cd = Cd::new();
cd.run(&[dir.clone()], &mut user_env).unwrap();
assert_eq!(user_env.get("PWD"), dir);
}
fn cd_with_relative_arg(&mut self) {
let mut pwd = self.pwd.clone();
pwd.pop();
let mut user_env = UserEnv::new();
user_env.set("PWD", &pathbuf_to_string(&pwd));
env::set_current_dir("..").unwrap();
let mut cd = Cd::new();
cd.run(&[String::from("pwd")], &mut user_env).unwrap();
assert_eq!(env::var("PWD"), Ok(pathbuf_to_string(&self.pwd)));
}
fn cd_previous_directory(&mut self) {
let mut user_env = UserEnv::new();
let mut cd = Cd::new();
cd.run(&[String::from("..")], &mut user_env).unwrap();
cd.run(&[String::from("-")], &mut user_env).unwrap();
assert_eq!(user_env.get("PWD"), pathbuf_to_string(&self.pwd));
}
}
fn pathbuf_to_string(p: &PathBuf) -> String {
String::from((*p).to_str().unwrap())
} | let fixture = BuiltinTests::new();
test_fixture_runner(fixture);
}
} |
#[test]
fn builtin_tests() { | random_line_split |
struct-partial-move-1.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
// | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[derive(PartialEq, Debug)]
pub struct Partial<T> { x: T, y: T }
#[derive(PartialEq, Debug)]
struct S { val: isize }
impl S { fn new(v: isize) -> S { S { val: v } } }
impl Drop for S { fn drop(&mut self) { } }
pub fn f<T, F>((b1, b2): (T, T), mut f: F) -> Partial<T> where F: FnMut(T) -> T {
let p = Partial { x: b1, y: b2 };
// Move of `p` is legal even though we are also moving `p.y`; the
// `..p` moves all fields *except* `p.y` in this context.
Partial { y: f(p.y),..p }
}
pub fn main() {
let p = f((S::new(3), S::new(4)), |S { val: z }| S::new(z+1));
assert_eq!(p, Partial { x: S::new(3), y: S::new(5) });
} | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license | random_line_split |
struct-partial-move-1.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[derive(PartialEq, Debug)]
pub struct Partial<T> { x: T, y: T }
#[derive(PartialEq, Debug)]
struct | { val: isize }
impl S { fn new(v: isize) -> S { S { val: v } } }
impl Drop for S { fn drop(&mut self) { } }
pub fn f<T, F>((b1, b2): (T, T), mut f: F) -> Partial<T> where F: FnMut(T) -> T {
let p = Partial { x: b1, y: b2 };
// Move of `p` is legal even though we are also moving `p.y`; the
// `..p` moves all fields *except* `p.y` in this context.
Partial { y: f(p.y),..p }
}
pub fn main() {
let p = f((S::new(3), S::new(4)), |S { val: z }| S::new(z+1));
assert_eq!(p, Partial { x: S::new(3), y: S::new(5) });
}
| S | identifier_name |
zbytes.rs | //! The `zbyte` module contains code
//! to deal with opcodes and zcode.
/// A struct that holds an array of bytes and provides some convenience functions.
pub struct Bytes {
/// The underlying data
pub bytes: Vec<u8>
}
impl Bytes {
/// Returns the length of the byte array.
pub fn len(&self) -> usize {
self.bytes.len()
}
/// Writes the byte (u8) to the index specified.
///
/// If the vector isn't large enough it fills everything up to the index with zeros.
pub fn write_byte(&mut self, byte: u8, index: usize) {
while self.len() <= index {
self.bytes.push(0);
}
self.bytes[index] = byte;
}
/// Appends a byte to the end of the data.
pub fn append_byte(&mut self, byte: u8) {
let index: usize = self.bytes.len();
self.write_byte(byte, index);
}
/// Writes a u16 in two bytes with the correct byte-order for the Z-Machine at the specified
/// index.
pub fn write_u16(&mut self, value: u16, index: usize) {
self.write_byte((value >> 8) as u8, index);
self.write_byte((value & 0xff) as u8, index + 1);
}
/// Appends a u16 to the end of the data.
pub fn append_u16(&mut self, value: u16) {
let index: usize = self.bytes.len();
self.write_u16(value, index);
}
/// Writes multiple bytes at the specified index.
pub fn write_bytes(&mut self, bytes: &[u8], to_index: usize) {
for i in 0..bytes.len() {
self.write_byte(bytes[i], to_index+i);
}
}
/// Appends an array of bytes at the end of the data.
pub fn append_bytes(&mut self, bytes: &[u8]) {
let index: usize = self.bytes.len();
self.write_bytes(bytes, index);
}
| ///
/// `=> [index-1] == 0; [index] == nil;`
pub fn write_zero_until(&mut self, index: usize) {
while self.len() < index {
self.bytes.push(0);
}
}
/// Prints the underlying byte array
pub fn print(&self) {
debug!("bytes: {:?}", self.bytes);
}
} | /// Fills everything with zeros until but not including the index. | random_line_split |
zbytes.rs | //! The `zbyte` module contains code
//! to deal with opcodes and zcode.
/// A struct that holds an array of bytes and provides some convenience functions.
pub struct Bytes {
/// The underlying data
pub bytes: Vec<u8>
}
impl Bytes {
/// Returns the length of the byte array.
pub fn len(&self) -> usize {
self.bytes.len()
}
/// Writes the byte (u8) to the index specified.
///
/// If the vector isn't large enough it fills everything up to the index with zeros.
pub fn write_byte(&mut self, byte: u8, index: usize) {
while self.len() <= index {
self.bytes.push(0);
}
self.bytes[index] = byte;
}
/// Appends a byte to the end of the data.
pub fn | (&mut self, byte: u8) {
let index: usize = self.bytes.len();
self.write_byte(byte, index);
}
/// Writes a u16 in two bytes with the correct byte-order for the Z-Machine at the specified
/// index.
pub fn write_u16(&mut self, value: u16, index: usize) {
self.write_byte((value >> 8) as u8, index);
self.write_byte((value & 0xff) as u8, index + 1);
}
/// Appends a u16 to the end of the data.
pub fn append_u16(&mut self, value: u16) {
let index: usize = self.bytes.len();
self.write_u16(value, index);
}
/// Writes multiple bytes at the specified index.
pub fn write_bytes(&mut self, bytes: &[u8], to_index: usize) {
for i in 0..bytes.len() {
self.write_byte(bytes[i], to_index+i);
}
}
/// Appends an array of bytes at the end of the data.
pub fn append_bytes(&mut self, bytes: &[u8]) {
let index: usize = self.bytes.len();
self.write_bytes(bytes, index);
}
/// Fills everything with zeros until but not including the index.
///
/// `=> [index-1] == 0; [index] == nil;`
pub fn write_zero_until(&mut self, index: usize) {
while self.len() < index {
self.bytes.push(0);
}
}
/// Prints the underlying byte array
pub fn print(&self) {
debug!("bytes: {:?}", self.bytes);
}
}
| append_byte | identifier_name |
closeevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::Bindings::CloseEventBinding;
use dom::bindings::codegen::Bindings::CloseEventBinding::CloseEventMethods;
use dom::bindings::codegen::InheritTypes::EventCast;
use dom::bindings::error::Fallible;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::Root;
use dom::bindings::utils::reflect_dom_object;
use dom::event::{Event, EventTypeId, EventBubbles, EventCancelable};
use script_task::ScriptChan;
use util::str::DOMString;
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct CloseEvent {
event: Event,
wasClean: bool,
code: u16,
reason: DOMString,
}
impl CloseEvent {
pub fn new_inherited(type_id: EventTypeId, wasClean: bool, code: u16,
reason: DOMString) -> CloseEvent {
CloseEvent {
event: Event::new_inherited(type_id),
wasClean: wasClean,
code: code,
reason: reason,
}
}
pub fn new(global: GlobalRef,
type_: DOMString,
bubbles: EventBubbles,
cancelable: EventCancelable,
wasClean: bool,
code: u16,
reason: DOMString) -> Root<CloseEvent> {
let event = box CloseEvent::new_inherited(EventTypeId::CloseEvent,
wasClean, code, reason);
let ev = reflect_dom_object(event, global, CloseEventBinding::Wrap);
{
let event = EventCast::from_ref(ev.r());
event.InitEvent(type_,
bubbles == EventBubbles::Bubbles,
cancelable == EventCancelable::Cancelable);
}
ev
}
pub fn Constructor(global: GlobalRef,
type_: DOMString,
init: &CloseEventBinding::CloseEventInit)
-> Fallible<Root<CloseEvent>> {
let bubbles = if init.parent.bubbles { EventBubbles::Bubbles } else { EventBubbles::DoesNotBubble };
let cancelable = if init.parent.cancelable | else {
EventCancelable::NotCancelable
};
Ok(CloseEvent::new(global, type_, bubbles, cancelable, init.wasClean,
init.code, init.reason.clone()))
}
}
impl<'a> CloseEventMethods for &'a CloseEvent {
// https://html.spec.whatwg.org/multipage/#dom-closeevent-wasclean
fn WasClean(self) -> bool {
self.wasClean
}
// https://html.spec.whatwg.org/multipage/#dom-closeevent-code
fn Code(self) -> u16 {
self.code
}
// https://html.spec.whatwg.org/multipage/#dom-closeevent-reason
fn Reason(self) -> DOMString {
self.reason.clone()
}
}
| {
EventCancelable::Cancelable
} | conditional_block |
closeevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::Bindings::CloseEventBinding;
use dom::bindings::codegen::Bindings::CloseEventBinding::CloseEventMethods;
use dom::bindings::codegen::InheritTypes::EventCast;
use dom::bindings::error::Fallible;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::Root;
use dom::bindings::utils::reflect_dom_object;
use dom::event::{Event, EventTypeId, EventBubbles, EventCancelable};
use script_task::ScriptChan;
use util::str::DOMString;
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct CloseEvent {
event: Event,
wasClean: bool,
code: u16,
reason: DOMString,
}
impl CloseEvent {
pub fn new_inherited(type_id: EventTypeId, wasClean: bool, code: u16,
reason: DOMString) -> CloseEvent {
CloseEvent {
event: Event::new_inherited(type_id),
wasClean: wasClean,
code: code,
reason: reason,
}
}
pub fn new(global: GlobalRef,
type_: DOMString,
bubbles: EventBubbles,
cancelable: EventCancelable,
wasClean: bool,
code: u16,
reason: DOMString) -> Root<CloseEvent> {
let event = box CloseEvent::new_inherited(EventTypeId::CloseEvent,
wasClean, code, reason);
let ev = reflect_dom_object(event, global, CloseEventBinding::Wrap);
{
let event = EventCast::from_ref(ev.r());
event.InitEvent(type_,
bubbles == EventBubbles::Bubbles,
cancelable == EventCancelable::Cancelable);
}
ev
}
pub fn Constructor(global: GlobalRef,
type_: DOMString,
init: &CloseEventBinding::CloseEventInit)
-> Fallible<Root<CloseEvent>> {
let bubbles = if init.parent.bubbles { EventBubbles::Bubbles } else { EventBubbles::DoesNotBubble };
let cancelable = if init.parent.cancelable {
EventCancelable::Cancelable
} else {
EventCancelable::NotCancelable
};
Ok(CloseEvent::new(global, type_, bubbles, cancelable, init.wasClean,
init.code, init.reason.clone()))
}
}
impl<'a> CloseEventMethods for &'a CloseEvent {
// https://html.spec.whatwg.org/multipage/#dom-closeevent-wasclean
fn WasClean(self) -> bool {
self.wasClean
}
// https://html.spec.whatwg.org/multipage/#dom-closeevent-code
fn Code(self) -> u16 |
// https://html.spec.whatwg.org/multipage/#dom-closeevent-reason
fn Reason(self) -> DOMString {
self.reason.clone()
}
}
| {
self.code
} | identifier_body |
closeevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::Bindings::CloseEventBinding;
use dom::bindings::codegen::Bindings::CloseEventBinding::CloseEventMethods;
use dom::bindings::codegen::InheritTypes::EventCast;
use dom::bindings::error::Fallible;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::Root;
use dom::bindings::utils::reflect_dom_object;
use dom::event::{Event, EventTypeId, EventBubbles, EventCancelable};
use script_task::ScriptChan;
use util::str::DOMString;
#[dom_struct] | reason: DOMString,
}
impl CloseEvent {
pub fn new_inherited(type_id: EventTypeId, wasClean: bool, code: u16,
reason: DOMString) -> CloseEvent {
CloseEvent {
event: Event::new_inherited(type_id),
wasClean: wasClean,
code: code,
reason: reason,
}
}
pub fn new(global: GlobalRef,
type_: DOMString,
bubbles: EventBubbles,
cancelable: EventCancelable,
wasClean: bool,
code: u16,
reason: DOMString) -> Root<CloseEvent> {
let event = box CloseEvent::new_inherited(EventTypeId::CloseEvent,
wasClean, code, reason);
let ev = reflect_dom_object(event, global, CloseEventBinding::Wrap);
{
let event = EventCast::from_ref(ev.r());
event.InitEvent(type_,
bubbles == EventBubbles::Bubbles,
cancelable == EventCancelable::Cancelable);
}
ev
}
pub fn Constructor(global: GlobalRef,
type_: DOMString,
init: &CloseEventBinding::CloseEventInit)
-> Fallible<Root<CloseEvent>> {
let bubbles = if init.parent.bubbles { EventBubbles::Bubbles } else { EventBubbles::DoesNotBubble };
let cancelable = if init.parent.cancelable {
EventCancelable::Cancelable
} else {
EventCancelable::NotCancelable
};
Ok(CloseEvent::new(global, type_, bubbles, cancelable, init.wasClean,
init.code, init.reason.clone()))
}
}
impl<'a> CloseEventMethods for &'a CloseEvent {
// https://html.spec.whatwg.org/multipage/#dom-closeevent-wasclean
fn WasClean(self) -> bool {
self.wasClean
}
// https://html.spec.whatwg.org/multipage/#dom-closeevent-code
fn Code(self) -> u16 {
self.code
}
// https://html.spec.whatwg.org/multipage/#dom-closeevent-reason
fn Reason(self) -> DOMString {
self.reason.clone()
}
} | #[derive(HeapSizeOf)]
pub struct CloseEvent {
event: Event,
wasClean: bool,
code: u16, | random_line_split |
closeevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use dom::bindings::codegen::Bindings::CloseEventBinding;
use dom::bindings::codegen::Bindings::CloseEventBinding::CloseEventMethods;
use dom::bindings::codegen::InheritTypes::EventCast;
use dom::bindings::error::Fallible;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::Root;
use dom::bindings::utils::reflect_dom_object;
use dom::event::{Event, EventTypeId, EventBubbles, EventCancelable};
use script_task::ScriptChan;
use util::str::DOMString;
#[dom_struct]
#[derive(HeapSizeOf)]
pub struct CloseEvent {
event: Event,
wasClean: bool,
code: u16,
reason: DOMString,
}
impl CloseEvent {
pub fn new_inherited(type_id: EventTypeId, wasClean: bool, code: u16,
reason: DOMString) -> CloseEvent {
CloseEvent {
event: Event::new_inherited(type_id),
wasClean: wasClean,
code: code,
reason: reason,
}
}
pub fn | (global: GlobalRef,
type_: DOMString,
bubbles: EventBubbles,
cancelable: EventCancelable,
wasClean: bool,
code: u16,
reason: DOMString) -> Root<CloseEvent> {
let event = box CloseEvent::new_inherited(EventTypeId::CloseEvent,
wasClean, code, reason);
let ev = reflect_dom_object(event, global, CloseEventBinding::Wrap);
{
let event = EventCast::from_ref(ev.r());
event.InitEvent(type_,
bubbles == EventBubbles::Bubbles,
cancelable == EventCancelable::Cancelable);
}
ev
}
pub fn Constructor(global: GlobalRef,
type_: DOMString,
init: &CloseEventBinding::CloseEventInit)
-> Fallible<Root<CloseEvent>> {
let bubbles = if init.parent.bubbles { EventBubbles::Bubbles } else { EventBubbles::DoesNotBubble };
let cancelable = if init.parent.cancelable {
EventCancelable::Cancelable
} else {
EventCancelable::NotCancelable
};
Ok(CloseEvent::new(global, type_, bubbles, cancelable, init.wasClean,
init.code, init.reason.clone()))
}
}
impl<'a> CloseEventMethods for &'a CloseEvent {
// https://html.spec.whatwg.org/multipage/#dom-closeevent-wasclean
fn WasClean(self) -> bool {
self.wasClean
}
// https://html.spec.whatwg.org/multipage/#dom-closeevent-code
fn Code(self) -> u16 {
self.code
}
// https://html.spec.whatwg.org/multipage/#dom-closeevent-reason
fn Reason(self) -> DOMString {
self.reason.clone()
}
}
| new | identifier_name |
build.rs | // Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
use prost_build::Config;
fn | () -> Result<(), Box<dyn std::error::Error>> {
let mut config = Config::new();
config.bytes(&["."]);
tonic_build::configure()
.build_client(true)
.build_server(true)
.compile_with_config(
config,
&[
"protos/bazelbuild_remote-apis/build/bazel/remote/execution/v2/remote_execution.proto",
"protos/bazelbuild_remote-apis/build/bazel/semver/semver.proto",
"protos/buildbarn/cas.proto",
"protos/googleapis/google/bytestream/bytestream.proto",
"protos/googleapis/google/rpc/code.proto",
"protos/googleapis/google/rpc/error_details.proto",
"protos/googleapis/google/rpc/status.proto",
"protos/googleapis/google/longrunning/operations.proto",
"protos/standard/google/protobuf/empty.proto",
],
&[
"protos/bazelbuild_remote-apis",
"protos/buildbarn",
"protos/googleapis",
"protos/standard",
],
)?;
Ok(())
}
| main | identifier_name |
build.rs | // Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
use prost_build::Config;
fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut config = Config::new();
config.bytes(&["."]);
tonic_build::configure()
.build_client(true)
.build_server(true)
.compile_with_config(
config,
&[
"protos/bazelbuild_remote-apis/build/bazel/remote/execution/v2/remote_execution.proto",
"protos/bazelbuild_remote-apis/build/bazel/semver/semver.proto",
"protos/buildbarn/cas.proto",
"protos/googleapis/google/bytestream/bytestream.proto",
"protos/googleapis/google/rpc/code.proto",
"protos/googleapis/google/rpc/error_details.proto",
"protos/googleapis/google/rpc/status.proto",
"protos/googleapis/google/longrunning/operations.proto",
"protos/standard/google/protobuf/empty.proto",
],
&[
"protos/bazelbuild_remote-apis",
"protos/buildbarn",
"protos/googleapis",
"protos/standard", | )?;
Ok(())
} | ], | random_line_split |
build.rs | // Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
use prost_build::Config;
fn main() -> Result<(), Box<dyn std::error::Error>> | &[
"protos/bazelbuild_remote-apis",
"protos/buildbarn",
"protos/googleapis",
"protos/standard",
],
)?;
Ok(())
}
| {
let mut config = Config::new();
config.bytes(&["."]);
tonic_build::configure()
.build_client(true)
.build_server(true)
.compile_with_config(
config,
&[
"protos/bazelbuild_remote-apis/build/bazel/remote/execution/v2/remote_execution.proto",
"protos/bazelbuild_remote-apis/build/bazel/semver/semver.proto",
"protos/buildbarn/cas.proto",
"protos/googleapis/google/bytestream/bytestream.proto",
"protos/googleapis/google/rpc/code.proto",
"protos/googleapis/google/rpc/error_details.proto",
"protos/googleapis/google/rpc/status.proto",
"protos/googleapis/google/longrunning/operations.proto",
"protos/standard/google/protobuf/empty.proto",
], | identifier_body |
lib.rs | extern crate httparse;
extern crate hyper;
extern crate mio;
extern crate netbuf;
extern crate rotor;
extern crate unicase;
extern crate url;
extern crate time;
extern crate multimap;
use rotor::transports::{accept, stream};
pub use hyper::method::Method;
pub use hyper::status::StatusCode;
pub use hyper::version::HttpVersion;
pub use mio::{EventLoop};
pub use mio::tcp::{TcpListener, TcpStream};
pub use rotor::Handler as EventHandler;
pub use url::Url;
pub use error::{Error, Result};
pub use headers::{IterListHeader, Headers};
pub use http1::Handler;
pub use message::Message;
pub use request::Request;
pub use response::Response;
mod error;
mod headers; | pub type HttpServer<C, R> = accept::Serve<C,
TcpListener,
stream::Stream<C, TcpStream, http1::Client<C, R>>>; | pub mod http1;
mod message;
mod request;
mod response;
| random_line_split |
bind-by-move-neither-can-live-while-the-other-survives-4.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct X { x: (), }
impl Drop for X {
fn finalize(&self) {
error!("destructor runs"); |
fn main() {
let x = Some((X { x: () }, X { x: () }));
match x {
Some((_y, ref _z)) => { }, //~ ERROR cannot bind by-move and by-ref in the same pattern
None => fail!()
}
} | }
} | random_line_split |
bind-by-move-neither-can-live-while-the-other-survives-4.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct X { x: (), }
impl Drop for X {
fn finalize(&self) {
error!("destructor runs");
}
}
fn main() | {
let x = Some((X { x: () }, X { x: () }));
match x {
Some((_y, ref _z)) => { }, //~ ERROR cannot bind by-move and by-ref in the same pattern
None => fail!()
}
} | identifier_body |
|
bind-by-move-neither-can-live-while-the-other-survives-4.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct X { x: (), }
impl Drop for X {
fn | (&self) {
error!("destructor runs");
}
}
fn main() {
let x = Some((X { x: () }, X { x: () }));
match x {
Some((_y, ref _z)) => { }, //~ ERROR cannot bind by-move and by-ref in the same pattern
None => fail!()
}
}
| finalize | identifier_name |
bind-by-move-neither-can-live-while-the-other-survives-4.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct X { x: (), }
impl Drop for X {
fn finalize(&self) {
error!("destructor runs");
}
}
fn main() {
let x = Some((X { x: () }, X { x: () }));
match x {
Some((_y, ref _z)) => | , //~ ERROR cannot bind by-move and by-ref in the same pattern
None => fail!()
}
}
| { } | conditional_block |
lib.rs | //! Binding Rust with Python, both ways!
//!
//! This library will generate and handle type conversions between Python and
//! Rust. To use Python from Rust refer to the
//! [library wiki](https://github.com/iduartgomez/rustypy/wiki), more general examples
//! and information on how to use Rust in Python can also be found there.
//!
//! Checkout the [PyTypes](../rustypy/pytypes/index.html) module documentation for more information
//! on how to write foreign functions that are compliant with Python as well as using the custom
//! types that will ease type conversion.
#![crate_type = "cdylib"]
extern crate cpython;
extern crate libc;
extern crate syn;
extern crate walkdir;
use std::fs::File;
use std::io::Read;
use std::path::Path;
use std::ptr;
use libc::size_t;
mod macros;
pub mod pytypes;
// re-export
pub use self::pytypes::pybool::PyBool;
pub use self::pytypes::pydict::PyDict;
pub use self::pytypes::pylist::PyList;
pub use self::pytypes::pystring::PyString;
pub use self::pytypes::pytuple::PyTuple;
pub use self::pytypes::PyArg;
#[doc(hidden)]
#[no_mangle]
pub unsafe extern "C" fn parse_src(
path: *mut PyString,
krate_data: &mut KrateData,
) -> *mut PyString {
let path = PyString::from_ptr_to_string(path);
let path: &Path = path.as_ref();
let dir = if let Some(parent) = path.parent() {
parent
} else {
// unlikely this happens, but just in case
return PyString::from("crate in root directory not allowed".to_string()).into_raw();
};
for entry in walkdir::WalkDir::new(dir)
.into_iter()
.filter_map(|e| e.ok())
.filter(|e| {
if let Some(ext) = e.path().extension() {
ext == "rs"
} else {
false
}
})
{
if let Err(err) = parse_file(krate_data, entry.path()) {
return err;
}
}
ptr::null_mut::<PyString>()
}
fn parse_file(krate_data: &mut KrateData, path: &Path) -> Result<(), *mut PyString> {
let mut f = match File::open(path) {
Ok(file) => file,
Err(_) => {
return Err(
PyString::from(format!("path not found: {}", path.to_str().unwrap())).into_raw(),
)
}
};
let mut src = String::new();
if f.read_to_string(&mut src).is_err() {
return Err(PyString::from(format!(
"failed to read the source file: {}",
path.to_str().unwrap()
))
.into_raw());
}
match syn::parse_file(&src) {
Ok(krate) => {
syn::visit::visit_file(krate_data, &krate);
krate_data.collect_values();
if krate_data.collected.is_empty() {
return Err(PyString::from("zero function calls parsed".to_string()).into_raw());
}
}
Err(err) => return Err(PyString::from(format!("{}", err)).into_raw()),
};
Ok(())
}
#[doc(hidden)]
pub struct KrateData {
functions: Vec<FnDef>,
collected: Vec<String>,
prefixes: Vec<String>,
}
impl KrateData {
fn new(prefixes: Vec<String>) -> KrateData {
KrateData {
functions: vec![],
collected: vec![],
prefixes,
}
}
fn collect_values(&mut self) {
let mut add = true;
for v in self.functions.drain(..) {
let FnDef {
name: mut fndef,
args,
output,
} = v;
let original_name = fndef.clone();
if!args.is_empty() {
fndef.push_str("::");
args.iter().fold(&mut fndef, |acc, arg| {
if let Ok(repr) = type_repr(arg, None) {
acc.push_str(&repr);
acc.push(';');
} else {
eprintln!(
"could not generate bindings for fn `{}`; unacceptable parameters
",
original_name
);
add = false;
}
acc | } else {
// function w/o arguments
fndef.push_str("::();");
}
if add {
match output {
syn::ReturnType::Default => fndef.push_str("type(void)"),
syn::ReturnType::Type(_, ty) => {
if let Ok(ty) = type_repr(&ty, None) {
fndef.push_str(&ty)
} else {
continue;
}
}
}
self.collected.push(fndef);
} else {
add = true
}
}
}
fn add_fn(&mut self, name: String, fn_decl: &syn::ItemFn) {
for prefix in &self.prefixes {
if name.starts_with(prefix) {
let syn::ItemFn { sig,.. } = fn_decl.clone();
let mut args = vec![];
for arg in sig.inputs {
match arg {
syn::FnArg::Typed(pat_ty) => args.push(*pat_ty.ty),
_ => continue,
}
}
self.functions.push(FnDef {
name,
args,
output: sig.output,
});
break;
}
}
}
fn iter_krate(&self, idx: usize) -> Option<&str> {
if self.collected.len() >= (idx + 1) {
Some(&self.collected[idx])
} else {
None
}
}
}
fn type_repr(ty: &syn::Type, r: Option<&str>) -> Result<String, ()> {
let mut repr = String::new();
match ty {
syn::Type::Path(path) => {
let syn::TypePath { path,.. } = path;
if let Some(ty) = path.segments.last() {
if let Some(r) = r {
Ok(format!("type({} {})", r, ty.ident))
} else {
Ok(format!("type({})", ty.ident))
}
} else {
Err(())
}
}
syn::Type::Ptr(ty) => {
let syn::TypePtr {
elem, mutability,..
} = ty;
let m = match mutability {
Some(_) => "*mut",
_ => "*const",
};
repr.push_str(&type_repr(&*elem, Some(m))?);
Ok(repr)
}
syn::Type::Reference(ty) => {
let syn::TypeReference {
elem, mutability,..
} = ty;
let m = match mutability {
Some(_) => "&mut",
_ => "&",
};
repr.push_str(&type_repr(&*elem, Some(m))?);
Ok(repr)
}
_ => Err(()),
}
}
impl<'ast> syn::visit::Visit<'ast> for KrateData {
fn visit_item(&mut self, item: &syn::Item) {
match item {
syn::Item::Fn(fn_decl,..) => {
if let syn::Visibility::Public(_) = fn_decl.vis {
let name = format!("{}", fn_decl.sig.ident);
self.add_fn(name, &*fn_decl)
}
}
syn::Item::Mod(mod_item) if mod_item.content.is_some() => {
for item in &mod_item.content.as_ref().unwrap().1 {
self.visit_item(item);
}
}
_ => {}
}
}
}
struct FnDef {
name: String,
output: syn::ReturnType,
args: Vec<syn::Type>,
}
// C FFI for KrateData objects:
#[doc(hidden)]
#[no_mangle]
pub unsafe extern "C" fn krate_data_new(ptr: *mut PyList) -> *mut KrateData {
let p = PyList::from_ptr(ptr);
let p: Vec<String> = PyList::into(p);
Box::into_raw(Box::new(KrateData::new(p)))
}
#[doc(hidden)]
#[no_mangle]
pub unsafe extern "C" fn krate_data_free(ptr: *mut KrateData) {
if ptr.is_null() {
return;
}
Box::from_raw(ptr);
}
#[doc(hidden)]
#[no_mangle]
pub extern "C" fn krate_data_len(krate: &KrateData) -> size_t {
krate.collected.len()
}
#[doc(hidden)]
#[no_mangle]
pub extern "C" fn krate_data_iter(krate: &KrateData, idx: size_t) -> *mut PyString {
match krate.iter_krate(idx as usize) {
Some(val) => PyString::from(val).into_raw(),
None => PyString::from("NO_IDX_ERROR").into_raw(),
}
}
#[cfg(test)]
mod parsing_tests {
use super::*;
#[test]
#[ignore]
fn parse_lib() {
let path = std::env::home_dir()
.unwrap()
.join("workspace/sources/rustypy_debug/rust_code/src/lib.rs");
// let path_ori: std::path::PathBuf = std::env::current_dir().unwrap();
// let path: std::path::PathBuf = path_ori
// .parent()
// .unwrap()
// .parent()
// .unwrap()
// .join("tests/rs_test_lib/lib.rs");
// the entry point to the library:
let entry_point = PyString::from(path.to_str().unwrap().to_string()).into_raw();
let mut krate_data = KrateData::new(vec!["python_bind_".to_string()]);
unsafe {
let response = parse_src(entry_point, &mut krate_data);
let response: String = PyString::from_ptr_to_string(response);
assert!(!response.is_empty());
}
}
} | }); | random_line_split |
lib.rs | //! Binding Rust with Python, both ways!
//!
//! This library will generate and handle type conversions between Python and
//! Rust. To use Python from Rust refer to the
//! [library wiki](https://github.com/iduartgomez/rustypy/wiki), more general examples
//! and information on how to use Rust in Python can also be found there.
//!
//! Checkout the [PyTypes](../rustypy/pytypes/index.html) module documentation for more information
//! on how to write foreign functions that are compliant with Python as well as using the custom
//! types that will ease type conversion.
#![crate_type = "cdylib"]
extern crate cpython;
extern crate libc;
extern crate syn;
extern crate walkdir;
use std::fs::File;
use std::io::Read;
use std::path::Path;
use std::ptr;
use libc::size_t;
mod macros;
pub mod pytypes;
// re-export
pub use self::pytypes::pybool::PyBool;
pub use self::pytypes::pydict::PyDict;
pub use self::pytypes::pylist::PyList;
pub use self::pytypes::pystring::PyString;
pub use self::pytypes::pytuple::PyTuple;
pub use self::pytypes::PyArg;
#[doc(hidden)]
#[no_mangle]
pub unsafe extern "C" fn parse_src(
path: *mut PyString,
krate_data: &mut KrateData,
) -> *mut PyString {
let path = PyString::from_ptr_to_string(path);
let path: &Path = path.as_ref();
let dir = if let Some(parent) = path.parent() {
parent
} else {
// unlikely this happens, but just in case
return PyString::from("crate in root directory not allowed".to_string()).into_raw();
};
for entry in walkdir::WalkDir::new(dir)
.into_iter()
.filter_map(|e| e.ok())
.filter(|e| {
if let Some(ext) = e.path().extension() {
ext == "rs"
} else {
false
}
})
{
if let Err(err) = parse_file(krate_data, entry.path()) {
return err;
}
}
ptr::null_mut::<PyString>()
}
fn parse_file(krate_data: &mut KrateData, path: &Path) -> Result<(), *mut PyString> {
let mut f = match File::open(path) {
Ok(file) => file,
Err(_) => {
return Err(
PyString::from(format!("path not found: {}", path.to_str().unwrap())).into_raw(),
)
}
};
let mut src = String::new();
if f.read_to_string(&mut src).is_err() {
return Err(PyString::from(format!(
"failed to read the source file: {}",
path.to_str().unwrap()
))
.into_raw());
}
match syn::parse_file(&src) {
Ok(krate) => {
syn::visit::visit_file(krate_data, &krate);
krate_data.collect_values();
if krate_data.collected.is_empty() {
return Err(PyString::from("zero function calls parsed".to_string()).into_raw());
}
}
Err(err) => return Err(PyString::from(format!("{}", err)).into_raw()),
};
Ok(())
}
#[doc(hidden)]
pub struct KrateData {
functions: Vec<FnDef>,
collected: Vec<String>,
prefixes: Vec<String>,
}
impl KrateData {
fn new(prefixes: Vec<String>) -> KrateData {
KrateData {
functions: vec![],
collected: vec![],
prefixes,
}
}
fn collect_values(&mut self) {
let mut add = true;
for v in self.functions.drain(..) {
let FnDef {
name: mut fndef,
args,
output,
} = v;
let original_name = fndef.clone();
if!args.is_empty() {
fndef.push_str("::");
args.iter().fold(&mut fndef, |acc, arg| {
if let Ok(repr) = type_repr(arg, None) {
acc.push_str(&repr);
acc.push(';');
} else {
eprintln!(
"could not generate bindings for fn `{}`; unacceptable parameters
",
original_name
);
add = false;
}
acc
});
} else {
// function w/o arguments
fndef.push_str("::();");
}
if add {
match output {
syn::ReturnType::Default => fndef.push_str("type(void)"),
syn::ReturnType::Type(_, ty) => {
if let Ok(ty) = type_repr(&ty, None) {
fndef.push_str(&ty)
} else {
continue;
}
}
}
self.collected.push(fndef);
} else {
add = true
}
}
}
fn add_fn(&mut self, name: String, fn_decl: &syn::ItemFn) {
for prefix in &self.prefixes {
if name.starts_with(prefix) {
let syn::ItemFn { sig,.. } = fn_decl.clone();
let mut args = vec![];
for arg in sig.inputs {
match arg {
syn::FnArg::Typed(pat_ty) => args.push(*pat_ty.ty),
_ => continue,
}
}
self.functions.push(FnDef {
name,
args,
output: sig.output,
});
break;
}
}
}
fn iter_krate(&self, idx: usize) -> Option<&str> {
if self.collected.len() >= (idx + 1) {
Some(&self.collected[idx])
} else {
None
}
}
}
fn type_repr(ty: &syn::Type, r: Option<&str>) -> Result<String, ()> {
let mut repr = String::new();
match ty {
syn::Type::Path(path) => {
let syn::TypePath { path,.. } = path;
if let Some(ty) = path.segments.last() {
if let Some(r) = r {
Ok(format!("type({} {})", r, ty.ident))
} else {
Ok(format!("type({})", ty.ident))
}
} else {
Err(())
}
}
syn::Type::Ptr(ty) => {
let syn::TypePtr {
elem, mutability,..
} = ty;
let m = match mutability {
Some(_) => "*mut",
_ => "*const",
};
repr.push_str(&type_repr(&*elem, Some(m))?);
Ok(repr)
}
syn::Type::Reference(ty) => {
let syn::TypeReference {
elem, mutability,..
} = ty;
let m = match mutability {
Some(_) => "&mut",
_ => "&",
};
repr.push_str(&type_repr(&*elem, Some(m))?);
Ok(repr)
}
_ => Err(()),
}
}
impl<'ast> syn::visit::Visit<'ast> for KrateData {
fn visit_item(&mut self, item: &syn::Item) {
match item {
syn::Item::Fn(fn_decl,..) => {
if let syn::Visibility::Public(_) = fn_decl.vis {
let name = format!("{}", fn_decl.sig.ident);
self.add_fn(name, &*fn_decl)
}
}
syn::Item::Mod(mod_item) if mod_item.content.is_some() => {
for item in &mod_item.content.as_ref().unwrap().1 {
self.visit_item(item);
}
}
_ => {}
}
}
}
struct FnDef {
name: String,
output: syn::ReturnType,
args: Vec<syn::Type>,
}
// C FFI for KrateData objects:
#[doc(hidden)]
#[no_mangle]
pub unsafe extern "C" fn krate_data_new(ptr: *mut PyList) -> *mut KrateData {
let p = PyList::from_ptr(ptr);
let p: Vec<String> = PyList::into(p);
Box::into_raw(Box::new(KrateData::new(p)))
}
#[doc(hidden)]
#[no_mangle]
pub unsafe extern "C" fn krate_data_free(ptr: *mut KrateData) {
if ptr.is_null() {
return;
}
Box::from_raw(ptr);
}
#[doc(hidden)]
#[no_mangle]
pub extern "C" fn krate_data_len(krate: &KrateData) -> size_t {
krate.collected.len()
}
#[doc(hidden)]
#[no_mangle]
pub extern "C" fn krate_data_iter(krate: &KrateData, idx: size_t) -> *mut PyString |
#[cfg(test)]
mod parsing_tests {
use super::*;
#[test]
#[ignore]
fn parse_lib() {
let path = std::env::home_dir()
.unwrap()
.join("workspace/sources/rustypy_debug/rust_code/src/lib.rs");
// let path_ori: std::path::PathBuf = std::env::current_dir().unwrap();
// let path: std::path::PathBuf = path_ori
// .parent()
// .unwrap()
// .parent()
// .unwrap()
// .join("tests/rs_test_lib/lib.rs");
// the entry point to the library:
let entry_point = PyString::from(path.to_str().unwrap().to_string()).into_raw();
let mut krate_data = KrateData::new(vec!["python_bind_".to_string()]);
unsafe {
let response = parse_src(entry_point, &mut krate_data);
let response: String = PyString::from_ptr_to_string(response);
assert!(!response.is_empty());
}
}
}
| {
match krate.iter_krate(idx as usize) {
Some(val) => PyString::from(val).into_raw(),
None => PyString::from("NO_IDX_ERROR").into_raw(),
}
} | identifier_body |
lib.rs | //! Binding Rust with Python, both ways!
//!
//! This library will generate and handle type conversions between Python and
//! Rust. To use Python from Rust refer to the
//! [library wiki](https://github.com/iduartgomez/rustypy/wiki), more general examples
//! and information on how to use Rust in Python can also be found there.
//!
//! Checkout the [PyTypes](../rustypy/pytypes/index.html) module documentation for more information
//! on how to write foreign functions that are compliant with Python as well as using the custom
//! types that will ease type conversion.
#![crate_type = "cdylib"]
extern crate cpython;
extern crate libc;
extern crate syn;
extern crate walkdir;
use std::fs::File;
use std::io::Read;
use std::path::Path;
use std::ptr;
use libc::size_t;
mod macros;
pub mod pytypes;
// re-export
pub use self::pytypes::pybool::PyBool;
pub use self::pytypes::pydict::PyDict;
pub use self::pytypes::pylist::PyList;
pub use self::pytypes::pystring::PyString;
pub use self::pytypes::pytuple::PyTuple;
pub use self::pytypes::PyArg;
#[doc(hidden)]
#[no_mangle]
pub unsafe extern "C" fn parse_src(
path: *mut PyString,
krate_data: &mut KrateData,
) -> *mut PyString {
let path = PyString::from_ptr_to_string(path);
let path: &Path = path.as_ref();
let dir = if let Some(parent) = path.parent() {
parent
} else {
// unlikely this happens, but just in case
return PyString::from("crate in root directory not allowed".to_string()).into_raw();
};
for entry in walkdir::WalkDir::new(dir)
.into_iter()
.filter_map(|e| e.ok())
.filter(|e| {
if let Some(ext) = e.path().extension() {
ext == "rs"
} else {
false
}
})
{
if let Err(err) = parse_file(krate_data, entry.path()) {
return err;
}
}
ptr::null_mut::<PyString>()
}
fn parse_file(krate_data: &mut KrateData, path: &Path) -> Result<(), *mut PyString> {
let mut f = match File::open(path) {
Ok(file) => file,
Err(_) => {
return Err(
PyString::from(format!("path not found: {}", path.to_str().unwrap())).into_raw(),
)
}
};
let mut src = String::new();
if f.read_to_string(&mut src).is_err() {
return Err(PyString::from(format!(
"failed to read the source file: {}",
path.to_str().unwrap()
))
.into_raw());
}
match syn::parse_file(&src) {
Ok(krate) => {
syn::visit::visit_file(krate_data, &krate);
krate_data.collect_values();
if krate_data.collected.is_empty() {
return Err(PyString::from("zero function calls parsed".to_string()).into_raw());
}
}
Err(err) => return Err(PyString::from(format!("{}", err)).into_raw()),
};
Ok(())
}
#[doc(hidden)]
pub struct KrateData {
functions: Vec<FnDef>,
collected: Vec<String>,
prefixes: Vec<String>,
}
impl KrateData {
fn new(prefixes: Vec<String>) -> KrateData {
KrateData {
functions: vec![],
collected: vec![],
prefixes,
}
}
fn collect_values(&mut self) {
let mut add = true;
for v in self.functions.drain(..) {
let FnDef {
name: mut fndef,
args,
output,
} = v;
let original_name = fndef.clone();
if!args.is_empty() {
fndef.push_str("::");
args.iter().fold(&mut fndef, |acc, arg| {
if let Ok(repr) = type_repr(arg, None) {
acc.push_str(&repr);
acc.push(';');
} else {
eprintln!(
"could not generate bindings for fn `{}`; unacceptable parameters
",
original_name
);
add = false;
}
acc
});
} else {
// function w/o arguments
fndef.push_str("::();");
}
if add {
match output {
syn::ReturnType::Default => fndef.push_str("type(void)"),
syn::ReturnType::Type(_, ty) => {
if let Ok(ty) = type_repr(&ty, None) {
fndef.push_str(&ty)
} else {
continue;
}
}
}
self.collected.push(fndef);
} else {
add = true
}
}
}
fn add_fn(&mut self, name: String, fn_decl: &syn::ItemFn) {
for prefix in &self.prefixes {
if name.starts_with(prefix) {
let syn::ItemFn { sig,.. } = fn_decl.clone();
let mut args = vec![];
for arg in sig.inputs {
match arg {
syn::FnArg::Typed(pat_ty) => args.push(*pat_ty.ty),
_ => continue,
}
}
self.functions.push(FnDef {
name,
args,
output: sig.output,
});
break;
}
}
}
fn iter_krate(&self, idx: usize) -> Option<&str> {
if self.collected.len() >= (idx + 1) {
Some(&self.collected[idx])
} else {
None
}
}
}
fn type_repr(ty: &syn::Type, r: Option<&str>) -> Result<String, ()> {
let mut repr = String::new();
match ty {
syn::Type::Path(path) => {
let syn::TypePath { path,.. } = path;
if let Some(ty) = path.segments.last() {
if let Some(r) = r {
Ok(format!("type({} {})", r, ty.ident))
} else {
Ok(format!("type({})", ty.ident))
}
} else {
Err(())
}
}
syn::Type::Ptr(ty) => {
let syn::TypePtr {
elem, mutability,..
} = ty;
let m = match mutability {
Some(_) => "*mut",
_ => "*const",
};
repr.push_str(&type_repr(&*elem, Some(m))?);
Ok(repr)
}
syn::Type::Reference(ty) => {
let syn::TypeReference {
elem, mutability,..
} = ty;
let m = match mutability {
Some(_) => "&mut",
_ => "&",
};
repr.push_str(&type_repr(&*elem, Some(m))?);
Ok(repr)
}
_ => Err(()),
}
}
impl<'ast> syn::visit::Visit<'ast> for KrateData {
fn visit_item(&mut self, item: &syn::Item) {
match item {
syn::Item::Fn(fn_decl,..) => {
if let syn::Visibility::Public(_) = fn_decl.vis {
let name = format!("{}", fn_decl.sig.ident);
self.add_fn(name, &*fn_decl)
}
}
syn::Item::Mod(mod_item) if mod_item.content.is_some() => {
for item in &mod_item.content.as_ref().unwrap().1 {
self.visit_item(item);
}
}
_ => {}
}
}
}
struct FnDef {
name: String,
output: syn::ReturnType,
args: Vec<syn::Type>,
}
// C FFI for KrateData objects:
#[doc(hidden)]
#[no_mangle]
pub unsafe extern "C" fn krate_data_new(ptr: *mut PyList) -> *mut KrateData {
let p = PyList::from_ptr(ptr);
let p: Vec<String> = PyList::into(p);
Box::into_raw(Box::new(KrateData::new(p)))
}
#[doc(hidden)]
#[no_mangle]
pub unsafe extern "C" fn krate_data_free(ptr: *mut KrateData) {
if ptr.is_null() {
return;
}
Box::from_raw(ptr);
}
#[doc(hidden)]
#[no_mangle]
pub extern "C" fn | (krate: &KrateData) -> size_t {
krate.collected.len()
}
#[doc(hidden)]
#[no_mangle]
pub extern "C" fn krate_data_iter(krate: &KrateData, idx: size_t) -> *mut PyString {
match krate.iter_krate(idx as usize) {
Some(val) => PyString::from(val).into_raw(),
None => PyString::from("NO_IDX_ERROR").into_raw(),
}
}
#[cfg(test)]
mod parsing_tests {
use super::*;
#[test]
#[ignore]
fn parse_lib() {
let path = std::env::home_dir()
.unwrap()
.join("workspace/sources/rustypy_debug/rust_code/src/lib.rs");
// let path_ori: std::path::PathBuf = std::env::current_dir().unwrap();
// let path: std::path::PathBuf = path_ori
// .parent()
// .unwrap()
// .parent()
// .unwrap()
// .join("tests/rs_test_lib/lib.rs");
// the entry point to the library:
let entry_point = PyString::from(path.to_str().unwrap().to_string()).into_raw();
let mut krate_data = KrateData::new(vec!["python_bind_".to_string()]);
unsafe {
let response = parse_src(entry_point, &mut krate_data);
let response: String = PyString::from_ptr_to_string(response);
assert!(!response.is_empty());
}
}
}
| krate_data_len | identifier_name |
project-cache-issue-31849.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Regression test for #31849: the problem here was actually a performance
// cliff, but I'm adding the test for reference.
pub trait Upcast<T> {
fn upcast(self) -> T;
}
impl<S1, S2, T1, T2> Upcast<(T1, T2)> for (S1,S2)
where S1: Upcast<T1>,
S2: Upcast<T2>,
{
fn upcast(self) -> (T1, T2) { (self.0.upcast(), self.1.upcast()) }
}
impl Upcast<()> for ()
{
fn upcast(self) -> () { () }
}
pub trait ToStatic {
type Static:'static;
fn to_static(self) -> Self::Static where Self: Sized;
}
impl<T, U> ToStatic for (T, U)
where T: ToStatic,
U: ToStatic
{
type Static = (T::Static, U::Static);
fn to_static(self) -> Self::Static { (self.0.to_static(), self.1.to_static()) }
}
impl ToStatic for ()
{
type Static = ();
fn to_static(self) -> () { () }
}
trait Factory {
type Output;
fn build(&self) -> Self::Output;
}
impl<S,T> Factory for (S, T)
where S: Factory,
T: Factory,
S::Output: ToStatic,
<S::Output as ToStatic>::Static: Upcast<S::Output>,
{
type Output = (S::Output, T::Output);
fn build(&self) -> Self::Output { (self.0.build().to_static().upcast(), self.1.build()) }
}
impl Factory for () {
type Output = ();
fn build(&self) -> Self::Output { () }
}
fn | () {
// More parens, more time.
let it = ((((((((((),()),()),()),()),()),()),()),()),());
it.build();
}
| main | identifier_name |
project-cache-issue-31849.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Regression test for #31849: the problem here was actually a performance
// cliff, but I'm adding the test for reference.
pub trait Upcast<T> {
fn upcast(self) -> T;
}
impl<S1, S2, T1, T2> Upcast<(T1, T2)> for (S1,S2)
where S1: Upcast<T1>,
S2: Upcast<T2>,
{
fn upcast(self) -> (T1, T2) { (self.0.upcast(), self.1.upcast()) }
}
impl Upcast<()> for ()
{
fn upcast(self) -> () |
}
pub trait ToStatic {
type Static:'static;
fn to_static(self) -> Self::Static where Self: Sized;
}
impl<T, U> ToStatic for (T, U)
where T: ToStatic,
U: ToStatic
{
type Static = (T::Static, U::Static);
fn to_static(self) -> Self::Static { (self.0.to_static(), self.1.to_static()) }
}
impl ToStatic for ()
{
type Static = ();
fn to_static(self) -> () { () }
}
trait Factory {
type Output;
fn build(&self) -> Self::Output;
}
impl<S,T> Factory for (S, T)
where S: Factory,
T: Factory,
S::Output: ToStatic,
<S::Output as ToStatic>::Static: Upcast<S::Output>,
{
type Output = (S::Output, T::Output);
fn build(&self) -> Self::Output { (self.0.build().to_static().upcast(), self.1.build()) }
}
impl Factory for () {
type Output = ();
fn build(&self) -> Self::Output { () }
}
fn main() {
// More parens, more time.
let it = ((((((((((),()),()),()),()),()),()),()),()),());
it.build();
}
| { () } | identifier_body |
project-cache-issue-31849.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Regression test for #31849: the problem here was actually a performance
// cliff, but I'm adding the test for reference.
pub trait Upcast<T> {
fn upcast(self) -> T;
}
impl<S1, S2, T1, T2> Upcast<(T1, T2)> for (S1,S2)
where S1: Upcast<T1>,
S2: Upcast<T2>,
{
fn upcast(self) -> (T1, T2) { (self.0.upcast(), self.1.upcast()) }
}
| fn upcast(self) -> () { () }
}
pub trait ToStatic {
type Static:'static;
fn to_static(self) -> Self::Static where Self: Sized;
}
impl<T, U> ToStatic for (T, U)
where T: ToStatic,
U: ToStatic
{
type Static = (T::Static, U::Static);
fn to_static(self) -> Self::Static { (self.0.to_static(), self.1.to_static()) }
}
impl ToStatic for ()
{
type Static = ();
fn to_static(self) -> () { () }
}
trait Factory {
type Output;
fn build(&self) -> Self::Output;
}
impl<S,T> Factory for (S, T)
where S: Factory,
T: Factory,
S::Output: ToStatic,
<S::Output as ToStatic>::Static: Upcast<S::Output>,
{
type Output = (S::Output, T::Output);
fn build(&self) -> Self::Output { (self.0.build().to_static().upcast(), self.1.build()) }
}
impl Factory for () {
type Output = ();
fn build(&self) -> Self::Output { () }
}
fn main() {
// More parens, more time.
let it = ((((((((((),()),()),()),()),()),()),()),()),());
it.build();
} | impl Upcast<()> for ()
{ | random_line_split |
tri10.rs | use russell_lab::{Matrix, Vector};
/// Defines a triangle with 10 nodes (cubic edges; interior node)
///
/// # Local IDs of nodes
///
/// ```text
/// s
/// |
/// 2, (0,1)
/// | ',
/// | ',
/// 5 7,
/// | ',
/// | ',
/// 8 9 4,
/// | ',
/// | (0,0) ', (1,0)
/// 0-----3-----6-----1 ---- r
/// ```
///
/// # Local IDs of edges
///
/// ```text
/// |\
/// | \
/// | \ 1
/// 2| \
/// | \
/// |_____\
/// 0
/// ```
pub struct Tri10 {}
impl Tri10 {
pub const NDIM: usize = 2;
pub const NNODE: usize = 10;
pub const NEDGE: usize = 3;
pub const NFACE: usize = 0;
pub const EDGE_NNODE: usize = 4;
pub const FACE_NNODE: usize = 0;
pub const FACE_NEDGE: usize = 0;
#[rustfmt::skip]
pub const EDGE_NODE_IDS: [[usize; Tri10::EDGE_NNODE]; Tri10::NEDGE] = [
[0, 1, 3, 6],
[1, 2, 4, 7],
[2, 0, 5, 8],
];
#[rustfmt::skip]
pub const NODE_REFERENCE_COORDS: [[f64; Tri10::NDIM]; Tri10::NNODE] = [
[0.0 , 0.0 ], // 0
[1.0 , 0.0 ], // 1
[0.0 , 1.0 ], // 2
[1.0 / 3.0, 0.0 ], // 3
[2.0 / 3.0, 1.0 / 3.0], // 4
[0.0 , 2.0 / 3.0], // 5
[2.0 / 3.0, 0.0 ], // 6
[1.0 / 3.0, 2.0 / 3.0], // 7
[0.0 , 1.0 / 3.0], // 8
[1.0 / 3.0, 1.0 / 3.0], // 9
];
/// Computes the interpolation functions
pub fn calc_interp(interp: &mut Vector, ksi: &[f64]) {
let (r, s) = (ksi[0], ksi[1]);
let z = 1.0 - r - s;
let t1 = s * (3.0 * s - 1.0);
let t2 = z * (3.0 * z - 1.0);
let t3 = r * (3.0 * r - 1.0);
interp[0] = 0.5 * t2 * (3.0 * z - 2.0);
interp[1] = 0.5 * t3 * (3.0 * r - 2.0);
interp[2] = 0.5 * t1 * (3.0 * s - 2.0);
interp[3] = 4.5 * r * t2;
interp[4] = 4.5 * s * t3;
interp[5] = 4.5 * z * t1;
interp[6] = 4.5 * z * t3;
interp[7] = 4.5 * r * t1;
interp[8] = 4.5 * s * t2;
interp[9] = 27.0 * s * z * r;
}
/// Computes the derivatives of interpolation functions
pub fn calc_deriv(deriv: &mut Matrix, ksi: &[f64]) | deriv[3][0] = q2 - q7;
deriv[4][0] = s * q5;
deriv[5][0] = -q1;
deriv[6][0] = z * q5 - q3;
deriv[7][0] = q1;
deriv[8][0] = -q6;
deriv[9][0] = 27.0 * s * (z - r);
deriv[0][1] = q8;
deriv[1][1] = 0.0;
deriv[2][1] = q9;
deriv[3][1] = -q7;
deriv[4][1] = q3;
deriv[5][1] = z * q4 - q1;
deriv[6][1] = -q3;
deriv[7][1] = r * q4;
deriv[8][1] = q2 - q6;
deriv[9][1] = 27.0 * r * (z - s);
}
}
| {
let (r, s) = (ksi[0], ksi[1]);
let z = 1.0 - r - s;
let q0 = 4.5 * (6.0 * z - 1.0);
let q1 = 4.5 * s * (3.0 * s - 1.0);
let q2 = 4.5 * z * (3.0 * z - 1.0);
let q3 = 4.5 * r * (3.0 * r - 1.0);
let q4 = 4.5 * (6.0 * s - 1.0);
let q5 = 4.5 * (6.0 * r - 1.0);
let q6 = q0 * s;
let q7 = q0 * r;
let q8 = -0.5 * (27.0 * z * z - 18.0 * z + 2.0);
let q9 = 0.5 * (27.0 * s * s - 18.0 * s + 2.0);
let q10 = 0.5 * (27.0 * r * r - 18.0 * r + 2.0);
deriv[0][0] = q8;
deriv[1][0] = q10;
deriv[2][0] = 0.0; | identifier_body |
tri10.rs | use russell_lab::{Matrix, Vector};
/// Defines a triangle with 10 nodes (cubic edges; interior node)
///
/// # Local IDs of nodes
///
/// ```text
/// s
/// |
/// 2, (0,1)
/// | ',
/// | ',
/// 5 7,
/// | ',
/// | ',
/// 8 9 4,
/// | ',
/// | (0,0) ', (1,0)
/// 0-----3-----6-----1 ---- r | /// ```
///
/// # Local IDs of edges
///
/// ```text
/// |\
/// | \
/// | \ 1
/// 2| \
/// | \
/// |_____\
/// 0
/// ```
pub struct Tri10 {}
impl Tri10 {
pub const NDIM: usize = 2;
pub const NNODE: usize = 10;
pub const NEDGE: usize = 3;
pub const NFACE: usize = 0;
pub const EDGE_NNODE: usize = 4;
pub const FACE_NNODE: usize = 0;
pub const FACE_NEDGE: usize = 0;
#[rustfmt::skip]
pub const EDGE_NODE_IDS: [[usize; Tri10::EDGE_NNODE]; Tri10::NEDGE] = [
[0, 1, 3, 6],
[1, 2, 4, 7],
[2, 0, 5, 8],
];
#[rustfmt::skip]
pub const NODE_REFERENCE_COORDS: [[f64; Tri10::NDIM]; Tri10::NNODE] = [
[0.0 , 0.0 ], // 0
[1.0 , 0.0 ], // 1
[0.0 , 1.0 ], // 2
[1.0 / 3.0, 0.0 ], // 3
[2.0 / 3.0, 1.0 / 3.0], // 4
[0.0 , 2.0 / 3.0], // 5
[2.0 / 3.0, 0.0 ], // 6
[1.0 / 3.0, 2.0 / 3.0], // 7
[0.0 , 1.0 / 3.0], // 8
[1.0 / 3.0, 1.0 / 3.0], // 9
];
/// Computes the interpolation functions
pub fn calc_interp(interp: &mut Vector, ksi: &[f64]) {
let (r, s) = (ksi[0], ksi[1]);
let z = 1.0 - r - s;
let t1 = s * (3.0 * s - 1.0);
let t2 = z * (3.0 * z - 1.0);
let t3 = r * (3.0 * r - 1.0);
interp[0] = 0.5 * t2 * (3.0 * z - 2.0);
interp[1] = 0.5 * t3 * (3.0 * r - 2.0);
interp[2] = 0.5 * t1 * (3.0 * s - 2.0);
interp[3] = 4.5 * r * t2;
interp[4] = 4.5 * s * t3;
interp[5] = 4.5 * z * t1;
interp[6] = 4.5 * z * t3;
interp[7] = 4.5 * r * t1;
interp[8] = 4.5 * s * t2;
interp[9] = 27.0 * s * z * r;
}
/// Computes the derivatives of interpolation functions
pub fn calc_deriv(deriv: &mut Matrix, ksi: &[f64]) {
let (r, s) = (ksi[0], ksi[1]);
let z = 1.0 - r - s;
let q0 = 4.5 * (6.0 * z - 1.0);
let q1 = 4.5 * s * (3.0 * s - 1.0);
let q2 = 4.5 * z * (3.0 * z - 1.0);
let q3 = 4.5 * r * (3.0 * r - 1.0);
let q4 = 4.5 * (6.0 * s - 1.0);
let q5 = 4.5 * (6.0 * r - 1.0);
let q6 = q0 * s;
let q7 = q0 * r;
let q8 = -0.5 * (27.0 * z * z - 18.0 * z + 2.0);
let q9 = 0.5 * (27.0 * s * s - 18.0 * s + 2.0);
let q10 = 0.5 * (27.0 * r * r - 18.0 * r + 2.0);
deriv[0][0] = q8;
deriv[1][0] = q10;
deriv[2][0] = 0.0;
deriv[3][0] = q2 - q7;
deriv[4][0] = s * q5;
deriv[5][0] = -q1;
deriv[6][0] = z * q5 - q3;
deriv[7][0] = q1;
deriv[8][0] = -q6;
deriv[9][0] = 27.0 * s * (z - r);
deriv[0][1] = q8;
deriv[1][1] = 0.0;
deriv[2][1] = q9;
deriv[3][1] = -q7;
deriv[4][1] = q3;
deriv[5][1] = z * q4 - q1;
deriv[6][1] = -q3;
deriv[7][1] = r * q4;
deriv[8][1] = q2 - q6;
deriv[9][1] = 27.0 * r * (z - s);
}
} | random_line_split |
|
tri10.rs | use russell_lab::{Matrix, Vector};
/// Defines a triangle with 10 nodes (cubic edges; interior node)
///
/// # Local IDs of nodes
///
/// ```text
/// s
/// |
/// 2, (0,1)
/// | ',
/// | ',
/// 5 7,
/// | ',
/// | ',
/// 8 9 4,
/// | ',
/// | (0,0) ', (1,0)
/// 0-----3-----6-----1 ---- r
/// ```
///
/// # Local IDs of edges
///
/// ```text
/// |\
/// | \
/// | \ 1
/// 2| \
/// | \
/// |_____\
/// 0
/// ```
pub struct Tri10 {}
impl Tri10 {
pub const NDIM: usize = 2;
pub const NNODE: usize = 10;
pub const NEDGE: usize = 3;
pub const NFACE: usize = 0;
pub const EDGE_NNODE: usize = 4;
pub const FACE_NNODE: usize = 0;
pub const FACE_NEDGE: usize = 0;
#[rustfmt::skip]
pub const EDGE_NODE_IDS: [[usize; Tri10::EDGE_NNODE]; Tri10::NEDGE] = [
[0, 1, 3, 6],
[1, 2, 4, 7],
[2, 0, 5, 8],
];
#[rustfmt::skip]
pub const NODE_REFERENCE_COORDS: [[f64; Tri10::NDIM]; Tri10::NNODE] = [
[0.0 , 0.0 ], // 0
[1.0 , 0.0 ], // 1
[0.0 , 1.0 ], // 2
[1.0 / 3.0, 0.0 ], // 3
[2.0 / 3.0, 1.0 / 3.0], // 4
[0.0 , 2.0 / 3.0], // 5
[2.0 / 3.0, 0.0 ], // 6
[1.0 / 3.0, 2.0 / 3.0], // 7
[0.0 , 1.0 / 3.0], // 8
[1.0 / 3.0, 1.0 / 3.0], // 9
];
/// Computes the interpolation functions
///
/// Evaluates the ten cubic shape functions of the Tri10 element at the
/// reference coordinates `ksi = [r, s]` and writes them into `interp`.
pub fn calc_interp(interp: &mut Vector, ksi: &[f64]) {
let (r, s) = (ksi[0], ksi[1]);
// third area coordinate
let z = 1.0 - r - s;
// quadratic factors reused below
let t1 = s * (3.0 * s - 1.0);
let t2 = z * (3.0 * z - 1.0);
let t3 = r * (3.0 * r - 1.0);
// corner nodes
interp[0] = 0.5 * t2 * (3.0 * z - 2.0);
interp[1] = 0.5 * t3 * (3.0 * r - 2.0);
interp[2] = 0.5 * t1 * (3.0 * s - 2.0);
// edge nodes
interp[3] = 4.5 * r * t2;
interp[4] = 4.5 * s * t3;
interp[5] = 4.5 * z * t1;
interp[6] = 4.5 * z * t3;
interp[7] = 4.5 * r * t1;
interp[8] = 4.5 * s * t2;
// interior node
interp[9] = 27.0 * s * z * r;
}
/// Computes the derivatives of interpolation functions
pub fn | (deriv: &mut Matrix, ksi: &[f64]) {
let (r, s) = (ksi[0], ksi[1]);
let z = 1.0 - r - s;
let q0 = 4.5 * (6.0 * z - 1.0);
let q1 = 4.5 * s * (3.0 * s - 1.0);
let q2 = 4.5 * z * (3.0 * z - 1.0);
let q3 = 4.5 * r * (3.0 * r - 1.0);
let q4 = 4.5 * (6.0 * s - 1.0);
let q5 = 4.5 * (6.0 * r - 1.0);
let q6 = q0 * s;
let q7 = q0 * r;
let q8 = -0.5 * (27.0 * z * z - 18.0 * z + 2.0);
let q9 = 0.5 * (27.0 * s * s - 18.0 * s + 2.0);
let q10 = 0.5 * (27.0 * r * r - 18.0 * r + 2.0);
deriv[0][0] = q8;
deriv[1][0] = q10;
deriv[2][0] = 0.0;
deriv[3][0] = q2 - q7;
deriv[4][0] = s * q5;
deriv[5][0] = -q1;
deriv[6][0] = z * q5 - q3;
deriv[7][0] = q1;
deriv[8][0] = -q6;
deriv[9][0] = 27.0 * s * (z - r);
deriv[0][1] = q8;
deriv[1][1] = 0.0;
deriv[2][1] = q9;
deriv[3][1] = -q7;
deriv[4][1] = q3;
deriv[5][1] = z * q4 - q1;
deriv[6][1] = -q3;
deriv[7][1] = r * q4;
deriv[8][1] = q2 - q6;
deriv[9][1] = 27.0 * r * (z - s);
}
}
| calc_deriv | identifier_name |
mono.rs | /*
* Monochrome filter
*/
#pragma version(1)
#pragma rs java_package_name(se.embargo.onebit.filter)
rs_allocation gIn;
rs_allocation gOut;
rs_script gScript;
const static float3 gMonoMult = {0.299f, 0.587f, 0.114f};
void root(const uchar4 *v_in, uchar4 *v_out, const void *usrData, uint32_t x, uint32_t y) { | }
void filter() {
int64_t t1 = rsUptimeMillis();
rsForEach(gScript, gIn, gOut, 0);
int64_t t2 = rsUptimeMillis();
rsDebug("Monochrome filter in (ms)", t2 - t1);
} | float4 pixel = rsUnpackColor8888(*v_in);
float3 mono = dot(pixel.rgb, gMonoMult);
*v_out = rsPackColorTo8888(mono); | random_line_split |
game_data.rs | use chess_pgn_parser::{Game, GameTermination};
use regex::{Captures,Regex};
use super::{GameData, MoveData};
pub struct GameMappingError { | pub error: GameError,
}
pub enum GameError {
UnknownGameTermination,
MissingComment { ply: u32 },
BadComment { ply: u32 },
}
pub fn map_game_data(games: &Vec<Game>)
-> Result<Vec<GameData>, GameMappingError> {
let mut result: Vec<GameData> = Vec::with_capacity(games.len());
let comment_parser = CommentParser::new();
for (index, game) in games.iter().enumerate() {
match map_single_game_data(game, &comment_parser) {
Ok(game_data) => result.push(game_data),
Err(error) => {
return Err(GameMappingError {
game_number: (index + 1) as u32,
error: error });
}
}
}
Ok(result)
}
/// Maps one parsed game to `GameData`: a 0/5/10 score for the game result
/// plus per-move evaluation/time data extracted from engine comments.
fn map_single_game_data(game: &Game, comment_parser: &CommentParser) ->
Result<GameData, GameError> {
// score10: White win = 10, draw = 5, Black win = 0
let score10 = match game.termination {
GameTermination::WhiteWins => 10,
GameTermination::DrawnGame => 5,
GameTermination::BlackWins => 0,
GameTermination::Unknown => {
return Err(GameError::UnknownGameTermination);
}
};
let mut move_data_vec : Vec<MoveData> =
Vec::with_capacity(game.moves.len());
for (ply, move_) in game.moves.iter().enumerate() {
// every move must carry an engine comment
let comment_opt = move_.comment.as_ref();
if comment_opt.is_none() {
// NOTE(review): this reports the 0-based ply, while BadComment
// below reports ply + 1 — the inconsistency looks unintended;
// confirm which convention callers expect before changing it
return Err(GameError::MissingComment { ply: ply as u32 });
}
let comment = comment_opt.unwrap();
let result = comment_parser.parse(comment);
match result {
Ok(move_data) => move_data_vec.push(move_data),
Err(()) => {
// 1-based ply number for error reporting
return Err(GameError::BadComment {
ply: (ply + 1) as u32
});
}
}
}
Ok(GameData {
score10: score10,
move_data: move_data_vec
})
}
struct CommentParser {
re: Regex
}
impl CommentParser {
pub fn new() -> CommentParser {
let re = Regex::new(r"(?x)
^(?P<sign>(-|\+)?)
((?P<mate>M\d+)|((?P<eval>\d+)(\.(?P<eval_dec>\d{2}))))
/\d+\s
((?P<time>\d+)(\.(?P<time_dec>\d{1,3}))?s)
").unwrap();
CommentParser { re: re }
}
pub fn parse(&self, comment: &str) -> Result<MoveData, ()> {
let captures_opt = self.re.captures(comment);
if captures_opt.is_none() {
return Err(());
}
let captures = captures_opt.unwrap();
let eval = CommentParser::get_eval(&captures);
let time = CommentParser::get_time(&captures);
Ok(MoveData { eval: eval, time: time })
}
fn get_eval(captures: &Captures) -> i32 {
let mut result = 0;
result += match captures.name("mate") {
None | Some("") => 0,
Some(_) => 10000,
};
result += match captures.name("eval") {
None | Some("") => 0,
Some(value) => 100 * value.parse::<i32>().unwrap(),
};
result += match captures.name("eval_dec") {
None | Some("") => 0,
Some(value) => value.parse::<i32>().unwrap(),
};
result *= match captures.name("sign") {
None | Some("") | Some("+") => 1,
Some("-") => -1,
_ => unreachable!(),
};
result
}
fn get_time(captures: &Captures) -> u32 {
let mut result = 0;
result +=
match captures.name("time") {
Some(value) => 1000 * value.parse::<u32>().unwrap(),
_ => unreachable!(),
};
result +=
match captures.name("time_dec") {
None | Some("") => 0,
Some(value) => 10u32.pow((3 - value.len() as i32) as u32) *
value.parse::<u32>().unwrap(),
};
result
}
}
#[cfg(test)]
mod tests {
use super::CommentParser;
use MoveData;
#[test]
fn comment_parsing() {
let comment_parser = CommentParser::new();
assert_eq!(comment_parser.parse("-1.91/13 0.031s"), Ok(MoveData{ eval: -191, time: 31 }));
assert_eq!(comment_parser.parse("+0.18/15 0.45s"), Ok(MoveData{ eval: 18, time: 450 }));
assert_eq!(comment_parser.parse("+M17/21 0.020s"), Ok(MoveData{ eval: 10000, time: 20 }));
assert_eq!(comment_parser.parse("-M26/18 0.022s"), Ok(MoveData{ eval: -10000, time: 22 }));
}
} | pub game_number: u32, | random_line_split |
game_data.rs | use chess_pgn_parser::{Game, GameTermination};
use regex::{Captures,Regex};
use super::{GameData, MoveData};
pub struct GameMappingError {
pub game_number: u32,
pub error: GameError,
}
pub enum GameError {
UnknownGameTermination,
MissingComment { ply: u32 },
BadComment { ply: u32 },
}
pub fn map_game_data(games: &Vec<Game>)
-> Result<Vec<GameData>, GameMappingError> {
let mut result: Vec<GameData> = Vec::with_capacity(games.len());
let comment_parser = CommentParser::new();
for (index, game) in games.iter().enumerate() {
match map_single_game_data(game, &comment_parser) {
Ok(game_data) => result.push(game_data),
Err(error) => {
return Err(GameMappingError {
game_number: (index + 1) as u32,
error: error });
}
}
}
Ok(result)
}
fn map_single_game_data(game: &Game, comment_parser: &CommentParser) ->
Result<GameData, GameError> {
let score10 = match game.termination {
GameTermination::WhiteWins => 10,
GameTermination::DrawnGame => 5,
GameTermination::BlackWins => 0,
GameTermination::Unknown => {
return Err(GameError::UnknownGameTermination);
}
};
let mut move_data_vec : Vec<MoveData> =
Vec::with_capacity(game.moves.len());
for (ply, move_) in game.moves.iter().enumerate() {
let comment_opt = move_.comment.as_ref();
if comment_opt.is_none() {
return Err(GameError::MissingComment { ply: ply as u32 });
}
let comment = comment_opt.unwrap();
let result = comment_parser.parse(comment);
match result {
Ok(move_data) => move_data_vec.push(move_data),
Err(()) => {
return Err(GameError::BadComment {
ply: (ply + 1) as u32
});
}
}
}
Ok(GameData {
score10: score10,
move_data: move_data_vec
})
}
struct | {
re: Regex
}
impl CommentParser {
pub fn new() -> CommentParser {
let re = Regex::new(r"(?x)
^(?P<sign>(-|\+)?)
((?P<mate>M\d+)|((?P<eval>\d+)(\.(?P<eval_dec>\d{2}))))
/\d+\s
((?P<time>\d+)(\.(?P<time_dec>\d{1,3}))?s)
").unwrap();
CommentParser { re: re }
}
/// Parses a single engine comment into `MoveData`.
///
/// Returns `Err(())` when the comment does not match the expected
/// `<sign><eval-or-mate>/<depth> <time>s` shape.
pub fn parse(&self, comment: &str) -> Result<MoveData, ()> {
let captures_opt = self.re.captures(comment);
if captures_opt.is_none() {
return Err(());
}
let captures = captures_opt.unwrap();
let eval = CommentParser::get_eval(&captures);
let time = CommentParser::get_time(&captures);
Ok(MoveData { eval: eval, time: time })
}
/// Extracts the engine evaluation from the regex captures.
///
/// The result is in hundredths (e.g. "+0.18" -> 18); a mate score
/// ("M<n>") maps to 10000, and the leading sign flips the total.
fn get_eval(captures: &Captures) -> i32 {
let mut result = 0;
// mate announcements are represented by a fixed large score
result += match captures.name("mate") {
None | Some("") => 0,
Some(_) => 10000,
};
// whole part of the evaluation, scaled to hundredths
result += match captures.name("eval") {
None | Some("") => 0,
Some(value) => 100 * value.parse::<i32>().unwrap(),
};
// two-digit decimal part is already in hundredths
result += match captures.name("eval_dec") {
None | Some("") => 0,
Some(value) => value.parse::<i32>().unwrap(),
};
// apply the leading sign, if any
result *= match captures.name("sign") {
None | Some("") | Some("+") => 1,
Some("-") => -1,
_ => unreachable!(),
};
result
}
/// Extracts the move time in milliseconds from the regex captures.
fn get_time(captures: &Captures) -> u32 {
let mut result = 0;
// whole seconds -> milliseconds
result +=
match captures.name("time") {
Some(value) => 1000 * value.parse::<u32>().unwrap(),
_ => unreachable!(),
};
// fractional seconds (1-3 digits): scale up to exactly milliseconds
result +=
match captures.name("time_dec") {
None | Some("") => 0,
Some(value) => 10u32.pow((3 - value.len() as i32) as u32) *
value.parse::<u32>().unwrap(),
};
result
}
}
#[cfg(test)]
mod tests {
use super::CommentParser;
use MoveData;
#[test]
fn comment_parsing() {
let comment_parser = CommentParser::new();
assert_eq!(comment_parser.parse("-1.91/13 0.031s"), Ok(MoveData{ eval: -191, time: 31 }));
assert_eq!(comment_parser.parse("+0.18/15 0.45s"), Ok(MoveData{ eval: 18, time: 450 }));
assert_eq!(comment_parser.parse("+M17/21 0.020s"), Ok(MoveData{ eval: 10000, time: 20 }));
assert_eq!(comment_parser.parse("-M26/18 0.022s"), Ok(MoveData{ eval: -10000, time: 22 }));
}
}
| CommentParser | identifier_name |
game_data.rs | use chess_pgn_parser::{Game, GameTermination};
use regex::{Captures,Regex};
use super::{GameData, MoveData};
pub struct GameMappingError {
pub game_number: u32,
pub error: GameError,
}
pub enum GameError {
UnknownGameTermination,
MissingComment { ply: u32 },
BadComment { ply: u32 },
}
pub fn map_game_data(games: &Vec<Game>)
-> Result<Vec<GameData>, GameMappingError> {
let mut result: Vec<GameData> = Vec::with_capacity(games.len());
let comment_parser = CommentParser::new();
for (index, game) in games.iter().enumerate() {
match map_single_game_data(game, &comment_parser) {
Ok(game_data) => result.push(game_data),
Err(error) => {
return Err(GameMappingError {
game_number: (index + 1) as u32,
error: error });
}
}
}
Ok(result)
}
fn map_single_game_data(game: &Game, comment_parser: &CommentParser) ->
Result<GameData, GameError> {
let score10 = match game.termination {
GameTermination::WhiteWins => 10,
GameTermination::DrawnGame => 5,
GameTermination::BlackWins => 0,
GameTermination::Unknown => {
return Err(GameError::UnknownGameTermination);
}
};
let mut move_data_vec : Vec<MoveData> =
Vec::with_capacity(game.moves.len());
for (ply, move_) in game.moves.iter().enumerate() {
let comment_opt = move_.comment.as_ref();
if comment_opt.is_none() {
return Err(GameError::MissingComment { ply: ply as u32 });
}
let comment = comment_opt.unwrap();
let result = comment_parser.parse(comment);
match result {
Ok(move_data) => move_data_vec.push(move_data),
Err(()) => {
return Err(GameError::BadComment {
ply: (ply + 1) as u32
});
}
}
}
Ok(GameData {
score10: score10,
move_data: move_data_vec
})
}
struct CommentParser {
re: Regex
}
impl CommentParser {
pub fn new() -> CommentParser {
let re = Regex::new(r"(?x)
^(?P<sign>(-|\+)?)
((?P<mate>M\d+)|((?P<eval>\d+)(\.(?P<eval_dec>\d{2}))))
/\d+\s
((?P<time>\d+)(\.(?P<time_dec>\d{1,3}))?s)
").unwrap();
CommentParser { re: re }
}
pub fn parse(&self, comment: &str) -> Result<MoveData, ()> |
fn get_eval(captures: &Captures) -> i32 {
let mut result = 0;
result += match captures.name("mate") {
None | Some("") => 0,
Some(_) => 10000,
};
result += match captures.name("eval") {
None | Some("") => 0,
Some(value) => 100 * value.parse::<i32>().unwrap(),
};
result += match captures.name("eval_dec") {
None | Some("") => 0,
Some(value) => value.parse::<i32>().unwrap(),
};
result *= match captures.name("sign") {
None | Some("") | Some("+") => 1,
Some("-") => -1,
_ => unreachable!(),
};
result
}
fn get_time(captures: &Captures) -> u32 {
let mut result = 0;
result +=
match captures.name("time") {
Some(value) => 1000 * value.parse::<u32>().unwrap(),
_ => unreachable!(),
};
result +=
match captures.name("time_dec") {
None | Some("") => 0,
Some(value) => 10u32.pow((3 - value.len() as i32) as u32) *
value.parse::<u32>().unwrap(),
};
result
}
}
#[cfg(test)]
mod tests {
use super::CommentParser;
use MoveData;
#[test]
fn comment_parsing() {
let comment_parser = CommentParser::new();
assert_eq!(comment_parser.parse("-1.91/13 0.031s"), Ok(MoveData{ eval: -191, time: 31 }));
assert_eq!(comment_parser.parse("+0.18/15 0.45s"), Ok(MoveData{ eval: 18, time: 450 }));
assert_eq!(comment_parser.parse("+M17/21 0.020s"), Ok(MoveData{ eval: 10000, time: 20 }));
assert_eq!(comment_parser.parse("-M26/18 0.022s"), Ok(MoveData{ eval: -10000, time: 22 }));
}
}
| {
let captures_opt = self.re.captures(comment);
if captures_opt.is_none() {
return Err(());
}
let captures = captures_opt.unwrap();
let eval = CommentParser::get_eval(&captures);
let time = CommentParser::get_time(&captures);
Ok(MoveData { eval: eval, time: time })
} | identifier_body |
c_windows.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! C definitions used by libnative that don't belong in liblibc
#![allow(type_overflow)]
use libc;
pub static WSADESCRIPTION_LEN: uint = 256;
pub static WSASYS_STATUS_LEN: uint = 128;
pub static FIONBIO: libc::c_long = 0x8004667e;
static FD_SETSIZE: uint = 64;
pub static MSG_DONTWAIT: libc::c_int = 0;
pub static ERROR_ILLEGAL_CHARACTER: libc::c_int = 582;
pub static ENABLE_ECHO_INPUT: libc::DWORD = 0x4;
pub static ENABLE_EXTENDED_FLAGS: libc::DWORD = 0x80;
pub static ENABLE_INSERT_MODE: libc::DWORD = 0x20;
pub static ENABLE_LINE_INPUT: libc::DWORD = 0x2;
pub static ENABLE_PROCESSED_INPUT: libc::DWORD = 0x1;
pub static ENABLE_QUICK_EDIT_MODE: libc::DWORD = 0x40;
pub static WSA_INVALID_EVENT: WSAEVENT = 0 as WSAEVENT;
pub static FD_ACCEPT: libc::c_long = 0x08;
pub static FD_MAX_EVENTS: uint = 10;
pub static WSA_INFINITE: libc::DWORD = libc::INFINITE;
pub static WSA_WAIT_TIMEOUT: libc::DWORD = libc::consts::os::extra::WAIT_TIMEOUT;
pub static WSA_WAIT_EVENT_0: libc::DWORD = libc::consts::os::extra::WAIT_OBJECT_0;
pub static WSA_WAIT_FAILED: libc::DWORD = libc::consts::os::extra::WAIT_FAILED;
#[repr(C)]
#[cfg(target_arch = "x86")]
pub struct WSADATA {
pub wVersion: libc::WORD,
pub wHighVersion: libc::WORD,
pub szDescription: [u8,..WSADESCRIPTION_LEN + 1],
pub szSystemStatus: [u8,..WSASYS_STATUS_LEN + 1],
pub iMaxSockets: u16,
pub iMaxUdpDg: u16,
pub lpVendorInfo: *mut u8,
}
#[repr(C)]
#[cfg(target_arch = "x86_64")]
pub struct WSADATA {
pub wVersion: libc::WORD,
pub wHighVersion: libc::WORD,
pub iMaxSockets: u16,
pub iMaxUdpDg: u16,
pub lpVendorInfo: *mut u8,
pub szDescription: [u8,..WSADESCRIPTION_LEN + 1],
pub szSystemStatus: [u8,..WSASYS_STATUS_LEN + 1],
}
pub type LPWSADATA = *mut WSADATA;
#[repr(C)]
pub struct WSANETWORKEVENTS {
pub lNetworkEvents: libc::c_long,
pub iErrorCode: [libc::c_int,..FD_MAX_EVENTS],
}
pub type LPWSANETWORKEVENTS = *mut WSANETWORKEVENTS;
pub type WSAEVENT = libc::HANDLE;
#[repr(C)]
pub struct fd_set {
fd_count: libc::c_uint,
fd_array: [libc::SOCKET,..FD_SETSIZE],
}
/// Adds socket `s` to a Winsock `fd_set` and bumps its count.
///
/// NOTE(review): there is no capacity check — inserting more than
/// FD_SETSIZE (64) sockets panics on the out-of-bounds array index.
pub fn fd_set(set: &mut fd_set, s: libc::SOCKET) {
set.fd_array[set.fd_count as uint] = s;
set.fd_count += 1;
}
#[link(name = "ws2_32")]
extern "system" {
pub fn WSAStartup(wVersionRequested: libc::WORD,
lpWSAData: LPWSADATA) -> libc::c_int;
pub fn WSAGetLastError() -> libc::c_int;
pub fn WSACloseEvent(hEvent: WSAEVENT) -> libc::BOOL;
pub fn WSACreateEvent() -> WSAEVENT;
pub fn WSAEventSelect(s: libc::SOCKET,
hEventObject: WSAEVENT,
lNetworkEvents: libc::c_long) -> libc::c_int;
pub fn WSASetEvent(hEvent: WSAEVENT) -> libc::BOOL;
pub fn WSAWaitForMultipleEvents(cEvents: libc::DWORD,
lphEvents: *const WSAEVENT,
fWaitAll: libc::BOOL,
dwTimeout: libc::DWORD,
fAltertable: libc::BOOL) -> libc::DWORD;
pub fn WSAEnumNetworkEvents(s: libc::SOCKET,
hEventObject: WSAEVENT,
lpNetworkEvents: LPWSANETWORKEVENTS)
-> libc::c_int;
pub fn ioctlsocket(s: libc::SOCKET, cmd: libc::c_long,
argp: *mut libc::c_ulong) -> libc::c_int;
pub fn select(nfds: libc::c_int,
readfds: *mut fd_set,
writefds: *mut fd_set,
exceptfds: *mut fd_set,
timeout: *mut libc::timeval) -> libc::c_int;
pub fn getsockopt(sockfd: libc::SOCKET,
level: libc::c_int,
optname: libc::c_int,
optval: *mut libc::c_char,
optlen: *mut libc::c_int) -> libc::c_int;
pub fn SetEvent(hEvent: libc::HANDLE) -> libc::BOOL;
pub fn WaitForMultipleObjects(nCount: libc::DWORD,
lpHandles: *const libc::HANDLE,
bWaitAll: libc::BOOL,
dwMilliseconds: libc::DWORD) -> libc::DWORD;
pub fn CancelIo(hFile: libc::HANDLE) -> libc::BOOL;
pub fn CancelIoEx(hFile: libc::HANDLE,
lpOverlapped: libc::LPOVERLAPPED) -> libc::BOOL;
}
pub mod compat {
use std::intrinsics::{atomic_store_relaxed, transmute};
use std::iter::Iterator;
use libc::types::os::arch::extra::{LPCWSTR, HMODULE, LPCSTR, LPVOID};
extern "system" {
fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE;
fn GetProcAddress(hModule: HMODULE, lpProcName: LPCSTR) -> LPVOID;
}
// store_func() is idempotent, so using relaxed ordering for the atomics
// should be enough. This way, calling a function in this compatibility
// layer (after it's loaded) shouldn't be any slower than a regular DLL
// call.
unsafe fn store_func(ptr: *mut uint, module: &str, symbol: &str, fallback: uint) {
let module: Vec<u16> = module.utf16_units().collect();
let module = module.append_one(0);
symbol.with_c_str(|symbol| {
let handle = GetModuleHandleW(module.as_ptr());
let func: uint = transmute(GetProcAddress(handle, symbol));
atomic_store_relaxed(ptr, if func == 0 {
fallback
} else {
func
})
})
}
/// Macro for creating a compatibility fallback for a Windows function
///
/// # Example
/// ```
/// compat_fn!(adll32::SomeFunctionW(_arg: LPCWSTR) {
/// // Fallback implementation
/// })
/// ```
///
/// Note that arguments unused by the fallback implementation should not be called `_` as
/// they are used to be passed to the real function if available.
macro_rules! compat_fn(
($module:ident::$symbol:ident($($argname:ident: $argtype:ty),*)
-> $rettype:ty $fallback:block) => (
#[inline(always)]
pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype {
static mut ptr: extern "system" fn($($argname: $argtype),*) -> $rettype = thunk;
extern "system" fn thunk($($argname: $argtype),*) -> $rettype {
unsafe {
::io::c::compat::store_func(&mut ptr as *mut _ as *mut uint,
stringify!($module),
stringify!($symbol),
fallback as uint);
::std::intrinsics::atomic_load_relaxed(&ptr)($($argname),*)
}
}
extern "system" fn fallback($($argname: $argtype),*) -> $rettype $fallback
::std::intrinsics::atomic_load_relaxed(&ptr)($($argname),*)
}
);
($module:ident::$symbol:ident($($argname:ident: $argtype:ty),*) $fallback:block) => (
compat_fn!($module::$symbol($($argname: $argtype),*) -> () $fallback)
)
)
/// Compatibility layer for functions in `kernel32.dll`
///
/// Latest versions of Windows this is needed for:
///
/// * `CreateSymbolicLinkW`: Windows XP, Windows Server 2003
/// * `GetFinalPathNameByHandleW`: Windows XP, Windows Server 2003
pub mod kernel32 {
use libc::types::os::arch::extra::{DWORD, LPCWSTR, BOOLEAN, HANDLE};
use libc::consts::os::extra::ERROR_CALL_NOT_IMPLEMENTED;
extern "system" {
fn SetLastError(dwErrCode: DWORD);
}
compat_fn!(kernel32::CreateSymbolicLinkW(_lpSymlinkFileName: LPCWSTR,
_lpTargetFileName: LPCWSTR,
_dwFlags: DWORD) -> BOOLEAN {
unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); }
0
})
compat_fn!(kernel32::GetFinalPathNameByHandleW(_hFile: HANDLE,
_lpszFilePath: LPCWSTR,
_cchFilePath: DWORD,
_dwFlags: DWORD) -> DWORD {
unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); }
0
})
}
}
extern "system" {
// FIXME - pInputControl should be PCONSOLE_READCONSOLE_CONTROL
pub fn ReadConsoleW(hConsoleInput: libc::HANDLE,
lpBuffer: libc::LPVOID,
nNumberOfCharsToRead: libc::DWORD,
lpNumberOfCharsRead: libc::LPDWORD,
pInputControl: libc::LPVOID) -> libc::BOOL;
pub fn WriteConsoleW(hConsoleOutput: libc::HANDLE,
lpBuffer: libc::types::os::arch::extra::LPCVOID,
nNumberOfCharsToWrite: libc::DWORD,
lpNumberOfCharsWritten: libc::LPDWORD,
lpReserved: libc::LPVOID) -> libc::BOOL;
pub fn GetConsoleMode(hConsoleHandle: libc::HANDLE,
lpMode: libc::LPDWORD) -> libc::BOOL;
| lpMode: libc::DWORD) -> libc::BOOL;
} | pub fn SetConsoleMode(hConsoleHandle: libc::HANDLE, | random_line_split |
c_windows.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! C definitions used by libnative that don't belong in liblibc
#![allow(type_overflow)]
use libc;
pub static WSADESCRIPTION_LEN: uint = 256;
pub static WSASYS_STATUS_LEN: uint = 128;
pub static FIONBIO: libc::c_long = 0x8004667e;
static FD_SETSIZE: uint = 64;
pub static MSG_DONTWAIT: libc::c_int = 0;
pub static ERROR_ILLEGAL_CHARACTER: libc::c_int = 582;
pub static ENABLE_ECHO_INPUT: libc::DWORD = 0x4;
pub static ENABLE_EXTENDED_FLAGS: libc::DWORD = 0x80;
pub static ENABLE_INSERT_MODE: libc::DWORD = 0x20;
pub static ENABLE_LINE_INPUT: libc::DWORD = 0x2;
pub static ENABLE_PROCESSED_INPUT: libc::DWORD = 0x1;
pub static ENABLE_QUICK_EDIT_MODE: libc::DWORD = 0x40;
pub static WSA_INVALID_EVENT: WSAEVENT = 0 as WSAEVENT;
pub static FD_ACCEPT: libc::c_long = 0x08;
pub static FD_MAX_EVENTS: uint = 10;
pub static WSA_INFINITE: libc::DWORD = libc::INFINITE;
pub static WSA_WAIT_TIMEOUT: libc::DWORD = libc::consts::os::extra::WAIT_TIMEOUT;
pub static WSA_WAIT_EVENT_0: libc::DWORD = libc::consts::os::extra::WAIT_OBJECT_0;
pub static WSA_WAIT_FAILED: libc::DWORD = libc::consts::os::extra::WAIT_FAILED;
#[repr(C)]
#[cfg(target_arch = "x86")]
pub struct WSADATA {
pub wVersion: libc::WORD,
pub wHighVersion: libc::WORD,
pub szDescription: [u8,..WSADESCRIPTION_LEN + 1],
pub szSystemStatus: [u8,..WSASYS_STATUS_LEN + 1],
pub iMaxSockets: u16,
pub iMaxUdpDg: u16,
pub lpVendorInfo: *mut u8,
}
#[repr(C)]
#[cfg(target_arch = "x86_64")]
pub struct WSADATA {
pub wVersion: libc::WORD,
pub wHighVersion: libc::WORD,
pub iMaxSockets: u16,
pub iMaxUdpDg: u16,
pub lpVendorInfo: *mut u8,
pub szDescription: [u8,..WSADESCRIPTION_LEN + 1],
pub szSystemStatus: [u8,..WSASYS_STATUS_LEN + 1],
}
pub type LPWSADATA = *mut WSADATA;
#[repr(C)]
pub struct WSANETWORKEVENTS {
pub lNetworkEvents: libc::c_long,
pub iErrorCode: [libc::c_int,..FD_MAX_EVENTS],
}
pub type LPWSANETWORKEVENTS = *mut WSANETWORKEVENTS;
pub type WSAEVENT = libc::HANDLE;
#[repr(C)]
pub struct | {
fd_count: libc::c_uint,
fd_array: [libc::SOCKET,..FD_SETSIZE],
}
pub fn fd_set(set: &mut fd_set, s: libc::SOCKET) {
set.fd_array[set.fd_count as uint] = s;
set.fd_count += 1;
}
#[link(name = "ws2_32")]
extern "system" {
pub fn WSAStartup(wVersionRequested: libc::WORD,
lpWSAData: LPWSADATA) -> libc::c_int;
pub fn WSAGetLastError() -> libc::c_int;
pub fn WSACloseEvent(hEvent: WSAEVENT) -> libc::BOOL;
pub fn WSACreateEvent() -> WSAEVENT;
pub fn WSAEventSelect(s: libc::SOCKET,
hEventObject: WSAEVENT,
lNetworkEvents: libc::c_long) -> libc::c_int;
pub fn WSASetEvent(hEvent: WSAEVENT) -> libc::BOOL;
pub fn WSAWaitForMultipleEvents(cEvents: libc::DWORD,
lphEvents: *const WSAEVENT,
fWaitAll: libc::BOOL,
dwTimeout: libc::DWORD,
fAltertable: libc::BOOL) -> libc::DWORD;
pub fn WSAEnumNetworkEvents(s: libc::SOCKET,
hEventObject: WSAEVENT,
lpNetworkEvents: LPWSANETWORKEVENTS)
-> libc::c_int;
pub fn ioctlsocket(s: libc::SOCKET, cmd: libc::c_long,
argp: *mut libc::c_ulong) -> libc::c_int;
pub fn select(nfds: libc::c_int,
readfds: *mut fd_set,
writefds: *mut fd_set,
exceptfds: *mut fd_set,
timeout: *mut libc::timeval) -> libc::c_int;
pub fn getsockopt(sockfd: libc::SOCKET,
level: libc::c_int,
optname: libc::c_int,
optval: *mut libc::c_char,
optlen: *mut libc::c_int) -> libc::c_int;
pub fn SetEvent(hEvent: libc::HANDLE) -> libc::BOOL;
pub fn WaitForMultipleObjects(nCount: libc::DWORD,
lpHandles: *const libc::HANDLE,
bWaitAll: libc::BOOL,
dwMilliseconds: libc::DWORD) -> libc::DWORD;
pub fn CancelIo(hFile: libc::HANDLE) -> libc::BOOL;
pub fn CancelIoEx(hFile: libc::HANDLE,
lpOverlapped: libc::LPOVERLAPPED) -> libc::BOOL;
}
pub mod compat {
use std::intrinsics::{atomic_store_relaxed, transmute};
use std::iter::Iterator;
use libc::types::os::arch::extra::{LPCWSTR, HMODULE, LPCSTR, LPVOID};
extern "system" {
fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE;
fn GetProcAddress(hModule: HMODULE, lpProcName: LPCSTR) -> LPVOID;
}
// store_func() is idempotent, so using relaxed ordering for the atomics
// should be enough. This way, calling a function in this compatibility
// layer (after it's loaded) shouldn't be any slower than a regular DLL
// call.
unsafe fn store_func(ptr: *mut uint, module: &str, symbol: &str, fallback: uint) {
let module: Vec<u16> = module.utf16_units().collect();
let module = module.append_one(0);
symbol.with_c_str(|symbol| {
let handle = GetModuleHandleW(module.as_ptr());
let func: uint = transmute(GetProcAddress(handle, symbol));
atomic_store_relaxed(ptr, if func == 0 {
fallback
} else {
func
})
})
}
/// Macro for creating a compatibility fallback for a Windows function
///
/// # Example
/// ```
/// compat_fn!(adll32::SomeFunctionW(_arg: LPCWSTR) {
/// // Fallback implementation
/// })
/// ```
///
/// Note that arguments unused by the fallback implementation should not be called `_` as
/// they are used to be passed to the real function if available.
macro_rules! compat_fn(
($module:ident::$symbol:ident($($argname:ident: $argtype:ty),*)
-> $rettype:ty $fallback:block) => (
#[inline(always)]
pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype {
static mut ptr: extern "system" fn($($argname: $argtype),*) -> $rettype = thunk;
extern "system" fn thunk($($argname: $argtype),*) -> $rettype {
unsafe {
::io::c::compat::store_func(&mut ptr as *mut _ as *mut uint,
stringify!($module),
stringify!($symbol),
fallback as uint);
::std::intrinsics::atomic_load_relaxed(&ptr)($($argname),*)
}
}
extern "system" fn fallback($($argname: $argtype),*) -> $rettype $fallback
::std::intrinsics::atomic_load_relaxed(&ptr)($($argname),*)
}
);
($module:ident::$symbol:ident($($argname:ident: $argtype:ty),*) $fallback:block) => (
compat_fn!($module::$symbol($($argname: $argtype),*) -> () $fallback)
)
)
/// Compatibility layer for functions in `kernel32.dll`
///
/// Latest versions of Windows this is needed for:
///
/// * `CreateSymbolicLinkW`: Windows XP, Windows Server 2003
/// * `GetFinalPathNameByHandleW`: Windows XP, Windows Server 2003
pub mod kernel32 {
use libc::types::os::arch::extra::{DWORD, LPCWSTR, BOOLEAN, HANDLE};
use libc::consts::os::extra::ERROR_CALL_NOT_IMPLEMENTED;
extern "system" {
fn SetLastError(dwErrCode: DWORD);
}
compat_fn!(kernel32::CreateSymbolicLinkW(_lpSymlinkFileName: LPCWSTR,
_lpTargetFileName: LPCWSTR,
_dwFlags: DWORD) -> BOOLEAN {
unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); }
0
})
compat_fn!(kernel32::GetFinalPathNameByHandleW(_hFile: HANDLE,
_lpszFilePath: LPCWSTR,
_cchFilePath: DWORD,
_dwFlags: DWORD) -> DWORD {
unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); }
0
})
}
}
extern "system" {
// FIXME - pInputControl should be PCONSOLE_READCONSOLE_CONTROL
pub fn ReadConsoleW(hConsoleInput: libc::HANDLE,
lpBuffer: libc::LPVOID,
nNumberOfCharsToRead: libc::DWORD,
lpNumberOfCharsRead: libc::LPDWORD,
pInputControl: libc::LPVOID) -> libc::BOOL;
pub fn WriteConsoleW(hConsoleOutput: libc::HANDLE,
lpBuffer: libc::types::os::arch::extra::LPCVOID,
nNumberOfCharsToWrite: libc::DWORD,
lpNumberOfCharsWritten: libc::LPDWORD,
lpReserved: libc::LPVOID) -> libc::BOOL;
pub fn GetConsoleMode(hConsoleHandle: libc::HANDLE,
lpMode: libc::LPDWORD) -> libc::BOOL;
pub fn SetConsoleMode(hConsoleHandle: libc::HANDLE,
lpMode: libc::DWORD) -> libc::BOOL;
}
| fd_set | identifier_name |
c_windows.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! C definitions used by libnative that don't belong in liblibc
#![allow(type_overflow)]
use libc;
pub static WSADESCRIPTION_LEN: uint = 256;
pub static WSASYS_STATUS_LEN: uint = 128;
pub static FIONBIO: libc::c_long = 0x8004667e;
static FD_SETSIZE: uint = 64;
pub static MSG_DONTWAIT: libc::c_int = 0;
pub static ERROR_ILLEGAL_CHARACTER: libc::c_int = 582;
pub static ENABLE_ECHO_INPUT: libc::DWORD = 0x4;
pub static ENABLE_EXTENDED_FLAGS: libc::DWORD = 0x80;
pub static ENABLE_INSERT_MODE: libc::DWORD = 0x20;
pub static ENABLE_LINE_INPUT: libc::DWORD = 0x2;
pub static ENABLE_PROCESSED_INPUT: libc::DWORD = 0x1;
pub static ENABLE_QUICK_EDIT_MODE: libc::DWORD = 0x40;
pub static WSA_INVALID_EVENT: WSAEVENT = 0 as WSAEVENT;
pub static FD_ACCEPT: libc::c_long = 0x08;
pub static FD_MAX_EVENTS: uint = 10;
pub static WSA_INFINITE: libc::DWORD = libc::INFINITE;
pub static WSA_WAIT_TIMEOUT: libc::DWORD = libc::consts::os::extra::WAIT_TIMEOUT;
pub static WSA_WAIT_EVENT_0: libc::DWORD = libc::consts::os::extra::WAIT_OBJECT_0;
pub static WSA_WAIT_FAILED: libc::DWORD = libc::consts::os::extra::WAIT_FAILED;
#[repr(C)]
#[cfg(target_arch = "x86")]
pub struct WSADATA {
pub wVersion: libc::WORD,
pub wHighVersion: libc::WORD,
pub szDescription: [u8,..WSADESCRIPTION_LEN + 1],
pub szSystemStatus: [u8,..WSASYS_STATUS_LEN + 1],
pub iMaxSockets: u16,
pub iMaxUdpDg: u16,
pub lpVendorInfo: *mut u8,
}
#[repr(C)]
#[cfg(target_arch = "x86_64")]
pub struct WSADATA {
pub wVersion: libc::WORD,
pub wHighVersion: libc::WORD,
pub iMaxSockets: u16,
pub iMaxUdpDg: u16,
pub lpVendorInfo: *mut u8,
pub szDescription: [u8,..WSADESCRIPTION_LEN + 1],
pub szSystemStatus: [u8,..WSASYS_STATUS_LEN + 1],
}
pub type LPWSADATA = *mut WSADATA;
#[repr(C)]
pub struct WSANETWORKEVENTS {
pub lNetworkEvents: libc::c_long,
pub iErrorCode: [libc::c_int,..FD_MAX_EVENTS],
}
pub type LPWSANETWORKEVENTS = *mut WSANETWORKEVENTS;
pub type WSAEVENT = libc::HANDLE;
#[repr(C)]
pub struct fd_set {
fd_count: libc::c_uint,
fd_array: [libc::SOCKET,..FD_SETSIZE],
}
pub fn fd_set(set: &mut fd_set, s: libc::SOCKET) {
set.fd_array[set.fd_count as uint] = s;
set.fd_count += 1;
}
#[link(name = "ws2_32")]
extern "system" {
pub fn WSAStartup(wVersionRequested: libc::WORD,
lpWSAData: LPWSADATA) -> libc::c_int;
pub fn WSAGetLastError() -> libc::c_int;
pub fn WSACloseEvent(hEvent: WSAEVENT) -> libc::BOOL;
pub fn WSACreateEvent() -> WSAEVENT;
pub fn WSAEventSelect(s: libc::SOCKET,
hEventObject: WSAEVENT,
lNetworkEvents: libc::c_long) -> libc::c_int;
pub fn WSASetEvent(hEvent: WSAEVENT) -> libc::BOOL;
pub fn WSAWaitForMultipleEvents(cEvents: libc::DWORD,
lphEvents: *const WSAEVENT,
fWaitAll: libc::BOOL,
dwTimeout: libc::DWORD,
fAltertable: libc::BOOL) -> libc::DWORD;
pub fn WSAEnumNetworkEvents(s: libc::SOCKET,
hEventObject: WSAEVENT,
lpNetworkEvents: LPWSANETWORKEVENTS)
-> libc::c_int;
pub fn ioctlsocket(s: libc::SOCKET, cmd: libc::c_long,
argp: *mut libc::c_ulong) -> libc::c_int;
pub fn select(nfds: libc::c_int,
readfds: *mut fd_set,
writefds: *mut fd_set,
exceptfds: *mut fd_set,
timeout: *mut libc::timeval) -> libc::c_int;
pub fn getsockopt(sockfd: libc::SOCKET,
level: libc::c_int,
optname: libc::c_int,
optval: *mut libc::c_char,
optlen: *mut libc::c_int) -> libc::c_int;
pub fn SetEvent(hEvent: libc::HANDLE) -> libc::BOOL;
pub fn WaitForMultipleObjects(nCount: libc::DWORD,
lpHandles: *const libc::HANDLE,
bWaitAll: libc::BOOL,
dwMilliseconds: libc::DWORD) -> libc::DWORD;
pub fn CancelIo(hFile: libc::HANDLE) -> libc::BOOL;
pub fn CancelIoEx(hFile: libc::HANDLE,
lpOverlapped: libc::LPOVERLAPPED) -> libc::BOOL;
}
pub mod compat {
use std::intrinsics::{atomic_store_relaxed, transmute};
use std::iter::Iterator;
use libc::types::os::arch::extra::{LPCWSTR, HMODULE, LPCSTR, LPVOID};
extern "system" {
fn GetModuleHandleW(lpModuleName: LPCWSTR) -> HMODULE;
fn GetProcAddress(hModule: HMODULE, lpProcName: LPCSTR) -> LPVOID;
}
// store_func() is idempotent, so using relaxed ordering for the atomics
// should be enough. This way, calling a function in this compatibility
// layer (after it's loaded) shouldn't be any slower than a regular DLL
// call.
unsafe fn store_func(ptr: *mut uint, module: &str, symbol: &str, fallback: uint) {
let module: Vec<u16> = module.utf16_units().collect();
let module = module.append_one(0);
symbol.with_c_str(|symbol| {
let handle = GetModuleHandleW(module.as_ptr());
let func: uint = transmute(GetProcAddress(handle, symbol));
atomic_store_relaxed(ptr, if func == 0 | else {
func
})
})
}
/// Macro for creating a compatibility fallback for a Windows function
///
/// # Example
/// ```
/// compat_fn!(adll32::SomeFunctionW(_arg: LPCWSTR) {
/// // Fallback implementation
/// })
/// ```
///
/// Note that arguments unused by the fallback implementation should not be called `_` as
/// they are used to be passed to the real function if available.
macro_rules! compat_fn(
($module:ident::$symbol:ident($($argname:ident: $argtype:ty),*)
-> $rettype:ty $fallback:block) => (
#[inline(always)]
pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype {
static mut ptr: extern "system" fn($($argname: $argtype),*) -> $rettype = thunk;
extern "system" fn thunk($($argname: $argtype),*) -> $rettype {
unsafe {
::io::c::compat::store_func(&mut ptr as *mut _ as *mut uint,
stringify!($module),
stringify!($symbol),
fallback as uint);
::std::intrinsics::atomic_load_relaxed(&ptr)($($argname),*)
}
}
extern "system" fn fallback($($argname: $argtype),*) -> $rettype $fallback
::std::intrinsics::atomic_load_relaxed(&ptr)($($argname),*)
}
);
($module:ident::$symbol:ident($($argname:ident: $argtype:ty),*) $fallback:block) => (
compat_fn!($module::$symbol($($argname: $argtype),*) -> () $fallback)
)
)
/// Compatibility layer for functions in `kernel32.dll`
///
/// Latest versions of Windows this is needed for:
///
/// * `CreateSymbolicLinkW`: Windows XP, Windows Server 2003
/// * `GetFinalPathNameByHandleW`: Windows XP, Windows Server 2003
pub mod kernel32 {
use libc::types::os::arch::extra::{DWORD, LPCWSTR, BOOLEAN, HANDLE};
use libc::consts::os::extra::ERROR_CALL_NOT_IMPLEMENTED;
extern "system" {
fn SetLastError(dwErrCode: DWORD);
}
compat_fn!(kernel32::CreateSymbolicLinkW(_lpSymlinkFileName: LPCWSTR,
_lpTargetFileName: LPCWSTR,
_dwFlags: DWORD) -> BOOLEAN {
unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); }
0
})
compat_fn!(kernel32::GetFinalPathNameByHandleW(_hFile: HANDLE,
_lpszFilePath: LPCWSTR,
_cchFilePath: DWORD,
_dwFlags: DWORD) -> DWORD {
unsafe { SetLastError(ERROR_CALL_NOT_IMPLEMENTED as DWORD); }
0
})
}
}
extern "system" {
// FIXME - pInputControl should be PCONSOLE_READCONSOLE_CONTROL
pub fn ReadConsoleW(hConsoleInput: libc::HANDLE,
lpBuffer: libc::LPVOID,
nNumberOfCharsToRead: libc::DWORD,
lpNumberOfCharsRead: libc::LPDWORD,
pInputControl: libc::LPVOID) -> libc::BOOL;
pub fn WriteConsoleW(hConsoleOutput: libc::HANDLE,
lpBuffer: libc::types::os::arch::extra::LPCVOID,
nNumberOfCharsToWrite: libc::DWORD,
lpNumberOfCharsWritten: libc::LPDWORD,
lpReserved: libc::LPVOID) -> libc::BOOL;
pub fn GetConsoleMode(hConsoleHandle: libc::HANDLE,
lpMode: libc::LPDWORD) -> libc::BOOL;
pub fn SetConsoleMode(hConsoleHandle: libc::HANDLE,
lpMode: libc::DWORD) -> libc::BOOL;
}
| {
fallback
} | conditional_block |
lib.rs | //! Board Support Crate for the bluepill
//!
//! # Usage
//!
//! Follow `cortex-m-quickstart` [instructions][i] but remove the `memory.x`
//! linker script and the `build.rs` build script file as part of the
//! configuration of the quickstart crate. Additionally, uncomment the "if using
//! ITM" block in the `.gdbinit` file.
//!
//! [i]: https://docs.rs/cortex-m-quickstart/0.1.1/cortex_m_quickstart/
//!
//! # Examples
//!
//! Check the [examples] module.
//!
//! [examples]:./examples/index.html
#![deny(missing_docs)]
//#![deny(warnings)]
#![no_std]
#![feature(associated_type_defaults)]
extern crate cast;
pub extern crate stm32f103xx;
extern crate hal;
// For documentation only
pub mod examples;
pub mod led;
//pub mod serial;
pub mod timer;
pub mod clock;
pub mod pin; | pub mod serial;
pub mod frequency; | random_line_split |
|
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(custom_derive)]
#![feature(plugin)]
#![feature(step_trait)]
#![plugin(heapsize_plugin)]
#![plugin(serde_macros)]
#![deny(unsafe_code)]
extern crate heapsize;
extern crate num_traits;
extern crate rustc_serialize;
extern crate serde;
use std::cmp::{self, max, min};
use std::fmt;
use std::iter;
use std::marker::PhantomData;
use std::ops;
pub trait Int:
Copy
+ ops::Add<Self, Output=Self>
+ ops::Sub<Self, Output=Self>
+ cmp::Ord
{
fn zero() -> Self;
fn one() -> Self;
fn max_value() -> Self;
fn from_usize(n: usize) -> Option<Self>;
}
impl Int for isize {
#[inline]
fn zero() -> isize { 0 }
#[inline]
fn one() -> isize { 1 }
#[inline]
fn max_value() -> isize { ::std::isize::MAX }
#[inline]
fn from_usize(n: usize) -> Option<isize> { num_traits::NumCast::from(n) }
}
impl Int for usize {
#[inline]
fn zero() -> usize { 0 }
#[inline]
fn | () -> usize { 1 }
#[inline]
fn max_value() -> usize { ::std::usize::MAX }
#[inline]
fn from_usize(n: usize) -> Option<usize> { Some(n) }
}
/// An index type to be used by a `Range`
pub trait RangeIndex: Int + fmt::Debug {
type Index;
fn new(x: Self::Index) -> Self;
fn get(self) -> Self::Index;
}
impl RangeIndex for isize {
type Index = isize;
#[inline]
fn new(x: isize) -> isize { x }
#[inline]
fn get(self) -> isize { self }
}
impl RangeIndex for usize {
type Index = usize;
#[inline]
fn new(x: usize) -> usize { x }
#[inline]
fn get(self) -> usize { self }
}
/// Implements a range index type with operator overloads
#[macro_export]
macro_rules! int_range_index {
($(#[$attr:meta])* struct $Self_:ident($T:ty)) => (
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Debug, Copy)]
$(#[$attr])*
pub struct $Self_(pub $T);
impl $Self_ {
#[inline]
pub fn to_usize(self) -> usize {
self.get() as usize
}
}
impl RangeIndex for $Self_ {
type Index = $T;
#[inline]
fn new(x: $T) -> $Self_ {
$Self_(x)
}
#[inline]
fn get(self) -> $T {
match self { $Self_(x) => x }
}
}
impl $crate::Int for $Self_ {
#[inline]
fn zero() -> $Self_ { $Self_($crate::Int::zero()) }
#[inline]
fn one() -> $Self_ { $Self_($crate::Int::one()) }
#[inline]
fn max_value() -> $Self_ { $Self_($crate::Int::max_value()) }
#[inline]
fn from_usize(n: usize) -> Option<$Self_> { $crate::Int::from_usize(n).map($Self_) }
}
impl ::std::ops::Add<$Self_> for $Self_ {
type Output = $Self_;
#[inline]
fn add(self, other: $Self_) -> $Self_ {
$Self_(self.get() + other.get())
}
}
impl ::std::ops::Sub<$Self_> for $Self_ {
type Output = $Self_;
#[inline]
fn sub(self, other: $Self_) -> $Self_ {
$Self_(self.get() - other.get())
}
}
impl ::std::ops::Neg for $Self_ {
type Output = $Self_;
#[inline]
fn neg(self) -> $Self_ {
$Self_(-self.get())
}
}
)
}
/// A range of indices
#[derive(Clone, Copy, Deserialize, HeapSizeOf, RustcEncodable, Serialize)]
pub struct Range<I> {
begin: I,
length: I,
}
impl<I: RangeIndex> fmt::Debug for Range<I> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{:?}.. {:?})", self.begin(), self.end())
}
}
/// An iterator over each index in a range
pub struct EachIndex<T, I> {
it: ops::Range<T>,
phantom: PhantomData<I>,
}
pub fn each_index<T: Int, I: RangeIndex<Index=T>>(start: I, stop: I) -> EachIndex<T, I> {
EachIndex { it: start.get()..stop.get(), phantom: PhantomData }
}
impl<T: Int, I: RangeIndex<Index=T>> Iterator for EachIndex<T, I>
where T: Int + iter::Step, for<'a> &'a T: ops::Add<&'a T, Output = T> {
type Item = I;
#[inline]
fn next(&mut self) -> Option<I> {
self.it.next().map(RangeIndex::new)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.it.size_hint()
}
}
impl<I: RangeIndex> Range<I> {
/// Create a new range from beginning and length offsets. This could be
/// denoted as `[begin, begin + length)`.
///
/// ~~~ignore
/// |-- begin ->|-- length ->|
/// | | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn new(begin: I, length: I) -> Range<I> {
Range { begin: begin, length: length }
}
#[inline]
pub fn empty() -> Range<I> {
Range::new(Int::zero(), Int::zero())
}
/// The index offset to the beginning of the range.
///
/// ~~~ignore
/// |-- begin ->|
/// | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn begin(&self) -> I { self.begin }
/// The index offset from the beginning to the end of the range.
///
/// ~~~ignore
/// |-- length ->|
/// | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn length(&self) -> I { self.length }
/// The index offset to the end of the range.
///
/// ~~~ignore
/// |--------- end --------->|
/// | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn end(&self) -> I { self.begin + self.length }
/// `true` if the index is between the beginning and the end of the range.
///
/// ~~~ignore
/// false true false
/// | | |
/// <- o - - + - - +=====+======+ - + - ->
/// ~~~
#[inline]
pub fn contains(&self, i: I) -> bool {
i >= self.begin() && i < self.end()
}
/// `true` if the offset from the beginning to the end of the range is zero.
#[inline]
pub fn is_empty(&self) -> bool {
self.length() == Int::zero()
}
/// Shift the entire range by the supplied index delta.
///
/// ~~~ignore
/// |-- delta ->|
/// | |
/// <- o - +============+ - - - - - | - - - ->
/// |
/// <- o - - - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn shift_by(&mut self, delta: I) {
self.begin = self.begin + delta;
}
/// Extend the end of the range by the supplied index delta.
///
/// ~~~ignore
/// |-- delta ->|
/// | |
/// <- o - - - - - +====+ - - - - - | - - - ->
/// |
/// <- o - - - - - +================+ - - - ->
/// ~~~
#[inline]
pub fn extend_by(&mut self, delta: I) {
self.length = self.length + delta;
}
/// Move the end of the range to the target index.
///
/// ~~~ignore
/// target
/// |
/// <- o - - - - - +====+ - - - - - | - - - ->
/// |
/// <- o - - - - - +================+ - - - ->
/// ~~~
#[inline]
pub fn extend_to(&mut self, target: I) {
self.length = target - self.begin;
}
/// Adjust the beginning offset and the length by the supplied deltas.
#[inline]
pub fn adjust_by(&mut self, begin_delta: I, length_delta: I) {
self.begin = self.begin + begin_delta;
self.length = self.length + length_delta;
}
/// Set the begin and length values.
#[inline]
pub fn reset(&mut self, begin: I, length: I) {
self.begin = begin;
self.length = length;
}
#[inline]
pub fn intersect(&self, other: &Range<I>) -> Range<I> {
let begin = max(self.begin(), other.begin());
let end = min(self.end(), other.end());
if end < begin {
Range::empty()
} else {
Range::new(begin, end - begin)
}
}
}
/// Methods for `Range`s with indices based on integer values
impl<T: Int, I: RangeIndex<Index=T>> Range<I> {
/// Returns an iterater that increments over `[begin, end)`.
#[inline]
pub fn each_index(&self) -> EachIndex<T, I> {
each_index(self.begin(), self.end())
}
}
| one | identifier_name |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(custom_derive)]
#![feature(plugin)]
#![feature(step_trait)]
#![plugin(heapsize_plugin)]
#![plugin(serde_macros)]
#![deny(unsafe_code)]
extern crate heapsize;
extern crate num_traits;
extern crate rustc_serialize;
extern crate serde;
use std::cmp::{self, max, min};
use std::fmt;
use std::iter;
use std::marker::PhantomData;
use std::ops;
pub trait Int:
Copy
+ ops::Add<Self, Output=Self>
+ ops::Sub<Self, Output=Self>
+ cmp::Ord
{
fn zero() -> Self;
fn one() -> Self;
fn max_value() -> Self;
fn from_usize(n: usize) -> Option<Self>;
}
impl Int for isize {
#[inline]
fn zero() -> isize { 0 }
#[inline]
fn one() -> isize { 1 }
#[inline]
fn max_value() -> isize { ::std::isize::MAX }
#[inline]
fn from_usize(n: usize) -> Option<isize> { num_traits::NumCast::from(n) }
}
impl Int for usize {
#[inline]
fn zero() -> usize { 0 }
#[inline]
fn one() -> usize { 1 }
#[inline]
fn max_value() -> usize { ::std::usize::MAX }
#[inline]
fn from_usize(n: usize) -> Option<usize> { Some(n) }
}
/// An index type to be used by a `Range`
pub trait RangeIndex: Int + fmt::Debug {
type Index;
fn new(x: Self::Index) -> Self;
fn get(self) -> Self::Index;
}
impl RangeIndex for isize {
type Index = isize;
#[inline]
fn new(x: isize) -> isize { x }
#[inline]
fn get(self) -> isize { self }
}
impl RangeIndex for usize {
type Index = usize;
#[inline]
fn new(x: usize) -> usize { x }
#[inline]
fn get(self) -> usize { self }
}
/// Implements a range index type with operator overloads
#[macro_export]
macro_rules! int_range_index {
($(#[$attr:meta])* struct $Self_:ident($T:ty)) => (
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Debug, Copy)]
$(#[$attr])*
pub struct $Self_(pub $T);
impl $Self_ {
#[inline]
pub fn to_usize(self) -> usize {
self.get() as usize
}
}
impl RangeIndex for $Self_ {
type Index = $T;
#[inline]
fn new(x: $T) -> $Self_ {
$Self_(x)
}
#[inline]
fn get(self) -> $T {
match self { $Self_(x) => x }
}
}
impl $crate::Int for $Self_ {
#[inline]
fn zero() -> $Self_ { $Self_($crate::Int::zero()) }
#[inline]
fn one() -> $Self_ { $Self_($crate::Int::one()) }
#[inline]
fn max_value() -> $Self_ { $Self_($crate::Int::max_value()) }
#[inline]
fn from_usize(n: usize) -> Option<$Self_> { $crate::Int::from_usize(n).map($Self_) }
}
impl ::std::ops::Add<$Self_> for $Self_ {
type Output = $Self_;
#[inline]
fn add(self, other: $Self_) -> $Self_ {
$Self_(self.get() + other.get())
}
}
impl ::std::ops::Sub<$Self_> for $Self_ {
type Output = $Self_;
#[inline]
fn sub(self, other: $Self_) -> $Self_ {
$Self_(self.get() - other.get())
}
}
impl ::std::ops::Neg for $Self_ {
type Output = $Self_;
#[inline]
fn neg(self) -> $Self_ {
$Self_(-self.get())
}
}
)
}
/// A range of indices
#[derive(Clone, Copy, Deserialize, HeapSizeOf, RustcEncodable, Serialize)]
pub struct Range<I> {
begin: I,
length: I,
}
impl<I: RangeIndex> fmt::Debug for Range<I> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{:?}.. {:?})", self.begin(), self.end())
}
}
/// An iterator over each index in a range
pub struct EachIndex<T, I> {
it: ops::Range<T>,
phantom: PhantomData<I>,
}
pub fn each_index<T: Int, I: RangeIndex<Index=T>>(start: I, stop: I) -> EachIndex<T, I> {
EachIndex { it: start.get()..stop.get(), phantom: PhantomData }
}
impl<T: Int, I: RangeIndex<Index=T>> Iterator for EachIndex<T, I>
where T: Int + iter::Step, for<'a> &'a T: ops::Add<&'a T, Output = T> {
type Item = I;
#[inline]
fn next(&mut self) -> Option<I> {
self.it.next().map(RangeIndex::new)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.it.size_hint()
}
}
impl<I: RangeIndex> Range<I> {
/// Create a new range from beginning and length offsets. This could be
/// denoted as `[begin, begin + length)`.
///
/// ~~~ignore
/// |-- begin ->|-- length ->|
/// | | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn new(begin: I, length: I) -> Range<I> {
Range { begin: begin, length: length }
}
#[inline]
pub fn empty() -> Range<I> {
Range::new(Int::zero(), Int::zero())
}
/// The index offset to the beginning of the range.
///
/// ~~~ignore
/// |-- begin ->|
/// | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn begin(&self) -> I { self.begin }
/// The index offset from the beginning to the end of the range.
///
/// ~~~ignore
/// |-- length ->|
/// | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn length(&self) -> I { self.length }
/// The index offset to the end of the range.
///
/// ~~~ignore
/// |--------- end --------->|
/// | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn end(&self) -> I { self.begin + self.length }
/// `true` if the index is between the beginning and the end of the range.
///
/// ~~~ignore
/// false true false
/// | | |
/// <- o - - + - - +=====+======+ - + - ->
/// ~~~
#[inline]
pub fn contains(&self, i: I) -> bool {
i >= self.begin() && i < self.end()
}
/// `true` if the offset from the beginning to the end of the range is zero.
#[inline]
pub fn is_empty(&self) -> bool {
self.length() == Int::zero()
}
/// Shift the entire range by the supplied index delta.
///
/// ~~~ignore
/// |-- delta ->|
/// | |
/// <- o - +============+ - - - - - | - - - ->
/// |
/// <- o - - - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn shift_by(&mut self, delta: I) {
self.begin = self.begin + delta;
}
/// Extend the end of the range by the supplied index delta.
///
/// ~~~ignore
/// |-- delta ->|
/// | |
/// <- o - - - - - +====+ - - - - - | - - - ->
/// |
/// <- o - - - - - +================+ - - - ->
/// ~~~
#[inline]
pub fn extend_by(&mut self, delta: I) {
self.length = self.length + delta;
}
/// Move the end of the range to the target index.
///
/// ~~~ignore
/// target
/// |
/// <- o - - - - - +====+ - - - - - | - - - ->
/// |
/// <- o - - - - - +================+ - - - ->
/// ~~~
#[inline]
pub fn extend_to(&mut self, target: I) {
self.length = target - self.begin;
}
/// Adjust the beginning offset and the length by the supplied deltas.
#[inline]
pub fn adjust_by(&mut self, begin_delta: I, length_delta: I) {
self.begin = self.begin + begin_delta;
self.length = self.length + length_delta;
}
/// Set the begin and length values.
#[inline]
pub fn reset(&mut self, begin: I, length: I) |
#[inline]
pub fn intersect(&self, other: &Range<I>) -> Range<I> {
let begin = max(self.begin(), other.begin());
let end = min(self.end(), other.end());
if end < begin {
Range::empty()
} else {
Range::new(begin, end - begin)
}
}
}
/// Methods for `Range`s with indices based on integer values
impl<T: Int, I: RangeIndex<Index=T>> Range<I> {
/// Returns an iterater that increments over `[begin, end)`.
#[inline]
pub fn each_index(&self) -> EachIndex<T, I> {
each_index(self.begin(), self.end())
}
}
| {
self.begin = begin;
self.length = length;
} | identifier_body |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(custom_derive)]
#![feature(plugin)]
#![feature(step_trait)]
#![plugin(heapsize_plugin)]
#![plugin(serde_macros)]
#![deny(unsafe_code)]
extern crate heapsize;
extern crate num_traits;
extern crate rustc_serialize;
extern crate serde;
use std::cmp::{self, max, min};
use std::fmt;
use std::iter;
use std::marker::PhantomData;
use std::ops;
pub trait Int:
Copy
+ ops::Add<Self, Output=Self>
+ ops::Sub<Self, Output=Self>
+ cmp::Ord
{
fn zero() -> Self;
fn one() -> Self;
fn max_value() -> Self;
fn from_usize(n: usize) -> Option<Self>;
}
impl Int for isize {
#[inline]
fn zero() -> isize { 0 }
#[inline]
fn one() -> isize { 1 }
#[inline]
fn max_value() -> isize { ::std::isize::MAX }
#[inline]
fn from_usize(n: usize) -> Option<isize> { num_traits::NumCast::from(n) }
}
impl Int for usize {
#[inline]
fn zero() -> usize { 0 }
#[inline]
fn one() -> usize { 1 }
#[inline]
fn max_value() -> usize { ::std::usize::MAX }
#[inline]
fn from_usize(n: usize) -> Option<usize> { Some(n) }
}
/// An index type to be used by a `Range`
pub trait RangeIndex: Int + fmt::Debug {
type Index;
fn new(x: Self::Index) -> Self;
fn get(self) -> Self::Index;
}
impl RangeIndex for isize {
type Index = isize;
#[inline]
fn new(x: isize) -> isize { x }
#[inline]
fn get(self) -> isize { self }
}
impl RangeIndex for usize {
type Index = usize;
#[inline]
fn new(x: usize) -> usize { x }
#[inline]
fn get(self) -> usize { self }
}
/// Implements a range index type with operator overloads
#[macro_export]
macro_rules! int_range_index {
($(#[$attr:meta])* struct $Self_:ident($T:ty)) => (
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Debug, Copy)]
$(#[$attr])*
pub struct $Self_(pub $T);
impl $Self_ {
#[inline]
pub fn to_usize(self) -> usize {
self.get() as usize
}
}
impl RangeIndex for $Self_ {
type Index = $T;
#[inline]
fn new(x: $T) -> $Self_ {
$Self_(x)
}
#[inline]
fn get(self) -> $T {
match self { $Self_(x) => x }
}
}
impl $crate::Int for $Self_ {
#[inline]
fn zero() -> $Self_ { $Self_($crate::Int::zero()) }
#[inline]
fn one() -> $Self_ { $Self_($crate::Int::one()) }
#[inline]
fn max_value() -> $Self_ { $Self_($crate::Int::max_value()) }
#[inline]
fn from_usize(n: usize) -> Option<$Self_> { $crate::Int::from_usize(n).map($Self_) }
}
impl ::std::ops::Add<$Self_> for $Self_ {
type Output = $Self_;
#[inline] | }
impl ::std::ops::Sub<$Self_> for $Self_ {
type Output = $Self_;
#[inline]
fn sub(self, other: $Self_) -> $Self_ {
$Self_(self.get() - other.get())
}
}
impl ::std::ops::Neg for $Self_ {
type Output = $Self_;
#[inline]
fn neg(self) -> $Self_ {
$Self_(-self.get())
}
}
)
}
/// A range of indices
#[derive(Clone, Copy, Deserialize, HeapSizeOf, RustcEncodable, Serialize)]
pub struct Range<I> {
begin: I,
length: I,
}
impl<I: RangeIndex> fmt::Debug for Range<I> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{:?}.. {:?})", self.begin(), self.end())
}
}
/// An iterator over each index in a range
pub struct EachIndex<T, I> {
it: ops::Range<T>,
phantom: PhantomData<I>,
}
pub fn each_index<T: Int, I: RangeIndex<Index=T>>(start: I, stop: I) -> EachIndex<T, I> {
EachIndex { it: start.get()..stop.get(), phantom: PhantomData }
}
impl<T: Int, I: RangeIndex<Index=T>> Iterator for EachIndex<T, I>
where T: Int + iter::Step, for<'a> &'a T: ops::Add<&'a T, Output = T> {
type Item = I;
#[inline]
fn next(&mut self) -> Option<I> {
self.it.next().map(RangeIndex::new)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.it.size_hint()
}
}
impl<I: RangeIndex> Range<I> {
/// Create a new range from beginning and length offsets. This could be
/// denoted as `[begin, begin + length)`.
///
/// ~~~ignore
/// |-- begin ->|-- length ->|
/// | | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn new(begin: I, length: I) -> Range<I> {
Range { begin: begin, length: length }
}
#[inline]
pub fn empty() -> Range<I> {
Range::new(Int::zero(), Int::zero())
}
/// The index offset to the beginning of the range.
///
/// ~~~ignore
/// |-- begin ->|
/// | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn begin(&self) -> I { self.begin }
/// The index offset from the beginning to the end of the range.
///
/// ~~~ignore
/// |-- length ->|
/// | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn length(&self) -> I { self.length }
/// The index offset to the end of the range.
///
/// ~~~ignore
/// |--------- end --------->|
/// | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn end(&self) -> I { self.begin + self.length }
/// `true` if the index is between the beginning and the end of the range.
///
/// ~~~ignore
/// false true false
/// | | |
/// <- o - - + - - +=====+======+ - + - ->
/// ~~~
#[inline]
pub fn contains(&self, i: I) -> bool {
i >= self.begin() && i < self.end()
}
/// `true` if the offset from the beginning to the end of the range is zero.
#[inline]
pub fn is_empty(&self) -> bool {
self.length() == Int::zero()
}
/// Shift the entire range by the supplied index delta.
///
/// ~~~ignore
/// |-- delta ->|
/// | |
/// <- o - +============+ - - - - - | - - - ->
/// |
/// <- o - - - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn shift_by(&mut self, delta: I) {
self.begin = self.begin + delta;
}
/// Extend the end of the range by the supplied index delta.
///
/// ~~~ignore
/// |-- delta ->|
/// | |
/// <- o - - - - - +====+ - - - - - | - - - ->
/// |
/// <- o - - - - - +================+ - - - ->
/// ~~~
#[inline]
pub fn extend_by(&mut self, delta: I) {
self.length = self.length + delta;
}
/// Move the end of the range to the target index.
///
/// ~~~ignore
/// target
/// |
/// <- o - - - - - +====+ - - - - - | - - - ->
/// |
/// <- o - - - - - +================+ - - - ->
/// ~~~
#[inline]
pub fn extend_to(&mut self, target: I) {
self.length = target - self.begin;
}
/// Adjust the beginning offset and the length by the supplied deltas.
#[inline]
pub fn adjust_by(&mut self, begin_delta: I, length_delta: I) {
self.begin = self.begin + begin_delta;
self.length = self.length + length_delta;
}
/// Set the begin and length values.
#[inline]
pub fn reset(&mut self, begin: I, length: I) {
self.begin = begin;
self.length = length;
}
#[inline]
pub fn intersect(&self, other: &Range<I>) -> Range<I> {
let begin = max(self.begin(), other.begin());
let end = min(self.end(), other.end());
if end < begin {
Range::empty()
} else {
Range::new(begin, end - begin)
}
}
}
/// Methods for `Range`s with indices based on integer values
impl<T: Int, I: RangeIndex<Index=T>> Range<I> {
/// Returns an iterater that increments over `[begin, end)`.
#[inline]
pub fn each_index(&self) -> EachIndex<T, I> {
each_index(self.begin(), self.end())
}
} | fn add(self, other: $Self_) -> $Self_ {
$Self_(self.get() + other.get())
} | random_line_split |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(custom_derive)]
#![feature(plugin)]
#![feature(step_trait)]
#![plugin(heapsize_plugin)]
#![plugin(serde_macros)]
#![deny(unsafe_code)]
extern crate heapsize;
extern crate num_traits;
extern crate rustc_serialize;
extern crate serde;
use std::cmp::{self, max, min};
use std::fmt;
use std::iter;
use std::marker::PhantomData;
use std::ops;
pub trait Int:
Copy
+ ops::Add<Self, Output=Self>
+ ops::Sub<Self, Output=Self>
+ cmp::Ord
{
fn zero() -> Self;
fn one() -> Self;
fn max_value() -> Self;
fn from_usize(n: usize) -> Option<Self>;
}
impl Int for isize {
#[inline]
fn zero() -> isize { 0 }
#[inline]
fn one() -> isize { 1 }
#[inline]
fn max_value() -> isize { ::std::isize::MAX }
#[inline]
fn from_usize(n: usize) -> Option<isize> { num_traits::NumCast::from(n) }
}
impl Int for usize {
#[inline]
fn zero() -> usize { 0 }
#[inline]
fn one() -> usize { 1 }
#[inline]
fn max_value() -> usize { ::std::usize::MAX }
#[inline]
fn from_usize(n: usize) -> Option<usize> { Some(n) }
}
/// An index type to be used by a `Range`
pub trait RangeIndex: Int + fmt::Debug {
type Index;
fn new(x: Self::Index) -> Self;
fn get(self) -> Self::Index;
}
impl RangeIndex for isize {
type Index = isize;
#[inline]
fn new(x: isize) -> isize { x }
#[inline]
fn get(self) -> isize { self }
}
impl RangeIndex for usize {
type Index = usize;
#[inline]
fn new(x: usize) -> usize { x }
#[inline]
fn get(self) -> usize { self }
}
/// Implements a range index type with operator overloads
#[macro_export]
macro_rules! int_range_index {
($(#[$attr:meta])* struct $Self_:ident($T:ty)) => (
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Debug, Copy)]
$(#[$attr])*
pub struct $Self_(pub $T);
impl $Self_ {
#[inline]
pub fn to_usize(self) -> usize {
self.get() as usize
}
}
impl RangeIndex for $Self_ {
type Index = $T;
#[inline]
fn new(x: $T) -> $Self_ {
$Self_(x)
}
#[inline]
fn get(self) -> $T {
match self { $Self_(x) => x }
}
}
impl $crate::Int for $Self_ {
#[inline]
fn zero() -> $Self_ { $Self_($crate::Int::zero()) }
#[inline]
fn one() -> $Self_ { $Self_($crate::Int::one()) }
#[inline]
fn max_value() -> $Self_ { $Self_($crate::Int::max_value()) }
#[inline]
fn from_usize(n: usize) -> Option<$Self_> { $crate::Int::from_usize(n).map($Self_) }
}
impl ::std::ops::Add<$Self_> for $Self_ {
type Output = $Self_;
#[inline]
fn add(self, other: $Self_) -> $Self_ {
$Self_(self.get() + other.get())
}
}
impl ::std::ops::Sub<$Self_> for $Self_ {
type Output = $Self_;
#[inline]
fn sub(self, other: $Self_) -> $Self_ {
$Self_(self.get() - other.get())
}
}
impl ::std::ops::Neg for $Self_ {
type Output = $Self_;
#[inline]
fn neg(self) -> $Self_ {
$Self_(-self.get())
}
}
)
}
/// A range of indices
#[derive(Clone, Copy, Deserialize, HeapSizeOf, RustcEncodable, Serialize)]
pub struct Range<I> {
begin: I,
length: I,
}
impl<I: RangeIndex> fmt::Debug for Range<I> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "[{:?}.. {:?})", self.begin(), self.end())
}
}
/// An iterator over each index in a range
pub struct EachIndex<T, I> {
it: ops::Range<T>,
phantom: PhantomData<I>,
}
pub fn each_index<T: Int, I: RangeIndex<Index=T>>(start: I, stop: I) -> EachIndex<T, I> {
EachIndex { it: start.get()..stop.get(), phantom: PhantomData }
}
impl<T: Int, I: RangeIndex<Index=T>> Iterator for EachIndex<T, I>
where T: Int + iter::Step, for<'a> &'a T: ops::Add<&'a T, Output = T> {
type Item = I;
#[inline]
fn next(&mut self) -> Option<I> {
self.it.next().map(RangeIndex::new)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.it.size_hint()
}
}
impl<I: RangeIndex> Range<I> {
/// Create a new range from beginning and length offsets. This could be
/// denoted as `[begin, begin + length)`.
///
/// ~~~ignore
/// |-- begin ->|-- length ->|
/// | | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn new(begin: I, length: I) -> Range<I> {
Range { begin: begin, length: length }
}
#[inline]
pub fn empty() -> Range<I> {
Range::new(Int::zero(), Int::zero())
}
/// The index offset to the beginning of the range.
///
/// ~~~ignore
/// |-- begin ->|
/// | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn begin(&self) -> I { self.begin }
/// The index offset from the beginning to the end of the range.
///
/// ~~~ignore
/// |-- length ->|
/// | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn length(&self) -> I { self.length }
/// The index offset to the end of the range.
///
/// ~~~ignore
/// |--------- end --------->|
/// | |
/// <- o - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn end(&self) -> I { self.begin + self.length }
/// `true` if the index is between the beginning and the end of the range.
///
/// ~~~ignore
/// false true false
/// | | |
/// <- o - - + - - +=====+======+ - + - ->
/// ~~~
#[inline]
pub fn contains(&self, i: I) -> bool {
i >= self.begin() && i < self.end()
}
/// `true` if the offset from the beginning to the end of the range is zero.
#[inline]
pub fn is_empty(&self) -> bool {
self.length() == Int::zero()
}
/// Shift the entire range by the supplied index delta.
///
/// ~~~ignore
/// |-- delta ->|
/// | |
/// <- o - +============+ - - - - - | - - - ->
/// |
/// <- o - - - - - - - +============+ - - - ->
/// ~~~
#[inline]
pub fn shift_by(&mut self, delta: I) {
self.begin = self.begin + delta;
}
/// Extend the end of the range by the supplied index delta.
///
/// ~~~ignore
/// |-- delta ->|
/// | |
/// <- o - - - - - +====+ - - - - - | - - - ->
/// |
/// <- o - - - - - +================+ - - - ->
/// ~~~
#[inline]
pub fn extend_by(&mut self, delta: I) {
self.length = self.length + delta;
}
/// Move the end of the range to the target index.
///
/// ~~~ignore
/// target
/// |
/// <- o - - - - - +====+ - - - - - | - - - ->
/// |
/// <- o - - - - - +================+ - - - ->
/// ~~~
#[inline]
pub fn extend_to(&mut self, target: I) {
self.length = target - self.begin;
}
/// Adjust the beginning offset and the length by the supplied deltas.
#[inline]
pub fn adjust_by(&mut self, begin_delta: I, length_delta: I) {
self.begin = self.begin + begin_delta;
self.length = self.length + length_delta;
}
/// Set the begin and length values.
#[inline]
pub fn reset(&mut self, begin: I, length: I) {
self.begin = begin;
self.length = length;
}
#[inline]
pub fn intersect(&self, other: &Range<I>) -> Range<I> {
let begin = max(self.begin(), other.begin());
let end = min(self.end(), other.end());
if end < begin | else {
Range::new(begin, end - begin)
}
}
}
/// Methods for `Range`s with indices based on integer values
impl<T: Int, I: RangeIndex<Index=T>> Range<I> {
/// Returns an iterater that increments over `[begin, end)`.
#[inline]
pub fn each_index(&self) -> EachIndex<T, I> {
each_index(self.begin(), self.end())
}
}
| {
Range::empty()
} | conditional_block |
pascal_str.rs | use std::borrow::{Cow, ToOwned};
use std::cmp::{Ordering, PartialEq, PartialOrd};
use std::ffi::{CStr, CString};
use std::str;
use ::utf8::PascalString;
use ::PASCAL_STRING_BUF_SIZE;
#[derive(Hash, Eq, Ord)]
pub struct PascalStr {
string: str
}
impl PascalStr {
#[inline]
pub fn as_ptr(&self) -> *const u8 {
(&self.string).as_ptr()
}
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut u8 {
&mut self.string as *mut str as *mut u8
}
#[inline]
pub fn as_str(&self) -> &str {
&self.string
}
#[inline]
pub fn as_mut_str(&mut self) -> &mut str {
&mut self.string
}
#[inline]
pub fn as_bytes(&self) -> &[u8] {
self.string.as_bytes()
}
pub fn as_cstr(&self) -> Result<Cow<CStr>, InteriorNullError> {
unimplemented!()
}
#[inline]
pub fn len(&self) -> usize {
self.string.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.string.is_empty()
}
#[inline]
pub fn | (&self) -> bool {
self.len() == PASCAL_STRING_BUF_SIZE
}
#[inline]
pub fn chars(&self) -> Chars {
self.string.chars()
}
#[inline]
pub fn bytes(&self) -> Bytes {
self.string.bytes()
}
#[inline]
pub fn lines(&self) -> Lines {
self.string.lines()
}
}
impl<S: AsRef<str> +?Sized> PartialEq<S> for PascalStr {
#[inline]
fn eq(&self, other: &S) -> bool {
let other = other.as_ref();
self.as_str() == other
}
}
impl<S: AsRef<str> +?Sized> PartialOrd<S> for PascalStr {
#[inline]
fn partial_cmp(&self, other: &S) -> Option<Ordering> {
let other = other.as_ref();
self.as_str().partial_cmp(&other)
}
}
impl ToOwned for PascalStr {
type Owned = PascalString;
#[inline]
fn to_owned(&self) -> Self::Owned {
PascalString::from_str(self.as_str()).unwrap()
}
}
impl AsRef<str> for PascalStr {
#[inline]
fn as_ref(&self) -> &str {
&self.string
}
}
pub type Chars<'a> = str::Chars<'a>;
pub type Bytes<'a> = str::Bytes<'a>;
pub type Lines<'a> = str::Lines<'a>;
pub struct InteriorNullError;
| is_full | identifier_name |
pascal_str.rs | use std::borrow::{Cow, ToOwned};
use std::cmp::{Ordering, PartialEq, PartialOrd};
use std::ffi::{CStr, CString};
use std::str;
use ::utf8::PascalString;
use ::PASCAL_STRING_BUF_SIZE;
#[derive(Hash, Eq, Ord)]
pub struct PascalStr {
string: str
}
impl PascalStr {
#[inline]
pub fn as_ptr(&self) -> *const u8 {
(&self.string).as_ptr()
}
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut u8 {
&mut self.string as *mut str as *mut u8
}
#[inline]
pub fn as_str(&self) -> &str {
&self.string
}
#[inline]
pub fn as_mut_str(&mut self) -> &mut str {
&mut self.string
}
#[inline]
pub fn as_bytes(&self) -> &[u8] {
self.string.as_bytes()
}
pub fn as_cstr(&self) -> Result<Cow<CStr>, InteriorNullError> {
unimplemented!()
}
#[inline]
pub fn len(&self) -> usize {
self.string.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.string.is_empty()
}
#[inline]
pub fn is_full(&self) -> bool |
#[inline]
pub fn chars(&self) -> Chars {
self.string.chars()
}
#[inline]
pub fn bytes(&self) -> Bytes {
self.string.bytes()
}
#[inline]
pub fn lines(&self) -> Lines {
self.string.lines()
}
}
impl<S: AsRef<str> +?Sized> PartialEq<S> for PascalStr {
#[inline]
fn eq(&self, other: &S) -> bool {
let other = other.as_ref();
self.as_str() == other
}
}
impl<S: AsRef<str> +?Sized> PartialOrd<S> for PascalStr {
#[inline]
fn partial_cmp(&self, other: &S) -> Option<Ordering> {
let other = other.as_ref();
self.as_str().partial_cmp(&other)
}
}
impl ToOwned for PascalStr {
type Owned = PascalString;
#[inline]
fn to_owned(&self) -> Self::Owned {
PascalString::from_str(self.as_str()).unwrap()
}
}
impl AsRef<str> for PascalStr {
#[inline]
fn as_ref(&self) -> &str {
&self.string
}
}
pub type Chars<'a> = str::Chars<'a>;
pub type Bytes<'a> = str::Bytes<'a>;
pub type Lines<'a> = str::Lines<'a>;
pub struct InteriorNullError;
| {
self.len() == PASCAL_STRING_BUF_SIZE
} | identifier_body |
pascal_str.rs | use std::borrow::{Cow, ToOwned};
use std::cmp::{Ordering, PartialEq, PartialOrd};
use std::ffi::{CStr, CString};
use std::str;
use ::utf8::PascalString;
use ::PASCAL_STRING_BUF_SIZE;
#[derive(Hash, Eq, Ord)]
pub struct PascalStr {
string: str
}
impl PascalStr {
#[inline]
pub fn as_ptr(&self) -> *const u8 {
(&self.string).as_ptr()
}
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut u8 {
&mut self.string as *mut str as *mut u8
}
#[inline]
pub fn as_str(&self) -> &str {
&self.string
}
#[inline]
pub fn as_mut_str(&mut self) -> &mut str {
&mut self.string
}
#[inline]
pub fn as_bytes(&self) -> &[u8] {
self.string.as_bytes()
}
pub fn as_cstr(&self) -> Result<Cow<CStr>, InteriorNullError> {
unimplemented!()
}
#[inline]
pub fn len(&self) -> usize {
self.string.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.string.is_empty()
}
#[inline]
pub fn is_full(&self) -> bool {
self.len() == PASCAL_STRING_BUF_SIZE
}
#[inline]
pub fn chars(&self) -> Chars {
self.string.chars()
}
#[inline]
pub fn bytes(&self) -> Bytes {
self.string.bytes()
}
#[inline]
pub fn lines(&self) -> Lines {
self.string.lines()
}
}
impl<S: AsRef<str> +?Sized> PartialEq<S> for PascalStr {
#[inline]
fn eq(&self, other: &S) -> bool {
let other = other.as_ref();
self.as_str() == other
}
}
impl<S: AsRef<str> +?Sized> PartialOrd<S> for PascalStr {
#[inline]
fn partial_cmp(&self, other: &S) -> Option<Ordering> {
let other = other.as_ref();
self.as_str().partial_cmp(&other)
}
}
impl ToOwned for PascalStr {
type Owned = PascalString; | }
}
impl AsRef<str> for PascalStr {
#[inline]
fn as_ref(&self) -> &str {
&self.string
}
}
pub type Chars<'a> = str::Chars<'a>;
pub type Bytes<'a> = str::Bytes<'a>;
pub type Lines<'a> = str::Lines<'a>;
pub struct InteriorNullError; | #[inline]
fn to_owned(&self) -> Self::Owned {
PascalString::from_str(self.as_str()).unwrap() | random_line_split |
htmlobjectelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::attr::AttrHelpers;
use dom::bindings::codegen::Bindings::AttrBinding::AttrMethods;
use dom::bindings::codegen::Bindings::HTMLObjectElementBinding;
use dom::bindings::codegen::Bindings::HTMLObjectElementBinding::HTMLObjectElementMethods;
use dom::bindings::codegen::InheritTypes::HTMLObjectElementDerived;
use dom::bindings::codegen::InheritTypes::{ElementCast, HTMLElementCast};
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::{Element, HTMLObjectElementTypeId};
use dom::element::AttributeHandlers;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId, NodeHelpers, window_from_node};
use dom::validitystate::ValidityState;
use dom::virtualmethods::VirtualMethods;
use servo_net::image_cache_task;
use servo_net::image_cache_task::ImageCacheTask;
use servo_util::str::DOMString;
use string_cache::Atom;
use url::Url;
#[dom_struct]
pub struct HTMLObjectElement {
htmlelement: HTMLElement,
}
impl HTMLObjectElementDerived for EventTarget {
fn is_htmlobjectelement(&self) -> bool {
*self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLObjectElementTypeId))
}
}
impl HTMLObjectElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLObjectElement {
HTMLObjectElement {
htmlelement: HTMLElement::new_inherited(HTMLObjectElementTypeId, localName, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLObjectElement> {
let element = HTMLObjectElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLObjectElementBinding::Wrap)
}
}
trait ProcessDataURL {
fn process_data_url(&self, image_cache: ImageCacheTask);
}
impl<'a> ProcessDataURL for JSRef<'a, HTMLObjectElement> {
// Makes the local `data` member match the status of the `data` attribute and starts
/// prefetching the image. This method must be called after `data` is changed.
fn process_data_url(&self, image_cache: ImageCacheTask) |
}
pub fn is_image_data(uri: &str) -> bool {
static types: &'static [&'static str] = &["data:image/png", "data:image/gif", "data:image/jpeg"];
types.iter().any(|&type_| uri.starts_with(type_))
}
impl<'a> HTMLObjectElementMethods for JSRef<'a, HTMLObjectElement> {
fn Validity(self) -> Temporary<ValidityState> {
let window = window_from_node(self).root();
ValidityState::new(*window)
}
// https://html.spec.whatwg.org/multipage/embedded-content.html#dom-object-type
make_getter!(Type)
// https://html.spec.whatwg.org/multipage/embedded-content.html#dom-object-type
make_setter!(SetType, "type")
}
impl<'a> VirtualMethods for JSRef<'a, HTMLObjectElement> {
fn super_type<'a>(&'a self) -> Option<&'a VirtualMethods> {
let htmlelement: &JSRef<HTMLElement> = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn after_set_attr(&self, attr: JSRef<Attr>) {
match self.super_type() {
Some(ref s) => s.after_set_attr(attr),
_ => ()
}
match attr.local_name() {
&atom!("data") => {
let window = window_from_node(*self).root();
self.process_data_url(window.image_cache_task().clone());
},
_ => ()
}
}
}
impl Reflectable for HTMLObjectElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
| {
let elem: JSRef<Element> = ElementCast::from_ref(*self);
// TODO: support other values
match (elem.get_attribute(ns!(""), &atom!("type")).map(|x| x.root().Value()),
elem.get_attribute(ns!(""), &atom!("data")).map(|x| x.root().Value())) {
(None, Some(uri)) => {
if is_image_data(uri.as_slice()) {
let data_url = Url::parse(uri.as_slice()).unwrap();
// Issue #84
image_cache.send(image_cache_task::Prefetch(data_url));
}
}
_ => { }
}
} | identifier_body |
htmlobjectelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::attr::AttrHelpers;
use dom::bindings::codegen::Bindings::AttrBinding::AttrMethods;
use dom::bindings::codegen::Bindings::HTMLObjectElementBinding;
use dom::bindings::codegen::Bindings::HTMLObjectElementBinding::HTMLObjectElementMethods;
use dom::bindings::codegen::InheritTypes::HTMLObjectElementDerived;
use dom::bindings::codegen::InheritTypes::{ElementCast, HTMLElementCast};
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::{Element, HTMLObjectElementTypeId};
use dom::element::AttributeHandlers;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId, NodeHelpers, window_from_node};
use dom::validitystate::ValidityState;
use dom::virtualmethods::VirtualMethods;
use servo_net::image_cache_task;
use servo_net::image_cache_task::ImageCacheTask;
use servo_util::str::DOMString;
use string_cache::Atom;
use url::Url;
#[dom_struct]
pub struct HTMLObjectElement {
htmlelement: HTMLElement,
}
impl HTMLObjectElementDerived for EventTarget {
fn is_htmlobjectelement(&self) -> bool {
*self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLObjectElementTypeId))
}
}
impl HTMLObjectElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLObjectElement {
HTMLObjectElement {
htmlelement: HTMLElement::new_inherited(HTMLObjectElementTypeId, localName, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLObjectElement> {
let element = HTMLObjectElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLObjectElementBinding::Wrap)
}
}
trait ProcessDataURL {
fn process_data_url(&self, image_cache: ImageCacheTask);
}
impl<'a> ProcessDataURL for JSRef<'a, HTMLObjectElement> {
// Makes the local `data` member match the status of the `data` attribute and starts
/// prefetching the image. This method must be called after `data` is changed.
fn process_data_url(&self, image_cache: ImageCacheTask) {
let elem: JSRef<Element> = ElementCast::from_ref(*self);
// TODO: support other values
match (elem.get_attribute(ns!(""), &atom!("type")).map(|x| x.root().Value()),
elem.get_attribute(ns!(""), &atom!("data")).map(|x| x.root().Value())) {
(None, Some(uri)) => {
if is_image_data(uri.as_slice()) {
let data_url = Url::parse(uri.as_slice()).unwrap();
// Issue #84
image_cache.send(image_cache_task::Prefetch(data_url));
}
}
_ => { }
}
}
}
pub fn | (uri: &str) -> bool {
static types: &'static [&'static str] = &["data:image/png", "data:image/gif", "data:image/jpeg"];
types.iter().any(|&type_| uri.starts_with(type_))
}
impl<'a> HTMLObjectElementMethods for JSRef<'a, HTMLObjectElement> {
fn Validity(self) -> Temporary<ValidityState> {
let window = window_from_node(self).root();
ValidityState::new(*window)
}
// https://html.spec.whatwg.org/multipage/embedded-content.html#dom-object-type
make_getter!(Type)
// https://html.spec.whatwg.org/multipage/embedded-content.html#dom-object-type
make_setter!(SetType, "type")
}
impl<'a> VirtualMethods for JSRef<'a, HTMLObjectElement> {
fn super_type<'a>(&'a self) -> Option<&'a VirtualMethods> {
let htmlelement: &JSRef<HTMLElement> = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn after_set_attr(&self, attr: JSRef<Attr>) {
match self.super_type() {
Some(ref s) => s.after_set_attr(attr),
_ => ()
}
match attr.local_name() {
&atom!("data") => {
let window = window_from_node(*self).root();
self.process_data_url(window.image_cache_task().clone());
},
_ => ()
}
}
}
impl Reflectable for HTMLObjectElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
| is_image_data | identifier_name |
htmlobjectelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::attr::AttrHelpers;
use dom::bindings::codegen::Bindings::AttrBinding::AttrMethods;
use dom::bindings::codegen::Bindings::HTMLObjectElementBinding;
use dom::bindings::codegen::Bindings::HTMLObjectElementBinding::HTMLObjectElementMethods;
use dom::bindings::codegen::InheritTypes::HTMLObjectElementDerived;
use dom::bindings::codegen::InheritTypes::{ElementCast, HTMLElementCast};
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::{Element, HTMLObjectElementTypeId};
use dom::element::AttributeHandlers;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId, NodeHelpers, window_from_node};
use dom::validitystate::ValidityState;
use dom::virtualmethods::VirtualMethods;
use servo_net::image_cache_task;
use servo_net::image_cache_task::ImageCacheTask;
use servo_util::str::DOMString;
use string_cache::Atom;
use url::Url;
#[dom_struct]
pub struct HTMLObjectElement {
htmlelement: HTMLElement,
}
impl HTMLObjectElementDerived for EventTarget {
fn is_htmlobjectelement(&self) -> bool {
*self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLObjectElementTypeId))
}
}
impl HTMLObjectElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLObjectElement {
HTMLObjectElement {
htmlelement: HTMLElement::new_inherited(HTMLObjectElementTypeId, localName, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLObjectElement> {
let element = HTMLObjectElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLObjectElementBinding::Wrap)
}
}
trait ProcessDataURL {
fn process_data_url(&self, image_cache: ImageCacheTask);
}
impl<'a> ProcessDataURL for JSRef<'a, HTMLObjectElement> {
// Makes the local `data` member match the status of the `data` attribute and starts
/// prefetching the image. This method must be called after `data` is changed.
fn process_data_url(&self, image_cache: ImageCacheTask) {
let elem: JSRef<Element> = ElementCast::from_ref(*self);
// TODO: support other values
match (elem.get_attribute(ns!(""), &atom!("type")).map(|x| x.root().Value()),
elem.get_attribute(ns!(""), &atom!("data")).map(|x| x.root().Value())) {
(None, Some(uri)) => {
if is_image_data(uri.as_slice()) {
let data_url = Url::parse(uri.as_slice()).unwrap();
// Issue #84
image_cache.send(image_cache_task::Prefetch(data_url));
}
}
_ => { }
}
}
}
pub fn is_image_data(uri: &str) -> bool {
static types: &'static [&'static str] = &["data:image/png", "data:image/gif", "data:image/jpeg"];
types.iter().any(|&type_| uri.starts_with(type_))
}
impl<'a> HTMLObjectElementMethods for JSRef<'a, HTMLObjectElement> {
fn Validity(self) -> Temporary<ValidityState> {
let window = window_from_node(self).root();
ValidityState::new(*window)
}
| // https://html.spec.whatwg.org/multipage/embedded-content.html#dom-object-type
make_setter!(SetType, "type")
}
impl<'a> VirtualMethods for JSRef<'a, HTMLObjectElement> {
fn super_type<'a>(&'a self) -> Option<&'a VirtualMethods> {
let htmlelement: &JSRef<HTMLElement> = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn after_set_attr(&self, attr: JSRef<Attr>) {
match self.super_type() {
Some(ref s) => s.after_set_attr(attr),
_ => ()
}
match attr.local_name() {
&atom!("data") => {
let window = window_from_node(*self).root();
self.process_data_url(window.image_cache_task().clone());
},
_ => ()
}
}
}
impl Reflectable for HTMLObjectElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
} | // https://html.spec.whatwg.org/multipage/embedded-content.html#dom-object-type
make_getter!(Type)
| random_line_split |
performance.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::PerformanceBinding;
use dom::bindings::js::{JS, JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object};
use dom::performancetiming::{PerformanceTiming, PerformanceTimingMethods};
use dom::window::Window;
use time;
pub type DOMHighResTimeStamp = f64;
#[deriving(Encodable)]
pub struct Performance {
reflector_: Reflector,
timing: JS<PerformanceTiming>,
}
impl Performance {
fn new_inherited(window: &JSRef<Window>) -> Performance |
pub fn new(window: &JSRef<Window>) -> Temporary<Performance> {
let performance = Performance::new_inherited(window);
reflect_dom_object(box performance, window, PerformanceBinding::Wrap)
}
}
pub trait PerformanceMethods {
fn Timing(&self) -> Temporary<PerformanceTiming>;
fn Now(&self) -> DOMHighResTimeStamp;
}
impl<'a> PerformanceMethods for JSRef<'a, Performance> {
fn Timing(&self) -> Temporary<PerformanceTiming> {
Temporary::new(self.timing.clone())
}
fn Now(&self) -> DOMHighResTimeStamp {
let navStart = self.timing.root().NavigationStartPrecise() as f64;
(time::precise_time_s() - navStart) as DOMHighResTimeStamp
}
}
impl Reflectable for Performance {
fn reflector<'a>(&'a self) -> &'a Reflector {
&self.reflector_
}
}
| {
let timing = JS::from_rooted(&PerformanceTiming::new(window).root().root_ref());
Performance {
reflector_: Reflector::new(),
timing: timing,
}
} | identifier_body |
performance.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::PerformanceBinding;
use dom::bindings::js::{JS, JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object};
use dom::performancetiming::{PerformanceTiming, PerformanceTimingMethods};
use dom::window::Window;
use time;
pub type DOMHighResTimeStamp = f64;
#[deriving(Encodable)]
pub struct Performance {
reflector_: Reflector,
timing: JS<PerformanceTiming>,
}
impl Performance {
fn new_inherited(window: &JSRef<Window>) -> Performance {
let timing = JS::from_rooted(&PerformanceTiming::new(window).root().root_ref());
Performance {
reflector_: Reflector::new(),
timing: timing,
}
}
pub fn new(window: &JSRef<Window>) -> Temporary<Performance> {
let performance = Performance::new_inherited(window);
reflect_dom_object(box performance, window, PerformanceBinding::Wrap)
}
}
pub trait PerformanceMethods {
fn Timing(&self) -> Temporary<PerformanceTiming>;
fn Now(&self) -> DOMHighResTimeStamp;
} | fn Timing(&self) -> Temporary<PerformanceTiming> {
Temporary::new(self.timing.clone())
}
fn Now(&self) -> DOMHighResTimeStamp {
let navStart = self.timing.root().NavigationStartPrecise() as f64;
(time::precise_time_s() - navStart) as DOMHighResTimeStamp
}
}
impl Reflectable for Performance {
fn reflector<'a>(&'a self) -> &'a Reflector {
&self.reflector_
}
} |
impl<'a> PerformanceMethods for JSRef<'a, Performance> { | random_line_split |
performance.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::PerformanceBinding;
use dom::bindings::js::{JS, JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object};
use dom::performancetiming::{PerformanceTiming, PerformanceTimingMethods};
use dom::window::Window;
use time;
pub type DOMHighResTimeStamp = f64;
#[deriving(Encodable)]
pub struct Performance {
reflector_: Reflector,
timing: JS<PerformanceTiming>,
}
impl Performance {
fn new_inherited(window: &JSRef<Window>) -> Performance {
let timing = JS::from_rooted(&PerformanceTiming::new(window).root().root_ref());
Performance {
reflector_: Reflector::new(),
timing: timing,
}
}
pub fn new(window: &JSRef<Window>) -> Temporary<Performance> {
let performance = Performance::new_inherited(window);
reflect_dom_object(box performance, window, PerformanceBinding::Wrap)
}
}
pub trait PerformanceMethods {
fn Timing(&self) -> Temporary<PerformanceTiming>;
fn Now(&self) -> DOMHighResTimeStamp;
}
impl<'a> PerformanceMethods for JSRef<'a, Performance> {
fn | (&self) -> Temporary<PerformanceTiming> {
Temporary::new(self.timing.clone())
}
fn Now(&self) -> DOMHighResTimeStamp {
let navStart = self.timing.root().NavigationStartPrecise() as f64;
(time::precise_time_s() - navStart) as DOMHighResTimeStamp
}
}
impl Reflectable for Performance {
fn reflector<'a>(&'a self) -> &'a Reflector {
&self.reflector_
}
}
| Timing | identifier_name |
mod.rs | use std::fs;
use std::io;
pub mod cgroup_reader;
pub mod ns_reader;
pub mod process;
fn load_cgroups(procs: Vec<process::Process>) -> Vec<cgroup_reader::Reader> {
let cgroups = Vec::<cgroup_reader::Reader>::new();
for p in procs {
cgroups.push(cgroup_reader::new(p.pid));
}
cgroups
}
pub struct Container {
processes: Vec<process::Process>,
namespaces: Vec<String>,
cgroups: Vec<cgroup_reader::Reader>,
update_intv: i32
}
impl Container {
fn update(&self) |
}
pub fn new(group: ns_reader::NS_Group) -> Container {
let (namespaces, process) = group;
return Container{
processes: process,
namespaces: namespaces,
cgroups: load_cgroups(process),
update_intv: 1,
}
}
| {
loop {
for cgroup in self.cgroups {
let kvs = cgroup.read();
for kv in kvs {
let (key, val) = kv;
println!("{} : {}", key, val);
}
}
}
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.