file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
chmod.rs | #![crate_name = "uu_chmod"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Alex Lyon <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
#[cfg(unix)]
extern crate libc;
extern crate walker;
#[macro_use]
extern crate uucore;
use std::fs;
use std::os::unix::fs::{MetadataExt, PermissionsExt};
use std::path::Path;
use walker::Walker;
#[cfg(not(windows))]
use uucore::mode;
const NAME: &'static str = "chmod";
static SUMMARY: &'static str = "Change the mode of each FILE to MODE.
With --reference, change the mode of each FILE to that of RFILE.";
static LONG_HELP: &'static str = "
Each MODE is of the form '[ugoa]*([-+=]([rwxXst]*|[ugo]))+|[-+=]?[0-7]+'.
";
pub fn uumain(mut args: Vec<String>) -> i32 | let negative_option = sanitize_input(&mut args);
let mut matches = opts.parse(args);
if matches.free.is_empty() {
show_error!("missing an argument");
show_error!("for help, try '{} --help'", NAME);
return 1;
} else {
let changes = matches.opt_present("changes");
let quiet = matches.opt_present("quiet");
let verbose = matches.opt_present("verbose");
let preserve_root = matches.opt_present("preserve-root");
let recursive = matches.opt_present("recursive");
let fmode = matches
.opt_str("reference")
.and_then(|ref fref| match fs::metadata(fref) {
Ok(meta) => Some(meta.mode()),
Err(err) => crash!(1, "cannot stat attributes of '{}': {}", fref, err),
});
let cmode = if fmode.is_none() {
// If there was a negative option, now it's a good time to
// use it.
if negative_option.is_some() {
negative_option
} else {
Some(matches.free.remove(0))
}
} else {
None
};
let chmoder = Chmoder {
changes: changes,
quiet: quiet,
verbose: verbose,
preserve_root: preserve_root,
recursive: recursive,
fmode: fmode,
cmode: cmode,
};
match chmoder.chmod(matches.free) {
Ok(()) => {}
Err(e) => return e,
}
}
0
}
fn sanitize_input(args: &mut Vec<String>) -> Option<String> {
for i in 0..args.len() {
let first = args[i].chars().nth(0).unwrap();
if first != '-' {
continue;
}
if let Some(second) = args[i].chars().nth(1) {
match second {
'r' | 'w' | 'x' | 'X' | 's' | 't' | 'u' | 'g' | 'o' | '0'...'7' => {
return Some(args.remove(i));
}
_ => {}
}
}
}
None
}
struct Chmoder {
changes: bool,
quiet: bool,
verbose: bool,
preserve_root: bool,
recursive: bool,
fmode: Option<u32>,
cmode: Option<String>,
}
impl Chmoder {
fn chmod(&self, files: Vec<String>) -> Result<(), i32> {
let mut r = Ok(());
for filename in &files {
let filename = &filename[..];
let file = Path::new(filename);
if file.exists() {
if file.is_dir() {
if !self.preserve_root || filename != "/" {
if self.recursive {
let walk_dir = match Walker::new(&file) {
Ok(m) => m,
Err(f) => {
crash!(1, "{}", f.to_string());
}
};
// XXX: here (and elsewhere) we see that this impl will have issues
// with non-UTF-8 filenames. Using OsString won't fix this because
// on Windows OsStrings cannot be built out of non-UTF-8 chars. One
// possible fix is to use CStrings rather than Strings in the args
// to chmod() and chmod_file().
r = self.chmod(
walk_dir
.filter_map(|x| match x {
Ok(o) => match o.path().into_os_string().to_str() {
Some(s) => Some(s.to_owned()),
None => None,
},
Err(_) => None,
})
.collect(),
).and(r);
r = self.chmod_file(&file, filename).and(r);
}
} else {
show_error!("could not change permissions of directory '{}'", filename);
r = Err(1);
}
} else {
r = self.chmod_file(&file, filename).and(r);
}
} else {
show_error!("no such file or directory '{}'", filename);
r = Err(1);
}
}
r
}
#[cfg(windows)]
fn chmod_file(&self, file: &Path, name: &str) -> Result<(), i32> {
// chmod is useless on Windows
// it doesn't set any permissions at all
// instead it just sets the readonly attribute on the file
Err(0)
}
#[cfg(any(unix, target_os = "redox"))]
fn chmod_file(&self, file: &Path, name: &str) -> Result<(), i32> {
let mut fperm = match fs::metadata(name) {
Ok(meta) => meta.mode() & 0o7777,
Err(err) => {
if !self.quiet {
show_error!("{}", err);
}
return Err(1);
}
};
match self.fmode {
Some(mode) => try!(self.change_file(fperm, mode, file, name)),
None => {
let cmode_unwrapped = self.cmode.clone().unwrap();
for mode in cmode_unwrapped.split(',') {
// cmode is guaranteed to be Some in this case
let arr: &[char] = &['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];
let result = if mode.contains(arr) {
mode::parse_numeric(fperm, mode)
} else {
mode::parse_symbolic(fperm, mode, file.is_dir())
};
match result {
Ok(mode) => {
try!(self.change_file(fperm, mode, file, name));
fperm = mode;
}
Err(f) => {
if !self.quiet {
show_error!("{}", f);
}
return Err(1);
}
}
}
}
}
Ok(())
}
fn change_file(&self, fperm: u32, mode: u32, file: &Path, path: &str) -> Result<(), i32> {
if fperm == mode {
if self.verbose && !self.changes {
show_info!("mode of '{}' retained as {:o}", file.display(), fperm);
}
Ok(())
} else if let Err(err) =
fs::set_permissions(Path::new(path), fs::Permissions::from_mode(mode))
{
if !self.quiet {
show_error!("{}", err);
}
if self.verbose {
show_info!(
"failed to change mode of file '{}' from {:o} to {:o}",
file.display(),
fperm,
mode
);
}
Err(1)
} else {
if self.verbose || self.changes {
show_info!(
"mode of '{}' changed from {:o} to {:o}",
file.display(),
fperm,
mode
);
}
Ok(())
}
}
}
| {
let syntax = format!(
"[OPTION]... MODE[,MODE]... FILE...
{0} [OPTION]... OCTAL-MODE FILE...
{0} [OPTION]... --reference=RFILE FILE...",
NAME
);
let mut opts = new_coreopts!(&syntax, SUMMARY, LONG_HELP);
opts.optflag("c", "changes", "like verbose but report only when a change is made \
(unimplemented)")
// TODO: support --silent (can be done using clap)
.optflag("f", "quiet", "suppress most error messages (unimplemented)")
.optflag("v", "verbose", "output a diagnostic for every file processed (unimplemented)")
.optflag("", "no-preserve-root", "do not treat '/' specially (the default)")
.optflag("", "preserve-root", "fail to operate recursively on '/'")
.optopt("", "reference", "use RFILE's mode instead of MODE values", "RFILE")
.optflag("R", "recursive", "change files and directories recursively");
// sanitize input for - at beginning (e.g. chmod -x testfile). Remove
// the option and save it for later, after parsing is finished. | identifier_body |
chmod.rs | #![crate_name = "uu_chmod"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Alex Lyon <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
#[cfg(unix)]
extern crate libc;
extern crate walker;
#[macro_use]
extern crate uucore;
use std::fs;
use std::os::unix::fs::{MetadataExt, PermissionsExt};
use std::path::Path;
use walker::Walker;
#[cfg(not(windows))]
use uucore::mode;
const NAME: &'static str = "chmod";
static SUMMARY: &'static str = "Change the mode of each FILE to MODE.
With --reference, change the mode of each FILE to that of RFILE.";
static LONG_HELP: &'static str = "
Each MODE is of the form '[ugoa]*([-+=]([rwxXst]*|[ugo]))+|[-+=]?[0-7]+'.
";
pub fn uumain(mut args: Vec<String>) -> i32 {
let syntax = format!(
"[OPTION]... MODE[,MODE]... FILE...
{0} [OPTION]... OCTAL-MODE FILE...
{0} [OPTION]... --reference=RFILE FILE...",
NAME
);
let mut opts = new_coreopts!(&syntax, SUMMARY, LONG_HELP);
opts.optflag("c", "changes", "like verbose but report only when a change is made \
(unimplemented)")
// TODO: support --silent (can be done using clap)
.optflag("f", "quiet", "suppress most error messages (unimplemented)")
.optflag("v", "verbose", "output a diagnostic for every file processed (unimplemented)")
.optflag("", "no-preserve-root", "do not treat '/' specially (the default)")
.optflag("", "preserve-root", "fail to operate recursively on '/'")
.optopt("", "reference", "use RFILE's mode instead of MODE values", "RFILE")
.optflag("R", "recursive", "change files and directories recursively");
// sanitize input for - at beginning (e.g. chmod -x testfile). Remove
// the option and save it for later, after parsing is finished.
let negative_option = sanitize_input(&mut args);
let mut matches = opts.parse(args);
if matches.free.is_empty() {
show_error!("missing an argument");
show_error!("for help, try '{} --help'", NAME);
return 1;
} else {
let changes = matches.opt_present("changes");
let quiet = matches.opt_present("quiet");
let verbose = matches.opt_present("verbose");
let preserve_root = matches.opt_present("preserve-root");
let recursive = matches.opt_present("recursive");
let fmode = matches
.opt_str("reference")
.and_then(|ref fref| match fs::metadata(fref) {
Ok(meta) => Some(meta.mode()),
Err(err) => crash!(1, "cannot stat attributes of '{}': {}", fref, err),
});
let cmode = if fmode.is_none() {
// If there was a negative option, now it's a good time to
// use it.
if negative_option.is_some() {
negative_option
} else {
Some(matches.free.remove(0))
}
} else {
None
};
let chmoder = Chmoder {
changes: changes,
quiet: quiet,
verbose: verbose,
preserve_root: preserve_root,
recursive: recursive,
fmode: fmode,
cmode: cmode,
};
match chmoder.chmod(matches.free) {
Ok(()) => {}
Err(e) => return e,
}
}
0
}
fn sanitize_input(args: &mut Vec<String>) -> Option<String> {
for i in 0..args.len() {
let first = args[i].chars().nth(0).unwrap();
if first != '-' {
continue;
}
if let Some(second) = args[i].chars().nth(1) {
match second {
'r' | 'w' | 'x' | 'X' | 's' | 't' | 'u' | 'g' | 'o' | '0'...'7' => {
return Some(args.remove(i));
}
_ => {}
}
}
}
None
}
struct | {
changes: bool,
quiet: bool,
verbose: bool,
preserve_root: bool,
recursive: bool,
fmode: Option<u32>,
cmode: Option<String>,
}
impl Chmoder {
fn chmod(&self, files: Vec<String>) -> Result<(), i32> {
let mut r = Ok(());
for filename in &files {
let filename = &filename[..];
let file = Path::new(filename);
if file.exists() {
if file.is_dir() {
if !self.preserve_root || filename != "/" {
if self.recursive {
let walk_dir = match Walker::new(&file) {
Ok(m) => m,
Err(f) => {
crash!(1, "{}", f.to_string());
}
};
// XXX: here (and elsewhere) we see that this impl will have issues
// with non-UTF-8 filenames. Using OsString won't fix this because
// on Windows OsStrings cannot be built out of non-UTF-8 chars. One
// possible fix is to use CStrings rather than Strings in the args
// to chmod() and chmod_file().
r = self.chmod(
walk_dir
.filter_map(|x| match x {
Ok(o) => match o.path().into_os_string().to_str() {
Some(s) => Some(s.to_owned()),
None => None,
},
Err(_) => None,
})
.collect(),
).and(r);
r = self.chmod_file(&file, filename).and(r);
}
} else {
show_error!("could not change permissions of directory '{}'", filename);
r = Err(1);
}
} else {
r = self.chmod_file(&file, filename).and(r);
}
} else {
show_error!("no such file or directory '{}'", filename);
r = Err(1);
}
}
r
}
#[cfg(windows)]
fn chmod_file(&self, file: &Path, name: &str) -> Result<(), i32> {
// chmod is useless on Windows
// it doesn't set any permissions at all
// instead it just sets the readonly attribute on the file
Err(0)
}
#[cfg(any(unix, target_os = "redox"))]
fn chmod_file(&self, file: &Path, name: &str) -> Result<(), i32> {
let mut fperm = match fs::metadata(name) {
Ok(meta) => meta.mode() & 0o7777,
Err(err) => {
if !self.quiet {
show_error!("{}", err);
}
return Err(1);
}
};
match self.fmode {
Some(mode) => try!(self.change_file(fperm, mode, file, name)),
None => {
let cmode_unwrapped = self.cmode.clone().unwrap();
for mode in cmode_unwrapped.split(',') {
// cmode is guaranteed to be Some in this case
let arr: &[char] = &['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];
let result = if mode.contains(arr) {
mode::parse_numeric(fperm, mode)
} else {
mode::parse_symbolic(fperm, mode, file.is_dir())
};
match result {
Ok(mode) => {
try!(self.change_file(fperm, mode, file, name));
fperm = mode;
}
Err(f) => {
if !self.quiet {
show_error!("{}", f);
}
return Err(1);
}
}
}
}
}
Ok(())
}
fn change_file(&self, fperm: u32, mode: u32, file: &Path, path: &str) -> Result<(), i32> {
if fperm == mode {
if self.verbose && !self.changes {
show_info!("mode of '{}' retained as {:o}", file.display(), fperm);
}
Ok(())
} else if let Err(err) =
fs::set_permissions(Path::new(path), fs::Permissions::from_mode(mode))
{
if !self.quiet {
show_error!("{}", err);
}
if self.verbose {
show_info!(
"failed to change mode of file '{}' from {:o} to {:o}",
file.display(),
fperm,
mode
);
}
Err(1)
} else {
if self.verbose || self.changes {
show_info!(
"mode of '{}' changed from {:o} to {:o}",
file.display(),
fperm,
mode
);
}
Ok(())
}
}
}
| Chmoder | identifier_name |
chmod.rs | #![crate_name = "uu_chmod"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Alex Lyon <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
#[cfg(unix)]
extern crate libc;
extern crate walker;
#[macro_use]
extern crate uucore;
use std::fs;
use std::os::unix::fs::{MetadataExt, PermissionsExt};
use std::path::Path;
use walker::Walker;
#[cfg(not(windows))]
use uucore::mode;
const NAME: &'static str = "chmod";
static SUMMARY: &'static str = "Change the mode of each FILE to MODE.
With --reference, change the mode of each FILE to that of RFILE.";
static LONG_HELP: &'static str = "
Each MODE is of the form '[ugoa]*([-+=]([rwxXst]*|[ugo]))+|[-+=]?[0-7]+'.
";
pub fn uumain(mut args: Vec<String>) -> i32 {
let syntax = format!(
"[OPTION]... MODE[,MODE]... FILE...
{0} [OPTION]... OCTAL-MODE FILE...
{0} [OPTION]... --reference=RFILE FILE...",
NAME
);
let mut opts = new_coreopts!(&syntax, SUMMARY, LONG_HELP);
opts.optflag("c", "changes", "like verbose but report only when a change is made \
(unimplemented)")
// TODO: support --silent (can be done using clap)
.optflag("f", "quiet", "suppress most error messages (unimplemented)")
.optflag("v", "verbose", "output a diagnostic for every file processed (unimplemented)")
.optflag("", "no-preserve-root", "do not treat '/' specially (the default)")
.optflag("", "preserve-root", "fail to operate recursively on '/'")
.optopt("", "reference", "use RFILE's mode instead of MODE values", "RFILE")
.optflag("R", "recursive", "change files and directories recursively");
// sanitize input for - at beginning (e.g. chmod -x testfile). Remove
// the option and save it for later, after parsing is finished.
let negative_option = sanitize_input(&mut args);
let mut matches = opts.parse(args);
if matches.free.is_empty() {
show_error!("missing an argument");
show_error!("for help, try '{} --help'", NAME);
return 1;
} else {
let changes = matches.opt_present("changes");
let quiet = matches.opt_present("quiet");
let verbose = matches.opt_present("verbose");
let preserve_root = matches.opt_present("preserve-root");
let recursive = matches.opt_present("recursive");
let fmode = matches
.opt_str("reference")
.and_then(|ref fref| match fs::metadata(fref) {
Ok(meta) => Some(meta.mode()),
Err(err) => crash!(1, "cannot stat attributes of '{}': {}", fref, err),
});
let cmode = if fmode.is_none() {
// If there was a negative option, now it's a good time to
// use it.
if negative_option.is_some() {
negative_option
} else {
Some(matches.free.remove(0))
}
} else {
None
};
let chmoder = Chmoder {
changes: changes,
quiet: quiet,
verbose: verbose,
preserve_root: preserve_root,
recursive: recursive, | match chmoder.chmod(matches.free) {
Ok(()) => {}
Err(e) => return e,
}
}
0
}
fn sanitize_input(args: &mut Vec<String>) -> Option<String> {
for i in 0..args.len() {
let first = args[i].chars().nth(0).unwrap();
if first != '-' {
continue;
}
if let Some(second) = args[i].chars().nth(1) {
match second {
'r' | 'w' | 'x' | 'X' | 's' | 't' | 'u' | 'g' | 'o' | '0'...'7' => {
return Some(args.remove(i));
}
_ => {}
}
}
}
None
}
struct Chmoder {
changes: bool,
quiet: bool,
verbose: bool,
preserve_root: bool,
recursive: bool,
fmode: Option<u32>,
cmode: Option<String>,
}
impl Chmoder {
fn chmod(&self, files: Vec<String>) -> Result<(), i32> {
let mut r = Ok(());
for filename in &files {
let filename = &filename[..];
let file = Path::new(filename);
if file.exists() {
if file.is_dir() {
if !self.preserve_root || filename != "/" {
if self.recursive {
let walk_dir = match Walker::new(&file) {
Ok(m) => m,
Err(f) => {
crash!(1, "{}", f.to_string());
}
};
// XXX: here (and elsewhere) we see that this impl will have issues
// with non-UTF-8 filenames. Using OsString won't fix this because
// on Windows OsStrings cannot be built out of non-UTF-8 chars. One
// possible fix is to use CStrings rather than Strings in the args
// to chmod() and chmod_file().
r = self.chmod(
walk_dir
.filter_map(|x| match x {
Ok(o) => match o.path().into_os_string().to_str() {
Some(s) => Some(s.to_owned()),
None => None,
},
Err(_) => None,
})
.collect(),
).and(r);
r = self.chmod_file(&file, filename).and(r);
}
} else {
show_error!("could not change permissions of directory '{}'", filename);
r = Err(1);
}
} else {
r = self.chmod_file(&file, filename).and(r);
}
} else {
show_error!("no such file or directory '{}'", filename);
r = Err(1);
}
}
r
}
#[cfg(windows)]
fn chmod_file(&self, file: &Path, name: &str) -> Result<(), i32> {
// chmod is useless on Windows
// it doesn't set any permissions at all
// instead it just sets the readonly attribute on the file
Err(0)
}
#[cfg(any(unix, target_os = "redox"))]
fn chmod_file(&self, file: &Path, name: &str) -> Result<(), i32> {
let mut fperm = match fs::metadata(name) {
Ok(meta) => meta.mode() & 0o7777,
Err(err) => {
if !self.quiet {
show_error!("{}", err);
}
return Err(1);
}
};
match self.fmode {
Some(mode) => try!(self.change_file(fperm, mode, file, name)),
None => {
let cmode_unwrapped = self.cmode.clone().unwrap();
for mode in cmode_unwrapped.split(',') {
// cmode is guaranteed to be Some in this case
let arr: &[char] = &['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];
let result = if mode.contains(arr) {
mode::parse_numeric(fperm, mode)
} else {
mode::parse_symbolic(fperm, mode, file.is_dir())
};
match result {
Ok(mode) => {
try!(self.change_file(fperm, mode, file, name));
fperm = mode;
}
Err(f) => {
if !self.quiet {
show_error!("{}", f);
}
return Err(1);
}
}
}
}
}
Ok(())
}
fn change_file(&self, fperm: u32, mode: u32, file: &Path, path: &str) -> Result<(), i32> {
if fperm == mode {
if self.verbose && !self.changes {
show_info!("mode of '{}' retained as {:o}", file.display(), fperm);
}
Ok(())
} else if let Err(err) =
fs::set_permissions(Path::new(path), fs::Permissions::from_mode(mode))
{
if !self.quiet {
show_error!("{}", err);
}
if self.verbose {
show_info!(
"failed to change mode of file '{}' from {:o} to {:o}",
file.display(),
fperm,
mode
);
}
Err(1)
} else {
if self.verbose || self.changes {
show_info!(
"mode of '{}' changed from {:o} to {:o}",
file.display(),
fperm,
mode
);
}
Ok(())
}
}
} | fmode: fmode,
cmode: cmode,
}; | random_line_split |
chmod.rs | #![crate_name = "uu_chmod"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Alex Lyon <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
#[cfg(unix)]
extern crate libc;
extern crate walker;
#[macro_use]
extern crate uucore;
use std::fs;
use std::os::unix::fs::{MetadataExt, PermissionsExt};
use std::path::Path;
use walker::Walker;
#[cfg(not(windows))]
use uucore::mode;
const NAME: &'static str = "chmod";
static SUMMARY: &'static str = "Change the mode of each FILE to MODE.
With --reference, change the mode of each FILE to that of RFILE.";
static LONG_HELP: &'static str = "
Each MODE is of the form '[ugoa]*([-+=]([rwxXst]*|[ugo]))+|[-+=]?[0-7]+'.
";
pub fn uumain(mut args: Vec<String>) -> i32 {
let syntax = format!(
"[OPTION]... MODE[,MODE]... FILE...
{0} [OPTION]... OCTAL-MODE FILE...
{0} [OPTION]... --reference=RFILE FILE...",
NAME
);
let mut opts = new_coreopts!(&syntax, SUMMARY, LONG_HELP);
opts.optflag("c", "changes", "like verbose but report only when a change is made \
(unimplemented)")
// TODO: support --silent (can be done using clap)
.optflag("f", "quiet", "suppress most error messages (unimplemented)")
.optflag("v", "verbose", "output a diagnostic for every file processed (unimplemented)")
.optflag("", "no-preserve-root", "do not treat '/' specially (the default)")
.optflag("", "preserve-root", "fail to operate recursively on '/'")
.optopt("", "reference", "use RFILE's mode instead of MODE values", "RFILE")
.optflag("R", "recursive", "change files and directories recursively");
// sanitize input for - at beginning (e.g. chmod -x testfile). Remove
// the option and save it for later, after parsing is finished.
let negative_option = sanitize_input(&mut args);
let mut matches = opts.parse(args);
if matches.free.is_empty() {
show_error!("missing an argument");
show_error!("for help, try '{} --help'", NAME);
return 1;
} else | } else {
None
};
let chmoder = Chmoder {
changes: changes,
quiet: quiet,
verbose: verbose,
preserve_root: preserve_root,
recursive: recursive,
fmode: fmode,
cmode: cmode,
};
match chmoder.chmod(matches.free) {
Ok(()) => {}
Err(e) => return e,
}
}
0
}
fn sanitize_input(args: &mut Vec<String>) -> Option<String> {
for i in 0..args.len() {
let first = args[i].chars().nth(0).unwrap();
if first != '-' {
continue;
}
if let Some(second) = args[i].chars().nth(1) {
match second {
'r' | 'w' | 'x' | 'X' | 's' | 't' | 'u' | 'g' | 'o' | '0'...'7' => {
return Some(args.remove(i));
}
_ => {}
}
}
}
None
}
struct Chmoder {
changes: bool,
quiet: bool,
verbose: bool,
preserve_root: bool,
recursive: bool,
fmode: Option<u32>,
cmode: Option<String>,
}
impl Chmoder {
fn chmod(&self, files: Vec<String>) -> Result<(), i32> {
let mut r = Ok(());
for filename in &files {
let filename = &filename[..];
let file = Path::new(filename);
if file.exists() {
if file.is_dir() {
if !self.preserve_root || filename != "/" {
if self.recursive {
let walk_dir = match Walker::new(&file) {
Ok(m) => m,
Err(f) => {
crash!(1, "{}", f.to_string());
}
};
// XXX: here (and elsewhere) we see that this impl will have issues
// with non-UTF-8 filenames. Using OsString won't fix this because
// on Windows OsStrings cannot be built out of non-UTF-8 chars. One
// possible fix is to use CStrings rather than Strings in the args
// to chmod() and chmod_file().
r = self.chmod(
walk_dir
.filter_map(|x| match x {
Ok(o) => match o.path().into_os_string().to_str() {
Some(s) => Some(s.to_owned()),
None => None,
},
Err(_) => None,
})
.collect(),
).and(r);
r = self.chmod_file(&file, filename).and(r);
}
} else {
show_error!("could not change permissions of directory '{}'", filename);
r = Err(1);
}
} else {
r = self.chmod_file(&file, filename).and(r);
}
} else {
show_error!("no such file or directory '{}'", filename);
r = Err(1);
}
}
r
}
#[cfg(windows)]
fn chmod_file(&self, file: &Path, name: &str) -> Result<(), i32> {
// chmod is useless on Windows
// it doesn't set any permissions at all
// instead it just sets the readonly attribute on the file
Err(0)
}
#[cfg(any(unix, target_os = "redox"))]
fn chmod_file(&self, file: &Path, name: &str) -> Result<(), i32> {
let mut fperm = match fs::metadata(name) {
Ok(meta) => meta.mode() & 0o7777,
Err(err) => {
if !self.quiet {
show_error!("{}", err);
}
return Err(1);
}
};
match self.fmode {
Some(mode) => try!(self.change_file(fperm, mode, file, name)),
None => {
let cmode_unwrapped = self.cmode.clone().unwrap();
for mode in cmode_unwrapped.split(',') {
// cmode is guaranteed to be Some in this case
let arr: &[char] = &['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];
let result = if mode.contains(arr) {
mode::parse_numeric(fperm, mode)
} else {
mode::parse_symbolic(fperm, mode, file.is_dir())
};
match result {
Ok(mode) => {
try!(self.change_file(fperm, mode, file, name));
fperm = mode;
}
Err(f) => {
if !self.quiet {
show_error!("{}", f);
}
return Err(1);
}
}
}
}
}
Ok(())
}
fn change_file(&self, fperm: u32, mode: u32, file: &Path, path: &str) -> Result<(), i32> {
if fperm == mode {
if self.verbose && !self.changes {
show_info!("mode of '{}' retained as {:o}", file.display(), fperm);
}
Ok(())
} else if let Err(err) =
fs::set_permissions(Path::new(path), fs::Permissions::from_mode(mode))
{
if !self.quiet {
show_error!("{}", err);
}
if self.verbose {
show_info!(
"failed to change mode of file '{}' from {:o} to {:o}",
file.display(),
fperm,
mode
);
}
Err(1)
} else {
if self.verbose || self.changes {
show_info!(
"mode of '{}' changed from {:o} to {:o}",
file.display(),
fperm,
mode
);
}
Ok(())
}
}
}
| {
let changes = matches.opt_present("changes");
let quiet = matches.opt_present("quiet");
let verbose = matches.opt_present("verbose");
let preserve_root = matches.opt_present("preserve-root");
let recursive = matches.opt_present("recursive");
let fmode = matches
.opt_str("reference")
.and_then(|ref fref| match fs::metadata(fref) {
Ok(meta) => Some(meta.mode()),
Err(err) => crash!(1, "cannot stat attributes of '{}': {}", fref, err),
});
let cmode = if fmode.is_none() {
// If there was a negative option, now it's a good time to
// use it.
if negative_option.is_some() {
negative_option
} else {
Some(matches.free.remove(0))
} | conditional_block |
gdb-pretty-struct-and-enums.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows failing on win32 bot
// ignore-freebsd: output doesn't match
// ignore-tidy-linelength
// ignore-lldb
// ignore-android: FIXME(#10381)
// compile-flags:-g
// This test uses some GDB Python API features (e.g. accessing anonymous fields)
// which are only available in newer GDB version. The following directive will
// cause the test runner to ignore this test if an older GDB version is used:
// min-gdb-version 7.7
|
// gdb-command: print regular_struct
// gdb-check:$1 = RegularStruct = {the_first_field = 101, the_second_field = 102.5, the_third_field = false, the_fourth_field = "I'm so pretty, oh so pretty..."}
// gdb-command: print tuple
// gdb-check:$2 = {true, 103, "blub"}
// gdb-command: print tuple_struct
// gdb-check:$3 = TupleStruct = {-104.5, 105}
// gdb-command: print empty_struct
// gdb-check:$4 = EmptyStruct
// gdb-command: print c_style_enum1
// gdb-check:$5 = CStyleEnumVar1
// gdb-command: print c_style_enum2
// gdb-check:$6 = CStyleEnumVar2
// gdb-command: print c_style_enum3
// gdb-check:$7 = CStyleEnumVar3
// gdb-command: print mixed_enum_c_style_var
// gdb-check:$8 = MixedEnumCStyleVar
// gdb-command: print mixed_enum_tuple_var
// gdb-check:$9 = MixedEnumTupleVar = {106, 107, false}
// gdb-command: print mixed_enum_struct_var
// gdb-check:$10 = MixedEnumStructVar = {field1 = 108.5, field2 = 109}
// gdb-command: print some
// gdb-check:$11 = Some = {110}
// gdb-command: print none
// gdb-check:$12 = None
// gdb-command: print some_fat
// gdb-check:$13 = Some = {"abc"}
// gdb-command: print none_fat
// gdb-check:$14 = None
// gdb-command: print nested_variant1
// gdb-check:$15 = NestedVariant1 = {NestedStruct = {regular_struct = RegularStruct = {the_first_field = 111, the_second_field = 112.5, the_third_field = true, the_fourth_field = "NestedStructString1"}, tuple_struct = TupleStruct = {113.5, 114}, empty_struct = EmptyStruct, c_style_enum = CStyleEnumVar2, mixed_enum = MixedEnumTupleVar = {115, 116, false}}}
// gdb-command: print nested_variant2
// gdb-check:$16 = NestedVariant2 = {abc = NestedStruct = {regular_struct = RegularStruct = {the_first_field = 117, the_second_field = 118.5, the_third_field = false, the_fourth_field = "NestedStructString10"}, tuple_struct = TupleStruct = {119.5, 120}, empty_struct = EmptyStruct, c_style_enum = CStyleEnumVar3, mixed_enum = MixedEnumStructVar = {field1 = 121.5, field2 = -122}}}
// gdb-command: print none_check1
// gdb-check:$17 = None
// gdb-command: print none_check2
// gdb-check:$18 = None
#![allow(dead_code, unused_variables)]
use self::CStyleEnum::{CStyleEnumVar1, CStyleEnumVar2, CStyleEnumVar3};
use self::MixedEnum::{MixedEnumCStyleVar, MixedEnumTupleVar, MixedEnumStructVar};
use self::NestedEnum::{NestedVariant1, NestedVariant2};
struct RegularStruct {
the_first_field: isize,
the_second_field: f64,
the_third_field: bool,
the_fourth_field: &'static str,
}
struct TupleStruct(f64, i16);
struct EmptyStruct;
enum CStyleEnum {
CStyleEnumVar1,
CStyleEnumVar2,
CStyleEnumVar3,
}
enum MixedEnum {
MixedEnumCStyleVar,
MixedEnumTupleVar(u32, u16, bool),
MixedEnumStructVar { field1: f64, field2: i32 }
}
struct NestedStruct {
regular_struct: RegularStruct,
tuple_struct: TupleStruct,
empty_struct: EmptyStruct,
c_style_enum: CStyleEnum,
mixed_enum: MixedEnum,
}
enum NestedEnum {
NestedVariant1(NestedStruct),
NestedVariant2 { abc: NestedStruct }
}
fn main() {
let regular_struct = RegularStruct {
the_first_field: 101,
the_second_field: 102.5,
the_third_field: false,
the_fourth_field: "I'm so pretty, oh so pretty..."
};
let tuple = ( true, 103u32, "blub" );
let tuple_struct = TupleStruct(-104.5, 105);
let empty_struct = EmptyStruct;
let c_style_enum1 = CStyleEnumVar1;
let c_style_enum2 = CStyleEnumVar2;
let c_style_enum3 = CStyleEnumVar3;
let mixed_enum_c_style_var = MixedEnumCStyleVar;
let mixed_enum_tuple_var = MixedEnumTupleVar(106, 107, false);
let mixed_enum_struct_var = MixedEnumStructVar { field1: 108.5, field2: 109 };
let some = Some(110_usize);
let none: Option<isize> = None;
let some_fat = Some("abc");
let none_fat: Option<&'static str> = None;
let nested_variant1 = NestedVariant1(
NestedStruct {
regular_struct: RegularStruct {
the_first_field: 111,
the_second_field: 112.5,
the_third_field: true,
the_fourth_field: "NestedStructString1",
},
tuple_struct: TupleStruct(113.5, 114),
empty_struct: EmptyStruct,
c_style_enum: CStyleEnumVar2,
mixed_enum: MixedEnumTupleVar(115, 116, false)
}
);
let nested_variant2 = NestedVariant2 {
abc: NestedStruct {
regular_struct: RegularStruct {
the_first_field: 117,
the_second_field: 118.5,
the_third_field: false,
the_fourth_field: "NestedStructString10",
},
tuple_struct: TupleStruct(119.5, 120),
empty_struct: EmptyStruct,
c_style_enum: CStyleEnumVar3,
mixed_enum: MixedEnumStructVar {
field1: 121.5,
field2: -122
}
}
};
let none_check1: Option<(usize, Vec<usize>)> = None;
let none_check2: Option<String> = None;
zzz(); // #break
}
fn zzz() { () } | // gdb-command: run | random_line_split |
gdb-pretty-struct-and-enums.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-windows failing on win32 bot
// ignore-freebsd: output doesn't match
// ignore-tidy-linelength
// ignore-lldb
// ignore-android: FIXME(#10381)
// compile-flags:-g
// This test uses some GDB Python API features (e.g. accessing anonymous fields)
// which are only available in newer GDB version. The following directive will
// cause the test runner to ignore this test if an older GDB version is used:
// min-gdb-version 7.7
// gdb-command: run
// gdb-command: print regular_struct
// gdb-check:$1 = RegularStruct = {the_first_field = 101, the_second_field = 102.5, the_third_field = false, the_fourth_field = "I'm so pretty, oh so pretty..."}
// gdb-command: print tuple
// gdb-check:$2 = {true, 103, "blub"}
// gdb-command: print tuple_struct
// gdb-check:$3 = TupleStruct = {-104.5, 105}
// gdb-command: print empty_struct
// gdb-check:$4 = EmptyStruct
// gdb-command: print c_style_enum1
// gdb-check:$5 = CStyleEnumVar1
// gdb-command: print c_style_enum2
// gdb-check:$6 = CStyleEnumVar2
// gdb-command: print c_style_enum3
// gdb-check:$7 = CStyleEnumVar3
// gdb-command: print mixed_enum_c_style_var
// gdb-check:$8 = MixedEnumCStyleVar
// gdb-command: print mixed_enum_tuple_var
// gdb-check:$9 = MixedEnumTupleVar = {106, 107, false}
// gdb-command: print mixed_enum_struct_var
// gdb-check:$10 = MixedEnumStructVar = {field1 = 108.5, field2 = 109}
// gdb-command: print some
// gdb-check:$11 = Some = {110}
// gdb-command: print none
// gdb-check:$12 = None
// gdb-command: print some_fat
// gdb-check:$13 = Some = {"abc"}
// gdb-command: print none_fat
// gdb-check:$14 = None
// gdb-command: print nested_variant1
// gdb-check:$15 = NestedVariant1 = {NestedStruct = {regular_struct = RegularStruct = {the_first_field = 111, the_second_field = 112.5, the_third_field = true, the_fourth_field = "NestedStructString1"}, tuple_struct = TupleStruct = {113.5, 114}, empty_struct = EmptyStruct, c_style_enum = CStyleEnumVar2, mixed_enum = MixedEnumTupleVar = {115, 116, false}}}
// gdb-command: print nested_variant2
// gdb-check:$16 = NestedVariant2 = {abc = NestedStruct = {regular_struct = RegularStruct = {the_first_field = 117, the_second_field = 118.5, the_third_field = false, the_fourth_field = "NestedStructString10"}, tuple_struct = TupleStruct = {119.5, 120}, empty_struct = EmptyStruct, c_style_enum = CStyleEnumVar3, mixed_enum = MixedEnumStructVar = {field1 = 121.5, field2 = -122}}}
// gdb-command: print none_check1
// gdb-check:$17 = None
// gdb-command: print none_check2
// gdb-check:$18 = None
#![allow(dead_code, unused_variables)]
use self::CStyleEnum::{CStyleEnumVar1, CStyleEnumVar2, CStyleEnumVar3};
use self::MixedEnum::{MixedEnumCStyleVar, MixedEnumTupleVar, MixedEnumStructVar};
use self::NestedEnum::{NestedVariant1, NestedVariant2};
struct RegularStruct {
the_first_field: isize,
the_second_field: f64,
the_third_field: bool,
the_fourth_field: &'static str,
}
struct TupleStruct(f64, i16);
struct EmptyStruct;
enum | {
CStyleEnumVar1,
CStyleEnumVar2,
CStyleEnumVar3,
}
enum MixedEnum {
MixedEnumCStyleVar,
MixedEnumTupleVar(u32, u16, bool),
MixedEnumStructVar { field1: f64, field2: i32 }
}
struct NestedStruct {
regular_struct: RegularStruct,
tuple_struct: TupleStruct,
empty_struct: EmptyStruct,
c_style_enum: CStyleEnum,
mixed_enum: MixedEnum,
}
enum NestedEnum {
NestedVariant1(NestedStruct),
NestedVariant2 { abc: NestedStruct }
}
fn main() {
let regular_struct = RegularStruct {
the_first_field: 101,
the_second_field: 102.5,
the_third_field: false,
the_fourth_field: "I'm so pretty, oh so pretty..."
};
let tuple = ( true, 103u32, "blub" );
let tuple_struct = TupleStruct(-104.5, 105);
let empty_struct = EmptyStruct;
let c_style_enum1 = CStyleEnumVar1;
let c_style_enum2 = CStyleEnumVar2;
let c_style_enum3 = CStyleEnumVar3;
let mixed_enum_c_style_var = MixedEnumCStyleVar;
let mixed_enum_tuple_var = MixedEnumTupleVar(106, 107, false);
let mixed_enum_struct_var = MixedEnumStructVar { field1: 108.5, field2: 109 };
let some = Some(110_usize);
let none: Option<isize> = None;
let some_fat = Some("abc");
let none_fat: Option<&'static str> = None;
let nested_variant1 = NestedVariant1(
NestedStruct {
regular_struct: RegularStruct {
the_first_field: 111,
the_second_field: 112.5,
the_third_field: true,
the_fourth_field: "NestedStructString1",
},
tuple_struct: TupleStruct(113.5, 114),
empty_struct: EmptyStruct,
c_style_enum: CStyleEnumVar2,
mixed_enum: MixedEnumTupleVar(115, 116, false)
}
);
let nested_variant2 = NestedVariant2 {
abc: NestedStruct {
regular_struct: RegularStruct {
the_first_field: 117,
the_second_field: 118.5,
the_third_field: false,
the_fourth_field: "NestedStructString10",
},
tuple_struct: TupleStruct(119.5, 120),
empty_struct: EmptyStruct,
c_style_enum: CStyleEnumVar3,
mixed_enum: MixedEnumStructVar {
field1: 121.5,
field2: -122
}
}
};
let none_check1: Option<(usize, Vec<usize>)> = None;
let none_check2: Option<String> = None;
zzz(); // #break
}
fn zzz() { () }
| CStyleEnum | identifier_name |
bswap16.rs | #![feature(core, core_intrinsics)]
extern crate core;
#[cfg(test)]
mod tests {
use core::intrinsics::bswap16;
// pub fn bswap16(x: u16) -> u16;
macro_rules! bswap16_test {
($value:expr, $reverse:expr) => ({
let x: u16 = $value;
let result: u16 = unsafe { bswap16(x) };
assert_eq!(result, $reverse);
})
}
#[test]
fn bswap16_test1() |
}
| {
bswap16_test!(0x0000, 0x0000);
bswap16_test!(0x0001, 0x0100);
bswap16_test!(0x0002, 0x0200);
bswap16_test!(0x0004, 0x0400);
bswap16_test!(0x0008, 0x0800);
bswap16_test!(0x0010, 0x1000);
bswap16_test!(0x0020, 0x2000);
bswap16_test!(0x0040, 0x4000);
bswap16_test!(0x0080, 0x8000);
bswap16_test!(0x0100, 0x0001);
bswap16_test!(0x0200, 0x0002);
bswap16_test!(0x0400, 0x0004);
bswap16_test!(0x0800, 0x0008);
bswap16_test!(0x1000, 0x0010);
bswap16_test!(0x2000, 0x0020);
bswap16_test!(0x4000, 0x0040);
bswap16_test!(0x8000, 0x0080);
} | identifier_body |
bswap16.rs | #![feature(core, core_intrinsics)]
extern crate core;
#[cfg(test)]
mod tests {
use core::intrinsics::bswap16;
// pub fn bswap16(x: u16) -> u16;
macro_rules! bswap16_test {
($value:expr, $reverse:expr) => ({
let x: u16 = $value;
let result: u16 = unsafe { bswap16(x) };
assert_eq!(result, $reverse);
})
}
#[test]
fn | () {
bswap16_test!(0x0000, 0x0000);
bswap16_test!(0x0001, 0x0100);
bswap16_test!(0x0002, 0x0200);
bswap16_test!(0x0004, 0x0400);
bswap16_test!(0x0008, 0x0800);
bswap16_test!(0x0010, 0x1000);
bswap16_test!(0x0020, 0x2000);
bswap16_test!(0x0040, 0x4000);
bswap16_test!(0x0080, 0x8000);
bswap16_test!(0x0100, 0x0001);
bswap16_test!(0x0200, 0x0002);
bswap16_test!(0x0400, 0x0004);
bswap16_test!(0x0800, 0x0008);
bswap16_test!(0x1000, 0x0010);
bswap16_test!(0x2000, 0x0020);
bswap16_test!(0x4000, 0x0040);
bswap16_test!(0x8000, 0x0080);
}
}
| bswap16_test1 | identifier_name |
bswap16.rs | #![feature(core, core_intrinsics)]
extern crate core;
#[cfg(test)]
mod tests {
use core::intrinsics::bswap16;
// pub fn bswap16(x: u16) -> u16;
macro_rules! bswap16_test {
($value:expr, $reverse:expr) => ({
let x: u16 = $value;
let result: u16 = unsafe { bswap16(x) };
assert_eq!(result, $reverse);
})
}
#[test]
fn bswap16_test1() {
bswap16_test!(0x0000, 0x0000);
bswap16_test!(0x0001, 0x0100);
bswap16_test!(0x0002, 0x0200);
bswap16_test!(0x0004, 0x0400);
bswap16_test!(0x0008, 0x0800);
bswap16_test!(0x0010, 0x1000);
bswap16_test!(0x0020, 0x2000);
bswap16_test!(0x0040, 0x4000);
bswap16_test!(0x0080, 0x8000);
bswap16_test!(0x0100, 0x0001);
bswap16_test!(0x0200, 0x0002);
bswap16_test!(0x0400, 0x0004);
bswap16_test!(0x0800, 0x0008);
bswap16_test!(0x1000, 0x0010);
bswap16_test!(0x2000, 0x0020);
bswap16_test!(0x4000, 0x0040);
bswap16_test!(0x8000, 0x0080); | } | } | random_line_split |
main.rs | use std::fs::File; // File::open(&str)
use std::io::prelude::*; // std::fs::File.read_to_string(&mut str)
static VOWELS: [char; 6] = ['a', 'e', 'i', 'o', 'u', 'y'];
fn get_input() -> String {
let mut file = match File::open("input.txt") {
Ok(input) => input,
Err(err) => panic!("Error: {}", err),
};
let mut input = String::new();
match file.read_to_string(&mut input) {
Ok(input) => input,
Err(err) => panic!("Error: {}", err),
};
input
}
fn main() {
let input = get_input()
.to_lowercase();
let data: Vec<&str> = input
.trim()
.lines()
.collect();
let mut counters: Vec<isize> = Vec::new();
for (i, string) in data[1..].iter().enumerate() {
counters.push(0);
for ch in string.chars() {
if VOWELS.contains(&ch) {
counters[i] += 1;
}
}
}
| }
println!("");
} | println!("");
for ans in counters {
print!("{} ", ans); | random_line_split |
main.rs | use std::fs::File; // File::open(&str)
use std::io::prelude::*; // std::fs::File.read_to_string(&mut str)
static VOWELS: [char; 6] = ['a', 'e', 'i', 'o', 'u', 'y'];
fn | () -> String {
let mut file = match File::open("input.txt") {
Ok(input) => input,
Err(err) => panic!("Error: {}", err),
};
let mut input = String::new();
match file.read_to_string(&mut input) {
Ok(input) => input,
Err(err) => panic!("Error: {}", err),
};
input
}
fn main() {
let input = get_input()
.to_lowercase();
let data: Vec<&str> = input
.trim()
.lines()
.collect();
let mut counters: Vec<isize> = Vec::new();
for (i, string) in data[1..].iter().enumerate() {
counters.push(0);
for ch in string.chars() {
if VOWELS.contains(&ch) {
counters[i] += 1;
}
}
}
println!("");
for ans in counters {
print!("{} ", ans);
}
println!("");
}
| get_input | identifier_name |
main.rs | use std::fs::File; // File::open(&str)
use std::io::prelude::*; // std::fs::File.read_to_string(&mut str)
static VOWELS: [char; 6] = ['a', 'e', 'i', 'o', 'u', 'y'];
fn get_input() -> String |
fn main() {
let input = get_input()
.to_lowercase();
let data: Vec<&str> = input
.trim()
.lines()
.collect();
let mut counters: Vec<isize> = Vec::new();
for (i, string) in data[1..].iter().enumerate() {
counters.push(0);
for ch in string.chars() {
if VOWELS.contains(&ch) {
counters[i] += 1;
}
}
}
println!("");
for ans in counters {
print!("{} ", ans);
}
println!("");
}
| {
let mut file = match File::open("input.txt") {
Ok(input) => input,
Err(err) => panic!("Error: {}", err),
};
let mut input = String::new();
match file.read_to_string(&mut input) {
Ok(input) => input,
Err(err) => panic!("Error: {}", err),
};
input
} | identifier_body |
attr.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Functions dealing with attributes and meta items
use ast;
use ast::{Attribute, Attribute_, MetaItem, MetaWord, MetaNameValue, MetaList};
use codemap::{Span, Spanned, spanned, dummy_spanned};
use codemap::BytePos;
use diagnostic::SpanHandler;
use parse::comments::{doc_comment_style, strip_doc_comment_decoration};
use parse::token::InternedString;
use parse::token;
use crateid::CrateId;
use collections::HashSet;
pub trait AttrMetaMethods {
// This could be changed to `fn check_name(&self, name: InternedString) ->
// bool` which would facilitate a side table recording which
// attributes/meta items are used/unused.
/// Retrieve the name of the meta item, e.g. foo in #[foo],
/// #[foo="bar"] and #[foo(bar)]
fn name(&self) -> InternedString;
/**
* Gets the string value if self is a MetaNameValue variant
* containing a string, otherwise None.
*/
fn value_str(&self) -> Option<InternedString>;
/// Gets a list of inner meta items from a list MetaItem type.
fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]>;
/**
* If the meta item is a name-value type with a string value then returns
* a tuple containing the name and string value, otherwise `None`
*/
fn name_str_pair(&self) -> Option<(InternedString,InternedString)>;
}
impl AttrMetaMethods for Attribute {
fn name(&self) -> InternedString { self.meta().name() }
fn value_str(&self) -> Option<InternedString> {
self.meta().value_str()
}
fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]> {
self.node.value.meta_item_list()
}
fn name_str_pair(&self) -> Option<(InternedString,InternedString)> {
self.meta().name_str_pair()
}
}
impl AttrMetaMethods for MetaItem {
fn name(&self) -> InternedString {
match self.node {
MetaWord(ref n) => (*n).clone(),
MetaNameValue(ref n, _) => (*n).clone(),
MetaList(ref n, _) => (*n).clone(),
}
}
fn value_str(&self) -> Option<InternedString> {
match self.node {
MetaNameValue(_, ref v) => {
match v.node {
ast::LitStr(ref s, _) => Some((*s).clone()),
_ => None,
}
},
_ => None
}
}
fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]> {
match self.node {
MetaList(_, ref l) => Some(l.as_slice()),
_ => None
}
}
fn name_str_pair(&self) -> Option<(InternedString,InternedString)> {
self.value_str().map(|s| (self.name(), s))
}
}
// Annoying, but required to get test_cfg to work
impl AttrMetaMethods for @MetaItem {
fn name(&self) -> InternedString { (**self).name() }
fn value_str(&self) -> Option<InternedString> { (**self).value_str() }
fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]> {
(**self).meta_item_list()
}
fn name_str_pair(&self) -> Option<(InternedString,InternedString)> {
(**self).name_str_pair()
}
}
pub trait AttributeMethods {
fn meta(&self) -> @MetaItem;
fn desugar_doc(&self) -> Attribute;
}
impl AttributeMethods for Attribute {
/// Extract the MetaItem from inside this Attribute.
fn meta(&self) -> @MetaItem {
self.node.value
}
/// Convert self to a normal #[doc="foo"] comment, if it is a
/// comment like `///` or `/** */`. (Returns self unchanged for
/// non-sugared doc attributes.)
fn desugar_doc(&self) -> Attribute {
if self.node.is_sugared_doc {
let comment = self.value_str().unwrap();
let meta = mk_name_value_item_str(
InternedString::new("doc"),
token::intern_and_get_ident(strip_doc_comment_decoration(
comment.get())));
mk_attr(meta)
} else {
*self
}
}
}
/* Constructors */
pub fn mk_name_value_item_str(name: InternedString, value: InternedString)
-> @MetaItem {
let value_lit = dummy_spanned(ast::LitStr(value, ast::CookedStr));
mk_name_value_item(name, value_lit)
}
pub fn mk_name_value_item(name: InternedString, value: ast::Lit)
-> @MetaItem {
@dummy_spanned(MetaNameValue(name, value))
}
pub fn mk_list_item(name: InternedString, items: Vec<@MetaItem> ) -> @MetaItem {
@dummy_spanned(MetaList(name, items))
}
pub fn mk_word_item(name: InternedString) -> @MetaItem {
@dummy_spanned(MetaWord(name))
}
pub fn mk_attr(item: @MetaItem) -> Attribute {
dummy_spanned(Attribute_ {
style: ast::AttrInner,
value: item,
is_sugared_doc: false,
})
}
pub fn mk_sugared_doc_attr(text: InternedString, lo: BytePos, hi: BytePos)
-> Attribute {
let style = doc_comment_style(text.get());
let lit = spanned(lo, hi, ast::LitStr(text, ast::CookedStr));
let attr = Attribute_ {
style: style,
value: @spanned(lo, hi, MetaNameValue(InternedString::new("doc"),
lit)),
is_sugared_doc: true
};
spanned(lo, hi, attr)
}
/* Searching */
/// Check if `needle` occurs in `haystack` by a structural
/// comparison. This is slightly subtle, and relies on ignoring the
/// span included in the `==` comparison of a plain MetaItem.
pub fn contains(haystack: &[@ast::MetaItem],
needle: @ast::MetaItem) -> bool {
debug!("attr::contains (name={})", needle.name());
haystack.iter().any(|item| {
debug!(" testing: {}", item.name());
item.node == needle.node
})
}
pub fn contains_name<AM: AttrMetaMethods>(metas: &[AM], name: &str) -> bool {
debug!("attr::contains_name (name={})", name);
metas.iter().any(|item| {
debug!(" testing: {}", item.name());
item.name().equiv(&name)
})
}
pub fn first_attr_value_str_by_name(attrs: &[Attribute], name: &str)
-> Option<InternedString> {
attrs.iter()
.find(|at| at.name().equiv(&name))
.and_then(|at| at.value_str())
}
pub fn last_meta_item_value_str_by_name(items: &[@MetaItem], name: &str)
-> Option<InternedString> {
items.rev_iter()
.find(|mi| mi.name().equiv(&name))
.and_then(|i| i.value_str())
}
/* Higher-level applications */
pub fn sort_meta_items(items: &[@MetaItem]) -> Vec<@MetaItem> {
// This is sort of stupid here, but we need to sort by
// human-readable strings.
let mut v = items.iter()
.map(|&mi| (mi.name(), mi))
.collect::<Vec<(InternedString, @MetaItem)> >();
v.sort_by(|&(ref a, _), &(ref b, _)| a.cmp(b));
// There doesn't seem to be a more optimal way to do this
v.move_iter().map(|(_, m)| {
match m.node {
MetaList(ref n, ref mis) => {
@Spanned {
node: MetaList((*n).clone(),
sort_meta_items(mis.as_slice())),
.. /*bad*/ (*m).clone()
}
}
_ => m
} | /**
* From a list of crate attributes get only the meta_items that affect crate
* linkage
*/
pub fn find_linkage_metas(attrs: &[Attribute]) -> Vec<@MetaItem> {
let mut result = Vec::new();
for attr in attrs.iter().filter(|at| at.name().equiv(&("link"))) {
match attr.meta().node {
MetaList(_, ref items) => result.push_all(items.as_slice()),
_ => ()
}
}
result
}
pub fn find_crateid(attrs: &[Attribute]) -> Option<CrateId> {
match first_attr_value_str_by_name(attrs, "crate_id") {
None => None,
Some(id) => from_str::<CrateId>(id.get()),
}
}
#[deriving(Eq)]
pub enum InlineAttr {
InlineNone,
InlineHint,
InlineAlways,
InlineNever,
}
/// True if something like #[inline] is found in the list of attrs.
pub fn find_inline_attr(attrs: &[Attribute]) -> InlineAttr {
// FIXME (#2809)---validate the usage of #[inline] and #[inline]
attrs.iter().fold(InlineNone, |ia,attr| {
match attr.node.value.node {
MetaWord(ref n) if n.equiv(&("inline")) => InlineHint,
MetaList(ref n, ref items) if n.equiv(&("inline")) => {
if contains_name(items.as_slice(), "always") {
InlineAlways
} else if contains_name(items.as_slice(), "never") {
InlineNever
} else {
InlineHint
}
}
_ => ia
}
})
}
/// Tests if any `cfg(...)` meta items in `metas` match `cfg`. e.g.
///
/// test_cfg(`[foo="a", bar]`, `[cfg(foo), cfg(bar)]`) == true
/// test_cfg(`[foo="a", bar]`, `[cfg(not(bar))]`) == false
/// test_cfg(`[foo="a", bar]`, `[cfg(bar, foo="a")]`) == true
/// test_cfg(`[foo="a", bar]`, `[cfg(bar, foo="b")]`) == false
pub fn test_cfg<AM: AttrMetaMethods, It: Iterator<AM>>
(cfg: &[@MetaItem], mut metas: It) -> bool {
// having no #[cfg(...)] attributes counts as matching.
let mut no_cfgs = true;
// this would be much nicer as a chain of iterator adaptors, but
// this doesn't work.
let some_cfg_matches = metas.any(|mi| {
debug!("testing name: {}", mi.name());
if mi.name().equiv(&("cfg")) { // it is a #[cfg()] attribute
debug!("is cfg");
no_cfgs = false;
// only #[cfg(...)] ones are understood.
match mi.meta_item_list() {
Some(cfg_meta) => {
debug!("is cfg(...)");
cfg_meta.iter().all(|cfg_mi| {
debug!("cfg({}[...])", cfg_mi.name());
match cfg_mi.node {
ast::MetaList(ref s, ref not_cfgs)
if s.equiv(&("not")) => {
debug!("not!");
// inside #[cfg(not(...))], so these need to all
// not match.
!not_cfgs.iter().all(|mi| {
debug!("cfg(not({}[...]))", mi.name());
contains(cfg, *mi)
})
}
_ => contains(cfg, *cfg_mi)
}
})
}
None => false
}
} else {
false
}
});
debug!("test_cfg (no_cfgs={}, some_cfg_matches={})", no_cfgs, some_cfg_matches);
no_cfgs || some_cfg_matches
}
/// Represents the #[deprecated="foo"] (etc) attributes.
pub struct Stability {
pub level: StabilityLevel,
pub text: Option<InternedString>
}
/// The available stability levels.
#[deriving(Eq,Ord,Clone,Show)]
pub enum StabilityLevel {
Deprecated,
Experimental,
Unstable,
Stable,
Frozen,
Locked
}
/// Find the first stability attribute. `None` if none exists.
pub fn find_stability<AM: AttrMetaMethods, It: Iterator<AM>>(mut metas: It)
-> Option<Stability> {
for m in metas {
let level = match m.name().get() {
"deprecated" => Deprecated,
"experimental" => Experimental,
"unstable" => Unstable,
"stable" => Stable,
"frozen" => Frozen,
"locked" => Locked,
_ => continue // not a stability level
};
return Some(Stability {
level: level,
text: m.value_str()
});
}
None
}
pub fn require_unique_names(diagnostic: &SpanHandler, metas: &[@MetaItem]) {
let mut set = HashSet::new();
for meta in metas.iter() {
let name = meta.name();
if !set.insert(name.clone()) {
diagnostic.span_fatal(meta.span,
format!("duplicate meta item `{}`", name));
}
}
}
/**
* Fold this over attributes to parse #[repr(...)] forms.
*
* Valid repr contents: any of the primitive integral type names (see
* `int_type_of_word`, below) to specify the discriminant type; and `C`, to use
* the same discriminant size that the corresponding C enum would. These are
* not allowed on univariant or zero-variant enums, which have no discriminant.
*
* If a discriminant type is so specified, then the discriminant will be
* present (before fields, if any) with that type; representation
* optimizations which would remove it will not be done.
*/
pub fn find_repr_attr(diagnostic: &SpanHandler, attr: @ast::MetaItem, acc: ReprAttr)
-> ReprAttr {
let mut acc = acc;
match attr.node {
ast::MetaList(ref s, ref items) if s.equiv(&("repr")) => {
for item in items.iter() {
match item.node {
ast::MetaWord(ref word) => {
let hint = match word.get() {
// Can't use "extern" because it's not a lexical identifier.
"C" => ReprExtern,
_ => match int_type_of_word(word.get()) {
Some(ity) => ReprInt(item.span, ity),
None => {
// Not a word we recognize
diagnostic.span_err(item.span,
"unrecognized representation hint");
ReprAny
}
}
};
if hint != ReprAny {
if acc == ReprAny {
acc = hint;
} else if acc != hint {
diagnostic.span_warn(item.span,
"conflicting representation hint ignored")
}
}
}
// Not a word:
_ => diagnostic.span_err(item.span, "unrecognized representation hint")
}
}
}
// Not a "repr" hint: ignore.
_ => { }
}
acc
}
fn int_type_of_word(s: &str) -> Option<IntType> {
match s {
"i8" => Some(SignedInt(ast::TyI8)),
"u8" => Some(UnsignedInt(ast::TyU8)),
"i16" => Some(SignedInt(ast::TyI16)),
"u16" => Some(UnsignedInt(ast::TyU16)),
"i32" => Some(SignedInt(ast::TyI32)),
"u32" => Some(UnsignedInt(ast::TyU32)),
"i64" => Some(SignedInt(ast::TyI64)),
"u64" => Some(UnsignedInt(ast::TyU64)),
"int" => Some(SignedInt(ast::TyI)),
"uint" => Some(UnsignedInt(ast::TyU)),
_ => None
}
}
#[deriving(Eq, Show)]
pub enum ReprAttr {
ReprAny,
ReprInt(Span, IntType),
ReprExtern
}
impl ReprAttr {
pub fn is_ffi_safe(&self) -> bool {
match *self {
ReprAny => false,
ReprInt(_sp, ity) => ity.is_ffi_safe(),
ReprExtern => true
}
}
}
#[deriving(Eq, Show)]
pub enum IntType {
SignedInt(ast::IntTy),
UnsignedInt(ast::UintTy)
}
impl IntType {
#[inline]
pub fn is_signed(self) -> bool {
match self {
SignedInt(..) => true,
UnsignedInt(..) => false
}
}
fn is_ffi_safe(self) -> bool {
match self {
SignedInt(ast::TyI8) | UnsignedInt(ast::TyU8) |
SignedInt(ast::TyI16) | UnsignedInt(ast::TyU16) |
SignedInt(ast::TyI32) | UnsignedInt(ast::TyU32) |
SignedInt(ast::TyI64) | UnsignedInt(ast::TyU64) => true,
_ => false
}
}
} | }).collect()
}
| random_line_split |
attr.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Functions dealing with attributes and meta items
use ast;
use ast::{Attribute, Attribute_, MetaItem, MetaWord, MetaNameValue, MetaList};
use codemap::{Span, Spanned, spanned, dummy_spanned};
use codemap::BytePos;
use diagnostic::SpanHandler;
use parse::comments::{doc_comment_style, strip_doc_comment_decoration};
use parse::token::InternedString;
use parse::token;
use crateid::CrateId;
use collections::HashSet;
pub trait AttrMetaMethods {
// This could be changed to `fn check_name(&self, name: InternedString) ->
// bool` which would facilitate a side table recording which
// attributes/meta items are used/unused.
/// Retrieve the name of the meta item, e.g. foo in #[foo],
/// #[foo="bar"] and #[foo(bar)]
fn name(&self) -> InternedString;
/**
* Gets the string value if self is a MetaNameValue variant
* containing a string, otherwise None.
*/
fn value_str(&self) -> Option<InternedString>;
/// Gets a list of inner meta items from a list MetaItem type.
fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]>;
/**
* If the meta item is a name-value type with a string value then returns
* a tuple containing the name and string value, otherwise `None`
*/
fn name_str_pair(&self) -> Option<(InternedString,InternedString)>;
}
impl AttrMetaMethods for Attribute {
fn name(&self) -> InternedString { self.meta().name() }
fn value_str(&self) -> Option<InternedString> {
self.meta().value_str()
}
fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]> {
self.node.value.meta_item_list()
}
fn name_str_pair(&self) -> Option<(InternedString,InternedString)> {
self.meta().name_str_pair()
}
}
impl AttrMetaMethods for MetaItem {
fn name(&self) -> InternedString {
match self.node {
MetaWord(ref n) => (*n).clone(),
MetaNameValue(ref n, _) => (*n).clone(),
MetaList(ref n, _) => (*n).clone(),
}
}
fn value_str(&self) -> Option<InternedString> {
match self.node {
MetaNameValue(_, ref v) => {
match v.node {
ast::LitStr(ref s, _) => Some((*s).clone()),
_ => None,
}
},
_ => None
}
}
fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]> {
match self.node {
MetaList(_, ref l) => Some(l.as_slice()),
_ => None
}
}
fn name_str_pair(&self) -> Option<(InternedString,InternedString)> {
self.value_str().map(|s| (self.name(), s))
}
}
// Annoying, but required to get test_cfg to work
impl AttrMetaMethods for @MetaItem {
fn name(&self) -> InternedString { (**self).name() }
fn value_str(&self) -> Option<InternedString> { (**self).value_str() }
fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]> {
(**self).meta_item_list()
}
fn name_str_pair(&self) -> Option<(InternedString,InternedString)> {
(**self).name_str_pair()
}
}
pub trait AttributeMethods {
fn meta(&self) -> @MetaItem;
fn desugar_doc(&self) -> Attribute;
}
impl AttributeMethods for Attribute {
/// Extract the MetaItem from inside this Attribute.
fn meta(&self) -> @MetaItem {
self.node.value
}
/// Convert self to a normal #[doc="foo"] comment, if it is a
/// comment like `///` or `/** */`. (Returns self unchanged for
/// non-sugared doc attributes.)
fn desugar_doc(&self) -> Attribute {
if self.node.is_sugared_doc {
let comment = self.value_str().unwrap();
let meta = mk_name_value_item_str(
InternedString::new("doc"),
token::intern_and_get_ident(strip_doc_comment_decoration(
comment.get())));
mk_attr(meta)
} else {
*self
}
}
}
/* Constructors */
pub fn mk_name_value_item_str(name: InternedString, value: InternedString)
-> @MetaItem {
let value_lit = dummy_spanned(ast::LitStr(value, ast::CookedStr));
mk_name_value_item(name, value_lit)
}
pub fn mk_name_value_item(name: InternedString, value: ast::Lit)
-> @MetaItem {
@dummy_spanned(MetaNameValue(name, value))
}
pub fn mk_list_item(name: InternedString, items: Vec<@MetaItem> ) -> @MetaItem {
@dummy_spanned(MetaList(name, items))
}
pub fn mk_word_item(name: InternedString) -> @MetaItem {
@dummy_spanned(MetaWord(name))
}
pub fn mk_attr(item: @MetaItem) -> Attribute {
dummy_spanned(Attribute_ {
style: ast::AttrInner,
value: item,
is_sugared_doc: false,
})
}
pub fn mk_sugared_doc_attr(text: InternedString, lo: BytePos, hi: BytePos)
-> Attribute {
let style = doc_comment_style(text.get());
let lit = spanned(lo, hi, ast::LitStr(text, ast::CookedStr));
let attr = Attribute_ {
style: style,
value: @spanned(lo, hi, MetaNameValue(InternedString::new("doc"),
lit)),
is_sugared_doc: true
};
spanned(lo, hi, attr)
}
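// --- Illustrative sketch, not part of the original libsyntax source ---
// How the constructors above compose into a complete attribute. The strings
// "crate_type" and "lib" and the helper name `example_mk_attr` are arbitrary
// choices made for this example only.
#[allow(dead_code)]
fn example_mk_attr() -> Attribute {
    // Build the meta item `crate_type = "lib"`...
    let meta = mk_name_value_item_str(InternedString::new("crate_type"),
                                      InternedString::new("lib"));
    // ...and wrap it as an inner-style attribute, roughly `#![crate_type = "lib"]`.
    mk_attr(meta)
}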
/* Searching */
/// Check if `needle` occurs in `haystack` by a structural
/// comparison. This is slightly subtle, and relies on ignoring the
/// span included in the `==` comparison of a plain MetaItem.
pub fn contains(haystack: &[@ast::MetaItem],
needle: @ast::MetaItem) -> bool {
debug!("attr::contains (name={})", needle.name());
haystack.iter().any(|item| {
debug!(" testing: {}", item.name());
item.node == needle.node
})
}
pub fn | <AM: AttrMetaMethods>(metas: &[AM], name: &str) -> bool {
debug!("attr::contains_name (name={})", name);
metas.iter().any(|item| {
debug!(" testing: {}", item.name());
item.name().equiv(&name)
})
}
pub fn first_attr_value_str_by_name(attrs: &[Attribute], name: &str)
-> Option<InternedString> {
attrs.iter()
.find(|at| at.name().equiv(&name))
.and_then(|at| at.value_str())
}
pub fn last_meta_item_value_str_by_name(items: &[@MetaItem], name: &str)
-> Option<InternedString> {
items.rev_iter()
.find(|mi| mi.name().equiv(&name))
.and_then(|i| i.value_str())
}
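// --- Illustrative sketch, not part of the original libsyntax source ---
// How the two lookup helpers above differ: the attribute variant scans front
// to back, the meta-item variant scans back to front. `attrs`, `metas` and
// `example_lookups` are placeholder names for this sketch.
#[allow(dead_code)]
fn example_lookups(attrs: &[Attribute], metas: &[@MetaItem]) -> bool {
    // Value of the first `crate_id = "..."` attribute, if there is one.
    let first = first_attr_value_str_by_name(attrs, "crate_id");
    // Value of the *last* `name = "..."` entry in a meta-item list.
    let last = last_meta_item_value_str_by_name(metas, "name");
    first.is_some() || last.is_some()
}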
/* Higher-level applications */
pub fn sort_meta_items(items: &[@MetaItem]) -> Vec<@MetaItem> {
// This is sort of stupid here, but we need to sort by
// human-readable strings.
let mut v = items.iter()
.map(|&mi| (mi.name(), mi))
.collect::<Vec<(InternedString, @MetaItem)> >();
v.sort_by(|&(ref a, _), &(ref b, _)| a.cmp(b));
// There doesn't seem to be a more optimal way to do this
v.move_iter().map(|(_, m)| {
match m.node {
MetaList(ref n, ref mis) => {
@Spanned {
node: MetaList((*n).clone(),
sort_meta_items(mis.as_slice())),
.. /*bad*/ (*m).clone()
}
}
_ => m
}
}).collect()
}
/**
* From a list of crate attributes get only the meta_items that affect crate
* linkage
*/
pub fn find_linkage_metas(attrs: &[Attribute]) -> Vec<@MetaItem> {
let mut result = Vec::new();
for attr in attrs.iter().filter(|at| at.name().equiv(&("link"))) {
match attr.meta().node {
MetaList(_, ref items) => result.push_all(items.as_slice()),
_ => ()
}
}
result
}
pub fn find_crateid(attrs: &[Attribute]) -> Option<CrateId> {
match first_attr_value_str_by_name(attrs, "crate_id") {
None => None,
Some(id) => from_str::<CrateId>(id.get()),
}
}
#[deriving(Eq)]
pub enum InlineAttr {
InlineNone,
InlineHint,
InlineAlways,
InlineNever,
}
/// True if something like #[inline] is found in the list of attrs.
pub fn find_inline_attr(attrs: &[Attribute]) -> InlineAttr {
// FIXME (#2809)---validate the usage of #[inline] and #[inline(always)]
attrs.iter().fold(InlineNone, |ia,attr| {
match attr.node.value.node {
MetaWord(ref n) if n.equiv(&("inline")) => InlineHint,
MetaList(ref n, ref items) if n.equiv(&("inline")) => {
if contains_name(items.as_slice(), "always") {
InlineAlways
} else if contains_name(items.as_slice(), "never") {
InlineNever
} else {
InlineHint
}
}
_ => ia
}
})
}
/// Tests if any `cfg(...)` meta items in `metas` match `cfg`. e.g.
///
/// test_cfg(`[foo="a", bar]`, `[cfg(foo), cfg(bar)]`) == true
/// test_cfg(`[foo="a", bar]`, `[cfg(not(bar))]`) == false
/// test_cfg(`[foo="a", bar]`, `[cfg(bar, foo="a")]`) == true
/// test_cfg(`[foo="a", bar]`, `[cfg(bar, foo="b")]`) == false
pub fn test_cfg<AM: AttrMetaMethods, It: Iterator<AM>>
(cfg: &[@MetaItem], mut metas: It) -> bool {
// having no #[cfg(...)] attributes counts as matching.
let mut no_cfgs = true;
// this would be much nicer as a chain of iterator adaptors, but
// this doesn't work.
let some_cfg_matches = metas.any(|mi| {
debug!("testing name: {}", mi.name());
if mi.name().equiv(&("cfg")) { // it is a #[cfg()] attribute
debug!("is cfg");
no_cfgs = false;
// only #[cfg(...)] ones are understood.
match mi.meta_item_list() {
Some(cfg_meta) => {
debug!("is cfg(...)");
cfg_meta.iter().all(|cfg_mi| {
debug!("cfg({}[...])", cfg_mi.name());
match cfg_mi.node {
ast::MetaList(ref s, ref not_cfgs)
if s.equiv(&("not")) => {
debug!("not!");
// inside #[cfg(not(...))], so these need to all
// not match.
!not_cfgs.iter().all(|mi| {
debug!("cfg(not({}[...]))", mi.name());
contains(cfg, *mi)
})
}
_ => contains(cfg, *cfg_mi)
}
})
}
None => false
}
} else {
false
}
});
debug!("test_cfg (no_cfgs={}, some_cfg_matches={})", no_cfgs, some_cfg_matches);
no_cfgs || some_cfg_matches
}
/// Represents the #[deprecated="foo"] (etc) attributes.
pub struct Stability {
pub level: StabilityLevel,
pub text: Option<InternedString>
}
/// The available stability levels.
#[deriving(Eq,Ord,Clone,Show)]
pub enum StabilityLevel {
Deprecated,
Experimental,
Unstable,
Stable,
Frozen,
Locked
}
/// Find the first stability attribute. `None` if none exists.
pub fn find_stability<AM: AttrMetaMethods, It: Iterator<AM>>(mut metas: It)
-> Option<Stability> {
for m in metas {
let level = match m.name().get() {
"deprecated" => Deprecated,
"experimental" => Experimental,
"unstable" => Unstable,
"stable" => Stable,
"frozen" => Frozen,
"locked" => Locked,
_ => continue // not a stability level
};
return Some(Stability {
level: level,
text: m.value_str()
});
}
None
}
pub fn require_unique_names(diagnostic: &SpanHandler, metas: &[@MetaItem]) {
let mut set = HashSet::new();
for meta in metas.iter() {
let name = meta.name();
if!set.insert(name.clone()) {
diagnostic.span_fatal(meta.span,
format!("duplicate meta item `{}`", name));
}
}
}
/**
* Fold this over attributes to parse #[repr(...)] forms.
*
* Valid repr contents: any of the primitive integral type names (see
* `int_type_of_word`, below) to specify the discriminant type; and `C`, to use
* the same discriminant size that the corresponding C enum would. These are
* not allowed on univariant or zero-variant enums, which have no discriminant.
*
* If a discriminant type is so specified, then the discriminant will be
* present (before fields, if any) with that type; representation
* optimizations which would remove it will not be done.
*/
pub fn find_repr_attr(diagnostic: &SpanHandler, attr: @ast::MetaItem, acc: ReprAttr)
-> ReprAttr {
let mut acc = acc;
match attr.node {
ast::MetaList(ref s, ref items) if s.equiv(&("repr")) => {
for item in items.iter() {
match item.node {
ast::MetaWord(ref word) => {
let hint = match word.get() {
// Can't use "extern" because it's not a lexical identifier.
"C" => ReprExtern,
_ => match int_type_of_word(word.get()) {
Some(ity) => ReprInt(item.span, ity),
None => {
// Not a word we recognize
diagnostic.span_err(item.span,
"unrecognized representation hint");
ReprAny
}
}
};
if hint!= ReprAny {
if acc == ReprAny {
acc = hint;
} else if acc!= hint {
diagnostic.span_warn(item.span,
"conflicting representation hint ignored")
}
}
}
// Not a word:
_ => diagnostic.span_err(item.span, "unrecognized representation hint")
}
}
}
// Not a "repr" hint: ignore.
_ => { }
}
acc
}
fn int_type_of_word(s: &str) -> Option<IntType> {
match s {
"i8" => Some(SignedInt(ast::TyI8)),
"u8" => Some(UnsignedInt(ast::TyU8)),
"i16" => Some(SignedInt(ast::TyI16)),
"u16" => Some(UnsignedInt(ast::TyU16)),
"i32" => Some(SignedInt(ast::TyI32)),
"u32" => Some(UnsignedInt(ast::TyU32)),
"i64" => Some(SignedInt(ast::TyI64)),
"u64" => Some(UnsignedInt(ast::TyU64)),
"int" => Some(SignedInt(ast::TyI)),
"uint" => Some(UnsignedInt(ast::TyU)),
_ => None
}
}
#[deriving(Eq, Show)]
pub enum ReprAttr {
ReprAny,
ReprInt(Span, IntType),
ReprExtern
}
impl ReprAttr {
pub fn is_ffi_safe(&self) -> bool {
match *self {
ReprAny => false,
ReprInt(_sp, ity) => ity.is_ffi_safe(),
ReprExtern => true
}
}
}
#[deriving(Eq, Show)]
pub enum IntType {
SignedInt(ast::IntTy),
UnsignedInt(ast::UintTy)
}
impl IntType {
#[inline]
pub fn is_signed(self) -> bool {
match self {
SignedInt(..) => true,
UnsignedInt(..) => false
}
}
fn is_ffi_safe(self) -> bool {
match self {
SignedInt(ast::TyI8) | UnsignedInt(ast::TyU8) |
SignedInt(ast::TyI16) | UnsignedInt(ast::TyU16) |
SignedInt(ast::TyI32) | UnsignedInt(ast::TyU32) |
SignedInt(ast::TyI64) | UnsignedInt(ast::TyU64) => true,
_ => false
}
}
}
| contains_name | identifier_name |
attr.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Functions dealing with attributes and meta items
use ast;
use ast::{Attribute, Attribute_, MetaItem, MetaWord, MetaNameValue, MetaList};
use codemap::{Span, Spanned, spanned, dummy_spanned};
use codemap::BytePos;
use diagnostic::SpanHandler;
use parse::comments::{doc_comment_style, strip_doc_comment_decoration};
use parse::token::InternedString;
use parse::token;
use crateid::CrateId;
use collections::HashSet;
pub trait AttrMetaMethods {
// This could be changed to `fn check_name(&self, name: InternedString) ->
// bool` which would facilitate a side table recording which
// attributes/meta items are used/unused.
/// Retrieve the name of the meta item, e.g. foo in #[foo],
/// #[foo="bar"] and #[foo(bar)]
fn name(&self) -> InternedString;
/**
* Gets the string value if self is a MetaNameValue variant
* containing a string, otherwise None.
*/
fn value_str(&self) -> Option<InternedString>;
/// Gets a list of inner meta items from a list MetaItem type.
fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]>;
/**
* If the meta item is a name-value type with a string value then returns
* a tuple containing the name and string value, otherwise `None`
*/
fn name_str_pair(&self) -> Option<(InternedString,InternedString)>;
}
impl AttrMetaMethods for Attribute {
fn name(&self) -> InternedString { self.meta().name() }
fn value_str(&self) -> Option<InternedString> {
self.meta().value_str()
}
fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]> {
self.node.value.meta_item_list()
}
fn name_str_pair(&self) -> Option<(InternedString,InternedString)> {
self.meta().name_str_pair()
}
}
impl AttrMetaMethods for MetaItem {
fn name(&self) -> InternedString {
match self.node {
MetaWord(ref n) => (*n).clone(),
MetaNameValue(ref n, _) => (*n).clone(),
MetaList(ref n, _) => (*n).clone(),
}
}
fn value_str(&self) -> Option<InternedString> {
match self.node {
MetaNameValue(_, ref v) => {
match v.node {
ast::LitStr(ref s, _) => Some((*s).clone()),
_ => None,
}
},
_ => None
}
}
fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]> {
match self.node {
MetaList(_, ref l) => Some(l.as_slice()),
_ => None
}
}
fn name_str_pair(&self) -> Option<(InternedString,InternedString)> {
self.value_str().map(|s| (self.name(), s))
}
}
// Annoying, but required to get test_cfg to work
impl AttrMetaMethods for @MetaItem {
fn name(&self) -> InternedString { (**self).name() }
fn value_str(&self) -> Option<InternedString> { (**self).value_str() }
fn meta_item_list<'a>(&'a self) -> Option<&'a [@MetaItem]> {
(**self).meta_item_list()
}
fn name_str_pair(&self) -> Option<(InternedString,InternedString)> {
(**self).name_str_pair()
}
}
pub trait AttributeMethods {
fn meta(&self) -> @MetaItem;
fn desugar_doc(&self) -> Attribute;
}
impl AttributeMethods for Attribute {
/// Extract the MetaItem from inside this Attribute.
fn meta(&self) -> @MetaItem {
self.node.value
}
/// Convert self to a normal #[doc="foo"] comment, if it is a
/// comment like `///` or `/** */`. (Returns self unchanged for
/// non-sugared doc attributes.)
fn desugar_doc(&self) -> Attribute {
if self.node.is_sugared_doc {
let comment = self.value_str().unwrap();
let meta = mk_name_value_item_str(
InternedString::new("doc"),
token::intern_and_get_ident(strip_doc_comment_decoration(
comment.get())));
mk_attr(meta)
} else {
*self
}
}
}
/* Constructors */
pub fn mk_name_value_item_str(name: InternedString, value: InternedString)
-> @MetaItem {
let value_lit = dummy_spanned(ast::LitStr(value, ast::CookedStr));
mk_name_value_item(name, value_lit)
}
pub fn mk_name_value_item(name: InternedString, value: ast::Lit)
-> @MetaItem {
@dummy_spanned(MetaNameValue(name, value))
}
pub fn mk_list_item(name: InternedString, items: Vec<@MetaItem> ) -> @MetaItem {
@dummy_spanned(MetaList(name, items))
}
pub fn mk_word_item(name: InternedString) -> @MetaItem {
@dummy_spanned(MetaWord(name))
}
pub fn mk_attr(item: @MetaItem) -> Attribute {
dummy_spanned(Attribute_ {
style: ast::AttrInner,
value: item,
is_sugared_doc: false,
})
}
pub fn mk_sugared_doc_attr(text: InternedString, lo: BytePos, hi: BytePos)
-> Attribute {
let style = doc_comment_style(text.get());
let lit = spanned(lo, hi, ast::LitStr(text, ast::CookedStr));
let attr = Attribute_ {
style: style,
value: @spanned(lo, hi, MetaNameValue(InternedString::new("doc"),
lit)),
is_sugared_doc: true
};
spanned(lo, hi, attr)
}
/* Searching */
/// Check if `needle` occurs in `haystack` by a structural
/// comparison. This is slightly subtle, and relies on ignoring the
/// span included in the `==` comparison of a plain MetaItem.
pub fn contains(haystack: &[@ast::MetaItem],
needle: @ast::MetaItem) -> bool {
debug!("attr::contains (name={})", needle.name());
haystack.iter().any(|item| {
debug!(" testing: {}", item.name());
item.node == needle.node
})
}
pub fn contains_name<AM: AttrMetaMethods>(metas: &[AM], name: &str) -> bool {
debug!("attr::contains_name (name={})", name);
metas.iter().any(|item| {
debug!(" testing: {}", item.name());
item.name().equiv(&name)
})
}
pub fn first_attr_value_str_by_name(attrs: &[Attribute], name: &str)
-> Option<InternedString> {
attrs.iter()
.find(|at| at.name().equiv(&name))
.and_then(|at| at.value_str())
}
pub fn last_meta_item_value_str_by_name(items: &[@MetaItem], name: &str)
-> Option<InternedString> {
items.rev_iter()
.find(|mi| mi.name().equiv(&name))
.and_then(|i| i.value_str())
}
/* Higher-level applications */
pub fn sort_meta_items(items: &[@MetaItem]) -> Vec<@MetaItem> | }
}).collect()
}
/**
* From a list of crate attributes get only the meta_items that affect crate
* linkage
*/
pub fn find_linkage_metas(attrs: &[Attribute]) -> Vec<@MetaItem> {
let mut result = Vec::new();
for attr in attrs.iter().filter(|at| at.name().equiv(&("link"))) {
match attr.meta().node {
MetaList(_, ref items) => result.push_all(items.as_slice()),
_ => ()
}
}
result
}
pub fn find_crateid(attrs: &[Attribute]) -> Option<CrateId> {
match first_attr_value_str_by_name(attrs, "crate_id") {
None => None,
Some(id) => from_str::<CrateId>(id.get()),
}
}
#[deriving(Eq)]
pub enum InlineAttr {
InlineNone,
InlineHint,
InlineAlways,
InlineNever,
}
/// True if something like #[inline] is found in the list of attrs.
pub fn find_inline_attr(attrs: &[Attribute]) -> InlineAttr {
// FIXME (#2809)---validate the usage of #[inline] and #[inline(always)]
attrs.iter().fold(InlineNone, |ia,attr| {
match attr.node.value.node {
MetaWord(ref n) if n.equiv(&("inline")) => InlineHint,
MetaList(ref n, ref items) if n.equiv(&("inline")) => {
if contains_name(items.as_slice(), "always") {
InlineAlways
} else if contains_name(items.as_slice(), "never") {
InlineNever
} else {
InlineHint
}
}
_ => ia
}
})
}
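// --- Illustrative sketch, not part of the original libsyntax source ---
// One way a caller might consume the fold above: map `#[inline]`,
// `#[inline(always)]` and `#[inline(never)]` onto a plain yes/no decision.
// The mapping (and the name `example_should_inline`) is only an example,
// not how the compiler itself weighs these hints.
#[allow(dead_code)]
fn example_should_inline(attrs: &[Attribute]) -> bool {
    match find_inline_attr(attrs) {
        InlineAlways | InlineHint => true,
        InlineNever | InlineNone => false,
    }
}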
/// Tests if any `cfg(...)` meta items in `metas` match `cfg`. e.g.
///
/// test_cfg(`[foo="a", bar]`, `[cfg(foo), cfg(bar)]`) == true
/// test_cfg(`[foo="a", bar]`, `[cfg(not(bar))]`) == false
/// test_cfg(`[foo="a", bar]`, `[cfg(bar, foo="a")]`) == true
/// test_cfg(`[foo="a", bar]`, `[cfg(bar, foo="b")]`) == false
pub fn test_cfg<AM: AttrMetaMethods, It: Iterator<AM>>
(cfg: &[@MetaItem], mut metas: It) -> bool {
// having no #[cfg(...)] attributes counts as matching.
let mut no_cfgs = true;
// this would be much nicer as a chain of iterator adaptors, but
// this doesn't work.
let some_cfg_matches = metas.any(|mi| {
debug!("testing name: {}", mi.name());
if mi.name().equiv(&("cfg")) { // it is a #[cfg()] attribute
debug!("is cfg");
no_cfgs = false;
// only #[cfg(...)] ones are understood.
match mi.meta_item_list() {
Some(cfg_meta) => {
debug!("is cfg(...)");
cfg_meta.iter().all(|cfg_mi| {
debug!("cfg({}[...])", cfg_mi.name());
match cfg_mi.node {
ast::MetaList(ref s, ref not_cfgs)
if s.equiv(&("not")) => {
debug!("not!");
// inside #[cfg(not(...))], so these need to all
// not match.
!not_cfgs.iter().all(|mi| {
debug!("cfg(not({}[...]))", mi.name());
contains(cfg, *mi)
})
}
_ => contains(cfg, *cfg_mi)
}
})
}
None => false
}
} else {
false
}
});
debug!("test_cfg (no_cfgs={}, some_cfg_matches={})", no_cfgs, some_cfg_matches);
no_cfgs || some_cfg_matches
}
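// --- Illustrative sketch, not part of the original libsyntax source ---
// Mirrors the doc-comment examples above using the constructors from this
// file: the crate configuration is `[foo = "a", bar]` and the item carries
// the equivalent of `#[cfg(bar)]`, so the call returns true. The name
// `example_test_cfg` is a placeholder for this sketch.
#[allow(dead_code)]
fn example_test_cfg() -> bool {
    // Crate configuration: [foo = "a", bar].
    let cfg = vec!(mk_name_value_item_str(InternedString::new("foo"),
                                          InternedString::new("a")),
                   mk_word_item(InternedString::new("bar")));
    // Item side: one meta item equivalent to `#[cfg(bar)]`.
    let item_metas = vec!(mk_list_item(InternedString::new("cfg"),
                                       vec!(mk_word_item(InternedString::new("bar")))));
    // `bar` is present in the configuration, so this evaluates to true.
    test_cfg(cfg.as_slice(), item_metas.move_iter())
}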
/// Represents the #[deprecated="foo"] (etc) attributes.
pub struct Stability {
pub level: StabilityLevel,
pub text: Option<InternedString>
}
/// The available stability levels.
#[deriving(Eq,Ord,Clone,Show)]
pub enum StabilityLevel {
Deprecated,
Experimental,
Unstable,
Stable,
Frozen,
Locked
}
/// Find the first stability attribute. `None` if none exists.
pub fn find_stability<AM: AttrMetaMethods, It: Iterator<AM>>(mut metas: It)
-> Option<Stability> {
for m in metas {
let level = match m.name().get() {
"deprecated" => Deprecated,
"experimental" => Experimental,
"unstable" => Unstable,
"stable" => Stable,
"frozen" => Frozen,
"locked" => Locked,
_ => continue // not a stability level
};
return Some(Stability {
level: level,
text: m.value_str()
});
}
None
}
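// --- Illustrative sketch, not part of the original libsyntax source ---
// `find_stability` keeps the first recognised stability level it meets. Fed
// the meta item for `experimental = "still in flux"`, it returns
// Some(Stability { level: Experimental, text: Some(...) }); a meta item with
// an unrecognised name is simply skipped. `example_find_stability` is a
// placeholder name.
#[allow(dead_code)]
fn example_find_stability() -> Option<Stability> {
    let metas = vec!(mk_name_value_item_str(InternedString::new("experimental"),
                                            InternedString::new("still in flux")));
    find_stability(metas.move_iter())
}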
pub fn require_unique_names(diagnostic: &SpanHandler, metas: &[@MetaItem]) {
let mut set = HashSet::new();
for meta in metas.iter() {
let name = meta.name();
if!set.insert(name.clone()) {
diagnostic.span_fatal(meta.span,
format!("duplicate meta item `{}`", name));
}
}
}
/**
* Fold this over attributes to parse #[repr(...)] forms.
*
* Valid repr contents: any of the primitive integral type names (see
* `int_type_of_word`, below) to specify the discriminant type; and `C`, to use
* the same discriminant size that the corresponding C enum would. These are
* not allowed on univariant or zero-variant enums, which have no discriminant.
*
* If a discriminant type is so specified, then the discriminant will be
* present (before fields, if any) with that type; representation
* optimizations which would remove it will not be done.
*/
pub fn find_repr_attr(diagnostic: &SpanHandler, attr: @ast::MetaItem, acc: ReprAttr)
-> ReprAttr {
let mut acc = acc;
match attr.node {
ast::MetaList(ref s, ref items) if s.equiv(&("repr")) => {
for item in items.iter() {
match item.node {
ast::MetaWord(ref word) => {
let hint = match word.get() {
// Can't use "extern" because it's not a lexical identifier.
"C" => ReprExtern,
_ => match int_type_of_word(word.get()) {
Some(ity) => ReprInt(item.span, ity),
None => {
// Not a word we recognize
diagnostic.span_err(item.span,
"unrecognized representation hint");
ReprAny
}
}
};
if hint!= ReprAny {
if acc == ReprAny {
acc = hint;
} else if acc!= hint {
diagnostic.span_warn(item.span,
"conflicting representation hint ignored")
}
}
}
// Not a word:
_ => diagnostic.span_err(item.span, "unrecognized representation hint")
}
}
}
// Not a "repr" hint: ignore.
_ => { }
}
acc
}
fn int_type_of_word(s: &str) -> Option<IntType> {
match s {
"i8" => Some(SignedInt(ast::TyI8)),
"u8" => Some(UnsignedInt(ast::TyU8)),
"i16" => Some(SignedInt(ast::TyI16)),
"u16" => Some(UnsignedInt(ast::TyU16)),
"i32" => Some(SignedInt(ast::TyI32)),
"u32" => Some(UnsignedInt(ast::TyU32)),
"i64" => Some(SignedInt(ast::TyI64)),
"u64" => Some(UnsignedInt(ast::TyU64)),
"int" => Some(SignedInt(ast::TyI)),
"uint" => Some(UnsignedInt(ast::TyU)),
_ => None
}
}
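// --- Illustrative sketch, not part of the original libsyntax source ---
// `int_type_of_word` is the lookup `find_repr_attr` uses to decide which
// `#[repr(...)]` words name a discriminant type: "u8" does, while "C" does
// not (it is handled separately above, as ReprExtern). The function name
// `example_int_type_of_word` is a placeholder.
#[allow(dead_code)]
fn example_int_type_of_word() -> bool {
    match (int_type_of_word("u8"), int_type_of_word("C")) {
        (Some(UnsignedInt(ast::TyU8)), None) => true,
        _ => false,
    }
}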
#[deriving(Eq, Show)]
pub enum ReprAttr {
ReprAny,
ReprInt(Span, IntType),
ReprExtern
}
impl ReprAttr {
pub fn is_ffi_safe(&self) -> bool {
match *self {
ReprAny => false,
ReprInt(_sp, ity) => ity.is_ffi_safe(),
ReprExtern => true
}
}
}
#[deriving(Eq, Show)]
pub enum IntType {
SignedInt(ast::IntTy),
UnsignedInt(ast::UintTy)
}
impl IntType {
#[inline]
pub fn is_signed(self) -> bool {
match self {
SignedInt(..) => true,
UnsignedInt(..) => false
}
}
fn is_ffi_safe(self) -> bool {
match self {
SignedInt(ast::TyI8) | UnsignedInt(ast::TyU8) |
SignedInt(ast::TyI16) | UnsignedInt(ast::TyU16) |
SignedInt(ast::TyI32) | UnsignedInt(ast::TyU32) |
SignedInt(ast::TyI64) | UnsignedInt(ast::TyU64) => true,
_ => false
}
}
}
| {
// This is sort of stupid here, but we need to sort by
// human-readable strings.
let mut v = items.iter()
.map(|&mi| (mi.name(), mi))
.collect::<Vec<(InternedString, @MetaItem)> >();
v.sort_by(|&(ref a, _), &(ref b, _)| a.cmp(b));
// There doesn't seem to be a more optimal way to do this
v.move_iter().map(|(_, m)| {
match m.node {
MetaList(ref n, ref mis) => {
@Spanned {
node: MetaList((*n).clone(),
sort_meta_items(mis.as_slice())),
.. /*bad*/ (*m).clone()
}
}
_ => m | identifier_body |
str_concat.rs | //
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use handlebars::{Handlebars, Helper, HelperDef, RenderContext};
use super::super::RenderResult;
#[derive(Clone, Copy)]
pub struct StrConcatHelper;
impl HelperDef for StrConcatHelper {
fn call(&self, h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> RenderResult<()> {
let list: Vec<String> = h.params()
.iter()
.map(|v| v.value())
.filter(|v|!v.is_object())
.map(|v| v.to_string().replace("\"", ""))
.collect();
rc.writer.write(list.concat().into_bytes().as_ref())?;
Ok(())
}
}
pub static STR_CONCAT: StrConcatHelper = StrConcatHelper;
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_concat_helper() {
let mut handlebars = Handlebars::new();
handlebars.register_helper("strConcat", Box::new(STR_CONCAT));
let expected = "foobarbaz";
assert_eq!(
expected,
handlebars
.template_render("{{strConcat \"foo\" \"bar\" \"baz\"}}", &json!({}))
.unwrap()
);
}
} | // Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at | random_line_split |
|
str_concat.rs | // Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use handlebars::{Handlebars, Helper, HelperDef, RenderContext};
use super::super::RenderResult;
#[derive(Clone, Copy)]
pub struct StrConcatHelper;
impl HelperDef for StrConcatHelper {
fn call(&self, h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> RenderResult<()> {
let list: Vec<String> = h.params()
.iter()
.map(|v| v.value())
.filter(|v|!v.is_object())
.map(|v| v.to_string().replace("\"", ""))
.collect();
rc.writer.write(list.concat().into_bytes().as_ref())?;
Ok(())
}
}
pub static STR_CONCAT: StrConcatHelper = StrConcatHelper;
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_concat_helper() |
}
| {
let mut handlebars = Handlebars::new();
handlebars.register_helper("strConcat", Box::new(STR_CONCAT));
let expected = "foobarbaz";
assert_eq!(
expected,
handlebars
.template_render("{{strConcat \"foo\" \"bar\" \"baz\"}}", &json!({}))
.unwrap()
);
} | identifier_body |
str_concat.rs | // Copyright (c) 2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use handlebars::{Handlebars, Helper, HelperDef, RenderContext};
use super::super::RenderResult;
#[derive(Clone, Copy)]
pub struct StrConcatHelper;
impl HelperDef for StrConcatHelper {
fn | (&self, h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> RenderResult<()> {
let list: Vec<String> = h.params()
.iter()
.map(|v| v.value())
.filter(|v|!v.is_object())
.map(|v| v.to_string().replace("\"", ""))
.collect();
rc.writer.write(list.concat().into_bytes().as_ref())?;
Ok(())
}
}
pub static STR_CONCAT: StrConcatHelper = StrConcatHelper;
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_concat_helper() {
let mut handlebars = Handlebars::new();
handlebars.register_helper("strConcat", Box::new(STR_CONCAT));
let expected = "foobarbaz";
assert_eq!(
expected,
handlebars
.template_render("{{strConcat \"foo\" \"bar\" \"baz\"}}", &json!({}))
.unwrap()
);
}
}
| call | identifier_name |
comments.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::CommentStyle::*;
use ast;
use codemap::{BytePos, CharPos, CodeMap, Pos};
use diagnostic;
use parse::lexer::{is_whitespace, Reader};
use parse::lexer::{StringReader, TokenAndSpan};
use parse::lexer::is_block_doc_comment;
use parse::lexer;
use print::pprust;
use std::io;
use std::str;
use std::string::String;
use std::uint;
#[deriving(Clone, PartialEq)]
pub enum CommentStyle {
/// No code on either side of each line of the comment
Isolated,
/// Code exists to the left of the comment
Trailing,
/// Code before /* foo */ and after the comment
Mixed,
/// Just a manual blank line "\n\n", for layout
BlankLine,
}
#[deriving(Clone)]
pub struct Comment {
pub style: CommentStyle,
pub lines: Vec<String>,
pub pos: BytePos,
}
pub fn is_doc_comment(s: &str) -> bool {
(s.starts_with("///") && super::is_doc_comment(s)) ||
s.starts_with("//!") ||
(s.starts_with("/**") && is_block_doc_comment(s)) ||
s.starts_with("/*!")
}
pub fn doc_comment_style(comment: &str) -> ast::AttrStyle {
assert!(is_doc_comment(comment));
if comment.starts_with("//!") || comment.starts_with("/*!") {
ast::AttrInner
} else {
ast::AttrOuter
}
}
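// --- Illustrative sketch, not part of the original libsyntax source ---
// Which comment forms the two functions above accept: `///`, `//!`, `/**`
// and `/*!` are doc comments, and only the `//!`/`/*!` forms desugar to
// inner attributes. `example_doc_comment_style` is a placeholder name.
#[cfg(test)]
#[test]
fn example_doc_comment_style() {
    // Outer and inner doc forms are recognised...
    assert!(is_doc_comment("/// outer"));
    assert!(is_doc_comment("//! inner"));
    // ...but a plain line comment is not a doc comment at all.
    assert!(!is_doc_comment("// plain"));
    // `//!` (like `/*!`) becomes an inner attribute; everything else is outer.
    match doc_comment_style("//! inner") {
        ast::AttrInner => {}
        ast::AttrOuter => panic!("`//!` should map to an inner attribute"),
    }
}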
pub fn strip_doc_comment_decoration(comment: &str) -> String {
/// remove whitespace-only lines from the start/end of lines
fn vertical_trim(lines: Vec<String> ) -> Vec<String> {
let mut i = 0u;
let mut j = lines.len();
// first line of all-stars should be omitted
if lines.len() > 0 &&
lines[0].as_slice().chars().all(|c| c == '*') {
i += 1;
}
while i < j && lines[i].as_slice().trim().is_empty() {
i += 1;
}
// like the first, a last line of all stars should be omitted
if j > i && lines[j - 1]
.as_slice()
.chars()
.skip(1)
.all(|c| c == '*') {
j -= 1;
}
while j > i && lines[j - 1].as_slice().trim().is_empty() {
j -= 1;
}
return lines.slice(i, j).iter().map(|x| (*x).clone()).collect();
}
/// remove a "[ \t]*\*" block from each line, if possible
fn horizontal_trim(lines: Vec<String> ) -> Vec<String> {
let mut i = uint::MAX;
let mut can_trim = true;
let mut first = true;
for line in lines.iter() {
for (j, c) in line.as_slice().chars().enumerate() {
if j > i ||!"* \t".contains_char(c) {
can_trim = false;
break;
}
if c == '*' {
if first {
i = j;
first = false;
} else if i!= j {
can_trim = false;
}
break;
}
}
if i > line.len() {
can_trim = false;
}
if!can_trim {
break;
}
}
if can_trim {
lines.iter().map(|line| {
line.as_slice().slice(i + 1, line.len()).to_string()
}).collect()
} else {
lines
}
}
// one-line comments lose their prefix
static ONLINERS: &'static [&'static str] = &["///!", "///", "//!", "//"];
for prefix in ONLINERS.iter() {
if comment.starts_with(*prefix) {
return comment.slice_from(prefix.len()).to_string();
}
}
if comment.starts_with("/*") {
let lines = comment.slice(3u, comment.len() - 2u)
.lines_any()
.map(|s| s.to_string())
.collect::<Vec<String> >();
let lines = vertical_trim(lines);
let lines = horizontal_trim(lines);
return lines.connect("\n");
}
panic!("not a doc-comment: {}", comment);
}
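// --- Illustrative worked example, not part of the original source ---
// Trace of the two trim passes above on "/**\n * first\n * second\n */":
// `vertical_trim` drops the leading blank/all-star line and the trailing
// whitespace line, then `horizontal_trim` removes the common "[ \t]*\*"
// prefix, so the final connect("\n") yields " first\n second" -- the same
// shape exercised by test_block_doc_comment_2 in the test module below.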
fn push_blank_line_comment(rdr: &StringReader, comments: &mut Vec<Comment>) {
debug!(">>> blank-line comment");
comments.push(Comment {
style: BlankLine,
lines: Vec::new(),
pos: rdr.last_pos,
});
}
fn consume_whitespace_counting_blank_lines(rdr: &mut StringReader,
comments: &mut Vec<Comment>) {
while is_whitespace(rdr.curr) &&!rdr.is_eof() {
if rdr.col == CharPos(0u) && rdr.curr_is('\n') {
push_blank_line_comment(rdr, &mut *comments);
}
rdr.bump();
}
}
fn read_shebang_comment(rdr: &mut StringReader, code_to_the_left: bool,
comments: &mut Vec<Comment>) {
debug!(">>> shebang comment");
let p = rdr.last_pos;
debug!("<<< shebang comment");
comments.push(Comment {
style: if code_to_the_left { Trailing } else { Isolated },
lines: vec!(rdr.read_one_line_comment()),
pos: p
});
}
fn read_line_comments(rdr: &mut StringReader, code_to_the_left: bool,
comments: &mut Vec<Comment>) {
debug!(">>> line comments");
let p = rdr.last_pos;
let mut lines: Vec<String> = Vec::new();
while rdr.curr_is('/') && rdr.nextch_is('/') {
let line = rdr.read_one_line_comment();
debug!("{}", line);
// Doc comments are not put in comments.
if is_doc_comment(line.as_slice()) {
break;
}
lines.push(line);
rdr.consume_non_eol_whitespace();
}
debug!("<<< line comments");
if!lines.is_empty() {
comments.push(Comment {
style: if code_to_the_left { Trailing } else { Isolated },
lines: lines,
pos: p
});
}
}
/// Returns None if the first col chars of s contain a non-whitespace char.
/// Otherwise returns Some(k) where k is first char offset after that leading
/// whitespace. Note k may be outside bounds of s.
fn all_whitespace(s: &str, col: CharPos) -> Option<uint> {
let len = s.len();
let mut col = col.to_uint();
let mut cursor: uint = 0;
while col > 0 && cursor < len {
let r: str::CharRange = s.char_range_at(cursor);
if!r.ch.is_whitespace() {
return None;
}
cursor = r.next;
col -= 1;
}
return Some(cursor);
}
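// --- Illustrative worked example, not part of the original source ---
// all_whitespace("   code", CharPos(3)) == Some(3): the first three chars
// are spaces, so the caller below can safely strip that column prefix.
// all_whitespace(" x code", CharPos(3)) == None: a non-whitespace char sits
// before column 3, so nothing is trimmed from that line.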
fn trim_whitespace_prefix_and_push_line(lines: &mut Vec<String>,
s: String, col: CharPos) {
let len = s.len();
let s1 = match all_whitespace(s.as_slice(), col) {
Some(col) => {
if col < len {
s.as_slice().slice(col, len).to_string()
} else {
"".to_string()
}
}
None => s,
};
debug!("pushing line: {}", s1);
lines.push(s1);
}
fn read_block_comment(rdr: &mut StringReader,
code_to_the_left: bool,
comments: &mut Vec<Comment> ) {
debug!(">>> block comment");
let p = rdr.last_pos;
let mut lines: Vec<String> = Vec::new();
let col = rdr.col;
rdr.bump();
rdr.bump();
let mut curr_line = String::from_str("/*");
// doc-comments are not really comments, they are attributes
if (rdr.curr_is('*') &&!rdr.nextch_is('*')) || rdr.curr_is('!') {
while!(rdr.curr_is('*') && rdr.nextch_is('/')) &&!rdr.is_eof() {
curr_line.push(rdr.curr.unwrap());
rdr.bump();
}
if!rdr.is_eof() {
curr_line.push_str("*/");
rdr.bump();
rdr.bump();
}
if is_block_doc_comment(curr_line.as_slice()) {
return
}
assert!(!curr_line.as_slice().contains_char('\n'));
lines.push(curr_line);
} else {
let mut level: int = 1;
while level > 0 {
debug!("=== block comment level {}", level);
if rdr.is_eof() {
rdr.fatal("unterminated block comment");
}
if rdr.curr_is('\n') {
trim_whitespace_prefix_and_push_line(&mut lines,
curr_line,
col);
curr_line = String::new();
rdr.bump();
} else {
curr_line.push(rdr.curr.unwrap());
if rdr.curr_is('/') && rdr.nextch_is('*') {
rdr.bump();
rdr.bump();
curr_line.push('*');
level += 1;
} else {
if rdr.curr_is('*') && rdr.nextch_is('/') {
rdr.bump();
rdr.bump();
curr_line.push('/');
level -= 1;
} else { rdr.bump(); }
}
}
}
if curr_line.len()!= 0 {
trim_whitespace_prefix_and_push_line(&mut lines,
curr_line,
col);
}
}
let mut style = if code_to_the_left { Trailing } else { Isolated };
rdr.consume_non_eol_whitespace();
if!rdr.is_eof() &&!rdr.curr_is('\n') && lines.len() == 1u {
style = Mixed;
}
debug!("<<< block comment");
comments.push(Comment {style: style, lines: lines, pos: p});
}
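// --- Illustrative worked example, not part of the original source ---
// For a nested, non-doc block comment such as
//     /* outer /* inner */ still outer */
// the level-counting loop above goes 1 -> 2 at the inner "/*" and back down
// at each "*/", so reading only stops at the "*/" that brings `level` to 0.
// Doc block comments ("/**...", "/*!...") take the earlier branch instead
// and, being attributes rather than plain comments, return without being
// recorded here.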
fn consume_comment(rdr: &mut StringReader,
code_to_the_left: bool,
comments: &mut Vec<Comment> ) {
debug!(">>> consume comment");
if rdr.curr_is('/') && rdr.nextch_is('/') {
read_line_comments(rdr, code_to_the_left, comments);
} else if rdr.curr_is('/') && rdr.nextch_is('*') {
read_block_comment(rdr, code_to_the_left, comments);
} else if rdr.curr_is('#') && rdr.nextch_is('!') {
read_shebang_comment(rdr, code_to_the_left, comments);
} else { panic!(); }
debug!("<<< consume comment");
}
#[deriving(Clone)]
pub struct Literal {
pub lit: String,
pub pos: BytePos,
}
// it appears this function is called only from pprust... that's
// probably not a good thing.
pub fn gather_comments_and_literals(span_diagnostic: &diagnostic::SpanHandler,
path: String,
srdr: &mut io::Reader)
-> (Vec<Comment>, Vec<Literal>) {
let src = srdr.read_to_end().unwrap();
let src = String::from_utf8(src).unwrap();
let cm = CodeMap::new();
let filemap = cm.new_filemap(path, src);
let mut rdr = lexer::StringReader::new_raw(span_diagnostic, filemap);
let mut comments: Vec<Comment> = Vec::new();
let mut literals: Vec<Literal> = Vec::new();
let mut first_read: bool = true;
while!rdr.is_eof() {
loop {
let mut code_to_the_left =!first_read;
rdr.consume_non_eol_whitespace();
if rdr.curr_is('\n') {
code_to_the_left = false;
consume_whitespace_counting_blank_lines(&mut rdr, &mut comments);
}
while rdr.peeking_at_comment() {
consume_comment(&mut rdr, code_to_the_left, &mut comments);
consume_whitespace_counting_blank_lines(&mut rdr, &mut comments);
}
break;
}
let bstart = rdr.last_pos;
rdr.next_token();
//discard, and look ahead; we're working with internal state
let TokenAndSpan { tok, sp } = rdr.peek();
if tok.is_lit() {
rdr.with_str_from(bstart, |s| {
debug!("tok lit: {}", s);
literals.push(Literal {lit: s.to_string(), pos: sp.lo});
})
} else {
debug!("tok: {}", pprust::token_to_string(&tok));
}
first_read = false;
}
(comments, literals)
}
#[cfg(test)]
mod test {
use super::*;
#[test] fn test_block_doc_comment_1() |
#[test] fn test_block_doc_comment_2() {
let comment = "/**\n * Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " Test\n Test".to_string());
}
#[test] fn test_block_doc_comment_3() {
let comment = "/**\n let a: *int;\n *a = 5;\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " let a: *int;\n *a = 5;".to_string());
}
#[test] fn test_block_doc_comment_4() {
let comment = "/*******************\n test\n *********************/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " test".to_string());
}
#[test] fn test_line_doc_comment() {
let stripped = strip_doc_comment_decoration("/// test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("///! test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("///test");
assert_eq!(stripped, "test".to_string());
let stripped = strip_doc_comment_decoration("///!test");
assert_eq!(stripped, "test".to_string());
let stripped = strip_doc_comment_decoration("//test");
assert_eq!(stripped, "test".to_string());
}
}
| {
let comment = "/**\n * Test \n ** Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " Test \n* Test\n Test".to_string());
} | identifier_body |
comments.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::CommentStyle::*;
use ast;
use codemap::{BytePos, CharPos, CodeMap, Pos};
use diagnostic;
use parse::lexer::{is_whitespace, Reader};
use parse::lexer::{StringReader, TokenAndSpan};
use parse::lexer::is_block_doc_comment;
use parse::lexer;
use print::pprust;
use std::io;
use std::str;
use std::string::String;
use std::uint;
#[deriving(Clone, PartialEq)]
pub enum CommentStyle {
/// No code on either side of each line of the comment
Isolated,
/// Code exists to the left of the comment
Trailing,
/// Code before /* foo */ and after the comment
Mixed,
/// Just a manual blank line "\n\n", for layout
BlankLine,
}
#[deriving(Clone)]
pub struct Comment {
pub style: CommentStyle,
pub lines: Vec<String>,
pub pos: BytePos,
}
pub fn is_doc_comment(s: &str) -> bool {
(s.starts_with("///") && super::is_doc_comment(s)) ||
s.starts_with("//!") ||
(s.starts_with("/**") && is_block_doc_comment(s)) ||
s.starts_with("/*!")
}
pub fn doc_comment_style(comment: &str) -> ast::AttrStyle {
assert!(is_doc_comment(comment));
if comment.starts_with("//!") || comment.starts_with("/*!") {
ast::AttrInner
} else {
ast::AttrOuter
}
}
pub fn strip_doc_comment_decoration(comment: &str) -> String {
/// remove whitespace-only lines from the start/end of lines
fn vertical_trim(lines: Vec<String> ) -> Vec<String> {
let mut i = 0u;
let mut j = lines.len();
// first line of all-stars should be omitted
if lines.len() > 0 &&
lines[0].as_slice().chars().all(|c| c == '*') {
i += 1;
}
while i < j && lines[i].as_slice().trim().is_empty() {
i += 1;
}
// like the first, a last line of all stars should be omitted
if j > i && lines[j - 1]
.as_slice()
.chars()
.skip(1)
.all(|c| c == '*') {
j -= 1;
}
while j > i && lines[j - 1].as_slice().trim().is_empty() {
j -= 1;
}
return lines.slice(i, j).iter().map(|x| (*x).clone()).collect();
}
/// remove a "[ \t]*\*" block from each line, if possible
fn horizontal_trim(lines: Vec<String> ) -> Vec<String> {
let mut i = uint::MAX;
let mut can_trim = true;
let mut first = true;
for line in lines.iter() {
for (j, c) in line.as_slice().chars().enumerate() {
if j > i ||!"* \t".contains_char(c) |
if c == '*' {
if first {
i = j;
first = false;
} else if i!= j {
can_trim = false;
}
break;
}
}
if i > line.len() {
can_trim = false;
}
if!can_trim {
break;
}
}
if can_trim {
lines.iter().map(|line| {
line.as_slice().slice(i + 1, line.len()).to_string()
}).collect()
} else {
lines
}
}
// one-line comments lose their prefix
static ONLINERS: &'static [&'static str] = &["///!", "///", "//!", "//"];
for prefix in ONLINERS.iter() {
if comment.starts_with(*prefix) {
return comment.slice_from(prefix.len()).to_string();
}
}
if comment.starts_with("/*") {
let lines = comment.slice(3u, comment.len() - 2u)
.lines_any()
.map(|s| s.to_string())
.collect::<Vec<String> >();
let lines = vertical_trim(lines);
let lines = horizontal_trim(lines);
return lines.connect("\n");
}
panic!("not a doc-comment: {}", comment);
}
fn push_blank_line_comment(rdr: &StringReader, comments: &mut Vec<Comment>) {
debug!(">>> blank-line comment");
comments.push(Comment {
style: BlankLine,
lines: Vec::new(),
pos: rdr.last_pos,
});
}
fn consume_whitespace_counting_blank_lines(rdr: &mut StringReader,
comments: &mut Vec<Comment>) {
while is_whitespace(rdr.curr) &&!rdr.is_eof() {
if rdr.col == CharPos(0u) && rdr.curr_is('\n') {
push_blank_line_comment(rdr, &mut *comments);
}
rdr.bump();
}
}
fn read_shebang_comment(rdr: &mut StringReader, code_to_the_left: bool,
comments: &mut Vec<Comment>) {
debug!(">>> shebang comment");
let p = rdr.last_pos;
debug!("<<< shebang comment");
comments.push(Comment {
style: if code_to_the_left { Trailing } else { Isolated },
lines: vec!(rdr.read_one_line_comment()),
pos: p
});
}
fn read_line_comments(rdr: &mut StringReader, code_to_the_left: bool,
comments: &mut Vec<Comment>) {
debug!(">>> line comments");
let p = rdr.last_pos;
let mut lines: Vec<String> = Vec::new();
while rdr.curr_is('/') && rdr.nextch_is('/') {
let line = rdr.read_one_line_comment();
debug!("{}", line);
// Doc comments are not put in comments.
if is_doc_comment(line.as_slice()) {
break;
}
lines.push(line);
rdr.consume_non_eol_whitespace();
}
debug!("<<< line comments");
if!lines.is_empty() {
comments.push(Comment {
style: if code_to_the_left { Trailing } else { Isolated },
lines: lines,
pos: p
});
}
}
/// Returns None if the first col chars of s contain a non-whitespace char.
/// Otherwise returns Some(k) where k is first char offset after that leading
/// whitespace. Note k may be outside bounds of s.
fn all_whitespace(s: &str, col: CharPos) -> Option<uint> {
let len = s.len();
let mut col = col.to_uint();
let mut cursor: uint = 0;
while col > 0 && cursor < len {
let r: str::CharRange = s.char_range_at(cursor);
if!r.ch.is_whitespace() {
return None;
}
cursor = r.next;
col -= 1;
}
return Some(cursor);
}
fn trim_whitespace_prefix_and_push_line(lines: &mut Vec<String>,
s: String, col: CharPos) {
let len = s.len();
let s1 = match all_whitespace(s.as_slice(), col) {
Some(col) => {
if col < len {
s.as_slice().slice(col, len).to_string()
} else {
"".to_string()
}
}
None => s,
};
debug!("pushing line: {}", s1);
lines.push(s1);
}
fn read_block_comment(rdr: &mut StringReader,
code_to_the_left: bool,
comments: &mut Vec<Comment> ) {
debug!(">>> block comment");
let p = rdr.last_pos;
let mut lines: Vec<String> = Vec::new();
let col = rdr.col;
rdr.bump();
rdr.bump();
let mut curr_line = String::from_str("/*");
// doc-comments are not really comments, they are attributes
if (rdr.curr_is('*') &&!rdr.nextch_is('*')) || rdr.curr_is('!') {
while!(rdr.curr_is('*') && rdr.nextch_is('/')) &&!rdr.is_eof() {
curr_line.push(rdr.curr.unwrap());
rdr.bump();
}
if!rdr.is_eof() {
curr_line.push_str("*/");
rdr.bump();
rdr.bump();
}
if is_block_doc_comment(curr_line.as_slice()) {
return
}
assert!(!curr_line.as_slice().contains_char('\n'));
lines.push(curr_line);
} else {
let mut level: int = 1;
while level > 0 {
debug!("=== block comment level {}", level);
if rdr.is_eof() {
rdr.fatal("unterminated block comment");
}
if rdr.curr_is('\n') {
trim_whitespace_prefix_and_push_line(&mut lines,
curr_line,
col);
curr_line = String::new();
rdr.bump();
} else {
curr_line.push(rdr.curr.unwrap());
if rdr.curr_is('/') && rdr.nextch_is('*') {
rdr.bump();
rdr.bump();
curr_line.push('*');
level += 1;
} else {
if rdr.curr_is('*') && rdr.nextch_is('/') {
rdr.bump();
rdr.bump();
curr_line.push('/');
level -= 1;
} else { rdr.bump(); }
}
}
}
if curr_line.len()!= 0 {
trim_whitespace_prefix_and_push_line(&mut lines,
curr_line,
col);
}
}
let mut style = if code_to_the_left { Trailing } else { Isolated };
rdr.consume_non_eol_whitespace();
if!rdr.is_eof() &&!rdr.curr_is('\n') && lines.len() == 1u {
style = Mixed;
}
debug!("<<< block comment");
comments.push(Comment {style: style, lines: lines, pos: p});
}
fn consume_comment(rdr: &mut StringReader,
code_to_the_left: bool,
comments: &mut Vec<Comment> ) {
debug!(">>> consume comment");
if rdr.curr_is('/') && rdr.nextch_is('/') {
read_line_comments(rdr, code_to_the_left, comments);
} else if rdr.curr_is('/') && rdr.nextch_is('*') {
read_block_comment(rdr, code_to_the_left, comments);
} else if rdr.curr_is('#') && rdr.nextch_is('!') {
read_shebang_comment(rdr, code_to_the_left, comments);
} else { panic!(); }
debug!("<<< consume comment");
}
#[deriving(Clone)]
pub struct Literal {
pub lit: String,
pub pos: BytePos,
}
// it appears this function is called only from pprust... that's
// probably not a good thing.
pub fn gather_comments_and_literals(span_diagnostic: &diagnostic::SpanHandler,
path: String,
srdr: &mut io::Reader)
-> (Vec<Comment>, Vec<Literal>) {
let src = srdr.read_to_end().unwrap();
let src = String::from_utf8(src).unwrap();
let cm = CodeMap::new();
let filemap = cm.new_filemap(path, src);
let mut rdr = lexer::StringReader::new_raw(span_diagnostic, filemap);
let mut comments: Vec<Comment> = Vec::new();
let mut literals: Vec<Literal> = Vec::new();
let mut first_read: bool = true;
while!rdr.is_eof() {
loop {
let mut code_to_the_left =!first_read;
rdr.consume_non_eol_whitespace();
if rdr.curr_is('\n') {
code_to_the_left = false;
consume_whitespace_counting_blank_lines(&mut rdr, &mut comments);
}
while rdr.peeking_at_comment() {
consume_comment(&mut rdr, code_to_the_left, &mut comments);
consume_whitespace_counting_blank_lines(&mut rdr, &mut comments);
}
break;
}
let bstart = rdr.last_pos;
rdr.next_token();
//discard, and look ahead; we're working with internal state
let TokenAndSpan { tok, sp } = rdr.peek();
if tok.is_lit() {
rdr.with_str_from(bstart, |s| {
debug!("tok lit: {}", s);
literals.push(Literal {lit: s.to_string(), pos: sp.lo});
})
} else {
debug!("tok: {}", pprust::token_to_string(&tok));
}
first_read = false;
}
(comments, literals)
}
#[cfg(test)]
mod test {
use super::*;
#[test] fn test_block_doc_comment_1() {
let comment = "/**\n * Test \n ** Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " Test \n* Test\n Test".to_string());
}
#[test] fn test_block_doc_comment_2() {
let comment = "/**\n * Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " Test\n Test".to_string());
}
#[test] fn test_block_doc_comment_3() {
let comment = "/**\n let a: *int;\n *a = 5;\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " let a: *int;\n *a = 5;".to_string());
}
#[test] fn test_block_doc_comment_4() {
let comment = "/*******************\n test\n *********************/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " test".to_string());
}
#[test] fn test_line_doc_comment() {
let stripped = strip_doc_comment_decoration("/// test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("///! test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("///test");
assert_eq!(stripped, "test".to_string());
let stripped = strip_doc_comment_decoration("///!test");
assert_eq!(stripped, "test".to_string());
let stripped = strip_doc_comment_decoration("//test");
assert_eq!(stripped, "test".to_string());
}
}
| {
can_trim = false;
break;
} | conditional_block |
comments.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::CommentStyle::*;
use ast;
use codemap::{BytePos, CharPos, CodeMap, Pos};
use diagnostic;
use parse::lexer::{is_whitespace, Reader};
use parse::lexer::{StringReader, TokenAndSpan};
use parse::lexer::is_block_doc_comment;
use parse::lexer;
use print::pprust;
use std::io;
use std::str;
use std::string::String;
use std::uint;
#[deriving(Clone, PartialEq)]
pub enum | {
/// No code on either side of each line of the comment
Isolated,
/// Code exists to the left of the comment
Trailing,
/// Code before /* foo */ and after the comment
Mixed,
/// Just a manual blank line "\n\n", for layout
BlankLine,
}
#[deriving(Clone)]
pub struct Comment {
pub style: CommentStyle,
pub lines: Vec<String>,
pub pos: BytePos,
}
pub fn is_doc_comment(s: &str) -> bool {
(s.starts_with("///") && super::is_doc_comment(s)) ||
s.starts_with("//!") ||
(s.starts_with("/**") && is_block_doc_comment(s)) ||
s.starts_with("/*!")
}
pub fn doc_comment_style(comment: &str) -> ast::AttrStyle {
assert!(is_doc_comment(comment));
if comment.starts_with("//!") || comment.starts_with("/*!") {
ast::AttrInner
} else {
ast::AttrOuter
}
}
pub fn strip_doc_comment_decoration(comment: &str) -> String {
/// remove whitespace-only lines from the start/end of lines
fn vertical_trim(lines: Vec<String> ) -> Vec<String> {
let mut i = 0u;
let mut j = lines.len();
// first line of all-stars should be omitted
if lines.len() > 0 &&
lines[0].as_slice().chars().all(|c| c == '*') {
i += 1;
}
while i < j && lines[i].as_slice().trim().is_empty() {
i += 1;
}
// like the first, a last line of all stars should be omitted
if j > i && lines[j - 1]
.as_slice()
.chars()
.skip(1)
.all(|c| c == '*') {
j -= 1;
}
while j > i && lines[j - 1].as_slice().trim().is_empty() {
j -= 1;
}
return lines.slice(i, j).iter().map(|x| (*x).clone()).collect();
}
/// remove a "[ \t]*\*" block from each line, if possible
fn horizontal_trim(lines: Vec<String> ) -> Vec<String> {
let mut i = uint::MAX;
let mut can_trim = true;
let mut first = true;
for line in lines.iter() {
for (j, c) in line.as_slice().chars().enumerate() {
if j > i ||!"* \t".contains_char(c) {
can_trim = false;
break;
}
if c == '*' {
if first {
i = j;
first = false;
} else if i!= j {
can_trim = false;
}
break;
}
}
if i > line.len() {
can_trim = false;
}
if!can_trim {
break;
}
}
if can_trim {
lines.iter().map(|line| {
line.as_slice().slice(i + 1, line.len()).to_string()
}).collect()
} else {
lines
}
}
// one-line comments lose their prefix
static ONLINERS: &'static [&'static str] = &["///!", "///", "//!", "//"];
for prefix in ONLINERS.iter() {
if comment.starts_with(*prefix) {
return comment.slice_from(prefix.len()).to_string();
}
}
if comment.starts_with("/*") {
let lines = comment.slice(3u, comment.len() - 2u)
.lines_any()
.map(|s| s.to_string())
.collect::<Vec<String> >();
let lines = vertical_trim(lines);
let lines = horizontal_trim(lines);
return lines.connect("\n");
}
panic!("not a doc-comment: {}", comment);
}
fn push_blank_line_comment(rdr: &StringReader, comments: &mut Vec<Comment>) {
debug!(">>> blank-line comment");
comments.push(Comment {
style: BlankLine,
lines: Vec::new(),
pos: rdr.last_pos,
});
}
fn consume_whitespace_counting_blank_lines(rdr: &mut StringReader,
comments: &mut Vec<Comment>) {
while is_whitespace(rdr.curr) &&!rdr.is_eof() {
if rdr.col == CharPos(0u) && rdr.curr_is('\n') {
push_blank_line_comment(rdr, &mut *comments);
}
rdr.bump();
}
}
fn read_shebang_comment(rdr: &mut StringReader, code_to_the_left: bool,
comments: &mut Vec<Comment>) {
debug!(">>> shebang comment");
let p = rdr.last_pos;
debug!("<<< shebang comment");
comments.push(Comment {
style: if code_to_the_left { Trailing } else { Isolated },
lines: vec!(rdr.read_one_line_comment()),
pos: p
});
}
fn read_line_comments(rdr: &mut StringReader, code_to_the_left: bool,
comments: &mut Vec<Comment>) {
debug!(">>> line comments");
let p = rdr.last_pos;
let mut lines: Vec<String> = Vec::new();
while rdr.curr_is('/') && rdr.nextch_is('/') {
let line = rdr.read_one_line_comment();
debug!("{}", line);
// Doc comments are not put in comments.
if is_doc_comment(line.as_slice()) {
break;
}
lines.push(line);
rdr.consume_non_eol_whitespace();
}
debug!("<<< line comments");
if!lines.is_empty() {
comments.push(Comment {
style: if code_to_the_left { Trailing } else { Isolated },
lines: lines,
pos: p
});
}
}
/// Returns None if the first col chars of s contain a non-whitespace char.
/// Otherwise returns Some(k) where k is first char offset after that leading
/// whitespace. Note k may be outside bounds of s.
fn all_whitespace(s: &str, col: CharPos) -> Option<uint> {
let len = s.len();
let mut col = col.to_uint();
let mut cursor: uint = 0;
while col > 0 && cursor < len {
let r: str::CharRange = s.char_range_at(cursor);
if!r.ch.is_whitespace() {
return None;
}
cursor = r.next;
col -= 1;
}
return Some(cursor);
}
fn trim_whitespace_prefix_and_push_line(lines: &mut Vec<String>,
s: String, col: CharPos) {
let len = s.len();
let s1 = match all_whitespace(s.as_slice(), col) {
Some(col) => {
if col < len {
s.as_slice().slice(col, len).to_string()
} else {
"".to_string()
}
}
None => s,
};
debug!("pushing line: {}", s1);
lines.push(s1);
}
fn read_block_comment(rdr: &mut StringReader,
code_to_the_left: bool,
comments: &mut Vec<Comment> ) {
debug!(">>> block comment");
let p = rdr.last_pos;
let mut lines: Vec<String> = Vec::new();
let col = rdr.col;
rdr.bump();
rdr.bump();
let mut curr_line = String::from_str("/*");
// doc-comments are not really comments, they are attributes
if (rdr.curr_is('*') &&!rdr.nextch_is('*')) || rdr.curr_is('!') {
while!(rdr.curr_is('*') && rdr.nextch_is('/')) &&!rdr.is_eof() {
curr_line.push(rdr.curr.unwrap());
rdr.bump();
}
if!rdr.is_eof() {
curr_line.push_str("*/");
rdr.bump();
rdr.bump();
}
if is_block_doc_comment(curr_line.as_slice()) {
return
}
assert!(!curr_line.as_slice().contains_char('\n'));
lines.push(curr_line);
} else {
let mut level: int = 1;
while level > 0 {
debug!("=== block comment level {}", level);
if rdr.is_eof() {
rdr.fatal("unterminated block comment");
}
if rdr.curr_is('\n') {
trim_whitespace_prefix_and_push_line(&mut lines,
curr_line,
col);
curr_line = String::new();
rdr.bump();
} else {
curr_line.push(rdr.curr.unwrap());
if rdr.curr_is('/') && rdr.nextch_is('*') {
rdr.bump();
rdr.bump();
curr_line.push('*');
level += 1;
} else {
if rdr.curr_is('*') && rdr.nextch_is('/') {
rdr.bump();
rdr.bump();
curr_line.push('/');
level -= 1;
} else { rdr.bump(); }
}
}
}
if curr_line.len()!= 0 {
trim_whitespace_prefix_and_push_line(&mut lines,
curr_line,
col);
}
}
let mut style = if code_to_the_left { Trailing } else { Isolated };
rdr.consume_non_eol_whitespace();
if!rdr.is_eof() &&!rdr.curr_is('\n') && lines.len() == 1u {
style = Mixed;
}
debug!("<<< block comment");
comments.push(Comment {style: style, lines: lines, pos: p});
}
fn consume_comment(rdr: &mut StringReader,
code_to_the_left: bool,
comments: &mut Vec<Comment> ) {
debug!(">>> consume comment");
if rdr.curr_is('/') && rdr.nextch_is('/') {
read_line_comments(rdr, code_to_the_left, comments);
} else if rdr.curr_is('/') && rdr.nextch_is('*') {
read_block_comment(rdr, code_to_the_left, comments);
} else if rdr.curr_is('#') && rdr.nextch_is('!') {
read_shebang_comment(rdr, code_to_the_left, comments);
} else { panic!(); }
debug!("<<< consume comment");
}
#[deriving(Clone)]
pub struct Literal {
pub lit: String,
pub pos: BytePos,
}
// it appears this function is called only from pprust... that's
// probably not a good thing.
pub fn gather_comments_and_literals(span_diagnostic: &diagnostic::SpanHandler,
path: String,
srdr: &mut io::Reader)
-> (Vec<Comment>, Vec<Literal>) {
let src = srdr.read_to_end().unwrap();
let src = String::from_utf8(src).unwrap();
let cm = CodeMap::new();
let filemap = cm.new_filemap(path, src);
let mut rdr = lexer::StringReader::new_raw(span_diagnostic, filemap);
let mut comments: Vec<Comment> = Vec::new();
let mut literals: Vec<Literal> = Vec::new();
let mut first_read: bool = true;
while !rdr.is_eof() {
loop {
let mut code_to_the_left = !first_read;
rdr.consume_non_eol_whitespace();
if rdr.curr_is('\n') {
code_to_the_left = false;
consume_whitespace_counting_blank_lines(&mut rdr, &mut comments);
}
while rdr.peeking_at_comment() {
consume_comment(&mut rdr, code_to_the_left, &mut comments);
consume_whitespace_counting_blank_lines(&mut rdr, &mut comments);
}
break;
}
let bstart = rdr.last_pos;
rdr.next_token();
//discard, and look ahead; we're working with internal state
let TokenAndSpan { tok, sp } = rdr.peek();
if tok.is_lit() {
rdr.with_str_from(bstart, |s| {
debug!("tok lit: {}", s);
literals.push(Literal {lit: s.to_string(), pos: sp.lo});
})
} else {
debug!("tok: {}", pprust::token_to_string(&tok));
}
first_read = false;
}
(comments, literals)
}
#[cfg(test)]
mod test {
use super::*;
#[test] fn test_block_doc_comment_1() {
let comment = "/**\n * Test \n ** Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " Test \n* Test\n Test".to_string());
}
#[test] fn test_block_doc_comment_2() {
let comment = "/**\n * Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " Test\n Test".to_string());
}
#[test] fn test_block_doc_comment_3() {
let comment = "/**\n let a: *int;\n *a = 5;\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " let a: *int;\n *a = 5;".to_string());
}
#[test] fn test_block_doc_comment_4() {
let comment = "/*******************\n test\n *********************/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " test".to_string());
}
#[test] fn test_line_doc_comment() {
let stripped = strip_doc_comment_decoration("/// test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("///! test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("///test");
assert_eq!(stripped, "test".to_string());
let stripped = strip_doc_comment_decoration("///!test");
assert_eq!(stripped, "test".to_string());
let stripped = strip_doc_comment_decoration("//test");
assert_eq!(stripped, "test".to_string());
}
}
| CommentStyle | identifier_name |
comments.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::CommentStyle::*;
use ast;
use codemap::{BytePos, CharPos, CodeMap, Pos};
use diagnostic;
use parse::lexer::{is_whitespace, Reader};
use parse::lexer::{StringReader, TokenAndSpan};
use parse::lexer::is_block_doc_comment;
use parse::lexer;
use print::pprust;
use std::io;
use std::str;
use std::string::String;
use std::uint;
#[deriving(Clone, PartialEq)]
pub enum CommentStyle {
/// No code on either side of each line of the comment
Isolated,
/// Code exists to the left of the comment
Trailing,
/// Code before /* foo */ and after the comment
Mixed,
/// Just a manual blank line "\n\n", for layout
BlankLine,
}
#[deriving(Clone)]
pub struct Comment {
pub style: CommentStyle,
pub lines: Vec<String>,
pub pos: BytePos,
}
pub fn is_doc_comment(s: &str) -> bool {
(s.starts_with("///") && super::is_doc_comment(s)) ||
s.starts_with("//!") ||
(s.starts_with("/**") && is_block_doc_comment(s)) ||
s.starts_with("/*!")
}
pub fn doc_comment_style(comment: &str) -> ast::AttrStyle {
assert!(is_doc_comment(comment));
if comment.starts_with("//!") || comment.starts_with("/*!") {
ast::AttrInner
} else {
ast::AttrOuter
}
}
pub fn strip_doc_comment_decoration(comment: &str) -> String {
/// remove whitespace-only lines from the start/end of lines
fn vertical_trim(lines: Vec<String> ) -> Vec<String> {
let mut i = 0u;
let mut j = lines.len();
// first line of all-stars should be omitted
if lines.len() > 0 &&
lines[0].as_slice().chars().all(|c| c == '*') {
i += 1;
}
while i < j && lines[i].as_slice().trim().is_empty() {
i += 1;
}
// like the first, a last line of all stars should be omitted
if j > i && lines[j - 1]
.as_slice()
.chars()
.skip(1)
.all(|c| c == '*') {
j -= 1;
}
while j > i && lines[j - 1].as_slice().trim().is_empty() {
j -= 1;
}
return lines.slice(i, j).iter().map(|x| (*x).clone()).collect();
}
/// remove a "[ \t]*\*" block from each line, if possible
fn horizontal_trim(lines: Vec<String> ) -> Vec<String> {
let mut i = uint::MAX;
let mut can_trim = true;
let mut first = true;
for line in lines.iter() {
for (j, c) in line.as_slice().chars().enumerate() {
if j > i || !"* \t".contains_char(c) {
can_trim = false;
break;
}
if c == '*' {
if first {
i = j;
first = false;
} else if i != j {
can_trim = false;
}
break;
}
}
if i > line.len() {
can_trim = false;
}
if !can_trim {
break;
}
}
if can_trim {
lines.iter().map(|line| {
line.as_slice().slice(i + 1, line.len()).to_string()
}).collect()
} else {
lines
}
}
// one-line comments lose their prefix
static ONLINERS: &'static [&'static str] = &["///!", "///", "//!", "//"];
for prefix in ONLINERS.iter() {
if comment.starts_with(*prefix) {
return comment.slice_from(prefix.len()).to_string();
}
}
if comment.starts_with("/*") {
let lines = comment.slice(3u, comment.len() - 2u)
.lines_any()
.map(|s| s.to_string())
.collect::<Vec<String> >();
let lines = vertical_trim(lines);
let lines = horizontal_trim(lines);
return lines.connect("\n");
}
panic!("not a doc-comment: {}", comment);
}
fn push_blank_line_comment(rdr: &StringReader, comments: &mut Vec<Comment>) {
debug!(">>> blank-line comment");
comments.push(Comment {
style: BlankLine,
lines: Vec::new(),
pos: rdr.last_pos,
});
}
fn consume_whitespace_counting_blank_lines(rdr: &mut StringReader,
comments: &mut Vec<Comment>) {
while is_whitespace(rdr.curr) && !rdr.is_eof() {
if rdr.col == CharPos(0u) && rdr.curr_is('\n') {
push_blank_line_comment(rdr, &mut *comments);
}
rdr.bump();
}
}
fn read_shebang_comment(rdr: &mut StringReader, code_to_the_left: bool,
comments: &mut Vec<Comment>) {
debug!(">>> shebang comment");
let p = rdr.last_pos;
debug!("<<< shebang comment");
comments.push(Comment {
style: if code_to_the_left { Trailing } else { Isolated },
lines: vec!(rdr.read_one_line_comment()),
pos: p
});
}
fn read_line_comments(rdr: &mut StringReader, code_to_the_left: bool,
comments: &mut Vec<Comment>) {
debug!(">>> line comments");
let p = rdr.last_pos;
let mut lines: Vec<String> = Vec::new();
while rdr.curr_is('/') && rdr.nextch_is('/') {
let line = rdr.read_one_line_comment();
debug!("{}", line);
// Doc comments are not put in comments.
if is_doc_comment(line.as_slice()) {
break;
}
lines.push(line);
rdr.consume_non_eol_whitespace();
}
debug!("<<< line comments");
if !lines.is_empty() {
comments.push(Comment {
style: if code_to_the_left { Trailing } else { Isolated },
lines: lines,
pos: p
});
}
}
/// Returns None if the first col chars of s contain a non-whitespace char.
/// Otherwise returns Some(k) where k is the first char offset after that leading
/// whitespace. Note k may be outside bounds of s.
fn all_whitespace(s: &str, col: CharPos) -> Option<uint> {
let len = s.len();
let mut col = col.to_uint();
let mut cursor: uint = 0;
while col > 0 && cursor < len {
let r: str::CharRange = s.char_range_at(cursor);
if !r.ch.is_whitespace() {
return None;
}
cursor = r.next;
col -= 1;
}
return Some(cursor);
}
fn trim_whitespace_prefix_and_push_line(lines: &mut Vec<String>,
s: String, col: CharPos) {
let len = s.len();
let s1 = match all_whitespace(s.as_slice(), col) {
Some(col) => {
if col < len {
s.as_slice().slice(col, len).to_string()
} else {
"".to_string()
}
}
None => s,
};
debug!("pushing line: {}", s1);
lines.push(s1);
}
fn read_block_comment(rdr: &mut StringReader,
code_to_the_left: bool,
comments: &mut Vec<Comment> ) {
debug!(">>> block comment");
let p = rdr.last_pos;
let mut lines: Vec<String> = Vec::new();
let col = rdr.col;
rdr.bump();
rdr.bump();
let mut curr_line = String::from_str("/*");
// doc-comments are not really comments, they are attributes
if (rdr.curr_is('*') && !rdr.nextch_is('*')) || rdr.curr_is('!') {
while !(rdr.curr_is('*') && rdr.nextch_is('/')) && !rdr.is_eof() {
curr_line.push(rdr.curr.unwrap());
rdr.bump();
}
if !rdr.is_eof() {
curr_line.push_str("*/");
rdr.bump();
rdr.bump();
}
if is_block_doc_comment(curr_line.as_slice()) {
return
}
assert!(!curr_line.as_slice().contains_char('\n'));
lines.push(curr_line);
} else {
let mut level: int = 1;
while level > 0 {
debug!("=== block comment level {}", level);
if rdr.is_eof() {
rdr.fatal("unterminated block comment");
}
if rdr.curr_is('\n') {
trim_whitespace_prefix_and_push_line(&mut lines,
curr_line,
col);
curr_line = String::new();
rdr.bump();
} else {
curr_line.push(rdr.curr.unwrap());
if rdr.curr_is('/') && rdr.nextch_is('*') {
rdr.bump();
rdr.bump();
curr_line.push('*');
level += 1;
} else {
if rdr.curr_is('*') && rdr.nextch_is('/') { | curr_line.push('/');
level -= 1;
} else { rdr.bump(); }
}
}
}
if curr_line.len() != 0 {
trim_whitespace_prefix_and_push_line(&mut lines,
curr_line,
col);
}
}
let mut style = if code_to_the_left { Trailing } else { Isolated };
rdr.consume_non_eol_whitespace();
if !rdr.is_eof() && !rdr.curr_is('\n') && lines.len() == 1u {
style = Mixed;
}
debug!("<<< block comment");
comments.push(Comment {style: style, lines: lines, pos: p});
}
fn consume_comment(rdr: &mut StringReader,
code_to_the_left: bool,
comments: &mut Vec<Comment> ) {
debug!(">>> consume comment");
if rdr.curr_is('/') && rdr.nextch_is('/') {
read_line_comments(rdr, code_to_the_left, comments);
} else if rdr.curr_is('/') && rdr.nextch_is('*') {
read_block_comment(rdr, code_to_the_left, comments);
} else if rdr.curr_is('#') && rdr.nextch_is('!') {
read_shebang_comment(rdr, code_to_the_left, comments);
} else { panic!(); }
debug!("<<< consume comment");
}
#[deriving(Clone)]
pub struct Literal {
pub lit: String,
pub pos: BytePos,
}
// it appears this function is called only from pprust... that's
// probably not a good thing.
pub fn gather_comments_and_literals(span_diagnostic: &diagnostic::SpanHandler,
path: String,
srdr: &mut io::Reader)
-> (Vec<Comment>, Vec<Literal>) {
let src = srdr.read_to_end().unwrap();
let src = String::from_utf8(src).unwrap();
let cm = CodeMap::new();
let filemap = cm.new_filemap(path, src);
let mut rdr = lexer::StringReader::new_raw(span_diagnostic, filemap);
let mut comments: Vec<Comment> = Vec::new();
let mut literals: Vec<Literal> = Vec::new();
let mut first_read: bool = true;
while !rdr.is_eof() {
loop {
let mut code_to_the_left = !first_read;
rdr.consume_non_eol_whitespace();
if rdr.curr_is('\n') {
code_to_the_left = false;
consume_whitespace_counting_blank_lines(&mut rdr, &mut comments);
}
while rdr.peeking_at_comment() {
consume_comment(&mut rdr, code_to_the_left, &mut comments);
consume_whitespace_counting_blank_lines(&mut rdr, &mut comments);
}
break;
}
let bstart = rdr.last_pos;
rdr.next_token();
//discard, and look ahead; we're working with internal state
let TokenAndSpan { tok, sp } = rdr.peek();
if tok.is_lit() {
rdr.with_str_from(bstart, |s| {
debug!("tok lit: {}", s);
literals.push(Literal {lit: s.to_string(), pos: sp.lo});
})
} else {
debug!("tok: {}", pprust::token_to_string(&tok));
}
first_read = false;
}
(comments, literals)
}
#[cfg(test)]
mod test {
use super::*;
#[test] fn test_block_doc_comment_1() {
let comment = "/**\n * Test \n ** Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " Test \n* Test\n Test".to_string());
}
#[test] fn test_block_doc_comment_2() {
let comment = "/**\n * Test\n * Test\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " Test\n Test".to_string());
}
#[test] fn test_block_doc_comment_3() {
let comment = "/**\n let a: *int;\n *a = 5;\n*/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " let a: *int;\n *a = 5;".to_string());
}
#[test] fn test_block_doc_comment_4() {
let comment = "/*******************\n test\n *********************/";
let stripped = strip_doc_comment_decoration(comment);
assert_eq!(stripped, " test".to_string());
}
#[test] fn test_line_doc_comment() {
let stripped = strip_doc_comment_decoration("/// test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("///! test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("// test");
assert_eq!(stripped, " test".to_string());
let stripped = strip_doc_comment_decoration("///test");
assert_eq!(stripped, "test".to_string());
let stripped = strip_doc_comment_decoration("///!test");
assert_eq!(stripped, "test".to_string());
let stripped = strip_doc_comment_decoration("//test");
assert_eq!(stripped, "test".to_string());
}
} | rdr.bump();
rdr.bump(); | random_line_split |
overload.rs | // https://rustbyexample.com/macros/overload.html
// http://rust-lang-ja.org/rust-by-example/macros/overload.html
// `test!` will compare `$left` and `$right`
// in different ways depending on how you invoke it:
macro_rules! test {
// Arguments don't need to be separated by a comma.
// Any template can be used!
($left:expr; and $right:expr) => (
println!("{:?} and {:?} is {:?}",
stringify!($left),
stringify!($right),
$left && $right)
);
// ^ each arm must end with a semicolon.
($left:expr; or $right:expr) => (
println!("{:?} or {:?} is {:?}",
stringify!($left),
stringify!($right),
$left || $right)
);
}
fn | () {
test!(1i32 + 1 == 2i32; and 2i32 * 2 == 4i32);
test!(true; or false);
}
| main | identifier_name |
overload.rs | // https://rustbyexample.com/macros/overload.html
// http://rust-lang-ja.org/rust-by-example/macros/overload.html
// `test!` will compare `$left` and `$right`
// in different ways depending on how you invoke it:
macro_rules! test {
// Arguments don't need to be separated by a comma.
// Any template can be used!
($left:expr; and $right:expr) => (
println!("{:?} and {:?} is {:?}",
stringify!($left),
stringify!($right),
$left && $right)
);
// ^ each arm must end with a semicolon.
($left:expr; or $right:expr) => (
println!("{:?} or {:?} is {:?}",
stringify!($left),
stringify!($right),
$left || $right)
);
}
fn main() | {
test!(1i32 + 1 == 2i32; and 2i32 * 2 == 4i32);
test!(true; or false);
} | identifier_body |
|
overload.rs | // https://rustbyexample.com/macros/overload.html
// http://rust-lang-ja.org/rust-by-example/macros/overload.html
// `test!` will compare `$left` and `$right`
// in different ways depending on how you invoke it:
macro_rules! test {
// Arguments don't need to be separated by a comma.
// Any template can be used! | );
// ^ each arm must end with a semicolon.
($left:expr; or $right:expr) => (
println!("{:?} or {:?} is {:?}",
stringify!($left),
stringify!($right),
$left || $right)
);
}
fn main() {
test!(1i32 + 1 == 2i32; and 2i32 * 2 == 4i32);
test!(true; or false);
} | ($left:expr; and $right:expr) => (
println!("{:?} and {:?} is {:?}",
stringify!($left),
stringify!($right),
$left && $right) | random_line_split |
weird-exprs.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::util;
// Just a grab bag of stuff that you wouldn't want to actually write.
fn strange() -> bool { let _x: bool = return true; }
fn funny() {
fn f(_x: ()) { }
f(return);
}
fn what() {
fn the(x: @mut bool) { return while !*x { *x = true; }; }
let i = @mut false;
let dont = {||the(i)};
dont();
assert!((*i));
}
fn zombiejesus() {
loop {
while (return) {
if (return) {
match (return) {
1 => {
if (return) {
return
} else {
return
}
}
_ => { return }
};
} else if (return) {
return;
}
}
if (return) { break; }
}
}
fn notsure() {
let mut _x;
let mut _y = (_x = 0) == (_x = 0);
let mut _z = (_x = 0) < (_x = 0);
let _a = (_x += 0) == (_x = 0);
let _b = util::swap(&mut _y, &mut _z) == util::swap(&mut _y, &mut _z);
}
fn canttouchthis() -> uint {
fn p() -> bool { true }
let _a = (assert!((true)) == (assert!(p())));
let _c = (assert!((p())) == ());
let _b: bool = (debug!("%d", 0) == (return 0u));
}
fn angrydome() {
loop { if break { } }
let mut i = 0;
loop { i += 1; if i == 1 { match (loop) { 1 => { }, _ => fail!("wat") } }
break; }
}
fn evil_lincoln() { let evil = debug!("lincoln"); }
pub fn | () {
strange();
funny();
what();
zombiejesus();
notsure();
canttouchthis();
angrydome();
evil_lincoln();
}
| main | identifier_name |
weird-exprs.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::util;
// Just a grab bag of stuff that you wouldn't want to actually write.
fn strange() -> bool { let _x: bool = return true; }
fn funny() {
fn f(_x: ()) { }
f(return);
}
fn what() {
fn the(x: @mut bool) { return while !*x { *x = true; }; }
let i = @mut false;
let dont = {||the(i)};
dont();
assert!((*i));
}
fn zombiejesus() {
loop {
while (return) {
if (return) {
match (return) {
1 => {
if (return) {
return
} else {
return
}
}
_ => { return }
};
} else if (return) {
return;
}
}
if (return) { break; }
}
}
fn notsure() {
let mut _x;
let mut _y = (_x = 0) == (_x = 0);
let mut _z = (_x = 0) < (_x = 0);
let _a = (_x += 0) == (_x = 0);
let _b = util::swap(&mut _y, &mut _z) == util::swap(&mut _y, &mut _z);
}
fn canttouchthis() -> uint {
fn p() -> bool { true }
let _a = (assert!((true)) == (assert!(p())));
let _c = (assert!((p())) == ());
let _b: bool = (debug!("%d", 0) == (return 0u));
}
fn angrydome() {
loop { if break { } }
let mut i = 0;
loop { i += 1; if i == 1 { match (loop) { 1 => { }, _ => fail!("wat") } }
break; }
}
fn evil_lincoln() |
pub fn main() {
strange();
funny();
what();
zombiejesus();
notsure();
canttouchthis();
angrydome();
evil_lincoln();
}
| { let evil = debug!("lincoln"); } | identifier_body |
weird-exprs.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::util;
// Just a grab bag of stuff that you wouldn't want to actually write.
fn strange() -> bool { let _x: bool = return true; }
fn funny() {
fn f(_x: ()) { }
f(return);
}
fn what() {
fn the(x: @mut bool) { return while !*x { *x = true; }; }
let i = @mut false;
let dont = {||the(i)};
dont();
assert!((*i));
}
fn zombiejesus() {
loop {
while (return) {
if (return) {
match (return) {
1 => {
if (return) {
return
} else {
return
}
}
_ => { return }
};
} else if (return) {
return;
}
}
if (return) { break; }
}
}
fn notsure() {
let mut _x;
let mut _y = (_x = 0) == (_x = 0);
let mut _z = (_x = 0) < (_x = 0);
let _a = (_x += 0) == (_x = 0); | let _a = (assert!((true)) == (assert!(p())));
let _c = (assert!((p())) == ());
let _b: bool = (debug!("%d", 0) == (return 0u));
}
fn angrydome() {
loop { if break { } }
let mut i = 0;
loop { i += 1; if i == 1 { match (loop) { 1 => { }, _ => fail!("wat") } }
break; }
}
fn evil_lincoln() { let evil = debug!("lincoln"); }
pub fn main() {
strange();
funny();
what();
zombiejesus();
notsure();
canttouchthis();
angrydome();
evil_lincoln();
} | let _b = util::swap(&mut _y, &mut _z) == util::swap(&mut _y, &mut _z);
}
fn canttouchthis() -> uint {
fn p() -> bool { true } | random_line_split |
weird-exprs.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::util;
// Just a grab bag of stuff that you wouldn't want to actually write.
fn strange() -> bool { let _x: bool = return true; }
fn funny() {
fn f(_x: ()) { }
f(return);
}
fn what() {
fn the(x: @mut bool) { return while !*x { *x = true; }; }
let i = @mut false;
let dont = {||the(i)};
dont();
assert!((*i));
}
fn zombiejesus() {
loop {
while (return) {
if (return) {
match (return) {
1 => {
if (return) {
return
} else {
return
}
}
_ => |
};
} else if (return) {
return;
}
}
if (return) { break; }
}
}
fn notsure() {
let mut _x;
let mut _y = (_x = 0) == (_x = 0);
let mut _z = (_x = 0) < (_x = 0);
let _a = (_x += 0) == (_x = 0);
let _b = util::swap(&mut _y, &mut _z) == util::swap(&mut _y, &mut _z);
}
fn canttouchthis() -> uint {
fn p() -> bool { true }
let _a = (assert!((true)) == (assert!(p())));
let _c = (assert!((p())) == ());
let _b: bool = (debug!("%d", 0) == (return 0u));
}
fn angrydome() {
loop { if break { } }
let mut i = 0;
loop { i += 1; if i == 1 { match (loop) { 1 => { }, _ => fail!("wat") } }
break; }
}
fn evil_lincoln() { let evil = debug!("lincoln"); }
pub fn main() {
strange();
funny();
what();
zombiejesus();
notsure();
canttouchthis();
angrydome();
evil_lincoln();
}
| { return } | conditional_block |
scalar.rs | use std::usize;
use test::{black_box, Bencher};
use tipb::ScalarFuncSig;
fn get_scalar_args_with_match(sig: ScalarFuncSig) -> (usize, usize) {
// Only select some functions to benchmark
let (min_args, max_args) = match sig {
ScalarFuncSig::LtInt => (2, 2),
ScalarFuncSig::CastIntAsInt => (1, 1),
ScalarFuncSig::IfInt => (3, 3),
ScalarFuncSig::JsonArraySig => (0, usize::MAX),
ScalarFuncSig::CoalesceDecimal => (1, usize::MAX),
ScalarFuncSig::JsonExtractSig => (2, usize::MAX),
ScalarFuncSig::JsonSetSig => (3, usize::MAX),
_ => (0, 0),
};
(min_args, max_args)
}
fn init_scalar_args_map() -> HashMap<ScalarFuncSig, (usize, usize)> {
let mut m: HashMap<ScalarFuncSig, (usize, usize)> = HashMap::default();
let tbls = vec![
(ScalarFuncSig::LtInt, (2, 2)),
(ScalarFuncSig::CastIntAsInt, (1, 1)),
(ScalarFuncSig::IfInt, (3, 3)),
(ScalarFuncSig::JsonArraySig, (0, usize::MAX)),
(ScalarFuncSig::CoalesceDecimal, (1, usize::MAX)),
(ScalarFuncSig::JsonExtractSig, (2, usize::MAX)),
(ScalarFuncSig::JsonSetSig, (3, usize::MAX)),
(ScalarFuncSig::Acos, (0, 0)),
];
for tbl in tbls {
m.insert(tbl.0, tbl.1);
}
m
}
fn get_scalar_args_with_map(
m: &HashMap<ScalarFuncSig, (usize, usize)>,
sig: ScalarFuncSig,
) -> (usize, usize) {
if let Some((min_args, max_args)) = m.get(&sig).cloned() {
return (min_args, max_args);
}
(0, 0)
}
#[bench]
fn bench_get_scalar_args_with_match(b: &mut Bencher) {
b.iter(|| {
for _ in 0..1000 {
black_box(get_scalar_args_with_match(black_box(ScalarFuncSig::AbsInt)));
}
})
}
#[bench]
fn bench_get_scalar_args_with_map(b: &mut Bencher) {
let m = init_scalar_args_map();
b.iter(|| {
for _ in 0..1000 {
black_box(get_scalar_args_with_map(
black_box(&m),
black_box(ScalarFuncSig::AbsInt),
));
}
})
} | use collections::HashMap; | random_line_split |
|
scalar.rs | use collections::HashMap;
use std::usize;
use test::{black_box, Bencher};
use tipb::ScalarFuncSig;
fn get_scalar_args_with_match(sig: ScalarFuncSig) -> (usize, usize) {
// Only select some functions to benchmark
let (min_args, max_args) = match sig {
ScalarFuncSig::LtInt => (2, 2),
ScalarFuncSig::CastIntAsInt => (1, 1),
ScalarFuncSig::IfInt => (3, 3),
ScalarFuncSig::JsonArraySig => (0, usize::MAX),
ScalarFuncSig::CoalesceDecimal => (1, usize::MAX),
ScalarFuncSig::JsonExtractSig => (2, usize::MAX),
ScalarFuncSig::JsonSetSig => (3, usize::MAX),
_ => (0, 0),
};
(min_args, max_args)
}
fn init_scalar_args_map() -> HashMap<ScalarFuncSig, (usize, usize)> {
let mut m: HashMap<ScalarFuncSig, (usize, usize)> = HashMap::default();
let tbls = vec![
(ScalarFuncSig::LtInt, (2, 2)),
(ScalarFuncSig::CastIntAsInt, (1, 1)),
(ScalarFuncSig::IfInt, (3, 3)),
(ScalarFuncSig::JsonArraySig, (0, usize::MAX)),
(ScalarFuncSig::CoalesceDecimal, (1, usize::MAX)),
(ScalarFuncSig::JsonExtractSig, (2, usize::MAX)),
(ScalarFuncSig::JsonSetSig, (3, usize::MAX)),
(ScalarFuncSig::Acos, (0, 0)),
];
for tbl in tbls {
m.insert(tbl.0, tbl.1);
}
m
}
fn get_scalar_args_with_map(
m: &HashMap<ScalarFuncSig, (usize, usize)>,
sig: ScalarFuncSig,
) -> (usize, usize) {
if let Some((min_args, max_args)) = m.get(&sig).cloned() {
return (min_args, max_args);
}
(0, 0)
}
#[bench]
fn bench_get_scalar_args_with_match(b: &mut Bencher) |
#[bench]
fn bench_get_scalar_args_with_map(b: &mut Bencher) {
let m = init_scalar_args_map();
b.iter(|| {
for _ in 0..1000 {
black_box(get_scalar_args_with_map(
black_box(&m),
black_box(ScalarFuncSig::AbsInt),
));
}
})
}
| {
b.iter(|| {
for _ in 0..1000 {
black_box(get_scalar_args_with_match(black_box(ScalarFuncSig::AbsInt)));
}
})
} | identifier_body |
scalar.rs | use collections::HashMap;
use std::usize;
use test::{black_box, Bencher};
use tipb::ScalarFuncSig;
fn get_scalar_args_with_match(sig: ScalarFuncSig) -> (usize, usize) {
// Only select some functions to benchmark
let (min_args, max_args) = match sig {
ScalarFuncSig::LtInt => (2, 2),
ScalarFuncSig::CastIntAsInt => (1, 1),
ScalarFuncSig::IfInt => (3, 3),
ScalarFuncSig::JsonArraySig => (0, usize::MAX),
ScalarFuncSig::CoalesceDecimal => (1, usize::MAX),
ScalarFuncSig::JsonExtractSig => (2, usize::MAX),
ScalarFuncSig::JsonSetSig => (3, usize::MAX),
_ => (0, 0),
};
(min_args, max_args)
}
fn | () -> HashMap<ScalarFuncSig, (usize, usize)> {
let mut m: HashMap<ScalarFuncSig, (usize, usize)> = HashMap::default();
let tbls = vec![
(ScalarFuncSig::LtInt, (2, 2)),
(ScalarFuncSig::CastIntAsInt, (1, 1)),
(ScalarFuncSig::IfInt, (3, 3)),
(ScalarFuncSig::JsonArraySig, (0, usize::MAX)),
(ScalarFuncSig::CoalesceDecimal, (1, usize::MAX)),
(ScalarFuncSig::JsonExtractSig, (2, usize::MAX)),
(ScalarFuncSig::JsonSetSig, (3, usize::MAX)),
(ScalarFuncSig::Acos, (0, 0)),
];
for tbl in tbls {
m.insert(tbl.0, tbl.1);
}
m
}
fn get_scalar_args_with_map(
m: &HashMap<ScalarFuncSig, (usize, usize)>,
sig: ScalarFuncSig,
) -> (usize, usize) {
if let Some((min_args, max_args)) = m.get(&sig).cloned() {
return (min_args, max_args);
}
(0, 0)
}
#[bench]
fn bench_get_scalar_args_with_match(b: &mut Bencher) {
b.iter(|| {
for _ in 0..1000 {
black_box(get_scalar_args_with_match(black_box(ScalarFuncSig::AbsInt)));
}
})
}
#[bench]
fn bench_get_scalar_args_with_map(b: &mut Bencher) {
let m = init_scalar_args_map();
b.iter(|| {
for _ in 0..1000 {
black_box(get_scalar_args_with_map(
black_box(&m),
black_box(ScalarFuncSig::AbsInt),
));
}
})
}
| init_scalar_args_map | identifier_name |
issue-38293.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that `fn foo::bar::{self}` only imports `bar` in the type namespace.
mod foo {
pub fn f() |
}
use foo::f::{self}; //~ ERROR unresolved import `foo::f`
mod bar {
pub fn baz() {}
pub mod baz {}
}
use bar::baz::{self};
fn main() {
baz(); //~ ERROR expected function, found module `baz`
}
| { } | identifier_body |
issue-38293.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that `fn foo::bar::{self}` only imports `bar` in the type namespace.
mod foo {
pub fn | () { }
}
use foo::f::{self}; //~ ERROR unresolved import `foo::f`
mod bar {
pub fn baz() {}
pub mod baz {}
}
use bar::baz::{self};
fn main() {
baz(); //~ ERROR expected function, found module `baz`
}
| f | identifier_name |
issue-38293.rs | // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | // option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that `fn foo::bar::{self}` only imports `bar` in the type namespace.
mod foo {
pub fn f() { }
}
use foo::f::{self}; //~ ERROR unresolved import `foo::f`
mod bar {
pub fn baz() {}
pub mod baz {}
}
use bar::baz::{self};
fn main() {
baz(); //~ ERROR expected function, found module `baz`
} | random_line_split |
|
xsdt.rs | //! Extended System Description Table
use core::mem; | pub struct Xsdt(&'static Sdt);
impl Xsdt {
/// Cast SDT to XSDT if signature matches.
pub fn new(sdt: &'static Sdt) -> Option<Self> {
if &sdt.signature == b"XSDT" {
Some(Xsdt(sdt))
} else {
None
}
}
/// Get an iterator for the table entries.
pub fn iter(&self) -> XsdtIter {
XsdtIter {
sdt: self.0,
index: 0
}
}
}
/// XSDT as an array of 64-bit physical addresses that point to other DESCRIPTION_HEADERs. So we use
/// an iterator to walk through it.
pub struct XsdtIter {
sdt: &'static Sdt,
index: usize
}
impl Iterator for XsdtIter {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.sdt.data_len() / mem::size_of::<u64>() {
// get the item
let item = unsafe { *(self.sdt.data_address() as *const u64).offset(self.index as isize) };
// increment the index
self.index += 1;
// return the found entry
return Some(item as usize);
}
// When there are no more elements, return a None value
None
}
} |
use super::sdt::Sdt;
/// XSDT structure
#[derive(Debug)] | random_line_split |
xsdt.rs | //! Extended System Description Table
use core::mem;
use super::sdt::Sdt;
/// XSDT structure
#[derive(Debug)]
pub struct Xsdt(&'static Sdt);
impl Xsdt {
/// Cast SDT to XSDT if signature matches.
pub fn new(sdt: &'static Sdt) -> Option<Self> {
if &sdt.signature == b"XSDT" {
Some(Xsdt(sdt))
} else {
None
}
}
/// Get an iterator for the table entries.
pub fn iter(&self) -> XsdtIter {
XsdtIter {
sdt: self.0,
index: 0
}
}
}
/// XSDT as an array of 64-bit physical addresses that point to other DESCRIPTION_HEADERs. So we use
/// an iterator to walk through it.
pub struct XsdtIter {
sdt: &'static Sdt,
index: usize
}
impl Iterator for XsdtIter {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.sdt.data_len() / mem::size_of::<u64>() |
// When there are no more elements, return a None value
None
}
}
| {
// get the item
let item = unsafe { *(self.sdt.data_address() as *const u64).offset(self.index as isize) };
// increment the index
self.index += 1;
// return the found entry
return Some(item as usize);
} | conditional_block |
xsdt.rs | //! Extended System Description Table
use core::mem;
use super::sdt::Sdt;
/// XSDT structure
#[derive(Debug)]
pub struct Xsdt(&'static Sdt);
impl Xsdt {
/// Cast SDT to XSDT if signature matches.
pub fn new(sdt: &'static Sdt) -> Option<Self> {
if &sdt.signature == b"XSDT" {
Some(Xsdt(sdt))
} else {
None
}
}
/// Get an iterator for the table entries.
pub fn | (&self) -> XsdtIter {
XsdtIter {
sdt: self.0,
index: 0
}
}
}
/// XSDT as an array of 64-bit physical addresses that point to other DESCRIPTION_HEADERs. So we use
/// an iterator to walk through it.
pub struct XsdtIter {
sdt: &'static Sdt,
index: usize
}
impl Iterator for XsdtIter {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.sdt.data_len() / mem::size_of::<u64>() {
// get the item
let item = unsafe { *(self.sdt.data_address() as *const u64).offset(self.index as isize) };
// increment the index
self.index += 1;
// return the found entry
return Some(item as usize);
}
// When there are no more elements, return a None value
None
}
}
| iter | identifier_name |
emulation.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use actor::{Actor, ActorMessageStatus, ActorRegistry};
use serde_json::{Map, Value};
use std::net::TcpStream;
pub struct EmulationActor {
pub name: String,
}
impl Actor for EmulationActor {
fn name(&self) -> String {
self.name.clone()
}
fn handle_message(
&self,
_registry: &ActorRegistry,
msg_type: &str,
_msg: &Map<String, Value>,
_stream: &mut TcpStream,
) -> Result<ActorMessageStatus, ()> {
Ok(match msg_type {
_ => ActorMessageStatus::Ignored,
})
}
}
impl EmulationActor {
pub fn | (name: String) -> EmulationActor {
EmulationActor { name: name }
}
}
| new | identifier_name |
emulation.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use actor::{Actor, ActorMessageStatus, ActorRegistry};
use serde_json::{Map, Value};
use std::net::TcpStream;
pub struct EmulationActor {
pub name: String,
}
impl Actor for EmulationActor {
fn name(&self) -> String {
self.name.clone()
}
fn handle_message(
&self,
_registry: &ActorRegistry,
msg_type: &str,
_msg: &Map<String, Value>,
_stream: &mut TcpStream,
) -> Result<ActorMessageStatus, ()> |
}
impl EmulationActor {
pub fn new(name: String) -> EmulationActor {
EmulationActor { name: name }
}
}
| {
Ok(match msg_type {
_ => ActorMessageStatus::Ignored,
})
} | identifier_body |
emulation.rs |
use actor::{Actor, ActorMessageStatus, ActorRegistry};
use serde_json::{Map, Value};
use std::net::TcpStream;
pub struct EmulationActor {
pub name: String,
}
impl Actor for EmulationActor {
fn name(&self) -> String {
self.name.clone()
}
fn handle_message(
&self,
_registry: &ActorRegistry,
msg_type: &str,
_msg: &Map<String, Value>,
_stream: &mut TcpStream,
) -> Result<ActorMessageStatus, ()> {
Ok(match msg_type {
_ => ActorMessageStatus::Ignored,
})
}
}
impl EmulationActor {
pub fn new(name: String) -> EmulationActor {
EmulationActor { name: name }
}
} | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */ | random_line_split |
|
theme.rs | use crate::color::{LinSrgba, Srgba};
use std::collections::HashMap;
/// A set of styling defaults used for coloring texturing geometric primitives that have no entry
/// within the **Draw**'s inner **ColorMap**.
#[derive(Clone, Debug)]
pub struct Theme {
/// Fill color defaults.
pub fill_color: Color,
/// Stroke color defaults.
pub stroke_color: Color,
}
/// A set of defaults used for coloring.
#[derive(Clone, Debug)]
pub struct Color {
pub default: Srgba,
pub primitive: HashMap<Primitive, Srgba>,
}
/// Primitive geometry types that may have unique default styles.
///
/// These are used as keys into the **Theme**'s geometry primitive default values.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub enum Primitive {
Arrow,
Cuboid,
Ellipse,
Line,
Mesh,
Path,
Polygon,
Quad,
Rect,
Text,
Texture,
Tri,
}
impl Theme {
/// Retrieve the non-linear sRGBA fill color representation for the given primitive.
pub fn fill_srgba(&self, prim: &Primitive) -> Srgba {
self.fill_color | .get(prim)
.map(|&c| c)
.unwrap_or(self.fill_color.default)
}
/// Retrieve the linear sRGBA fill color representation for the given primitive.
pub fn fill_lin_srgba(&self, prim: &Primitive) -> LinSrgba {
self.fill_srgba(prim).into_linear()
}
/// Retrieve the non-linear sRGBA stroke color representation for the given primitive.
pub fn stroke_srgba(&self, prim: &Primitive) -> Srgba {
self.stroke_color
.primitive
.get(prim)
.map(|&c| c)
.unwrap_or(self.stroke_color.default)
}
/// Retrieve the linear sRGBA stroke color representation for the given primitive.
pub fn stroke_lin_srgba(&self, prim: &Primitive) -> LinSrgba {
self.stroke_srgba(prim).into_linear()
}
}
impl Default for Theme {
fn default() -> Self {
// TODO: This should be pub const.
let default_fill = Srgba::new(1.0, 1.0, 1.0, 1.0);
let default_stroke = Srgba::new(0.0, 0.0, 0.0, 1.0);
let fill_color = Color {
default: default_fill,
primitive: Default::default(),
};
let mut stroke_color = Color {
default: default_stroke,
primitive: Default::default(),
};
stroke_color
.primitive
.insert(Primitive::Arrow, default_fill);
Theme {
fill_color,
stroke_color,
}
}
} | .primitive | random_line_split |
theme.rs | use crate::color::{LinSrgba, Srgba};
use std::collections::HashMap;
/// A set of styling defaults used for coloring texturing geometric primitives that have no entry
/// within the **Draw**'s inner **ColorMap**.
#[derive(Clone, Debug)]
pub struct Theme {
/// Fill color defaults.
pub fill_color: Color,
/// Stroke color defaults.
pub stroke_color: Color,
}
/// A set of defaults used for coloring.
#[derive(Clone, Debug)]
pub struct Color {
pub default: Srgba,
pub primitive: HashMap<Primitive, Srgba>,
}
/// Primitive geometry types that may have unique default styles.
///
/// These are used as keys into the **Theme**'s geometry primitive default values.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub enum Primitive {
Arrow,
Cuboid,
Ellipse,
Line,
Mesh,
Path,
Polygon,
Quad,
Rect,
Text,
Texture,
Tri,
}
impl Theme {
/// Retrieve the non-linear sRGBA fill color representation for the given primitive.
pub fn fill_srgba(&self, prim: &Primitive) -> Srgba {
self.fill_color
.primitive
.get(prim)
.map(|&c| c)
.unwrap_or(self.fill_color.default)
}
/// Retrieve the linear sRGBA fill color representation for the given primitive.
pub fn fill_lin_srgba(&self, prim: &Primitive) -> LinSrgba {
self.fill_srgba(prim).into_linear()
}
/// Retrieve the non-linear sRGBA stroke color representation for the given primitive.
pub fn stroke_srgba(&self, prim: &Primitive) -> Srgba |
/// Retrieve the linear sRGBA stroke color representation for the given primitive.
pub fn stroke_lin_srgba(&self, prim: &Primitive) -> LinSrgba {
self.stroke_srgba(prim).into_linear()
}
}
impl Default for Theme {
fn default() -> Self {
// TODO: This should be pub const.
let default_fill = Srgba::new(1.0, 1.0, 1.0, 1.0);
let default_stroke = Srgba::new(0.0, 0.0, 0.0, 1.0);
let fill_color = Color {
default: default_fill,
primitive: Default::default(),
};
let mut stroke_color = Color {
default: default_stroke,
primitive: Default::default(),
};
stroke_color
.primitive
.insert(Primitive::Arrow, default_fill);
Theme {
fill_color,
stroke_color,
}
}
}
| {
self.stroke_color
.primitive
.get(prim)
.map(|&c| c)
.unwrap_or(self.stroke_color.default)
} | identifier_body |
theme.rs | use crate::color::{LinSrgba, Srgba};
use std::collections::HashMap;
/// A set of styling defaults used for coloring texturing geometric primitives that have no entry
/// within the **Draw**'s inner **ColorMap**.
#[derive(Clone, Debug)]
pub struct Theme {
/// Fill color defaults.
pub fill_color: Color,
/// Stroke color defaults.
pub stroke_color: Color,
}
/// A set of defaults used for coloring.
#[derive(Clone, Debug)]
pub struct Color {
pub default: Srgba,
pub primitive: HashMap<Primitive, Srgba>,
}
/// Primitive geometry types that may have unique default styles.
///
/// These are used as keys into the **Theme**'s geometry primitive default values.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub enum Primitive {
Arrow,
Cuboid,
Ellipse,
Line,
Mesh,
Path,
Polygon,
Quad,
Rect,
Text,
Texture,
Tri,
}
impl Theme {
/// Retrieve the non-linear sRGBA fill color representation for the given primitive.
pub fn fill_srgba(&self, prim: &Primitive) -> Srgba {
self.fill_color
.primitive
.get(prim)
.map(|&c| c)
.unwrap_or(self.fill_color.default)
}
/// Retrieve the linear sRGBA fill color representation for the given primitive.
pub fn fill_lin_srgba(&self, prim: &Primitive) -> LinSrgba {
self.fill_srgba(prim).into_linear()
}
/// Retrieve the non-linear sRGBA stroke color representation for the given primitive.
pub fn | (&self, prim: &Primitive) -> Srgba {
self.stroke_color
.primitive
.get(prim)
.map(|&c| c)
.unwrap_or(self.stroke_color.default)
}
/// Retrieve the linear sRGBA stroke color representation for the given primitive.
pub fn stroke_lin_srgba(&self, prim: &Primitive) -> LinSrgba {
self.stroke_srgba(prim).into_linear()
}
}
impl Default for Theme {
fn default() -> Self {
// TODO: This should be pub const.
let default_fill = Srgba::new(1.0, 1.0, 1.0, 1.0);
let default_stroke = Srgba::new(0.0, 0.0, 0.0, 1.0);
let fill_color = Color {
default: default_fill,
primitive: Default::default(),
};
let mut stroke_color = Color {
default: default_stroke,
primitive: Default::default(),
};
stroke_color
.primitive
.insert(Primitive::Arrow, default_fill);
Theme {
fill_color,
stroke_color,
}
}
}
| stroke_srgba | identifier_name |
errors.rs | // ------------------------------------------------------------------------- //
// Imports //
// ------------------------------------------------------------------------- //
// Standard libraries imports
use std::process;
use std::result::Result as StdResult;
// External crates imports
use ansi_term::Colour::Red;
// Project imports
use core::display::{boxify, stderr};
// ------------------------------------------------------------------------- //
// Types //
// ------------------------------------------------------------------------- //
pub type Result<T> = StdResult<T, Error>;
// ------------------------------------------------------------------------- //
// Structures //
// ------------------------------------------------------------------------- //
/// The different kinds of errors.
#[allow(dead_code)]
pub enum ErrorKind {
/// Occurs in case of IO related error.
IO,
/// Occurs when the program is run outside of a workspace.
NotInWorkspace,
/// Occurs in case of parsing error.
Parse,
}
/// An error, with its kind, message and optional content.
pub struct Error {
/// The kind of error.
pub kind: ErrorKind,
/// The error message.
pub message: String,
/// An optional error content.
pub error: Option<String>,
}
impl Error {
/// Exit the program by displaying an error message and returning 1.
pub fn exit(&self) -> ! {
self.print();
process::exit(1);
}
/// Exit the program by displaying an error message and returning 1.
pub fn print(&self) {
let mut msg = self.message.clone();
match self.error {
None => (),
Some(_) => msg.push(':'),
}
stderr(&Red.paint(msg).to_string());
match self.error.clone() {
None => (),
Some(error) => stderr(&boxify(error, Red)),
}
}
} |
// ------------------------------------------------------------------------- //
// Traits //
// ------------------------------------------------------------------------- //
/// A fallible unwrap
pub trait Fallible<T> {
/// Return the value from Ok Result, or fail in case of Error.
fn unwrap_or_fail(self) -> T;
}
/// Implements Fallible for Result
impl<T> Fallible<T> for Result<T> {
fn unwrap_or_fail(self) -> T {
match self {
Ok(val) => val,
Err(err) => err.exit(),
}
}
} | random_line_split |
|
errors.rs | // ------------------------------------------------------------------------- //
// Imports //
// ------------------------------------------------------------------------- //
// Standard libraries imports
use std::process;
use std::result::Result as StdResult;
// External crates imports
use ansi_term::Colour::Red;
// Project imports
use core::display::{boxify, stderr};
// ------------------------------------------------------------------------- //
// Types //
// ------------------------------------------------------------------------- //
pub type Result<T> = StdResult<T, Error>;
// ------------------------------------------------------------------------- //
// Structures //
// ------------------------------------------------------------------------- //
/// The different kinds of errors.
#[allow(dead_code)]
pub enum ErrorKind {
/// Occurs in case of IO related error.
IO,
/// Occurs when the program is run outside of a workspace.
NotInWorkspace,
/// Occurs in case of parsing error.
Parse,
}
/// An error, with its kind, message and optional content.
pub struct Error {
/// The kind of error.
pub kind: ErrorKind,
/// The error message.
pub message: String,
/// An optional error content.
pub error: Option<String>,
}
impl Error {
/// Exit the program by displaying an error message and returning 1.
pub fn exit(&self) -> ! {
self.print();
process::exit(1);
}
/// Exit the program by displaying an error message and returning 1.
pub fn | (&self) {
let mut msg = self.message.clone();
match self.error {
None => (),
Some(_) => msg.push(':'),
}
stderr(&Red.paint(msg).to_string());
match self.error.clone() {
None => (),
Some(error) => stderr(&boxify(error, Red)),
}
}
}
// ------------------------------------------------------------------------- //
// Traits //
// ------------------------------------------------------------------------- //
/// A fallible unwrap
pub trait Fallible<T> {
/// Return the value from Ok Result, or fail in case of Error.
fn unwrap_or_fail(self) -> T;
}
/// Implements Fallible for Result
impl<T> Fallible<T> for Result<T> {
fn unwrap_or_fail(self) -> T {
match self {
Ok(val) => val,
Err(err) => err.exit(),
}
}
}
| print | identifier_name |
uievent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::UIEventBinding;
use crate::dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{DomRoot, MutNullableDom};
use crate::dom::bindings::str::DOMString;
use crate::dom::event::{Event, EventBubbles, EventCancelable};
use crate::dom::window::Window;
use dom_struct::dom_struct;
use servo_atoms::Atom;
use std::cell::Cell;
use std::default::Default;
// https://w3c.github.io/uievents/#interface-uievent
#[dom_struct]
pub struct UIEvent {
event: Event,
view: MutNullableDom<Window>,
detail: Cell<i32>,
}
impl UIEvent {
pub fn new_inherited() -> UIEvent {
UIEvent {
event: Event::new_inherited(),
view: Default::default(),
detail: Cell::new(0),
}
}
pub fn new_uninitialized(window: &Window) -> DomRoot<UIEvent> {
reflect_dom_object(
Box::new(UIEvent::new_inherited()),
window,
UIEventBinding::Wrap,
)
}
pub fn new(
window: &Window,
type_: DOMString,
can_bubble: EventBubbles,
cancelable: EventCancelable,
view: Option<&Window>,
detail: i32,
) -> DomRoot<UIEvent> {
let ev = UIEvent::new_uninitialized(window);
ev.InitUIEvent(
type_,
bool::from(can_bubble),
bool::from(cancelable),
view,
detail,
);
ev
}
#[allow(non_snake_case)]
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &UIEventBinding::UIEventInit,
) -> Fallible<DomRoot<UIEvent>> {
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
let event = UIEvent::new(
window,
type_,
bubbles,
cancelable,
init.view.as_deref(),
init.detail,
);
Ok(event)
}
}
impl UIEventMethods for UIEvent {
// https://w3c.github.io/uievents/#widl-UIEvent-view
fn GetView(&self) -> Option<DomRoot<Window>> |
// https://w3c.github.io/uievents/#widl-UIEvent-detail
fn Detail(&self) -> i32 {
self.detail.get()
}
// https://w3c.github.io/uievents/#widl-UIEvent-initUIEvent
fn InitUIEvent(
&self,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<&Window>,
detail: i32,
) {
let event = self.upcast::<Event>();
if event.dispatching() {
return;
}
event.init_event(Atom::from(type_), can_bubble, cancelable);
self.view.set(view);
self.detail.set(detail);
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
| {
self.view.get()
} | identifier_body |
uievent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::UIEventBinding;
use crate::dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{DomRoot, MutNullableDom};
use crate::dom::bindings::str::DOMString;
use crate::dom::event::{Event, EventBubbles, EventCancelable};
use crate::dom::window::Window;
use dom_struct::dom_struct;
use servo_atoms::Atom;
use std::cell::Cell;
use std::default::Default;
// https://w3c.github.io/uievents/#interface-uievent
#[dom_struct]
pub struct UIEvent {
event: Event,
view: MutNullableDom<Window>,
detail: Cell<i32>,
}
impl UIEvent {
pub fn new_inherited() -> UIEvent {
UIEvent {
event: Event::new_inherited(),
view: Default::default(),
detail: Cell::new(0),
}
}
pub fn new_uninitialized(window: &Window) -> DomRoot<UIEvent> {
reflect_dom_object(
Box::new(UIEvent::new_inherited()),
window,
UIEventBinding::Wrap,
)
}
pub fn new(
window: &Window,
type_: DOMString,
can_bubble: EventBubbles,
cancelable: EventCancelable,
view: Option<&Window>,
detail: i32,
) -> DomRoot<UIEvent> {
let ev = UIEvent::new_uninitialized(window);
ev.InitUIEvent(
type_,
bool::from(can_bubble),
bool::from(cancelable),
view,
detail,
);
ev
}
#[allow(non_snake_case)]
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &UIEventBinding::UIEventInit,
) -> Fallible<DomRoot<UIEvent>> {
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
let event = UIEvent::new(
window,
type_,
bubbles,
cancelable,
init.view.as_deref(),
init.detail,
);
Ok(event)
}
}
impl UIEventMethods for UIEvent {
// https://w3c.github.io/uievents/#widl-UIEvent-view
fn GetView(&self) -> Option<DomRoot<Window>> {
self.view.get()
}
// https://w3c.github.io/uievents/#widl-UIEvent-detail
fn Detail(&self) -> i32 {
self.detail.get()
}
// https://w3c.github.io/uievents/#widl-UIEvent-initUIEvent
fn InitUIEvent(
&self,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<&Window>,
detail: i32,
) {
let event = self.upcast::<Event>();
if event.dispatching() |
event.init_event(Atom::from(type_), can_bubble, cancelable);
self.view.set(view);
self.detail.set(detail);
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
| {
return;
} | conditional_block |
uievent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::UIEventBinding;
use crate::dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{DomRoot, MutNullableDom};
use crate::dom::bindings::str::DOMString;
use crate::dom::event::{Event, EventBubbles, EventCancelable};
use crate::dom::window::Window;
use dom_struct::dom_struct;
use servo_atoms::Atom;
use std::cell::Cell;
use std::default::Default;
// https://w3c.github.io/uievents/#interface-uievent
#[dom_struct]
pub struct UIEvent {
event: Event,
view: MutNullableDom<Window>,
detail: Cell<i32>,
}
impl UIEvent {
pub fn new_inherited() -> UIEvent {
UIEvent {
event: Event::new_inherited(),
view: Default::default(),
detail: Cell::new(0),
}
}
pub fn new_uninitialized(window: &Window) -> DomRoot<UIEvent> {
reflect_dom_object(
Box::new(UIEvent::new_inherited()),
window,
UIEventBinding::Wrap,
)
}
pub fn new(
window: &Window,
type_: DOMString,
can_bubble: EventBubbles,
cancelable: EventCancelable,
view: Option<&Window>,
detail: i32,
) -> DomRoot<UIEvent> {
let ev = UIEvent::new_uninitialized(window);
ev.InitUIEvent(
type_,
bool::from(can_bubble),
bool::from(cancelable),
view,
detail,
);
ev
}
#[allow(non_snake_case)]
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &UIEventBinding::UIEventInit,
) -> Fallible<DomRoot<UIEvent>> {
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
let event = UIEvent::new(
window,
type_,
bubbles,
cancelable,
init.view.as_deref(),
init.detail,
);
Ok(event)
}
}
impl UIEventMethods for UIEvent {
// https://w3c.github.io/uievents/#widl-UIEvent-view
fn GetView(&self) -> Option<DomRoot<Window>> {
self.view.get()
}
// https://w3c.github.io/uievents/#widl-UIEvent-detail
fn Detail(&self) -> i32 {
self.detail.get()
}
// https://w3c.github.io/uievents/#widl-UIEvent-initUIEvent | type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<&Window>,
detail: i32,
) {
let event = self.upcast::<Event>();
if event.dispatching() {
return;
}
event.init_event(Atom::from(type_), can_bubble, cancelable);
self.view.set(view);
self.detail.set(detail);
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
} | fn InitUIEvent(
&self, | random_line_split |
uievent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::EventBinding::EventMethods;
use crate::dom::bindings::codegen::Bindings::UIEventBinding;
use crate::dom::bindings::codegen::Bindings::UIEventBinding::UIEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{DomRoot, MutNullableDom};
use crate::dom::bindings::str::DOMString;
use crate::dom::event::{Event, EventBubbles, EventCancelable};
use crate::dom::window::Window;
use dom_struct::dom_struct;
use servo_atoms::Atom;
use std::cell::Cell;
use std::default::Default;
// https://w3c.github.io/uievents/#interface-uievent
#[dom_struct]
pub struct UIEvent {
event: Event,
view: MutNullableDom<Window>,
detail: Cell<i32>,
}
impl UIEvent {
pub fn new_inherited() -> UIEvent {
UIEvent {
event: Event::new_inherited(),
view: Default::default(),
detail: Cell::new(0),
}
}
pub fn new_uninitialized(window: &Window) -> DomRoot<UIEvent> {
reflect_dom_object(
Box::new(UIEvent::new_inherited()),
window,
UIEventBinding::Wrap,
)
}
pub fn new(
window: &Window,
type_: DOMString,
can_bubble: EventBubbles,
cancelable: EventCancelable,
view: Option<&Window>,
detail: i32,
) -> DomRoot<UIEvent> {
let ev = UIEvent::new_uninitialized(window);
ev.InitUIEvent(
type_,
bool::from(can_bubble),
bool::from(cancelable),
view,
detail,
);
ev
}
#[allow(non_snake_case)]
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &UIEventBinding::UIEventInit,
) -> Fallible<DomRoot<UIEvent>> {
let bubbles = EventBubbles::from(init.parent.bubbles);
let cancelable = EventCancelable::from(init.parent.cancelable);
let event = UIEvent::new(
window,
type_,
bubbles,
cancelable,
init.view.as_deref(),
init.detail,
);
Ok(event)
}
}
impl UIEventMethods for UIEvent {
// https://w3c.github.io/uievents/#widl-UIEvent-view
fn GetView(&self) -> Option<DomRoot<Window>> {
self.view.get()
}
// https://w3c.github.io/uievents/#widl-UIEvent-detail
fn Detail(&self) -> i32 {
self.detail.get()
}
// https://w3c.github.io/uievents/#widl-UIEvent-initUIEvent
fn | (
&self,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<&Window>,
detail: i32,
) {
let event = self.upcast::<Event>();
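// Re-initialization is a no-op while the event is being dispatched.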
if event.dispatching() {
return;
}
event.init_event(Atom::from(type_), can_bubble, cancelable);
self.view.set(view);
self.detail.set(detail);
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
| InitUIEvent | identifier_name |
main.rs | // Crypto challenge Set1 / Challenge 1
// Convert hex to base64
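// Example invocation (hex input and expected output taken from the test below):
//   $ challenge1 49276d206b696c6c696e6720796f757220627261696e...
//   SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t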
extern crate codec;
#[cfg(not(test))]
fn main() |
#[test]
fn challenge1() {
let input = "49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d";
let output = codec::to_base64( codec::from_hex(input).ok().unwrap().as_slice() );
assert_eq!(output, String::from_str("SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t"));
}
| {
let args = std::os::args();
if args.len() != 2 {
println!("USAGE: challenge1 HEX_ENCODED_STRING");
} else {
let input = args[1].as_slice();
match codec::from_hex(input) {
Err(msg) => println!("Invalid hex string: {}", msg),
Ok(binary) => println!("{}", codec::to_base64(binary.as_slice()))
}
}
} | identifier_body |
main.rs | // Crypto challenge Set1 / Challenge 1
// Convert hex to base64
extern crate codec;
#[cfg(not(test))]
fn main() {
let args = std::os::args();
if args.len() != 2 {
println!("USAGE: challenge1 HEX_ENCODED_STRING");
} else {
let input = args[1].as_slice();
match codec::from_hex(input) {
Err(msg) => println!("Invalid hex string: {}", msg),
Ok(binary) => println!("{}", codec::to_base64(binary.as_slice())) | #[test]
fn challenge1() {
let input = "49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d";
let output = codec::to_base64( codec::from_hex(input).ok().unwrap().as_slice() );
assert_eq!(output, String::from_str("SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t"));
} | }
}
}
| random_line_split |
main.rs | // Crypto challenge Set1 / Challenge 1
// Convert hex to base64
extern crate codec;
#[cfg(not(test))]
fn main() {
let args = std::os::args();
if args.len() != 2 {
println!("USAGE: challenge1 HEX_ENCODED_STRING");
} else {
let input = args[1].as_slice();
match codec::from_hex(input) {
Err(msg) => println!("Invalid hex string: {}", msg),
Ok(binary) => println!("{}", codec::to_base64(binary.as_slice()))
}
}
}
#[test]
fn | () {
let input = "49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d";
let output = codec::to_base64( codec::from_hex(input).ok().unwrap().as_slice() );
assert_eq!(output, String::from_str("SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t"));
}
| challenge1 | identifier_name |
mkfifo.rs | #![crate_name = "mkfifo"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
extern crate libc;
#[macro_use]
extern crate uucore;
use libc::mkfifo;
use std::ffi::CString;
use std::io::{Error, Write};
static NAME: &'static str = "mkfifo";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optopt("m", "mode", "file permissions for the fifo", "(default 0666)");
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(err) => panic!("{}", err),
};
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
if matches.opt_present("help") || matches.free.is_empty() {
let msg = format!("{0} {1}
Usage:
{0} [OPTIONS] NAME...
Create a FIFO with the given name.", NAME, VERSION);
print!("{}", opts.usage(&msg));
if matches.free.is_empty() {
return 1;
}
return 0;
}
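// The -m argument is parsed as an octal string; when it is absent, mode 0666 is passed to mkfifo.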
let mode = match matches.opt_str("m") {
Some(m) => match usize::from_str_radix(&m, 8) {
Ok(m) => m,
Err(e) => {
show_error!("invalid mode: {}", e);
return 1;
}
},
None => 0o666,
};
let mut exit_status = 0;
for f in matches.free.iter() {
let err = unsafe { mkfifo(CString::new(f.as_bytes()).unwrap().as_ptr(), mode as libc::mode_t) };
if err == -1 {
show_error!("creating '{}': {}", f, Error::last_os_error().raw_os_error().unwrap());
exit_status = 1;
}
}
exit_status
}
#[allow(dead_code)]
fn | () {
std::process::exit(uumain(std::env::args().collect()));
}
| main | identifier_name |
mkfifo.rs | #![crate_name = "mkfifo"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
extern crate libc;
#[macro_use]
extern crate uucore;
use libc::mkfifo;
use std::ffi::CString;
use std::io::{Error, Write};
static NAME: &'static str = "mkfifo";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
| opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(err) => panic!("{}", err),
};
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
if matches.opt_present("help") || matches.free.is_empty() {
let msg = format!("{0} {1}
Usage:
{0} [OPTIONS] NAME...
Create a FIFO with the given name.", NAME, VERSION);
print!("{}", opts.usage(&msg));
if matches.free.is_empty() {
return 1;
}
return 0;
}
let mode = match matches.opt_str("m") {
Some(m) => match usize::from_str_radix(&m, 8) {
Ok(m) => m,
Err(e) => {
show_error!("invalid mode: {}", e);
return 1;
}
},
None => 0o666,
};
let mut exit_status = 0;
for f in matches.free.iter() {
let err = unsafe { mkfifo(CString::new(f.as_bytes()).unwrap().as_ptr(), mode as libc::mode_t) };
if err == -1 {
show_error!("creating '{}': {}", f, Error::last_os_error().raw_os_error().unwrap());
exit_status = 1;
}
}
exit_status
}
#[allow(dead_code)]
fn main() {
std::process::exit(uumain(std::env::args().collect()));
} | pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optopt("m", "mode", "file permissions for the fifo", "(default 0666)");
opts.optflag("h", "help", "display this help and exit"); | random_line_split |
mkfifo.rs | #![crate_name = "mkfifo"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
extern crate libc;
#[macro_use]
extern crate uucore;
use libc::mkfifo;
use std::ffi::CString;
use std::io::{Error, Write};
static NAME: &'static str = "mkfifo";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optopt("m", "mode", "file permissions for the fifo", "(default 0666)");
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(err) => panic!("{}", err),
};
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
if matches.opt_present("help") || matches.free.is_empty() {
let msg = format!("{0} {1}
Usage:
{0} [OPTIONS] NAME...
Create a FIFO with the given name.", NAME, VERSION);
print!("{}", opts.usage(&msg));
if matches.free.is_empty() |
return 0;
}
let mode = match matches.opt_str("m") {
Some(m) => match usize::from_str_radix(&m, 8) {
Ok(m) => m,
Err(e) => {
show_error!("invalid mode: {}", e);
return 1;
}
},
None => 0o666,
};
let mut exit_status = 0;
for f in matches.free.iter() {
let err = unsafe { mkfifo(CString::new(f.as_bytes()).unwrap().as_ptr(), mode as libc::mode_t) };
if err == -1 {
show_error!("creating '{}': {}", f, Error::last_os_error().raw_os_error().unwrap());
exit_status = 1;
}
}
exit_status
}
#[allow(dead_code)]
fn main() {
std::process::exit(uumain(std::env::args().collect()));
}
| {
return 1;
} | conditional_block |
mkfifo.rs | #![crate_name = "mkfifo"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
extern crate libc;
#[macro_use]
extern crate uucore;
use libc::mkfifo;
use std::ffi::CString;
use std::io::{Error, Write};
static NAME: &'static str = "mkfifo";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = getopts::Options::new();
opts.optopt("m", "mode", "file permissions for the fifo", "(default 0666)");
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(err) => panic!("{}", err),
};
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
if matches.opt_present("help") || matches.free.is_empty() {
let msg = format!("{0} {1}
Usage:
{0} [OPTIONS] NAME...
Create a FIFO with the given name.", NAME, VERSION);
print!("{}", opts.usage(&msg));
if matches.free.is_empty() {
return 1;
}
return 0;
}
let mode = match matches.opt_str("m") {
Some(m) => match usize::from_str_radix(&m, 8) {
Ok(m) => m,
Err(e) => {
show_error!("invalid mode: {}", e);
return 1;
}
},
None => 0o666,
};
let mut exit_status = 0;
for f in matches.free.iter() {
let err = unsafe { mkfifo(CString::new(f.as_bytes()).unwrap().as_ptr(), mode as libc::mode_t) };
if err == -1 {
show_error!("creating '{}': {}", f, Error::last_os_error().raw_os_error().unwrap());
exit_status = 1;
}
}
exit_status
}
#[allow(dead_code)]
fn main() | {
std::process::exit(uumain(std::env::args().collect()));
} | identifier_body |
|
extendablemessageevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::ExtendableMessageEventBinding;
use crate::dom::bindings::codegen::Bindings::ExtendableMessageEventBinding::ExtendableMessageEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::bindings::trace::RootedTraceableBox;
use crate::dom::event::Event;
use crate::dom::eventtarget::EventTarget;
use crate::dom::extendableevent::ExtendableEvent;
use crate::dom::globalscope::GlobalScope;
use crate::dom::serviceworkerglobalscope::ServiceWorkerGlobalScope;
use dom_struct::dom_struct;
use js::jsapi::{Heap, JSContext};
use js::jsval::JSVal;
use js::rust::HandleValue;
use servo_atoms::Atom;
#[dom_struct]
pub struct ExtendableMessageEvent {
event: ExtendableEvent,
data: Heap<JSVal>,
origin: DOMString,
lastEventId: DOMString,
}
impl ExtendableMessageEvent {
pub fn new(
global: &GlobalScope,
type_: Atom,
bubbles: bool,
cancelable: bool,
data: HandleValue,
origin: DOMString,
lastEventId: DOMString,
) -> DomRoot<ExtendableMessageEvent> {
let ev = Box::new(ExtendableMessageEvent {
event: ExtendableEvent::new_inherited(),
data: Heap::default(),
origin: origin,
lastEventId: lastEventId,
});
let ev = reflect_dom_object(ev, global, ExtendableMessageEventBinding::Wrap);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bubbles, cancelable);
}
ev.data.set(data.get());
ev
}
pub fn | (
worker: &ServiceWorkerGlobalScope,
type_: DOMString,
init: RootedTraceableBox<ExtendableMessageEventBinding::ExtendableMessageEventInit>,
) -> Fallible<DomRoot<ExtendableMessageEvent>> {
let global = worker.upcast::<GlobalScope>();
let ev = ExtendableMessageEvent::new(
global,
Atom::from(type_),
init.parent.parent.bubbles,
init.parent.parent.cancelable,
init.data.handle(),
init.origin.clone().unwrap(),
init.lastEventId.clone().unwrap(),
);
Ok(ev)
}
}
impl ExtendableMessageEvent {
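// Helper that wraps a JS value in a "message"-typed ExtendableMessageEvent and fires it at `target`.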
pub fn dispatch_jsval(target: &EventTarget, scope: &GlobalScope, message: HandleValue) {
let Extendablemessageevent = ExtendableMessageEvent::new(
scope,
atom!("message"),
false,
false,
message,
DOMString::new(),
DOMString::new(),
);
Extendablemessageevent.upcast::<Event>().fire(target);
}
}
impl ExtendableMessageEventMethods for ExtendableMessageEvent {
#[allow(unsafe_code)]
// https://w3c.github.io/ServiceWorker/#extendablemessage-event-data-attribute
unsafe fn Data(&self, _cx: *mut JSContext) -> JSVal {
self.data.get()
}
// https://w3c.github.io/ServiceWorker/#extendablemessage-event-origin-attribute
fn Origin(&self) -> DOMString {
self.origin.clone()
}
// https://w3c.github.io/ServiceWorker/#extendablemessage-event-lasteventid-attribute
fn LastEventId(&self) -> DOMString {
self.lastEventId.clone()
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
}
| Constructor | identifier_name |
extendablemessageevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::ExtendableMessageEventBinding;
use crate::dom::bindings::codegen::Bindings::ExtendableMessageEventBinding::ExtendableMessageEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::bindings::trace::RootedTraceableBox;
use crate::dom::event::Event;
use crate::dom::eventtarget::EventTarget;
use crate::dom::extendableevent::ExtendableEvent;
use crate::dom::globalscope::GlobalScope;
use crate::dom::serviceworkerglobalscope::ServiceWorkerGlobalScope;
use dom_struct::dom_struct;
use js::jsapi::{Heap, JSContext};
use js::jsval::JSVal;
use js::rust::HandleValue;
use servo_atoms::Atom;
#[dom_struct]
pub struct ExtendableMessageEvent {
event: ExtendableEvent,
data: Heap<JSVal>,
origin: DOMString,
lastEventId: DOMString,
}
impl ExtendableMessageEvent {
pub fn new(
global: &GlobalScope,
type_: Atom,
bubbles: bool,
cancelable: bool,
data: HandleValue,
origin: DOMString,
lastEventId: DOMString,
) -> DomRoot<ExtendableMessageEvent> {
let ev = Box::new(ExtendableMessageEvent {
event: ExtendableEvent::new_inherited(),
data: Heap::default(),
origin: origin,
lastEventId: lastEventId,
});
let ev = reflect_dom_object(ev, global, ExtendableMessageEventBinding::Wrap);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bubbles, cancelable);
}
ev.data.set(data.get());
ev
}
pub fn Constructor(
worker: &ServiceWorkerGlobalScope,
type_: DOMString,
init: RootedTraceableBox<ExtendableMessageEventBinding::ExtendableMessageEventInit>,
) -> Fallible<DomRoot<ExtendableMessageEvent>> {
let global = worker.upcast::<GlobalScope>();
let ev = ExtendableMessageEvent::new(
global,
Atom::from(type_),
init.parent.parent.bubbles,
init.parent.parent.cancelable,
init.data.handle(),
init.origin.clone().unwrap(),
init.lastEventId.clone().unwrap(),
);
Ok(ev)
}
}
impl ExtendableMessageEvent {
pub fn dispatch_jsval(target: &EventTarget, scope: &GlobalScope, message: HandleValue) { | message,
DOMString::new(),
DOMString::new(),
);
Extendablemessageevent.upcast::<Event>().fire(target);
}
}
impl ExtendableMessageEventMethods for ExtendableMessageEvent {
#[allow(unsafe_code)]
// https://w3c.github.io/ServiceWorker/#extendablemessage-event-data-attribute
unsafe fn Data(&self, _cx: *mut JSContext) -> JSVal {
self.data.get()
}
// https://w3c.github.io/ServiceWorker/#extendablemessage-event-origin-attribute
fn Origin(&self) -> DOMString {
self.origin.clone()
}
// https://w3c.github.io/ServiceWorker/#extendablemessage-event-lasteventid-attribute
fn LastEventId(&self) -> DOMString {
self.lastEventId.clone()
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.event.IsTrusted()
}
} | let Extendablemessageevent = ExtendableMessageEvent::new(
scope,
atom!("message"),
false,
false, | random_line_split |
extendablemessageevent.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::ExtendableMessageEventBinding;
use crate::dom::bindings::codegen::Bindings::ExtendableMessageEventBinding::ExtendableMessageEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::bindings::trace::RootedTraceableBox;
use crate::dom::event::Event;
use crate::dom::eventtarget::EventTarget;
use crate::dom::extendableevent::ExtendableEvent;
use crate::dom::globalscope::GlobalScope;
use crate::dom::serviceworkerglobalscope::ServiceWorkerGlobalScope;
use dom_struct::dom_struct;
use js::jsapi::{Heap, JSContext};
use js::jsval::JSVal;
use js::rust::HandleValue;
use servo_atoms::Atom;
#[dom_struct]
pub struct ExtendableMessageEvent {
event: ExtendableEvent,
data: Heap<JSVal>,
origin: DOMString,
lastEventId: DOMString,
}
impl ExtendableMessageEvent {
pub fn new(
global: &GlobalScope,
type_: Atom,
bubbles: bool,
cancelable: bool,
data: HandleValue,
origin: DOMString,
lastEventId: DOMString,
) -> DomRoot<ExtendableMessageEvent> {
let ev = Box::new(ExtendableMessageEvent {
event: ExtendableEvent::new_inherited(),
data: Heap::default(),
origin: origin,
lastEventId: lastEventId,
});
let ev = reflect_dom_object(ev, global, ExtendableMessageEventBinding::Wrap);
{
let event = ev.upcast::<Event>();
event.init_event(type_, bubbles, cancelable);
}
ev.data.set(data.get());
ev
}
pub fn Constructor(
worker: &ServiceWorkerGlobalScope,
type_: DOMString,
init: RootedTraceableBox<ExtendableMessageEventBinding::ExtendableMessageEventInit>,
) -> Fallible<DomRoot<ExtendableMessageEvent>> {
let global = worker.upcast::<GlobalScope>();
let ev = ExtendableMessageEvent::new(
global,
Atom::from(type_),
init.parent.parent.bubbles,
init.parent.parent.cancelable,
init.data.handle(),
init.origin.clone().unwrap(),
init.lastEventId.clone().unwrap(),
);
Ok(ev)
}
}
impl ExtendableMessageEvent {
pub fn dispatch_jsval(target: &EventTarget, scope: &GlobalScope, message: HandleValue) {
let Extendablemessageevent = ExtendableMessageEvent::new(
scope,
atom!("message"),
false,
false,
message,
DOMString::new(),
DOMString::new(),
);
Extendablemessageevent.upcast::<Event>().fire(target);
}
}
impl ExtendableMessageEventMethods for ExtendableMessageEvent {
#[allow(unsafe_code)]
// https://w3c.github.io/ServiceWorker/#extendablemessage-event-data-attribute
unsafe fn Data(&self, _cx: *mut JSContext) -> JSVal {
self.data.get()
}
// https://w3c.github.io/ServiceWorker/#extendablemessage-event-origin-attribute
fn Origin(&self) -> DOMString {
self.origin.clone()
}
// https://w3c.github.io/ServiceWorker/#extendablemessage-event-lasteventid-attribute
fn LastEventId(&self) -> DOMString {
self.lastEventId.clone()
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool |
}
| {
self.event.IsTrusted()
} | identifier_body |
inline.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support for inlining external documentation into the current AST.
use syntax::ast;
use syntax::ast_util;
use syntax::attr::AttrMetaMethods;
use rustc::metadata::csearch;
use rustc::metadata::decoder;
use rustc::middle::def;
use rustc::middle::ty;
use rustc::middle::subst;
use rustc::middle::stability;
use core::DocContext;
use doctree;
use clean;
use super::Clean;
/// Attempt to inline the definition of a local node id into this AST.
///
/// This function will fetch the definition of the id specified, and if it is
/// from another crate it will attempt to inline the documentation from the
/// other crate into this crate.
///
/// This is primarily used for `pub use` statements which are, in general,
/// implementation details. Inlining the documentation should help provide a
/// better experience when reading the documentation in this use case.
///
/// The returned value is `None` if the `id` could not be inlined, and `Some`
/// of a vector of items if it was successfully expanded.
pub fn try_inline(cx: &DocContext, id: ast::NodeId, into: Option<ast::Ident>)
-> Option<Vec<clean::Item>> {
let tcx = match cx.tcx_opt() {
Some(tcx) => tcx,
None => return None,
};
let def = match tcx.def_map.borrow().find(&id) {
Some(def) => *def,
None => return None,
};
let did = def.def_id();
if ast_util::is_local(did) { return None }
try_inline_def(cx, tcx, def).map(|vec| {
vec.into_iter().map(|mut item| {
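// If the caller asked for a rename (e.g. `pub use foo as bar`), apply the new name to each inlined item.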
match into {
Some(into) if item.name.is_some() => {
item.name = Some(into.clean(cx));
}
_ => {}
}
item
}).collect()
})
}
fn | (cx: &DocContext, tcx: &ty::ctxt,
def: def::Def) -> Option<Vec<clean::Item>> {
let mut ret = Vec::new();
let did = def.def_id();
let inner = match def {
def::DefTrait(did) => {
record_extern_fqn(cx, did, clean::TypeTrait);
clean::TraitItem(build_external_trait(cx, tcx, did))
}
def::DefFn(did, style, false) => {
// If this function is a tuple struct constructor, we just skip it
record_extern_fqn(cx, did, clean::TypeFunction);
clean::FunctionItem(build_external_function(cx, tcx, did, style))
}
def::DefStruct(did) => {
record_extern_fqn(cx, did, clean::TypeStruct);
ret.extend(build_impls(cx, tcx, did).into_iter());
clean::StructItem(build_struct(cx, tcx, did))
}
def::DefTy(did, false) => {
record_extern_fqn(cx, did, clean::TypeTypedef);
ret.extend(build_impls(cx, tcx, did).into_iter());
build_type(cx, tcx, did)
}
def::DefTy(did, true) => {
record_extern_fqn(cx, did, clean::TypeEnum);
ret.extend(build_impls(cx, tcx, did).into_iter());
build_type(cx, tcx, did)
}
// Assume that the enum type is reexported next to the variant, and
// variants don't show up in documentation specially.
def::DefVariant(..) => return Some(Vec::new()),
def::DefMod(did) => {
record_extern_fqn(cx, did, clean::TypeModule);
clean::ModuleItem(build_module(cx, tcx, did))
}
def::DefStatic(did, mtbl) => {
record_extern_fqn(cx, did, clean::TypeStatic);
clean::StaticItem(build_static(cx, tcx, did, mtbl))
}
_ => return None,
};
let fqn = csearch::get_item_path(tcx, did);
cx.inlined.borrow_mut().as_mut().unwrap().insert(did);
ret.push(clean::Item {
source: clean::Span::empty(),
name: Some(fqn.last().unwrap().to_string()),
attrs: load_attrs(cx, tcx, did),
inner: inner,
visibility: Some(ast::Public),
stability: stability::lookup(tcx, did).clean(cx),
def_id: did,
});
Some(ret)
}
pub fn load_attrs(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Vec<clean::Attribute> {
let mut attrs = Vec::new();
csearch::get_item_attrs(&tcx.sess.cstore, did, |v| {
attrs.extend(v.into_iter().map(|a| {
a.clean(cx)
}));
});
attrs
}
/// Record an external fully qualified name in the external_paths cache.
///
/// These names are used later on by HTML rendering to generate things like
/// source links back to the original item.
pub fn record_extern_fqn(cx: &DocContext, did: ast::DefId, kind: clean::TypeKind) {
match cx.tcx_opt() {
Some(tcx) => {
let fqn = csearch::get_item_path(tcx, did);
let fqn = fqn.into_iter().map(|i| i.to_string()).collect();
cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
}
None => {}
}
}
pub fn build_external_trait(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Trait {
let def = ty::lookup_trait_def(tcx, did);
let trait_items = ty::trait_items(tcx, did).clean(cx);
let provided = ty::provided_trait_methods(tcx, did);
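// Trait methods with a default body are classified as provided; the rest are required.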
let mut items = trait_items.into_iter().map(|trait_item| {
if provided.iter().any(|a| a.def_id == trait_item.def_id) {
clean::ProvidedMethod(trait_item)
} else {
clean::RequiredMethod(trait_item)
}
});
let trait_def = ty::lookup_trait_def(tcx, did);
let bounds = trait_def.bounds.clean(cx);
clean::Trait {
generics: (&def.generics, subst::TypeSpace).clean(cx),
items: items.collect(),
bounds: bounds,
}
}
fn build_external_function(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId,
style: ast::FnStyle) -> clean::Function {
let t = ty::lookup_item_type(tcx, did);
clean::Function {
decl: match ty::get(t.ty).sty {
ty::ty_bare_fn(ref f) => (did, &f.sig).clean(cx),
_ => fail!("bad function"),
},
generics: (&t.generics, subst::FnSpace).clean(cx),
fn_style: style,
}
}
fn build_struct(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::Struct {
use syntax::parse::token::special_idents::unnamed_field;
let t = ty::lookup_item_type(tcx, did);
let fields = ty::lookup_struct_fields(tcx, did);
clean::Struct {
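// Classify by fields: none => unit struct, one unnamed field => newtype, first field unnamed => tuple, otherwise plain.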
struct_type: match fields.as_slice() {
[] => doctree::Unit,
[ref f] if f.name == unnamed_field.name => doctree::Newtype,
[ref f, ..] if f.name == unnamed_field.name => doctree::Tuple,
_ => doctree::Plain,
},
generics: (&t.generics, subst::TypeSpace).clean(cx),
fields: fields.clean(cx),
fields_stripped: false,
}
}
fn build_type(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::ItemEnum {
let t = ty::lookup_item_type(tcx, did);
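// A cross-crate DefTy may be either an enum or a `type` alias; real enums are documented as enums, everything else as a typedef.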
match ty::get(t.ty).sty {
ty::ty_enum(edid, _) if !csearch::is_typedef(&tcx.sess.cstore, did) => {
return clean::EnumItem(clean::Enum {
generics: (&t.generics, subst::TypeSpace).clean(cx),
variants_stripped: false,
variants: ty::enum_variants(tcx, edid).clean(cx),
})
}
_ => {}
}
clean::TypedefItem(clean::Typedef {
type_: t.ty.clean(cx),
generics: (&t.generics, subst::TypeSpace).clean(cx),
})
}
fn build_impls(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Vec<clean::Item> {
ty::populate_implementations_for_type_if_necessary(tcx, did);
let mut impls = Vec::new();
match tcx.inherent_impls.borrow().find(&did) {
None => {}
Some(i) => {
impls.extend(i.iter().map(|&did| { build_impl(cx, tcx, did) }));
}
}
// If this is the first time we've inlined something from this crate, then
// we inline *all* impls from the crate into this crate. Note that there's
// currently no way for us to filter this based on type, and we likely need
// many impls for a variety of reasons.
//
// Primarily, the impls will be used to populate the documentation for this
// type being inlined, but impls can also be used when generating
// documentation for primitives (no way to find those specifically).
if cx.populated_crate_impls.borrow_mut().insert(did.krate) {
csearch::each_top_level_item_of_crate(&tcx.sess.cstore,
did.krate,
|def, _, _| {
populate_impls(cx, tcx, def, &mut impls)
});
fn populate_impls(cx: &DocContext, tcx: &ty::ctxt,
def: decoder::DefLike,
impls: &mut Vec<Option<clean::Item>>) {
match def {
decoder::DlImpl(did) => impls.push(build_impl(cx, tcx, did)),
decoder::DlDef(def::DefMod(did)) => {
csearch::each_child_of_item(&tcx.sess.cstore,
did,
|def, _, _| {
populate_impls(cx, tcx, def, impls)
})
}
_ => {}
}
}
}
impls.into_iter().filter_map(|a| a).collect()
}
fn build_impl(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Option<clean::Item> {
if !cx.inlined.borrow_mut().as_mut().unwrap().insert(did) {
return None
}
let associated_trait = csearch::get_impl_trait(tcx, did);
// If this is an impl for a #[doc(hidden)] trait, be sure to not inline it.
match associated_trait {
Some(ref t) => {
let trait_attrs = load_attrs(cx, tcx, t.def_id);
if trait_attrs.iter().any(|a| is_doc_hidden(a)) {
return None
}
}
None => {}
}
let attrs = load_attrs(cx, tcx, did);
let ty = ty::lookup_item_type(tcx, did);
let trait_items = csearch::get_impl_items(&tcx.sess.cstore, did)
.iter()
.filter_map(|did| {
let did = did.def_id();
let impl_item = ty::impl_or_trait_item(tcx, did);
match impl_item {
ty::MethodTraitItem(method) => {
if method.vis != ast::Public && associated_trait.is_none() {
return None
}
let mut item = method.clean(cx);
item.inner = match item.inner.clone() {
clean::TyMethodItem(clean::TyMethod {
fn_style, decl, self_, generics
}) => {
clean::MethodItem(clean::Method {
fn_style: fn_style,
decl: decl,
self_: self_,
generics: generics,
})
}
_ => fail!("not a tymethod"),
};
Some(item)
}
ty::TypeTraitItem(_) => {
// FIXME(pcwalton): Implement.
None
}
}
}).collect();
return Some(clean::Item {
inner: clean::ImplItem(clean::Impl {
derived: clean::detect_derived(attrs.as_slice()),
trait_: associated_trait.clean(cx).map(|bound| {
match bound {
clean::TraitBound(ty) => ty,
clean::UnboxedFnBound(..) |
clean::RegionBound(..) => unreachable!(),
}
}),
for_: ty.ty.clean(cx),
generics: (&ty.generics, subst::TypeSpace).clean(cx),
items: trait_items,
}),
source: clean::Span::empty(),
name: None,
attrs: attrs,
visibility: Some(ast::Inherited),
stability: stability::lookup(tcx, did).clean(cx),
def_id: did,
});
fn is_doc_hidden(a: &clean::Attribute) -> bool {
match *a {
clean::List(ref name, ref inner) if name.as_slice() == "doc" => {
inner.iter().any(|a| {
match *a {
clean::Word(ref s) => s.as_slice() == "hidden",
_ => false,
}
})
}
_ => false
}
}
}
fn build_module(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Module {
let mut items = Vec::new();
fill_in(cx, tcx, did, &mut items);
return clean::Module {
items: items,
is_crate: false,
};
// FIXME: this doesn't handle reexports inside the module itself.
// Should they be handled?
fn fill_in(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId,
items: &mut Vec<clean::Item>) {
csearch::each_child_of_item(&tcx.sess.cstore, did, |def, _, vis| {
match def {
decoder::DlDef(def::DefForeignMod(did)) => {
fill_in(cx, tcx, did, items);
}
decoder::DlDef(def) if vis == ast::Public => {
match try_inline_def(cx, tcx, def) {
Some(i) => items.extend(i.into_iter()),
None => {}
}
}
decoder::DlDef(..) => {}
// All impls were inlined above
decoder::DlImpl(..) => {}
decoder::DlField => fail!("unimplemented field"),
}
});
}
}
fn build_static(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId,
mutable: bool) -> clean::Static {
clean::Static {
type_: ty::lookup_item_type(tcx, did).ty.clean(cx),
mutability: if mutable {clean::Mutable} else {clean::Immutable},
expr: "\n\n\n".to_string(), // trigger the "[definition]" links
}
}
| try_inline_def | identifier_name |
inline.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support for inlining external documentation into the current AST.
use syntax::ast;
use syntax::ast_util;
use syntax::attr::AttrMetaMethods;
use rustc::metadata::csearch;
use rustc::metadata::decoder;
use rustc::middle::def;
use rustc::middle::ty;
use rustc::middle::subst;
use rustc::middle::stability;
use core::DocContext;
use doctree;
use clean;
use super::Clean;
/// Attempt to inline the definition of a local node id into this AST.
///
/// This function will fetch the definition of the id specified, and if it is
/// from another crate it will attempt to inline the documentation from the
/// other crate into this crate.
///
/// This is primarily used for `pub use` statements which are, in general,
/// implementation details. Inlining the documentation should help provide a
/// better experience when reading the documentation in this use case.
///
/// The returned value is `None` if the `id` could not be inlined, and `Some`
/// of a vector of items if it was successfully expanded.
pub fn try_inline(cx: &DocContext, id: ast::NodeId, into: Option<ast::Ident>)
-> Option<Vec<clean::Item>> {
let tcx = match cx.tcx_opt() {
Some(tcx) => tcx,
None => return None,
};
let def = match tcx.def_map.borrow().find(&id) {
Some(def) => *def,
None => return None,
};
let did = def.def_id();
if ast_util::is_local(did) { return None }
try_inline_def(cx, tcx, def).map(|vec| {
vec.into_iter().map(|mut item| {
match into {
Some(into) if item.name.is_some() => {
item.name = Some(into.clean(cx));
}
_ => {}
}
item
}).collect()
})
}
fn try_inline_def(cx: &DocContext, tcx: &ty::ctxt,
def: def::Def) -> Option<Vec<clean::Item>> {
let mut ret = Vec::new();
let did = def.def_id();
let inner = match def {
def::DefTrait(did) => {
record_extern_fqn(cx, did, clean::TypeTrait);
clean::TraitItem(build_external_trait(cx, tcx, did))
}
def::DefFn(did, style, false) => {
// If this function is a tuple struct constructor, we just skip it
record_extern_fqn(cx, did, clean::TypeFunction);
clean::FunctionItem(build_external_function(cx, tcx, did, style))
}
def::DefStruct(did) => {
record_extern_fqn(cx, did, clean::TypeStruct);
ret.extend(build_impls(cx, tcx, did).into_iter());
clean::StructItem(build_struct(cx, tcx, did))
}
def::DefTy(did, false) => {
record_extern_fqn(cx, did, clean::TypeTypedef);
ret.extend(build_impls(cx, tcx, did).into_iter());
build_type(cx, tcx, did)
}
def::DefTy(did, true) => {
record_extern_fqn(cx, did, clean::TypeEnum);
ret.extend(build_impls(cx, tcx, did).into_iter());
build_type(cx, tcx, did)
}
// Assume that the enum type is reexported next to the variant, and
// variants don't show up in documentation specially.
def::DefVariant(..) => return Some(Vec::new()),
def::DefMod(did) => {
record_extern_fqn(cx, did, clean::TypeModule);
clean::ModuleItem(build_module(cx, tcx, did))
}
def::DefStatic(did, mtbl) => {
record_extern_fqn(cx, did, clean::TypeStatic);
clean::StaticItem(build_static(cx, tcx, did, mtbl))
}
_ => return None,
};
let fqn = csearch::get_item_path(tcx, did);
cx.inlined.borrow_mut().as_mut().unwrap().insert(did);
ret.push(clean::Item {
source: clean::Span::empty(),
name: Some(fqn.last().unwrap().to_string()),
attrs: load_attrs(cx, tcx, did),
inner: inner,
visibility: Some(ast::Public),
stability: stability::lookup(tcx, did).clean(cx),
def_id: did,
});
Some(ret)
}
pub fn load_attrs(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Vec<clean::Attribute> {
let mut attrs = Vec::new();
csearch::get_item_attrs(&tcx.sess.cstore, did, |v| {
attrs.extend(v.into_iter().map(|a| {
a.clean(cx)
}));
});
attrs
}
/// Record an external fully qualified name in the external_paths cache.
///
/// These names are used later on by HTML rendering to generate things like
/// source links back to the original item.
pub fn record_extern_fqn(cx: &DocContext, did: ast::DefId, kind: clean::TypeKind) {
match cx.tcx_opt() {
Some(tcx) => {
let fqn = csearch::get_item_path(tcx, did);
let fqn = fqn.into_iter().map(|i| i.to_string()).collect();
cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
}
None => {}
}
}
pub fn build_external_trait(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Trait {
let def = ty::lookup_trait_def(tcx, did);
let trait_items = ty::trait_items(tcx, did).clean(cx);
let provided = ty::provided_trait_methods(tcx, did);
let mut items = trait_items.into_iter().map(|trait_item| {
if provided.iter().any(|a| a.def_id == trait_item.def_id) {
clean::ProvidedMethod(trait_item)
} else {
clean::RequiredMethod(trait_item)
}
});
let trait_def = ty::lookup_trait_def(tcx, did);
let bounds = trait_def.bounds.clean(cx);
clean::Trait {
generics: (&def.generics, subst::TypeSpace).clean(cx),
items: items.collect(),
bounds: bounds,
}
}
fn build_external_function(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId,
style: ast::FnStyle) -> clean::Function {
let t = ty::lookup_item_type(tcx, did);
clean::Function {
decl: match ty::get(t.ty).sty {
ty::ty_bare_fn(ref f) => (did, &f.sig).clean(cx),
_ => fail!("bad function"),
},
generics: (&t.generics, subst::FnSpace).clean(cx),
fn_style: style,
}
}
fn build_struct(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::Struct {
use syntax::parse::token::special_idents::unnamed_field;
let t = ty::lookup_item_type(tcx, did);
let fields = ty::lookup_struct_fields(tcx, did);
clean::Struct {
struct_type: match fields.as_slice() {
[] => doctree::Unit,
[ref f] if f.name == unnamed_field.name => doctree::Newtype,
[ref f, ..] if f.name == unnamed_field.name => doctree::Tuple,
_ => doctree::Plain,
},
generics: (&t.generics, subst::TypeSpace).clean(cx),
fields: fields.clean(cx),
fields_stripped: false,
}
}
fn build_type(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::ItemEnum |
fn build_impls(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Vec<clean::Item> {
ty::populate_implementations_for_type_if_necessary(tcx, did);
let mut impls = Vec::new();
match tcx.inherent_impls.borrow().find(&did) {
None => {}
Some(i) => {
impls.extend(i.iter().map(|&did| { build_impl(cx, tcx, did) }));
}
}
// If this is the first time we've inlined something from this crate, then
// we inline *all* impls from the crate into this crate. Note that there's
// currently no way for us to filter this based on type, and we likely need
// many impls for a variety of reasons.
//
// Primarily, the impls will be used to populate the documentation for this
// type being inlined, but impls can also be used when generating
// documentation for primitives (no way to find those specifically).
if cx.populated_crate_impls.borrow_mut().insert(did.krate) {
csearch::each_top_level_item_of_crate(&tcx.sess.cstore,
did.krate,
|def, _, _| {
populate_impls(cx, tcx, def, &mut impls)
});
fn populate_impls(cx: &DocContext, tcx: &ty::ctxt,
def: decoder::DefLike,
impls: &mut Vec<Option<clean::Item>>) {
match def {
decoder::DlImpl(did) => impls.push(build_impl(cx, tcx, did)),
decoder::DlDef(def::DefMod(did)) => {
csearch::each_child_of_item(&tcx.sess.cstore,
did,
|def, _, _| {
populate_impls(cx, tcx, def, impls)
})
}
_ => {}
}
}
}
impls.into_iter().filter_map(|a| a).collect()
}
fn build_impl(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Option<clean::Item> {
if !cx.inlined.borrow_mut().as_mut().unwrap().insert(did) {
return None
}
let associated_trait = csearch::get_impl_trait(tcx, did);
// If this is an impl for a #[doc(hidden)] trait, be sure to not inline it.
match associated_trait {
Some(ref t) => {
let trait_attrs = load_attrs(cx, tcx, t.def_id);
if trait_attrs.iter().any(|a| is_doc_hidden(a)) {
return None
}
}
None => {}
}
let attrs = load_attrs(cx, tcx, did);
let ty = ty::lookup_item_type(tcx, did);
let trait_items = csearch::get_impl_items(&tcx.sess.cstore, did)
.iter()
.filter_map(|did| {
let did = did.def_id();
let impl_item = ty::impl_or_trait_item(tcx, did);
match impl_item {
ty::MethodTraitItem(method) => {
if method.vis != ast::Public && associated_trait.is_none() {
return None
}
let mut item = method.clean(cx);
item.inner = match item.inner.clone() {
clean::TyMethodItem(clean::TyMethod {
fn_style, decl, self_, generics
}) => {
clean::MethodItem(clean::Method {
fn_style: fn_style,
decl: decl,
self_: self_,
generics: generics,
})
}
_ => fail!("not a tymethod"),
};
Some(item)
}
ty::TypeTraitItem(_) => {
// FIXME(pcwalton): Implement.
None
}
}
}).collect();
return Some(clean::Item {
inner: clean::ImplItem(clean::Impl {
derived: clean::detect_derived(attrs.as_slice()),
trait_: associated_trait.clean(cx).map(|bound| {
match bound {
clean::TraitBound(ty) => ty,
clean::UnboxedFnBound(..) |
clean::RegionBound(..) => unreachable!(),
}
}),
for_: ty.ty.clean(cx),
generics: (&ty.generics, subst::TypeSpace).clean(cx),
items: trait_items,
}),
source: clean::Span::empty(),
name: None,
attrs: attrs,
visibility: Some(ast::Inherited),
stability: stability::lookup(tcx, did).clean(cx),
def_id: did,
});
fn is_doc_hidden(a: &clean::Attribute) -> bool {
match *a {
clean::List(ref name, ref inner) if name.as_slice() == "doc" => {
inner.iter().any(|a| {
match *a {
clean::Word(ref s) => s.as_slice() == "hidden",
_ => false,
}
})
}
_ => false
}
}
}
fn build_module(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Module {
let mut items = Vec::new();
fill_in(cx, tcx, did, &mut items);
return clean::Module {
items: items,
is_crate: false,
};
// FIXME: this doesn't handle reexports inside the module itself.
// Should they be handled?
fn fill_in(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId,
items: &mut Vec<clean::Item>) {
csearch::each_child_of_item(&tcx.sess.cstore, did, |def, _, vis| {
match def {
decoder::DlDef(def::DefForeignMod(did)) => {
fill_in(cx, tcx, did, items);
}
decoder::DlDef(def) if vis == ast::Public => {
match try_inline_def(cx, tcx, def) {
Some(i) => items.extend(i.into_iter()),
None => {}
}
}
decoder::DlDef(..) => {}
// All impls were inlined above
decoder::DlImpl(..) => {}
decoder::DlField => fail!("unimplemented field"),
}
});
}
}
fn build_static(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId,
mutable: bool) -> clean::Static {
clean::Static {
type_: ty::lookup_item_type(tcx, did).ty.clean(cx),
mutability: if mutable {clean::Mutable} else {clean::Immutable},
expr: "\n\n\n".to_string(), // trigger the "[definition]" links
}
}
| {
let t = ty::lookup_item_type(tcx, did);
match ty::get(t.ty).sty {
ty::ty_enum(edid, _) if !csearch::is_typedef(&tcx.sess.cstore, did) => {
return clean::EnumItem(clean::Enum {
generics: (&t.generics, subst::TypeSpace).clean(cx),
variants_stripped: false,
variants: ty::enum_variants(tcx, edid).clean(cx),
})
}
_ => {}
}
clean::TypedefItem(clean::Typedef {
type_: t.ty.clean(cx),
generics: (&t.generics, subst::TypeSpace).clean(cx),
})
} | identifier_body |
inline.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support for inlining external documentation into the current AST.
use syntax::ast;
use syntax::ast_util;
use syntax::attr::AttrMetaMethods;
use rustc::metadata::csearch;
use rustc::metadata::decoder;
use rustc::middle::def;
use rustc::middle::ty;
use rustc::middle::subst;
use rustc::middle::stability;
use core::DocContext;
use doctree;
use clean;
use super::Clean;
/// Attempt to inline the definition of a local node id into this AST.
///
/// This function will fetch the definition of the id specified, and if it is
/// from another crate it will attempt to inline the documentation from the
/// other crate into this crate.
///
/// This is primarily used for `pub use` statements which are, in general,
/// implementation details. Inlining the documentation should help provide a
/// better experience when reading the documentation in this use case.
///
/// The returned value is `None` if the `id` could not be inlined, and `Some`
/// of a vector of items if it was successfully expanded.
pub fn try_inline(cx: &DocContext, id: ast::NodeId, into: Option<ast::Ident>)
-> Option<Vec<clean::Item>> {
let tcx = match cx.tcx_opt() {
Some(tcx) => tcx,
None => return None,
};
let def = match tcx.def_map.borrow().find(&id) {
Some(def) => *def,
None => return None,
};
let did = def.def_id();
if ast_util::is_local(did) { return None }
try_inline_def(cx, tcx, def).map(|vec| {
vec.into_iter().map(|mut item| {
match into {
Some(into) if item.name.is_some() => {
item.name = Some(into.clean(cx));
}
_ => {}
}
item
}).collect()
})
}
fn try_inline_def(cx: &DocContext, tcx: &ty::ctxt,
def: def::Def) -> Option<Vec<clean::Item>> {
let mut ret = Vec::new();
let did = def.def_id();
let inner = match def {
def::DefTrait(did) => {
record_extern_fqn(cx, did, clean::TypeTrait);
clean::TraitItem(build_external_trait(cx, tcx, did))
}
def::DefFn(did, style, false) => {
// If this function is a tuple struct constructor, we just skip it
record_extern_fqn(cx, did, clean::TypeFunction);
clean::FunctionItem(build_external_function(cx, tcx, did, style))
}
def::DefStruct(did) => {
record_extern_fqn(cx, did, clean::TypeStruct);
ret.extend(build_impls(cx, tcx, did).into_iter());
clean::StructItem(build_struct(cx, tcx, did))
}
def::DefTy(did, false) => {
record_extern_fqn(cx, did, clean::TypeTypedef);
ret.extend(build_impls(cx, tcx, did).into_iter());
build_type(cx, tcx, did)
}
def::DefTy(did, true) => {
record_extern_fqn(cx, did, clean::TypeEnum);
ret.extend(build_impls(cx, tcx, did).into_iter());
build_type(cx, tcx, did)
}
// Assume that the enum type is reexported next to the variant, and
// variants don't show up in documentation specially.
def::DefVariant(..) => return Some(Vec::new()),
def::DefMod(did) => {
record_extern_fqn(cx, did, clean::TypeModule);
clean::ModuleItem(build_module(cx, tcx, did))
}
def::DefStatic(did, mtbl) => {
record_extern_fqn(cx, did, clean::TypeStatic);
clean::StaticItem(build_static(cx, tcx, did, mtbl))
}
_ => return None,
};
let fqn = csearch::get_item_path(tcx, did);
cx.inlined.borrow_mut().as_mut().unwrap().insert(did);
ret.push(clean::Item {
source: clean::Span::empty(),
name: Some(fqn.last().unwrap().to_string()),
attrs: load_attrs(cx, tcx, did),
inner: inner,
visibility: Some(ast::Public),
stability: stability::lookup(tcx, did).clean(cx),
def_id: did,
});
Some(ret)
}
pub fn load_attrs(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Vec<clean::Attribute> {
let mut attrs = Vec::new();
csearch::get_item_attrs(&tcx.sess.cstore, did, |v| {
attrs.extend(v.into_iter().map(|a| {
a.clean(cx)
}));
});
attrs
}
/// Record an external fully qualified name in the external_paths cache.
///
/// These names are used later on by HTML rendering to generate things like
/// source links back to the original item.
pub fn record_extern_fqn(cx: &DocContext, did: ast::DefId, kind: clean::TypeKind) {
match cx.tcx_opt() {
Some(tcx) => {
let fqn = csearch::get_item_path(tcx, did);
let fqn = fqn.into_iter().map(|i| i.to_string()).collect();
cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
}
None => {}
}
}
pub fn build_external_trait(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Trait {
let def = ty::lookup_trait_def(tcx, did);
let trait_items = ty::trait_items(tcx, did).clean(cx);
let provided = ty::provided_trait_methods(tcx, did);
let mut items = trait_items.into_iter().map(|trait_item| {
if provided.iter().any(|a| a.def_id == trait_item.def_id) {
clean::ProvidedMethod(trait_item)
} else {
clean::RequiredMethod(trait_item)
}
});
let trait_def = ty::lookup_trait_def(tcx, did);
let bounds = trait_def.bounds.clean(cx);
clean::Trait {
generics: (&def.generics, subst::TypeSpace).clean(cx),
items: items.collect(),
bounds: bounds,
}
}
fn build_external_function(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId,
style: ast::FnStyle) -> clean::Function {
let t = ty::lookup_item_type(tcx, did);
clean::Function {
decl: match ty::get(t.ty).sty {
ty::ty_bare_fn(ref f) => (did, &f.sig).clean(cx),
_ => fail!("bad function"),
},
generics: (&t.generics, subst::FnSpace).clean(cx),
fn_style: style,
}
}
fn build_struct(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::Struct {
use syntax::parse::token::special_idents::unnamed_field;
let t = ty::lookup_item_type(tcx, did);
let fields = ty::lookup_struct_fields(tcx, did);
clean::Struct {
struct_type: match fields.as_slice() {
[] => doctree::Unit,
[ref f] if f.name == unnamed_field.name => doctree::Newtype,
[ref f, ..] if f.name == unnamed_field.name => doctree::Tuple,
_ => doctree::Plain,
},
generics: (&t.generics, subst::TypeSpace).clean(cx),
fields: fields.clean(cx),
fields_stripped: false,
}
}
fn build_type(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::ItemEnum {
let t = ty::lookup_item_type(tcx, did);
match ty::get(t.ty).sty {
ty::ty_enum(edid, _) if !csearch::is_typedef(&tcx.sess.cstore, did) => {
return clean::EnumItem(clean::Enum {
generics: (&t.generics, subst::TypeSpace).clean(cx),
variants_stripped: false,
variants: ty::enum_variants(tcx, edid).clean(cx),
})
}
_ => {}
}
clean::TypedefItem(clean::Typedef {
type_: t.ty.clean(cx),
generics: (&t.generics, subst::TypeSpace).clean(cx),
})
}
fn build_impls(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Vec<clean::Item> {
ty::populate_implementations_for_type_if_necessary(tcx, did);
let mut impls = Vec::new();
match tcx.inherent_impls.borrow().find(&did) {
None => {}
Some(i) => {
impls.extend(i.iter().map(|&did| { build_impl(cx, tcx, did) }));
}
}
// If this is the first time we've inlined something from this crate, then
// we inline *all* impls from the crate into this crate. Note that there's
// currently no way for us to filter this based on type, and we likely need
// many impls for a variety of reasons.
//
// Primarily, the impls will be used to populate the documentation for this
// type being inlined, but impls can also be used when generating
// documentation for primitives (no way to find those specifically).
if cx.populated_crate_impls.borrow_mut().insert(did.krate) {
csearch::each_top_level_item_of_crate(&tcx.sess.cstore,
did.krate,
|def, _, _| {
populate_impls(cx, tcx, def, &mut impls)
});
fn populate_impls(cx: &DocContext, tcx: &ty::ctxt,
def: decoder::DefLike,
impls: &mut Vec<Option<clean::Item>>) {
match def {
decoder::DlImpl(did) => impls.push(build_impl(cx, tcx, did)),
decoder::DlDef(def::DefMod(did)) => {
csearch::each_child_of_item(&tcx.sess.cstore,
did,
|def, _, _| {
populate_impls(cx, tcx, def, impls)
})
}
_ => {}
}
}
}
impls.into_iter().filter_map(|a| a).collect()
}
fn build_impl(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Option<clean::Item> {
if !cx.inlined.borrow_mut().as_mut().unwrap().insert(did) {
return None
}
let associated_trait = csearch::get_impl_trait(tcx, did);
// If this is an impl for a #[doc(hidden)] trait, be sure to not inline it.
match associated_trait {
Some(ref t) => {
let trait_attrs = load_attrs(cx, tcx, t.def_id);
if trait_attrs.iter().any(|a| is_doc_hidden(a)) {
return None
}
}
None => {}
}
let attrs = load_attrs(cx, tcx, did);
let ty = ty::lookup_item_type(tcx, did);
let trait_items = csearch::get_impl_items(&tcx.sess.cstore, did)
.iter()
.filter_map(|did| {
let did = did.def_id();
let impl_item = ty::impl_or_trait_item(tcx, did);
match impl_item {
ty::MethodTraitItem(method) => {
if method.vis != ast::Public && associated_trait.is_none() {
return None
}
let mut item = method.clean(cx);
item.inner = match item.inner.clone() {
clean::TyMethodItem(clean::TyMethod {
fn_style, decl, self_, generics
}) => |
_ => fail!("not a tymethod"),
};
Some(item)
}
ty::TypeTraitItem(_) => {
// FIXME(pcwalton): Implement.
None
}
}
}).collect();
return Some(clean::Item {
inner: clean::ImplItem(clean::Impl {
derived: clean::detect_derived(attrs.as_slice()),
trait_: associated_trait.clean(cx).map(|bound| {
match bound {
clean::TraitBound(ty) => ty,
clean::UnboxedFnBound(..) |
clean::RegionBound(..) => unreachable!(),
}
}),
for_: ty.ty.clean(cx),
generics: (&ty.generics, subst::TypeSpace).clean(cx),
items: trait_items,
}),
source: clean::Span::empty(),
name: None,
attrs: attrs,
visibility: Some(ast::Inherited),
stability: stability::lookup(tcx, did).clean(cx),
def_id: did,
});
fn is_doc_hidden(a: &clean::Attribute) -> bool {
match *a {
clean::List(ref name, ref inner) if name.as_slice() == "doc" => {
inner.iter().any(|a| {
match *a {
clean::Word(ref s) => s.as_slice() == "hidden",
_ => false,
}
})
}
_ => false
}
}
}
fn build_module(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Module {
let mut items = Vec::new();
fill_in(cx, tcx, did, &mut items);
return clean::Module {
items: items,
is_crate: false,
};
// FIXME: this doesn't handle reexports inside the module itself.
// Should they be handled?
fn fill_in(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId,
items: &mut Vec<clean::Item>) {
csearch::each_child_of_item(&tcx.sess.cstore, did, |def, _, vis| {
match def {
decoder::DlDef(def::DefForeignMod(did)) => {
fill_in(cx, tcx, did, items);
}
decoder::DlDef(def) if vis == ast::Public => {
match try_inline_def(cx, tcx, def) {
Some(i) => items.extend(i.into_iter()),
None => {}
}
}
decoder::DlDef(..) => {}
// All impls were inlined above
decoder::DlImpl(..) => {}
decoder::DlField => fail!("unimplemented field"),
}
});
}
}
fn build_static(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId,
mutable: bool) -> clean::Static {
clean::Static {
type_: ty::lookup_item_type(tcx, did).ty.clean(cx),
mutability: if mutable {clean::Mutable} else {clean::Immutable},
expr: "\n\n\n".to_string(), // trigger the "[definition]" links
}
}
| {
clean::MethodItem(clean::Method {
fn_style: fn_style,
decl: decl,
self_: self_,
generics: generics,
})
} | conditional_block |
inline.rs | // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support for inlining external documentation into the current AST.
use syntax::ast;
use syntax::ast_util;
use syntax::attr::AttrMetaMethods;
use rustc::metadata::csearch;
use rustc::metadata::decoder;
use rustc::middle::def;
use rustc::middle::ty;
use rustc::middle::subst;
use rustc::middle::stability;
use core::DocContext;
use doctree;
use clean;
use super::Clean;
/// Attempt to inline the definition of a local node id into this AST.
///
/// This function will fetch the definition of the id specified, and if it is
/// from another crate it will attempt to inline the documentation from the
/// other crate into this crate.
///
/// This is primarily used for `pub use` statements which are, in general,
/// implementation details. Inlining the documentation should help provide a
/// better experience when reading the documentation in this use case.
///
/// The returned value is `None` if the `id` could not be inlined, and `Some`
/// of a vector of items if it was successfully expanded.
pub fn try_inline(cx: &DocContext, id: ast::NodeId, into: Option<ast::Ident>)
-> Option<Vec<clean::Item>> {
let tcx = match cx.tcx_opt() {
Some(tcx) => tcx,
None => return None,
};
let def = match tcx.def_map.borrow().find(&id) {
Some(def) => *def,
None => return None,
};
let did = def.def_id();
if ast_util::is_local(did) { return None }
try_inline_def(cx, tcx, def).map(|vec| {
vec.into_iter().map(|mut item| {
match into {
Some(into) if item.name.is_some() => {
item.name = Some(into.clean(cx));
}
_ => {}
}
item
}).collect()
})
}
fn try_inline_def(cx: &DocContext, tcx: &ty::ctxt,
def: def::Def) -> Option<Vec<clean::Item>> {
let mut ret = Vec::new();
let did = def.def_id();
let inner = match def {
def::DefTrait(did) => {
record_extern_fqn(cx, did, clean::TypeTrait);
clean::TraitItem(build_external_trait(cx, tcx, did))
}
def::DefFn(did, style, false) => {
// If this function is a tuple struct constructor, we just skip it
record_extern_fqn(cx, did, clean::TypeFunction);
clean::FunctionItem(build_external_function(cx, tcx, did, style))
}
def::DefStruct(did) => {
record_extern_fqn(cx, did, clean::TypeStruct);
ret.extend(build_impls(cx, tcx, did).into_iter());
clean::StructItem(build_struct(cx, tcx, did))
}
def::DefTy(did, false) => {
record_extern_fqn(cx, did, clean::TypeTypedef);
ret.extend(build_impls(cx, tcx, did).into_iter());
build_type(cx, tcx, did)
}
def::DefTy(did, true) => {
record_extern_fqn(cx, did, clean::TypeEnum);
ret.extend(build_impls(cx, tcx, did).into_iter());
build_type(cx, tcx, did)
}
// Assume that the enum type is reexported next to the variant, and
// variants don't show up in documentation specially.
def::DefVariant(..) => return Some(Vec::new()),
def::DefMod(did) => {
record_extern_fqn(cx, did, clean::TypeModule);
clean::ModuleItem(build_module(cx, tcx, did))
}
def::DefStatic(did, mtbl) => {
record_extern_fqn(cx, did, clean::TypeStatic);
clean::StaticItem(build_static(cx, tcx, did, mtbl))
}
_ => return None,
};
let fqn = csearch::get_item_path(tcx, did);
cx.inlined.borrow_mut().as_mut().unwrap().insert(did);
ret.push(clean::Item {
source: clean::Span::empty(),
name: Some(fqn.last().unwrap().to_string()),
attrs: load_attrs(cx, tcx, did),
inner: inner,
visibility: Some(ast::Public),
stability: stability::lookup(tcx, did).clean(cx),
def_id: did,
});
Some(ret)
}
pub fn load_attrs(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Vec<clean::Attribute> {
let mut attrs = Vec::new();
csearch::get_item_attrs(&tcx.sess.cstore, did, |v| {
attrs.extend(v.into_iter().map(|a| {
a.clean(cx)
}));
});
attrs
}
/// Record an external fully qualified name in the external_paths cache.
///
/// These names are used later on by HTML rendering to generate things like
/// source links back to the original item.
pub fn record_extern_fqn(cx: &DocContext, did: ast::DefId, kind: clean::TypeKind) {
match cx.tcx_opt() {
Some(tcx) => {
let fqn = csearch::get_item_path(tcx, did);
let fqn = fqn.into_iter().map(|i| i.to_string()).collect();
cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
}
None => {}
}
}
pub fn build_external_trait(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Trait {
let def = ty::lookup_trait_def(tcx, did);
let trait_items = ty::trait_items(tcx, did).clean(cx);
let provided = ty::provided_trait_methods(tcx, did);
let mut items = trait_items.into_iter().map(|trait_item| {
if provided.iter().any(|a| a.def_id == trait_item.def_id) {
clean::ProvidedMethod(trait_item)
} else {
clean::RequiredMethod(trait_item)
}
});
let trait_def = ty::lookup_trait_def(tcx, did);
let bounds = trait_def.bounds.clean(cx);
clean::Trait {
generics: (&def.generics, subst::TypeSpace).clean(cx),
items: items.collect(),
bounds: bounds,
}
}
fn build_external_function(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId,
style: ast::FnStyle) -> clean::Function {
let t = ty::lookup_item_type(tcx, did);
clean::Function {
decl: match ty::get(t.ty).sty {
ty::ty_bare_fn(ref f) => (did, &f.sig).clean(cx),
_ => fail!("bad function"),
},
generics: (&t.generics, subst::FnSpace).clean(cx),
fn_style: style,
}
}
fn build_struct(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::Struct {
use syntax::parse::token::special_idents::unnamed_field;
let t = ty::lookup_item_type(tcx, did);
let fields = ty::lookup_struct_fields(tcx, did);
clean::Struct {
struct_type: match fields.as_slice() {
[] => doctree::Unit,
[ref f] if f.name == unnamed_field.name => doctree::Newtype,
[ref f, ..] if f.name == unnamed_field.name => doctree::Tuple,
_ => doctree::Plain,
},
generics: (&t.generics, subst::TypeSpace).clean(cx),
fields: fields.clean(cx),
fields_stripped: false,
}
}
fn build_type(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::ItemEnum {
let t = ty::lookup_item_type(tcx, did);
match ty::get(t.ty).sty {
ty::ty_enum(edid, _) if !csearch::is_typedef(&tcx.sess.cstore, did) => {
return clean::EnumItem(clean::Enum {
generics: (&t.generics, subst::TypeSpace).clean(cx),
variants_stripped: false,
variants: ty::enum_variants(tcx, edid).clean(cx),
})
}
_ => {}
}
clean::TypedefItem(clean::Typedef {
type_: t.ty.clean(cx),
generics: (&t.generics, subst::TypeSpace).clean(cx),
})
}
fn build_impls(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Vec<clean::Item> {
ty::populate_implementations_for_type_if_necessary(tcx, did);
let mut impls = Vec::new();
match tcx.inherent_impls.borrow().find(&did) {
None => {}
Some(i) => {
impls.extend(i.iter().map(|&did| { build_impl(cx, tcx, did) }));
}
}
// If this is the first time we've inlined something from this crate, then
// we inline *all* impls from the crate into this crate. Note that there's
// currently no way for us to filter this based on type, and we likely need
// many impls for a variety of reasons.
//
// Primarily, the impls will be used to populate the documentation for this
// type being inlined, but impls can also be used when generating
// documentation for primitives (no way to find those specifically).
if cx.populated_crate_impls.borrow_mut().insert(did.krate) {
csearch::each_top_level_item_of_crate(&tcx.sess.cstore,
did.krate,
|def, _, _| {
populate_impls(cx, tcx, def, &mut impls)
});
fn populate_impls(cx: &DocContext, tcx: &ty::ctxt,
def: decoder::DefLike,
impls: &mut Vec<Option<clean::Item>>) {
match def {
decoder::DlImpl(did) => impls.push(build_impl(cx, tcx, did)),
decoder::DlDef(def::DefMod(did)) => {
csearch::each_child_of_item(&tcx.sess.cstore,
did,
|def, _, _| {
populate_impls(cx, tcx, def, impls)
})
}
_ => {}
}
}
}
impls.into_iter().filter_map(|a| a).collect()
}
fn build_impl(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Option<clean::Item> {
if !cx.inlined.borrow_mut().as_mut().unwrap().insert(did) {
return None
}
let associated_trait = csearch::get_impl_trait(tcx, did);
// If this is an impl for a #[doc(hidden)] trait, be sure to not inline it.
match associated_trait {
Some(ref t) => {
let trait_attrs = load_attrs(cx, tcx, t.def_id);
if trait_attrs.iter().any(|a| is_doc_hidden(a)) {
return None
}
}
None => {}
}
let attrs = load_attrs(cx, tcx, did);
let ty = ty::lookup_item_type(tcx, did);
let trait_items = csearch::get_impl_items(&tcx.sess.cstore, did)
.iter()
.filter_map(|did| {
let did = did.def_id();
let impl_item = ty::impl_or_trait_item(tcx, did);
match impl_item {
ty::MethodTraitItem(method) => {
if method.vis != ast::Public && associated_trait.is_none() {
return None
}
let mut item = method.clean(cx);
item.inner = match item.inner.clone() {
clean::TyMethodItem(clean::TyMethod {
fn_style, decl, self_, generics
}) => {
clean::MethodItem(clean::Method {
fn_style: fn_style,
decl: decl,
self_: self_,
generics: generics,
})
}
_ => fail!("not a tymethod"), | ty::TypeTraitItem(_) => {
// FIXME(pcwalton): Implement.
None
}
}
}).collect();
return Some(clean::Item {
inner: clean::ImplItem(clean::Impl {
derived: clean::detect_derived(attrs.as_slice()),
trait_: associated_trait.clean(cx).map(|bound| {
match bound {
clean::TraitBound(ty) => ty,
clean::UnboxedFnBound(..) |
clean::RegionBound(..) => unreachable!(),
}
}),
for_: ty.ty.clean(cx),
generics: (&ty.generics, subst::TypeSpace).clean(cx),
items: trait_items,
}),
source: clean::Span::empty(),
name: None,
attrs: attrs,
visibility: Some(ast::Inherited),
stability: stability::lookup(tcx, did).clean(cx),
def_id: did,
});
fn is_doc_hidden(a: &clean::Attribute) -> bool {
match *a {
clean::List(ref name, ref inner) if name.as_slice() == "doc" => {
inner.iter().any(|a| {
match *a {
clean::Word(ref s) => s.as_slice() == "hidden",
_ => false,
}
})
}
_ => false
}
}
}
fn build_module(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Module {
let mut items = Vec::new();
fill_in(cx, tcx, did, &mut items);
return clean::Module {
items: items,
is_crate: false,
};
// FIXME: this doesn't handle reexports inside the module itself.
// Should they be handled?
fn fill_in(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId,
items: &mut Vec<clean::Item>) {
csearch::each_child_of_item(&tcx.sess.cstore, did, |def, _, vis| {
match def {
decoder::DlDef(def::DefForeignMod(did)) => {
fill_in(cx, tcx, did, items);
}
decoder::DlDef(def) if vis == ast::Public => {
match try_inline_def(cx, tcx, def) {
Some(i) => items.extend(i.into_iter()),
None => {}
}
}
decoder::DlDef(..) => {}
// All impls were inlined above
decoder::DlImpl(..) => {}
decoder::DlField => fail!("unimplemented field"),
}
});
}
}
fn build_static(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId,
mutable: bool) -> clean::Static {
clean::Static {
type_: ty::lookup_item_type(tcx, did).ty.clean(cx),
mutability: if mutable {clean::Mutable} else {clean::Immutable},
expr: "\n\n\n".to_string(), // trigger the "[definition]" links
}
} | };
Some(item)
} | random_line_split |
misc.rs | /// Module for miscellaneous instructions
use jeebie::core::cpu::CPU;
use jeebie::core::registers::Register8::*;
use jeebie::core::registers::Register16::*;
// 'NOP' 00 4
pub fn nop(cpu: &mut CPU) -> i32 { 4 }
// 'SWAP A' CB 37 8
pub fn SWAP_a(cpu: &mut CPU) -> i32 {
cpu.compute_swap(A);
8
}
// 'SWAP B' CB 30 8
pub fn SWAP_b(cpu: &mut CPU) -> i32 { | // 'SWAP C' CB 31 8
pub fn SWAP_c(cpu: &mut CPU) -> i32 {
cpu.compute_swap(C);
8
}
// 'SWAP D' CB 32 8
pub fn SWAP_d(cpu: &mut CPU) -> i32 {
cpu.compute_swap(D);
8
}
// 'SWAP E' CB 33 8
pub fn SWAP_e(cpu: &mut CPU) -> i32 {
cpu.compute_swap(E);
8
}
// 'SWAP H' CB 34 8
pub fn SWAP_h(cpu: &mut CPU) -> i32 {
cpu.compute_swap(H);
8
}
// 'SWAP L' CB 35 8
pub fn SWAP_l(cpu: &mut CPU) -> i32 {
cpu.compute_swap(L);
8
}
// 'SWAP (HL)' CB 36 16
pub fn SWAP_hl(cpu: &mut CPU) -> i32 {
cpu.compute_swap(RegisterAddress(HL));
16
}
// 'EI' FB 4
pub fn EI(cpu: &mut CPU) -> i32 {
cpu.interrupts_enabled = true;
4
}
// 'DI' F3 4
pub fn DI(cpu: &mut CPU) -> i32 {
cpu.interrupts_enabled = false;
4
} | cpu.compute_swap(B);
8
}
| random_line_split |
misc.rs | /// Module for miscellaneous instructions
use jeebie::core::cpu::CPU;
use jeebie::core::registers::Register8::*;
use jeebie::core::registers::Register16::*;
// 'NOP' 00 4
pub fn nop(cpu: &mut CPU) -> i32 { 4 }
// 'SWAP A' CB 37 8
pub fn SWAP_a(cpu: &mut CPU) -> i32 {
cpu.compute_swap(A);
8
}
// 'SWAP B' CB 30 8
pub fn SWAP_b(cpu: &mut CPU) -> i32 {
cpu.compute_swap(B);
8
}
// 'SWAP C' CB 31 8
pub fn SWAP_c(cpu: &mut CPU) -> i32 {
cpu.compute_swap(C);
8
}
// 'SWAP D' CB 32 8
pub fn SWAP_d(cpu: &mut CPU) -> i32 {
cpu.compute_swap(D);
8
}
// 'SWAP E' CB 33 8
pub fn SWAP_e(cpu: &mut CPU) -> i32 {
cpu.compute_swap(E);
8
}
// 'SWAP H' CB 34 8
pub fn SWAP_h(cpu: &mut CPU) -> i32 {
cpu.compute_swap(H);
8
}
// 'SWAP L' CB 35 8
pub fn SWAP_l(cpu: &mut CPU) -> i32 {
cpu.compute_swap(L);
8
}
// 'SWAP (HL)' CB 36 16
pub fn | (cpu: &mut CPU) -> i32 {
cpu.compute_swap(RegisterAddress(HL));
16
}
// 'EI' FB 4
pub fn EI(cpu: &mut CPU) -> i32 {
cpu.interrupts_enabled = true;
4
}
// 'DI' F3 4
pub fn DI(cpu: &mut CPU) -> i32 {
cpu.interrupts_enabled = false;
4
} | SWAP_hl | identifier_name |
misc.rs | /// Module for miscellaneous instructions
use jeebie::core::cpu::CPU;
use jeebie::core::registers::Register8::*;
use jeebie::core::registers::Register16::*;
// 'NOP' 00 4
pub fn nop(cpu: &mut CPU) -> i32 { 4 }
// 'SWAP A' CB 37 8
pub fn SWAP_a(cpu: &mut CPU) -> i32 {
cpu.compute_swap(A);
8
}
// 'SWAP B' CB 30 8
pub fn SWAP_b(cpu: &mut CPU) -> i32 {
cpu.compute_swap(B);
8
}
// 'SWAP C' CB 31 8
pub fn SWAP_c(cpu: &mut CPU) -> i32 {
cpu.compute_swap(C);
8
}
// 'SWAP D' CB 32 8
pub fn SWAP_d(cpu: &mut CPU) -> i32 {
cpu.compute_swap(D);
8
}
// 'SWAP E' CB 33 8
pub fn SWAP_e(cpu: &mut CPU) -> i32 {
cpu.compute_swap(E);
8
}
// 'SWAP H' CB 34 8
pub fn SWAP_h(cpu: &mut CPU) -> i32 {
cpu.compute_swap(H);
8
}
// 'SWAP L' CB 35 8
pub fn SWAP_l(cpu: &mut CPU) -> i32 |
// 'SWAP (HL)' CB 36 16
pub fn SWAP_hl(cpu: &mut CPU) -> i32 {
cpu.compute_swap(RegisterAddress(HL));
16
}
// 'EI' FB 4
pub fn EI(cpu: &mut CPU) -> i32 {
cpu.interrupts_enabled = true;
4
}
// 'DI' F3 4
pub fn DI(cpu: &mut CPU) -> i32 {
cpu.interrupts_enabled = false;
4
} | {
cpu.compute_swap(L);
8
} | identifier_body |
vec.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your |
// -*- rust -*-
pub fn main() {
let v: ~[int] = ~[10, 20];
assert!((v[0] == 10));
assert!((v[1] == 20));
let mut x: int = 0;
assert!((v[x] == 10));
assert!((v[x + 1] == 20));
x = x + 1;
assert!((v[x] == 20));
assert!((v[x - 1] == 10));
} | // option. This file may not be copied, modified, or distributed
// except according to those terms.
| random_line_split |
vec.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// -*- rust -*-
pub fn main() | {
let v: ~[int] = ~[10, 20];
assert!((v[0] == 10));
assert!((v[1] == 20));
let mut x: int = 0;
assert!((v[x] == 10));
assert!((v[x + 1] == 20));
x = x + 1;
assert!((v[x] == 20));
assert!((v[x - 1] == 10));
} | identifier_body |
|
vec.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// -*- rust -*-
pub fn | () {
let v: ~[int] = ~[10, 20];
assert!((v[0] == 10));
assert!((v[1] == 20));
let mut x: int = 0;
assert!((v[x] == 10));
assert!((v[x + 1] == 20));
x = x + 1;
assert!((v[x] == 20));
assert!((v[x - 1] == 10));
}
| main | identifier_name |
usage.rs | // Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
macro_rules! println_stderr(
($($arg:tt)*) => { {
let r = writeln!(&mut ::std::io::stderr(), $($arg)*);
r.expect("failed printing to stderr");
} }
);
macro_rules! otry {
($e: expr) => (
match $e {
Some(ref v) => v,
None => {
return None;
}
}
)
}
macro_rules! usage {
(
{
$(
$field_a:ident : $typ_a:ty,
)*
}
{
$(
$field:ident : $typ:ty = $default:expr, or $from_config:expr,
)*
}
{
$(
$field_s:ident : $typ_s:ty, display $default_s:expr, or $from_config_s:expr,
)*
}
) => {
use toml;
use std::{fs, io, process};
use std::io::{Read, Write};
use util::version;
use docopt::{Docopt, Error as DocoptError};
use helpers::replace_home;
use rustc_serialize;
#[derive(Debug)]
pub enum ArgsError {
Docopt(DocoptError),
Parsing(Vec<toml::ParserError>),
Decode(toml::DecodeError),
Config(String, io::Error),
UnknownFields(String),
}
impl ArgsError {
pub fn exit(self) ->! {
match self {
ArgsError::Docopt(e) => e.exit(),
ArgsError::Parsing(errors) => {
println_stderr!("There is an error in config file.");
for e in &errors {
println_stderr!("{}", e);
}
process::exit(2)
},
ArgsError::Decode(e) => {
println_stderr!("You might have supplied invalid parameters in config file.");
println_stderr!("{}", e);
process::exit(2)
},
ArgsError::Config(path, e) => {
println_stderr!("There was an error reading your config file at: {}", path);
println_stderr!("{}", e);
process::exit(2)
},
ArgsError::UnknownFields(fields) => {
println_stderr!("You have some extra fields in your config file:");
println_stderr!("{}", fields);
process::exit(2)
}
}
}
}
impl From<DocoptError> for ArgsError {
fn from(e: DocoptError) -> Self { ArgsError::Docopt(e) }
}
impl From<toml::DecodeError> for ArgsError {
fn from(e: toml::DecodeError) -> Self { ArgsError::Decode(e) }
}
#[derive(Debug, PartialEq)]
pub struct Args {
$(
pub $field_a: $typ_a,
)*
$(
pub $field: $typ,
)*
$(
pub $field_s: $typ_s,
)*
}
impl Default for Args {
fn default() -> Self {
Args {
$(
$field_a: Default::default(),
)*
$(
$field: $default.into(),
)*
$(
$field_s: Default::default(),
)*
}
}
}
#[derive(Default, Debug, PartialEq, Clone, RustcDecodable)]
struct RawArgs {
$(
$field_a: $typ_a,
)*
$(
$field: Option<$typ>,
)*
$(
$field_s: Option<$typ_s>,
)*
}
impl Args {
pub fn parse<S: AsRef<str>>(command: &[S]) -> Result<Self, ArgsError> {
let raw_args = RawArgs::parse(command)?;
// Skip loading config file if no_config flag is specified
if raw_args.flag_no_config {
return Ok(raw_args.into_args(Config::default()));
}
| let config = match (fs::File::open(&config_file), raw_args.flag_config.is_some()) {
// Load config file
(Ok(mut file), _) => {
println_stderr!("Loading config file from {}", &config_file);
let mut config = String::new();
file.read_to_string(&mut config).map_err(|e| ArgsError::Config(config_file, e))?;
Self::parse_config(&config)?
},
// Don't display error in case default config cannot be loaded.
(Err(_), false) => Config::default(),
// Config set from CLI (fail with error)
(Err(e), true) => {
return Err(ArgsError::Config(config_file, e));
},
};
Ok(raw_args.into_args(config))
}
#[cfg(test)]
pub fn parse_without_config<S: AsRef<str>>(command: &[S]) -> Result<Self, ArgsError> {
Self::parse_with_config(command, Config::default())
}
#[cfg(test)]
fn parse_with_config<S: AsRef<str>>(command: &[S], config: Config) -> Result<Self, ArgsError> {
RawArgs::parse(command).map(|raw| raw.into_args(config)).map_err(ArgsError::Docopt)
}
fn parse_config(config: &str) -> Result<Config, ArgsError> {
let mut value_parser = toml::Parser::new(&config);
match value_parser.parse() {
Some(value) => {
let mut decoder = toml::Decoder::new(toml::Value::Table(value));
let result = rustc_serialize::Decodable::decode(&mut decoder);
match (result, decoder.toml) {
(Err(e), _) => Err(e.into()),
(_, Some(toml)) => Err(ArgsError::UnknownFields(toml::encode_str(&toml))),
(Ok(config), None) => Ok(config),
}
},
None => Err(ArgsError::Parsing(value_parser.errors)),
}
}
pub fn print_version() -> String {
format!(include_str!("./version.txt"), version())
}
}
impl RawArgs {
fn into_args(self, config: Config) -> Args {
let mut args = Args::default();
$(
args.$field_a = self.$field_a;
)*
$(
args.$field = self.$field.or_else(|| $from_config(&config)).unwrap_or_else(|| $default.into());
)*
$(
args.$field_s = self.$field_s.or_else(|| $from_config_s(&config)).unwrap_or(None);
)*
args
}
pub fn parse<S: AsRef<str>>(command: &[S]) -> Result<Self, DocoptError> {
Docopt::new(Self::usage()).and_then(|d| d.argv(command).decode())
}
fn usage() -> String {
format!(
include_str!("./usage.txt"),
$(
$field={ let v: $typ = $default.into(); v },
// Uncomment this to debug
// "named argument never used" error
// $field = $default,
)*
$(
$field_s = $default_s,
)*
)
}
}
};
} | let config_file = raw_args.flag_config.clone().unwrap_or_else(|| raw_args.clone().into_args(Config::default()).flag_config);
let config_file = replace_home(&::dir::default_data_path(), &config_file); | random_line_split |
lib.rs | //! # RACC -- Rust Another Compiler-Compiler
//!
//! This is a port of Berkeley YACC to Rust. It runs as a procedural macro, and so allows you to
//! define grammars directly in Rust source code, rather than calling an external tool or writing
//! a `build.rs` script.
//!
//! # How to write a grammar
//!
//! Here is a very brief example of how to use RACC. This program evaluates a very limited class
//! of numeric expressions.
//!
//! In `Cargo.toml`:
//!
//! ```toml,ignore
//! racc = "0.1.0"
//! ```
//!
//! In your code:
//!
//! ```rust,ignore
//!
//! racc::grammar! {
//! uint ctx; // application context; not used in this example
//! i32; // the type of values in the value stack, i.e. %union
//!
//! // This is the list of tokens defined for your grammar.
//! // RACC will generate named constants using these names; use those constants
//! // when calling push_token().
//! NUM; PLUS; MINUS; LPAREN; RPAREN;
//!
//! // Define the rules of your language. The first rule implicitly defines the goal symbol.
//! // Note the presence of '=x' in the rule definitions. These are name bindings, which RACC
//! // uses in order to allow your code blocks (which are in {... } braces) to access the
//! // values for each symbol. The values come from the value stack in the parser state machine.
//! // When you call push_token(), you provide both the token code and the "value" for that token.
//!
//! Expr : NUM=x { x };
//!
//! Expr : LPAREN Expr=x RPAREN { x };
//!
//! Expr : Expr=left PLUS Expr=right {
//! // You can put arbitrary code here.
//! println!("evaluating: {} + {}", left, right);
//!
//! // The value of the action block is used as the
//! // value of the rule (reduction). Note the absence
//! // of a semi-colon here.
//! left + right
//! };
//!
//! Expr : Expr=left MINUS Expr=right {
//! println!("evaluating: {} - {}", left, right);
//! left - right
//! };
//! }
//!
//! fn main() {
//! // The tokens in our input, and their numeric values.
//! let tokens = vec![
//! (LPAREN, -1),
//! (NUM, 50),
//! (PLUS, -1),
//! (NUM, 25),
//! (RPAREN, -1),
//! (MINUS, -1),
//! (NUM, 10)
//! ];
//!
//! // Create a parser.
//! let mut parser = new_parser();
//!
//! let mut ctx: uint = 0; // App context; not used in this example.
//!
//! for &(token, value) in tokens.iter() {
//! parser.push_token(&mut ctx, token, value);
//! }
//!
//! match parser.finish() {
//! FinishParseResult::Accept(value) => println!("Accepted: {}", value),
//! FinishParseResult::SyntaxError => println!("Syntax error")
//! }
//! }
//! ```
//!
//! ## Advancing the parser state machine
//!
//! Berkeley YACC generates a `yyparse` function, as the primary entry point to the parser.
//! Your code is integrated into `yyparse` in several ways. First, `yyparse` will call your
//! `yylex` function in order to read the next token from the input. Then `yyparse` will
//! advance the state machine, and when rules have been matched ("reduced"), the action code
//! that you provided (in `{... }` blocks) will execute.
//!
//! In this model, the `yyparse` method runs until all of the tokens have been processed, or
//! until an action block prematurely exits the parser. However, this model suffers from
//! several problems. It puts too much control in the generated code, and requires the
//! parser generator (YACC / RACC) to call into too many "hook" functions, such as `yylex`.
//!
//! Instead, in RACC I have decided to use a different API model. Instead of generating a
//! `yyparse` function, RACC generates parsing tables and a `reduce` function. The `reduce`
//! function contains all of the rule action blocks (your code). RACC also generates a
//! `new_parser` method, which returns a new `ParsingState` struct which contains references
//! to the parsing tables and the generated `reduce` method. Your app then makes calls
//! to `parser.push_token()` to push tokens into the parser. This inverts the control-flow
//! model -- your app code is in control, and makes brief calls into the RACC runtime and
//! generated code in order to advance the state of the parser.
//!
//! This is simpler and more flexible, and I hope will be a more natural fit for Rust.
//! This parsing model also works well with Rust's lifetime model. Each parser object
//! (each instance of `ParsingState`) contains only the state necessary to advance the
//! state machine, and the contents of the "value" stack.
//!
//! ## Accessing external data during parsing
//!
//! It is often necessary, when implementing a parser, to access external or "environmental" | //! it were an option).
//!
//! RACC provides a safe means to access such data. Rules may access an "app context".
//! When the app calls `push_token` or `finish`, the app also passes a `&mut` reference
//! to an "app context" value. The type of this value can be anything defined by the
//! application. (It is necessary to specify the type in the `grammar!` definition.)
//! Within the scope of the rule action, this value may be accessed by using the identifier
//! specified in the grammar definition. In the example above, the identifier is `ctx`,
//! and the type of the context is `uint`.
//!
//! ## Propagating values through the parsing tree
//!
//! In Berkeley YACC, the tokenizer stage (lexer) may set the `yylval` variable to a value,
//! in order to specify a "value" for the current token. This value is shifted onto the
//! value stack, and is accessible to rule actions using the `$1`.. `$n` syntax. Rules
//! specify the result value of the rule by assigning the `$$` value.
//!
//! RACC has a similar facility, but the syntax for using it is different. The syntax in
//! RACC is a more natural fit for Rust. Instead of using `$1` bindings, RACC grammars
//! specify name bindings using `= name` after a symbol in a rule definition. For example:
//!
//! ```rust,ignore
//! Expr : Expr=left PLUS Expr=right {
//! println!("evaluating: {} + {}", left, right);
//! left + right
//! };
//! ```
//!
//! In this code, `Expr=left` means "match the symbol `Expr` and bind its value to the
//! name `left` within the scope of the action," and similarly for `Expr=right`.
//! Instead of using `$$` for setting the value of the rule action, the value of the rule
//! action is simply the value of the action, when evaluated using the normal rules of Rust.
//! This is why the action block in the example ends with `left + right` and not `left + right;`.
//! Ending the action with `;` would mean that the rule evaluates to `()`.
//!
//! Note that all rules must evaluate to a value, even if that value is `()` or `None`, and
//! the type of the value must match the type specified in the grammar. RACC (like Rust) will
//! not perform any implicit conversions, or insert any implicit `None` values.
//!
//! If you do not wish to propagate values in this way, you can use a symbol value of `()`.
//! If you do this, then you may have empty rule actions.
//!
//! ## Finishing parsing
//!
//! In Berkeley YACC, the lexer indicates the end of an input stream by reporting a `YYEOF`
//! token. Because RACC uses a push model rather than a pull model, a RACC-based parser
//! indicates the end of the input stream by calling the `parser.finish()` method. The
//! `finish` method performs any final reductions that are necessary, and then checks whether
//! the grammar accepts the input. If the grammar accepts the input, then `finish` will
//! return `FinishParseResult::Accept(value)`, where `value` is the value of the entire
//! parse tree.
//!
//! # License
//!
//! Berkeley YACC is in the public domain. From its `README` file:
//!
//! ```text
//! Berkeley Yacc is in the public domain. The data structures and algorithms
//! used in Berkeley Yacc are all either taken from documents available to the
//! general public or are inventions of the author. Anyone may freely distribute
//! source or binary forms of Berkeley Yacc whether unchanged or modified.
//! Distributers may charge whatever fees they can obtain for Berkeley Yacc.
//! Programs generated by Berkeley Yacc may be distributed freely.
//! ```
//!
//! RACC is published under the MIT open-source license, which should be compatible with nearly all
//! open source needs and should be compatible with the letter and spirit of Berkeley YACC's license.
//!
//! # Stability
//!
//! The ideas implemented in YACC are stable and time-tested. RACC is a port of YACC, and should
//! be considered unstable. The implementation may contain porting bugs, where the behavior of
//! RACC is not faithful to the original YACC. Rust procedural macros and the ecosystem supporting
//! them is also still growing and changing.
//!
//! So if you build anything using RACC, please be aware that both Rust and RACC are still evolving
//! quickly, and your code may break quickly and without notice.
//!
//! The original Berkeley YACC contains a strident disclaimer, which is repeated here:
//!
//! ```text
//! Berkeley Yacc is distributed with no warranty whatever. The author
//! and any other contributors take no responsibility for the consequences of
//! its use.
//! ```
//!
//! That disclaimer applies to this Rust port, as well. The author (of the Rust port) makes no
//! claim of suitability for any purpose, and takes no responsibility for the consequences of its use.
//!
//! # TODO List
//!
//! * Allow grammars to specify precedence and associativity. The underlying code implements
//! support for precedence and associativity, exactly as in Berkeley YACC, but this is not
//! yet exposed.
//!
//! * Support reading standalone grammars, either using the Rust parser or something else.
//!
//! * Port a lexical analyzer, too.
//!
//! # Author
//!
//! RACC was implemented by Arlie Davis `[email protected]`. I did this as an experiment
//! in porting a well-known (and useful) tool to Rust. I was also intrigued by Rust's support
//! for procedural macros, and I wanted to see whether I could implement something interesting
//! using procedural macros.
//!
//! # Feedback
//!
//! Feel free to send me any feedback on RACC to `[email protected]`.
#![recursion_limit = "256"]
#![warn(rust_2018_idioms)]
#![allow(clippy::needless_lifetimes)]
#![allow(clippy::cognitive_complexity)]
mod grammar;
mod lalr;
mod lr0;
mod mkpar;
mod output;
mod reader;
mod tvec;
mod util;
mod warshall;
use proc_macro2::Span;
use syn::Ident;
macro_rules! int_alias {
(type $name:ident = $int:ty;) => {
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
struct $name(pub $int);
impl $name {
pub fn index(&self) -> usize {
self.0 as usize
}
}
impl core::ops::Add<$int> for $name {
type Output = Self;
fn add(self, rhs: $int) -> $name {
$name(self.0 + rhs)
}
}
impl core::fmt::Display for $name {
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
core::fmt::Display::fmt(&self.0, fmt)
}
}
impl core::convert::From<$name> for usize {
fn from(i: $name) -> usize {
i.0 as usize
}
}
impl core::convert::From<usize> for $name {
fn from(i: usize) -> $name {
$name(i as $int)
}
}
};
}
// Type aliases
int_alias! {type Symbol = i16;}
int_alias! {type Var = i16;}
int_alias! {type Rule = i16;}
int_alias! {type State = i16;}
int_alias! {type Item = i16;}
int_alias! {type Token = i16;}
impl Rule {
// const RULE_NULL: Rule = Rule(0);
// const RULE_0: Rule = Rule(0);
const RULE_1: Rule = Rule(1);
const RULE_2: Rule = Rule(2);
}
impl Symbol {
pub const NULL: Symbol = Symbol(0);
pub const ERROR: Symbol = Token::ERROR.to_symbol();
}
impl Token {
/// Converts a token to a symbol. This is trivial, since all tokens are symbols
/// starting at zero.
pub const fn to_symbol(self) -> Symbol {
Symbol(self.0)
}
pub const ERROR: Token = Token(1);
}
impl Item {
pub const NULL: Item = Item(0);
}
#[derive(Copy, Clone, PartialEq, Eq, Ord, PartialOrd)]
struct SymbolOrRule(i16);
impl SymbolOrRule {
pub fn rule(rule: Rule) -> SymbolOrRule {
assert!(rule.0 > 0);
Self(-rule.0)
}
pub fn symbol(symbol: Symbol) -> SymbolOrRule {
assert!(symbol.0 >= 0);
Self(symbol.0)
}
pub fn is_symbol(self) -> bool {
self.0 >= 0
}
pub fn is_rule(self) -> bool {
self.0 < 0
}
pub fn as_symbol(self) -> Symbol {
assert!(self.is_symbol());
Symbol(self.0)
}
pub fn as_rule(self) -> Rule {
assert!(self.is_rule());
Rule(-self.0)
}
}
use core::fmt::{Debug, Formatter};
impl Debug for SymbolOrRule {
fn fmt(&self, fmt: &mut Formatter<'_>) -> core::fmt::Result {
if self.is_symbol() {
write!(fmt, "Symbol({})", self.as_symbol().index())
} else {
write!(fmt, "Rule({})", self.as_rule().index())
}
}
}
type StateOrRule = i16;
use reader::GrammarDef;
fn racc_grammar2(tokens: proc_macro2::TokenStream) -> syn::Result<proc_macro2::TokenStream> {
let grammar_def: GrammarDef = syn::parse2::<GrammarDef>(tokens)?;
let context_param_ident = Ident::new("context", Span::call_site());
let gram = &grammar_def.grammar;
let lr0 = lr0::compute_lr0(&gram);
let lalr_out = lalr::run_lalr_phase(&gram, &lr0);
let yaccparser = mkpar::make_parser(&gram, &lr0, &lalr_out);
Ok(output::output_parser_to_token_stream(
&gram,
&lalr_out.gotos,
&yaccparser,
&grammar_def.rule_blocks,
&grammar_def.rhs_bindings,
grammar_def.context_ty,
context_param_ident,
grammar_def.value_ty,
))
}
#[proc_macro]
pub fn racc_grammar(tokens: proc_macro::TokenStream) -> proc_macro::TokenStream {
let tokens2: proc_macro2::TokenStream = tokens.into();
racc_grammar2(tokens2).unwrap().into()
} | //! state, when executing rule actions. In C parsers, this is usually done with global
//! variables. However, this is not an option in Rust (and would be undesirable, even if | random_line_split |
lib.rs | //! # RACC -- Rust Another Compiler-Compiler
//!
//! This is a port of Berkeley YACC to Rust. It runs as a procedural macro, and so allows you to
//! define grammars directly in Rust source code, rather than calling an external tool or writing
//! a `build.rs` script.
//!
//! # How to write a grammar
//!
//! Here is a very brief example of how to use RACC. This program evaluates a very limited class
//! of numeric expressions.
//!
//! In `Cargo.toml`:
//!
//! ```toml,ignore
//! racc = "0.1.0"
//! ```
//!
//! In your code:
//!
//! ```rust,ignore
//!
//! racc::grammar! {
//! uint ctx; // application context; not used in this example
//! i32; // the type of values in the value stack, i.e. %union
//!
//! // This is the list of tokens defined for your grammar.
//! // RACC will generate named constants using these names; use those constants
//! // when calling push_token().
//! NUM; PLUS; MINUS; LPAREN; RPAREN;
//!
//! // Define the rules of your language. The first rule implicitly defines the goal symbol.
//! // Note the presence of '=x' in the rule definitions. These are name bindings, which RACC
//! // uses in order to allow your code blocks (which are in {... } braces) to access the
//! // values for each symbol. The values come from the value stack in the parser state machine.
//! // When you call push_token(), you provide both the token code and the "value" for that token.
//!
//! Expr : NUM=x { x };
//!
//! Expr : LPAREN Expr=x RPAREN { x };
//!
//! Expr : Expr=left PLUS Expr=right {
//! // You can put arbitrary code here.
//! println!("evaluating: {} + {}", left, right);
//!
//! // The value of the action block is used as the
//! // value of the rule (reduction). Note the absence
//! // of a semi-colon here.
//! left + right
//! };
//!
//! Expr : Expr=left MINUS Expr=right {
//! println!("evaluating: {} - {}", left, right);
//! left - right
//! };
//! }
//!
//! fn main() {
//! // The tokens in our input, and their numeric values.
//! let tokens = vec![
//! (LPAREN, -1),
//! (NUM, 50),
//! (PLUS, -1),
//! (NUM, 25),
//! (RPAREN, -1),
//! (MINUS, -1),
//! (NUM, 10)
//! ];
//!
//! // Create a parser.
//! let mut parser = new_parser();
//!
//! let mut ctx: uint = 0; // App context; not used in this example.
//!
//! for &(token, value) in tokens.iter() {
//! parser.push_token(&mut ctx, token, value);
//! }
//!
//! match parser.finish() {
//! FinishParseResult::Accept(value) => println!("Accepted: {}", value),
//! FinishParseResult::SyntaxError => println!("Syntax error")
//! }
//! }
//! ```
//!
//! ## Advancing the parser state machine
//!
//! Berkeley YACC generates a `yyparse` function, as the primary entry point to the parser.
//! Your code is integrated into `yyparse` in several ways. First, `yyparse` will call your
//! `yylex` function in order to read the next token from the input. Then `yyparse` will
//! advance the state machine, and when rules have been matched ("reduced"), the action code
//! that you provided (in `{... }` blocks) will execute.
//!
//! In this model, the `yyparse` method runs until all of the tokens have been processed, or
//! until an action block prematurely exits the parser. However, this model suffers from
//! several problems. It puts too much control in the generated code, and requires the
//! parser generator (YACC / RACC) to call into too many "hook" functions, such as `yylex`.
//!
//! Instead, in RACC I have decided to use a different API model. Instead of generating a
//! `yyparse` function, RACC generates parsing tables and a `reduce` function. The `reduce`
//! function contains all of the rule action blocks (your code). RACC also generates a
//! `new_parser` method, which returns a new `ParsingState` struct which contains references
//! to the parsing tables and the generated `reduce` method. Your app then makes calls
//! to `parser.push_token()` to push tokens into the parser. This inverts the control-flow
//! model -- your app code is in control, and makes brief calls into the RACC runtime and
//! generated code in order to advance the state of the parser.
//!
//! This is simpler and more flexible, and I hope will be a more natural fit for Rust.
//! This parsing model also works well with Rust's lifetime model. Each parser object
//! (each instance of `ParsingState`) contains only the state necessary to advance the
//! state machine, and the contents of the "value" stack.
//!
//! ## Accessing external data during parsing
//!
//! It is often necessary, when implementing a parser, to access external or "environmental"
//! state, when executing rule actions. In C parsers, this is usually done with global
//! variables. However, this is not an option in Rust (and would be undesirable, even if
//! it were an option).
//!
//! RACC provides a safe means to access such data. Rules may access an "app context".
//! When the app calls `push_token` or `finish`, the app also passes a `&mut` reference
//! to an "app context" value. The type of this value can be anything defined by the
//! application. (It is necessary to specify the type in the `grammar!` definition.)
//! Within the scope of the rule action, this value may be accessed by using the identifier
//! specified in the grammar definition. In the example above, the identifier is `ctx`,
//! and the type of the context is `uint`.
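//!
//! For example, a rule action could update the context while still producing a
//! value for the rule. This is a hypothetical rule, shown only to illustrate the
//! `ctx` binding; the exact form of the borrow depends on the `grammar!` expansion:
//!
//! ```rust,ignore
//! // Count how many additions were reduced, using the app context declared
//! // as `uint ctx;` at the top of the grammar! block.
//! Expr : Expr=left PLUS Expr=right {
//!     *ctx += 1;       // `ctx` refers to the &mut value passed to push_token()
//!     left + right     // the rule still evaluates to its value as usual
//! };
//! ```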
//!
//! ## Propagating values through the parsing tree
//!
//! In Berkeley YACC, the tokenizer stage (lexer) may set the `yylval` variable to a value,
//! in order to specify a "value" for the current token. This value is shifted onto the
//! value stack, and is accessible to rule actions using the `$1`.. `$n` syntax. Rules
//! specify the result value of the rule by assigning the `$$` value.
//!
//! RACC has a similar facility, but the syntax for using it is different. The syntax in
//! RACC is a more natural fit for Rust. Instead of using `$1` bindings, RACC grammars
//! specify name bindings using `= name` after a symbol in a rule definition. For example:
//!
//! ```rust,ignore
//! Expr : Expr=left PLUS Expr=right {
//! println!("evaluating: {} + {}", left, right);
//! left + right
//! };
//! ```
//!
//! In this code, `Expr=left` means "match the symbol `Expr` and bind its value to the
//! name `left` within the scope of the action," and similarly for `Expr=right`.
//! Instead of using `$$` for setting the value of the rule action, the value of the rule
//! action is simply the value of the action, when evaluated using the normal rules of Rust.
//! This is why the action block in the example ends with `left + right` and not `left + right;`.
//! Ending the action with `;` would mean that the rule evaluates to `()`.
//!
//! Note that all rules must evaluate to a value, even if that value is `()` or `None`, and
//! the type of the value must match the type specified in the grammar. RACC (like Rust) will
//! not perform any implicit conversions, or insert any implicit `None` values.
//!
//! If you do not wish to propagate values in this way, you can use a symbol value of `()`.
//! If you do this, then you may have empty rule actions.
//!
//! ## Finishing parsing
//!
//! In Berkeley YACC, the lexer indicates the end of an input stream by reporting a `YYEOF`
//! token. Because RACC uses a push model rather than a pull model, a RACC-based parser
//! indicates the end of the input stream by calling the `parser.finish()` method. The
//! `finish` method performs any final reductions that are necessary, and then checks whether
//! the grammar accepts the input. If the grammar accepts the input, then `finish` will
//! return `FinishParseResult::Accept(value)`, where `value` is the value of the entire
//! parse tree.
//!
//! # License
//!
//! Berkeley YACC is in the public domain. From its `README` file:
//!
//! ```text
//! Berkeley Yacc is in the public domain. The data structures and algorithms
//! used in Berkeley Yacc are all either taken from documents available to the
//! general public or are inventions of the author. Anyone may freely distribute
//! source or binary forms of Berkeley Yacc whether unchanged or modified.
//! Distributers may charge whatever fees they can obtain for Berkeley Yacc.
//! Programs generated by Berkeley Yacc may be distributed freely.
//! ```
//!
//! RACC is published under the MIT open-source license, which should be compatible with nearly all
//! open source needs and should be compatible with the letter and spirit of Berkeley YACC's license.
//!
//! # Stability
//!
//! The ideas implemented in YACC are stable and time-tested. RACC is a port of YACC, and should
//! be considered unstable. The implementation may contain porting bugs, where the behavior of
//! RACC is not faithful to the original YACC. Rust procedural macros and the ecosystem supporting
//! them is also still growing and changing.
//!
//! So if you build anything using RACC, please be aware that both Rust and RACC are still evolving
//! quickly, and your code may break quickly and without notice.
//!
//! The original Berkeley YACC contains a strident disclaimer, which is repeated here:
//!
//! ```text
//! Berkeley Yacc is distributed with no warranty whatever. The author
//! and any other contributors take no responsibility for the consequences of
//! its use.
//! ```
//!
//! That disclaimer applies to this Rust port, as well. The author (of the Rust port) makes no
//! claim of suitability for any purpose, and takes no responsibility for the consequences of its use.
//!
//! # TODO List
//!
//! * Allow grammars to specify precedence and associativity. The underlying code implements
//! support for precedence and associativity, exactly as in Berkeley YACC, but this is not
//! yet exposed.
//!
//! * Support reading standalone grammars, either using the Rust parser or something else.
//!
//! * Port a lexical analyzer, too.
//!
//! # Author
//!
//! RACC was implemented by Arlie Davis `[email protected]`. I did this as an experiment
//! in porting a well-known (and useful) tool to Rust. I was also intrigued by Rust's support
//! for procedural macros, and I wanted to see whether I could implement something interesting
//! using procedural macros.
//!
//! # Feedback
//!
//! Feel free to send me any feedback on RACC to `[email protected]`.
#![recursion_limit = "256"]
#![warn(rust_2018_idioms)]
#![allow(clippy::needless_lifetimes)]
#![allow(clippy::cognitive_complexity)]
mod grammar;
mod lalr;
mod lr0;
mod mkpar;
mod output;
mod reader;
mod tvec;
mod util;
mod warshall;
use proc_macro2::Span;
use syn::Ident;
macro_rules! int_alias {
(type $name:ident = $int:ty;) => {
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
struct $name(pub $int);
impl $name {
pub fn index(&self) -> usize {
self.0 as usize
}
}
impl core::ops::Add<$int> for $name {
type Output = Self;
fn add(self, rhs: $int) -> $name {
$name(self.0 + rhs)
}
}
impl core::fmt::Display for $name {
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
core::fmt::Display::fmt(&self.0, fmt)
}
}
impl core::convert::From<$name> for usize {
fn from(i: $name) -> usize {
i.0 as usize
}
}
impl core::convert::From<usize> for $name {
fn from(i: usize) -> $name {
$name(i as $int)
}
}
};
}
// Type aliases
int_alias! {type Symbol = i16;}
int_alias! {type Var = i16;}
int_alias! {type Rule = i16;}
int_alias! {type State = i16;}
int_alias! {type Item = i16;}
int_alias! {type Token = i16;}
impl Rule {
// const RULE_NULL: Rule = Rule(0);
// const RULE_0: Rule = Rule(0);
const RULE_1: Rule = Rule(1);
const RULE_2: Rule = Rule(2);
}
impl Symbol {
pub const NULL: Symbol = Symbol(0);
pub const ERROR: Symbol = Token::ERROR.to_symbol();
}
impl Token {
/// Converts a token to a symbol. This is trivial, since all tokens are symbols
/// starting at zero.
pub const fn to_symbol(self) -> Symbol {
Symbol(self.0)
}
pub const ERROR: Token = Token(1);
}
impl Item {
pub const NULL: Item = Item(0);
}
#[derive(Copy, Clone, PartialEq, Eq, Ord, PartialOrd)]
struct | (i16);
impl SymbolOrRule {
pub fn rule(rule: Rule) -> SymbolOrRule {
assert!(rule.0 > 0);
Self(-rule.0)
}
pub fn symbol(symbol: Symbol) -> SymbolOrRule {
assert!(symbol.0 >= 0);
Self(symbol.0)
}
pub fn is_symbol(self) -> bool {
self.0 >= 0
}
pub fn is_rule(self) -> bool {
self.0 < 0
}
pub fn as_symbol(self) -> Symbol {
assert!(self.is_symbol());
Symbol(self.0)
}
pub fn as_rule(self) -> Rule {
assert!(self.is_rule());
Rule(-self.0)
}
}
use core::fmt::{Debug, Formatter};
impl Debug for SymbolOrRule {
fn fmt(&self, fmt: &mut Formatter<'_>) -> core::fmt::Result {
if self.is_symbol() {
write!(fmt, "Symbol({})", self.as_symbol().index())
} else {
write!(fmt, "Rule({})", self.as_rule().index())
}
}
}
type StateOrRule = i16;
use reader::GrammarDef;
fn racc_grammar2(tokens: proc_macro2::TokenStream) -> syn::Result<proc_macro2::TokenStream> {
let grammar_def: GrammarDef = syn::parse2::<GrammarDef>(tokens)?;
let context_param_ident = Ident::new("context", Span::call_site());
let gram = &grammar_def.grammar;
let lr0 = lr0::compute_lr0(&gram);
let lalr_out = lalr::run_lalr_phase(&gram, &lr0);
let yaccparser = mkpar::make_parser(&gram, &lr0, &lalr_out);
Ok(output::output_parser_to_token_stream(
&gram,
&lalr_out.gotos,
&yaccparser,
&grammar_def.rule_blocks,
&grammar_def.rhs_bindings,
grammar_def.context_ty,
context_param_ident,
grammar_def.value_ty,
))
}
#[proc_macro]
pub fn racc_grammar(tokens: proc_macro::TokenStream) -> proc_macro::TokenStream {
let tokens2: proc_macro2::TokenStream = tokens.into();
racc_grammar2(tokens2).unwrap().into()
}
| SymbolOrRule | identifier_name |
lib.rs | //! # RACC -- Rust Another Compiler-Compiler
//!
//! This is a port of Berkeley YACC to Rust. It runs as a procedural macro, and so allows you to
//! define grammars directly in Rust source code, rather than calling an external tool or writing
//! a `build.rs` script.
//!
//! # How to write a grammar
//!
//! Here is a very brief example of how to use RACC. This program evaluates a very limited class
//! of numeric expressions.
//!
//! In `Cargo.toml`:
//!
//! ```toml,ignore
//! racc = "0.1.0"
//! ```
//!
//! In your code:
//!
//! ```rust,ignore
//!
//! racc::grammar! {
//! uint ctx; // application context; not used in this example
//! i32; // the type of values in the value stack, i.e. %union
//!
//! // This is the list of tokens defined for your grammar.
//! // RACC will generate named constants using these names; use those constants
//! // when calling push_token().
//! NUM; PLUS; MINUS; LPAREN; RPAREN;
//!
//! // Define the rules of your language. The first rule implicitly defines the goal symbol.
//! // Note the presence of '=x' in the rule definitions. These are name bindings, which RACC
//! // uses in order to allow your code blocks (which are in {... } braces) to access the
//! // values for each symbol. The values come from the value stack in the parser state machine.
//! // When you call push_token(), you provide both the token code and the "value" for that token.
//!
//! Expr : NUM=x { x };
//!
//! Expr : LPAREN Expr=x RPAREN { x };
//!
//! Expr : Expr=left PLUS Expr=right {
//! // You can put arbitrary code here.
//! println!("evaluating: {} + {}", left, right);
//!
//! // The value of the action block is used as the
//! // value of the rule (reduction). Note the absence
//! // of a semi-colon here.
//! left + right
//! };
//!
//! Expr : Expr=left MINUS Expr=right {
//! println!("evaluating: {} - {}", left, right);
//! left - right
//! };
//! }
//!
//! fn main() {
//! // The tokens in our input, and their numeric values.
//! let tokens = vec![
//! (LPAREN, -1),
//! (NUM, 50),
//! (PLUS, -1),
//! (NUM, 25),
//! (RPAREN, -1),
//! (MINUS, -1),
//! (NUM, 10)
//! ];
//!
//! // Create a parser.
//! let mut parser = new_parser();
//!
//! let mut ctx: uint = 0; // App context; not used in this example.
//!
//! for &(token, value) in tokens.iter() {
//! parser.push_token(&mut ctx, token, value);
//! }
//!
//! match parser.finish() {
//! FinishParseResult::Accept(value) => println!("Accepted: {}", value),
//! FinishParseResult::SyntaxError => println!("Syntax error")
//! }
//! }
//! ```
//!
//! ## Advancing the parser state machine
//!
//! Berkeley YACC generates a `yyparse` function, as the primary entry point to the parser.
//! Your code is integrated into `yyparse` in several ways. First, `yyparse` will call your
//! `yylex` function in order to read the next token from the input. Then `yyparse` will
//! advance the state machine, and when rules have been matched ("reduced"), the action code
//! that you provided (in `{... }` blocks) will execute.
//!
//! In this model, the `yyparse` method runs until all of the tokens have been processed, or
//! until an action block prematurely exits the parser. However, this model suffers from
//! several problems. It puts too much control in the generated code, and requires the
//! parser generator (YACC / RACC) to call into too many "hook" functions, such as `yylex`.
//!
//! Instead, in RACC I have decided to use a different API model. Instead of generating a
//! `yyparse` function, RACC generates parsing tables and a `reduce` function. The `reduce`
//! function contains all of the rule action blocks (your code). RACC also generates a
//! `new_parser` method, which returns a new `ParsingState` struct which contains references
//! to the parsing tables and the generated `reduce` method. Your app then makes calls
//! to `parser.push_token()` to push tokens into the parser. This inverts the control-flow
//! model -- your app code is in control, and makes brief calls into the RACC runtime and
//! generated code in order to advance the state of the parser.
//!
//! This is simpler and more flexible, and I hope will be a more natural fit for Rust.
//! This parsing model also works well with Rust's lifetime model. Each parser object
//! (each instance of `ParsingState`) contains only the state necessary to advance the
//! state machine, and the contents of the "value" stack.
//!
//! ## Accessing external data during parsing
//!
//! It is often necessary, when implementing a parser, to access external or "environmental"
//! state, when executing rule actions. In C parsers, this is usually done with global
//! variables. However, this is not an option in Rust (and would be undesirable, even if
//! it were an option).
//!
//! RACC provides a safe means to access such data. Rules may access an "app context".
//! When the app calls `push_token` or `finish`, the app also passes a `&mut` reference
//! to an "app context" value. The type of this value can be anything defined by the
//! application. (It is necessary to specify the type in the `grammar!` definition.)
//! Within the scope of the rule action, this value may be accessed by using the identifier
//! specified in the grammar definition. In the example above, the identifier is `ctx`,
//! and the type of the context is `uint`.
//!
//! ## Propagating values through the parsing tree
//!
//! In Berkeley YACC, the tokenizer stage (lexer) may set the `yylval` variable to a value,
//! in order to specify a "value" for the current token. This value is shifted onto the
//! value stack, and is accessible to rule actions using the `$1`.. `$n` syntax. Rules
//! specify the result value of the rule by assigning the `$$` value.
//!
//! RACC has a similar facility, but the syntax for using it is different. The syntax in
//! RACC is a more natural fit for Rust. Instead of using `$1` bindings, RACC grammars
//! specify name bindings using `= name` after a symbol in a rule definition. For example:
//!
//! ```rust,ignore
//! Expr : Expr=left PLUS Expr=right {
//! println!("evaluating: {} + {}", left, right);
//! left + right
//! };
//! ```
//!
//! In this code, `Expr=left` means "match the symbol `Expr` and bind its value to the
//! name `left` within the scope of the action," and similarly for `Expr=right`.
//! Instead of using `$$` for setting the value of the rule action, the value of the rule
//! action is simply the value of the action, when evaluated using the normal rules of Rust.
//! This is why the action block in the example ends with `left + right` and not `left + right;`.
//! Ending the action with `;` would mean that the rule evaluates to `()`.
//!
//! Note that all rules must evaluate to a value, even if that value is `()` or `None`, and
//! the type of the value must match the type specified in the grammar. RACC (like Rust) will
//! not perform any implicit conversions, or insert any implicit `None` values.
//!
//! If you do not wish to propagate values in this way, you can use a symbol value of `()`.
//! If you do this, then you may have empty rule actions.
//!
//! ## Finishing parsing
//!
//! In Berkeley YACC, the lexer indicates the end of an input stream by reporting a `YYEOF`
//! token. Because RACC uses a push model rather than a pull model, a RACC-based parser
//! indicates the end of the input stream by calling the `parser.finish()` method. The
//! `finish` method performs any final reductions that are necessary, and then checks whether
//! the grammar accepts the input. If the grammar accepts the input, then `finish` will
//! return `FinishParseResult::Accept(value)`, where `value` is the value of the entire
//! parse tree.
//!
//! # License
//!
//! Berkeley YACC is in the public domain. From its `README` file:
//!
//! ```text
//! Berkeley Yacc is in the public domain. The data structures and algorithms
//! used in Berkeley Yacc are all either taken from documents available to the
//! general public or are inventions of the author. Anyone may freely distribute
//! source or binary forms of Berkeley Yacc whether unchanged or modified.
//! Distributers may charge whatever fees they can obtain for Berkeley Yacc.
//! Programs generated by Berkeley Yacc may be distributed freely.
//! ```
//!
//! RACC is published under the MIT open-source license, which should be compatible with nearly all
//! open source needs and should be compatible with the letter and spirit of Berkeley YACC's license.
//!
//! # Stability
//!
//! The ideas implemented in YACC are stable and time-tested. RACC is a port of YACC, and should
//! be considered unstable. The implementation may contain porting bugs, where the behavior of
//! RACC is not faithful to the original YACC. Rust procedural macros and the ecosystem supporting
//! them is also still growing and changing.
//!
//! So if you build anything using RACC, please be aware that both Rust and RACC are still evolving
//! quickly, and your code may break quickly and without notice.
//!
//! The original Berkeley YACC contains a strident disclaimer, which is repeated here:
//!
//! ```text
//! Berkeley Yacc is distributed with no warranty whatever. The author
//! and any other contributors take no responsibility for the consequences of
//! its use.
//! ```
//!
//! That disclaimer applies to this Rust port, as well. The author (of the Rust port) makes no
//! claim of suitability for any purpose, and takes no responsibility for the consequences of its use.
//!
//! # TODO List
//!
//! * Allow grammars to specify precedence and associativity. The underlying code implements
//! support for precedence and associativity, exactly as in Berkeley YACC, but this is not
//! yet exposed.
//!
//! * Support reading standalone grammars, either using the Rust parser or something else.
//!
//! * Port a lexical analyzer, too.
//!
//! # Author
//!
//! RACC was implemented by Arlie Davis `[email protected]`. I did this as an experiment
//! in porting a well-known (and useful) tool to Rust. I was also intrigued by Rust's support
//! for procedural macros, and I wanted to see whether I could implement something interesting
//! using procedural macros.
//!
//! # Feedback
//!
//! Feel free to send me any feedback on RACC to `[email protected]`.
#![recursion_limit = "256"]
#![warn(rust_2018_idioms)]
#![allow(clippy::needless_lifetimes)]
#![allow(clippy::cognitive_complexity)]
mod grammar;
mod lalr;
mod lr0;
mod mkpar;
mod output;
mod reader;
mod tvec;
mod util;
mod warshall;
use proc_macro2::Span;
use syn::Ident;
macro_rules! int_alias {
(type $name:ident = $int:ty;) => {
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
struct $name(pub $int);
impl $name {
pub fn index(&self) -> usize {
self.0 as usize
}
}
impl core::ops::Add<$int> for $name {
type Output = Self;
fn add(self, rhs: $int) -> $name {
$name(self.0 + rhs)
}
}
impl core::fmt::Display for $name {
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
core::fmt::Display::fmt(&self.0, fmt)
}
}
impl core::convert::From<$name> for usize {
fn from(i: $name) -> usize {
i.0 as usize
}
}
impl core::convert::From<usize> for $name {
fn from(i: usize) -> $name {
$name(i as $int)
}
}
};
}
// Type aliases
int_alias! {type Symbol = i16;}
int_alias! {type Var = i16;}
int_alias! {type Rule = i16;}
int_alias! {type State = i16;}
int_alias! {type Item = i16;}
int_alias! {type Token = i16;}
impl Rule {
// const RULE_NULL: Rule = Rule(0);
// const RULE_0: Rule = Rule(0);
const RULE_1: Rule = Rule(1);
const RULE_2: Rule = Rule(2);
}
impl Symbol {
pub const NULL: Symbol = Symbol(0);
pub const ERROR: Symbol = Token::ERROR.to_symbol();
}
impl Token {
/// Converts a token to a symbol. This is trivial, since all tokens are symbols
/// starting at zero.
pub const fn to_symbol(self) -> Symbol {
Symbol(self.0)
}
pub const ERROR: Token = Token(1);
}
impl Item {
pub const NULL: Item = Item(0);
}
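/// Packs either a grammar symbol or a rule into a single `i16`: symbols are stored as
/// non-negative values, while rules are stored as the negation of their (always positive) index.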
#[derive(Copy, Clone, PartialEq, Eq, Ord, PartialOrd)]
struct SymbolOrRule(i16);
impl SymbolOrRule {
pub fn rule(rule: Rule) -> SymbolOrRule {
assert!(rule.0 > 0);
Self(-rule.0)
}
pub fn symbol(symbol: Symbol) -> SymbolOrRule {
assert!(symbol.0 >= 0);
Self(symbol.0)
}
pub fn is_symbol(self) -> bool {
self.0 >= 0
}
pub fn is_rule(self) -> bool {
self.0 < 0
}
pub fn as_symbol(self) -> Symbol {
assert!(self.is_symbol());
Symbol(self.0)
}
pub fn as_rule(self) -> Rule {
assert!(self.is_rule());
Rule(-self.0)
}
}
use core::fmt::{Debug, Formatter};
impl Debug for SymbolOrRule {
fn fmt(&self, fmt: &mut Formatter<'_>) -> core::fmt::Result {
if self.is_symbol() | else {
write!(fmt, "Rule({})", self.as_rule().index())
}
}
}
type StateOrRule = i16;
use reader::GrammarDef;
fn racc_grammar2(tokens: proc_macro2::TokenStream) -> syn::Result<proc_macro2::TokenStream> {
let grammar_def: GrammarDef = syn::parse2::<GrammarDef>(tokens)?;
let context_param_ident = Ident::new("context", Span::call_site());
let gram = &grammar_def.grammar;
let lr0 = lr0::compute_lr0(&gram);
let lalr_out = lalr::run_lalr_phase(&gram, &lr0);
let yaccparser = mkpar::make_parser(&gram, &lr0, &lalr_out);
Ok(output::output_parser_to_token_stream(
&gram,
&lalr_out.gotos,
&yaccparser,
&grammar_def.rule_blocks,
&grammar_def.rhs_bindings,
grammar_def.context_ty,
context_param_ident,
grammar_def.value_ty,
))
}
#[proc_macro]
pub fn racc_grammar(tokens: proc_macro::TokenStream) -> proc_macro::TokenStream {
let tokens2: proc_macro2::TokenStream = tokens.into();
racc_grammar2(tokens2).unwrap().into()
}
| {
write!(fmt, "Symbol({})", self.as_symbol().index())
} | conditional_block |
lib.rs | //! # RACC -- Rust Another Compiler-Compiler
//!
//! This is a port of Berkeley YACC to Rust. It runs as a procedural macro, and so allows you to
//! define grammars directly in Rust source code, rather than calling an external tool or writing
//! a `build.rs` script.
//!
//! # How to write a grammar
//!
//! Here is a very brief example of how to use RACC. This program evaluates a very limited class
//! of numeric expressions.
//!
//! In `Cargo.toml`:
//!
//! ```toml,ignore
//! racc = "0.1.0"
//! ```
//!
//! In your code:
//!
//! ```rust,ignore
//!
//! racc::grammar! {
//! uint ctx; // application context; not used in this example
//! i32; // the type of values in the value stack, i.e. %union
//!
//! // This is the list of tokens defined for your grammar.
//! // RACC will generate named constants using these names; use those constants
//! // when calling push_token().
//! NUM; PLUS; MINUS; LPAREN; RPAREN;
//!
//! // Define the rules of your language. The first rule implicitly defines the goal symbol.
//! // Note the presence of '=x' in the rule definitions. These are name bindings, which RACC
//! // uses in order to allow your code blocks (which are in {... } braces) to access the
//! // values for each symbol. The values come from the value stack in the parser state machine.
//! // When you call push_token(), you provide both the token code and the "value" for that token.
//!
//! Expr : NUM=x { x };
//!
//! Expr : LPAREN Expr=x RPAREN { x };
//!
//! Expr : Expr=left PLUS Expr=right {
//! // You can put arbitrary code here.
//! println!("evaluating: {} + {}", left, right);
//!
//! // The value of the action block is used as the
//! // value of the rule (reduction). Note the absence
//! // of a semi-colon here.
//! left + right
//! };
//!
//! Expr : Expr=left MINUS Expr=right {
//! println!("evaluating: {} - {}", left, right);
//! left - right
//! };
//! }
//!
//! fn main() {
//! // The tokens in our input, and their numeric values.
//! let tokens = vec![
//! (LPAREN, -1),
//! (NUM, 50),
//! (PLUS, -1),
//! (NUM, 25),
//! (RPAREN, -1),
//! (MINUS, -1),
//! (NUM, 10)
//! ];
//!
//! // Create a parser.
//! let mut parser = new_parser();
//!
//! let mut ctx: uint = 0; // App context; not used in this example.
//!
//! for &(token, value) in tokens.iter() {
//! parser.push_token(&mut ctx, token, value);
//! }
//!
//! match parser.finish() {
//! FinishParseResult::Accept(value) => println!("Accepted: {}", value),
//! FinishParseResult::SyntaxError => println!("Syntax error")
//! }
//! }
//! ```
//!
//! ## Advancing the parser state machine
//!
//! Berkeley YACC generates a `yyparse` function, as the primary entry point to the parser.
//! Your code is integrated into `yyparse` in several ways. First, `yyparse` will call your
//! `yylex` function in order to read the next token from the input. Then `yyparse` will
//! advance the state machine, and when rules have been matched ("reduced"), the action code
//! that you provided (in `{... }` blocks) will execute.
//!
//! In this model, the `yyparse` method runs until all of the tokens have been processed, or
//! until an action block prematurely exits the parser. However, this model suffers from
//! several problems. It puts too much control in the generated code, and requires the
//! parser generator (YACC / RACC) to call into too many "hook" functions, such as `yylex`.
//!
//! Instead, in RACC I have decided to use a different API model. Instead of generating a
//! `yyparse` function, RACC generates parsing tables and a `reduce` function. The `reduce`
//! function contains all of the rule action blocks (your code). RACC also generates a
//! `new_parser` method, which returns a new `ParsingState` struct which contains references
//! to the parsing tables and the generated `reduce` method. Your app then makes calls
//! to `parser.push_token()` to push tokens into the parser. This inverts the control-flow
//! model -- your app code is in control, and makes brief calls into the RACC runtime and
//! generated code in order to advance the state of the parser.
//!
//! This is simpler and more flexible, and I hope will be a more natural fit for Rust.
//! This parsing model also works well with Rust's lifetime model. Each parser object
//! (each instance of `ParsingState`) contains only the state necessary to advance the
//! state machine, and the contents of the "value" stack.
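//!
//! Concretely, the inverted control flow reduces to a loop like this (a sketch reusing the
//! names from the example above):
//!
//! ```rust,ignore
//! let mut parser = new_parser();
//! for &(token, value) in tokens.iter() {
//!     // Each call advances the state machine; any completed rules run their action blocks.
//!     parser.push_token(&mut ctx, token, value);
//! }
//! ```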
//!
//! ## Accessing external data during parsing
//!
//! It is often necessary, when implementing a parser, to access external or "environmental"
//! state, when executing rule actions. In C parsers, this is usually done with global
//! variables. However, this is not an option in Rust (and would be undesirable, even if
//! it were an option).
//!
//! RACC provides a safe means to access such data. Rules may access an "app context".
//! When the app calls `push_token` or `finish`, the app also passes a `&mut` reference
//! to an "app context" value. The type of this value can be anything defined by the
//! application. (It is necessary to specify the type in the `grammar!` definition.)
//! Within the scope of the rule action, this value may be accessed by using the identifier
//! specified in the grammar definition. In the example above, the identifier is `ctx`,
//! and the type of the context is `uint`.
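//!
//! As a sketch, a rule action could use the context to keep a running count of reductions.
//! (This rule is hypothetical, and the exact form in which `ctx` is exposed inside the action
//! block is an assumption here, not taken from the generated code.)
//!
//! ```rust,ignore
//! Expr : Expr=left PLUS Expr=right {
//!     // Assumption: the context is visible as a mutable reference named `ctx`.
//!     *ctx += 1;
//!     left + right
//! };
//! ```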
//!
//! ## Propagating values through the parsing tree
//!
//! In Berkeley YACC, the tokenizer stage (lexer) may set the `yylval` variable to a value,
//! in order to specify a "value" for the current token. This value is shifted onto the
//! value stack, and is accessible to rule actions using the `$1`.. `$n` syntax. Rules
//! specify the result value of the rule by assigning the `$$` value.
//!
//! RACC has a similar facility, but the syntax for using it is different. The syntax in
//! RACC is a more natural fit for Rust. Instead of using `$1` bindings, RACC grammars
//! specify name bindings using `= name` after a symbol in a rule definition. For example:
//!
//! ```rust,ignore
//! Expr : Expr=left PLUS Expr=right {
//! println!("evaluating: {} + {}", left, right);
//! left + right
//! };
//! ```
//!
//! In this code, `Expr=left` means "match the symbol `Expr` and bind its value to the
//! name `left` within the scope of the action," and similarly for `Expr=right`.
//! Instead of using `$$` for setting the value of the rule action, the value of the rule
//! action is simply the value of the action, when evaluated using the normal rules of Rust.
//! This is why the action block in the example ends with `left + right` and not `left + right;`.
//! Ending the action with `;` would mean that the rule evaluates to `()`.
//!
//! Note that all rules must evaluate to a value, even if that value is `()` or `None`, and
//! the type of the value must match the type specified in the grammar. RACC (like Rust) will
//! not perform any implicit conversions, or insert any implicit `None` values.
//!
//! If you do not wish to propagate values in this way, you can use a symbol value of `()`.
//! If you do this, then you may have empty rule actions.
//!
//! ## Finishing parsing
//!
//! In Berkeley YACC, the lexer indicates the end of an input stream by reporting a `YYEOF`
//! token. Because RACC uses a push model rather than a pull model, a RACC-based parser
//! indicates the end of the input stream by calling the `parser.finish()` method. The
//! `finish` method performs any final reductions that are necessary, and then checks whether
//! the grammar accepts the input. If the grammar accepts the input, then `finish` will
//! return `FinishParseResult::Accept(value)`, where `value` is the value of the entire
//! parse tree.
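//!
//! For example, using the `parser` from the example above once every token has been pushed:
//!
//! ```rust,ignore
//! match parser.finish() {
//!     FinishParseResult::Accept(value) => println!("accepted: {}", value),
//!     FinishParseResult::SyntaxError => println!("syntax error"),
//! }
//! ```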
//!
//! # License
//!
//! Berkeley YACC is in the public domain. From its `README` file:
//!
//! ```text
//! Berkeley Yacc is in the public domain. The data structures and algorithms
//! used in Berkeley Yacc are all either taken from documents available to the
//! general public or are inventions of the author. Anyone may freely distribute
//! source or binary forms of Berkeley Yacc whether unchanged or modified.
//! Distributers may charge whatever fees they can obtain for Berkeley Yacc.
//! Programs generated by Berkeley Yacc may be distributed freely.
//! ```
//!
//! RACC is published under the MIT open-source license, which should be compatible with nearly all
//! open source needs and should be compatible with the letter and spirit of Berkeley YACC's license.
//!
//! # Stability
//!
//! The ideas implemented in YACC are stable and time-tested. RACC is a port of YACC, and should
//! be considered unstable. The implementation may contain porting bugs, where the behavior of
//! RACC is not faithful to the original YACC. Rust procedural macros and the ecosystem supporting
//! them is also still growing and changing.
//!
//! So if you build anything using RACC, please be aware that both Rust and RACC are still evolving
//! quickly, and your code may break quickly and without notice.
//!
//! The original Berkeley YACC contains a strident disclaimer, which is repeated here:
//!
//! ```text
//! Berkeley Yacc is distributed with no warranty whatever. The author
//! and any other contributors take no responsibility for the consequences of
//! its use.
//! ```
//!
//! That disclaimer applies to this Rust port, as well. The author (of the Rust port) makes no
//! claim of suitability for any purpose, and takes no responsibility for the consequences of its use.
//!
//! # TODO List
//!
//! * Allow grammars to specify precedence and associativity. The underlying code implements
//! support for precedence and associativity, exactly as in Berkeley YACC, but this is not
//! yet exposed.
//!
//! * Support reading standalone grammars, either using the Rust parser or something else.
//!
//! * Port a lexical analyzer, too.
//!
//! # Author
//!
//! RACC was implemented by Arlie Davis `[email protected]`. I did this as an experiment
//! in porting a well-known (and useful) tool to Rust. I was also intrigued by Rust's support
//! for procedural macros, and I wanted to see whether I could implement something interesting
//! using procedural macros.
//!
//! # Feedback
//!
//! Feel free to send me any feedback on RACC to `[email protected]`.
#![recursion_limit = "256"]
#![warn(rust_2018_idioms)]
#![allow(clippy::needless_lifetimes)]
#![allow(clippy::cognitive_complexity)]
mod grammar;
mod lalr;
mod lr0;
mod mkpar;
mod output;
mod reader;
mod tvec;
mod util;
mod warshall;
use proc_macro2::Span;
use syn::Ident;
macro_rules! int_alias {
(type $name:ident = $int:ty;) => {
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)]
struct $name(pub $int);
impl $name {
pub fn index(&self) -> usize {
self.0 as usize
}
}
impl core::ops::Add<$int> for $name {
type Output = Self;
fn add(self, rhs: $int) -> $name {
$name(self.0 + rhs)
}
}
impl core::fmt::Display for $name {
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
core::fmt::Display::fmt(&self.0, fmt)
}
}
impl core::convert::From<$name> for usize {
fn from(i: $name) -> usize {
i.0 as usize
}
}
impl core::convert::From<usize> for $name {
fn from(i: usize) -> $name {
$name(i as $int)
}
}
};
}
// Type aliases
int_alias! {type Symbol = i16;}
int_alias! {type Var = i16;}
int_alias! {type Rule = i16;}
int_alias! {type State = i16;}
int_alias! {type Item = i16;}
int_alias! {type Token = i16;}
impl Rule {
// const RULE_NULL: Rule = Rule(0);
// const RULE_0: Rule = Rule(0);
const RULE_1: Rule = Rule(1);
const RULE_2: Rule = Rule(2);
}
impl Symbol {
pub const NULL: Symbol = Symbol(0);
pub const ERROR: Symbol = Token::ERROR.to_symbol();
}
impl Token {
/// Converts a token to a symbol. This is trivial, since all tokens are symbols
/// starting at zero.
pub const fn to_symbol(self) -> Symbol {
Symbol(self.0)
}
pub const ERROR: Token = Token(1);
}
impl Item {
pub const NULL: Item = Item(0);
}
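/// Packs either a grammar symbol or a rule into a single `i16`: symbols are stored as
/// non-negative values, while rules are stored as the negation of their (always positive) index.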
#[derive(Copy, Clone, PartialEq, Eq, Ord, PartialOrd)]
struct SymbolOrRule(i16);
impl SymbolOrRule {
pub fn rule(rule: Rule) -> SymbolOrRule {
assert!(rule.0 > 0);
Self(-rule.0)
}
pub fn symbol(symbol: Symbol) -> SymbolOrRule {
assert!(symbol.0 >= 0);
Self(symbol.0)
}
pub fn is_symbol(self) -> bool {
self.0 >= 0
}
pub fn is_rule(self) -> bool {
self.0 < 0
}
pub fn as_symbol(self) -> Symbol |
pub fn as_rule(self) -> Rule {
assert!(self.is_rule());
Rule(-self.0)
}
}
use core::fmt::{Debug, Formatter};
impl Debug for SymbolOrRule {
fn fmt(&self, fmt: &mut Formatter<'_>) -> core::fmt::Result {
if self.is_symbol() {
write!(fmt, "Symbol({})", self.as_symbol().index())
} else {
write!(fmt, "Rule({})", self.as_rule().index())
}
}
}
type StateOrRule = i16;
use reader::GrammarDef;
fn racc_grammar2(tokens: proc_macro2::TokenStream) -> syn::Result<proc_macro2::TokenStream> {
let grammar_def: GrammarDef = syn::parse2::<GrammarDef>(tokens)?;
let context_param_ident = Ident::new("context", Span::call_site());
let gram = &grammar_def.grammar;
let lr0 = lr0::compute_lr0(&gram);
let lalr_out = lalr::run_lalr_phase(&gram, &lr0);
let yaccparser = mkpar::make_parser(&gram, &lr0, &lalr_out);
Ok(output::output_parser_to_token_stream(
&gram,
&lalr_out.gotos,
&yaccparser,
&grammar_def.rule_blocks,
&grammar_def.rhs_bindings,
grammar_def.context_ty,
context_param_ident,
grammar_def.value_ty,
))
}
#[proc_macro]
pub fn racc_grammar(tokens: proc_macro::TokenStream) -> proc_macro::TokenStream {
let tokens2: proc_macro2::TokenStream = tokens.into();
racc_grammar2(tokens2).unwrap().into()
}
| {
assert!(self.is_symbol());
Symbol(self.0)
} | identifier_body |
iterable.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
//! Implementation of `iterable<...>` and `iterable<...,...>` WebIDL declarations.
use crate::dom::bindings::codegen::Bindings::IterableIteratorBinding::IterableKeyAndValueResult;
use crate::dom::bindings::codegen::Bindings::IterableIteratorBinding::IterableKeyOrValueResult;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject, Reflector};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::trace::{JSTraceable, RootedTraceableBox};
use crate::dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
use js::conversions::ToJSValConvertible;
use js::jsapi::{Heap, JSContext, JSObject};
use js::jsval::UndefinedValue;
use js::rust::{HandleValue, MutableHandleObject};
use std::cell::Cell;
use std::ptr;
use std::ptr::NonNull;
/// The values that an iterator will iterate over.
#[derive(JSTraceable, MallocSizeOf)]
pub enum IteratorType {
/// The keys of the iterable object.
Keys,
/// The values of the iterable object.
Values,
/// The keys and values of the iterable object combined.
Entries,
}
/// A DOM object that can be iterated over using a pair value iterator.
pub trait Iterable {
/// The type of the key of the iterator pair.
type Key: ToJSValConvertible;
/// The type of the value of the iterator pair.
type Value: ToJSValConvertible;
/// Return the number of entries that can be iterated over.
fn get_iterable_length(&self) -> u32;
/// Return the value at the provided index.
fn get_value_at_index(&self, index: u32) -> Self::Value;
/// Return the key at the provided index.
fn get_key_at_index(&self, index: u32) -> Self::Key;
}
/// An iterator over the iterable entries of a given DOM interface.
//FIXME: #12811 prevents dom_struct with type parameters
#[dom_struct]
pub struct IterableIterator<T: DomObject + JSTraceable + Iterable> {
reflector: Reflector,
iterable: Dom<T>,
type_: IteratorType,
index: Cell<u32>,
}
impl<T: DomObject + JSTraceable + Iterable> IterableIterator<T> {
/// Create a new iterator instance for the provided iterable DOM interface.
pub fn new(
iterable: &T,
type_: IteratorType,
wrap: unsafe fn(*mut JSContext, &GlobalScope, Box<IterableIterator<T>>) -> DomRoot<Self>,
) -> DomRoot<Self> {
let iterator = Box::new(IterableIterator {
reflector: Reflector::new(),
type_: type_,
iterable: Dom::from_ref(iterable),
index: Cell::new(0),
});
reflect_dom_object(iterator, &*iterable.global(), wrap)
}
/// Return the next value from the iterable object.
#[allow(non_snake_case)]
pub fn Next(&self, cx: *mut JSContext) -> Fallible<NonNull<JSObject>> {
let index = self.index.get();
rooted!(in(cx) let mut value = UndefinedValue());
rooted!(in(cx) let mut rval = ptr::null_mut::<JSObject>());
let result = if index >= self.iterable.get_iterable_length() {
dict_return(cx, rval.handle_mut(), true, value.handle())
} else {
match self.type_ {
IteratorType::Keys => {
unsafe {
self.iterable
.get_key_at_index(index)
.to_jsval(cx, value.handle_mut());
}
dict_return(cx, rval.handle_mut(), false, value.handle())
},
IteratorType::Values => {
unsafe {
self.iterable
.get_value_at_index(index)
.to_jsval(cx, value.handle_mut());
}
dict_return(cx, rval.handle_mut(), false, value.handle())
},
IteratorType::Entries => {
rooted!(in(cx) let mut key = UndefinedValue());
unsafe {
self.iterable
.get_key_at_index(index)
.to_jsval(cx, key.handle_mut());
self.iterable
.get_value_at_index(index)
.to_jsval(cx, value.handle_mut());
}
key_and_value_return(cx, rval.handle_mut(), key.handle(), value.handle())
},
}
};
self.index.set(index + 1);
result.map(|_| NonNull::new(rval.get()).expect("got a null pointer"))
}
}
fn | (
cx: *mut JSContext,
mut result: MutableHandleObject,
done: bool,
value: HandleValue,
) -> Fallible<()> {
let mut dict = IterableKeyOrValueResult::empty();
dict.done = done;
dict.value.set(value.get());
rooted!(in(cx) let mut dict_value = UndefinedValue());
unsafe {
dict.to_jsval(cx, dict_value.handle_mut());
}
result.set(dict_value.to_object());
Ok(())
}
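/// Builds the JS iterator-result object for an entries iterator: a dictionary whose `value`
/// holds the `[key, value]` pair and whose `done` flag is always `false`, reflected into
/// `result` as a JS object.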
fn key_and_value_return(
cx: *mut JSContext,
mut result: MutableHandleObject,
key: HandleValue,
value: HandleValue,
) -> Fallible<()> {
let mut dict = IterableKeyAndValueResult::empty();
dict.done = false;
dict.value = Some(
vec![key, value]
.into_iter()
.map(|handle| RootedTraceableBox::from_box(Heap::boxed(handle.get())))
.collect(),
);
rooted!(in(cx) let mut dict_value = UndefinedValue());
unsafe {
dict.to_jsval(cx, dict_value.handle_mut());
}
result.set(dict_value.to_object());
Ok(())
}
| dict_return | identifier_name |
iterable.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
//! Implementation of `iterable<...>` and `iterable<...,...>` WebIDL declarations.
use crate::dom::bindings::codegen::Bindings::IterableIteratorBinding::IterableKeyAndValueResult;
use crate::dom::bindings::codegen::Bindings::IterableIteratorBinding::IterableKeyOrValueResult;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject, Reflector};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::trace::{JSTraceable, RootedTraceableBox};
use crate::dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
use js::conversions::ToJSValConvertible;
use js::jsapi::{Heap, JSContext, JSObject};
use js::jsval::UndefinedValue;
use js::rust::{HandleValue, MutableHandleObject};
use std::cell::Cell;
use std::ptr;
use std::ptr::NonNull;
/// The values that an iterator will iterate over.
#[derive(JSTraceable, MallocSizeOf)]
pub enum IteratorType {
/// The keys of the iterable object.
Keys,
/// The values of the iterable object.
Values,
/// The keys and values of the iterable object combined.
Entries,
}
/// A DOM object that can be iterated over using a pair value iterator.
pub trait Iterable {
/// The type of the key of the iterator pair.
type Key: ToJSValConvertible;
/// The type of the value of the iterator pair.
type Value: ToJSValConvertible;
/// Return the number of entries that can be iterated over.
fn get_iterable_length(&self) -> u32;
/// Return the value at the provided index.
fn get_value_at_index(&self, index: u32) -> Self::Value;
/// Return the key at the provided index.
fn get_key_at_index(&self, index: u32) -> Self::Key;
}
/// An iterator over the iterable entries of a given DOM interface.
//FIXME: #12811 prevents dom_struct with type parameters
#[dom_struct]
pub struct IterableIterator<T: DomObject + JSTraceable + Iterable> {
reflector: Reflector,
iterable: Dom<T>,
type_: IteratorType,
index: Cell<u32>,
}
impl<T: DomObject + JSTraceable + Iterable> IterableIterator<T> {
/// Create a new iterator instance for the provided iterable DOM interface.
pub fn new(
iterable: &T,
type_: IteratorType,
wrap: unsafe fn(*mut JSContext, &GlobalScope, Box<IterableIterator<T>>) -> DomRoot<Self>,
) -> DomRoot<Self> {
let iterator = Box::new(IterableIterator {
reflector: Reflector::new(),
type_: type_,
iterable: Dom::from_ref(iterable),
index: Cell::new(0),
});
reflect_dom_object(iterator, &*iterable.global(), wrap)
}
/// Return the next value from the iterable object.
#[allow(non_snake_case)]
pub fn Next(&self, cx: *mut JSContext) -> Fallible<NonNull<JSObject>> {
let index = self.index.get();
rooted!(in(cx) let mut value = UndefinedValue());
rooted!(in(cx) let mut rval = ptr::null_mut::<JSObject>());
let result = if index >= self.iterable.get_iterable_length() {
dict_return(cx, rval.handle_mut(), true, value.handle())
} else {
match self.type_ {
IteratorType::Keys => {
unsafe {
self.iterable
.get_key_at_index(index)
.to_jsval(cx, value.handle_mut());
}
dict_return(cx, rval.handle_mut(), false, value.handle())
},
IteratorType::Values => {
unsafe {
self.iterable
.get_value_at_index(index)
.to_jsval(cx, value.handle_mut());
}
dict_return(cx, rval.handle_mut(), false, value.handle())
},
IteratorType::Entries => | ,
}
};
self.index.set(index + 1);
result.map(|_| NonNull::new(rval.get()).expect("got a null pointer"))
}
}
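/// Builds the JS iterator-result object for a key-only or value-only iterator: a dictionary
/// carrying the given `value` and `done` flag, reflected into `result` as a JS object.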
fn dict_return(
cx: *mut JSContext,
mut result: MutableHandleObject,
done: bool,
value: HandleValue,
) -> Fallible<()> {
let mut dict = IterableKeyOrValueResult::empty();
dict.done = done;
dict.value.set(value.get());
rooted!(in(cx) let mut dict_value = UndefinedValue());
unsafe {
dict.to_jsval(cx, dict_value.handle_mut());
}
result.set(dict_value.to_object());
Ok(())
}
fn key_and_value_return(
cx: *mut JSContext,
mut result: MutableHandleObject,
key: HandleValue,
value: HandleValue,
) -> Fallible<()> {
let mut dict = IterableKeyAndValueResult::empty();
dict.done = false;
dict.value = Some(
vec![key, value]
.into_iter()
.map(|handle| RootedTraceableBox::from_box(Heap::boxed(handle.get())))
.collect(),
);
rooted!(in(cx) let mut dict_value = UndefinedValue());
unsafe {
dict.to_jsval(cx, dict_value.handle_mut());
}
result.set(dict_value.to_object());
Ok(())
}
| {
rooted!(in(cx) let mut key = UndefinedValue());
unsafe {
self.iterable
.get_key_at_index(index)
.to_jsval(cx, key.handle_mut());
self.iterable
.get_value_at_index(index)
.to_jsval(cx, value.handle_mut());
}
key_and_value_return(cx, rval.handle_mut(), key.handle(), value.handle())
} | conditional_block |
iterable.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#![allow(unsafe_code)]
//! Implementation of `iterable<...>` and `iterable<...,...>` WebIDL declarations.
use crate::dom::bindings::codegen::Bindings::IterableIteratorBinding::IterableKeyAndValueResult;
use crate::dom::bindings::codegen::Bindings::IterableIteratorBinding::IterableKeyOrValueResult;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject, Reflector};
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::trace::{JSTraceable, RootedTraceableBox};
use crate::dom::globalscope::GlobalScope;
use dom_struct::dom_struct;
use js::conversions::ToJSValConvertible;
use js::jsapi::{Heap, JSContext, JSObject};
use js::jsval::UndefinedValue;
use js::rust::{HandleValue, MutableHandleObject};
use std::cell::Cell;
use std::ptr;
use std::ptr::NonNull;
/// The values that an iterator will iterate over.
#[derive(JSTraceable, MallocSizeOf)]
pub enum IteratorType {
/// The keys of the iterable object.
Keys,
/// The values of the iterable object.
Values,
/// The keys and values of the iterable object combined.
Entries,
}
/// A DOM object that can be iterated over using a pair value iterator.
pub trait Iterable {
/// The type of the key of the iterator pair.
type Key: ToJSValConvertible;
/// The type of the value of the iterator pair.
type Value: ToJSValConvertible;
/// Return the number of entries that can be iterated over.
fn get_iterable_length(&self) -> u32;
/// Return the value at the provided index.
fn get_value_at_index(&self, index: u32) -> Self::Value;
/// Return the key at the provided index.
fn get_key_at_index(&self, index: u32) -> Self::Key;
}
/// An iterator over the iterable entries of a given DOM interface.
//FIXME: #12811 prevents dom_struct with type parameters
#[dom_struct]
pub struct IterableIterator<T: DomObject + JSTraceable + Iterable> { | type_: IteratorType,
index: Cell<u32>,
}
impl<T: DomObject + JSTraceable + Iterable> IterableIterator<T> {
/// Create a new iterator instance for the provided iterable DOM interface.
pub fn new(
iterable: &T,
type_: IteratorType,
wrap: unsafe fn(*mut JSContext, &GlobalScope, Box<IterableIterator<T>>) -> DomRoot<Self>,
) -> DomRoot<Self> {
let iterator = Box::new(IterableIterator {
reflector: Reflector::new(),
type_: type_,
iterable: Dom::from_ref(iterable),
index: Cell::new(0),
});
reflect_dom_object(iterator, &*iterable.global(), wrap)
}
/// Return the next value from the iterable object.
#[allow(non_snake_case)]
pub fn Next(&self, cx: *mut JSContext) -> Fallible<NonNull<JSObject>> {
let index = self.index.get();
rooted!(in(cx) let mut value = UndefinedValue());
rooted!(in(cx) let mut rval = ptr::null_mut::<JSObject>());
let result = if index >= self.iterable.get_iterable_length() {
dict_return(cx, rval.handle_mut(), true, value.handle())
} else {
match self.type_ {
IteratorType::Keys => {
unsafe {
self.iterable
.get_key_at_index(index)
.to_jsval(cx, value.handle_mut());
}
dict_return(cx, rval.handle_mut(), false, value.handle())
},
IteratorType::Values => {
unsafe {
self.iterable
.get_value_at_index(index)
.to_jsval(cx, value.handle_mut());
}
dict_return(cx, rval.handle_mut(), false, value.handle())
},
IteratorType::Entries => {
rooted!(in(cx) let mut key = UndefinedValue());
unsafe {
self.iterable
.get_key_at_index(index)
.to_jsval(cx, key.handle_mut());
self.iterable
.get_value_at_index(index)
.to_jsval(cx, value.handle_mut());
}
key_and_value_return(cx, rval.handle_mut(), key.handle(), value.handle())
},
}
};
self.index.set(index + 1);
result.map(|_| NonNull::new(rval.get()).expect("got a null pointer"))
}
}
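/// Builds the JS iterator-result object for a key-only or value-only iterator: a dictionary
/// carrying the given `value` and `done` flag, reflected into `result` as a JS object.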
fn dict_return(
cx: *mut JSContext,
mut result: MutableHandleObject,
done: bool,
value: HandleValue,
) -> Fallible<()> {
let mut dict = IterableKeyOrValueResult::empty();
dict.done = done;
dict.value.set(value.get());
rooted!(in(cx) let mut dict_value = UndefinedValue());
unsafe {
dict.to_jsval(cx, dict_value.handle_mut());
}
result.set(dict_value.to_object());
Ok(())
}
fn key_and_value_return(
cx: *mut JSContext,
mut result: MutableHandleObject,
key: HandleValue,
value: HandleValue,
) -> Fallible<()> {
let mut dict = IterableKeyAndValueResult::empty();
dict.done = false;
dict.value = Some(
vec![key, value]
.into_iter()
.map(|handle| RootedTraceableBox::from_box(Heap::boxed(handle.get())))
.collect(),
);
rooted!(in(cx) let mut dict_value = UndefinedValue());
unsafe {
dict.to_jsval(cx, dict_value.handle_mut());
}
result.set(dict_value.to_object());
Ok(())
} | reflector: Reflector,
iterable: Dom<T>, | random_line_split |
session.rs | // Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Session handlers factory.
use ws;
use authcode_store::AuthCodes;
use std::path::{PathBuf, Path};
use std::sync::Arc;
use std::str::FromStr;
use jsonrpc_core::{IoHandler, GenericIoHandler};
use util::{H256, version};
#[cfg(feature = "parity-ui")]
mod ui {
extern crate parity_ui as ui;
extern crate parity_dapps_glue as dapps;
use self::dapps::WebApp;
#[derive(Default)]
pub struct Handler {
ui: ui::App,
}
impl Handler {
pub fn handle(&self, req: &str) -> Option<&dapps::File> {
let file = match req {
"" | "/" => "index.html",
path => &path[1..],
};
self.ui.file(file)
}
}
}
#[cfg(not(feature = "parity-ui"))]
mod ui {
pub struct File {
pub content: &'static [u8],
pub content_type: &'static str,
}
#[derive(Default)]
pub struct Handler;
impl Handler {
pub fn handle(&self, _req: &str) -> Option<&File> {
None
}
}
}
const HOME_DOMAIN: &'static str = "home.parity";
fn origin_is_allowed(self_origin: &str, header: Option<&[u8]>) -> bool {
match header {
None => false,
Some(h) => {
let v = String::from_utf8(h.to_owned()).ok();
match v {
Some(ref origin) if origin.starts_with("chrome-extension://") => true,
Some(ref origin) if origin.starts_with(self_origin) => true,
Some(ref origin) if origin.starts_with(&format!("http://{}", self_origin)) => true,
Some(ref origin) if origin.starts_with(HOME_DOMAIN) => true,
Some(ref origin) if origin.starts_with(&format!("http://{}", HOME_DOMAIN)) => true,
_ => false
}
}
}
}
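/// Validates the WebSocket protocol header, which is expected to carry exactly one
/// `<auth-hash>_<timestamp>` entry matching a token stored in the authcodes file.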
fn auth_is_valid(codes_path: &Path, protocols: ws::Result<Vec<&str>>) -> bool {
match protocols {
Ok(ref protocols) if protocols.len() == 1 => {
protocols.iter().any(|protocol| {
let mut split = protocol.split('_');
let auth = split.next().and_then(|v| H256::from_str(v).ok());
let time = split.next().and_then(|v| u64::from_str_radix(v, 10).ok());
if let (Some(auth), Some(time)) = (auth, time) {
// Check if the code is valid
AuthCodes::from_file(codes_path)
.map(|mut codes| {
// remove old tokens
codes.clear_garbage();
let res = codes.is_valid(&auth, time);
// make sure to save back authcodes - it might have been modified
if codes.to_file(codes_path).is_err() {
warn!(target: "signer", "Couldn't save authorization codes to file.");
}
res
})
.unwrap_or(false)
} else {
false
}
})
},
_ => false
}
}
fn add_headers(mut response: ws::Response, mime: &str) -> ws::Response {
let content_len = format!("{}", response.len());
{
let mut headers = response.headers_mut();
headers.push(("X-Frame-Options".into(), b"SAMEORIGIN".to_vec()));
headers.push(("X-XSS-Protection".into(), b"1; mode=block".to_vec()));
headers.push(("X-Content-Type-Options".into(), b"nosniff".to_vec()));
headers.push(("Server".into(), b"Parity/SignerUI".to_vec()));
headers.push(("Content-Length".into(), content_len.as_bytes().to_vec()));
headers.push(("Content-Type".into(), mime.as_bytes().to_vec()));
headers.push(("Connection".into(), b"close".to_vec()));
}
response
}
pub struct Session {
out: ws::Sender,
skip_origin_validation: bool,
self_origin: String,
authcodes_path: PathBuf,
handler: Arc<IoHandler>,
file_handler: Arc<ui::Handler>,
}
impl ws::Handler for Session {
#[cfg_attr(feature="dev", allow(collapsible_if))]
fn on_request(&mut self, req: &ws::Request) -> ws::Result<(ws::Response)> {
trace!(target: "signer", "Handling request: {:?}", req);
// TODO [ToDr] ws server is not handling proxied requests correctly:
// Trim domain name from resource part:
let resource = req.resource().trim_left_matches(&format!("http://{}", HOME_DOMAIN));
// Styles file is allowed for error pages to display nicely.
let is_styles_file = resource == "/styles.css";
// Check request origin and host header.
if!self.skip_origin_validation {
let origin = req.header("origin").or_else(|| req.header("Origin")).map(|x| &x[..]);
let host = req.header("host").or_else(|| req.header("Host")).map(|x| &x[..]);
let is_valid = origin_is_allowed(&self.self_origin, origin) || (origin.is_none() && origin_is_allowed(&self.self_origin, host));
let is_valid = is_styles_file || is_valid;
if!is_valid {
warn!(target: "signer", "Blocked connection to Signer API from untrusted origin.");
return Ok(error(
ErrorType::Forbidden,
"URL Blocked",
"You are not allowed to access Trusted Signer using this URL.",
Some(&format!("Use: http://{}", self.self_origin)),
));
}
}
// PROXY requests when running behind home.parity
if req.method() == "CONNECT" {
let mut res = ws::Response::ok("".into());
res.headers_mut().push(("Content-Length".into(), b"0".to_vec()));
res.headers_mut().push(("Connection".into(), b"keep-alive".to_vec()));
return Ok(res);
}
// Detect if it's a websocket request
// (styles file skips origin validation, so make sure to prevent WS connections on this resource)
if req.header("sec-websocket-key").is_some() &&!is_styles_file {
// Check authorization
if!auth_is_valid(&self.authcodes_path, req.protocols()) {
info!(target: "signer", "Unauthorized connection to Signer API blocked.");
return Ok(error(ErrorType::Forbidden, "Not Authorized", "Request to this API was not authorized.", None));
}
let protocols = req.protocols().expect("Existence checked by authorization.");
let protocol = protocols.get(0).expect("Proved by authorization.");
return ws::Response::from_request(req).map(|mut res| {
// To make WebSockets connection successful we need to send back the protocol header.
res.set_protocol(protocol);
res
});
}
debug!(target: "signer", "Requesting resource: {:?}", resource);
// Otherwise try to serve a page.
Ok(self.file_handler.handle(resource)
.map_or_else(
// return 404 not found
|| error(ErrorType::NotFound, "Not found", "Requested file was not found.", None),
// or serve the file
|f| add_headers(ws::Response::ok_raw(f.content.to_vec()), f.content_type)
))
}
fn on_message(&mut self, msg: ws::Message) -> ws::Result<()> {
let req = try!(msg.as_text());
let out = self.out.clone();
self.handler.handle_request(req, move |response| {
if let Some(result) = response {
let res = out.send(result);
if let Err(e) = res {
warn!(target: "signer", "Error while sending response: {:?}", e);
}
}
});
Ok(())
}
}
pub struct Factory {
handler: Arc<IoHandler>,
skip_origin_validation: bool,
self_origin: String,
authcodes_path: PathBuf,
file_handler: Arc<ui::Handler>,
}
impl Factory {
pub fn new(handler: Arc<IoHandler>, self_origin: String, authcodes_path: PathBuf, skip_origin_validation: bool) -> Self {
Factory {
handler: handler,
skip_origin_validation: skip_origin_validation,
self_origin: self_origin,
authcodes_path: authcodes_path,
file_handler: Arc::new(ui::Handler::default()),
}
}
}
impl ws::Factory for Factory {
type Handler = Session;
fn connection_made(&mut self, sender: ws::Sender) -> Self::Handler |
}
enum ErrorType {
NotFound,
Forbidden,
}
fn error(error: ErrorType, title: &str, message: &str, details: Option<&str>) -> ws::Response {
let content = format!(
include_str!("./error_tpl.html"),
title=title,
meta="",
message=message,
details=details.unwrap_or(""),
version=version(),
);
let res = match error {
ErrorType::NotFound => ws::Response::not_found(content),
ErrorType::Forbidden => ws::Response::forbidden(content),
};
add_headers(res, "text/html")
}
| {
Session {
out: sender,
handler: self.handler.clone(),
skip_origin_validation: self.skip_origin_validation,
self_origin: self.self_origin.clone(),
authcodes_path: self.authcodes_path.clone(),
file_handler: self.file_handler.clone(),
}
} | identifier_body |
session.rs | // Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Session handlers factory.
use ws;
use authcode_store::AuthCodes;
use std::path::{PathBuf, Path};
use std::sync::Arc;
use std::str::FromStr;
use jsonrpc_core::{IoHandler, GenericIoHandler};
use util::{H256, version};
#[cfg(feature = "parity-ui")]
mod ui {
extern crate parity_ui as ui;
extern crate parity_dapps_glue as dapps;
use self::dapps::WebApp;
#[derive(Default)]
pub struct Handler {
ui: ui::App,
}
impl Handler {
pub fn handle(&self, req: &str) -> Option<&dapps::File> {
let file = match req {
"" | "/" => "index.html",
path => &path[1..],
};
self.ui.file(file)
}
}
}
#[cfg(not(feature = "parity-ui"))]
mod ui {
pub struct | {
pub content: &'static [u8],
pub content_type: &'static str,
}
#[derive(Default)]
pub struct Handler;
impl Handler {
pub fn handle(&self, _req: &str) -> Option<&File> {
None
}
}
}
const HOME_DOMAIN: &'static str = "home.parity";
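/// Checks whether an `Origin`/`Host` header value is trusted: Chrome extension origins, the
/// signer's own origin, and the `home.parity` domain (the latter two with or without an
/// `http://` prefix) are accepted.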
fn origin_is_allowed(self_origin: &str, header: Option<&[u8]>) -> bool {
match header {
None => false,
Some(h) => {
let v = String::from_utf8(h.to_owned()).ok();
match v {
Some(ref origin) if origin.starts_with("chrome-extension://") => true,
Some(ref origin) if origin.starts_with(self_origin) => true,
Some(ref origin) if origin.starts_with(&format!("http://{}", self_origin)) => true,
Some(ref origin) if origin.starts_with(HOME_DOMAIN) => true,
Some(ref origin) if origin.starts_with(&format!("http://{}", HOME_DOMAIN)) => true,
_ => false
}
}
}
}
fn auth_is_valid(codes_path: &Path, protocols: ws::Result<Vec<&str>>) -> bool {
match protocols {
Ok(ref protocols) if protocols.len() == 1 => {
protocols.iter().any(|protocol| {
let mut split = protocol.split('_');
let auth = split.next().and_then(|v| H256::from_str(v).ok());
let time = split.next().and_then(|v| u64::from_str_radix(v, 10).ok());
if let (Some(auth), Some(time)) = (auth, time) {
// Check if the code is valid
AuthCodes::from_file(codes_path)
.map(|mut codes| {
// remove old tokens
codes.clear_garbage();
let res = codes.is_valid(&auth, time);
// make sure to save back authcodes - it might have been modified
if codes.to_file(codes_path).is_err() {
warn!(target: "signer", "Couldn't save authorization codes to file.");
}
res
})
.unwrap_or(false)
} else {
false
}
})
},
_ => false
}
}
fn add_headers(mut response: ws::Response, mime: &str) -> ws::Response {
let content_len = format!("{}", response.len());
{
let mut headers = response.headers_mut();
headers.push(("X-Frame-Options".into(), b"SAMEORIGIN".to_vec()));
headers.push(("X-XSS-Protection".into(), b"1; mode=block".to_vec()));
headers.push(("X-Content-Type-Options".into(), b"nosniff".to_vec()));
headers.push(("Server".into(), b"Parity/SignerUI".to_vec()));
headers.push(("Content-Length".into(), content_len.as_bytes().to_vec()));
headers.push(("Content-Type".into(), mime.as_bytes().to_vec()));
headers.push(("Connection".into(), b"close".to_vec()));
}
response
}
pub struct Session {
out: ws::Sender,
skip_origin_validation: bool,
self_origin: String,
authcodes_path: PathBuf,
handler: Arc<IoHandler>,
file_handler: Arc<ui::Handler>,
}
impl ws::Handler for Session {
#[cfg_attr(feature="dev", allow(collapsible_if))]
fn on_request(&mut self, req: &ws::Request) -> ws::Result<(ws::Response)> {
trace!(target: "signer", "Handling request: {:?}", req);
// TODO [ToDr] ws server is not handling proxied requests correctly:
// Trim domain name from resource part:
let resource = req.resource().trim_left_matches(&format!("http://{}", HOME_DOMAIN));
// Styles file is allowed for error pages to display nicely.
let is_styles_file = resource == "/styles.css";
// Check request origin and host header.
if!self.skip_origin_validation {
let origin = req.header("origin").or_else(|| req.header("Origin")).map(|x| &x[..]);
let host = req.header("host").or_else(|| req.header("Host")).map(|x| &x[..]);
let is_valid = origin_is_allowed(&self.self_origin, origin) || (origin.is_none() && origin_is_allowed(&self.self_origin, host));
let is_valid = is_styles_file || is_valid;
if!is_valid {
warn!(target: "signer", "Blocked connection to Signer API from untrusted origin.");
return Ok(error(
ErrorType::Forbidden,
"URL Blocked",
"You are not allowed to access Trusted Signer using this URL.",
Some(&format!("Use: http://{}", self.self_origin)),
));
}
}
// PROXY requests when running behind home.parity
if req.method() == "CONNECT" {
let mut res = ws::Response::ok("".into());
res.headers_mut().push(("Content-Length".into(), b"0".to_vec()));
res.headers_mut().push(("Connection".into(), b"keep-alive".to_vec()));
return Ok(res);
}
// Detect if it's a websocket request
// (styles file skips origin validation, so make sure to prevent WS connections on this resource)
if req.header("sec-websocket-key").is_some() &&!is_styles_file {
// Check authorization
if!auth_is_valid(&self.authcodes_path, req.protocols()) {
info!(target: "signer", "Unauthorized connection to Signer API blocked.");
return Ok(error(ErrorType::Forbidden, "Not Authorized", "Request to this API was not authorized.", None));
}
let protocols = req.protocols().expect("Existence checked by authorization.");
let protocol = protocols.get(0).expect("Proved by authorization.");
return ws::Response::from_request(req).map(|mut res| {
// To make WebSockets connection successful we need to send back the protocol header.
res.set_protocol(protocol);
res
});
}
debug!(target: "signer", "Requesting resource: {:?}", resource);
// Otherwise try to serve a page.
Ok(self.file_handler.handle(resource)
.map_or_else(
// return 404 not found
|| error(ErrorType::NotFound, "Not found", "Requested file was not found.", None),
// or serve the file
|f| add_headers(ws::Response::ok_raw(f.content.to_vec()), f.content_type)
))
}
fn on_message(&mut self, msg: ws::Message) -> ws::Result<()> {
let req = try!(msg.as_text());
let out = self.out.clone();
self.handler.handle_request(req, move |response| {
if let Some(result) = response {
let res = out.send(result);
if let Err(e) = res {
warn!(target: "signer", "Error while sending response: {:?}", e);
}
}
});
Ok(())
}
}
pub struct Factory {
handler: Arc<IoHandler>,
skip_origin_validation: bool,
self_origin: String,
authcodes_path: PathBuf,
file_handler: Arc<ui::Handler>,
}
impl Factory {
pub fn new(handler: Arc<IoHandler>, self_origin: String, authcodes_path: PathBuf, skip_origin_validation: bool) -> Self {
Factory {
handler: handler,
skip_origin_validation: skip_origin_validation,
self_origin: self_origin,
authcodes_path: authcodes_path,
file_handler: Arc::new(ui::Handler::default()),
}
}
}
impl ws::Factory for Factory {
type Handler = Session;
fn connection_made(&mut self, sender: ws::Sender) -> Self::Handler {
Session {
out: sender,
handler: self.handler.clone(),
skip_origin_validation: self.skip_origin_validation,
self_origin: self.self_origin.clone(),
authcodes_path: self.authcodes_path.clone(),
file_handler: self.file_handler.clone(),
}
}
}
enum ErrorType {
NotFound,
Forbidden,
}
fn error(error: ErrorType, title: &str, message: &str, details: Option<&str>) -> ws::Response {
let content = format!(
include_str!("./error_tpl.html"),
title=title,
meta="",
message=message,
details=details.unwrap_or(""),
version=version(),
);
let res = match error {
ErrorType::NotFound => ws::Response::not_found(content),
ErrorType::Forbidden => ws::Response::forbidden(content),
};
add_headers(res, "text/html")
}
| File | identifier_name |
session.rs | // Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Session handlers factory.
use ws;
use authcode_store::AuthCodes;
use std::path::{PathBuf, Path};
use std::sync::Arc;
use std::str::FromStr;
use jsonrpc_core::{IoHandler, GenericIoHandler};
use util::{H256, version};
#[cfg(feature = "parity-ui")]
mod ui {
extern crate parity_ui as ui;
extern crate parity_dapps_glue as dapps;
use self::dapps::WebApp;
#[derive(Default)]
pub struct Handler {
ui: ui::App,
}
impl Handler {
pub fn handle(&self, req: &str) -> Option<&dapps::File> {
let file = match req {
"" | "/" => "index.html",
path => &path[1..],
};
self.ui.file(file)
}
}
}
#[cfg(not(feature = "parity-ui"))]
mod ui {
pub struct File {
pub content: &'static [u8],
pub content_type: &'static str,
}
#[derive(Default)]
pub struct Handler;
impl Handler {
pub fn handle(&self, _req: &str) -> Option<&File> {
None
}
}
}
const HOME_DOMAIN: &'static str = "home.parity";
fn origin_is_allowed(self_origin: &str, header: Option<&[u8]>) -> bool {
match header {
None => false,
Some(h) => {
let v = String::from_utf8(h.to_owned()).ok();
match v {
Some(ref origin) if origin.starts_with("chrome-extension://") => true,
Some(ref origin) if origin.starts_with(self_origin) => true,
Some(ref origin) if origin.starts_with(&format!("http://{}", self_origin)) => true,
Some(ref origin) if origin.starts_with(HOME_DOMAIN) => true,
Some(ref origin) if origin.starts_with(&format!("http://{}", HOME_DOMAIN)) => true,
_ => false
}
}
}
}
fn auth_is_valid(codes_path: &Path, protocols: ws::Result<Vec<&str>>) -> bool {
match protocols {
Ok(ref protocols) if protocols.len() == 1 => {
protocols.iter().any(|protocol| {
let mut split = protocol.split('_');
let auth = split.next().and_then(|v| H256::from_str(v).ok());
let time = split.next().and_then(|v| u64::from_str_radix(v, 10).ok());
if let (Some(auth), Some(time)) = (auth, time) {
// Check if the code is valid
AuthCodes::from_file(codes_path)
.map(|mut codes| {
// remove old tokens
codes.clear_garbage();
let res = codes.is_valid(&auth, time);
// make sure to save back authcodes - it might have been modified
if codes.to_file(codes_path).is_err() {
warn!(target: "signer", "Couldn't save authorization codes to file.");
}
res
})
.unwrap_or(false)
} else {
false
}
})
},
_ => false
}
}
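/// Attaches the standard security and bookkeeping headers (frame/XSS/sniffing protection,
/// server name, content length and type, connection close) to a response before it is returned.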
fn add_headers(mut response: ws::Response, mime: &str) -> ws::Response {
let content_len = format!("{}", response.len());
{
let mut headers = response.headers_mut();
headers.push(("X-Frame-Options".into(), b"SAMEORIGIN".to_vec()));
headers.push(("X-XSS-Protection".into(), b"1; mode=block".to_vec()));
headers.push(("X-Content-Type-Options".into(), b"nosniff".to_vec()));
headers.push(("Server".into(), b"Parity/SignerUI".to_vec()));
headers.push(("Content-Length".into(), content_len.as_bytes().to_vec()));
headers.push(("Content-Type".into(), mime.as_bytes().to_vec()));
headers.push(("Connection".into(), b"close".to_vec()));
}
response
}
pub struct Session {
out: ws::Sender,
skip_origin_validation: bool,
self_origin: String,
authcodes_path: PathBuf,
handler: Arc<IoHandler>,
file_handler: Arc<ui::Handler>,
}
impl ws::Handler for Session {
#[cfg_attr(feature="dev", allow(collapsible_if))]
fn on_request(&mut self, req: &ws::Request) -> ws::Result<(ws::Response)> {
trace!(target: "signer", "Handling request: {:?}", req);
// TODO [ToDr] ws server is not handling proxied requests correctly:
// Trim domain name from resource part:
let resource = req.resource().trim_left_matches(&format!("http://{}", HOME_DOMAIN));
// Styles file is allowed for error pages to display nicely.
let is_styles_file = resource == "/styles.css";
// Check request origin and host header.
if!self.skip_origin_validation {
let origin = req.header("origin").or_else(|| req.header("Origin")).map(|x| &x[..]);
let host = req.header("host").or_else(|| req.header("Host")).map(|x| &x[..]);
let is_valid = origin_is_allowed(&self.self_origin, origin) || (origin.is_none() && origin_is_allowed(&self.self_origin, host));
let is_valid = is_styles_file || is_valid;
if!is_valid {
warn!(target: "signer", "Blocked connection to Signer API from untrusted origin.");
return Ok(error(
ErrorType::Forbidden,
"URL Blocked",
"You are not allowed to access Trusted Signer using this URL.",
Some(&format!("Use: http://{}", self.self_origin)),
));
}
}
// PROXY requests when running behind home.parity
if req.method() == "CONNECT" {
let mut res = ws::Response::ok("".into());
res.headers_mut().push(("Content-Length".into(), b"0".to_vec()));
res.headers_mut().push(("Connection".into(), b"keep-alive".to_vec()));
return Ok(res);
}
// Detect if it's a websocket request
// (styles file skips origin validation, so make sure to prevent WS connections on this resource)
if req.header("sec-websocket-key").is_some() &&!is_styles_file {
// Check authorization
if!auth_is_valid(&self.authcodes_path, req.protocols()) {
info!(target: "signer", "Unauthorized connection to Signer API blocked.");
return Ok(error(ErrorType::Forbidden, "Not Authorized", "Request to this API was not authorized.", None));
}
let protocols = req.protocols().expect("Existence checked by authorization.");
let protocol = protocols.get(0).expect("Proved by authorization.");
return ws::Response::from_request(req).map(|mut res| {
// To make WebSockets connection successful we need to send back the protocol header.
res.set_protocol(protocol);
res
});
}
debug!(target: "signer", "Requesting resource: {:?}", resource);
// Otherwise try to serve a page.
Ok(self.file_handler.handle(resource)
.map_or_else(
// return 404 not found
|| error(ErrorType::NotFound, "Not found", "Requested file was not found.", None),
// or serve the file
|f| add_headers(ws::Response::ok_raw(f.content.to_vec()), f.content_type)
))
}
fn on_message(&mut self, msg: ws::Message) -> ws::Result<()> {
let req = try!(msg.as_text());
let out = self.out.clone();
self.handler.handle_request(req, move |response| {
if let Some(result) = response {
let res = out.send(result);
if let Err(e) = res {
warn!(target: "signer", "Error while sending response: {:?}", e);
}
}
});
Ok(())
}
}
pub struct Factory {
handler: Arc<IoHandler>,
skip_origin_validation: bool,
self_origin: String,
authcodes_path: PathBuf,
file_handler: Arc<ui::Handler>,
}
impl Factory {
pub fn new(handler: Arc<IoHandler>, self_origin: String, authcodes_path: PathBuf, skip_origin_validation: bool) -> Self {
Factory {
handler: handler,
skip_origin_validation: skip_origin_validation,
self_origin: self_origin,
authcodes_path: authcodes_path,
file_handler: Arc::new(ui::Handler::default()),
}
}
}
impl ws::Factory for Factory {
type Handler = Session;
fn connection_made(&mut self, sender: ws::Sender) -> Self::Handler {
Session {
out: sender,
handler: self.handler.clone(),
skip_origin_validation: self.skip_origin_validation,
self_origin: self.self_origin.clone(),
authcodes_path: self.authcodes_path.clone(), | }
}
enum ErrorType {
NotFound,
Forbidden,
}
fn error(error: ErrorType, title: &str, message: &str, details: Option<&str>) -> ws::Response {
let content = format!(
include_str!("./error_tpl.html"),
title=title,
meta="",
message=message,
details=details.unwrap_or(""),
version=version(),
);
let res = match error {
ErrorType::NotFound => ws::Response::not_found(content),
ErrorType::Forbidden => ws::Response::forbidden(content),
};
add_headers(res, "text/html")
} | file_handler: self.file_handler.clone(),
} | random_line_split |
session.rs | // Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Session handlers factory.
use ws;
use authcode_store::AuthCodes;
use std::path::{PathBuf, Path};
use std::sync::Arc;
use std::str::FromStr;
use jsonrpc_core::{IoHandler, GenericIoHandler};
use util::{H256, version};
#[cfg(feature = "parity-ui")]
mod ui {
extern crate parity_ui as ui;
extern crate parity_dapps_glue as dapps;
use self::dapps::WebApp;
#[derive(Default)]
pub struct Handler {
ui: ui::App,
}
impl Handler {
pub fn handle(&self, req: &str) -> Option<&dapps::File> {
let file = match req {
"" | "/" => "index.html",
path => &path[1..],
};
self.ui.file(file)
}
}
}
#[cfg(not(feature = "parity-ui"))]
mod ui {
pub struct File {
pub content: &'static [u8],
pub content_type: &'static str,
}
#[derive(Default)]
pub struct Handler;
impl Handler {
pub fn handle(&self, _req: &str) -> Option<&File> {
None
}
}
}
const HOME_DOMAIN: &'static str = "home.parity";
fn origin_is_allowed(self_origin: &str, header: Option<&[u8]>) -> bool {
match header {
None => false,
Some(h) => {
let v = String::from_utf8(h.to_owned()).ok();
match v {
Some(ref origin) if origin.starts_with("chrome-extension://") => true,
Some(ref origin) if origin.starts_with(self_origin) => true,
Some(ref origin) if origin.starts_with(&format!("http://{}", self_origin)) => true,
Some(ref origin) if origin.starts_with(HOME_DOMAIN) => true,
Some(ref origin) if origin.starts_with(&format!("http://{}", HOME_DOMAIN)) => true,
_ => false
}
}
}
}
fn auth_is_valid(codes_path: &Path, protocols: ws::Result<Vec<&str>>) -> bool {
match protocols {
Ok(ref protocols) if protocols.len() == 1 => {
protocols.iter().any(|protocol| {
let mut split = protocol.split('_');
let auth = split.next().and_then(|v| H256::from_str(v).ok());
let time = split.next().and_then(|v| u64::from_str_radix(v, 10).ok());
if let (Some(auth), Some(time)) = (auth, time) {
// Check if the code is valid
AuthCodes::from_file(codes_path)
.map(|mut codes| {
// remove old tokens
codes.clear_garbage();
let res = codes.is_valid(&auth, time);
// make sure to save back authcodes - it might have been modified
if codes.to_file(codes_path).is_err() |
res
})
.unwrap_or(false)
} else {
false
}
})
},
_ => false
}
}
fn add_headers(mut response: ws::Response, mime: &str) -> ws::Response {
let content_len = format!("{}", response.len());
{
let mut headers = response.headers_mut();
headers.push(("X-Frame-Options".into(), b"SAMEORIGIN".to_vec()));
headers.push(("X-XSS-Protection".into(), b"1; mode=block".to_vec()));
headers.push(("X-Content-Type-Options".into(), b"nosniff".to_vec()));
headers.push(("Server".into(), b"Parity/SignerUI".to_vec()));
headers.push(("Content-Length".into(), content_len.as_bytes().to_vec()));
headers.push(("Content-Type".into(), mime.as_bytes().to_vec()));
headers.push(("Connection".into(), b"close".to_vec()));
}
response
}
pub struct Session {
out: ws::Sender,
skip_origin_validation: bool,
self_origin: String,
authcodes_path: PathBuf,
handler: Arc<IoHandler>,
file_handler: Arc<ui::Handler>,
}
impl ws::Handler for Session {
#[cfg_attr(feature="dev", allow(collapsible_if))]
fn on_request(&mut self, req: &ws::Request) -> ws::Result<(ws::Response)> {
trace!(target: "signer", "Handling request: {:?}", req);
// TODO [ToDr] ws server is not handling proxied requests correctly:
// Trim domain name from resource part:
let resource = req.resource().trim_left_matches(&format!("http://{}", HOME_DOMAIN));
// Styles file is allowed for error pages to display nicely.
let is_styles_file = resource == "/styles.css";
// Check request origin and host header.
if !self.skip_origin_validation {
let origin = req.header("origin").or_else(|| req.header("Origin")).map(|x| &x[..]);
let host = req.header("host").or_else(|| req.header("Host")).map(|x| &x[..]);
let is_valid = origin_is_allowed(&self.self_origin, origin) || (origin.is_none() && origin_is_allowed(&self.self_origin, host));
let is_valid = is_styles_file || is_valid;
if !is_valid {
warn!(target: "signer", "Blocked connection to Signer API from untrusted origin.");
return Ok(error(
ErrorType::Forbidden,
"URL Blocked",
"You are not allowed to access Trusted Signer using this URL.",
Some(&format!("Use: http://{}", self.self_origin)),
));
}
}
// PROXY requests when running behind home.parity
if req.method() == "CONNECT" {
let mut res = ws::Response::ok("".into());
res.headers_mut().push(("Content-Length".into(), b"0".to_vec()));
res.headers_mut().push(("Connection".into(), b"keep-alive".to_vec()));
return Ok(res);
}
// Detect if it's a websocket request
// (styles file skips origin validation, so make sure to prevent WS connections on this resource)
if req.header("sec-websocket-key").is_some() &&!is_styles_file {
// Check authorization
if !auth_is_valid(&self.authcodes_path, req.protocols()) {
info!(target: "signer", "Unauthorized connection to Signer API blocked.");
return Ok(error(ErrorType::Forbidden, "Not Authorized", "Request to this API was not authorized.", None));
}
let protocols = req.protocols().expect("Existence checked by authorization.");
let protocol = protocols.get(0).expect("Proved by authorization.");
return ws::Response::from_request(req).map(|mut res| {
// To make WebSockets connection successful we need to send back the protocol header.
res.set_protocol(protocol);
res
});
}
debug!(target: "signer", "Requesting resource: {:?}", resource);
// Otherwise try to serve a page.
Ok(self.file_handler.handle(resource)
.map_or_else(
// return 404 not found
|| error(ErrorType::NotFound, "Not found", "Requested file was not found.", None),
// or serve the file
|f| add_headers(ws::Response::ok_raw(f.content.to_vec()), f.content_type)
))
}
fn on_message(&mut self, msg: ws::Message) -> ws::Result<()> {
let req = try!(msg.as_text());
let out = self.out.clone();
self.handler.handle_request(req, move |response| {
if let Some(result) = response {
let res = out.send(result);
if let Err(e) = res {
warn!(target: "signer", "Error while sending response: {:?}", e);
}
}
});
Ok(())
}
}
pub struct Factory {
handler: Arc<IoHandler>,
skip_origin_validation: bool,
self_origin: String,
authcodes_path: PathBuf,
file_handler: Arc<ui::Handler>,
}
impl Factory {
pub fn new(handler: Arc<IoHandler>, self_origin: String, authcodes_path: PathBuf, skip_origin_validation: bool) -> Self {
Factory {
handler: handler,
skip_origin_validation: skip_origin_validation,
self_origin: self_origin,
authcodes_path: authcodes_path,
file_handler: Arc::new(ui::Handler::default()),
}
}
}
impl ws::Factory for Factory {
type Handler = Session;
fn connection_made(&mut self, sender: ws::Sender) -> Self::Handler {
Session {
out: sender,
handler: self.handler.clone(),
skip_origin_validation: self.skip_origin_validation,
self_origin: self.self_origin.clone(),
authcodes_path: self.authcodes_path.clone(),
file_handler: self.file_handler.clone(),
}
}
}
enum ErrorType {
NotFound,
Forbidden,
}
fn error(error: ErrorType, title: &str, message: &str, details: Option<&str>) -> ws::Response {
let content = format!(
include_str!("./error_tpl.html"),
title=title,
meta="",
message=message,
details=details.unwrap_or(""),
version=version(),
);
let res = match error {
ErrorType::NotFound => ws::Response::not_found(content),
ErrorType::Forbidden => ws::Response::forbidden(content),
};
add_headers(res, "text/html")
}
| {
warn!(target: "signer", "Couldn't save authorization codes to file.");
} | conditional_block |
queue.rs | use std::cmp::Ordering;
use base::command::CommandType;
use base::party::Party;
use base::runner::BattleFlagsType;
#[derive(Debug)]
struct PartyCommand
{
party: bool,
commands: Vec<Option<CommandType>>,
ready: usize,
total: usize,
}
impl PartyCommand
{
fn | (members: usize) -> Self
{
let mut commands = Vec::with_capacity(members);
for _ in 0..members
{
commands.push(None);
}
PartyCommand
{
party: false,
commands: commands,
ready: 0,
total: members,
}
}
fn command_count(&self) -> usize
{
self.commands.len()
}
fn command_get(&self, index: usize) -> Option<&CommandType>
{
self.commands[index].as_ref()
}
fn command_take(&mut self, index: usize) -> CommandType
{
debug_assert!(self.commands[index].is_some());
self.ready -= 1;
self.commands[index].take().unwrap()
}
fn command_add(&mut self, command: CommandType, member: usize) -> isize
{
let mut change = 0;
if self.party
{
// All members are now waiting for their individual commands.
change = - (self.total as isize) + 1;
self.party = false;
for i in 0..self.total
{
self.commands[i] = None;
}
self.ready = 1;
}
else if !self.commands[member].is_some()
{
change = 1;
self.ready += 1;
}
self.commands[member] = Some(command);
change
}
fn command_add_party(&mut self, command: CommandType) -> usize
{
let change = self.total - self.ready;
if self.party
{
for i in 1..self.total
{
self.commands[i] = None;
}
}
self.commands[0] = Some(command);
self.ready = self.total;
self.party = true;
change
}
fn command_remove(&mut self, member: usize) -> usize
{
if let Some(_) = self.commands[member]
{
self.commands[member] = None;
self.ready -= 1;
1
}
else
{
0
}
}
fn member_remove(&mut self)
{
self.total -= 1;
}
}
/// Manages a list of upcoming battle commands.
///
/// By default, the queue is considered not ready. At this state, new commands can be added. Once
/// all parties have a command for each of their members, the queue is considered ready. At that
/// point, commands are sorted and consumed. Once all commands are consumed, the queue goes back to
/// its default state where it is not ready and no commands have been associated with any parties.
///
#[derive(Debug)]
pub struct BattleQueue
{
waiting: usize,
total: usize,
queue: Vec<PartyCommand>,
}
impl BattleQueue
{
/// Initializes a new empty queue that is not ready.
pub fn new(parties: &[Party]) -> Self
{
let mut queue = Vec::with_capacity(parties.len());
let mut total = 0;
for party in parties
{
total += party.active_count();
queue.push(PartyCommand::new(party.active_count()));
}
BattleQueue
{
waiting: total,
total: total,
queue: queue,
}
}
/// Returns true if the queue was populated or is in the process of being consumed.
pub fn ready(&self) -> bool
{
self.waiting == 0
}
/// Returns the command for the indicated party member.
pub fn command_get(&self, party: usize, member: usize) -> Option<&CommandType>
{
self.queue[party].command_get(member)
}
/// Adds the given command to the queue for the indicated members of the given party.
///
/// This will override any commands already given to this party member. If the given party
/// already has an attached command, then all members of that party will be invalidated.
///
pub fn command_add(&mut self, command: CommandType, party: usize, member: usize)
{
let change = self.queue[party].command_add(command, member);
self.waiting = (self.waiting as isize - change) as usize;
}
/// Adds the given command to the queue for all party members of the given party.
///
/// This will override any commands for the given party.
///
pub fn command_add_party(&mut self, command: CommandType, party: usize)
{
self.waiting -= self.queue[party].command_add_party(command);
}
/// Removes any command requested by the indicated member of the given party.
///
/// This command will not remove any other commands that reference this member.
///
pub fn command_remove(&mut self, party: usize, member: usize)
{
self.queue[party].command_remove(member);
}
/// Finds the highest priority command in the queue and pops it.
///
/// The queue must be ready before calling this method.
///
pub fn command_consume(&mut self, parties: &[Party], flags: BattleFlagsType) -> CommandType
{
let mut finished = true;
let mut priority = 0;
let mut priority_index = 0;
// Find a party to start from.
'outer: for party_index in 0..self.queue.len()
{
if self.queue[party_index].command_count() > 0
{
for command_index in 0..self.queue[party_index].command_count()
{
if let Some(_) = self.queue[party_index].command_get(command_index)
{
priority = party_index;
priority_index = command_index;
break 'outer;
}
}
}
}
// Find the minimum uses the starting point as a base.
for party_index in priority..self.queue.len()
{
for command_index in 0..self.queue[party_index].command_count()
{
let priority_command = self.queue[priority].command_get(priority_index);
if let Some(command) = self.queue[party_index].command_get(command_index)
{
if priority != party_index || priority_index != command_index
{
finished = false;
}
if CommandType::cmp(command, priority_command.unwrap(), parties, flags) == Ordering::Less
{
priority = party_index;
priority_index = command_index;
}
}
}
}
let command = self.queue[priority].command_take(priority_index);
if finished
{
self.waiting = self.total;
}
command
}
pub fn member_remove(&mut self, party: usize, _: usize)
{
self.total -= 1;
self.queue[party].member_remove();
}
}
| new | identifier_name |
queue.rs | use std::cmp::Ordering;
use base::command::CommandType;
use base::party::Party;
use base::runner::BattleFlagsType;
#[derive(Debug)]
struct PartyCommand
{
party: bool,
commands: Vec<Option<CommandType>>,
ready: usize,
total: usize,
}
impl PartyCommand
{
fn new(members: usize) -> Self
{
let mut commands = Vec::with_capacity(members);
for _ in 0..members
{
commands.push(None);
}
PartyCommand
{
party: false,
commands: commands,
ready: 0,
total: members,
}
}
fn command_count(&self) -> usize
{
self.commands.len()
}
fn command_get(&self, index: usize) -> Option<&CommandType>
{
self.commands[index].as_ref()
}
fn command_take(&mut self, index: usize) -> CommandType
{
debug_assert!(self.commands[index].is_some());
self.ready -= 1;
self.commands[index].take().unwrap()
}
fn command_add(&mut self, command: CommandType, member: usize) -> isize
{
let mut change = 0;
if self.party
|
else if !self.commands[member].is_some()
{
change = 1;
self.ready += 1;
}
self.commands[member] = Some(command);
change
}
fn command_add_party(&mut self, command: CommandType) -> usize
{
let change = self.total - self.ready;
if self.party
{
for i in 1..self.total
{
self.commands[i] = None;
}
}
self.commands[0] = Some(command);
self.ready = self.total;
self.party = true;
change
}
fn command_remove(&mut self, member: usize) -> usize
{
if let Some(_) = self.commands[member]
{
self.commands[member] = None;
self.ready -= 1;
1
}
else
{
0
}
}
fn member_remove(&mut self)
{
self.total -= 1;
}
}
/// Manages a list of upcoming battle commands.
///
/// By default, the queue is considered not ready. At this state, new commands can be added. Once
/// all parties have a command for each of their members, the queue is considered ready. At that
/// point, commands are sorted and consumed. Once all commands are consumed, the queue goes back to
/// its default state where it is not ready and no commands have been associated with any parties.
///
#[derive(Debug)]
pub struct BattleQueue
{
waiting: usize,
total: usize,
queue: Vec<PartyCommand>,
}
impl BattleQueue
{
/// Initializes a new empty queue that is not ready.
pub fn new(parties: &[Party]) -> Self
{
let mut queue = Vec::with_capacity(parties.len());
let mut total = 0;
for party in parties
{
total += party.active_count();
queue.push(PartyCommand::new(party.active_count()));
}
BattleQueue
{
waiting: total,
total: total,
queue: queue,
}
}
/// Returns true if the queue was populated or is in the process of being consumed.
pub fn ready(&self) -> bool
{
self.waiting == 0
}
/// Returns the command for the indicated party member.
pub fn command_get(&self, party: usize, member: usize) -> Option<&CommandType>
{
self.queue[party].command_get(member)
}
/// Adds the given command to the queue for the indicated members of the given party.
///
/// This will override any commands already given to this party member. If the given party
/// already has an attached command, then all members of that party will be invalidated.
///
pub fn command_add(&mut self, command: CommandType, party: usize, member: usize)
{
let change = self.queue[party].command_add(command, member);
self.waiting = (self.waiting as isize - change) as usize;
}
/// Adds the given command to the queue for all party members of the given party.
///
/// This will override any commands for the given party.
///
pub fn command_add_party(&mut self, command: CommandType, party: usize)
{
self.waiting -= self.queue[party].command_add_party(command);
}
/// Removes any command requested by the indicated member of the given party.
///
/// This command will not remove any other commands that reference this member.
///
pub fn command_remove(&mut self, party: usize, member: usize)
{
self.queue[party].command_remove(member);
}
/// Finds the highest priority command in the queue and pops it.
///
/// The queue must be ready before calling this method.
///
pub fn command_consume(&mut self, parties: &[Party], flags: BattleFlagsType) -> CommandType
{
let mut finished = true;
let mut priority = 0;
let mut priority_index = 0;
// Find a party to start from.
'outer: for party_index in 0..self.queue.len()
{
if self.queue[party_index].command_count() > 0
{
for command_index in 0..self.queue[party_index].command_count()
{
if let Some(_) = self.queue[party_index].command_get(command_index)
{
priority = party_index;
priority_index = command_index;
break 'outer;
}
}
}
}
// Find the minimum uses the starting point as a base.
for party_index in priority..self.queue.len()
{
for command_index in 0..self.queue[party_index].command_count()
{
let priority_command = self.queue[priority].command_get(priority_index);
if let Some(command) = self.queue[party_index].command_get(command_index)
{
if priority != party_index || priority_index != command_index
{
finished = false;
}
if CommandType::cmp(command, priority_command.unwrap(), parties, flags) == Ordering::Less
{
priority = party_index;
priority_index = command_index;
}
}
}
}
let command = self.queue[priority].command_take(priority_index);
if finished
{
self.waiting = self.total;
}
command
}
pub fn member_remove(&mut self, party: usize, _: usize)
{
self.total -= 1;
self.queue[party].member_remove();
}
}
| {
// All members are now waiting for their individual commands.
change = - (self.total as isize) + 1;
self.party = false;
for i in 0..self.total
{
self.commands[i] = None;
}
self.ready = 1;
} | conditional_block |
queue.rs | use std::cmp::Ordering;
use base::command::CommandType;
use base::party::Party;
use base::runner::BattleFlagsType;
#[derive(Debug)]
struct PartyCommand
{
party: bool,
commands: Vec<Option<CommandType>>,
ready: usize,
total: usize,
}
impl PartyCommand
{
fn new(members: usize) -> Self
{
let mut commands = Vec::with_capacity(members);
for _ in 0..members
{
commands.push(None);
}
PartyCommand
{
party: false,
commands: commands,
ready: 0,
total: members,
}
}
fn command_count(&self) -> usize
{
self.commands.len()
}
fn command_get(&self, index: usize) -> Option<&CommandType>
{
self.commands[index].as_ref()
}
fn command_take(&mut self, index: usize) -> CommandType
{
debug_assert!(self.commands[index].is_some());
self.ready -= 1;
self.commands[index].take().unwrap()
}
fn command_add(&mut self, command: CommandType, member: usize) -> isize
{
let mut change = 0;
if self.party
{
// All members are now waiting for their individual commands.
change = - (self.total as isize) + 1;
self.party = false;
for i in 0..self.total
{
self.commands[i] = None;
}
self.ready = 1;
}
else if !self.commands[member].is_some()
{
change = 1;
self.ready += 1;
}
self.commands[member] = Some(command);
change
}
fn command_add_party(&mut self, command: CommandType) -> usize
{
let change = self.total - self.ready;
if self.party
{
for i in 1..self.total
{
self.commands[i] = None;
}
}
self.commands[0] = Some(command);
self.ready = self.total;
self.party = true;
change
}
fn command_remove(&mut self, member: usize) -> usize
{
if let Some(_) = self.commands[member]
{
self.commands[member] = None;
self.ready -= 1;
1
}
else
{
0
}
}
fn member_remove(&mut self)
{
self.total -= 1;
}
}
/// Manages a list of upcoming battle commands.
///
/// By default, the queue is considered not ready. At this state, new commands can be added. Once
/// all parties have a command for each of their members, the queue is considered ready. At that
/// point, commands are sorted and consumed. Once all commands are consumed, the queue goes back to
/// its default state where it is not ready and no commands have been associated with any parties.
///
#[derive(Debug)]
pub struct BattleQueue
{
waiting: usize,
total: usize,
queue: Vec<PartyCommand>,
}
impl BattleQueue
{
/// Initializes a new empty queue that is not ready.
pub fn new(parties: &[Party]) -> Self
{
let mut queue = Vec::with_capacity(parties.len());
let mut total = 0; | }
BattleQueue
{
waiting: total,
total: total,
queue: queue,
}
}
/// Returns true if the queue was populated or is in the process of being consumed.
pub fn ready(&self) -> bool
{
self.waiting == 0
}
/// Returns the command for the indicated party member.
pub fn command_get(&self, party: usize, member: usize) -> Option<&CommandType>
{
self.queue[party].command_get(member)
}
/// Adds the given command to the queue for the indicated members of the given party.
///
/// This will override any commands already given to this party member. If the given party
/// already has an attached command, then all members of that party will be invalidated.
///
pub fn command_add(&mut self, command: CommandType, party: usize, member: usize)
{
let change = self.queue[party].command_add(command, member);
self.waiting = (self.waiting as isize - change) as usize;
}
/// Adds the given command to the queue for all party members of the given party.
///
/// This will override any commands for the given party.
///
pub fn command_add_party(&mut self, command: CommandType, party: usize)
{
self.waiting -= self.queue[party].command_add_party(command);
}
/// Removes any command requested by the indicated member of the given party.
///
/// This command will not remove any other commands that reference this member.
///
pub fn command_remove(&mut self, party: usize, member: usize)
{
self.queue[party].command_remove(member);
}
/// Finds the highest priority command in the queue and pops it.
///
/// The queue must be ready before calling this method.
///
pub fn command_consume(&mut self, parties: &[Party], flags: BattleFlagsType) -> CommandType
{
let mut finished = true;
let mut priority = 0;
let mut priority_index = 0;
// Find a party to start from.
'outer: for party_index in 0..self.queue.len()
{
if self.queue[party_index].command_count() > 0
{
for command_index in 0..self.queue[party_index].command_count()
{
if let Some(_) = self.queue[party_index].command_get(command_index)
{
priority = party_index;
priority_index = command_index;
break 'outer;
}
}
}
}
// Find the minimum uses the starting point as a base.
for party_index in priority..self.queue.len()
{
for command_index in 0..self.queue[party_index].command_count()
{
let priority_command = self.queue[priority].command_get(priority_index);
if let Some(command) = self.queue[party_index].command_get(command_index)
{
if priority != party_index || priority_index != command_index
{
finished = false;
}
if CommandType::cmp(command, priority_command.unwrap(), parties, flags) == Ordering::Less
{
priority = party_index;
priority_index = command_index;
}
}
}
}
let command = self.queue[priority].command_take(priority_index);
if finished
{
self.waiting = self.total;
}
command
}
pub fn member_remove(&mut self, party: usize, _: usize)
{
self.total -= 1;
self.queue[party].member_remove();
}
} | for party in parties
{
total += party.active_count();
queue.push(PartyCommand::new(party.active_count())); | random_line_split |
queue.rs | use std::cmp::Ordering;
use base::command::CommandType;
use base::party::Party;
use base::runner::BattleFlagsType;
#[derive(Debug)]
struct PartyCommand
{
party: bool,
commands: Vec<Option<CommandType>>,
ready: usize,
total: usize,
}
impl PartyCommand
{
fn new(members: usize) -> Self
{
let mut commands = Vec::with_capacity(members);
for _ in 0..members
{
commands.push(None);
}
PartyCommand
{
party: false,
commands: commands,
ready: 0,
total: members,
}
}
fn command_count(&self) -> usize
{
self.commands.len()
}
fn command_get(&self, index: usize) -> Option<&CommandType>
{
self.commands[index].as_ref()
}
fn command_take(&mut self, index: usize) -> CommandType
{
debug_assert!(self.commands[index].is_some());
self.ready -= 1;
self.commands[index].take().unwrap()
}
fn command_add(&mut self, command: CommandType, member: usize) -> isize
{
let mut change = 0;
if self.party
{
// All members are now waiting for their individual commands.
change = - (self.total as isize) + 1;
self.party = false;
for i in 0..self.total
{
self.commands[i] = None;
}
self.ready = 1;
}
else if !self.commands[member].is_some()
{
change = 1;
self.ready += 1;
}
self.commands[member] = Some(command);
change
}
fn command_add_party(&mut self, command: CommandType) -> usize
{
let change = self.total - self.ready;
if self.party
{
for i in 1..self.total
{
self.commands[i] = None;
}
}
self.commands[0] = Some(command);
self.ready = self.total;
self.party = true;
change
}
fn command_remove(&mut self, member: usize) -> usize
{
if let Some(_) = self.commands[member]
{
self.commands[member] = None;
self.ready -= 1;
1
}
else
{
0
}
}
fn member_remove(&mut self)
{
self.total -= 1;
}
}
/// Manages a list of upcoming battle commands.
///
/// By default, the queue is considered not ready. At this state, new commands can be added. Once
/// all parties have a command for each of their members, the queue is considered ready. At that
/// point, commands are sorted and consumed. Once all commands are consumed, the queue goes back to
/// its default state where it is not ready and no commands have been associated with any parties.
///
#[derive(Debug)]
pub struct BattleQueue
{
waiting: usize,
total: usize,
queue: Vec<PartyCommand>,
}
impl BattleQueue
{
/// Initializes a new empty queue that is not ready.
pub fn new(parties: &[Party]) -> Self
{
let mut queue = Vec::with_capacity(parties.len());
let mut total = 0;
for party in parties
{
total += party.active_count();
queue.push(PartyCommand::new(party.active_count()));
}
BattleQueue
{
waiting: total,
total: total,
queue: queue,
}
}
/// Returns true if the queue was populated or is in the process of being consumed.
pub fn ready(&self) -> bool
{
self.waiting == 0
}
/// Returns the command for the indicated party member.
pub fn command_get(&self, party: usize, member: usize) -> Option<&CommandType>
|
/// Adds the given command to the queue for the indicated members of the given party.
///
/// This will override any commands already given to this party member. If the given party
/// already has an attached command, then all members of that party will be invalidated.
///
pub fn command_add(&mut self, command: CommandType, party: usize, member: usize)
{
let change = self.queue[party].command_add(command, member);
self.waiting = (self.waiting as isize - change) as usize;
}
/// Adds the given command to the queue for all party members of the given party.
///
/// This will override any commands for the given party.
///
pub fn command_add_party(&mut self, command: CommandType, party: usize)
{
self.waiting -= self.queue[party].command_add_party(command);
}
/// Removes any command requested by the indicated member of the given party.
///
/// This command will not remove any other commands that reference this member.
///
pub fn command_remove(&mut self, party: usize, member: usize)
{
self.queue[party].command_remove(member);
}
/// Finds the highest priority command in the queue and pops it.
///
/// The queue must be ready before calling this method.
///
pub fn command_consume(&mut self, parties: &[Party], flags: BattleFlagsType) -> CommandType
{
let mut finished = true;
let mut priority = 0;
let mut priority_index = 0;
// Find a party to start from.
'outer: for party_index in 0..self.queue.len()
{
if self.queue[party_index].command_count() > 0
{
for command_index in 0..self.queue[party_index].command_count()
{
if let Some(_) = self.queue[party_index].command_get(command_index)
{
priority = party_index;
priority_index = command_index;
break 'outer;
}
}
}
}
// Find the minimum uses the starting point as a base.
for party_index in priority..self.queue.len()
{
for command_index in 0..self.queue[party_index].command_count()
{
let priority_command = self.queue[priority].command_get(priority_index);
if let Some(command) = self.queue[party_index].command_get(command_index)
{
if priority != party_index || priority_index != command_index
{
finished = false;
}
if CommandType::cmp(command, priority_command.unwrap(), parties, flags) == Ordering::Less
{
priority = party_index;
priority_index = command_index;
}
}
}
}
let command = self.queue[priority].command_take(priority_index);
if finished
{
self.waiting = self.total;
}
command
}
pub fn member_remove(&mut self, party: usize, _: usize)
{
self.total -= 1;
self.queue[party].member_remove();
}
}
| {
self.queue[party].command_get(member)
} | identifier_body |
lib.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// For simd (currently x86_64/aarch64)
#![cfg_attr(any(target_os = "linux", target_os = "android", target_os = "windows"), feature(heap_api))]
#![feature(alloc)]
#![feature(box_syntax)]
#![feature(custom_attribute)]
#![feature(custom_derive)]
#![feature(mpsc_select)]
#![feature(plugin)]
#![feature(proc_macro)]
#![feature(range_contains)]
#![feature(rustc_attrs)]
#![feature(structural_match)]
#![feature(unique)]
#![plugin(heapsize_plugin)]
#![plugin(plugins)]
#![deny(unsafe_code)]
extern crate alloc;
extern crate app_units;
extern crate azure;
#[allow(unused_extern_crates)]
#[macro_use]
extern crate bitflags;
// Mac OS-specific library dependencies
#[cfg(target_os = "macos")] extern crate byteorder;
#[cfg(target_os = "macos")] extern crate core_foundation;
#[cfg(target_os = "macos")] extern crate core_graphics;
#[cfg(target_os = "macos")] extern crate core_text;
// Windows-specific library dependencies
#[cfg(target_os = "windows")] extern crate gdi32;
#[cfg(target_os = "windows")] extern crate winapi;
extern crate euclid;
extern crate fnv;
// Platforms that use Freetype/Fontconfig library dependencies
#[cfg(any(target_os = "linux", target_os = "android"))]
extern crate fontconfig;
extern crate fontsan;
#[cfg(any(target_os = "linux", target_os = "android", target_os = "windows"))]
extern crate freetype;
extern crate gfx_traits;
// Eventually we would like the shaper to be pluggable, as many operating systems have their own
// shapers. For now, however, this is a hard dependency.
extern crate harfbuzz_sys as harfbuzz;
extern crate heapsize;
extern crate ipc_channel;
extern crate layers;
#[allow(unused_extern_crates)]
#[macro_use]
extern crate lazy_static;
extern crate libc;
#[macro_use]
extern crate log;
extern crate mime;
extern crate msg;
extern crate net_traits;
extern crate ordered_float;
#[macro_use]
extern crate profile_traits;
extern crate rand;
#[macro_use]
extern crate range;
extern crate rustc_serialize;
extern crate serde;
#[macro_use]
extern crate serde_derive;
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
extern crate simd;
extern crate smallvec;
#[macro_use]
extern crate string_cache;
extern crate style;
extern crate style_traits;
extern crate time;
extern crate unicode_script;
extern crate url;
extern crate util;
extern crate webrender_traits;
extern crate xi_unicode;
pub use paint_context::PaintContext;
// Misc.
mod filters; | // Private painting modules
mod paint_context;
#[deny(unsafe_code)]
pub mod display_list;
// Fonts
#[macro_use] pub mod font;
pub mod font_cache_thread;
pub mod font_context;
pub mod font_template;
pub mod paint_thread;
// Platform-specific implementations.
#[allow(unsafe_code)]
mod platform;
// Text
pub mod text; | random_line_split |
|
mod.rs | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
| /// `UNSPECIFIED_FSP` is the unspecified fractional seconds part.
pub const UNSPECIFIED_FSP: i8 = -1;
/// `MAX_FSP` is the maximum digit of fractional seconds part.
pub const MAX_FSP: i8 = 6;
/// `MIN_FSP` is the minimum digit of fractional seconds part.
pub const MIN_FSP: i8 = 0;
/// `DEFAULT_FSP` is the default digit of fractional seconds part.
/// `MySQL` use 0 as the default Fsp.
pub const DEFAULT_FSP: i8 = 0;
fn check_fsp(fsp: i8) -> Result<u8> {
if fsp == UNSPECIFIED_FSP {
return Ok(DEFAULT_FSP as u8);
}
if !(MIN_FSP..=MAX_FSP).contains(&fsp) {
return Err(invalid_type!("Invalid fsp {}", fsp));
}
Ok(fsp as u8)
}
pub mod binary_literal;
pub mod charset;
pub mod decimal;
pub mod duration;
pub mod enums;
pub mod json;
pub mod set;
pub mod time;
pub use self::decimal::{dec_encoded_len, Decimal, DecimalDecoder, DecimalEncoder, Res, RoundMode};
pub use self::duration::{Duration, DurationDecoder, DurationEncoder};
pub use self::enums::{Enum, EnumDecoder, EnumEncoder, EnumRef};
pub use self::json::{
parse_json_path_expr, Json, JsonDatumPayloadChunkEncoder, JsonDecoder, JsonEncoder, JsonType,
ModifyType, PathExpression,
};
pub use self::set::{Set, SetRef};
pub use self::time::{Time, TimeDecoder, TimeEncoder, TimeType, Tz}; | use super::Result;
| random_line_split |
mod.rs | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use super::Result;
/// `UNSPECIFIED_FSP` is the unspecified fractional seconds part.
pub const UNSPECIFIED_FSP: i8 = -1;
/// `MAX_FSP` is the maximum digit of fractional seconds part.
pub const MAX_FSP: i8 = 6;
/// `MIN_FSP` is the minimum digit of fractional seconds part.
pub const MIN_FSP: i8 = 0;
/// `DEFAULT_FSP` is the default digit of fractional seconds part.
/// `MySQL` use 0 as the default Fsp.
pub const DEFAULT_FSP: i8 = 0;
fn | (fsp: i8) -> Result<u8> {
if fsp == UNSPECIFIED_FSP {
return Ok(DEFAULT_FSP as u8);
}
if !(MIN_FSP..=MAX_FSP).contains(&fsp) {
return Err(invalid_type!("Invalid fsp {}", fsp));
}
Ok(fsp as u8)
}
pub mod binary_literal;
pub mod charset;
pub mod decimal;
pub mod duration;
pub mod enums;
pub mod json;
pub mod set;
pub mod time;
pub use self::decimal::{dec_encoded_len, Decimal, DecimalDecoder, DecimalEncoder, Res, RoundMode};
pub use self::duration::{Duration, DurationDecoder, DurationEncoder};
pub use self::enums::{Enum, EnumDecoder, EnumEncoder, EnumRef};
pub use self::json::{
parse_json_path_expr, Json, JsonDatumPayloadChunkEncoder, JsonDecoder, JsonEncoder, JsonType,
ModifyType, PathExpression,
};
pub use self::set::{Set, SetRef};
pub use self::time::{Time, TimeDecoder, TimeEncoder, TimeType, Tz};
| check_fsp | identifier_name |
mod.rs | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use super::Result;
/// `UNSPECIFIED_FSP` is the unspecified fractional seconds part.
pub const UNSPECIFIED_FSP: i8 = -1;
/// `MAX_FSP` is the maximum digit of fractional seconds part.
pub const MAX_FSP: i8 = 6;
/// `MIN_FSP` is the minimum digit of fractional seconds part.
pub const MIN_FSP: i8 = 0;
/// `DEFAULT_FSP` is the default digit of fractional seconds part.
/// `MySQL` use 0 as the default Fsp.
pub const DEFAULT_FSP: i8 = 0;
fn check_fsp(fsp: i8) -> Result<u8> |
pub mod binary_literal;
pub mod charset;
pub mod decimal;
pub mod duration;
pub mod enums;
pub mod json;
pub mod set;
pub mod time;
pub use self::decimal::{dec_encoded_len, Decimal, DecimalDecoder, DecimalEncoder, Res, RoundMode};
pub use self::duration::{Duration, DurationDecoder, DurationEncoder};
pub use self::enums::{Enum, EnumDecoder, EnumEncoder, EnumRef};
pub use self::json::{
parse_json_path_expr, Json, JsonDatumPayloadChunkEncoder, JsonDecoder, JsonEncoder, JsonType,
ModifyType, PathExpression,
};
pub use self::set::{Set, SetRef};
pub use self::time::{Time, TimeDecoder, TimeEncoder, TimeType, Tz};
| {
if fsp == UNSPECIFIED_FSP {
return Ok(DEFAULT_FSP as u8);
}
if !(MIN_FSP..=MAX_FSP).contains(&fsp) {
return Err(invalid_type!("Invalid fsp {}", fsp));
}
Ok(fsp as u8)
} | identifier_body |
mod.rs | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use super::Result;
/// `UNSPECIFIED_FSP` is the unspecified fractional seconds part.
pub const UNSPECIFIED_FSP: i8 = -1;
/// `MAX_FSP` is the maximum digit of fractional seconds part.
pub const MAX_FSP: i8 = 6;
/// `MIN_FSP` is the minimum digit of fractional seconds part.
pub const MIN_FSP: i8 = 0;
/// `DEFAULT_FSP` is the default digit of fractional seconds part.
/// `MySQL` use 0 as the default Fsp.
pub const DEFAULT_FSP: i8 = 0;
fn check_fsp(fsp: i8) -> Result<u8> {
if fsp == UNSPECIFIED_FSP {
return Ok(DEFAULT_FSP as u8);
}
if !(MIN_FSP..=MAX_FSP).contains(&fsp) |
Ok(fsp as u8)
}
pub mod binary_literal;
pub mod charset;
pub mod decimal;
pub mod duration;
pub mod enums;
pub mod json;
pub mod set;
pub mod time;
pub use self::decimal::{dec_encoded_len, Decimal, DecimalDecoder, DecimalEncoder, Res, RoundMode};
pub use self::duration::{Duration, DurationDecoder, DurationEncoder};
pub use self::enums::{Enum, EnumDecoder, EnumEncoder, EnumRef};
pub use self::json::{
parse_json_path_expr, Json, JsonDatumPayloadChunkEncoder, JsonDecoder, JsonEncoder, JsonType,
ModifyType, PathExpression,
};
pub use self::set::{Set, SetRef};
pub use self::time::{Time, TimeDecoder, TimeEncoder, TimeType, Tz};
| {
return Err(invalid_type!("Invalid fsp {}", fsp));
} | conditional_block |
maybe_uninit_nodrop.rs | use super::nodrop::NoDrop;
use array::Array;
use std::mem;
use std::ops::{Deref, DerefMut};
/// A combination of NoDrop and “maybe uninitialized”;
/// this wraps a value that can be wholly or partially uninitialized.
///
/// NOTE: This is known to not be a good solution, but it's the one we have kept
/// working on stable Rust. Stable improvements are encouraged, in any form,
/// but of course we are waiting for a real, stable, MaybeUninit.
#[repr(C)] // for cast from self ptr to value
pub struct Mayb | NoDrop<T>);
// why don't we use ManuallyDrop here: It doesn't inhibit
// enum layout optimizations that depend on T, and we support older Rust.
impl<T> MaybeUninit<T> {
/// Create a new MaybeUninit with uninitialized interior
pub unsafe fn uninitialized() -> Self {
MaybeUninit(NoDrop::new(mem::uninitialized()))
}
}
impl<A: Array> Deref for MaybeUninit<A> {
type Target = A;
#[inline(always)]
fn deref(&self) -> &A {
&self.0
}
}
impl<A: Array> DerefMut for MaybeUninit<A> {
#[inline(always)]
fn deref_mut(&mut self) -> &mut A {
&mut self.0
}
}
| eUninit<T>( | identifier_name |
maybe_uninit_nodrop.rs | use super::nodrop::NoDrop;
use array::Array;
use std::mem;
use std::ops::{Deref, DerefMut};
/// A combination of NoDrop and “maybe uninitialized”;
/// this wraps a value that can be wholly or partially uninitialized.
///
/// NOTE: This is known to not be a good solution, but it's the one we have kept
/// working on stable Rust. Stable improvements are encouraged, in any form,
/// but of course we are waiting for a real, stable, MaybeUninit.
#[repr(C)] // for cast from self ptr to value
pub struct MaybeUninit<T>(NoDrop<T>);
// why don't we use ManuallyDrop here: It doesn't inhibit
// enum layout optimizations that depend on T, and we support older Rust.
impl<T> MaybeUninit<T> {
/// Create a new MaybeUninit with uninitialized interior
pub unsafe fn uninitialized() -> Self {
| impl<A: Array> Deref for MaybeUninit<A> {
type Target = A;
#[inline(always)]
fn deref(&self) -> &A {
&self.0
}
}
impl<A: Array> DerefMut for MaybeUninit<A> {
#[inline(always)]
fn deref_mut(&mut self) -> &mut A {
&mut self.0
}
}
| MaybeUninit(NoDrop::new(mem::uninitialized()))
}
}
| identifier_body |
maybe_uninit_nodrop.rs | use super::nodrop::NoDrop;
use array::Array;
use std::mem;
use std::ops::{Deref, DerefMut};
/// A combination of NoDrop and “maybe uninitialized”;
/// this wraps a value that can be wholly or partially uninitialized.
///
/// NOTE: This is known to not be a good solution, but it's the one we have kept
/// working on stable Rust. Stable improvements are encouraged, in any form,
/// but of course we are waiting for a real, stable, MaybeUninit.
#[repr(C)] // for cast from self ptr to value
pub struct MaybeUninit<T>(NoDrop<T>);
// why don't we use ManuallyDrop here: It doesn't inhibit
// enum layout optimizations that depend on T, and we support older Rust.
impl<T> MaybeUninit<T> {
/// Create a new MaybeUninit with uninitialized interior
pub unsafe fn uninitialized() -> Self {
MaybeUninit(NoDrop::new(mem::uninitialized()))
}
}
impl<A: Array> Deref for MaybeUninit<A> {
type Target = A;
#[inline(always)]
fn deref(&self) -> &A { | &self.0
}
}
impl<A: Array> DerefMut for MaybeUninit<A> {
#[inline(always)]
fn deref_mut(&mut self) -> &mut A {
&mut self.0
}
} | random_line_split |
|
fat-ptr-cast.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
// | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Trait {}
// Make sure casts between thin-pointer <-> fat pointer obey RFC401
fn main() {
let a: &[i32] = &[1, 2, 3];
let b: Box<[i32]> = Box::new([1, 2, 3]);
let p = a as *const [i32];
let q = a.as_ptr();
a as usize; //~ ERROR casting
b as usize; //~ ERROR non-scalar cast
p as usize;
//~^ ERROR casting
//~^^ HELP cast through a raw pointer
// #22955
q as *const [i32]; //~ ERROR casting
// #21397
let t: *mut (Trait + 'static) = 0 as *mut _; //~ ERROR casting
let mut fail: *const str = 0 as *const str; //~ ERROR casting
} | random_line_split |
|
fat-ptr-cast.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Trait {}
// Make sure casts between thin-pointer <-> fat pointer obey RFC401
fn | () {
let a: &[i32] = &[1, 2, 3];
let b: Box<[i32]> = Box::new([1, 2, 3]);
let p = a as *const [i32];
let q = a.as_ptr();
a as usize; //~ ERROR casting
b as usize; //~ ERROR non-scalar cast
p as usize;
//~^ ERROR casting
//~^^ HELP cast through a raw pointer
// #22955
q as *const [i32]; //~ ERROR casting
// #21397
let t: *mut (Trait + 'static) = 0 as *mut _; //~ ERROR casting
let mut fail: *const str = 0 as *const str; //~ ERROR casting
}
| main | identifier_name |
fat-ptr-cast.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Trait {}
// Make sure casts between thin-pointer <-> fat pointer obey RFC401
fn main() | {
let a: &[i32] = &[1, 2, 3];
let b: Box<[i32]> = Box::new([1, 2, 3]);
let p = a as *const [i32];
let q = a.as_ptr();
a as usize; //~ ERROR casting
b as usize; //~ ERROR non-scalar cast
p as usize;
//~^ ERROR casting
//~^^ HELP cast through a raw pointer
// #22955
q as *const [i32]; //~ ERROR casting
// #21397
let t: *mut (Trait + 'static) = 0 as *mut _; //~ ERROR casting
let mut fail: *const str = 0 as *const str; //~ ERROR casting
} | identifier_body |
|
utils.rs | // Copyright 2018 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
pub mod strings {
use std::ffi::{
CString,
CStr
};
use std::os::raw::c_char;
use mentat::{
Keyword,
};
pub fn c_char_to_string(cchar: *const c_char) -> &'static str {
assert!(!cchar.is_null());
let c_str = unsafe { CStr::from_ptr(cchar) };
c_str.to_str().unwrap_or("")
}
pub fn string_to_c_char<T>(r_string: T) -> *mut c_char where T: Into<String> {
CString::new(r_string.into()).unwrap().into_raw()
}
pub fn kw_from_string(keyword_string: &'static str) -> Keyword {
// TODO: validate. The input might not be a keyword!
let attr_name = keyword_string.trim_left_matches(":");
let parts: Vec<&str> = attr_name.split("/").collect();
Keyword::namespaced(parts[0], parts[1])
}
}
pub mod log {
#[cfg(all(target_os="android", not(test)))]
use std::ffi::CString;
#[cfg(all(target_os="android", not(test)))]
use android;
// TODO far from ideal. And, we might actually want to println in tests.
#[cfg(all(not(target_os="android"), not(target_os="ios")))]
pub fn d(_: &str) |
#[cfg(all(target_os="ios", not(test)))]
pub fn d(message: &str) {
eprintln!("{}", message);
}
#[cfg(all(target_os="android", not(test)))]
pub fn d(message: &str) {
let message = CString::new(message).unwrap();
let message = message.as_ptr();
let tag = CString::new("Mentat").unwrap();
let tag = tag.as_ptr();
unsafe { android::__android_log_write(android::LogLevel::Debug as i32, tag, message) };
}
}
pub mod error {
use super::strings::string_to_c_char;
use std::os::raw::c_char;
use std::boxed::Box;
use std::fmt::Display;
use std::ptr;
/// Represents an error that occurred on the mentat side. Many mentat FFI functions take a
/// `*mut ExternError` as the last argument. This is an out parameter that indicates an
/// error that occurred during that function's execution (if any).
///
/// For functions that use this pattern, if the ExternError's message property is null, then no
/// error occurred. If the message is non-null then it contains a string description of the
/// error that occurred.
///
/// Important: This message is allocated on the heap and it is the consumer's responsibility to
/// free it using `destroy_mentat_string`!
///
/// While this pattern is not ergonomic in Rust, it offers two main benefits:
///
/// 1. It avoids defining a large number of `Result`-shaped types in the FFI consumer, as would
/// be required with something like an `struct ExternResult<T> { ok: *mut T, err:... }`
/// 2. It offers additional type safety over `struct ExternResult { ok: *mut c_void, err:... }`,
/// which helps avoid memory safety errors.
#[repr(C)]
#[derive(Debug)]
pub struct ExternError {
pub message: *mut c_char,
// TODO: Include an error code here.
}
impl Default for ExternError {
fn default() -> ExternError {
ExternError { message: ptr::null_mut() }
}
}
/// Translate Result<T, E>, into something C can understand, when T is not `#[repr(C)]`
///
/// - If `result` is `Ok(v)`, moves `v` to the heap and returns a pointer to it, and sets
/// `error` to a state indicating that no error occurred (`message` is null).
/// - If `result` is `Err(e)`, returns a null pointer and stores a string representing the error
/// message (which was allocated on the heap and should eventually be freed) into
/// `error.message`
pub unsafe fn translate_result<T, E>(result: Result<T, E>, error: *mut ExternError) -> *mut T
where E: Display {
// TODO: can't unwind across FFI...
assert!(!error.is_null(), "Error output parameter is not optional");
let error = &mut *error;
error.message = ptr::null_mut();
match result {
Ok(val) => Box::into_raw(Box::new(val)),
Err(e) => {
error.message = string_to_c_char(e.to_string());
ptr::null_mut()
}
}
}
/// Translate Result<Option<T>, E> into something C can understand, when T is not `#[repr(C)]`.
///
/// - If `result` is `Ok(Some(v))`, moves `v` to the heap and returns a pointer to it, and
/// sets `error` to a state indicating that no error occurred (`message` is null).
/// - If `result` is `Ok(None)` returns a null pointer, but sets `error` to a state indicating
/// that no error occurred (`message` is null).
/// - If `result` is `Err(e)`, returns a null pointer and stores a string representing the error
/// message (which was allocated on the heap and should eventually be freed) into
/// `error.message`
pub unsafe fn translate_opt_result<T, E>(result: Result<Option<T>, E>, error: *mut ExternError) -> *mut T
where E: Display {
assert!(!error.is_null(), "Error output parameter is not optional");
let error = &mut *error;
error.message = ptr::null_mut();
match result {
Ok(Some(val)) => Box::into_raw(Box::new(val)),
Ok(None) => ptr::null_mut(),
Err(e) => {
error.message = string_to_c_char(e.to_string());
ptr::null_mut()
}
}
}
/// Identical to `translate_result`, but with additional type checking for the case that we have
/// a `Result<(), E>` (which we're about to drop on the floor).
pub unsafe fn translate_void_result<E>(result: Result<(), E>, error: *mut ExternError) where E: Display {
// Note that Box<T> guarantees that if T is zero sized, it's not heap allocated. So not
// only do we never need to free the return value of this, it would be a problem if someone did.
translate_result(result, error);
}
}
| {} | identifier_body |
utils.rs | // Copyright 2018 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
pub mod strings {
use std::ffi::{
CString,
CStr
};
use std::os::raw::c_char;
use mentat::{
Keyword,
};
pub fn c_char_to_string(cchar: *const c_char) -> &'static str {
assert!(!cchar.is_null());
let c_str = unsafe { CStr::from_ptr(cchar) };
c_str.to_str().unwrap_or("")
}
pub fn string_to_c_char<T>(r_string: T) -> *mut c_char where T: Into<String> {
CString::new(r_string.into()).unwrap().into_raw()
}
pub fn kw_from_string(keyword_string: &'static str) -> Keyword {
// TODO: validate. The input might not be a keyword!
let attr_name = keyword_string.trim_left_matches(":");
let parts: Vec<&str> = attr_name.split("/").collect();
Keyword::namespaced(parts[0], parts[1])
}
}
pub mod log {
#[cfg(all(target_os="android", not(test)))]
use std::ffi::CString;
#[cfg(all(target_os="android", not(test)))]
use android;
// TODO far from ideal. And, we might actually want to println in tests.
#[cfg(all(not(target_os="android"), not(target_os="ios")))]
pub fn d(_: &str) {}
#[cfg(all(target_os="ios", not(test)))]
pub fn | (message: &str) {
eprintln!("{}", message);
}
#[cfg(all(target_os="android", not(test)))]
pub fn d(message: &str) {
let message = CString::new(message).unwrap();
let message = message.as_ptr();
let tag = CString::new("Mentat").unwrap();
let tag = tag.as_ptr();
unsafe { android::__android_log_write(android::LogLevel::Debug as i32, tag, message) };
}
}
pub mod error {
use super::strings::string_to_c_char;
use std::os::raw::c_char;
use std::boxed::Box;
use std::fmt::Display;
use std::ptr;
/// Represents an error that occurred on the mentat side. Many mentat FFI functions take a
/// `*mut ExternError` as the last argument. This is an out parameter that indicates an
/// error that occurred during that function's execution (if any).
///
/// For functions that use this pattern, if the ExternError's message property is null, then no
/// error occurred. If the message is non-null then it contains a string description of the
/// error that occurred.
///
/// Important: This message is allocated on the heap and it is the consumer's responsibility to
/// free it using `destroy_mentat_string`!
///
/// While this pattern is not ergonomic in Rust, it offers two main benefits:
///
/// 1. It avoids defining a large number of `Result`-shaped types in the FFI consumer, as would
/// be required with something like an `struct ExternResult<T> { ok: *mut T, err:... }`
/// 2. It offers additional type safety over `struct ExternResult { ok: *mut c_void, err:... }`,
/// which helps avoid memory safety errors.
#[repr(C)]
#[derive(Debug)]
pub struct ExternError {
pub message: *mut c_char,
// TODO: Include an error code here.
}
impl Default for ExternError {
fn default() -> ExternError {
ExternError { message: ptr::null_mut() }
}
}
/// Translate Result<T, E>, into something C can understand, when T is not `#[repr(C)]`
///
/// - If `result` is `Ok(v)`, moves `v` to the heap and returns a pointer to it, and sets
/// `error` to a state indicating that no error occurred (`message` is null).
/// - If `result` is `Err(e)`, returns a null pointer and stores a string representing the error
/// message (which was allocated on the heap and should eventually be freed) into
/// `error.message`
pub unsafe fn translate_result<T, E>(result: Result<T, E>, error: *mut ExternError) -> *mut T
where E: Display {
// TODO: can't unwind across FFI...
assert!(!error.is_null(), "Error output parameter is not optional");
let error = &mut *error;
error.message = ptr::null_mut();
match result {
Ok(val) => Box::into_raw(Box::new(val)),
Err(e) => {
error.message = string_to_c_char(e.to_string());
ptr::null_mut()
}
}
}
/// Translate Result<Option<T>, E> into something C can understand, when T is not `#[repr(C)]`.
///
/// - If `result` is `Ok(Some(v))`, moves `v` to the heap and returns a pointer to it, and
/// sets `error` to a state indicating that no error occurred (`message` is null).
/// - If `result` is `Ok(None)` returns a null pointer, but sets `error` to a state indicating
/// that no error occurred (`message` is null).
/// - If `result` is `Err(e)`, returns a null pointer and stores a string representing the error
/// message (which was allocated on the heap and should eventually be freed) into
/// `error.message`
pub unsafe fn translate_opt_result<T, E>(result: Result<Option<T>, E>, error: *mut ExternError) -> *mut T
where E: Display {
assert!(!error.is_null(), "Error output parameter is not optional");
let error = &mut *error;
error.message = ptr::null_mut();
match result {
Ok(Some(val)) => Box::into_raw(Box::new(val)),
Ok(None) => ptr::null_mut(),
Err(e) => {
error.message = string_to_c_char(e.to_string());
ptr::null_mut()
}
}
}
/// Identical to `translate_result`, but with additional type checking for the case that we have
/// a `Result<(), E>` (which we're about to drop on the floor).
pub unsafe fn translate_void_result<E>(result: Result<(), E>, error: *mut ExternError) where E: Display {
// Note that Box<T> guarantees that if T is zero sized, it's not heap allocated. So not
// only do we never need to free the return value of this, it would be a problem if someone did.
translate_result(result, error);
}
}
| d | identifier_name |
utils.rs | // Copyright 2018 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
pub mod strings {
use std::ffi::{
CString,
CStr
};
use std::os::raw::c_char;
use mentat::{
Keyword,
};
pub fn c_char_to_string(cchar: *const c_char) -> &'static str {
assert!(!cchar.is_null());
let c_str = unsafe { CStr::from_ptr(cchar) };
c_str.to_str().unwrap_or("")
}
pub fn string_to_c_char<T>(r_string: T) -> *mut c_char where T: Into<String> {
CString::new(r_string.into()).unwrap().into_raw()
}
pub fn kw_from_string(keyword_string: &'static str) -> Keyword {
// TODO: validate. The input might not be a keyword!
let attr_name = keyword_string.trim_left_matches(":");
let parts: Vec<&str> = attr_name.split("/").collect();
Keyword::namespaced(parts[0], parts[1])
}
}
pub mod log {
#[cfg(all(target_os="android", not(test)))]
use std::ffi::CString;
#[cfg(all(target_os="android", not(test)))]
use android;
// TODO far from ideal. And, we might actually want to println in tests.
#[cfg(all(not(target_os="android"), not(target_os="ios")))]
pub fn d(_: &str) {}
#[cfg(all(target_os="ios", not(test)))]
pub fn d(message: &str) {
eprintln!("{}", message);
}
#[cfg(all(target_os="android", not(test)))]
pub fn d(message: &str) {
let message = CString::new(message).unwrap();
let message = message.as_ptr();
let tag = CString::new("Mentat").unwrap();
let tag = tag.as_ptr();
unsafe { android::__android_log_write(android::LogLevel::Debug as i32, tag, message) };
}
}
pub mod error {
use super::strings::string_to_c_char;
use std::os::raw::c_char;
use std::boxed::Box;
use std::fmt::Display;
use std::ptr;
/// Represents an error that occurred on the mentat side. Many mentat FFI functions take a
/// `*mut ExternError` as the last argument. This is an out parameter that indicates an
/// error that occurred during that function's execution (if any).
///
/// For functions that use this pattern, if the ExternError's message property is null, then no
/// error occurred. If the message is non-null then it contains a string description of the
/// error that occurred.
///
/// Important: This message is allocated on the heap and it is the consumer's responsibility to
/// free it using `destroy_mentat_string`!
///
/// While this pattern is not ergonomic in Rust, it offers two main benefits:
///
/// 1. It avoids defining a large number of `Result`-shaped types in the FFI consumer, as would
/// be required with something like an `struct ExternResult<T> { ok: *mut T, err:... }`
/// 2. It offers additional type safety over `struct ExternResult { ok: *mut c_void, err:... }`,
/// which helps avoid memory safety errors.
#[repr(C)]
#[derive(Debug)]
pub struct ExternError {
pub message: *mut c_char,
// TODO: Include an error code here.
}
impl Default for ExternError {
fn default() -> ExternError {
ExternError { message: ptr::null_mut() }
}
}
/// Translate Result<T, E>, into something C can understand, when T is not `#[repr(C)]`
///
/// - If `result` is `Ok(v)`, moves `v` to the heap and returns a pointer to it, and sets | /// `error` to a state indicating that no error occurred (`message` is null).
/// - If `result` is `Err(e)`, returns a null pointer and stores a string representing the error
/// message (which was allocated on the heap and should eventually be freed) into
/// `error.message`
pub unsafe fn translate_result<T, E>(result: Result<T, E>, error: *mut ExternError) -> *mut T
where E: Display {
// TODO: can't unwind across FFI...
assert!(!error.is_null(), "Error output parameter is not optional");
let error = &mut *error;
error.message = ptr::null_mut();
match result {
Ok(val) => Box::into_raw(Box::new(val)),
Err(e) => {
error.message = string_to_c_char(e.to_string());
ptr::null_mut()
}
}
}
/// Translate Result<Option<T>, E> into something C can understand, when T is not `#[repr(C)]`.
///
/// - If `result` is `Ok(Some(v))`, moves `v` to the heap and returns a pointer to it, and
/// sets `error` to a state indicating that no error occurred (`message` is null).
/// - If `result` is `Ok(None)` returns a null pointer, but sets `error` to a state indicating
/// that no error occurred (`message` is null).
/// - If `result` is `Err(e)`, returns a null pointer and stores a string representing the error
/// message (which was allocated on the heap and should eventually be freed) into
/// `error.message`
pub unsafe fn translate_opt_result<T, E>(result: Result<Option<T>, E>, error: *mut ExternError) -> *mut T
where E: Display {
assert!(!error.is_null(), "Error output parameter is not optional");
let error = &mut *error;
error.message = ptr::null_mut();
match result {
Ok(Some(val)) => Box::into_raw(Box::new(val)),
Ok(None) => ptr::null_mut(),
Err(e) => {
error.message = string_to_c_char(e.to_string());
ptr::null_mut()
}
}
}
/// Identical to `translate_result`, but with additional type checking for the case that we have
/// a `Result<(), E>` (which we're about to drop on the floor).
pub unsafe fn translate_void_result<E>(result: Result<(), E>, error: *mut ExternError) where E: Display {
// Note that Box<T> guarantees that if T is zero sized, it's not heap allocated. So not
// only do we never need to free the return value of this, it would be a problem if someone did.
translate_result(result, error);
}
} | random_line_split |