file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
manifest.rs | prelude_items.len(), 0);
let content = strip_hashbang(content);
let (manifest, source) = find_embedded_manifest(content)
.unwrap_or((Manifest::Toml(""), content));
(manifest, source, consts::FILE_TEMPLATE, false)
},
Input::Expr(content) => {
(Manifest::Toml(""), content, consts::EXPR_TEMPLATE, true)
},
Input::Loop(content, count) => {
let templ = if count { consts::LOOP_COUNT_TEMPLATE } else { consts::LOOP_TEMPLATE };
(Manifest::Toml(""), content, templ, true)
},
};
let source = template.replace("%b", source);
/*
We are doing it this way because we can guarantee that %p *always* appears before %b, *and* that we don't attempt this when we don't want to allow prelude substitution.
The problem with doing it the other way around is that the user could specify a prelude item that contains `%b` (which would do *weird things*).
Also, don't use `str::replace` because it replaces *all* occurrences, not just the first.
*/
let source = match sub_prelude {
false => source,
true => {
const PRELUDE_PAT: &'static str = "%p";
let offset = source.find(PRELUDE_PAT).expect("template doesn't have %p");
let mut new_source = String::new();
new_source.push_str(&source[..offset]);
for i in prelude_items {
new_source.push_str(i);
new_source.push_str("\n");
}
new_source.push_str(&source[offset + PRELUDE_PAT.len()..]);
new_source
}
};
info!("part_mani: {:?}", part_mani);
info!("source: {:?}", source);
let part_mani = try!(part_mani.into_toml());
info!("part_mani: {:?}", part_mani);
// It's-a mergin' time!
let def_mani = try!(default_manifest(input));
let dep_mani = try!(deps_manifest(deps));
let mani = try!(merge_manifest(def_mani, part_mani));
let mani = try!(merge_manifest(mani, dep_mani));
info!("mani: {:?}", mani);
let mani_str = format!("{}", toml::Value::Table(mani));
info!("mani_str: {}", mani_str);
Ok((mani_str, source))
}
#[test]
fn test_split_input() {
macro_rules! si {
($i:expr) => (split_input(&$i, &[], &[]).ok())
}
let dummy_path: ::std::path::PathBuf = "p".into();
let dummy_path = &dummy_path;
let f = |c| Input::File("n", &dummy_path, c, 0);
macro_rules! r {
($m:expr, $r:expr) => (Some(($m.into(), $r.into())));
}
assert_eq!(si!(f(
r#"fn main() {}"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"fn main() {}"#
)
);
// Ensure removed prefix manifests don't work.
assert_eq!(si!(f(
r#"
---
fn main() {}
"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"
---
fn main() {}
"#
)
);
assert_eq!(si!(f(
r#"[dependencies]
time="0.1.25"
---
fn main() {}
"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"[dependencies]
time="0.1.25"
---
fn main() {}
"#
)
);
assert_eq!(si!(f(
r#"
// Cargo-Deps: time="0.1.25"
fn main() {}
"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
time = "0.1.25"
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"
// Cargo-Deps: time="0.1.25"
fn main() {}
"#
)
);
assert_eq!(si!(f(
r#"
// Cargo-Deps: time="0.1.25", libc="0.2.5"
fn main() {}
"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
libc = "0.2.5"
time = "0.1.25"
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"
// Cargo-Deps: time="0.1.25", libc="0.2.5"
fn main() {}
"#
)
);
assert_eq!(si!(f(
r#"
/*!
Here is a manifest:
```cargo
[dependencies]
time = "0.1.25"
```
*/
fn main() {}
"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
time = "0.1.25"
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"
/*!
Here is a manifest:
```cargo
[dependencies]
time = "0.1.25"
```
*/
fn main() {}
"#
)
);
}
/**
Returns a slice of the input string with the leading hashbang, if there is one, omitted.
*/
fn strip_hashbang(s: &str) -> &str {
match RE_HASHBANG.find(s) {
Some((_, end)) => &s[end..],
None => s
}
}
#[test]
fn test_strip_hashbang() {
assert_eq!(strip_hashbang("\
#!/usr/bin/env run-cargo-script
and the rest
\
"), "\
and the rest
\
");
assert_eq!(strip_hashbang("\
#![thingy]
and the rest
\
"), "\
#![thingy]
and the rest
\
");
}
/**
Represents the kind, and content of, an embedded manifest.
*/
#[derive(Debug, Eq, PartialEq)]
enum Manifest<'s> {
/// The manifest is a valid TOML fragment.
Toml(&'s str),
/// The manifest is a valid TOML fragment (owned).
// TODO: Change to Cow<'s, str>.
TomlOwned(String),
/// The manifest is a comma-delimited list of dependencies.
DepList(&'s str),
}
impl<'s> Manifest<'s> {
pub fn into_toml(self) -> Result<toml::Table> {
use self::Manifest::*;
match self {
Toml(s) => Ok(try!(toml::Parser::new(s).parse()
.ok_or("could not parse embedded manifest"))),
TomlOwned(ref s) => Ok(try!(toml::Parser::new(s).parse()
.ok_or("could not parse embedded manifest"))),
DepList(s) => Manifest::dep_list_to_toml(s),
}
}
fn dep_list_to_toml(s: &str) -> Result<toml::Table> {
let mut r = String::new();
r.push_str("[dependencies]\n");
for dep in s.trim().split(',') {
// If there's no version specified, add one.
match dep.contains('=') {
true => {
r.push_str(dep);
r.push_str("\n");
},
false => {
r.push_str(dep);
r.push_str("=\"*\"\n");
}
}
}
Ok(try!(toml::Parser::new(&r).parse()
.ok_or("could not parse embedded manifest")))
}
}
/**
Locates a manifest embedded in Rust source.
Returns `Some((manifest, source))` if it finds a manifest, `None` otherwise.
*/
fn find_embedded_manifest(s: &str) -> Option<(Manifest, &str)> {
find_short_comment_manifest(s)
.or_else(|| find_code_block_manifest(s))
}
#[test]
fn test_find_embedded_manifest() {
use self::Manifest::*;
let fem = find_embedded_manifest;
assert_eq!(fem("fn main() {}"), None);
assert_eq!(fem(
"
fn main() {}
"),
None);
// Ensure removed prefix manifests don't work.
assert_eq!(fem(
r#"
---
fn main() {}
"#),
None);
assert_eq!(fem(
"[dependencies]
time = \"0.1.25\"
---
fn main() {}
"),
None);
assert_eq!(fem(
"[dependencies]
time = \"0.1.25\"
fn main() {}
"),
None);
// Make sure we aren't just grabbing the *last* line.
assert_eq!(fem(
"[dependencies]
time = \"0.1.25\"
fn main() {
println!(\"Hi!\");
}
"),
None);
assert_eq!(fem(
"// cargo-deps: time=\"0.1.25\"
fn main() {}
"),
Some((
DepList(" time=\"0.1.25\""),
"// cargo-deps: time=\"0.1.25\"
fn main() {}
"
)));
assert_eq!(fem(
"// cargo-deps: time=\"0.1.25\", libc=\"0.2.5\"
fn main() {}
"),
Some((
DepList(" time=\"0.1.25\", libc=\"0.2.5\""),
"// cargo-deps: time=\"0.1.25\", libc=\"0.2.5\"
fn main() {}
"
)));
assert_eq!(fem(
"
// cargo-deps: time=\"0.1.25\" \n\
fn main() {}
"),
Some((
DepList(" time=\"0.1.25\" "),
"
// cargo-deps: time=\"0.1.25\" \n\
fn main() {}
"
)));
assert_eq!(fem(
"/* cargo-deps: time=\"0.1.25\" */
fn main() {}
"),
None);
assert_eq!(fem(
r#"//! [dependencies]
//! time = "0.1.25"
fn main() {}
"#),
None);
assert_eq!(fem(
r#"//! ```Cargo
//! [dependencies]
//! time = "0.1.25"
//! ```
fn main() {}
"#),
Some((
TomlOwned(r#"[dependencies]
time = "0.1.25"
"#.into()),
r#"//! ```Cargo
//! [dependencies]
//! time = "0.1.25"
//! ```
fn main() {}
"#
)));
assert_eq!(fem(
r#"/*!
[dependencies]
time = "0.1.25"
*/
fn main() {}
"#),
None);
assert_eq!(fem(
r#"/*!
```Cargo
[dependencies]
time = "0.1.25"
```
*/
fn main() {}
"#),
Some((
TomlOwned(r#"[dependencies]
time = "0.1.25"
"#.into()),
r#"/*!
```Cargo
[dependencies]
time = "0.1.25"
```
*/
fn main() {}
"#
)));
assert_eq!(fem(
r#"/*!
* [dependencies]
* time = "0.1.25"
*/
fn main() {}
"#),
None);
assert_eq!(fem(
r#"/*!
* ```Cargo
* [dependencies]
* time = "0.1.25"
* ```
*/
fn main() {}
"#),
Some((
TomlOwned(r#"[dependencies]
time = "0.1.25"
"#.into()),
r#"/*!
* ```Cargo
* [dependencies]
* time = "0.1.25"
* ```
*/
fn main() {}
"#
)));
}
/**
Locates a "short comment manifest" in Rust source.
*/
fn find_short_comment_manifest(s: &str) -> Option<(Manifest, &str)> {
/*
This is pretty simple: the only valid syntax for this is for the first, non-blank line to contain a single-line comment whose first token is `cargo-deps:`. That's it.
*/
let re = &*RE_SHORT_MANIFEST;
if let Some(cap) = re.captures(s) {
if let Some((a, b)) = cap.pos(1) {
return Some((Manifest::DepList(&s[a..b]), &s[..]))
}
}
None
}
/**
Locates a "code block manifest" in Rust source.
*/
fn find_code_block_manifest(s: &str) -> Option<(Manifest, &str)> {
/*
This has to happen in a few steps.
First, we will look for and slice out a contiguous, inner doc comment which must be *the very first thing* in the file. `#[doc(...)]` attributes *are not supported*. Multiple single-line comments cannot have any blank lines between them.
Then, we need to strip off the actual comment markers from the content. Including indentation removal, and taking out the (optional) leading line markers for block comments. *sigh*
Then, we need to take the contents of this doc comment and feed it to a Markdown parser. We are looking for *the first* fenced code block with a language token of `cargo`. This is extracted and pasted back together into the manifest.
*/
let start = match RE_CRATE_COMMENT.captures(s) {
Some(cap) => match cap.pos(1) {
Some((a, _)) => a,
None => return None
},
None => return None
};
let comment = match extract_comment(&s[start..]) {
Ok(s) => s,
Err(err) => {
error!("error slicing comment: {}", err);
return None
}
};
scrape_markdown_manifest(&comment)
.unwrap_or(None)
.map(|m| (Manifest::TomlOwned(m), s))
}
/**
Extracts the first `Cargo` fenced code block from a chunk of Markdown.
*/
fn scrape_markdown_manifest(content: &str) -> Result<Option<String>> {
use self::hoedown::{Buffer, Markdown, Render};
// To match librustdoc/html/markdown.rs, HOEDOWN_EXTENSIONS.
let exts
= hoedown::NO_INTRA_EMPHASIS
| hoedown::TABLES
| hoedown::FENCED_CODE
| hoedown::AUTOLINK
| hoedown::STRIKETHROUGH
| hoedown::SUPERSCRIPT
| hoedown::FOOTNOTES;
let md = Markdown::new(&content).extensions(exts);
struct ManifestScraper {
seen_manifest: bool,
}
impl Render for ManifestScraper {
fn code_block(&mut self, output: &mut Buffer, text: &Buffer, lang: &Buffer) {
use std::ascii::AsciiExt;
let lang = lang.to_str().unwrap();
if!self.seen_manifest && lang.eq_ignore_ascii_case("cargo") {
// Pass it through.
info!("found code block manifest");
output.pipe(text);
self.seen_manifest = true;
}
}
}
let mut ms = ManifestScraper { seen_manifest: false };
let mani_buf = ms.render(&md);
if!ms.seen_manifest { return Ok(None) }
mani_buf.to_str().map(|s| Some(s.into()))
.map_err(|_| "error decoding manifest as UTF-8".into())
}
#[test]
fn test_scrape_markdown_manifest() {
macro_rules! smm {
($c:expr) => (scrape_markdown_manifest($c).map_err(|e| e.to_string()));
}
assert_eq!(smm!(
r#"There is no manifest in this comment.
"#
),
Ok(None)
);
assert_eq!(smm!(
r#"There is no manifest in this comment.
```
This is not a manifest.
```
```rust
println!("Nor is this.");
```
Or this.
"#
),
Ok(None)
);
assert_eq!(smm!(
r#"This is a manifest:
```cargo
dependencies = { time = "*" }
```
"#
),
Ok(Some(r#"dependencies = { time = "*" }
"#.into()))
);
assert_eq!(smm!(
r#"This is *not* a manifest:
```
He's lying, I'm *totally* a manifest!
```
This *is*:
```cargo
dependencies = { time = "*" }
```
"#
),
Ok(Some(r#"dependencies = { time = "*" }
"#.into()))
);
assert_eq!(smm!(
r#"This is a manifest:
```cargo
dependencies = { time = "*" }
```
So is this, but it doesn't count:
```cargo
dependencies = { explode = true }
```
"#
),
Ok(Some(r#"dependencies = { time = "*" }
"#.into()))
);
}
/**
Extracts the contents of a Rust doc comment.
*/
fn extract_comment(s: &str) -> Result<String> {
use std::cmp::min;
fn n_leading_spaces(s: &str, n: usize) -> Result<()> {
| fn extract_block(s: &str) -> Result<String> {
/*
On every line:
- update nesting level and detect end-of-comment
- if margin is None:
- if there appears to be a margin, set margin.
- strip off margin marker
- update the leading space counter
- strip leading space
- append content
*/
let mut r = String::new();
let margin_re = &*RE_MARGIN;
let space_re = &*RE_SPACE;
let nesting_re = &*RE_NESTING;
let mut leading_space = None;
let mut margin = None;
let mut depth: u32 = 1;
for line in s.lines() {
if depth == 0 { break }
// Update nesting and look for end-of-comment.
let mut end_of_comment = None;
for (end, marker) in nesting_re.find_iter(line).map(|(a,b)| (a, &line[a..b])) {
match (marker, depth) {
("/*", _) => depth += 1,
("*/", 1) => {
end_of_comment = Some(end);
depth = 0;
break;
},
("*/", _) => depth -= 1,
_ => panic!("got a comment marker other than /* or */")
}
}
let line = end_of_comment.map(|end| &line[..end]).unwrap_or(line);
// Detect and strip margin.
margin = margin
.or_else(|| margin_re.find(line)
.and_then(|(b, e)| Some(&line[b..e])));
let line = if let Some(margin) = margin {
let end = line.char_indices().take(margin.len())
.map(|(i,c)| i + c.len_utf8()).last().unwrap_or(0);
&line[end..]
} else {
line
};
// Detect and strip leading indentation.
leading_space = leading_space
.or_else(|| space_re.find(line)
.map(|(_,n)| n));
/*
Make sure we have only leading spaces.
If we see a tab, fall over. I *would* expand them, but that gets into the question of how *many* spaces to expand them to, and *where* is the tab, because tabs are tab stops and not just N spaces.
Eurgh.
*/
try!(n_leading_spaces(line, leading_space.unwrap_or(0)));
let strip_len = min(leading_space.unwrap_or(0), line.len());
let line = &line[strip_len..];
// Done.
r.push_str(line);
// `lines` removes newlines. Ideally, it wouldn't do that, but hopefully this shouldn't cause any *real* problems.
r.push_str("\n");
}
Ok(r)
}
fn extract_line(s: &str) -> Result<String> {
let mut r = String::new();
let comment_re = &*RE_COMMENT;
let space_re = &*RE_SPACE;
let mut leading_space = None;
for line in s.lines() {
// Strip leading comment marker.
let content = match comment_re.find(line) {
Some((_, end)) => &line[end..],
None => break
};
// Detect and strip leading indentation.
leading_space = leading_space
.or_else(|| space_re.captures(content)
.and_then(|c| c.pos(1))
.map(|(_,n)| n));
/*
Make sure we have only leading spaces.
If we see a tab, fall over. I *would* expand them, but that gets into the question of how *many* spaces to expand them to, and *where* is the tab, because tabs are tab stops and not just N spaces.
Eurgh.
*/
try!(n_leading_spaces(content, leading_space.unwrap_or(0)));
let strip_len = min(leading_space.unwrap_or(0), content.len());
let content = &content[strip_len..];
// Done.
r.push_str(content);
// `lines` removes newlines. Ideally, it wouldn't do that, but hopefully this shouldn't cause any *real* problems.
| if !s.chars().take(n).all(|c| c == ' ') {
return Err(format!("leading {:?} chars aren't all spaces: {:?}", n, s).into())
}
Ok(())
}
| identifier_body |
manifest.rs | !(prelude_items.len(), 0);
let content = strip_hashbang(content);
let (manifest, source) = find_embedded_manifest(content)
.unwrap_or((Manifest::Toml(""), content));
(manifest, source, consts::FILE_TEMPLATE, false)
},
Input::Expr(content) => {
(Manifest::Toml(""), content, consts::EXPR_TEMPLATE, true)
},
Input::Loop(content, count) => {
let templ = if count { consts::LOOP_COUNT_TEMPLATE } else { consts::LOOP_TEMPLATE };
(Manifest::Toml(""), content, templ, true)
},
};
let source = template.replace("%b", source);
/*
We are doing it this way because we can guarantee that %p *always* appears before %b, *and* that we don't attempt this when we don't want to allow prelude substitution.
The problem with doing it the other way around is that the user could specify a prelude item that contains `%b` (which would do *weird things*).
Also, don't use `str::replace` because it replaces *all* occurrences, not just the first.
*/
let source = match sub_prelude {
false => source,
true => {
const PRELUDE_PAT: &'static str = "%p";
let offset = source.find(PRELUDE_PAT).expect("template doesn't have %p");
let mut new_source = String::new();
new_source.push_str(&source[..offset]);
for i in prelude_items {
new_source.push_str(i);
new_source.push_str("\n");
}
new_source.push_str(&source[offset + PRELUDE_PAT.len()..]);
new_source
}
};
info!("part_mani: {:?}", part_mani);
info!("source: {:?}", source);
let part_mani = try!(part_mani.into_toml());
info!("part_mani: {:?}", part_mani);
// It's-a mergin' time!
let def_mani = try!(default_manifest(input));
let dep_mani = try!(deps_manifest(deps));
let mani = try!(merge_manifest(def_mani, part_mani));
let mani = try!(merge_manifest(mani, dep_mani));
info!("mani: {:?}", mani);
let mani_str = format!("{}", toml::Value::Table(mani));
info!("mani_str: {}", mani_str);
Ok((mani_str, source))
}
#[test]
fn test_split_input() {
macro_rules! si {
($i:expr) => (split_input(&$i, &[], &[]).ok())
}
let dummy_path: ::std::path::PathBuf = "p".into();
let dummy_path = &dummy_path;
let f = |c| Input::File("n", &dummy_path, c, 0);
macro_rules! r {
($m:expr, $r:expr) => (Some(($m.into(), $r.into())));
}
assert_eq!(si!(f(
r#"fn main() {}"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"fn main() {}"#
)
);
// Ensure removed prefix manifests don't work.
assert_eq!(si!(f(
r#"
---
fn main() {}
"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"
---
fn main() {}
"#
)
);
assert_eq!(si!(f(
r#"[dependencies]
time="0.1.25"
---
fn main() {}
"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"[dependencies]
time="0.1.25"
---
fn main() {}
"#
)
);
assert_eq!(si!(f(
r#"
// Cargo-Deps: time="0.1.25"
fn main() {}
"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
time = "0.1.25"
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"
// Cargo-Deps: time="0.1.25"
fn main() {}
"#
)
);
assert_eq!(si!(f(
r#"
// Cargo-Deps: time="0.1.25", libc="0.2.5"
fn main() {}
"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
libc = "0.2.5"
time = "0.1.25"
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"
// Cargo-Deps: time="0.1.25", libc="0.2.5"
fn main() {}
"#
)
);
assert_eq!(si!(f(
r#"
/*!
Here is a manifest:
```cargo
[dependencies]
time = "0.1.25"
```
*/
fn main() {}
"#
)),
r!(
r#"
[[bin]]
name = "n"
path = "n.rs"
[dependencies]
time = "0.1.25"
[package]
authors = ["Anonymous"]
name = "n"
version = "0.1.0"
"#,
r#"
/*!
Here is a manifest:
```cargo
[dependencies]
time = "0.1.25"
```
*/
fn main() {}
"#
) | /**
Returns a slice of the input string with the leading hashbang, if there is one, omitted.
*/
fn strip_hashbang(s: &str) -> &str {
match RE_HASHBANG.find(s) {
Some((_, end)) => &s[end..],
None => s
}
}
#[test]
fn test_strip_hashbang() {
assert_eq!(strip_hashbang("\
#!/usr/bin/env run-cargo-script
and the rest
\
"), "\
and the rest
\
");
assert_eq!(strip_hashbang("\
#![thingy]
and the rest
\
"), "\
#![thingy]
and the rest
\
");
}
/**
Represents the kind, and content of, an embedded manifest.
*/
#[derive(Debug, Eq, PartialEq)]
enum Manifest<'s> {
/// The manifest is a valid TOML fragment.
Toml(&'s str),
/// The manifest is a valid TOML fragment (owned).
// TODO: Change to Cow<'s, str>.
TomlOwned(String),
/// The manifest is a comma-delimited list of dependencies.
DepList(&'s str),
}
impl<'s> Manifest<'s> {
pub fn into_toml(self) -> Result<toml::Table> {
use self::Manifest::*;
match self {
Toml(s) => Ok(try!(toml::Parser::new(s).parse()
.ok_or("could not parse embedded manifest"))),
TomlOwned(ref s) => Ok(try!(toml::Parser::new(s).parse()
.ok_or("could not parse embedded manifest"))),
DepList(s) => Manifest::dep_list_to_toml(s),
}
}
fn dep_list_to_toml(s: &str) -> Result<toml::Table> {
let mut r = String::new();
r.push_str("[dependencies]\n");
for dep in s.trim().split(',') {
// If there's no version specified, add one.
match dep.contains('=') {
true => {
r.push_str(dep);
r.push_str("\n");
},
false => {
r.push_str(dep);
r.push_str("=\"*\"\n");
}
}
}
Ok(try!(toml::Parser::new(&r).parse()
.ok_or("could not parse embedded manifest")))
}
}
/**
Locates a manifest embedded in Rust source.
Returns `Some((manifest, source))` if it finds a manifest, `None` otherwise.
*/
fn find_embedded_manifest(s: &str) -> Option<(Manifest, &str)> {
find_short_comment_manifest(s)
.or_else(|| find_code_block_manifest(s))
}
#[test]
fn test_find_embedded_manifest() {
use self::Manifest::*;
let fem = find_embedded_manifest;
assert_eq!(fem("fn main() {}"), None);
assert_eq!(fem(
"
fn main() {}
"),
None);
// Ensure removed prefix manifests don't work.
assert_eq!(fem(
r#"
---
fn main() {}
"#),
None);
assert_eq!(fem(
"[dependencies]
time = \"0.1.25\"
---
fn main() {}
"),
None);
assert_eq!(fem(
"[dependencies]
time = \"0.1.25\"
fn main() {}
"),
None);
// Make sure we aren't just grabbing the *last* line.
assert_eq!(fem(
"[dependencies]
time = \"0.1.25\"
fn main() {
println!(\"Hi!\");
}
"),
None);
assert_eq!(fem(
"// cargo-deps: time=\"0.1.25\"
fn main() {}
"),
Some((
DepList(" time=\"0.1.25\""),
"// cargo-deps: time=\"0.1.25\"
fn main() {}
"
)));
assert_eq!(fem(
"// cargo-deps: time=\"0.1.25\", libc=\"0.2.5\"
fn main() {}
"),
Some((
DepList(" time=\"0.1.25\", libc=\"0.2.5\""),
"// cargo-deps: time=\"0.1.25\", libc=\"0.2.5\"
fn main() {}
"
)));
assert_eq!(fem(
"
// cargo-deps: time=\"0.1.25\" \n\
fn main() {}
"),
Some((
DepList(" time=\"0.1.25\" "),
"
// cargo-deps: time=\"0.1.25\" \n\
fn main() {}
"
)));
assert_eq!(fem(
"/* cargo-deps: time=\"0.1.25\" */
fn main() {}
"),
None);
assert_eq!(fem(
r#"//! [dependencies]
//! time = "0.1.25"
fn main() {}
"#),
None);
assert_eq!(fem(
r#"//! ```Cargo
//! [dependencies]
//! time = "0.1.25"
//! ```
fn main() {}
"#),
Some((
TomlOwned(r#"[dependencies]
time = "0.1.25"
"#.into()),
r#"//! ```Cargo
//! [dependencies]
//! time = "0.1.25"
//! ```
fn main() {}
"#
)));
assert_eq!(fem(
r#"/*!
[dependencies]
time = "0.1.25"
*/
fn main() {}
"#),
None);
assert_eq!(fem(
r#"/*!
```Cargo
[dependencies]
time = "0.1.25"
```
*/
fn main() {}
"#),
Some((
TomlOwned(r#"[dependencies]
time = "0.1.25"
"#.into()),
r#"/*!
```Cargo
[dependencies]
time = "0.1.25"
```
*/
fn main() {}
"#
)));
assert_eq!(fem(
r#"/*!
* [dependencies]
* time = "0.1.25"
*/
fn main() {}
"#),
None);
assert_eq!(fem(
r#"/*!
* ```Cargo
* [dependencies]
* time = "0.1.25"
* ```
*/
fn main() {}
"#),
Some((
TomlOwned(r#"[dependencies]
time = "0.1.25"
"#.into()),
r#"/*!
* ```Cargo
* [dependencies]
* time = "0.1.25"
* ```
*/
fn main() {}
"#
)));
}
/**
Locates a "short comment manifest" in Rust source.
*/
fn find_short_comment_manifest(s: &str) -> Option<(Manifest, &str)> {
/*
This is pretty simple: the only valid syntax for this is for the first, non-blank line to contain a single-line comment whose first token is `cargo-deps:`. That's it.
*/
let re = &*RE_SHORT_MANIFEST;
if let Some(cap) = re.captures(s) {
if let Some((a, b)) = cap.pos(1) {
return Some((Manifest::DepList(&s[a..b]), &s[..]))
}
}
None
}
/**
Locates a "code block manifest" in Rust source.
*/
fn find_code_block_manifest(s: &str) -> Option<(Manifest, &str)> {
/*
This has to happen in a few steps.
First, we will look for and slice out a contiguous, inner doc comment which must be *the very first thing* in the file. `#[doc(...)]` attributes *are not supported*. Multiple single-line comments cannot have any blank lines between them.
Then, we need to strip off the actual comment markers from the content. Including indentation removal, and taking out the (optional) leading line markers for block comments. *sigh*
Then, we need to take the contents of this doc comment and feed it to a Markdown parser. We are looking for *the first* fenced code block with a language token of `cargo`. This is extracted and pasted back together into the manifest.
*/
let start = match RE_CRATE_COMMENT.captures(s) {
Some(cap) => match cap.pos(1) {
Some((a, _)) => a,
None => return None
},
None => return None
};
let comment = match extract_comment(&s[start..]) {
Ok(s) => s,
Err(err) => {
error!("error slicing comment: {}", err);
return None
}
};
scrape_markdown_manifest(&comment)
.unwrap_or(None)
.map(|m| (Manifest::TomlOwned(m), s))
}
/**
Extracts the first `Cargo` fenced code block from a chunk of Markdown.
*/
fn scrape_markdown_manifest(content: &str) -> Result<Option<String>> {
use self::hoedown::{Buffer, Markdown, Render};
// To match librustdoc/html/markdown.rs, HOEDOWN_EXTENSIONS.
let exts
= hoedown::NO_INTRA_EMPHASIS
| hoedown::TABLES
| hoedown::FENCED_CODE
| hoedown::AUTOLINK
| hoedown::STRIKETHROUGH
| hoedown::SUPERSCRIPT
| hoedown::FOOTNOTES;
let md = Markdown::new(&content).extensions(exts);
struct ManifestScraper {
seen_manifest: bool,
}
impl Render for ManifestScraper {
fn code_block(&mut self, output: &mut Buffer, text: &Buffer, lang: &Buffer) {
use std::ascii::AsciiExt;
let lang = lang.to_str().unwrap();
if!self.seen_manifest && lang.eq_ignore_ascii_case("cargo") {
// Pass it through.
info!("found code block manifest");
output.pipe(text);
self.seen_manifest = true;
}
}
}
let mut ms = ManifestScraper { seen_manifest: false };
let mani_buf = ms.render(&md);
if!ms.seen_manifest { return Ok(None) }
mani_buf.to_str().map(|s| Some(s.into()))
.map_err(|_| "error decoding manifest as UTF-8".into())
}
#[test]
fn test_scrape_markdown_manifest() {
macro_rules! smm {
($c:expr) => (scrape_markdown_manifest($c).map_err(|e| e.to_string()));
}
assert_eq!(smm!(
r#"There is no manifest in this comment.
"#
),
Ok(None)
);
assert_eq!(smm!(
r#"There is no manifest in this comment.
```
This is not a manifest.
```
```rust
println!("Nor is this.");
```
Or this.
"#
),
Ok(None)
);
assert_eq!(smm!(
r#"This is a manifest:
```cargo
dependencies = { time = "*" }
```
"#
),
Ok(Some(r#"dependencies = { time = "*" }
"#.into()))
);
assert_eq!(smm!(
r#"This is *not* a manifest:
```
He's lying, I'm *totally* a manifest!
```
This *is*:
```cargo
dependencies = { time = "*" }
```
"#
),
Ok(Some(r#"dependencies = { time = "*" }
"#.into()))
);
assert_eq!(smm!(
r#"This is a manifest:
```cargo
dependencies = { time = "*" }
```
So is this, but it doesn't count:
```cargo
dependencies = { explode = true }
```
"#
),
Ok(Some(r#"dependencies = { time = "*" }
"#.into()))
);
}
/**
Extracts the contents of a Rust doc comment.
*/
fn extract_comment(s: &str) -> Result<String> {
use std::cmp::min;
fn n_leading_spaces(s: &str, n: usize) -> Result<()> {
if!s.chars().take(n).all(|c| c =='') {
return Err(format!("leading {:?} chars aren't all spaces: {:?}", n, s).into())
}
Ok(())
}
fn extract_block(s: &str) -> Result<String> {
/*
On every line:
- update nesting level and detect end-of-comment
- if margin is None:
- if there appears to be a margin, set margin.
- strip off margin marker
- update the leading space counter
- strip leading space
- append content
*/
let mut r = String::new();
let margin_re = &*RE_MARGIN;
let space_re = &*RE_SPACE;
let nesting_re = &*RE_NESTING;
let mut leading_space = None;
let mut margin = None;
let mut depth: u32 = 1;
for line in s.lines() {
if depth == 0 { break }
// Update nesting and look for end-of-comment.
let mut end_of_comment = None;
for (end, marker) in nesting_re.find_iter(line).map(|(a,b)| (a, &line[a..b])) {
match (marker, depth) {
("/*", _) => depth += 1,
("*/", 1) => {
end_of_comment = Some(end);
depth = 0;
break;
},
("*/", _) => depth -= 1,
_ => panic!("got a comment marker other than /* or */")
}
}
let line = end_of_comment.map(|end| &line[..end]).unwrap_or(line);
// Detect and strip margin.
margin = margin
.or_else(|| margin_re.find(line)
.and_then(|(b, e)| Some(&line[b..e])));
let line = if let Some(margin) = margin {
let end = line.char_indices().take(margin.len())
.map(|(i,c)| i + c.len_utf8()).last().unwrap_or(0);
&line[end..]
} else {
line
};
// Detect and strip leading indentation.
leading_space = leading_space
.or_else(|| space_re.find(line)
.map(|(_,n)| n));
/*
Make sure we have only leading spaces.
If we see a tab, fall over. I *would* expand them, but that gets into the question of how *many* spaces to expand them to, and *where* is the tab, because tabs are tab stops and not just N spaces.
Eurgh.
*/
try!(n_leading_spaces(line, leading_space.unwrap_or(0)));
let strip_len = min(leading_space.unwrap_or(0), line.len());
let line = &line[strip_len..];
// Done.
r.push_str(line);
// `lines` removes newlines. Ideally, it wouldn't do that, but hopefully this shouldn't cause any *real* problems.
r.push_str("\n");
}
Ok(r)
}
fn extract_line(s: &str) -> Result<String> {
let mut r = String::new();
let comment_re = &*RE_COMMENT;
let space_re = &*RE_SPACE;
let mut leading_space = None;
for line in s.lines() {
// Strip leading comment marker.
let content = match comment_re.find(line) {
Some((_, end)) => &line[end..],
None => break
};
// Detect and strip leading indentation.
leading_space = leading_space
.or_else(|| space_re.captures(content)
.and_then(|c| c.pos(1))
.map(|(_,n)| n));
/*
Make sure we have only leading spaces.
If we see a tab, fall over. I *would* expand them, but that gets into the question of how *many* spaces to expand them to, and *where* is the tab, because tabs are tab stops and not just N spaces.
Eurgh.
*/
try!(n_leading_spaces(content, leading_space.unwrap_or(0)));
let strip_len = min(leading_space.unwrap_or(0), content.len());
let content = &content[strip_len..];
// Done.
r.push_str(content);
// `lines` removes newlines. Ideally, it wouldn't do that, but hopefully this shouldn't cause any *real* problems.
| );
}
| random_line_split |
application.rs | //! This module contains the base elements of an OrbTk application (Application, WindowBuilder and Window).
use std::sync::mpsc;
use dces::prelude::Entity;
use crate::{
core::{application::WindowAdapter, localization::*, *},
shell::{Shell, ShellRequest},
};
/// The `Application` represents the entry point of an OrbTk based application.
pub struct Application {
// shells: Vec<Shell<WindowAdapter>>,
request_sender: mpsc::Sender<ShellRequest<WindowAdapter>>,
shell: Shell<WindowAdapter>,
name: Box<str>,
theme: Rc<Theme>,
localization: Option<Rc<RefCell<Box<dyn Localization>>>>,
}
impl Default for Application {
fn default() -> Self {
Application::from_name("orbtk_application")
}
}
impl Application {
/// Creates a new application.
pub fn new() -> Self {
Self::default()
}
/// Sets the default theme for the application. Could be changed per window.
pub fn theme(mut self, theme: Theme) -> Self {
self.theme = Rc::new(theme);
self
}
pub fn localization<L>(mut self, localization: L) -> Self
where
L: Localization +'static,
|
/// Create a new application with the given name.
pub fn from_name(name: impl Into<Box<str>>) -> Self {
let (sender, receiver) = mpsc::channel();
Application {
request_sender: sender,
name: name.into(),
shell: Shell::new(receiver),
theme: Rc::new(crate::widgets::themes::theme_orbtk::theme_default()),
localization: None,
}
}
/// Creates a new window and add it to the application.
pub fn window<F: Fn(&mut BuildContext) -> Entity +'static>(mut self, create_fn: F) -> Self {
let (adapter, settings, receiver) = create_window(
self.name.clone(),
&self.theme,
self.request_sender.clone(),
create_fn,
self.localization.clone(),
);
self.shell
.create_window_from_settings(settings, adapter)
.request_receiver(receiver)
.build();
self
}
/// Starts the application and run it until quit is requested.
pub fn run(mut self) {
self.shell.run();
}
}
| {
self.localization = Some(Rc::new(RefCell::new(Box::new(localization))));
self
} | identifier_body |
application.rs | //! This module contains the base elements of an OrbTk application (Application, WindowBuilder and Window).
use std::sync::mpsc;
use dces::prelude::Entity;
use crate::{
core::{application::WindowAdapter, localization::*, *},
shell::{Shell, ShellRequest},
};
/// The `Application` represents the entry point of an OrbTk based application.
pub struct Application {
// shells: Vec<Shell<WindowAdapter>>,
request_sender: mpsc::Sender<ShellRequest<WindowAdapter>>,
shell: Shell<WindowAdapter>,
name: Box<str>,
theme: Rc<Theme>, | localization: Option<Rc<RefCell<Box<dyn Localization>>>>,
}
impl Default for Application {
fn default() -> Self {
Application::from_name("orbtk_application")
}
}
impl Application {
/// Creates a new application.
pub fn new() -> Self {
Self::default()
}
/// Sets the default theme for the application. Could be changed per window.
pub fn theme(mut self, theme: Theme) -> Self {
self.theme = Rc::new(theme);
self
}
pub fn localization<L>(mut self, localization: L) -> Self
where
L: Localization +'static,
{
self.localization = Some(Rc::new(RefCell::new(Box::new(localization))));
self
}
/// Create a new application with the given name.
pub fn from_name(name: impl Into<Box<str>>) -> Self {
let (sender, receiver) = mpsc::channel();
Application {
request_sender: sender,
name: name.into(),
shell: Shell::new(receiver),
theme: Rc::new(crate::widgets::themes::theme_orbtk::theme_default()),
localization: None,
}
}
/// Creates a new window and add it to the application.
pub fn window<F: Fn(&mut BuildContext) -> Entity +'static>(mut self, create_fn: F) -> Self {
let (adapter, settings, receiver) = create_window(
self.name.clone(),
&self.theme,
self.request_sender.clone(),
create_fn,
self.localization.clone(),
);
self.shell
.create_window_from_settings(settings, adapter)
.request_receiver(receiver)
.build();
self
}
/// Starts the application and run it until quit is requested.
pub fn run(mut self) {
self.shell.run();
}
} | random_line_split |
|
application.rs | //! This module contains the base elements of an OrbTk application (Application, WindowBuilder and Window).
use std::sync::mpsc;
use dces::prelude::Entity;
use crate::{
core::{application::WindowAdapter, localization::*, *},
shell::{Shell, ShellRequest},
};
/// The `Application` represents the entry point of an OrbTk based application.
pub struct Application {
// shells: Vec<Shell<WindowAdapter>>,
request_sender: mpsc::Sender<ShellRequest<WindowAdapter>>,
shell: Shell<WindowAdapter>,
name: Box<str>,
theme: Rc<Theme>,
localization: Option<Rc<RefCell<Box<dyn Localization>>>>,
}
impl Default for Application {
fn default() -> Self {
Application::from_name("orbtk_application")
}
}
impl Application {
/// Creates a new application.
pub fn new() -> Self {
Self::default()
}
/// Sets the default theme for the application. Could be changed per window.
pub fn theme(mut self, theme: Theme) -> Self {
self.theme = Rc::new(theme);
self
}
pub fn localization<L>(mut self, localization: L) -> Self
where
L: Localization +'static,
{
self.localization = Some(Rc::new(RefCell::new(Box::new(localization))));
self
}
/// Create a new application with the given name.
pub fn from_name(name: impl Into<Box<str>>) -> Self {
let (sender, receiver) = mpsc::channel();
Application {
request_sender: sender,
name: name.into(),
shell: Shell::new(receiver),
theme: Rc::new(crate::widgets::themes::theme_orbtk::theme_default()),
localization: None,
}
}
/// Creates a new window and add it to the application.
pub fn window<F: Fn(&mut BuildContext) -> Entity +'static>(mut self, create_fn: F) -> Self {
let (adapter, settings, receiver) = create_window(
self.name.clone(),
&self.theme,
self.request_sender.clone(),
create_fn,
self.localization.clone(),
);
self.shell
.create_window_from_settings(settings, adapter)
.request_receiver(receiver)
.build();
self
}
/// Starts the application and run it until quit is requested.
pub fn | (mut self) {
self.shell.run();
}
}
| run | identifier_name |
main.rs |
use std::env::args;
use std::collections::HashMap;
trait Validator {
fn increment(&mut self) -> bool;
fn has_sequence(&self) -> bool;
fn no_forbidden_chars(&self) -> bool;
fn has_two_doubles(&self) -> bool;
}
impl Validator for Vec<u8> {
fn increment(&mut self) -> bool {
*(self.last_mut().unwrap()) += 1;
let mut carry: u8 = 0;
for pos in (0..self.len()).rev() {
if carry > 0 {
self[pos] += 1;
carry = 0;
}
if self[pos] >= 26 {
carry = self[pos] / 26;
self[pos] = 0;
}
}
carry!= 0
}
fn has_sequence(&self) -> bool {
for win in self.windows(3) {
if win[0] + 2 == win[1] + 1 && win[1] + 1 == win[2] {
return true;
}
}
false
}
fn no_forbidden_chars(&self) -> bool |
fn has_two_doubles(&self) -> bool {
let mut double_count = 0;
let mut pos = 0;
while pos < (self.len() - 1) {
if self[pos] == self[pos + 1] {
double_count += 1;
pos += 1;
if double_count >= 2 {
return true;
}
}
pos += 1;
}
false
}
}
fn main() {
let mut a = args();
a.next(); // The first argument is the binary name/path
let start = a.next().unwrap(); // The puzzle input
let mut char_to_num = HashMap::new();
let mut num_to_char = HashMap::new();
for i in 0..26 {
let ch = (('a' as u8) + i) as char;
char_to_num.insert(ch, i);
num_to_char.insert(i, ch);
}
let mut passwd_vec = start.chars().map(|ch| char_to_num[&ch]).collect::<Vec<u8>>();
loop {
if passwd_vec.increment() {
panic!("All password combinations exhausted and no password found.");
}
if!passwd_vec.has_sequence() {
continue;
}
if!passwd_vec.no_forbidden_chars() {
continue;
}
if!passwd_vec.has_two_doubles() {
continue;
}
break;
}
let readable_passwd = passwd_vec.iter().map(|ch_num| num_to_char[ch_num]).collect::<String>();
println!("The next password is: {:?}", passwd_vec);
println!("Readable password: {:?}", readable_passwd);
}
| {
let i = ('i' as u8) - ('a' as u8);
let o = ('o' as u8) - ('a' as u8);
let l = ('l' as u8) - ('a' as u8);
!(self.contains(&i) || self.contains(&o) || self.contains(&l))
} | identifier_body |
main.rs |
use std::env::args;
use std::collections::HashMap;
trait Validator {
fn increment(&mut self) -> bool;
fn has_sequence(&self) -> bool;
fn no_forbidden_chars(&self) -> bool;
fn has_two_doubles(&self) -> bool;
}
impl Validator for Vec<u8> {
fn increment(&mut self) -> bool {
*(self.last_mut().unwrap()) += 1;
let mut carry: u8 = 0;
for pos in (0..self.len()).rev() {
if carry > 0 {
self[pos] += 1;
carry = 0;
}
if self[pos] >= 26 {
carry = self[pos] / 26;
self[pos] = 0;
}
}
carry!= 0
}
fn has_sequence(&self) -> bool {
for win in self.windows(3) {
if win[0] + 2 == win[1] + 1 && win[1] + 1 == win[2] {
return true;
}
}
false
}
fn no_forbidden_chars(&self) -> bool {
let i = ('i' as u8) - ('a' as u8);
let o = ('o' as u8) - ('a' as u8);
let l = ('l' as u8) - ('a' as u8);
!(self.contains(&i) || self.contains(&o) || self.contains(&l))
}
fn | (&self) -> bool {
let mut double_count = 0;
let mut pos = 0;
while pos < (self.len() - 1) {
if self[pos] == self[pos + 1] {
double_count += 1;
pos += 1;
if double_count >= 2 {
return true;
}
}
pos += 1;
}
false
}
}
fn main() {
let mut a = args();
a.next(); // The first argument is the binary name/path
let start = a.next().unwrap(); // The puzzle input
let mut char_to_num = HashMap::new();
let mut num_to_char = HashMap::new();
for i in 0..26 {
let ch = (('a' as u8) + i) as char;
char_to_num.insert(ch, i);
num_to_char.insert(i, ch);
}
let mut passwd_vec = start.chars().map(|ch| char_to_num[&ch]).collect::<Vec<u8>>();
loop {
if passwd_vec.increment() {
panic!("All password combinations exhausted and no password found.");
}
if!passwd_vec.has_sequence() {
continue;
}
if!passwd_vec.no_forbidden_chars() {
continue;
}
if!passwd_vec.has_two_doubles() {
continue;
}
break;
}
let readable_passwd = passwd_vec.iter().map(|ch_num| num_to_char[ch_num]).collect::<String>();
println!("The next password is: {:?}", passwd_vec);
println!("Readable password: {:?}", readable_passwd);
}
| has_two_doubles | identifier_name |
main.rs |
use std::env::args;
use std::collections::HashMap;
trait Validator {
fn increment(&mut self) -> bool;
fn has_sequence(&self) -> bool;
fn no_forbidden_chars(&self) -> bool;
fn has_two_doubles(&self) -> bool;
}
impl Validator for Vec<u8> {
fn increment(&mut self) -> bool {
*(self.last_mut().unwrap()) += 1;
let mut carry: u8 = 0;
for pos in (0..self.len()).rev() {
if carry > 0 {
self[pos] += 1;
carry = 0;
}
if self[pos] >= 26 {
carry = self[pos] / 26;
self[pos] = 0;
}
}
carry!= 0
}
fn has_sequence(&self) -> bool {
for win in self.windows(3) {
if win[0] + 2 == win[1] + 1 && win[1] + 1 == win[2] {
return true;
}
}
false
}
fn no_forbidden_chars(&self) -> bool {
let i = ('i' as u8) - ('a' as u8);
let o = ('o' as u8) - ('a' as u8);
let l = ('l' as u8) - ('a' as u8);
!(self.contains(&i) || self.contains(&o) || self.contains(&l))
}
fn has_two_doubles(&self) -> bool {
let mut double_count = 0;
let mut pos = 0;
while pos < (self.len() - 1) {
if self[pos] == self[pos + 1] |
pos += 1;
}
false
}
}
fn main() {
let mut a = args();
a.next(); // The first argument is the binary name/path
let start = a.next().unwrap(); // The puzzle input
let mut char_to_num = HashMap::new();
let mut num_to_char = HashMap::new();
for i in 0..26 {
let ch = (('a' as u8) + i) as char;
char_to_num.insert(ch, i);
num_to_char.insert(i, ch);
}
let mut passwd_vec = start.chars().map(|ch| char_to_num[&ch]).collect::<Vec<u8>>();
loop {
if passwd_vec.increment() {
panic!("All password combinations exhausted and no password found.");
}
if!passwd_vec.has_sequence() {
continue;
}
if!passwd_vec.no_forbidden_chars() {
continue;
}
if!passwd_vec.has_two_doubles() {
continue;
}
break;
}
let readable_passwd = passwd_vec.iter().map(|ch_num| num_to_char[ch_num]).collect::<String>();
println!("The next password is: {:?}", passwd_vec);
println!("Readable password: {:?}", readable_passwd);
}
| {
double_count += 1;
pos += 1;
if double_count >= 2 {
return true;
}
} | conditional_block |
main.rs | use std::env::args;
use std::collections::HashMap;
trait Validator {
fn increment(&mut self) -> bool;
fn has_sequence(&self) -> bool;
fn no_forbidden_chars(&self) -> bool;
fn has_two_doubles(&self) -> bool;
}
impl Validator for Vec<u8> {
fn increment(&mut self) -> bool {
*(self.last_mut().unwrap()) += 1;
let mut carry: u8 = 0;
for pos in (0..self.len()).rev() {
if carry > 0 {
self[pos] += 1;
carry = 0;
}
if self[pos] >= 26 {
carry = self[pos] / 26;
self[pos] = 0;
}
}
carry!= 0
}
fn has_sequence(&self) -> bool {
for win in self.windows(3) {
if win[0] + 2 == win[1] + 1 && win[1] + 1 == win[2] {
return true;
}
}
false
}
fn no_forbidden_chars(&self) -> bool {
let i = ('i' as u8) - ('a' as u8);
let o = ('o' as u8) - ('a' as u8);
let l = ('l' as u8) - ('a' as u8);
!(self.contains(&i) || self.contains(&o) || self.contains(&l))
}
fn has_two_doubles(&self) -> bool {
let mut double_count = 0;
let mut pos = 0;
while pos < (self.len() - 1) {
if self[pos] == self[pos + 1] {
double_count += 1;
pos += 1;
if double_count >= 2 {
return true;
}
}
pos += 1;
}
false
}
}
fn main() {
let mut a = args();
a.next(); // The first argument is the binary name/path
let start = a.next().unwrap(); // The puzzle input
let mut char_to_num = HashMap::new();
let mut num_to_char = HashMap::new();
for i in 0..26 {
let ch = (('a' as u8) + i) as char;
char_to_num.insert(ch, i);
num_to_char.insert(i, ch);
}
let mut passwd_vec = start.chars().map(|ch| char_to_num[&ch]).collect::<Vec<u8>>();
loop {
if passwd_vec.increment() {
panic!("All password combinations exhausted and no password found.");
}
if!passwd_vec.has_sequence() {
continue; | }
if!passwd_vec.no_forbidden_chars() {
continue;
}
if!passwd_vec.has_two_doubles() {
continue;
}
break;
}
let readable_passwd = passwd_vec.iter().map(|ch_num| num_to_char[ch_num]).collect::<String>();
println!("The next password is: {:?}", passwd_vec);
println!("Readable password: {:?}", readable_passwd);
} | random_line_split |
|
main.rs | use std::thread;
use std::time::Duration;
use std::collections::HashMap;
fn main() {
let simulated_user_specified_value = 10;
let simulated_random_number = 7;
generate_workout(simulated_user_specified_value, simulated_random_number);
}
struct Cacher<T, K, V>
where T: Fn(K) -> V,
K: Eq + std::hash::Hash + Clone,
V: Clone
{
calculation: T,
value: HashMap<K, V>,
}
impl<T, K, V> Cacher<T, K, V>
where T: Fn(K) -> V,
K: Eq + std::hash::Hash + Clone,
V: Clone
{
fn new(calculation: T) -> Cacher<T, K, V> {
Cacher {
calculation,
value: HashMap::new(),
}
}
fn | (&mut self, arg: K) -> V {
let closure = &self.calculation;
let v = self.value
.entry(arg.clone())
.or_insert_with(|| (closure)(arg));
(*v).clone()
}
}
fn generate_workout(intensity: i32, random_number: i32) {
let mut expensive_result = Cacher::new(|num| {
println!("calculating slowly...");
thread::sleep(Duration::from_secs(2));
num
});
if intensity < 25 {
println!("Today, do {} pushups!", expensive_result.value(intensity));
println!("Next, do {} situps!", expensive_result.value(intensity));
} else if random_number == 3 {
println!("Take a break today! Remember to stay hydrated!");
} else {
println!("Today, run for {} minutes!",
expensive_result.value(intensity));
}
}
#[test]
#[allow(unused_variables)]
fn call_with_different_values() {
let mut c = Cacher::new(|a| a);
let v1 = c.value(1);
let v2 = c.value(2);
assert_eq!(v2, 2);
}
| value | identifier_name |
main.rs | use std::thread;
use std::time::Duration;
use std::collections::HashMap;
fn main() {
let simulated_user_specified_value = 10;
let simulated_random_number = 7;
generate_workout(simulated_user_specified_value, simulated_random_number);
}
struct Cacher<T, K, V>
where T: Fn(K) -> V,
K: Eq + std::hash::Hash + Clone,
V: Clone
{
calculation: T,
value: HashMap<K, V>,
}
impl<T, K, V> Cacher<T, K, V>
where T: Fn(K) -> V,
K: Eq + std::hash::Hash + Clone,
V: Clone
{
fn new(calculation: T) -> Cacher<T, K, V> {
Cacher {
calculation,
value: HashMap::new(),
}
}
fn value(&mut self, arg: K) -> V |
}
fn generate_workout(intensity: i32, random_number: i32) {
let mut expensive_result = Cacher::new(|num| {
println!("calculating slowly...");
thread::sleep(Duration::from_secs(2));
num
});
if intensity < 25 {
println!("Today, do {} pushups!", expensive_result.value(intensity));
println!("Next, do {} situps!", expensive_result.value(intensity));
} else if random_number == 3 {
println!("Take a break today! Remember to stay hydrated!");
} else {
println!("Today, run for {} minutes!",
expensive_result.value(intensity));
}
}
#[test]
#[allow(unused_variables)]
fn call_with_different_values() {
let mut c = Cacher::new(|a| a);
let v1 = c.value(1);
let v2 = c.value(2);
assert_eq!(v2, 2);
}
| {
let closure = &self.calculation;
let v = self.value
.entry(arg.clone())
.or_insert_with(|| (closure)(arg));
(*v).clone()
} | identifier_body |
main.rs | use std::thread;
use std::time::Duration;
use std::collections::HashMap;
fn main() {
let simulated_user_specified_value = 10;
let simulated_random_number = 7;
generate_workout(simulated_user_specified_value, simulated_random_number);
}
struct Cacher<T, K, V>
where T: Fn(K) -> V,
K: Eq + std::hash::Hash + Clone,
V: Clone
{
calculation: T,
value: HashMap<K, V>,
}
impl<T, K, V> Cacher<T, K, V>
where T: Fn(K) -> V,
K: Eq + std::hash::Hash + Clone,
V: Clone
{
fn new(calculation: T) -> Cacher<T, K, V> {
Cacher {
calculation,
value: HashMap::new(),
}
}
fn value(&mut self, arg: K) -> V {
let closure = &self.calculation;
let v = self.value
.entry(arg.clone())
.or_insert_with(|| (closure)(arg));
(*v).clone() | fn generate_workout(intensity: i32, random_number: i32) {
let mut expensive_result = Cacher::new(|num| {
println!("calculating slowly...");
thread::sleep(Duration::from_secs(2));
num
});
if intensity < 25 {
println!("Today, do {} pushups!", expensive_result.value(intensity));
println!("Next, do {} situps!", expensive_result.value(intensity));
} else if random_number == 3 {
println!("Take a break today! Remember to stay hydrated!");
} else {
println!("Today, run for {} minutes!",
expensive_result.value(intensity));
}
}
#[test]
#[allow(unused_variables)]
fn call_with_different_values() {
let mut c = Cacher::new(|a| a);
let v1 = c.value(1);
let v2 = c.value(2);
assert_eq!(v2, 2);
} | }
}
| random_line_split |
diffie_hellman.rs | use num_bigint::{BigUint, RandBigInt};
use num_integer::Integer;
use num_traits::{One, Zero};
use once_cell::sync::Lazy;
use rand::{CryptoRng, Rng};
static DH_GENERATOR: Lazy<BigUint> = Lazy::new(|| BigUint::from_bytes_be(&[0x02]));
static DH_PRIME: Lazy<BigUint> = Lazy::new(|| {
BigUint::from_bytes_be(&[
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0x0f, 0xda, 0xa2, 0x21, 0x68, 0xc2,
0x34, 0xc4, 0xc6, 0x62, 0x8b, 0x80, 0xdc, 0x1c, 0xd1, 0x29, 0x02, 0x4e, 0x08, 0x8a, 0x67,
0xcc, 0x74, 0x02, 0x0b, 0xbe, 0xa6, 0x3b, 0x13, 0x9b, 0x22, 0x51, 0x4a, 0x08, 0x79, 0x8e,
0x34, 0x04, 0xdd, 0xef, 0x95, 0x19, 0xb3, 0xcd, 0x3a, 0x43, 0x1b, 0x30, 0x2b, 0x0a, 0x6d,
0xf2, 0x5f, 0x14, 0x37, 0x4f, 0xe1, 0x35, 0x6d, 0x6d, 0x51, 0xc2, 0x45, 0xe4, 0x85, 0xb5,
0x76, 0x62, 0x5e, 0x7e, 0xc6, 0xf4, 0x4c, 0x42, 0xe9, 0xa6, 0x3a, 0x36, 0x20, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
])
});
fn powm(base: &BigUint, exp: &BigUint, modulus: &BigUint) -> BigUint {
let mut base = base.clone();
let mut exp = exp.clone();
let mut result: BigUint = One::one();
while!exp.is_zero() {
if exp.is_odd() |
exp >>= 1;
base = (&base * &base) % modulus;
}
result
}
pub struct DhLocalKeys {
private_key: BigUint,
public_key: BigUint,
}
impl DhLocalKeys {
pub fn random<R: Rng + CryptoRng>(rng: &mut R) -> DhLocalKeys {
let private_key = rng.gen_biguint(95 * 8);
let public_key = powm(&DH_GENERATOR, &private_key, &DH_PRIME);
DhLocalKeys {
private_key,
public_key,
}
}
pub fn public_key(&self) -> Vec<u8> {
self.public_key.to_bytes_be()
}
pub fn shared_secret(&self, remote_key: &[u8]) -> Vec<u8> {
let shared_key = powm(
&BigUint::from_bytes_be(remote_key),
&self.private_key,
&DH_PRIME,
);
shared_key.to_bytes_be()
}
}
| {
result = (result * &base) % modulus;
} | conditional_block |
diffie_hellman.rs | use num_bigint::{BigUint, RandBigInt};
use num_integer::Integer;
use num_traits::{One, Zero};
use once_cell::sync::Lazy;
use rand::{CryptoRng, Rng};
static DH_GENERATOR: Lazy<BigUint> = Lazy::new(|| BigUint::from_bytes_be(&[0x02]));
static DH_PRIME: Lazy<BigUint> = Lazy::new(|| {
BigUint::from_bytes_be(&[
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0x0f, 0xda, 0xa2, 0x21, 0x68, 0xc2,
0x34, 0xc4, 0xc6, 0x62, 0x8b, 0x80, 0xdc, 0x1c, 0xd1, 0x29, 0x02, 0x4e, 0x08, 0x8a, 0x67,
0xcc, 0x74, 0x02, 0x0b, 0xbe, 0xa6, 0x3b, 0x13, 0x9b, 0x22, 0x51, 0x4a, 0x08, 0x79, 0x8e,
0x34, 0x04, 0xdd, 0xef, 0x95, 0x19, 0xb3, 0xcd, 0x3a, 0x43, 0x1b, 0x30, 0x2b, 0x0a, 0x6d,
0xf2, 0x5f, 0x14, 0x37, 0x4f, 0xe1, 0x35, 0x6d, 0x6d, 0x51, 0xc2, 0x45, 0xe4, 0x85, 0xb5,
0x76, 0x62, 0x5e, 0x7e, 0xc6, 0xf4, 0x4c, 0x42, 0xe9, 0xa6, 0x3a, 0x36, 0x20, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | });
fn powm(base: &BigUint, exp: &BigUint, modulus: &BigUint) -> BigUint {
let mut base = base.clone();
let mut exp = exp.clone();
let mut result: BigUint = One::one();
while!exp.is_zero() {
if exp.is_odd() {
result = (result * &base) % modulus;
}
exp >>= 1;
base = (&base * &base) % modulus;
}
result
}
pub struct DhLocalKeys {
private_key: BigUint,
public_key: BigUint,
}
impl DhLocalKeys {
pub fn random<R: Rng + CryptoRng>(rng: &mut R) -> DhLocalKeys {
let private_key = rng.gen_biguint(95 * 8);
let public_key = powm(&DH_GENERATOR, &private_key, &DH_PRIME);
DhLocalKeys {
private_key,
public_key,
}
}
pub fn public_key(&self) -> Vec<u8> {
self.public_key.to_bytes_be()
}
pub fn shared_secret(&self, remote_key: &[u8]) -> Vec<u8> {
let shared_key = powm(
&BigUint::from_bytes_be(remote_key),
&self.private_key,
&DH_PRIME,
);
shared_key.to_bytes_be()
}
} | ]) | random_line_split |
diffie_hellman.rs | use num_bigint::{BigUint, RandBigInt};
use num_integer::Integer;
use num_traits::{One, Zero};
use once_cell::sync::Lazy;
use rand::{CryptoRng, Rng};
static DH_GENERATOR: Lazy<BigUint> = Lazy::new(|| BigUint::from_bytes_be(&[0x02]));
static DH_PRIME: Lazy<BigUint> = Lazy::new(|| {
BigUint::from_bytes_be(&[
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0x0f, 0xda, 0xa2, 0x21, 0x68, 0xc2,
0x34, 0xc4, 0xc6, 0x62, 0x8b, 0x80, 0xdc, 0x1c, 0xd1, 0x29, 0x02, 0x4e, 0x08, 0x8a, 0x67,
0xcc, 0x74, 0x02, 0x0b, 0xbe, 0xa6, 0x3b, 0x13, 0x9b, 0x22, 0x51, 0x4a, 0x08, 0x79, 0x8e,
0x34, 0x04, 0xdd, 0xef, 0x95, 0x19, 0xb3, 0xcd, 0x3a, 0x43, 0x1b, 0x30, 0x2b, 0x0a, 0x6d,
0xf2, 0x5f, 0x14, 0x37, 0x4f, 0xe1, 0x35, 0x6d, 0x6d, 0x51, 0xc2, 0x45, 0xe4, 0x85, 0xb5,
0x76, 0x62, 0x5e, 0x7e, 0xc6, 0xf4, 0x4c, 0x42, 0xe9, 0xa6, 0x3a, 0x36, 0x20, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
])
});
fn | (base: &BigUint, exp: &BigUint, modulus: &BigUint) -> BigUint {
let mut base = base.clone();
let mut exp = exp.clone();
let mut result: BigUint = One::one();
while!exp.is_zero() {
if exp.is_odd() {
result = (result * &base) % modulus;
}
exp >>= 1;
base = (&base * &base) % modulus;
}
result
}
pub struct DhLocalKeys {
private_key: BigUint,
public_key: BigUint,
}
impl DhLocalKeys {
pub fn random<R: Rng + CryptoRng>(rng: &mut R) -> DhLocalKeys {
let private_key = rng.gen_biguint(95 * 8);
let public_key = powm(&DH_GENERATOR, &private_key, &DH_PRIME);
DhLocalKeys {
private_key,
public_key,
}
}
pub fn public_key(&self) -> Vec<u8> {
self.public_key.to_bytes_be()
}
pub fn shared_secret(&self, remote_key: &[u8]) -> Vec<u8> {
let shared_key = powm(
&BigUint::from_bytes_be(remote_key),
&self.private_key,
&DH_PRIME,
);
shared_key.to_bytes_be()
}
}
| powm | identifier_name |
diffie_hellman.rs | use num_bigint::{BigUint, RandBigInt};
use num_integer::Integer;
use num_traits::{One, Zero};
use once_cell::sync::Lazy;
use rand::{CryptoRng, Rng};
static DH_GENERATOR: Lazy<BigUint> = Lazy::new(|| BigUint::from_bytes_be(&[0x02]));
static DH_PRIME: Lazy<BigUint> = Lazy::new(|| {
BigUint::from_bytes_be(&[
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0x0f, 0xda, 0xa2, 0x21, 0x68, 0xc2,
0x34, 0xc4, 0xc6, 0x62, 0x8b, 0x80, 0xdc, 0x1c, 0xd1, 0x29, 0x02, 0x4e, 0x08, 0x8a, 0x67,
0xcc, 0x74, 0x02, 0x0b, 0xbe, 0xa6, 0x3b, 0x13, 0x9b, 0x22, 0x51, 0x4a, 0x08, 0x79, 0x8e,
0x34, 0x04, 0xdd, 0xef, 0x95, 0x19, 0xb3, 0xcd, 0x3a, 0x43, 0x1b, 0x30, 0x2b, 0x0a, 0x6d,
0xf2, 0x5f, 0x14, 0x37, 0x4f, 0xe1, 0x35, 0x6d, 0x6d, 0x51, 0xc2, 0x45, 0xe4, 0x85, 0xb5,
0x76, 0x62, 0x5e, 0x7e, 0xc6, 0xf4, 0x4c, 0x42, 0xe9, 0xa6, 0x3a, 0x36, 0x20, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
])
});
fn powm(base: &BigUint, exp: &BigUint, modulus: &BigUint) -> BigUint {
let mut base = base.clone();
let mut exp = exp.clone();
let mut result: BigUint = One::one();
while!exp.is_zero() {
if exp.is_odd() {
result = (result * &base) % modulus;
}
exp >>= 1;
base = (&base * &base) % modulus;
}
result
}
pub struct DhLocalKeys {
private_key: BigUint,
public_key: BigUint,
}
impl DhLocalKeys {
pub fn random<R: Rng + CryptoRng>(rng: &mut R) -> DhLocalKeys {
let private_key = rng.gen_biguint(95 * 8);
let public_key = powm(&DH_GENERATOR, &private_key, &DH_PRIME);
DhLocalKeys {
private_key,
public_key,
}
}
pub fn public_key(&self) -> Vec<u8> {
self.public_key.to_bytes_be()
}
pub fn shared_secret(&self, remote_key: &[u8]) -> Vec<u8> |
}
| {
let shared_key = powm(
&BigUint::from_bytes_be(remote_key),
&self.private_key,
&DH_PRIME,
);
shared_key.to_bytes_be()
} | identifier_body |
count.rs | #![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::iter::Iterator;
struct A<T> {
begin: T,
end: T
}
macro_rules! Iterator_impl {
($T:ty) => {
impl Iterator for A<$T> {
type Item = $T;
fn next(&mut self) -> Option<Self::Item> {
if self.begin < self.end {
let result = self.begin;
self.begin = self.begin.wrapping_add(1);
Some::<Self::Item>(result)
} else {
None::<Self::Item>
}
}
// fn count(self) -> usize where Self: Sized {
// // Might overflow.
// self.fold(0, |cnt, _| cnt + 1)
// }
}
}
}
type T = i32;
Iterator_impl!(T);
#[test]
fn count_test1() {
let a: A<T> = A { begin: 0, end: 10 };
let count: usize = a.count();
assert_eq!(count, 10);
}
#[test]
fn | () {
let mut a: A<T> = A { begin: 0, end: 10 };
assert_eq!(a.next(), Some::<T>(0));
let count: usize = a.count();
assert_eq!(count, 9);
}
}
| count_test2 | identifier_name |
count.rs | #![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::iter::Iterator;
struct A<T> {
begin: T,
end: T
}
macro_rules! Iterator_impl {
($T:ty) => {
impl Iterator for A<$T> {
type Item = $T;
fn next(&mut self) -> Option<Self::Item> {
if self.begin < self.end { | None::<Self::Item>
}
}
// fn count(self) -> usize where Self: Sized {
// // Might overflow.
// self.fold(0, |cnt, _| cnt + 1)
// }
}
}
}
type T = i32;
Iterator_impl!(T);
#[test]
fn count_test1() {
let a: A<T> = A { begin: 0, end: 10 };
let count: usize = a.count();
assert_eq!(count, 10);
}
#[test]
fn count_test2() {
let mut a: A<T> = A { begin: 0, end: 10 };
assert_eq!(a.next(), Some::<T>(0));
let count: usize = a.count();
assert_eq!(count, 9);
}
} | let result = self.begin;
self.begin = self.begin.wrapping_add(1);
Some::<Self::Item>(result)
} else { | random_line_split |
main.rs | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use kaze::*;
use rand::Rng;
use structopt::StructOpt;
/// This tool generates lock-style state machines that output an `unlocked` flag
/// after receiving a specific sequence of input symbols. It can configurably
/// generate the length of the unlock sequence, the width of the interface, and
/// the probability of it inserting backtracking transitions.
#[derive(StructOpt)]
struct Options {
/// The number of states between the initial and unlocked state.
#[structopt(long, default_value = "32")]
states: u32,
/// The width of the registers and ports making up the lock.
#[structopt(long, default_value = "32")]
width: u32,
}
fn main() -> std::io::Result<()> {
let options = Options::from_args();
let generator = Generator {
states: options.states,
width: options.width,
};
let mut context = Context::new();
let lock = generator.generate(&mut context);
verilog::generate(&lock, std::io::stdout()) | states: u32,
width: u32,
}
const fn num_bits<T>() -> usize { std::mem::size_of::<T>() * 8 }
fn log_2(x: u32) -> u32 {
num_bits::<u32>() as u32 - x.leading_zeros()
}
impl Generator {
fn generate<'a>(&'a self, c: &'a mut Context<'a>) -> &'a Module {
let mut rng = rand::thread_rng();
// compute width of state register
let state_reg_width = log_2(self.states - 1u32);
// create lock module with a single state register and trigger input
let lock = c.module("lock");
let input = lock.input("code", self.width);
let state = lock.reg("state", state_reg_width);
state.default_value(0u32);
// define lock state transitions
let mut next = state.value;
for i in 0..(self.states - 1u32) {
let trigger_value = rng.gen_range(1u64, 2u64.pow(self.width));
let from = lock.lit(i, state_reg_width);
let to = lock.lit(i + 1u32, state_reg_width);
let trigger = lock.lit(trigger_value, self.width);
next = (state.value.eq(from) & input.eq(trigger)).mux(to, next);
}
state.drive_next(next);
// define lock outputs
lock.output("unlocked", state.value.eq(lock.lit(self.states - 1u32, state_reg_width)));
lock.output("state", state.value);
// return HDL
lock
}
} | }
struct Generator { | random_line_split |
main.rs | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use kaze::*;
use rand::Rng;
use structopt::StructOpt;
/// This tool generates lock-style state machines that output an `unlocked` flag
/// after receiving a specific sequence of input symbols. It can configurably
/// generate the length of the unlock sequence, the width of the interface, and
/// the probability of it inserting backtracking transitions.
#[derive(StructOpt)]
struct Options {
/// The number of states between the initial and unlocked state.
#[structopt(long, default_value = "32")]
states: u32,
/// The width of the registers and ports making up the lock.
#[structopt(long, default_value = "32")]
width: u32,
}
fn main() -> std::io::Result<()> {
let options = Options::from_args();
let generator = Generator {
states: options.states,
width: options.width,
};
let mut context = Context::new();
let lock = generator.generate(&mut context);
verilog::generate(&lock, std::io::stdout())
}
struct Generator {
states: u32,
width: u32,
}
const fn num_bits<T>() -> usize { std::mem::size_of::<T>() * 8 }
fn log_2(x: u32) -> u32 {
num_bits::<u32>() as u32 - x.leading_zeros()
}
impl Generator {
fn | <'a>(&'a self, c: &'a mut Context<'a>) -> &'a Module {
let mut rng = rand::thread_rng();
// compute width of state register
let state_reg_width = log_2(self.states - 1u32);
// create lock module with a single state register and trigger input
let lock = c.module("lock");
let input = lock.input("code", self.width);
let state = lock.reg("state", state_reg_width);
state.default_value(0u32);
// define lock state transitions
let mut next = state.value;
for i in 0..(self.states - 1u32) {
let trigger_value = rng.gen_range(1u64, 2u64.pow(self.width));
let from = lock.lit(i, state_reg_width);
let to = lock.lit(i + 1u32, state_reg_width);
let trigger = lock.lit(trigger_value, self.width);
next = (state.value.eq(from) & input.eq(trigger)).mux(to, next);
}
state.drive_next(next);
// define lock outputs
lock.output("unlocked", state.value.eq(lock.lit(self.states - 1u32, state_reg_width)));
lock.output("state", state.value);
// return HDL
lock
}
}
| generate | identifier_name |
main.rs | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use kaze::*;
use rand::Rng;
use structopt::StructOpt;
/// This tool generates lock-style state machines that output an `unlocked` flag
/// after receiving a specific sequence of input symbols. It can configurably
/// generate the length of the unlock sequence, the width of the interface, and
/// the probability of it inserting backtracking transitions.
#[derive(StructOpt)]
struct Options {
/// The number of states between the initial and unlocked state.
#[structopt(long, default_value = "32")]
states: u32,
/// The width of the registers and ports making up the lock.
#[structopt(long, default_value = "32")]
width: u32,
}
fn main() -> std::io::Result<()> {
let options = Options::from_args();
let generator = Generator {
states: options.states,
width: options.width,
};
let mut context = Context::new();
let lock = generator.generate(&mut context);
verilog::generate(&lock, std::io::stdout())
}
struct Generator {
states: u32,
width: u32,
}
const fn num_bits<T>() -> usize |
fn log_2(x: u32) -> u32 {
num_bits::<u32>() as u32 - x.leading_zeros()
}
impl Generator {
fn generate<'a>(&'a self, c: &'a mut Context<'a>) -> &'a Module {
let mut rng = rand::thread_rng();
// compute width of state register
let state_reg_width = log_2(self.states - 1u32);
// create lock module with a single state register and trigger input
let lock = c.module("lock");
let input = lock.input("code", self.width);
let state = lock.reg("state", state_reg_width);
state.default_value(0u32);
// define lock state transitions
let mut next = state.value;
for i in 0..(self.states - 1u32) {
let trigger_value = rng.gen_range(1u64, 2u64.pow(self.width));
let from = lock.lit(i, state_reg_width);
let to = lock.lit(i + 1u32, state_reg_width);
let trigger = lock.lit(trigger_value, self.width);
next = (state.value.eq(from) & input.eq(trigger)).mux(to, next);
}
state.drive_next(next);
// define lock outputs
lock.output("unlocked", state.value.eq(lock.lit(self.states - 1u32, state_reg_width)));
lock.output("state", state.value);
// return HDL
lock
}
}
| { std::mem::size_of::<T>() * 8 } | identifier_body |
slab.rs | use alloc::heap::{Alloc, AllocErr, Layout};
use spin::Mutex;
use slab_allocator::Heap;
static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
pub struct Allocator;
impl Allocator {
pub unsafe fn init(offset: usize, size: usize) {
*HEAP.lock() = Some(Heap::new(offset, size));
}
}
unsafe impl<'a> Alloc for &'a Allocator { | if let Some(ref mut heap) = *HEAP.lock() {
heap.allocate(layout)
} else {
panic!("__rust_allocate: heap not initialized");
}
}
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.deallocate(ptr, layout)
} else {
panic!("__rust_deallocate: heap not initialized");
}
}
fn oom(&mut self, error: AllocErr) ->! {
panic!("Out of memory: {:?}", error);
}
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.usable_size(layout)
} else {
panic!("__rust_usable_size: heap not initialized");
}
}
} | unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { | random_line_split |
slab.rs | use alloc::heap::{Alloc, AllocErr, Layout};
use spin::Mutex;
use slab_allocator::Heap;
static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
pub struct Allocator;
impl Allocator {
pub unsafe fn init(offset: usize, size: usize) {
*HEAP.lock() = Some(Heap::new(offset, size));
}
}
unsafe impl<'a> Alloc for &'a Allocator {
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
if let Some(ref mut heap) = *HEAP.lock() {
heap.allocate(layout)
} else {
panic!("__rust_allocate: heap not initialized");
}
}
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.deallocate(ptr, layout)
} else {
panic!("__rust_deallocate: heap not initialized");
}
}
fn | (&mut self, error: AllocErr) ->! {
panic!("Out of memory: {:?}", error);
}
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.usable_size(layout)
} else {
panic!("__rust_usable_size: heap not initialized");
}
}
}
| oom | identifier_name |
slab.rs | use alloc::heap::{Alloc, AllocErr, Layout};
use spin::Mutex;
use slab_allocator::Heap;
static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
pub struct Allocator;
impl Allocator {
pub unsafe fn init(offset: usize, size: usize) {
*HEAP.lock() = Some(Heap::new(offset, size));
}
}
unsafe impl<'a> Alloc for &'a Allocator {
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
if let Some(ref mut heap) = *HEAP.lock() {
heap.allocate(layout)
} else {
panic!("__rust_allocate: heap not initialized");
}
}
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.deallocate(ptr, layout)
} else {
panic!("__rust_deallocate: heap not initialized");
}
}
fn oom(&mut self, error: AllocErr) ->! |
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.usable_size(layout)
} else {
panic!("__rust_usable_size: heap not initialized");
}
}
}
| {
panic!("Out of memory: {:?}", error);
} | identifier_body |
slab.rs | use alloc::heap::{Alloc, AllocErr, Layout};
use spin::Mutex;
use slab_allocator::Heap;
static HEAP: Mutex<Option<Heap>> = Mutex::new(None);
pub struct Allocator;
impl Allocator {
pub unsafe fn init(offset: usize, size: usize) {
*HEAP.lock() = Some(Heap::new(offset, size));
}
}
unsafe impl<'a> Alloc for &'a Allocator {
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
if let Some(ref mut heap) = *HEAP.lock() {
heap.allocate(layout)
} else |
}
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.deallocate(ptr, layout)
} else {
panic!("__rust_deallocate: heap not initialized");
}
}
fn oom(&mut self, error: AllocErr) ->! {
panic!("Out of memory: {:?}", error);
}
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
if let Some(ref mut heap) = *HEAP.lock() {
heap.usable_size(layout)
} else {
panic!("__rust_usable_size: heap not initialized");
}
}
}
| {
panic!("__rust_allocate: heap not initialized");
} | conditional_block |
mod.rs | ::headers::{HeaderFormat, UserAgent, Header};
use mime_guess::{guess_mime_type_opt, get_mime_type_str};
use xml::name::{OwnedName as OwnedXmlName, Name as XmlName};
use std::io::{ErrorKind as IoErrorKind, BufReader, BufRead, Result as IoResult, Error as IoError};
pub use self::os::*;
pub use self::webdav::*;
pub use self::content_encoding::*;
/// The generic HTML page to use as response to errors.
pub const ERROR_HTML: &str = include_str!("../../assets/error.html");
/// The HTML page to use as template for a requested directory's listing.
pub const DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing.html");
/// The HTML page to use as template for a requested directory's listing for mobile devices.
pub const MOBILE_DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing_mobile.html");
lazy_static! {
/// Collection of data to be injected into generated responses.
pub static ref ASSETS: HashMap<&'static str, Cow<'static, str>> = {
let mut ass = HashMap::with_capacity(10);
ass.insert("favicon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("ico").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/favicon.ico")[..], base64::STANDARD))));
ass.insert("dir_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/directory.gif")[..], base64::STANDARD))));
ass.insert("file_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file.gif")[..], base64::STANDARD))));
ass.insert("file_binary_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_binary.gif")[..], base64::STANDARD))));
ass.insert("file_image_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_image.gif")[..], base64::STANDARD))));
ass.insert("file_text_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_text.gif")[..], base64::STANDARD))));
ass.insert("back_arrow_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/back_arrow.gif")[..], base64::STANDARD))));
ass.insert("new_dir_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/new_directory.gif")[..], base64::STANDARD))));
ass.insert("delete_file_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/delete_file.png")[..], base64::STANDARD))));
ass.insert("rename_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/rename.png")[..], base64::STANDARD))));
ass.insert("confirm_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/confirm.png")[..], base64::STANDARD))));
ass.insert("date", Cow::Borrowed(include_str!("../../assets/date.js")));
ass.insert("manage", Cow::Borrowed(include_str!("../../assets/manage.js")));
ass.insert("manage_mobile", Cow::Borrowed(include_str!("../../assets/manage_mobile.js")));
ass.insert("manage_desktop", Cow::Borrowed(include_str!("../../assets/manage_desktop.js")));
ass.insert("upload", Cow::Borrowed(include_str!("../../assets/upload.js")));
ass.insert("adjust_tz", Cow::Borrowed(include_str!("../../assets/adjust_tz.js")));
ass
};
}
/// The port to start scanning from if no ports were given.
pub const PORT_SCAN_LOWEST: u16 = 8000;
/// The port to end scanning at if no ports were given.
pub const PORT_SCAN_HIGHEST: u16 = 9999;
/// The app name and version to use with User-Agent or Server response header.
pub const USER_AGENT: &str = concat!("http/", env!("CARGO_PKG_VERSION"));
/// Index file extensions to look for if `-i` was not specified and strippable extensions to look for if `-x` was specified.
pub const INDEX_EXTENSIONS: &[&str] = &["html", "htm", "shtml"];
/// The [WWW-Authenticate header](https://tools.ietf.org/html/rfc7235#section-4.1), without parsing.
///
/// We don't ever receive this header, only ever send it, so this is fine.
#[derive(Debug, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct WwwAuthenticate(pub Cow<'static, str>);
impl Header for WwwAuthenticate {
fn header_name() -> &'static str {
"WWW-Authenticate"
}
/// Dummy impl returning an empty value, since we're only ever sending these
fn parse_header(_: &[Vec<u8>]) -> HyperResult<WwwAuthenticate> {
Ok(WwwAuthenticate("".into()))
}
}
impl HeaderFormat for WwwAuthenticate {
fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.0)
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct CommaList<D: fmt::Display, I: Iterator<Item = D>>(pub I);
impl<D: fmt::Display, I: Iterator<Item = D> + Clone> fmt::Display for CommaList<D, I> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut itr = self.0.clone();
if let Some(item) = itr.next() {
item.fmt(f)?;
for item in itr {
f.write_str(", ")?;
item.fmt(f)?;
}
}
Ok(())
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct DisplayThree<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display>(pub Df, pub Ds, pub Dt);
impl<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display> fmt::Display for DisplayThree<Df, Ds, Dt> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)?;
self.1.fmt(f)?;
self.2.fmt(f)?;
Ok(())
}
}
/// `xml`'s `OwnedName::borrow()` returns a value not a reference, so it cannot be used with the libstd `Borrow` trait
pub trait BorrowXmlName<'n> {
fn borrow_xml_name(&'n self) -> XmlName<'n>;
}
impl<'n> BorrowXmlName<'n> for XmlName<'n> {
#[inline(always)]
fn borrow_xml_name(&'n self) -> XmlName<'n> {
*self
}
}
impl<'n> BorrowXmlName<'n> for OwnedXmlName {
#[inline(always)]
fn borrow_xml_name(&'n self) -> XmlName<'n> {
self.borrow()
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct Spaces(pub usize);
impl fmt::Display for Spaces {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for _ in 0..self.0 {
f.write_char(' ')?;
}
Ok(())
}
}
/// Uppercase the first character of the supplied string.
///
/// Based on http://stackoverflow.com/a/38406885/2851815
///
/// # Examples
///
/// ```
/// # use https::util::uppercase_first;
/// assert_eq!(uppercase_first("abolish"), "Abolish".to_string());
/// ```
pub fn uppercase_first(s: &str) -> String {
let mut c = s.chars();
match c.next() {
None => String::new(),
Some(f) => f.to_uppercase().collect::<String>() + c.as_str(),
}
}
/// Percent-encode the last character if it's white space
///
/// Firefox treats, e.g. `href="http://henlo/menlo "` as `href="http://henlo/menlo"`,
/// but that final whitespace is significant, so this turns it into `href="http://henlo/menlo %20"`
pub fn encode_tail_if_trimmed(mut s: String) -> String {
let c = s.chars().rev().next();
if c.map(|c| c.is_whitespace()).unwrap_or(false) {
let c = c.unwrap();
s.pop();
s.push('%');
let mut cb = [0u8; 4];
c.encode_utf8(&mut cb);
for b in cb.iter().take(c.len_utf8()) {
write!(s, "{:02X}", b).expect("Couldn't allocate two more characters?");
}
s
} else {
s
}
}
/// %-escape special characters in an URL
pub fn escape_specials<S: AsRef<str>>(s: S) -> String {
let s = s.as_ref();
let mut ret = Vec::with_capacity(s.len());
for &b in s.as_bytes() {
match b {
b'%' => ret.extend(b"%25"),
b'#' => ret.extend(b"%23"),
b'?' => ret.extend(b"%3F"),
b'[' => ret.extend(b"%5B"),
b']' => ret.extend(b"%5D"),
_ => ret.push(b),
}
}
unsafe { String::from_utf8_unchecked(ret) }
}
/// Check if the specified file is to be considered "binary".
///
/// Basically checks is a file is UTF-8.
///
/// # Examples
///
/// ```
/// # use https::util::file_binary;
/// # #[cfg(target_os = "windows")]
/// # assert!(file_binary("target/debug/http.exe"));
/// # #[cfg(not(target_os = "windows"))]
/// assert!(file_binary("target/debug/http"));
/// assert!(!file_binary("Cargo.toml"));
/// ```
pub fn file_binary<P: AsRef<Path>>(path: P) -> bool {
file_binary_impl(path.as_ref())
}
fn file_binary_impl(path: &Path) -> bool {
path.metadata()
.map(|m| is_device(&m.file_type()) || File::open(path).and_then(|f| BufReader::new(f).read_line(&mut String::new())).is_err())
.unwrap_or(true)
}
/// Fill out an HTML template.
///
/// All fields must be addressed even if formatted to be empty.
///
/// # Examples
///
/// ```
/// # use https::util::{html_response, NOT_IMPLEMENTED_HTML};
/// println!(html_response(NOT_IMPLEMENTED_HTML, &["<p>Abolish the burgeoisie!</p>"]));
/// ```
pub fn html_response<S: AsRef<str>>(data: &str, format_strings: &[S]) -> String {
ASSETS.iter().fold(format_strings.iter().enumerate().fold(data.to_string(), |d, (i, s)| d.replace(&format!("{{{}}}", i), s.as_ref())),
|d, (k, v)| d.replace(&format!("{{{}}}", k), v))
}
/// Return the path part of the URL.
///
/// # Example
///
/// ```
/// # extern crate iron;
/// # extern crate https;
/// # use iron::Url;
/// # use https::util::url_path;
/// let url = Url::parse("127.0.0.1:8000/capitalism/русский/");
/// assert_eq!(url_path(&url), "capitalism/русский/");
/// ```
pub fn url_path(url: &Url) -> String {
let path = url.path();
if path == [""] {
"/".to_string()
} else {
path.into_iter().fold("".to_string(),
|cur, pp| format!("{}/{}", cur, percent_decode(pp).unwrap_or(Cow::Borrowed("<incorrect UTF8>"))))
[1..]
.to_string()
}
}
/// Decode a percent-encoded string (like a part of a URL).
///
/// # Example
///
/// ```
/// # use https::util::percent_decode;
/// # use std::borrow::Cow;
/// assert_eq!(percent_decode("%D0%B0%D1%81%D0%B4%D1%84%20fdsa"), Some(Cow::Owned("асдф fdsa".to_string())));
/// assert_eq!(percent_decode("%D0%D1%81%D0%B4%D1%84%20fdsa"), None);
/// ```
pub fn percent_decode(s: &str) -> Option<Cow<str>> {
percent_encoding::percent_decode(s.as_bytes()).decode_utf8().ok()
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified_p(f: &Path) -> Tm {
file_time_modified(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_created_p(f: &Path) -> Tm {
file_time_created(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed_p(f: &Path) -> Tm {
file_time_accessed(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified(m: &Metadata) -> Tm {
file_time_impl(m.modified().expect("Failed to get file last modified date"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_created(m: &Metadata) -> Tm {
file_time_impl(m.created().or_else(|_| m.modified()).expect("Failed to get file created date"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed(m: &Metadata) -> Tm {
file_time_impl(m.accessed().expect("Failed to get file accessed date"))
}
fn file_time_impl(time: SystemTime) -> Tm {
match time.elapsed() {
Ok(dur) => time::now_utc() - Duration::from_std(dur).unwrap(),
Err(ste) => time::now_utc() + Duration::from_std(ste.duration()).unwrap(),
}
}
/// Check, whether, in any place of the path, a file is treated like a directory.
///
/// A file is treated like a directory when it is treated as if it had a subpath, e.g., given:
///
/// ```sh
/// tree.
/// | dir0
/// | dir1
/// | file01
/// ```
///
/// This function would return true for `./dir1/file01/file010`, `./dir1/file01/dir010/file0100`, etc., but not
/// for `./dir0/file00`, `./dir0/dir00/file000`, `./dir1/file02/`, `./dir1/dir010/file0100`.
pub fn detect_file_as_dir(mut p: &Path) -> bool {
while let Some(pnt) = p.parent() {
if pnt.is_file() {
return true;
}
p = pnt;
}
false
}
/// Check if a path refers to a symlink in a way that also works on Windows.
pub fn is_symlink<P: AsRef<Path>>(p: P) -> bool {
p.as_ref().read_link().is_ok()
}
/// Check if a path refers to a file in a way that includes Unix devices and Windows symlinks.
pub fn is_actually_file<P: AsRef<Path>>(tp: &FileType, p: P) -> bool {
tp.is_file() || (tp.is_symlink() && fs::metadata(p).map(|m| is_actually_file(&m.file_type(), "")).unwrap_or(false)) || is_device(tp)
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path.
pub fn is_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
let (mut who, of_whom) = if let Ok(p) = fs::canonicalize(who).and_then(|w| fs::canonicalize(of_whom).map(|o| (w, o))) {
p
} else {
return false;
};
if who == of_whom {
return true;
}
while let Some(who_p) = who.parent().map(|p| p.to_path_buf()) {
who = who_p;
if who == of_whom {
return true;
}
}
false
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path, without without requiring it to
/// exist in the first place.
pub fn is_nonexistent_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
let mut who = fs::canonicalize(&who).unwrap_or_else(|_| who.as_ref().to_path_buf());
let of_whom = if let Ok(p) = fs::canonicalize(of_whom) {
p
} else {
return false;
};
if who == of_whom {
return true;
}
while let Some(who_p) = who.parent().map(|p| p.to_path_buf()) {
who = if let Ok(p) = fs::canonicalize(&who_p) {
p
} else {
who_p
};
if who == of_whom {
return true;
}
}
false
}
/// Construct string representing a human-readable size.
///
/// Stolen, adapted and inlined from [fielsize.js](http://filesizejs.com).
pub fn human_readable_size(s: u64) -> String {
lazy_static! {
static ref LN_KIB: f64 = 1024f64.log(f64::consts::E);
}
if s == 0 {
"0 B".to_string()
} else {
let num = s as f64;
let exp = cmp::min(cmp::max((num.log(f64::consts::E) / *LN_KIB) as i32, 0), 8);
let val = num / 2f64.powi(exp * 10);
if exp > 0 {
(val * 10f64).round() / 10f64
} else {
val.round()
}
.to_string() + " " + ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"][cmp::max(exp, 0) as usize]
}
}
/// Check if, given the request headers, the client should be considered a mobile device.
pub fn client_mobile(hdr: &Headers) -> bool {
hdr.get::<UserAgent>().map(|s| s.contains("Mobi") || s.contains("mobi")).unwrap_or(false)
}
/// Check if, given the request headers, the client should be treated as Microsoft software.
///
/// Based on https://github.com/miquels/webdav-handler-rs/blob/02433c1acfccd848a7de26889f6857cbad559076/src/handle_props.rs#L529
pub fn client_microsoft(hdr: &Headers) -> bool {
hdr.get::<UserAgent>().map(|s| s.contains("Microsoft") || s.contains("microsoft")).unwrap_or(false)
}
/// Get the suffix for the icon to use to represent the given file.
pub fn file_icon_suffix<P: AsRef<Path>>(f: P, is_file: bool) -> &'static str {
if is_file {
match guess_mime_type_opt(&f) {
Some(mime::Mime(mime::TopLevel::Image,..)) |
Some(mime::Mime(mime::TopLevel::Video,..)) => "_image",
Some(mime::Mime(mime::TopLevel::Text,..)) => "_text",
Some(mime::Mime(mime::TopLevel::Application,..)) => "_binary",
None => if file_binary(&f) { "" } else { "_text" },
_ => "",
}
} else { | }
/// Get the metadata of the specified file.
///
/// The specified path must point to a file.
pub fn get_raw_fs_metadata<P: AsRef<Path>>(f: P) -> RawFileData {
get_raw_fs_metadata_impl(f.as_ref())
}
fn get_raw_fs_metadata_impl(f: &Path) -> RawFileData {
let meta = f.metadata().expect("Failed to get requested file metadata");
RawFileData {
mime_type: guess_mime_type_opt(f).unwrap_or_else(|| if file_binary(f) {
"application/octet-stream".parse().unwrap()
} else {
"text/plain".parse().unwrap()
}),
name: f.file_name().unwrap().to_str().expect("Failed to get requested file name").to_string(),
last_modified: file_time_modified(&meta),
size: file_length(&meta, &f),
is_file: true,
}
}
/// Recursively copy a directory
///
/// Stolen from https://github.com/mdunsmuir/copy_dir/blob/0.1.2/src/lib.rs
pub fn copy_dir(from: &Path, to: &Path) -> IoResult<Vec<(IoError, String)>> {
macro_rules! push_error {
($vec:ident, $path:ident, $expr:expr) => {
match $expr {
Ok(_) => (),
Err(e) => $vec.push((e, $path.to_string_lossy().into_owned())),
}
};
}
let mut errors = Vec::new();
fs::create_dir(&to)?;
// The approach taken by this code (i.e. walkdir) will not gracefully
// handle copying a directory into itself, so we're going to simply
// disallow it by checking the paths. This is a thornier problem than I
// wish it was, and I'd like to find a better solution, but for now I
// would prefer to return an error rather than having the copy blow up
// in users' faces. Ultimately I think a solution to this will involve
// not using walkdir at all, and might come along with better handling
// of hard links.
if from.canonicalize().and_then(|fc| to.canonicalize().map(|tc| (fc, tc))).map(|(fc, tc)| tc.starts_with(fc))? {
fs::remove_dir(&to)?;
return Err(IoError::new(IoErrorKind::Other, "cannot copy to a path prefixed by the source path"));
} | ""
} | random_line_split |
mod.rs | headers::{HeaderFormat, UserAgent, Header};
use mime_guess::{guess_mime_type_opt, get_mime_type_str};
use xml::name::{OwnedName as OwnedXmlName, Name as XmlName};
use std::io::{ErrorKind as IoErrorKind, BufReader, BufRead, Result as IoResult, Error as IoError};
pub use self::os::*;
pub use self::webdav::*;
pub use self::content_encoding::*;
/// The generic HTML page to use as response to errors.
pub const ERROR_HTML: &str = include_str!("../../assets/error.html");
/// The HTML page to use as template for a requested directory's listing.
pub const DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing.html");
/// The HTML page to use as template for a requested directory's listing for mobile devices.
pub const MOBILE_DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing_mobile.html");
lazy_static! {
/// Collection of data to be injected into generated responses.
pub static ref ASSETS: HashMap<&'static str, Cow<'static, str>> = {
let mut ass = HashMap::with_capacity(10);
ass.insert("favicon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("ico").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/favicon.ico")[..], base64::STANDARD))));
ass.insert("dir_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/directory.gif")[..], base64::STANDARD))));
ass.insert("file_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file.gif")[..], base64::STANDARD))));
ass.insert("file_binary_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_binary.gif")[..], base64::STANDARD))));
ass.insert("file_image_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_image.gif")[..], base64::STANDARD))));
ass.insert("file_text_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_text.gif")[..], base64::STANDARD))));
ass.insert("back_arrow_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/back_arrow.gif")[..], base64::STANDARD))));
ass.insert("new_dir_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/new_directory.gif")[..], base64::STANDARD))));
ass.insert("delete_file_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/delete_file.png")[..], base64::STANDARD))));
ass.insert("rename_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/rename.png")[..], base64::STANDARD))));
ass.insert("confirm_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/confirm.png")[..], base64::STANDARD))));
ass.insert("date", Cow::Borrowed(include_str!("../../assets/date.js")));
ass.insert("manage", Cow::Borrowed(include_str!("../../assets/manage.js")));
ass.insert("manage_mobile", Cow::Borrowed(include_str!("../../assets/manage_mobile.js")));
ass.insert("manage_desktop", Cow::Borrowed(include_str!("../../assets/manage_desktop.js")));
ass.insert("upload", Cow::Borrowed(include_str!("../../assets/upload.js")));
ass.insert("adjust_tz", Cow::Borrowed(include_str!("../../assets/adjust_tz.js")));
ass
};
}
/// The port to start scanning from if no ports were given.
pub const PORT_SCAN_LOWEST: u16 = 8000;
/// The port to end scanning at if no ports were given.
pub const PORT_SCAN_HIGHEST: u16 = 9999;
/// The app name and version to use with User-Agent or Server response header.
pub const USER_AGENT: &str = concat!("http/", env!("CARGO_PKG_VERSION"));
/// Index file extensions to look for if `-i` was not specified and strippable extensions to look for if `-x` was specified.
pub const INDEX_EXTENSIONS: &[&str] = &["html", "htm", "shtml"];
/// The [WWW-Authenticate header](https://tools.ietf.org/html/rfc7235#section-4.1), without parsing.
///
/// We don't ever receive this header, only ever send it, so this is fine.
#[derive(Debug, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct WwwAuthenticate(pub Cow<'static, str>);
impl Header for WwwAuthenticate {
fn header_name() -> &'static str {
"WWW-Authenticate"
}
/// Dummy impl returning an empty value, since we're only ever sending these
fn parse_header(_: &[Vec<u8>]) -> HyperResult<WwwAuthenticate> {
Ok(WwwAuthenticate("".into()))
}
}
impl HeaderFormat for WwwAuthenticate {
fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.0)
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct CommaList<D: fmt::Display, I: Iterator<Item = D>>(pub I);
impl<D: fmt::Display, I: Iterator<Item = D> + Clone> fmt::Display for CommaList<D, I> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut itr = self.0.clone();
if let Some(item) = itr.next() {
item.fmt(f)?;
for item in itr {
f.write_str(", ")?;
item.fmt(f)?;
}
}
Ok(())
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct DisplayThree<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display>(pub Df, pub Ds, pub Dt);
impl<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display> fmt::Display for DisplayThree<Df, Ds, Dt> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)?;
self.1.fmt(f)?;
self.2.fmt(f)?;
Ok(())
}
}
/// `xml`'s `OwnedName::borrow()` returns a value not a reference, so it cannot be used with the libstd `Borrow` trait
pub trait BorrowXmlName<'n> {
fn borrow_xml_name(&'n self) -> XmlName<'n>;
}
impl<'n> BorrowXmlName<'n> for XmlName<'n> {
#[inline(always)]
fn borrow_xml_name(&'n self) -> XmlName<'n> |
}
impl<'n> BorrowXmlName<'n> for OwnedXmlName {
#[inline(always)]
fn borrow_xml_name(&'n self) -> XmlName<'n> {
self.borrow()
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct Spaces(pub usize);
impl fmt::Display for Spaces {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for _ in 0..self.0 {
f.write_char(' ')?;
}
Ok(())
}
}
/// Uppercase the first character of the supplied string.
///
/// Based on http://stackoverflow.com/a/38406885/2851815
///
/// # Examples
///
/// ```
/// # use https::util::uppercase_first;
/// assert_eq!(uppercase_first("abolish"), "Abolish".to_string());
/// ```
pub fn uppercase_first(s: &str) -> String {
let mut c = s.chars();
match c.next() {
None => String::new(),
Some(f) => f.to_uppercase().collect::<String>() + c.as_str(),
}
}
/// Percent-encode the last character if it's white space
///
/// Firefox treats, e.g. `href="http://henlo/menlo "` as `href="http://henlo/menlo"`,
/// but that final whitespace is significant, so this turns it into `href="http://henlo/menlo %20"`
pub fn encode_tail_if_trimmed(mut s: String) -> String {
let c = s.chars().rev().next();
if c.map(|c| c.is_whitespace()).unwrap_or(false) {
let c = c.unwrap();
s.pop();
s.push('%');
let mut cb = [0u8; 4];
c.encode_utf8(&mut cb);
for b in cb.iter().take(c.len_utf8()) {
write!(s, "{:02X}", b).expect("Couldn't allocate two more characters?");
}
s
} else {
s
}
}
/// %-escape special characters in an URL
pub fn escape_specials<S: AsRef<str>>(s: S) -> String {
let s = s.as_ref();
let mut ret = Vec::with_capacity(s.len());
for &b in s.as_bytes() {
match b {
b'%' => ret.extend(b"%25"),
b'#' => ret.extend(b"%23"),
b'?' => ret.extend(b"%3F"),
b'[' => ret.extend(b"%5B"),
b']' => ret.extend(b"%5D"),
_ => ret.push(b),
}
}
unsafe { String::from_utf8_unchecked(ret) }
}
/// Check if the specified file is to be considered "binary".
///
/// Basically checks is a file is UTF-8.
///
/// # Examples
///
/// ```
/// # use https::util::file_binary;
/// # #[cfg(target_os = "windows")]
/// # assert!(file_binary("target/debug/http.exe"));
/// # #[cfg(not(target_os = "windows"))]
/// assert!(file_binary("target/debug/http"));
/// assert!(!file_binary("Cargo.toml"));
/// ```
pub fn file_binary<P: AsRef<Path>>(path: P) -> bool {
file_binary_impl(path.as_ref())
}
fn file_binary_impl(path: &Path) -> bool {
path.metadata()
.map(|m| is_device(&m.file_type()) || File::open(path).and_then(|f| BufReader::new(f).read_line(&mut String::new())).is_err())
.unwrap_or(true)
}
/// Fill out an HTML template.
///
/// All fields must be addressed even if formatted to be empty.
///
/// # Examples
///
/// ```
/// # use https::util::{html_response, NOT_IMPLEMENTED_HTML};
/// println!(html_response(NOT_IMPLEMENTED_HTML, &["<p>Abolish the burgeoisie!</p>"]));
/// ```
pub fn html_response<S: AsRef<str>>(data: &str, format_strings: &[S]) -> String {
ASSETS.iter().fold(format_strings.iter().enumerate().fold(data.to_string(), |d, (i, s)| d.replace(&format!("{{{}}}", i), s.as_ref())),
|d, (k, v)| d.replace(&format!("{{{}}}", k), v))
}
/// Return the path part of the URL.
///
/// # Example
///
/// ```
/// # extern crate iron;
/// # extern crate https;
/// # use iron::Url;
/// # use https::util::url_path;
/// let url = Url::parse("127.0.0.1:8000/capitalism/русский/");
/// assert_eq!(url_path(&url), "capitalism/русский/");
/// ```
pub fn url_path(url: &Url) -> String {
let path = url.path();
if path == [""] {
"/".to_string()
} else {
path.into_iter().fold("".to_string(),
|cur, pp| format!("{}/{}", cur, percent_decode(pp).unwrap_or(Cow::Borrowed("<incorrect UTF8>"))))
[1..]
.to_string()
}
}
/// Decode a percent-encoded string (like a part of a URL).
///
/// # Example
///
/// ```
/// # use https::util::percent_decode;
/// # use std::borrow::Cow;
/// assert_eq!(percent_decode("%D0%B0%D1%81%D0%B4%D1%84%20fdsa"), Some(Cow::Owned("асдф fdsa".to_string())));
/// assert_eq!(percent_decode("%D0%D1%81%D0%B4%D1%84%20fdsa"), None);
/// ```
pub fn percent_decode(s: &str) -> Option<Cow<str>> {
    // Delegates to the percent_encoding crate; returns None when the decoded
    // byte sequence is not valid UTF-8 (the error detail is discarded by `.ok()`).
    percent_encoding::percent_decode(s.as_bytes()).decode_utf8().ok()
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified_p(f: &Path) -> Tm {
    // NOTE(review): panics if the path cannot be stat'd (e.g. it does not exist).
    file_time_modified(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's creation as a `time::Tm` in UTC.
pub fn file_time_created_p(f: &Path) -> Tm {
    // NOTE(review): panics if the path cannot be stat'd (e.g. it does not exist).
    file_time_created(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed_p(f: &Path) -> Tm {
    // NOTE(review): panics if the path cannot be stat'd (e.g. it does not exist).
    file_time_accessed(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified(m: &Metadata) -> Tm {
    // Panics when the platform cannot report an mtime.
    file_time_impl(m.modified().expect("Failed to get file last modified date"))
}
/// Get the timestamp of the file's creation as a `time::Tm` in UTC,
/// falling back to the modification time where creation time is unsupported.
pub fn file_time_created(m: &Metadata) -> Tm {
    // Some filesystems/platforms don't track btime; use mtime as a stand-in.
    file_time_impl(m.created().or_else(|_| m.modified()).expect("Failed to get file created date"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed(m: &Metadata) -> Tm {
    // Panics when the platform cannot report an atime.
    file_time_impl(m.accessed().expect("Failed to get file accessed date"))
}
/// Convert a `SystemTime` into a `time::Tm` in UTC.
fn file_time_impl(time: SystemTime) -> Tm {
    // `SystemTime` only exposes its distance from "now", so reconstruct the
    // absolute moment by offsetting the current UTC time by that distance.
    match time.elapsed() {
        // `time` lies in the past: subtract the elapsed span from now.
        Ok(dur) => time::now_utc() - Duration::from_std(dur).unwrap(),
        // `time` lies in the future: `elapsed()` errs and the error carries the span.
        Err(ste) => time::now_utc() + Duration::from_std(ste.duration()).unwrap(),
    }
}
/// Check, whether, in any place of the path, a file is treated like a directory.
///
/// A file is treated like a directory when it is treated as if it had a subpath, e.g., given:
///
/// ```sh
/// tree.
/// | dir0
/// | dir1
/// | file01
/// ```
///
/// This function would return true for `./dir1/file01/file010`, `./dir1/file01/dir010/file0100`, etc., but not
/// for `./dir0/file00`, `./dir0/dir00/file000`, `./dir1/file02/`, `./dir1/dir010/file0100`.
pub fn detect_file_as_dir(p: &Path) -> bool {
    // Every strict ancestor of `p` should be a directory; if any of them is an
    // existing regular file, then `p` is a path "through" a file.
    // `ancestors()` yields `p` itself first, so skip it.
    p.ancestors().skip(1).any(|ancestor| ancestor.is_file())
}
/// Check if a path refers to a symlink in a way that also works on Windows.
pub fn is_symlink<P: AsRef<Path>>(p: P) -> bool {
    // `read_link()` only succeeds on symlinks, so Ok(_) identifies one without
    // needing platform-specific metadata flags (works for Windows links too).
    p.as_ref().read_link().is_ok()
}
/// Check if a path refers to a file in a way that includes Unix devices and Windows symlinks.
pub fn is_actually_file<P: AsRef<Path>>(tp: &FileType, p: P) -> bool {
    // True for: a plain file; a symlink whose target resolves to a file
    // (`fs::metadata` follows links — the recursive call passes "" because the
    // followed metadata can no longer be a symlink, so the path is unused);
    // or a device node (`is_device` is a platform helper defined elsewhere).
    tp.is_file() || (tp.is_symlink() && fs::metadata(p).map(|m| is_actually_file(&m.file_type(), "")).unwrap_or(false)) || is_device(tp)
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path.
pub fn is_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
    // Both paths must canonicalise (i.e. exist); otherwise they can't be compared.
    let resolved = fs::canonicalize(who).and_then(|w| fs::canonicalize(of_whom).map(|o| (w, o)));
    match resolved {
        // `ancestors()` yields `who` itself first, so equality also counts as descent.
        Ok((who, of_whom)) => who.ancestors().any(|ancestor| ancestor == of_whom),
        Err(_) => false,
    }
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path, without without requiring it to
/// exist in the first place.
pub fn is_nonexistent_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
    // The ancestor must exist and canonicalise, or there is nothing to match against.
    let of_whom = match fs::canonicalize(of_whom) {
        Ok(p) => p,
        Err(_) => return false,
    };
    // `who` may not exist: fall back to the path as given and canonicalise each
    // ancestor opportunistically while walking upwards.
    let mut cur = fs::canonicalize(&who).unwrap_or_else(|_| who.as_ref().to_path_buf());
    loop {
        if cur == of_whom {
            return true;
        }
        match cur.parent() {
            Some(parent) => {
                let parent = parent.to_path_buf();
                cur = fs::canonicalize(&parent).unwrap_or(parent);
            }
            None => return false,
        }
    }
}
/// Construct string representing a human-readable size.
///
/// Stolen, adapted and inlined from [fielsize.js](http://filesizejs.com).
pub fn human_readable_size(s: u64) -> String {
    // Unit names for successive powers of 1024.
    const UNITS: [&str; 9] = ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"];
    if s == 0 {
        return "0 B".to_string();
    }
    let num = s as f64;
    // Index of the largest unit not exceeding `num`: floor(log_1024(num)),
    // clamped into the unit table. (`ln()` replaces the previous
    // `log(f64::consts::E)` round-trip, and the constant no longer needs
    // lazy_static caching — it is a single `ln` call.)
    let exp = cmp::min(cmp::max((num.ln() / 1024f64.ln()) as i32, 0), 8);
    let val = num / 2f64.powi(exp * 10);
    // One decimal place for scaled units, whole numbers for plain bytes.
    let rounded = if exp > 0 {
        (val * 10f64).round() / 10f64
    } else {
        val.round()
    };
    // `exp` is already clamped to 0..=8, so indexing cannot panic.
    format!("{} {}", rounded, UNITS[exp as usize])
}
/// Check if, given the request headers, the client should be considered a mobile device.
pub fn client_mobile(hdr: &Headers) -> bool {
    // Substring heuristic: "Mobi"/"mobi" appears in common mobile UA strings
    // (e.g. "Mobile"). A missing User-Agent header is treated as non-mobile.
    hdr.get::<UserAgent>().map(|s| s.contains("Mobi") || s.contains("mobi")).unwrap_or(false)
}
/// Check if, given the request headers, the client should be treated as Microsoft software.
///
/// Based on https://github.com/miquels/webdav-handler-rs/blob/02433c1acfccd848a7de26889f6857cbad559076/src/handle_props.rs#L529
pub fn client_microsoft(hdr: &Headers) -> bool {
    // Substring heuristic on the User-Agent; a missing header means "not Microsoft".
    hdr.get::<UserAgent>().map(|s| s.contains("Microsoft") || s.contains("microsoft")).unwrap_or(false)
}
/// Get the suffix for the icon to use to represent the given file.
pub fn file_icon_suffix<P: AsRef<Path>>(f: P, is_file: bool) -> &'static str {
    if is_file {
        // Map the extension-guessed MIME top-level type to an icon-name suffix;
        // when no type can be guessed, sniff the content (binary vs text) instead.
        match guess_mime_type_opt(&f) {
            Some(mime::Mime(mime::TopLevel::Image,..)) |
            Some(mime::Mime(mime::TopLevel::Video,..)) => "_image",
            Some(mime::Mime(mime::TopLevel::Text,..)) => "_text",
            Some(mime::Mime(mime::TopLevel::Application,..)) => "_binary",
            None => if file_binary(&f) { "" } else { "_text" },
            // Other top-level types (audio, multipart, ...) get the plain icon.
            _ => "",
        }
    } else {
        // Directories use the plain icon.
        ""
    }
}
/// Get the metadata of the specified file.
///
/// The specified path must point to a file.
pub fn get_raw_fs_metadata<P: AsRef<Path>>(f: P) -> RawFileData {
    // Generic shim; the real work happens in the non-generic worker below.
    get_raw_fs_metadata_impl(f.as_ref())
}
// Non-generic worker for `get_raw_fs_metadata`.
fn get_raw_fs_metadata_impl(f: &Path) -> RawFileData {
    // NOTE(review): panics if the path cannot be stat'd — callers must pass an existing file.
    let meta = f.metadata().expect("Failed to get requested file metadata");
    RawFileData {
        // Guess from the extension first; otherwise sniff the content and fall
        // back to a generic binary or plain-text type.
        mime_type: guess_mime_type_opt(f).unwrap_or_else(|| if file_binary(f) {
            "application/octet-stream".parse().unwrap()
        } else {
            "text/plain".parse().unwrap()
        }),
        // Panics on paths without a final component or with non-UTF-8 file names.
        name: f.file_name().unwrap().to_str().expect("Failed to get requested file name").to_string(),
        last_modified: file_time_modified(&meta),
        size: file_length(&meta, &f),
        is_file: true,
    }
}
/// Recursively copy a directory
///
/// Stolen from https://github.com/mdunsmuir/copy_dir/blob/0.1.2/src/lib.rs
pub fn copy_dir(from: &Path, to: &Path) -> IoResult<Vec<(IoError, String)>> {
macro_rules! push_error {
($vec:ident, $path:ident, $expr:expr) => {
match $expr {
Ok(_) => (),
Err(e) => $vec.push((e, $path.to_string_lossy().into_owned())),
}
};
}
let mut errors = Vec::new();
fs::create_dir(&to)?;
// The approach taken by this code (i.e. walkdir) will not gracefully
// handle copying a directory into itself, so we're going to simply
// disallow it by checking the paths. This is a thornier problem than I
// wish it was, and I'd like to find a better solution, but for now I
// would prefer to return an error rather than having the copy blow up
// in users' faces. Ultimately I think a solution to this will involve
// not using walkdir at all, and might come along with better handling
// of hard links.
if from.canonicalize().and_then(|fc| to.canonicalize().map(|tc| (fc, tc))).map(|(fc, tc)| tc.starts_with(fc))? {
fs::remove_dir(&to)?;
return Err(IoError::new(IoErrorKind::Other, "cannot copy to a path prefixed by the source path"));
| {
*self
} | identifier_body |
mod.rs | headers::{HeaderFormat, UserAgent, Header};
use mime_guess::{guess_mime_type_opt, get_mime_type_str};
use xml::name::{OwnedName as OwnedXmlName, Name as XmlName};
use std::io::{ErrorKind as IoErrorKind, BufReader, BufRead, Result as IoResult, Error as IoError};
pub use self::os::*;
pub use self::webdav::*;
pub use self::content_encoding::*;
/// The generic HTML page to use as response to errors.
pub const ERROR_HTML: &str = include_str!("../../assets/error.html");
/// The HTML page to use as template for a requested directory's listing.
pub const DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing.html");
/// The HTML page to use as template for a requested directory's listing for mobile devices.
pub const MOBILE_DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing_mobile.html");
lazy_static! {
    /// Collection of data to be injected into generated responses.
    ///
    /// Icons are embedded at compile time and exposed as base64 `data:` URIs so
    /// generated pages need no extra asset requests; the JS snippets are
    /// embedded verbatim.
    pub static ref ASSETS: HashMap<&'static str, Cow<'static, str>> = {
        // NOTE(review): 17 entries are inserted below; consider with_capacity(17).
        let mut ass = HashMap::with_capacity(10);
        ass.insert("favicon",
                   Cow::Owned(format!("data:{};base64,{}",
                                      get_mime_type_str("ico").unwrap(),
                                      Base64Display::with_config(&include_bytes!("../../assets/favicon.ico")[..], base64::STANDARD))));
        ass.insert("dir_icon",
                   Cow::Owned(format!("data:{};base64,{}",
                                      get_mime_type_str("gif").unwrap(),
                                      Base64Display::with_config(&include_bytes!("../../assets/icons/directory.gif")[..], base64::STANDARD))));
        ass.insert("file_icon",
                   Cow::Owned(format!("data:{};base64,{}",
                                      get_mime_type_str("gif").unwrap(),
                                      Base64Display::with_config(&include_bytes!("../../assets/icons/file.gif")[..], base64::STANDARD))));
        ass.insert("file_binary_icon",
                   Cow::Owned(format!("data:{};base64,{}",
                                      get_mime_type_str("gif").unwrap(),
                                      Base64Display::with_config(&include_bytes!("../../assets/icons/file_binary.gif")[..], base64::STANDARD))));
        ass.insert("file_image_icon",
                   Cow::Owned(format!("data:{};base64,{}",
                                      get_mime_type_str("gif").unwrap(),
                                      Base64Display::with_config(&include_bytes!("../../assets/icons/file_image.gif")[..], base64::STANDARD))));
        ass.insert("file_text_icon",
                   Cow::Owned(format!("data:{};base64,{}",
                                      get_mime_type_str("gif").unwrap(),
                                      Base64Display::with_config(&include_bytes!("../../assets/icons/file_text.gif")[..], base64::STANDARD))));
        ass.insert("back_arrow_icon",
                   Cow::Owned(format!("data:{};base64,{}",
                                      get_mime_type_str("gif").unwrap(),
                                      Base64Display::with_config(&include_bytes!("../../assets/icons/back_arrow.gif")[..], base64::STANDARD))));
        ass.insert("new_dir_icon",
                   Cow::Owned(format!("data:{};base64,{}",
                                      get_mime_type_str("gif").unwrap(),
                                      Base64Display::with_config(&include_bytes!("../../assets/icons/new_directory.gif")[..], base64::STANDARD))));
        ass.insert("delete_file_icon",
                   Cow::Owned(format!("data:{};base64,{}",
                                      get_mime_type_str("png").unwrap(),
                                      Base64Display::with_config(&include_bytes!("../../assets/icons/delete_file.png")[..], base64::STANDARD))));
        ass.insert("rename_icon",
                   Cow::Owned(format!("data:{};base64,{}",
                                      get_mime_type_str("png").unwrap(),
                                      Base64Display::with_config(&include_bytes!("../../assets/icons/rename.png")[..], base64::STANDARD))));
        ass.insert("confirm_icon",
                   Cow::Owned(format!("data:{};base64,{}",
                                      get_mime_type_str("png").unwrap(),
                                      Base64Display::with_config(&include_bytes!("../../assets/icons/confirm.png")[..], base64::STANDARD))));
        // JavaScript snippets injected by name into the HTML templates.
        ass.insert("date", Cow::Borrowed(include_str!("../../assets/date.js")));
        ass.insert("manage", Cow::Borrowed(include_str!("../../assets/manage.js")));
        ass.insert("manage_mobile", Cow::Borrowed(include_str!("../../assets/manage_mobile.js")));
        ass.insert("manage_desktop", Cow::Borrowed(include_str!("../../assets/manage_desktop.js")));
        ass.insert("upload", Cow::Borrowed(include_str!("../../assets/upload.js")));
        ass.insert("adjust_tz", Cow::Borrowed(include_str!("../../assets/adjust_tz.js")));
        ass
    };
}
/// The port to start scanning from if no ports were given.
pub const PORT_SCAN_LOWEST: u16 = 8000;
/// The port to end scanning at if no ports were given.
pub const PORT_SCAN_HIGHEST: u16 = 9999;
/// The app name and version to use with User-Agent or Server response header.
pub const USER_AGENT: &str = concat!("http/", env!("CARGO_PKG_VERSION"));
/// Index file extensions to look for if `-i` was not specified and strippable extensions to look for if `-x` was specified.
pub const INDEX_EXTENSIONS: &[&str] = &["html", "htm", "shtml"];
/// The [WWW-Authenticate header](https://tools.ietf.org/html/rfc7235#section-4.1), without parsing.
///
/// We don't ever receive this header, only ever send it, so this is fine.
#[derive(Debug, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct WwwAuthenticate(pub Cow<'static, str>);
impl Header for WwwAuthenticate {
    fn header_name() -> &'static str {
        "WWW-Authenticate"
    }
    /// Dummy impl returning an empty value, since we're only ever sending these
    fn parse_header(_: &[Vec<u8>]) -> HyperResult<WwwAuthenticate> {
        Ok(WwwAuthenticate("".into()))
    }
}
impl HeaderFormat for WwwAuthenticate {
    /// Write the stored challenge string verbatim as the header value.
    fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(&self.0)
    }
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct CommaList<D: fmt::Display, I: Iterator<Item = D>>(pub I);
impl<D: fmt::Display, I: Iterator<Item = D> + Clone> fmt::Display for CommaList<D, I> {
    /// Render the items separated by ", ", with no leading or trailing separator.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Iterate over a clone so that Display (which only gets &self) can be
        // invoked any number of times without consuming the stored iterator.
        for (index, item) in self.0.clone().enumerate() {
            if index != 0 {
                f.write_str(", ")?;
            }
            item.fmt(f)?;
        }
        Ok(())
    }
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct DisplayThree<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display>(pub Df, pub Ds, pub Dt);
impl<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display> fmt::Display for DisplayThree<Df, Ds, Dt> {
    /// Render the three wrapped values back-to-back, with no separator.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}{}{}", self.0, self.1, self.2)
    }
}
/// `xml`'s `OwnedName::borrow()` returns a value not a reference, so it cannot be used with the libstd `Borrow` trait
pub trait BorrowXmlName<'n> {
    // Borrow `self` as a (value-typed) `XmlName`.
    fn borrow_xml_name(&'n self) -> XmlName<'n>;
}
impl<'n> BorrowXmlName<'n> for XmlName<'n> {
    #[inline(always)]
    fn borrow_xml_name(&'n self) -> XmlName<'n> {
        // XmlName is Copy; "borrowing" it is just a copy.
        *self
    }
}
impl<'n> BorrowXmlName<'n> for OwnedXmlName {
    #[inline(always)]
    fn borrow_xml_name(&'n self) -> XmlName<'n> {
        // Delegates to the xml crate's own by-value borrow().
        self.borrow()
    }
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct Spaces(pub usize);
impl fmt::Display for Spaces {
    /// Emit exactly `self.0` space characters.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Pad an empty string to the requested width: width-padding uses spaces
        // by default, so this writes self.0 spaces without a loop.
        write!(f, "{:width$}", "", width = self.0)
    }
}
/// Uppercase the first character of the supplied string.
///
/// Based on http://stackoverflow.com/a/38406885/2851815
///
/// # Examples
///
/// ```
/// # use https::util::uppercase_first;
/// assert_eq!(uppercase_first("abolish"), "Abolish".to_string());
/// ```
pub fn uppercase_first(s: &str) -> String {
    match s.chars().next() {
        // Uppercasing one char can yield several (e.g. 'ß' -> "SS"),
        // so extend rather than push a single char.
        Some(first) => {
            let mut out = String::with_capacity(s.len());
            out.extend(first.to_uppercase());
            out.push_str(&s[first.len_utf8()..]);
            out
        }
        None => String::new(),
    }
}
/// Percent-encode the last character if it's white space
///
/// Firefox treats, e.g. `href="http://henlo/menlo "` as `href="http://henlo/menlo"`,
/// but that final whitespace is significant, so this turns it into `href="http://henlo/menlo %20"`
pub fn encode_tail_if_trimmed(mut s: String) -> String {
let c = s.chars().rev().next();
if c.map(|c| c.is_whitespace()).unwrap_or(false) {
let c = c.unwrap();
s.pop();
s.push('%');
let mut cb = [0u8; 4];
c.encode_utf8(&mut cb);
for b in cb.iter().take(c.len_utf8()) {
write!(s, "{:02X}", b).expect("Couldn't allocate two more characters?");
}
s
} else {
s
}
}
/// %-escape special characters in an URL
pub fn escape_specials<S: AsRef<str>>(s: S) -> String {
    // Build the result as a String directly instead of a byte Vec +
    // from_utf8_unchecked: the substitutions are all ASCII-for-ASCII, so the
    // safe char-based version is equivalent and drops the `unsafe` block.
    let s = s.as_ref();
    let mut ret = String::with_capacity(s.len());
    for c in s.chars() {
        match c {
            // Characters with special meaning inside a URL are %-escaped.
            '%' => ret.push_str("%25"),
            '#' => ret.push_str("%23"),
            '?' => ret.push_str("%3F"),
            '[' => ret.push_str("%5B"),
            ']' => ret.push_str("%5D"),
            _ => ret.push(c),
        }
    }
    ret
}
/// Check if the specified file is to be considered "binary".
///
/// Basically checks is a file is UTF-8.
///
/// # Examples
///
/// ```
/// # use https::util::file_binary;
/// # #[cfg(target_os = "windows")]
/// # assert!(file_binary("target/debug/http.exe"));
/// # #[cfg(not(target_os = "windows"))]
/// assert!(file_binary("target/debug/http"));
/// assert!(!file_binary("Cargo.toml"));
/// ```
pub fn file_binary<P: AsRef<Path>>(path: P) -> bool {
file_binary_impl(path.as_ref())
}
fn file_binary_impl(path: &Path) -> bool {
path.metadata()
.map(|m| is_device(&m.file_type()) || File::open(path).and_then(|f| BufReader::new(f).read_line(&mut String::new())).is_err())
.unwrap_or(true)
}
/// Fill out an HTML template.
///
/// All fields must be addressed even if formatted to be empty.
///
/// # Examples
///
/// ```
/// # use https::util::{html_response, NOT_IMPLEMENTED_HTML};
/// println!(html_response(NOT_IMPLEMENTED_HTML, &["<p>Abolish the burgeoisie!</p>"]));
/// ```
pub fn html_response<S: AsRef<str>>(data: &str, format_strings: &[S]) -> String {
ASSETS.iter().fold(format_strings.iter().enumerate().fold(data.to_string(), |d, (i, s)| d.replace(&format!("{{{}}}", i), s.as_ref())),
|d, (k, v)| d.replace(&format!("{{{}}}", k), v))
}
/// Return the path part of the URL.
///
/// # Example
///
/// ```
/// # extern crate iron;
/// # extern crate https;
/// # use iron::Url;
/// # use https::util::url_path;
/// let url = Url::parse("127.0.0.1:8000/capitalism/русский/");
/// assert_eq!(url_path(&url), "capitalism/русский/");
/// ```
pub fn url_path(url: &Url) -> String {
let path = url.path();
if path == [""] {
"/".to_string()
} else {
path.into_iter().fold("".to_string(),
|cur, pp| format!("{}/{}", cur, percent_decode(pp).unwrap_or(Cow::Borrowed("<incorrect UTF8>"))))
[1..]
.to_string()
}
}
/// Decode a percent-encoded string (like a part of a URL).
///
/// # Example
///
/// ```
/// # use https::util::percent_decode;
/// # use std::borrow::Cow;
/// assert_eq!(percent_decode("%D0%B0%D1%81%D0%B4%D1%84%20fdsa"), Some(Cow::Owned("асдф fdsa".to_string())));
/// assert_eq!(percent_decode("%D0%D1%81%D0%B4%D1%84%20fdsa"), None);
/// ```
pub fn percent_decode(s: &str) -> Option<Cow<str>> {
percent_encoding::percent_decode(s.as_bytes()).decode_utf8().ok()
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified_p(f: &Path) -> Tm {
file_time_modified(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_created_p(f: &Path) -> Tm {
file_time_created(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed_p(f: &Path) -> Tm {
file_time_accessed(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified(m: &Metadata) -> Tm {
file_time_impl(m.modified().expect("Failed to get file last modified date"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_created(m: &Metadata) -> Tm {
file_time_impl(m.created().or_else(|_| m.modified()).expect("Failed to get file created date"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed(m: &Metadata) -> Tm {
file_time_impl(m.accessed().expect("Failed to get file accessed date"))
}
fn file_time_impl(time: SystemTime) -> Tm {
match time.elapsed() {
Ok(dur) => time::now_utc() - Duration::from_std(dur).unwrap(),
Err(ste) => time::now_utc() + Duration::from_std(ste.duration()).unwrap(),
}
}
/// Check, whether, in any place of the path, a file is treated like a directory.
///
/// A file is treated like a directory when it is treated as if it had a subpath, e.g., given:
///
/// ```sh
/// tree.
/// | dir0
/// | dir1
/// | file01
/// ```
///
/// This function would return true for `./dir1/file01/file010`, `./dir1/file01/dir010/file0100`, etc., but not
/// for `./dir0/file00`, `./dir0/dir00/file000`, `./dir1/file02/`, `./dir1/dir010/file0100`.
pub fn detect_file_as_dir(mut p: &Path) -> bool {
while let Some(pnt) = p.parent() {
if pnt.is_file() {
return true;
}
p = pnt;
}
false
}
/// Check if a path refers to a symlink in a way that also works on Windows.
pub fn is_symlink<P: AsRef<Path>>(p: P) -> bool {
p.as_ref().read_link().is_ok()
}
/// Check if a path refers to a file in a way that includes Unix devices and Windows symlinks.
pub fn is_actually_file<P: AsRef<Path>>(tp: &FileType, p: P) -> bool {
tp.is_file() || (tp.is_symlink() && fs::metadata(p).map(|m| is_actually_file(&m.file_type(), "")).unwrap_or(false)) || is_device(tp)
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path.
pub fn is_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
let (mut who, of_whom) = if let Ok(p) = fs::canonicalize(who).and_then(|w| fs::canonicalize(of_whom).map(|o| (w, o))) {
p
} else {
return f | f_whom {
return true;
}
while let Some(who_p) = who.parent().map(|p| p.to_path_buf()) {
who = who_p;
if who == of_whom {
return true;
}
}
false
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path, without without requiring it to
/// exist in the first place.
pub fn is_nonexistent_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
let mut who = fs::canonicalize(&who).unwrap_or_else(|_| who.as_ref().to_path_buf());
let of_whom = if let Ok(p) = fs::canonicalize(of_whom) {
p
} else {
return false;
};
if who == of_whom {
return true;
}
while let Some(who_p) = who.parent().map(|p| p.to_path_buf()) {
who = if let Ok(p) = fs::canonicalize(&who_p) {
p
} else {
who_p
};
if who == of_whom {
return true;
}
}
false
}
/// Construct string representing a human-readable size.
///
/// Stolen, adapted and inlined from [fielsize.js](http://filesizejs.com).
pub fn human_readable_size(s: u64) -> String {
lazy_static! {
static ref LN_KIB: f64 = 1024f64.log(f64::consts::E);
}
if s == 0 {
"0 B".to_string()
} else {
let num = s as f64;
let exp = cmp::min(cmp::max((num.log(f64::consts::E) / *LN_KIB) as i32, 0), 8);
let val = num / 2f64.powi(exp * 10);
if exp > 0 {
(val * 10f64).round() / 10f64
} else {
val.round()
}
.to_string() + " " + ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"][cmp::max(exp, 0) as usize]
}
}
/// Check if, given the request headers, the client should be considered a mobile device.
pub fn client_mobile(hdr: &Headers) -> bool {
hdr.get::<UserAgent>().map(|s| s.contains("Mobi") || s.contains("mobi")).unwrap_or(false)
}
/// Check if, given the request headers, the client should be treated as Microsoft software.
///
/// Based on https://github.com/miquels/webdav-handler-rs/blob/02433c1acfccd848a7de26889f6857cbad559076/src/handle_props.rs#L529
pub fn client_microsoft(hdr: &Headers) -> bool {
hdr.get::<UserAgent>().map(|s| s.contains("Microsoft") || s.contains("microsoft")).unwrap_or(false)
}
/// Get the suffix for the icon to use to represent the given file.
pub fn file_icon_suffix<P: AsRef<Path>>(f: P, is_file: bool) -> &'static str {
if is_file {
match guess_mime_type_opt(&f) {
Some(mime::Mime(mime::TopLevel::Image,..)) |
Some(mime::Mime(mime::TopLevel::Video,..)) => "_image",
Some(mime::Mime(mime::TopLevel::Text,..)) => "_text",
Some(mime::Mime(mime::TopLevel::Application,..)) => "_binary",
None => if file_binary(&f) { "" } else { "_text" },
_ => "",
}
} else {
""
}
}
/// Get the metadata of the specified file.
///
/// The specified path must point to a file.
pub fn get_raw_fs_metadata<P: AsRef<Path>>(f: P) -> RawFileData {
get_raw_fs_metadata_impl(f.as_ref())
}
fn get_raw_fs_metadata_impl(f: &Path) -> RawFileData {
let meta = f.metadata().expect("Failed to get requested file metadata");
RawFileData {
mime_type: guess_mime_type_opt(f).unwrap_or_else(|| if file_binary(f) {
"application/octet-stream".parse().unwrap()
} else {
"text/plain".parse().unwrap()
}),
name: f.file_name().unwrap().to_str().expect("Failed to get requested file name").to_string(),
last_modified: file_time_modified(&meta),
size: file_length(&meta, &f),
is_file: true,
}
}
/// Recursively copy a directory
///
/// Stolen from https://github.com/mdunsmuir/copy_dir/blob/0.1.2/src/lib.rs
pub fn copy_dir(from: &Path, to: &Path) -> IoResult<Vec<(IoError, String)>> {
macro_rules! push_error {
($vec:ident, $path:ident, $expr:expr) => {
match $expr {
Ok(_) => (),
Err(e) => $vec.push((e, $path.to_string_lossy().into_owned())),
}
};
}
let mut errors = Vec::new();
fs::create_dir(&to)?;
// The approach taken by this code (i.e. walkdir) will not gracefully
// handle copying a directory into itself, so we're going to simply
// disallow it by checking the paths. This is a thornier problem than I
// wish it was, and I'd like to find a better solution, but for now I
// would prefer to return an error rather than having the copy blow up
// in users' faces. Ultimately I think a solution to this will involve
// not using walkdir at all, and might come along with better handling
// of hard links.
if from.canonicalize().and_then(|fc| to.canonicalize().map(|tc| (fc, tc))).map(|(fc, tc)| tc.starts_with(fc))? {
fs::remove_dir(&to)?;
return Err(IoError::new(IoErrorKind::Other, "cannot copy to a path prefixed by the source path"));
| alse;
};
if who == o | conditional_block |
mod.rs | headers::{HeaderFormat, UserAgent, Header};
use mime_guess::{guess_mime_type_opt, get_mime_type_str};
use xml::name::{OwnedName as OwnedXmlName, Name as XmlName};
use std::io::{ErrorKind as IoErrorKind, BufReader, BufRead, Result as IoResult, Error as IoError};
pub use self::os::*;
pub use self::webdav::*;
pub use self::content_encoding::*;
/// The generic HTML page to use as response to errors.
pub const ERROR_HTML: &str = include_str!("../../assets/error.html");
/// The HTML page to use as template for a requested directory's listing.
pub const DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing.html");
/// The HTML page to use as template for a requested directory's listing for mobile devices.
pub const MOBILE_DIRECTORY_LISTING_HTML: &str = include_str!("../../assets/directory_listing_mobile.html");
lazy_static! {
/// Collection of data to be injected into generated responses.
pub static ref ASSETS: HashMap<&'static str, Cow<'static, str>> = {
let mut ass = HashMap::with_capacity(10);
ass.insert("favicon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("ico").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/favicon.ico")[..], base64::STANDARD))));
ass.insert("dir_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/directory.gif")[..], base64::STANDARD))));
ass.insert("file_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file.gif")[..], base64::STANDARD))));
ass.insert("file_binary_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_binary.gif")[..], base64::STANDARD))));
ass.insert("file_image_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_image.gif")[..], base64::STANDARD))));
ass.insert("file_text_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/file_text.gif")[..], base64::STANDARD))));
ass.insert("back_arrow_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/back_arrow.gif")[..], base64::STANDARD))));
ass.insert("new_dir_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("gif").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/new_directory.gif")[..], base64::STANDARD))));
ass.insert("delete_file_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/delete_file.png")[..], base64::STANDARD))));
ass.insert("rename_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/rename.png")[..], base64::STANDARD))));
ass.insert("confirm_icon",
Cow::Owned(format!("data:{};base64,{}",
get_mime_type_str("png").unwrap(),
Base64Display::with_config(&include_bytes!("../../assets/icons/confirm.png")[..], base64::STANDARD))));
ass.insert("date", Cow::Borrowed(include_str!("../../assets/date.js")));
ass.insert("manage", Cow::Borrowed(include_str!("../../assets/manage.js")));
ass.insert("manage_mobile", Cow::Borrowed(include_str!("../../assets/manage_mobile.js")));
ass.insert("manage_desktop", Cow::Borrowed(include_str!("../../assets/manage_desktop.js")));
ass.insert("upload", Cow::Borrowed(include_str!("../../assets/upload.js")));
ass.insert("adjust_tz", Cow::Borrowed(include_str!("../../assets/adjust_tz.js")));
ass
};
}
/// The port to start scanning from if no ports were given.
pub const PORT_SCAN_LOWEST: u16 = 8000;
/// The port to end scanning at if no ports were given.
pub const PORT_SCAN_HIGHEST: u16 = 9999;
/// The app name and version to use with User-Agent or Server response header.
pub const USER_AGENT: &str = concat!("http/", env!("CARGO_PKG_VERSION"));
/// Index file extensions to look for if `-i` was not specified and strippable extensions to look for if `-x` was specified.
pub const INDEX_EXTENSIONS: &[&str] = &["html", "htm", "shtml"];
/// The [WWW-Authenticate header](https://tools.ietf.org/html/rfc7235#section-4.1), without parsing.
///
/// We don't ever receive this header, only ever send it, so this is fine.
#[derive(Debug, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct WwwAuthenticate(pub Cow<'static, str>);
impl Header for WwwAuthenticate {
fn header_name() -> &'static str {
"WWW-Authenticate"
}
/// Dummy impl returning an empty value, since we're only ever sending these
fn parse_header(_: &[Vec<u8>]) -> HyperResult<WwwAuthenticate> {
Ok(WwwAuthenticate("".into()))
}
}
impl HeaderFormat for WwwAuthenticate {
fn fmt_header(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&self.0)
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct CommaList<D: fmt::Display, I: Iterator<Item = D>>(pub I);
impl<D: fmt::Display, I: Iterator<Item = D> + Clone> fmt::Display for CommaList<D, I> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut itr = self.0.clone();
if let Some(item) = itr.next() {
item.fmt(f)?;
for item in itr {
f.write_str(", ")?;
item.fmt(f)?;
}
}
Ok(())
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct DisplayThree<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display>(pub Df, pub Ds, pub Dt);
impl<Df: fmt::Display, Ds: fmt::Display, Dt: fmt::Display> fmt::Display for DisplayThree<Df, Ds, Dt> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)?;
self.1.fmt(f)?;
self.2.fmt(f)?;
Ok(())
}
}
/// `xml`'s `OwnedName::borrow()` returns a value not a reference, so it cannot be used with the libstd `Borrow` trait
pub trait BorrowXmlName<'n> {
fn borrow_xml_name(&'n self) -> XmlName<'n>;
}
impl<'n> BorrowXmlName<'n> for XmlName<'n> {
#[inline(always)]
fn borrow_xml_name(&'n self) -> XmlName<'n> {
*self
}
}
impl<'n> BorrowXmlName<'n> for OwnedXmlName {
#[inline(always)]
fn borrow_xml_name(&'n self) -> XmlName<'n> {
self.borrow()
}
}
#[derive(Debug, Copy, Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
pub struct Spaces(pub usize);
impl fmt::Display for Spaces {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for _ in 0..self.0 {
f.write_char(' ')?;
}
Ok(())
}
}
/// Uppercase the first character of the supplied string.
///
/// Based on http://stackoverflow.com/a/38406885/2851815
///
/// # Examples
///
/// ```
/// # use https::util::uppercase_first;
/// assert_eq!(uppercase_first("abolish"), "Abolish".to_string());
/// ```
pub fn uppercase_first(s: &str) -> String {
let mut c = s.chars();
match c.next() {
None => String::new(),
Some(f) => f.to_uppercase().collect::<String>() + c.as_str(),
}
}
/// Percent-encode the last character if it's white space
///
/// Firefox treats, e.g. `href="http://henlo/menlo "` as `href="http://henlo/menlo"`,
/// but that final whitespace is significant, so this turns it into `href="http://henlo/menlo %20"`
pub fn encode_tail_if_trimmed(mut s: String) -> String {
let c = s.chars().rev().next();
if c.map(|c| c.is_whitespace()).unwrap_or(false) {
let c = c.unwrap();
s.pop();
s.push('%');
let mut cb = [0u8; 4];
c.encode_utf8(&mut cb);
for b in cb.iter().take(c.len_utf8()) {
write!(s, "{:02X}", b).expect("Couldn't allocate two more characters?");
}
s
} else {
s
}
}
/// %-escape special characters in an URL
pub fn escape_specials<S: AsRef<str>>(s: S) -> String {
let s = s.as_ref();
let mut ret = Vec::with_capacity(s.len());
for &b in s.as_bytes() {
match b {
b'%' => ret.extend(b"%25"),
b'#' => ret.extend(b"%23"),
b'?' => ret.extend(b"%3F"),
b'[' => ret.extend(b"%5B"),
b']' => ret.extend(b"%5D"),
_ => ret.push(b),
}
}
unsafe { String::from_utf8_unchecked(ret) }
}
/// Check if the specified file is to be considered "binary".
///
/// Basically checks is a file is UTF-8.
///
/// # Examples
///
/// ```
/// # use https::util::file_binary;
/// # #[cfg(target_os = "windows")]
/// # assert!(file_binary("target/debug/http.exe"));
/// # #[cfg(not(target_os = "windows"))]
/// assert!(file_binary("target/debug/http"));
/// assert!(!file_binary("Cargo.toml"));
/// ```
pub fn file_binary<P: AsRef<Path>>(path: P) -> bool {
file_binary_impl(path.as_ref())
}
fn file_binary_impl(path: &Path) -> bool {
path.metadata()
.map(|m| is_device(&m.file_type()) || File::open(path).and_then(|f| BufReader::new(f).read_line(&mut String::new())).is_err())
.unwrap_or(true)
}
/// Fill out an HTML template.
///
/// All fields must be addressed even if formatted to be empty.
///
/// # Examples
///
/// ```
/// # use https::util::{html_response, NOT_IMPLEMENTED_HTML};
/// println!(html_response(NOT_IMPLEMENTED_HTML, &["<p>Abolish the burgeoisie!</p>"]));
/// ```
pub fn html_response<S: AsRef<str>>(data: &str, format_strings: &[S]) -> String {
ASSETS.iter().fold(format_strings.iter().enumerate().fold(data.to_string(), |d, (i, s)| d.replace(&format!("{{{}}}", i), s.as_ref())),
|d, (k, v)| d.replace(&format!("{{{}}}", k), v))
}
/// Return the path part of the URL.
///
/// # Example
///
/// ```
/// # extern crate iron;
/// # extern crate https;
/// # use iron::Url;
/// # use https::util::url_path;
/// let url = Url::parse("127.0.0.1:8000/capitalism/русский/");
/// assert_eq!(url_path(&url), "capitalism/русский/");
/// ```
pub fn url_path(url: &Url) -> String {
let path = url.path();
if path == [""] {
"/".to_string()
} else {
path.into_iter().fold("".to_string(),
|cur, pp| format!("{}/{}", cur, percent_decode(pp).unwrap_or(Cow::Borrowed("<incorrect UTF8>"))))
[1..]
.to_string()
}
}
/// Decode a percent-encoded string (like a part of a URL).
///
/// # Example
///
/// ```
/// # use https::util::percent_decode;
/// # use std::borrow::Cow;
/// assert_eq!(percent_decode("%D0%B0%D1%81%D0%B4%D1%84%20fdsa"), Some(Cow::Owned("асдф fdsa".to_string())));
/// assert_eq!(percent_decode("%D0%D1%81%D0%B4%D1%84%20fdsa"), None);
/// ```
pub fn percent_decode(s: &str) -> Option<Cow<str>> {
percent_encoding::percent_decode(s.as_bytes()).decode_utf8().ok()
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified_p(f: &Path) -> Tm {
file_time_modified(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_created_p(f: &Path) -> Tm {
file_time_created(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed_p(f: &Path) -> Tm {
file_time_accessed(&f.metadata().expect("Failed to get file metadata"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_modified(m: &Metadata) -> Tm {
file_time_impl(m.modified().expect("Failed to get file last modified date"))
}
/// Get the timestamp of the file's last modification as a `time::Tm` in UTC.
pub fn file_time_created(m: &Metadata) -> Tm {
file_time_impl(m.created().or_else(|_| m.modified()).expect("Failed to get file created date"))
}
/// Get the timestamp of the file's last access as a `time::Tm` in UTC.
pub fn file_time_accessed(m: &Metadata) -> Tm {
file_time_impl(m.accessed().expect("Failed to get file accessed date"))
}
fn file_time_impl(tim | -> Tm {
match time.elapsed() {
Ok(dur) => time::now_utc() - Duration::from_std(dur).unwrap(),
Err(ste) => time::now_utc() + Duration::from_std(ste.duration()).unwrap(),
}
}
/// Check, whether, in any place of the path, a file is treated like a directory.
///
/// A file is treated like a directory when it is treated as if it had a subpath, e.g., given:
///
/// ```sh
/// tree.
/// | dir0
/// | dir1
/// | file01
/// ```
///
/// This function would return true for `./dir1/file01/file010`, `./dir1/file01/dir010/file0100`, etc., but not
/// for `./dir0/file00`, `./dir0/dir00/file000`, `./dir1/file02/`, `./dir1/dir010/file0100`.
pub fn detect_file_as_dir(mut p: &Path) -> bool {
while let Some(pnt) = p.parent() {
if pnt.is_file() {
return true;
}
p = pnt;
}
false
}
/// Check if a path refers to a symlink in a way that also works on Windows.
pub fn is_symlink<P: AsRef<Path>>(p: P) -> bool {
p.as_ref().read_link().is_ok()
}
/// Check if a path refers to a file in a way that includes Unix devices and Windows symlinks.
pub fn is_actually_file<P: AsRef<Path>>(tp: &FileType, p: P) -> bool {
tp.is_file() || (tp.is_symlink() && fs::metadata(p).map(|m| is_actually_file(&m.file_type(), "")).unwrap_or(false)) || is_device(tp)
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path.
pub fn is_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
let (mut who, of_whom) = if let Ok(p) = fs::canonicalize(who).and_then(|w| fs::canonicalize(of_whom).map(|o| (w, o))) {
p
} else {
return false;
};
if who == of_whom {
return true;
}
while let Some(who_p) = who.parent().map(|p| p.to_path_buf()) {
who = who_p;
if who == of_whom {
return true;
}
}
false
}
/// Check if the specified path is a direct descendant (or an equal) of the specified path, without without requiring it to
/// exist in the first place.
pub fn is_nonexistent_descendant_of<Pw: AsRef<Path>, Po: AsRef<Path>>(who: Pw, of_whom: Po) -> bool {
let mut who = fs::canonicalize(&who).unwrap_or_else(|_| who.as_ref().to_path_buf());
let of_whom = if let Ok(p) = fs::canonicalize(of_whom) {
p
} else {
return false;
};
if who == of_whom {
return true;
}
while let Some(who_p) = who.parent().map(|p| p.to_path_buf()) {
who = if let Ok(p) = fs::canonicalize(&who_p) {
p
} else {
who_p
};
if who == of_whom {
return true;
}
}
false
}
/// Construct string representing a human-readable size.
///
/// Stolen, adapted and inlined from [fielsize.js](http://filesizejs.com).
pub fn human_readable_size(s: u64) -> String {
lazy_static! {
static ref LN_KIB: f64 = 1024f64.log(f64::consts::E);
}
if s == 0 {
"0 B".to_string()
} else {
let num = s as f64;
let exp = cmp::min(cmp::max((num.log(f64::consts::E) / *LN_KIB) as i32, 0), 8);
let val = num / 2f64.powi(exp * 10);
if exp > 0 {
(val * 10f64).round() / 10f64
} else {
val.round()
}
.to_string() + " " + ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"][cmp::max(exp, 0) as usize]
}
}
/// Check if, given the request headers, the client should be considered a mobile device.
pub fn client_mobile(hdr: &Headers) -> bool {
hdr.get::<UserAgent>().map(|s| s.contains("Mobi") || s.contains("mobi")).unwrap_or(false)
}
/// Check if, given the request headers, the client should be treated as Microsoft software.
///
/// Based on https://github.com/miquels/webdav-handler-rs/blob/02433c1acfccd848a7de26889f6857cbad559076/src/handle_props.rs#L529
pub fn client_microsoft(hdr: &Headers) -> bool {
hdr.get::<UserAgent>().map(|s| s.contains("Microsoft") || s.contains("microsoft")).unwrap_or(false)
}
/// Get the suffix for the icon to use to represent the given file.
pub fn file_icon_suffix<P: AsRef<Path>>(f: P, is_file: bool) -> &'static str {
if is_file {
match guess_mime_type_opt(&f) {
Some(mime::Mime(mime::TopLevel::Image,..)) |
Some(mime::Mime(mime::TopLevel::Video,..)) => "_image",
Some(mime::Mime(mime::TopLevel::Text,..)) => "_text",
Some(mime::Mime(mime::TopLevel::Application,..)) => "_binary",
None => if file_binary(&f) { "" } else { "_text" },
_ => "",
}
} else {
""
}
}
/// Get the metadata of the specified file.
///
/// The specified path must point to a file.
pub fn get_raw_fs_metadata<P: AsRef<Path>>(f: P) -> RawFileData {
get_raw_fs_metadata_impl(f.as_ref())
}
fn get_raw_fs_metadata_impl(f: &Path) -> RawFileData {
let meta = f.metadata().expect("Failed to get requested file metadata");
RawFileData {
mime_type: guess_mime_type_opt(f).unwrap_or_else(|| if file_binary(f) {
"application/octet-stream".parse().unwrap()
} else {
"text/plain".parse().unwrap()
}),
name: f.file_name().unwrap().to_str().expect("Failed to get requested file name").to_string(),
last_modified: file_time_modified(&meta),
size: file_length(&meta, &f),
is_file: true,
}
}
/// Recursively copy a directory
///
/// Stolen from https://github.com/mdunsmuir/copy_dir/blob/0.1.2/src/lib.rs
pub fn copy_dir(from: &Path, to: &Path) -> IoResult<Vec<(IoError, String)>> {
macro_rules! push_error {
($vec:ident, $path:ident, $expr:expr) => {
match $expr {
Ok(_) => (),
Err(e) => $vec.push((e, $path.to_string_lossy().into_owned())),
}
};
}
let mut errors = Vec::new();
fs::create_dir(&to)?;
// The approach taken by this code (i.e. walkdir) will not gracefully
// handle copying a directory into itself, so we're going to simply
// disallow it by checking the paths. This is a thornier problem than I
// wish it was, and I'd like to find a better solution, but for now I
// would prefer to return an error rather than having the copy blow up
// in users' faces. Ultimately I think a solution to this will involve
// not using walkdir at all, and might come along with better handling
// of hard links.
if from.canonicalize().and_then(|fc| to.canonicalize().map(|tc| (fc, tc))).map(|(fc, tc)| tc.starts_with(fc))? {
fs::remove_dir(&to)?;
return Err(IoError::new(IoErrorKind::Other, "cannot copy to a path prefixed by the source path"));
| e: SystemTime) | identifier_name |
record-pat.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum t1 { a(int), b(uint), }
struct | {x: t1, y: int}
enum t3 { c(T2, uint), }
fn m(in: t3) -> int {
match in {
c(T2 {x: a(m), _}, _) => { return m; }
c(T2 {x: b(m), y: y}, z) => { return ((m + z) as int) + y; }
}
}
pub fn main() {
assert_eq!(m(c(T2 {x: a(10), y: 5}, 4u)), 10);
assert_eq!(m(c(T2 {x: b(10u), y: 5}, 4u)), 19);
}
| T2 | identifier_name |
record-pat.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum t1 { a(int), b(uint), }
struct T2 {x: t1, y: int}
enum t3 { c(T2, uint), }
fn m(in: t3) -> int {
match in {
c(T2 {x: a(m), _}, _) => |
c(T2 {x: b(m), y: y}, z) => { return ((m + z) as int) + y; }
}
}
pub fn main() {
assert_eq!(m(c(T2 {x: a(10), y: 5}, 4u)), 10);
assert_eq!(m(c(T2 {x: b(10u), y: 5}, 4u)), 19);
}
| { return m; } | conditional_block |
record-pat.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum t1 { a(int), b(uint), }
struct T2 {x: t1, y: int}
enum t3 { c(T2, uint), }
fn m(in: t3) -> int {
match in {
c(T2 {x: a(m), _}, _) => { return m; } | assert_eq!(m(c(T2 {x: a(10), y: 5}, 4u)), 10);
assert_eq!(m(c(T2 {x: b(10u), y: 5}, 4u)), 19);
} | c(T2 {x: b(m), y: y}, z) => { return ((m + z) as int) + y; }
}
}
pub fn main() { | random_line_split |
length_limit.rs | //! See [`HtmlWithLimit`].
use std::fmt::Write;
use std::ops::ControlFlow;
use crate::html::escape::Escape;
/// A buffer that allows generating HTML with a length limit.
///
/// This buffer ensures that:
///
/// * all tags are closed,
/// * tags are closed in the reverse order of when they were opened (i.e., the correct HTML order),
/// * no tags are left empty (e.g., `<em></em>`) due to the length limit being reached,
/// * all text is escaped.
#[derive(Debug)]
pub(super) struct HtmlWithLimit {
buf: String,
len: usize,
limit: usize,
/// A list of tags that have been requested to be opened via [`Self::open_tag()`]
/// but have not actually been pushed to `buf` yet. This ensures that tags are not
/// left empty (e.g., `<em></em>`) due to the length limit being reached.
queued_tags: Vec<&'static str>,
/// A list of all tags that have been opened but not yet closed.
unclosed_tags: Vec<&'static str>,
}
impl HtmlWithLimit {
/// Create a new buffer, with a limit of `length_limit`.
pub(super) fn new(length_limit: usize) -> Self {
let buf = if length_limit > 1000 {
// If the length limit is really large, don't preallocate tons of memory.
String::new()
} else {
// The length limit is actually a good heuristic for initial allocation size.
// Measurements showed that using it as the initial capacity ended up using less memory
// than `String::new`.
// See https://github.com/rust-lang/rust/pull/88173#discussion_r692531631 for more.
String::with_capacity(length_limit)
};
Self {
buf,
len: 0,
limit: length_limit,
unclosed_tags: Vec::new(),
queued_tags: Vec::new(),
}
}
/// Finish using the buffer and get the written output.
/// This function will close all unclosed tags for you.
pub(super) fn finish(mut self) -> String {
self.close_all_tags();
self.buf
}
/// Write some plain text to the buffer, escaping as needed.
///
/// This function skips writing the text if the length limit was reached
/// and returns [`ControlFlow::Break`].
pub(super) fn push(&mut self, text: &str) -> ControlFlow<(), ()> |
/// Open an HTML tag.
///
/// **Note:** HTML attributes have not yet been implemented.
/// This function will panic if called with a non-alphabetic `tag_name`.
pub(super) fn open_tag(&mut self, tag_name: &'static str) {
assert!(
tag_name.chars().all(|c| ('a'..='z').contains(&c)),
"tag_name contained non-alphabetic chars: {:?}",
tag_name
);
self.queued_tags.push(tag_name);
}
/// Close the most recently opened HTML tag.
pub(super) fn close_tag(&mut self) {
match self.unclosed_tags.pop() {
// Close the most recently opened tag.
Some(tag_name) => write!(self.buf, "</{}>", tag_name).unwrap(),
// There are valid cases where `close_tag()` is called without
// there being any tags to close. For example, this occurs when
// a tag is opened after the length limit is exceeded;
// `flush_queue()` will never be called, and thus, the tag will
// not end up being added to `unclosed_tags`.
None => {}
}
}
/// Write all queued tags and add them to the `unclosed_tags` list.
fn flush_queue(&mut self) {
for tag_name in self.queued_tags.drain(..) {
write!(self.buf, "<{}>", tag_name).unwrap();
self.unclosed_tags.push(tag_name);
}
}
/// Close all unclosed tags.
fn close_all_tags(&mut self) {
while!self.unclosed_tags.is_empty() {
self.close_tag();
}
}
}
#[cfg(test)]
mod tests;
| {
if self.len + text.len() > self.limit {
return ControlFlow::BREAK;
}
self.flush_queue();
write!(self.buf, "{}", Escape(text)).unwrap();
self.len += text.len();
ControlFlow::CONTINUE
} | identifier_body |
length_limit.rs | //! See [`HtmlWithLimit`].
use std::fmt::Write;
use std::ops::ControlFlow;
use crate::html::escape::Escape;
/// A buffer that allows generating HTML with a length limit.
///
/// This buffer ensures that:
///
/// * all tags are closed,
/// * tags are closed in the reverse order of when they were opened (i.e., the correct HTML order),
/// * no tags are left empty (e.g., `<em></em>`) due to the length limit being reached,
/// * all text is escaped.
#[derive(Debug)]
pub(super) struct HtmlWithLimit {
buf: String,
len: usize,
limit: usize,
/// A list of tags that have been requested to be opened via [`Self::open_tag()`]
/// but have not actually been pushed to `buf` yet. This ensures that tags are not
/// left empty (e.g., `<em></em>`) due to the length limit being reached.
queued_tags: Vec<&'static str>,
/// A list of all tags that have been opened but not yet closed.
unclosed_tags: Vec<&'static str>,
}
impl HtmlWithLimit {
/// Create a new buffer, with a limit of `length_limit`.
pub(super) fn new(length_limit: usize) -> Self {
let buf = if length_limit > 1000 {
// If the length limit is really large, don't preallocate tons of memory.
String::new()
} else {
// The length limit is actually a good heuristic for initial allocation size.
// Measurements showed that using it as the initial capacity ended up using less memory
// than `String::new`.
// See https://github.com/rust-lang/rust/pull/88173#discussion_r692531631 for more.
String::with_capacity(length_limit)
};
Self {
buf,
len: 0,
limit: length_limit,
unclosed_tags: Vec::new(),
queued_tags: Vec::new(),
}
}
/// Finish using the buffer and get the written output.
/// This function will close all unclosed tags for you.
pub(super) fn finish(mut self) -> String {
self.close_all_tags();
self.buf
}
/// Write some plain text to the buffer, escaping as needed.
///
/// This function skips writing the text if the length limit was reached
/// and returns [`ControlFlow::Break`].
pub(super) fn push(&mut self, text: &str) -> ControlFlow<(), ()> {
if self.len + text.len() > self.limit {
return ControlFlow::BREAK;
}
self.flush_queue();
write!(self.buf, "{}", Escape(text)).unwrap();
self.len += text.len();
ControlFlow::CONTINUE
}
/// Open an HTML tag.
///
/// **Note:** HTML attributes have not yet been implemented.
/// This function will panic if called with a non-alphabetic `tag_name`.
pub(super) fn open_tag(&mut self, tag_name: &'static str) {
assert!(
tag_name.chars().all(|c| ('a'..='z').contains(&c)),
"tag_name contained non-alphabetic chars: {:?}",
tag_name
);
self.queued_tags.push(tag_name);
}
/// Close the most recently opened HTML tag.
pub(super) fn close_tag(&mut self) {
match self.unclosed_tags.pop() {
// Close the most recently opened tag.
Some(tag_name) => write!(self.buf, "</{}>", tag_name).unwrap(),
// There are valid cases where `close_tag()` is called without
// there being any tags to close. For example, this occurs when | // a tag is opened after the length limit is exceeded;
// `flush_queue()` will never be called, and thus, the tag will
// not end up being added to `unclosed_tags`.
None => {}
}
}
/// Write all queued tags and add them to the `unclosed_tags` list.
fn flush_queue(&mut self) {
for tag_name in self.queued_tags.drain(..) {
write!(self.buf, "<{}>", tag_name).unwrap();
self.unclosed_tags.push(tag_name);
}
}
/// Close all unclosed tags.
fn close_all_tags(&mut self) {
while!self.unclosed_tags.is_empty() {
self.close_tag();
}
}
}
#[cfg(test)]
mod tests; | random_line_split |
|
length_limit.rs | //! See [`HtmlWithLimit`].
use std::fmt::Write;
use std::ops::ControlFlow;
use crate::html::escape::Escape;
/// A buffer that allows generating HTML with a length limit.
///
/// This buffer ensures that:
///
/// * all tags are closed,
/// * tags are closed in the reverse order of when they were opened (i.e., the correct HTML order),
/// * no tags are left empty (e.g., `<em></em>`) due to the length limit being reached,
/// * all text is escaped.
#[derive(Debug)]
pub(super) struct HtmlWithLimit {
buf: String,
len: usize,
limit: usize,
/// A list of tags that have been requested to be opened via [`Self::open_tag()`]
/// but have not actually been pushed to `buf` yet. This ensures that tags are not
/// left empty (e.g., `<em></em>`) due to the length limit being reached.
queued_tags: Vec<&'static str>,
/// A list of all tags that have been opened but not yet closed.
unclosed_tags: Vec<&'static str>,
}
impl HtmlWithLimit {
/// Create a new buffer, with a limit of `length_limit`.
pub(super) fn new(length_limit: usize) -> Self {
let buf = if length_limit > 1000 {
// If the length limit is really large, don't preallocate tons of memory.
String::new()
} else {
// The length limit is actually a good heuristic for initial allocation size.
// Measurements showed that using it as the initial capacity ended up using less memory
// than `String::new`.
// See https://github.com/rust-lang/rust/pull/88173#discussion_r692531631 for more.
String::with_capacity(length_limit)
};
Self {
buf,
len: 0,
limit: length_limit,
unclosed_tags: Vec::new(),
queued_tags: Vec::new(),
}
}
/// Finish using the buffer and get the written output.
/// This function will close all unclosed tags for you.
pub(super) fn finish(mut self) -> String {
self.close_all_tags();
self.buf
}
/// Write some plain text to the buffer, escaping as needed.
///
/// This function skips writing the text if the length limit was reached
/// and returns [`ControlFlow::Break`].
pub(super) fn push(&mut self, text: &str) -> ControlFlow<(), ()> {
if self.len + text.len() > self.limit {
return ControlFlow::BREAK;
}
self.flush_queue();
write!(self.buf, "{}", Escape(text)).unwrap();
self.len += text.len();
ControlFlow::CONTINUE
}
/// Open an HTML tag.
///
/// **Note:** HTML attributes have not yet been implemented.
/// This function will panic if called with a non-alphabetic `tag_name`.
pub(super) fn open_tag(&mut self, tag_name: &'static str) {
assert!(
tag_name.chars().all(|c| ('a'..='z').contains(&c)),
"tag_name contained non-alphabetic chars: {:?}",
tag_name
);
self.queued_tags.push(tag_name);
}
/// Close the most recently opened HTML tag.
pub(super) fn close_tag(&mut self) {
match self.unclosed_tags.pop() {
// Close the most recently opened tag.
Some(tag_name) => write!(self.buf, "</{}>", tag_name).unwrap(),
// There are valid cases where `close_tag()` is called without
// there being any tags to close. For example, this occurs when
// a tag is opened after the length limit is exceeded;
// `flush_queue()` will never be called, and thus, the tag will
// not end up being added to `unclosed_tags`.
None => {}
}
}
/// Write all queued tags and add them to the `unclosed_tags` list.
fn flush_queue(&mut self) {
for tag_name in self.queued_tags.drain(..) {
write!(self.buf, "<{}>", tag_name).unwrap();
self.unclosed_tags.push(tag_name);
}
}
/// Close all unclosed tags.
fn | (&mut self) {
while!self.unclosed_tags.is_empty() {
self.close_tag();
}
}
}
#[cfg(test)]
mod tests;
| close_all_tags | identifier_name |
tuple_impl.rs | //! Some iterator that produces tuples
use std::iter::Fuse;
// `HomogeneousTuple` is a public facade for `TupleCollect`, allowing
// tuple-related methods to be used by clients in generic contexts, while
// hiding the implementation details of `TupleCollect`.
// See https://github.com/rust-itertools/itertools/issues/387
/// Implemented for homogeneous tuples of size up to 4.
pub trait HomogeneousTuple
: TupleCollect
{}
impl<T: TupleCollect> HomogeneousTuple for T {}
/// An iterator over a incomplete tuple.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) and
/// [`Tuples::into_buffer()`](struct.Tuples.html#method.into_buffer).
#[derive(Clone, Debug)]
pub struct TupleBuffer<T>
where T: HomogeneousTuple
{
cur: usize,
buf: T::Buffer,
}
impl<T> TupleBuffer<T>
where T: HomogeneousTuple
{
fn new(buf: T::Buffer) -> Self {
TupleBuffer {
cur: 0,
buf,
}
}
}
| where T: HomogeneousTuple
{
type Item = T::Item;
fn next(&mut self) -> Option<Self::Item> {
let s = self.buf.as_mut();
if let Some(ref mut item) = s.get_mut(self.cur) {
self.cur += 1;
item.take()
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let buffer = &self.buf.as_ref()[self.cur..];
let len = if buffer.len() == 0 {
0
} else {
buffer.iter()
.position(|x| x.is_none())
.unwrap_or(buffer.len())
};
(len, Some(len))
}
}
impl<T> ExactSizeIterator for TupleBuffer<T>
where T: HomogeneousTuple
{
}
/// An iterator that groups the items in tuples of a specific size.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) for more information.
#[derive(Clone)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: Fuse<I>,
buf: T::Buffer,
}
/// Create a new tuples iterator.
pub fn tuples<I, T>(iter: I) -> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
Tuples {
iter: iter.fuse(),
buf: Default::default(),
}
}
impl<I, T> Iterator for Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
type Item = T;
fn next(&mut self) -> Option<T> {
T::collect_from_iter(&mut self.iter, &mut self.buf)
}
}
impl<I, T> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
/// Return a buffer with the produced items that was not enough to be grouped in a tuple.
///
/// ```
/// use itertools::Itertools;
///
/// let mut iter = (0..5).tuples();
/// assert_eq!(Some((0, 1, 2)), iter.next());
/// assert_eq!(None, iter.next());
/// itertools::assert_equal(vec![3, 4], iter.into_buffer());
/// ```
pub fn into_buffer(self) -> TupleBuffer<T> {
TupleBuffer::new(self.buf)
}
}
/// An iterator over all contiguous windows that produces tuples of a specific size.
///
/// See [`.tuple_windows()`](../trait.Itertools.html#method.tuple_windows) for more
/// information.
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: I,
last: Option<T>,
}
/// Create a new tuple windows iterator.
pub fn tuple_windows<I, T>(mut iter: I) -> TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple,
T::Item: Clone
{
use std::iter::once;
let mut last = None;
if T::num_items()!= 1 {
// put in a duplicate item in front of the tuple; this simplifies
//.next() function.
if let Some(item) = iter.next() {
let iter = once(item.clone()).chain(once(item)).chain(&mut iter);
last = T::collect_from_iter_no_buf(iter);
}
}
TupleWindows {
last,
iter,
}
}
impl<I, T> Iterator for TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple + Clone,
T::Item: Clone
{
type Item = T;
fn next(&mut self) -> Option<T> {
if T::num_items() == 1 {
return T::collect_from_iter_no_buf(&mut self.iter)
}
if let Some(ref mut last) = self.last {
if let Some(new) = self.iter.next() {
last.left_shift_push(new);
return Some(last.clone());
}
}
None
}
}
pub trait TupleCollect: Sized {
type Item;
type Buffer: Default + AsRef<[Option<Self::Item>]> + AsMut<[Option<Self::Item>]>;
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn num_items() -> usize;
fn left_shift_push(&mut self, item: Self::Item);
}
macro_rules! impl_tuple_collect {
() => ();
($N:expr; $A:ident ; $($X:ident),* ; $($Y:ident),* ; $($Y_rev:ident),*) => (
impl<$A> TupleCollect for ($($X),*,) {
type Item = $A;
type Buffer = [Option<$A>; $N - 1];
#[allow(unused_assignments, unused_mut)]
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
$(
let mut $Y = None;
)*
loop {
$(
$Y = iter.next();
if $Y.is_none() {
break
}
)*
return Some(($($Y.unwrap()),*,))
}
let mut i = 0;
let mut s = buf.as_mut();
$(
if i < s.len() {
s[i] = $Y;
i += 1;
}
)*
return None;
}
#[allow(unused_assignments)]
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
loop {
$(
let $Y = if let Some($Y) = iter.next() {
$Y
} else {
break;
};
)*
return Some(($($Y),*,))
}
return None;
}
fn num_items() -> usize {
$N
}
fn left_shift_push(&mut self, item: $A) {
use std::mem::replace;
let &mut ($(ref mut $Y),*,) = self;
let tmp = item;
$(
let tmp = replace($Y_rev, tmp);
)*
drop(tmp);
}
}
)
}
impl_tuple_collect!(1; A; A; a; a);
impl_tuple_collect!(2; A; A, A; a, b; b, a);
impl_tuple_collect!(3; A; A, A, A; a, b, c; c, b, a);
impl_tuple_collect!(4; A; A, A, A, A; a, b, c, d; d, c, b, a); | impl<T> Iterator for TupleBuffer<T> | random_line_split |
tuple_impl.rs | //! Some iterator that produces tuples
use std::iter::Fuse;
// `HomogeneousTuple` is a public facade for `TupleCollect`, allowing
// tuple-related methods to be used by clients in generic contexts, while
// hiding the implementation details of `TupleCollect`.
// See https://github.com/rust-itertools/itertools/issues/387
/// Implemented for homogeneous tuples of size up to 4.
pub trait HomogeneousTuple
: TupleCollect
{}
impl<T: TupleCollect> HomogeneousTuple for T {}
/// An iterator over a incomplete tuple.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) and
/// [`Tuples::into_buffer()`](struct.Tuples.html#method.into_buffer).
#[derive(Clone, Debug)]
pub struct TupleBuffer<T>
where T: HomogeneousTuple
{
cur: usize,
buf: T::Buffer,
}
impl<T> TupleBuffer<T>
where T: HomogeneousTuple
{
fn new(buf: T::Buffer) -> Self {
TupleBuffer {
cur: 0,
buf,
}
}
}
impl<T> Iterator for TupleBuffer<T>
where T: HomogeneousTuple
{
type Item = T::Item;
fn next(&mut self) -> Option<Self::Item> {
let s = self.buf.as_mut();
if let Some(ref mut item) = s.get_mut(self.cur) {
self.cur += 1;
item.take()
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let buffer = &self.buf.as_ref()[self.cur..];
let len = if buffer.len() == 0 {
0
} else {
buffer.iter()
.position(|x| x.is_none())
.unwrap_or(buffer.len())
};
(len, Some(len))
}
}
impl<T> ExactSizeIterator for TupleBuffer<T>
where T: HomogeneousTuple
{
}
/// An iterator that groups the items in tuples of a specific size.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) for more information.
#[derive(Clone)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: Fuse<I>,
buf: T::Buffer,
}
/// Create a new tuples iterator.
pub fn tuples<I, T>(iter: I) -> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
Tuples {
iter: iter.fuse(),
buf: Default::default(),
}
}
impl<I, T> Iterator for Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
type Item = T;
fn next(&mut self) -> Option<T> {
T::collect_from_iter(&mut self.iter, &mut self.buf)
}
}
impl<I, T> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
/// Return a buffer with the produced items that was not enough to be grouped in a tuple.
///
/// ```
/// use itertools::Itertools;
///
/// let mut iter = (0..5).tuples();
/// assert_eq!(Some((0, 1, 2)), iter.next());
/// assert_eq!(None, iter.next());
/// itertools::assert_equal(vec![3, 4], iter.into_buffer());
/// ```
pub fn into_buffer(self) -> TupleBuffer<T> {
TupleBuffer::new(self.buf)
}
}
/// An iterator over all contiguous windows that produces tuples of a specific size.
///
/// See [`.tuple_windows()`](../trait.Itertools.html#method.tuple_windows) for more
/// information.
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: I,
last: Option<T>,
}
/// Create a new tuple windows iterator.
pub fn tuple_windows<I, T>(mut iter: I) -> TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple,
T::Item: Clone
{
use std::iter::once;
let mut last = None;
if T::num_items()!= 1 {
// put in a duplicate item in front of the tuple; this simplifies
//.next() function.
if let Some(item) = iter.next() {
let iter = once(item.clone()).chain(once(item)).chain(&mut iter);
last = T::collect_from_iter_no_buf(iter);
}
}
TupleWindows {
last,
iter,
}
}
impl<I, T> Iterator for TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple + Clone,
T::Item: Clone
{
type Item = T;
fn next(&mut self) -> Option<T> |
}
pub trait TupleCollect: Sized {
type Item;
type Buffer: Default + AsRef<[Option<Self::Item>]> + AsMut<[Option<Self::Item>]>;
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn num_items() -> usize;
fn left_shift_push(&mut self, item: Self::Item);
}
macro_rules! impl_tuple_collect {
() => ();
($N:expr; $A:ident ; $($X:ident),* ; $($Y:ident),* ; $($Y_rev:ident),*) => (
impl<$A> TupleCollect for ($($X),*,) {
type Item = $A;
type Buffer = [Option<$A>; $N - 1];
#[allow(unused_assignments, unused_mut)]
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
$(
let mut $Y = None;
)*
loop {
$(
$Y = iter.next();
if $Y.is_none() {
break
}
)*
return Some(($($Y.unwrap()),*,))
}
let mut i = 0;
let mut s = buf.as_mut();
$(
if i < s.len() {
s[i] = $Y;
i += 1;
}
)*
return None;
}
#[allow(unused_assignments)]
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
loop {
$(
let $Y = if let Some($Y) = iter.next() {
$Y
} else {
break;
};
)*
return Some(($($Y),*,))
}
return None;
}
fn num_items() -> usize {
$N
}
fn left_shift_push(&mut self, item: $A) {
use std::mem::replace;
let &mut ($(ref mut $Y),*,) = self;
let tmp = item;
$(
let tmp = replace($Y_rev, tmp);
)*
drop(tmp);
}
}
)
}
impl_tuple_collect!(1; A; A; a; a);
impl_tuple_collect!(2; A; A, A; a, b; b, a);
impl_tuple_collect!(3; A; A, A, A; a, b, c; c, b, a);
impl_tuple_collect!(4; A; A, A, A, A; a, b, c, d; d, c, b, a);
| {
if T::num_items() == 1 {
return T::collect_from_iter_no_buf(&mut self.iter)
}
if let Some(ref mut last) = self.last {
if let Some(new) = self.iter.next() {
last.left_shift_push(new);
return Some(last.clone());
}
}
None
} | identifier_body |
tuple_impl.rs | //! Some iterator that produces tuples
use std::iter::Fuse;
// `HomogeneousTuple` is a public facade for `TupleCollect`, allowing
// tuple-related methods to be used by clients in generic contexts, while
// hiding the implementation details of `TupleCollect`.
// See https://github.com/rust-itertools/itertools/issues/387
/// Implemented for homogeneous tuples of size up to 4.
pub trait HomogeneousTuple
: TupleCollect
{}
impl<T: TupleCollect> HomogeneousTuple for T {}
/// An iterator over a incomplete tuple.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) and
/// [`Tuples::into_buffer()`](struct.Tuples.html#method.into_buffer).
#[derive(Clone, Debug)]
pub struct TupleBuffer<T>
where T: HomogeneousTuple
{
cur: usize,
buf: T::Buffer,
}
impl<T> TupleBuffer<T>
where T: HomogeneousTuple
{
fn new(buf: T::Buffer) -> Self {
TupleBuffer {
cur: 0,
buf,
}
}
}
impl<T> Iterator for TupleBuffer<T>
where T: HomogeneousTuple
{
type Item = T::Item;
fn next(&mut self) -> Option<Self::Item> {
let s = self.buf.as_mut();
if let Some(ref mut item) = s.get_mut(self.cur) {
self.cur += 1;
item.take()
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let buffer = &self.buf.as_ref()[self.cur..];
let len = if buffer.len() == 0 {
0
} else | ;
(len, Some(len))
}
}
impl<T> ExactSizeIterator for TupleBuffer<T>
where T: HomogeneousTuple
{
}
/// An iterator that groups the items in tuples of a specific size.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) for more information.
#[derive(Clone)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: Fuse<I>,
buf: T::Buffer,
}
/// Create a new tuples iterator.
pub fn tuples<I, T>(iter: I) -> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
Tuples {
iter: iter.fuse(),
buf: Default::default(),
}
}
impl<I, T> Iterator for Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
type Item = T;
fn next(&mut self) -> Option<T> {
T::collect_from_iter(&mut self.iter, &mut self.buf)
}
}
impl<I, T> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
/// Return a buffer with the produced items that was not enough to be grouped in a tuple.
///
/// ```
/// use itertools::Itertools;
///
/// let mut iter = (0..5).tuples();
/// assert_eq!(Some((0, 1, 2)), iter.next());
/// assert_eq!(None, iter.next());
/// itertools::assert_equal(vec![3, 4], iter.into_buffer());
/// ```
pub fn into_buffer(self) -> TupleBuffer<T> {
TupleBuffer::new(self.buf)
}
}
/// An iterator over all contiguous windows that produces tuples of a specific size.
///
/// See [`.tuple_windows()`](../trait.Itertools.html#method.tuple_windows) for more
/// information.
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: I,
last: Option<T>,
}
/// Create a new tuple windows iterator.
pub fn tuple_windows<I, T>(mut iter: I) -> TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple,
T::Item: Clone
{
use std::iter::once;
let mut last = None;
if T::num_items()!= 1 {
// put in a duplicate item in front of the tuple; this simplifies
//.next() function.
if let Some(item) = iter.next() {
let iter = once(item.clone()).chain(once(item)).chain(&mut iter);
last = T::collect_from_iter_no_buf(iter);
}
}
TupleWindows {
last,
iter,
}
}
impl<I, T> Iterator for TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple + Clone,
T::Item: Clone
{
type Item = T;
fn next(&mut self) -> Option<T> {
if T::num_items() == 1 {
return T::collect_from_iter_no_buf(&mut self.iter)
}
if let Some(ref mut last) = self.last {
if let Some(new) = self.iter.next() {
last.left_shift_push(new);
return Some(last.clone());
}
}
None
}
}
pub trait TupleCollect: Sized {
type Item;
type Buffer: Default + AsRef<[Option<Self::Item>]> + AsMut<[Option<Self::Item>]>;
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn num_items() -> usize;
fn left_shift_push(&mut self, item: Self::Item);
}
macro_rules! impl_tuple_collect {
() => ();
($N:expr; $A:ident ; $($X:ident),* ; $($Y:ident),* ; $($Y_rev:ident),*) => (
impl<$A> TupleCollect for ($($X),*,) {
type Item = $A;
type Buffer = [Option<$A>; $N - 1];
#[allow(unused_assignments, unused_mut)]
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
$(
let mut $Y = None;
)*
loop {
$(
$Y = iter.next();
if $Y.is_none() {
break
}
)*
return Some(($($Y.unwrap()),*,))
}
let mut i = 0;
let mut s = buf.as_mut();
$(
if i < s.len() {
s[i] = $Y;
i += 1;
}
)*
return None;
}
#[allow(unused_assignments)]
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
loop {
$(
let $Y = if let Some($Y) = iter.next() {
$Y
} else {
break;
};
)*
return Some(($($Y),*,))
}
return None;
}
fn num_items() -> usize {
$N
}
fn left_shift_push(&mut self, item: $A) {
use std::mem::replace;
let &mut ($(ref mut $Y),*,) = self;
let tmp = item;
$(
let tmp = replace($Y_rev, tmp);
)*
drop(tmp);
}
}
)
}
impl_tuple_collect!(1; A; A; a; a);
impl_tuple_collect!(2; A; A, A; a, b; b, a);
impl_tuple_collect!(3; A; A, A, A; a, b, c; c, b, a);
impl_tuple_collect!(4; A; A, A, A, A; a, b, c, d; d, c, b, a);
| {
buffer.iter()
.position(|x| x.is_none())
.unwrap_or(buffer.len())
} | conditional_block |
tuple_impl.rs | //! Some iterator that produces tuples
use std::iter::Fuse;
// `HomogeneousTuple` is a public facade for `TupleCollect`, allowing
// tuple-related methods to be used by clients in generic contexts, while
// hiding the implementation details of `TupleCollect`.
// See https://github.com/rust-itertools/itertools/issues/387
/// Implemented for homogeneous tuples of size up to 4.
pub trait HomogeneousTuple
: TupleCollect
{}
impl<T: TupleCollect> HomogeneousTuple for T {}
/// An iterator over a incomplete tuple.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) and
/// [`Tuples::into_buffer()`](struct.Tuples.html#method.into_buffer).
#[derive(Clone, Debug)]
pub struct TupleBuffer<T>
where T: HomogeneousTuple
{
cur: usize,
buf: T::Buffer,
}
impl<T> TupleBuffer<T>
where T: HomogeneousTuple
{
fn new(buf: T::Buffer) -> Self {
TupleBuffer {
cur: 0,
buf,
}
}
}
impl<T> Iterator for TupleBuffer<T>
where T: HomogeneousTuple
{
type Item = T::Item;
fn next(&mut self) -> Option<Self::Item> {
let s = self.buf.as_mut();
if let Some(ref mut item) = s.get_mut(self.cur) {
self.cur += 1;
item.take()
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let buffer = &self.buf.as_ref()[self.cur..];
let len = if buffer.len() == 0 {
0
} else {
buffer.iter()
.position(|x| x.is_none())
.unwrap_or(buffer.len())
};
(len, Some(len))
}
}
impl<T> ExactSizeIterator for TupleBuffer<T>
where T: HomogeneousTuple
{
}
/// An iterator that groups the items in tuples of a specific size.
///
/// See [`.tuples()`](../trait.Itertools.html#method.tuples) for more information.
#[derive(Clone)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
pub struct Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: Fuse<I>,
buf: T::Buffer,
}
/// Create a new tuples iterator.
pub fn tuples<I, T>(iter: I) -> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
Tuples {
iter: iter.fuse(),
buf: Default::default(),
}
}
impl<I, T> Iterator for Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
type Item = T;
fn next(&mut self) -> Option<T> {
T::collect_from_iter(&mut self.iter, &mut self.buf)
}
}
impl<I, T> Tuples<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
/// Return a buffer with the produced items that was not enough to be grouped in a tuple.
///
/// ```
/// use itertools::Itertools;
///
/// let mut iter = (0..5).tuples();
/// assert_eq!(Some((0, 1, 2)), iter.next());
/// assert_eq!(None, iter.next());
/// itertools::assert_equal(vec![3, 4], iter.into_buffer());
/// ```
pub fn into_buffer(self) -> TupleBuffer<T> {
TupleBuffer::new(self.buf)
}
}
/// An iterator over all contiguous windows that produces tuples of a specific size.
///
/// See [`.tuple_windows()`](../trait.Itertools.html#method.tuple_windows) for more
/// information.
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple
{
iter: I,
last: Option<T>,
}
/// Create a new tuple windows iterator.
pub fn tuple_windows<I, T>(mut iter: I) -> TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple,
T::Item: Clone
{
use std::iter::once;
let mut last = None;
if T::num_items()!= 1 {
// put in a duplicate item in front of the tuple; this simplifies
//.next() function.
if let Some(item) = iter.next() {
let iter = once(item.clone()).chain(once(item)).chain(&mut iter);
last = T::collect_from_iter_no_buf(iter);
}
}
TupleWindows {
last,
iter,
}
}
impl<I, T> Iterator for TupleWindows<I, T>
where I: Iterator<Item = T::Item>,
T: HomogeneousTuple + Clone,
T::Item: Clone
{
type Item = T;
fn | (&mut self) -> Option<T> {
if T::num_items() == 1 {
return T::collect_from_iter_no_buf(&mut self.iter)
}
if let Some(ref mut last) = self.last {
if let Some(new) = self.iter.next() {
last.left_shift_push(new);
return Some(last.clone());
}
}
None
}
}
pub trait TupleCollect: Sized {
type Item;
type Buffer: Default + AsRef<[Option<Self::Item>]> + AsMut<[Option<Self::Item>]>;
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = Self::Item>;
fn num_items() -> usize;
fn left_shift_push(&mut self, item: Self::Item);
}
macro_rules! impl_tuple_collect {
() => ();
($N:expr; $A:ident ; $($X:ident),* ; $($Y:ident),* ; $($Y_rev:ident),*) => (
impl<$A> TupleCollect for ($($X),*,) {
type Item = $A;
type Buffer = [Option<$A>; $N - 1];
#[allow(unused_assignments, unused_mut)]
fn collect_from_iter<I>(iter: I, buf: &mut Self::Buffer) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
$(
let mut $Y = None;
)*
loop {
$(
$Y = iter.next();
if $Y.is_none() {
break
}
)*
return Some(($($Y.unwrap()),*,))
}
let mut i = 0;
let mut s = buf.as_mut();
$(
if i < s.len() {
s[i] = $Y;
i += 1;
}
)*
return None;
}
#[allow(unused_assignments)]
fn collect_from_iter_no_buf<I>(iter: I) -> Option<Self>
where I: IntoIterator<Item = $A>
{
let mut iter = iter.into_iter();
loop {
$(
let $Y = if let Some($Y) = iter.next() {
$Y
} else {
break;
};
)*
return Some(($($Y),*,))
}
return None;
}
fn num_items() -> usize {
$N
}
fn left_shift_push(&mut self, item: $A) {
use std::mem::replace;
let &mut ($(ref mut $Y),*,) = self;
let tmp = item;
$(
let tmp = replace($Y_rev, tmp);
)*
drop(tmp);
}
}
)
}
impl_tuple_collect!(1; A; A; a; a);
impl_tuple_collect!(2; A; A, A; a, b; b, a);
impl_tuple_collect!(3; A; A, A, A; a, b, c; c, b, a);
impl_tuple_collect!(4; A; A, A, A, A; a, b, c, d; d, c, b, a);
| next | identifier_name |
users.rs | #![crate_name = "users"]
/*
* This file is part of the uutils coreutils package.
*
* (c) KokaKiwi <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.22 */
// Allow dead code here in order to keep all fields, constants here, for consistency.
#![allow(dead_code)]
extern crate getopts;
extern crate libc;
use getopts::Options;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use utmpx::*;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
#[path = "../common/utmpx.rs"]
mod utmpx;
extern {
fn getutxent() -> *const c_utmp;
fn getutxid(ut: *const c_utmp) -> *const c_utmp;
fn getutxline(ut: *const c_utmp) -> *const c_utmp;
fn pututxline(ut: *const c_utmp) -> *const c_utmp;
fn setutxent();
fn endutxent();
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn utmpxname(file: *const libc::c_char) -> libc::c_int;
}
#[cfg(target_os = "freebsd")]
unsafe extern fn utmpxname(_file: *const libc::c_char) -> libc::c_int {
0
}
static NAME: &'static str = "users";
static VERSION: &'static str = "1.0.0";
pub fn uumain(args: Vec<String>) -> i32 |
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let filename = if matches.free.len() > 0 {
matches.free[0].as_ref()
} else {
DEFAULT_FILE
};
exec(filename);
0
}
fn exec(filename: &str) {
unsafe {
utmpxname(CString::new(filename).unwrap().as_ptr());
}
let mut users = vec!();
unsafe {
setutxent();
loop {
let line = getutxent();
if line == ptr::null() {
break;
}
if (*line).ut_type == USER_PROCESS {
let user = String::from_utf8_lossy(CStr::from_ptr(mem::transmute(&(*line).ut_user)).to_bytes()).to_string();
users.push(user);
}
}
endutxent();
}
if users.len() > 0 {
users.sort();
println!("{}", users.connect(" "));
}
}
| {
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("{}", f),
};
if matches.opt_present("help") {
println!("{} {}", NAME, VERSION);
println!("");
println!("Usage:");
println!(" {} [OPTION]... [FILE]", NAME);
println!("");
println!("{}", opts.usage("Output who is currently logged in according to FILE."));
return 0;
} | identifier_body |
users.rs | #![crate_name = "users"]
/*
* This file is part of the uutils coreutils package.
*
* (c) KokaKiwi <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.22 */
// Allow dead code here in order to keep all fields, constants here, for consistency.
#![allow(dead_code)]
extern crate getopts;
extern crate libc;
use getopts::Options;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use utmpx::*;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
#[path = "../common/utmpx.rs"]
mod utmpx;
extern {
fn getutxent() -> *const c_utmp;
fn getutxid(ut: *const c_utmp) -> *const c_utmp;
fn getutxline(ut: *const c_utmp) -> *const c_utmp;
fn pututxline(ut: *const c_utmp) -> *const c_utmp;
fn setutxent();
fn endutxent();
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn utmpxname(file: *const libc::c_char) -> libc::c_int;
}
#[cfg(target_os = "freebsd")]
unsafe extern fn utmpxname(_file: *const libc::c_char) -> libc::c_int {
0
}
static NAME: &'static str = "users";
static VERSION: &'static str = "1.0.0";
pub fn | (args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("{}", f),
};
if matches.opt_present("help") {
println!("{} {}", NAME, VERSION);
println!("");
println!("Usage:");
println!(" {} [OPTION]... [FILE]", NAME);
println!("");
println!("{}", opts.usage("Output who is currently logged in according to FILE."));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let filename = if matches.free.len() > 0 {
matches.free[0].as_ref()
} else {
DEFAULT_FILE
};
exec(filename);
0
}
fn exec(filename: &str) {
unsafe {
utmpxname(CString::new(filename).unwrap().as_ptr());
}
let mut users = vec!();
unsafe {
setutxent();
loop {
let line = getutxent();
if line == ptr::null() {
break;
}
if (*line).ut_type == USER_PROCESS {
let user = String::from_utf8_lossy(CStr::from_ptr(mem::transmute(&(*line).ut_user)).to_bytes()).to_string();
users.push(user);
}
}
endutxent();
}
if users.len() > 0 {
users.sort();
println!("{}", users.connect(" "));
}
}
| uumain | identifier_name |
users.rs | #![crate_name = "users"]
/*
* This file is part of the uutils coreutils package.
*
* (c) KokaKiwi <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.22 */
// Allow dead code here in order to keep all fields, constants here, for consistency.
#![allow(dead_code)]
extern crate getopts;
extern crate libc;
use getopts::Options;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use utmpx::*;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
#[path = "../common/utmpx.rs"]
mod utmpx;
extern {
fn getutxent() -> *const c_utmp;
fn getutxid(ut: *const c_utmp) -> *const c_utmp;
fn getutxline(ut: *const c_utmp) -> *const c_utmp;
fn pututxline(ut: *const c_utmp) -> *const c_utmp;
fn setutxent();
fn endutxent();
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn utmpxname(file: *const libc::c_char) -> libc::c_int;
}
#[cfg(target_os = "freebsd")]
unsafe extern fn utmpxname(_file: *const libc::c_char) -> libc::c_int {
0
}
static NAME: &'static str = "users";
static VERSION: &'static str = "1.0.0";
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("{}", f),
};
if matches.opt_present("help") {
println!("{} {}", NAME, VERSION);
println!("");
println!("Usage:");
println!(" {} [OPTION]... [FILE]", NAME);
println!("");
println!("{}", opts.usage("Output who is currently logged in according to FILE."));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let filename = if matches.free.len() > 0 {
matches.free[0].as_ref()
} else {
DEFAULT_FILE
};
exec(filename);
0
}
fn exec(filename: &str) {
unsafe {
utmpxname(CString::new(filename).unwrap().as_ptr());
}
let mut users = vec!();
unsafe {
setutxent();
loop {
let line = getutxent();
if line == ptr::null() |
if (*line).ut_type == USER_PROCESS {
let user = String::from_utf8_lossy(CStr::from_ptr(mem::transmute(&(*line).ut_user)).to_bytes()).to_string();
users.push(user);
}
}
endutxent();
}
if users.len() > 0 {
users.sort();
println!("{}", users.connect(" "));
}
}
| {
break;
} | conditional_block |
users.rs | #![crate_name = "users"]
/*
* This file is part of the uutils coreutils package.
*
* (c) KokaKiwi <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.22 */
// Allow dead code here in order to keep all fields, constants here, for consistency.
#![allow(dead_code)]
extern crate getopts;
extern crate libc;
use getopts::Options;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use utmpx::*;
#[path = "../common/util.rs"]
#[macro_use]
mod util;
#[path = "../common/utmpx.rs"]
mod utmpx;
extern {
fn getutxent() -> *const c_utmp;
fn getutxid(ut: *const c_utmp) -> *const c_utmp;
fn getutxline(ut: *const c_utmp) -> *const c_utmp;
fn pututxline(ut: *const c_utmp) -> *const c_utmp;
fn setutxent();
fn endutxent();
#[cfg(any(target_os = "macos", target_os = "linux"))]
fn utmpxname(file: *const libc::c_char) -> libc::c_int;
}
#[cfg(target_os = "freebsd")]
unsafe extern fn utmpxname(_file: *const libc::c_char) -> libc::c_int {
0
}
static NAME: &'static str = "users";
static VERSION: &'static str = "1.0.0";
pub fn uumain(args: Vec<String>) -> i32 {
let mut opts = Options::new();
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!("{}", f),
};
if matches.opt_present("help") { | println!("");
println!("Usage:");
println!(" {} [OPTION]... [FILE]", NAME);
println!("");
println!("{}", opts.usage("Output who is currently logged in according to FILE."));
return 0;
}
if matches.opt_present("version") {
println!("{} {}", NAME, VERSION);
return 0;
}
let filename = if matches.free.len() > 0 {
matches.free[0].as_ref()
} else {
DEFAULT_FILE
};
exec(filename);
0
}
fn exec(filename: &str) {
unsafe {
utmpxname(CString::new(filename).unwrap().as_ptr());
}
let mut users = vec!();
unsafe {
setutxent();
loop {
let line = getutxent();
if line == ptr::null() {
break;
}
if (*line).ut_type == USER_PROCESS {
let user = String::from_utf8_lossy(CStr::from_ptr(mem::transmute(&(*line).ut_user)).to_bytes()).to_string();
users.push(user);
}
}
endutxent();
}
if users.len() > 0 {
users.sort();
println!("{}", users.connect(" "));
}
} | println!("{} {}", NAME, VERSION); | random_line_split |
hello_triangle.rs | extern crate bootstrap_rs as bootstrap;
extern crate polygon;
use bootstrap::window::*;
use polygon::*;
use polygon::anchor::*;
use polygon::camera::*;
use polygon::math::*;
use polygon::mesh_instance::*;
use polygon::geometry::mesh::*;
static VERTEX_POSITIONS: [f32; 12] = [
-1.0, -1.0, 0.0, 1.0,
1.0, -1.0, 0.0, 1.0,
0.0, 1.0, 0.0, 1.0,
];
static INDICES: [u32; 3] = [0, 1, 2];
fn | () {
// Open a window and create the renderer instance.
let mut window = Window::new("Hello, Triangle!").unwrap();
let mut renderer = RendererBuilder::new(&window).build();
// Build a triangle mesh.
let mesh = MeshBuilder::new()
.set_position_data(Point::slice_from_f32_slice(&VERTEX_POSITIONS))
.set_indices(&INDICES)
.build()
.unwrap();
// Send the mesh to the GPU.
let gpu_mesh = renderer.register_mesh(&mesh);
// Create an anchor and register it with the renderer.
let anchor = Anchor::new();
let anchor_id = renderer.register_anchor(anchor);
// Setup the material for the mesh.
let mut material = renderer.default_material();
material.set_color("surface_color", Color::rgb(1.0, 0.0, 0.0));
// Create a mesh instance, attach it to the anchor, and register it.
let mut mesh_instance = MeshInstance::with_owned_material(gpu_mesh, material);
mesh_instance.set_anchor(anchor_id);
renderer.register_mesh_instance(mesh_instance);
// Create a camera and an anchor for it.
let mut camera_anchor = Anchor::new();
camera_anchor.set_position(Point::new(0.0, 0.0, 10.0));
let camera_anchor_id = renderer.register_anchor(camera_anchor);
let mut camera = Camera::default();
camera.set_anchor(camera_anchor_id);
renderer.register_camera(camera);
// Set ambient color to pure white so we don't need to worry about lighting.
renderer.set_ambient_light(Color::rgb(1.0, 1.0, 1.0));
'outer: loop {
while let Some(message) = window.next_message() {
match message {
Message::Close => break 'outer,
_ => {},
}
}
// Rotate the triangle slightly.
{
let anchor = renderer.get_anchor_mut(anchor_id).unwrap();
let orientation = anchor.orientation();
anchor.set_orientation(orientation + Orientation::from_eulers(0.0, 0.0, 0.0005));
}
// Render the mesh.
renderer.draw();
}
}
| main | identifier_name |
hello_triangle.rs | extern crate bootstrap_rs as bootstrap;
extern crate polygon;
use bootstrap::window::*;
use polygon::*;
use polygon::anchor::*;
use polygon::camera::*;
use polygon::math::*;
use polygon::mesh_instance::*;
use polygon::geometry::mesh::*;
static VERTEX_POSITIONS: [f32; 12] = [
-1.0, -1.0, 0.0, 1.0,
1.0, -1.0, 0.0, 1.0,
0.0, 1.0, 0.0, 1.0,
];
static INDICES: [u32; 3] = [0, 1, 2];
fn main() {
// Open a window and create the renderer instance.
let mut window = Window::new("Hello, Triangle!").unwrap();
let mut renderer = RendererBuilder::new(&window).build();
// Build a triangle mesh.
let mesh = MeshBuilder::new()
.set_position_data(Point::slice_from_f32_slice(&VERTEX_POSITIONS)) |
// Send the mesh to the GPU.
let gpu_mesh = renderer.register_mesh(&mesh);
// Create an anchor and register it with the renderer.
let anchor = Anchor::new();
let anchor_id = renderer.register_anchor(anchor);
// Setup the material for the mesh.
let mut material = renderer.default_material();
material.set_color("surface_color", Color::rgb(1.0, 0.0, 0.0));
// Create a mesh instance, attach it to the anchor, and register it.
let mut mesh_instance = MeshInstance::with_owned_material(gpu_mesh, material);
mesh_instance.set_anchor(anchor_id);
renderer.register_mesh_instance(mesh_instance);
// Create a camera and an anchor for it.
let mut camera_anchor = Anchor::new();
camera_anchor.set_position(Point::new(0.0, 0.0, 10.0));
let camera_anchor_id = renderer.register_anchor(camera_anchor);
let mut camera = Camera::default();
camera.set_anchor(camera_anchor_id);
renderer.register_camera(camera);
// Set ambient color to pure white so we don't need to worry about lighting.
renderer.set_ambient_light(Color::rgb(1.0, 1.0, 1.0));
'outer: loop {
while let Some(message) = window.next_message() {
match message {
Message::Close => break 'outer,
_ => {},
}
}
// Rotate the triangle slightly.
{
let anchor = renderer.get_anchor_mut(anchor_id).unwrap();
let orientation = anchor.orientation();
anchor.set_orientation(orientation + Orientation::from_eulers(0.0, 0.0, 0.0005));
}
// Render the mesh.
renderer.draw();
}
} | .set_indices(&INDICES)
.build()
.unwrap(); | random_line_split |
hello_triangle.rs | extern crate bootstrap_rs as bootstrap;
extern crate polygon;
use bootstrap::window::*;
use polygon::*;
use polygon::anchor::*;
use polygon::camera::*;
use polygon::math::*;
use polygon::mesh_instance::*;
use polygon::geometry::mesh::*;
static VERTEX_POSITIONS: [f32; 12] = [
-1.0, -1.0, 0.0, 1.0,
1.0, -1.0, 0.0, 1.0,
0.0, 1.0, 0.0, 1.0,
];
static INDICES: [u32; 3] = [0, 1, 2];
fn main() | let mut material = renderer.default_material();
material.set_color("surface_color", Color::rgb(1.0, 0.0, 0.0));
// Create a mesh instance, attach it to the anchor, and register it.
let mut mesh_instance = MeshInstance::with_owned_material(gpu_mesh, material);
mesh_instance.set_anchor(anchor_id);
renderer.register_mesh_instance(mesh_instance);
// Create a camera and an anchor for it.
let mut camera_anchor = Anchor::new();
camera_anchor.set_position(Point::new(0.0, 0.0, 10.0));
let camera_anchor_id = renderer.register_anchor(camera_anchor);
let mut camera = Camera::default();
camera.set_anchor(camera_anchor_id);
renderer.register_camera(camera);
// Set ambient color to pure white so we don't need to worry about lighting.
renderer.set_ambient_light(Color::rgb(1.0, 1.0, 1.0));
'outer: loop {
while let Some(message) = window.next_message() {
match message {
Message::Close => break 'outer,
_ => {},
}
}
// Rotate the triangle slightly.
{
let anchor = renderer.get_anchor_mut(anchor_id).unwrap();
let orientation = anchor.orientation();
anchor.set_orientation(orientation + Orientation::from_eulers(0.0, 0.0, 0.0005));
}
// Render the mesh.
renderer.draw();
}
}
| {
// Open a window and create the renderer instance.
let mut window = Window::new("Hello, Triangle!").unwrap();
let mut renderer = RendererBuilder::new(&window).build();
// Build a triangle mesh.
let mesh = MeshBuilder::new()
.set_position_data(Point::slice_from_f32_slice(&VERTEX_POSITIONS))
.set_indices(&INDICES)
.build()
.unwrap();
// Send the mesh to the GPU.
let gpu_mesh = renderer.register_mesh(&mesh);
// Create an anchor and register it with the renderer.
let anchor = Anchor::new();
let anchor_id = renderer.register_anchor(anchor);
// Setup the material for the mesh. | identifier_body |
xml.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#![allow(unrooted_must_root)]
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::trace::JSTraceable;
use crate::dom::document::Document;
use crate::dom::htmlscriptelement::HTMLScriptElement;
use crate::dom::node::Node;
use crate::dom::servoparser::{ParsingAlgorithm, Sink};
use js::jsapi::JSTracer;
use servo_url::ServoUrl;
use xml5ever::buffer_queue::BufferQueue;
use xml5ever::tokenizer::XmlTokenizer;
use xml5ever::tree_builder::{Tracer as XmlTracer, XmlTreeBuilder};
#[derive(JSTraceable, MallocSizeOf)]
#[unrooted_must_root_lint::must_root]
pub struct Tokenizer {
#[ignore_malloc_size_of = "Defined in xml5ever"]
inner: XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>>,
}
impl Tokenizer {
pub fn new(document: &Document, url: ServoUrl) -> Self {
let sink = Sink {
base_url: url,
document: Dom::from_ref(document),
current_line: 1,
script: Default::default(),
parsing_algorithm: ParsingAlgorithm::Normal,
};
let tb = XmlTreeBuilder::new(sink, Default::default());
let tok = XmlTokenizer::new(tb, Default::default());
Tokenizer { inner: tok }
}
pub fn feed(&mut self, input: &mut BufferQueue) -> Result<(), DomRoot<HTMLScriptElement>> {
self.inner.run(input);
if let Some(script) = self.inner.sink.sink.script.take() {
return Err(script);
}
Ok(())
}
pub fn end(&mut self) {
self.inner.end()
}
pub fn url(&self) -> &ServoUrl |
}
#[allow(unsafe_code)]
unsafe impl JSTraceable for XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>> {
unsafe fn trace(&self, trc: *mut JSTracer) {
struct Tracer(*mut JSTracer);
let tracer = Tracer(trc);
impl XmlTracer for Tracer {
type Handle = Dom<Node>;
#[allow(unrooted_must_root)]
fn trace_handle(&self, node: &Dom<Node>) {
unsafe {
node.trace(self.0);
}
}
}
let tree_builder = &self.sink;
tree_builder.trace_handles(&tracer);
tree_builder.sink.trace(trc);
}
}
| {
&self.inner.sink.sink.base_url
} | identifier_body |
xml.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#![allow(unrooted_must_root)]
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::trace::JSTraceable;
use crate::dom::document::Document;
use crate::dom::htmlscriptelement::HTMLScriptElement;
use crate::dom::node::Node;
use crate::dom::servoparser::{ParsingAlgorithm, Sink};
use js::jsapi::JSTracer;
use servo_url::ServoUrl;
use xml5ever::buffer_queue::BufferQueue;
use xml5ever::tokenizer::XmlTokenizer;
use xml5ever::tree_builder::{Tracer as XmlTracer, XmlTreeBuilder};
#[derive(JSTraceable, MallocSizeOf)]
#[unrooted_must_root_lint::must_root]
pub struct Tokenizer {
#[ignore_malloc_size_of = "Defined in xml5ever"]
inner: XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>>,
}
impl Tokenizer {
pub fn new(document: &Document, url: ServoUrl) -> Self {
let sink = Sink {
base_url: url,
document: Dom::from_ref(document),
current_line: 1,
script: Default::default(),
parsing_algorithm: ParsingAlgorithm::Normal,
};
let tb = XmlTreeBuilder::new(sink, Default::default());
let tok = XmlTokenizer::new(tb, Default::default());
Tokenizer { inner: tok }
}
pub fn feed(&mut self, input: &mut BufferQueue) -> Result<(), DomRoot<HTMLScriptElement>> {
self.inner.run(input);
if let Some(script) = self.inner.sink.sink.script.take() |
Ok(())
}
pub fn end(&mut self) {
self.inner.end()
}
pub fn url(&self) -> &ServoUrl {
&self.inner.sink.sink.base_url
}
}
#[allow(unsafe_code)]
unsafe impl JSTraceable for XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>> {
unsafe fn trace(&self, trc: *mut JSTracer) {
struct Tracer(*mut JSTracer);
let tracer = Tracer(trc);
impl XmlTracer for Tracer {
type Handle = Dom<Node>;
#[allow(unrooted_must_root)]
fn trace_handle(&self, node: &Dom<Node>) {
unsafe {
node.trace(self.0);
}
}
}
let tree_builder = &self.sink;
tree_builder.trace_handles(&tracer);
tree_builder.sink.trace(trc);
}
}
| {
return Err(script);
} | conditional_block |
xml.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#![allow(unrooted_must_root)]
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::trace::JSTraceable;
use crate::dom::document::Document;
use crate::dom::htmlscriptelement::HTMLScriptElement;
use crate::dom::node::Node;
use crate::dom::servoparser::{ParsingAlgorithm, Sink};
use js::jsapi::JSTracer;
use servo_url::ServoUrl;
use xml5ever::buffer_queue::BufferQueue;
use xml5ever::tokenizer::XmlTokenizer;
use xml5ever::tree_builder::{Tracer as XmlTracer, XmlTreeBuilder};
#[derive(JSTraceable, MallocSizeOf)]
#[unrooted_must_root_lint::must_root]
pub struct Tokenizer {
#[ignore_malloc_size_of = "Defined in xml5ever"]
inner: XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>>,
}
impl Tokenizer {
pub fn new(document: &Document, url: ServoUrl) -> Self {
let sink = Sink {
base_url: url,
document: Dom::from_ref(document),
current_line: 1,
script: Default::default(),
parsing_algorithm: ParsingAlgorithm::Normal,
};
let tb = XmlTreeBuilder::new(sink, Default::default());
let tok = XmlTokenizer::new(tb, Default::default());
Tokenizer { inner: tok }
}
pub fn feed(&mut self, input: &mut BufferQueue) -> Result<(), DomRoot<HTMLScriptElement>> {
self.inner.run(input);
if let Some(script) = self.inner.sink.sink.script.take() {
return Err(script);
}
Ok(())
}
pub fn end(&mut self) {
self.inner.end()
}
pub fn url(&self) -> &ServoUrl {
&self.inner.sink.sink.base_url
}
}
#[allow(unsafe_code)]
unsafe impl JSTraceable for XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>> {
unsafe fn trace(&self, trc: *mut JSTracer) {
struct Tracer(*mut JSTracer);
let tracer = Tracer(trc);
impl XmlTracer for Tracer { | node.trace(self.0);
}
}
}
let tree_builder = &self.sink;
tree_builder.trace_handles(&tracer);
tree_builder.sink.trace(trc);
}
} | type Handle = Dom<Node>;
#[allow(unrooted_must_root)]
fn trace_handle(&self, node: &Dom<Node>) {
unsafe { | random_line_split |
xml.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#![allow(unrooted_must_root)]
use crate::dom::bindings::root::{Dom, DomRoot};
use crate::dom::bindings::trace::JSTraceable;
use crate::dom::document::Document;
use crate::dom::htmlscriptelement::HTMLScriptElement;
use crate::dom::node::Node;
use crate::dom::servoparser::{ParsingAlgorithm, Sink};
use js::jsapi::JSTracer;
use servo_url::ServoUrl;
use xml5ever::buffer_queue::BufferQueue;
use xml5ever::tokenizer::XmlTokenizer;
use xml5ever::tree_builder::{Tracer as XmlTracer, XmlTreeBuilder};
#[derive(JSTraceable, MallocSizeOf)]
#[unrooted_must_root_lint::must_root]
pub struct Tokenizer {
#[ignore_malloc_size_of = "Defined in xml5ever"]
inner: XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>>,
}
impl Tokenizer {
pub fn new(document: &Document, url: ServoUrl) -> Self {
let sink = Sink {
base_url: url,
document: Dom::from_ref(document),
current_line: 1,
script: Default::default(),
parsing_algorithm: ParsingAlgorithm::Normal,
};
let tb = XmlTreeBuilder::new(sink, Default::default());
let tok = XmlTokenizer::new(tb, Default::default());
Tokenizer { inner: tok }
}
pub fn feed(&mut self, input: &mut BufferQueue) -> Result<(), DomRoot<HTMLScriptElement>> {
self.inner.run(input);
if let Some(script) = self.inner.sink.sink.script.take() {
return Err(script);
}
Ok(())
}
pub fn end(&mut self) {
self.inner.end()
}
pub fn url(&self) -> &ServoUrl {
&self.inner.sink.sink.base_url
}
}
#[allow(unsafe_code)]
unsafe impl JSTraceable for XmlTokenizer<XmlTreeBuilder<Dom<Node>, Sink>> {
unsafe fn | (&self, trc: *mut JSTracer) {
struct Tracer(*mut JSTracer);
let tracer = Tracer(trc);
impl XmlTracer for Tracer {
type Handle = Dom<Node>;
#[allow(unrooted_must_root)]
fn trace_handle(&self, node: &Dom<Node>) {
unsafe {
node.trace(self.0);
}
}
}
let tree_builder = &self.sink;
tree_builder.trace_handles(&tracer);
tree_builder.sink.trace(trc);
}
}
| trace | identifier_name |
method_info.rs | use super::Attributes;
#[derive(Debug)]
pub struct | {
pub access_flags: MethodAccessFlags,
pub name_index: u16,
pub descriptor_index: u16,
pub attrs: Attributes,
}
bitflags! {
pub flags MethodAccessFlags: u16 {
const METHOD_ACC_PUBLIC = 0x0001,
const METHOD_ACC_PRIVATE = 0x0002,
const METHOD_ACC_PROTECTED = 0x0004,
const METHOD_ACC_STATIC = 0x0008,
const METHOD_ACC_FINAL = 0x0010,
const METHOD_ACC_SYNCHRONIZED = 0x0020,
const METHOD_ACC_BRIDGE = 0x0040,
const METHOD_ACC_VARARGS = 0x0080,
const METHOD_ACC_NATIVE = 0x0100,
const METHOD_ACC_ABSTRACT = 0x0400,
const METHOD_ACC_STRICT = 0x0800,
const METHOD_ACC_SYNTHETIC = 0x1000
}
}
impl MethodAccessFlags {
pub fn is_public(&self) -> bool {
self.contains(METHOD_ACC_PUBLIC)
}
pub fn is_private(&self) -> bool {
self.contains(METHOD_ACC_PRIVATE)
}
pub fn is_protected(&self) -> bool {
self.contains(METHOD_ACC_PROTECTED)
}
pub fn is_static(&self) -> bool {
self.contains(METHOD_ACC_STATIC)
}
pub fn is_final(&self) -> bool {
self.contains(METHOD_ACC_FINAL)
}
pub fn is_synchronized(&self) -> bool {
self.contains(METHOD_ACC_SYNCHRONIZED)
}
pub fn is_bridge(&self) -> bool {
self.contains(METHOD_ACC_BRIDGE)
}
pub fn is_varargs(&self) -> bool {
self.contains(METHOD_ACC_VARARGS)
}
pub fn is_native(&self) -> bool {
self.contains(METHOD_ACC_NATIVE)
}
pub fn is_abstract(&self) -> bool {
self.contains(METHOD_ACC_ABSTRACT)
}
pub fn is_strict(&self) -> bool {
self.contains(METHOD_ACC_STRICT)
}
pub fn is_synthetic(&self) -> bool {
self.contains(METHOD_ACC_SYNTHETIC)
}
}
| MethodInfo | identifier_name |
method_info.rs | use super::Attributes;
#[derive(Debug)]
pub struct MethodInfo {
pub access_flags: MethodAccessFlags,
pub name_index: u16,
pub descriptor_index: u16,
pub attrs: Attributes,
}
bitflags! {
pub flags MethodAccessFlags: u16 {
const METHOD_ACC_PUBLIC = 0x0001,
const METHOD_ACC_PRIVATE = 0x0002,
const METHOD_ACC_PROTECTED = 0x0004,
const METHOD_ACC_STATIC = 0x0008,
const METHOD_ACC_FINAL = 0x0010,
const METHOD_ACC_SYNCHRONIZED = 0x0020,
const METHOD_ACC_BRIDGE = 0x0040,
const METHOD_ACC_VARARGS = 0x0080,
const METHOD_ACC_NATIVE = 0x0100,
const METHOD_ACC_ABSTRACT = 0x0400,
const METHOD_ACC_STRICT = 0x0800,
const METHOD_ACC_SYNTHETIC = 0x1000
}
}
impl MethodAccessFlags {
pub fn is_public(&self) -> bool {
self.contains(METHOD_ACC_PUBLIC)
}
pub fn is_private(&self) -> bool {
self.contains(METHOD_ACC_PRIVATE)
}
pub fn is_protected(&self) -> bool {
self.contains(METHOD_ACC_PROTECTED)
}
pub fn is_static(&self) -> bool {
self.contains(METHOD_ACC_STATIC)
}
pub fn is_final(&self) -> bool {
self.contains(METHOD_ACC_FINAL)
}
pub fn is_synchronized(&self) -> bool {
self.contains(METHOD_ACC_SYNCHRONIZED) | }
pub fn is_bridge(&self) -> bool {
self.contains(METHOD_ACC_BRIDGE)
}
pub fn is_varargs(&self) -> bool {
self.contains(METHOD_ACC_VARARGS)
}
pub fn is_native(&self) -> bool {
self.contains(METHOD_ACC_NATIVE)
}
pub fn is_abstract(&self) -> bool {
self.contains(METHOD_ACC_ABSTRACT)
}
pub fn is_strict(&self) -> bool {
self.contains(METHOD_ACC_STRICT)
}
pub fn is_synthetic(&self) -> bool {
self.contains(METHOD_ACC_SYNTHETIC)
}
} | random_line_split |
|
text.rs | debug!("TextRunScanner: complete.");
InlineFragments {
fragments: new_fragments,
}
}
/// A "clump" is a range of inline flow leaves that can be merged together into a single
/// fragment. Adjacent text with the same style can be merged, and nothing else can.
///
/// The flow keeps track of the fragments contained by all non-leaf DOM nodes. This is necessary
/// for correct painting order. Since we compress several leaf fragments here, the mapping must
/// be adjusted.
fn flush_clump_to_list(&mut self,
font_context: &mut FontContext,
out_fragments: &mut Vec<Fragment>,
paragraph_bytes_processed: &mut usize,
bidi_levels: Option<&[u8]>,
mut last_whitespace: bool)
-> bool {
debug!("TextRunScanner: flushing {} fragments in range", self.clump.len());
debug_assert!(!self.clump.is_empty());
match self.clump.front().unwrap().specific {
SpecificFragmentInfo::UnscannedText(_) => {}
_ => {
debug_assert!(self.clump.len() == 1,
"WAT: can't coalesce non-text nodes in flush_clump_to_list()!");
out_fragments.push(self.clump.pop_front().unwrap());
return false
}
}
// Concatenate all of the transformed strings together, saving the new character indices.
let mut mappings: Vec<RunMapping> = Vec::new();
let runs = {
let fontgroup;
let compression;
let text_transform;
let letter_spacing;
let word_spacing;
let text_rendering;
{
let in_fragment = self.clump.front().unwrap();
let font_style = in_fragment.style().get_font_arc();
let inherited_text_style = in_fragment.style().get_inheritedtext();
fontgroup = font_context.layout_font_group_for_style(font_style);
compression = match in_fragment.white_space() {
white_space::T::normal |
white_space::T::nowrap => CompressionMode::CompressWhitespaceNewline,
white_space::T::pre |
white_space::T::pre_wrap => CompressionMode::CompressNone,
white_space::T::pre_line => CompressionMode::CompressWhitespace,
};
text_transform = inherited_text_style.text_transform;
letter_spacing = inherited_text_style.letter_spacing.0;
word_spacing = inherited_text_style.word_spacing.0.unwrap_or(Au(0));
text_rendering = inherited_text_style.text_rendering;
}
// First, transform/compress text of all the nodes.
let (mut run_info_list, mut run_info) = (Vec::new(), RunInfo::new());
for (fragment_index, in_fragment) in self.clump.iter().enumerate() {
let mut mapping = RunMapping::new(&run_info_list[..], &run_info, fragment_index);
let text;
let insertion_point;
match in_fragment.specific {
SpecificFragmentInfo::UnscannedText(ref text_fragment_info) => {
text = &text_fragment_info.text;
insertion_point = text_fragment_info.insertion_point;
}
_ => panic!("Expected an unscanned text fragment!"),
};
let (mut start_position, mut end_position) = (0, 0);
for character in text.chars() {
// Search for the first font in this font group that contains a glyph for this
// character.
let mut font_index = 0;
// The following code panics one way or another if this condition isn't met.
assert!(fontgroup.fonts.len() > 0);
while font_index < fontgroup.fonts.len() - 1 {
if fontgroup.fonts.get(font_index).unwrap().borrow()
.glyph_index(character)
.is_some() {
break
}
font_index += 1;
}
let bidi_level = match bidi_levels {
Some(levels) => levels[*paragraph_bytes_processed],
None => 0
};
// Break the run if the new character has a different explicit script than the
// previous characters.
//
// TODO: Special handling of paired punctuation characters.
// http://www.unicode.org/reports/tr24/#Common
let script = get_script(character);
let compatible_script = is_compatible(script, run_info.script);
if compatible_script &&!is_specific(run_info.script) && is_specific(script) {
run_info.script = script;
}
// Now, if necessary, flush the mapping we were building up.
if run_info.font_index!= font_index ||
run_info.bidi_level!= bidi_level ||
!compatible_script
{
if end_position > start_position {
mapping.flush(&mut mappings,
&mut run_info,
&**text,
insertion_point,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
if run_info.text.len() > 0 {
run_info_list.push(run_info);
run_info = RunInfo::new();
mapping = RunMapping::new(&run_info_list[..],
&run_info,
fragment_index);
}
run_info.font_index = font_index;
run_info.bidi_level = bidi_level;
run_info.script = script;
}
// Consume this character.
end_position += character.len_utf8();
*paragraph_bytes_processed += character.len_utf8();
}
// If the mapping is zero-length, don't flush it.
if start_position == end_position {
continue
}
// Flush the last mapping we created for this fragment to the list.
mapping.flush(&mut mappings,
&mut run_info,
&**text,
insertion_point,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
// Push the final run info.
run_info_list.push(run_info);
// Per CSS 2.1 § 16.4, "when the resultant space between two characters is not the same
// as the default space, user agents should not use ligatures." This ensures that, for
// example, `finally` with a wide `letter-spacing` renders as `f i n a l l y` and not
// `fi n a l l y`.
let mut flags = ShapingFlags::empty();
match letter_spacing {
Some(Au(0)) | None => {}
Some(_) => flags.insert(IGNORE_LIGATURES_SHAPING_FLAG),
}
if text_rendering == text_rendering::T::optimizespeed {
flags.insert(IGNORE_LIGATURES_SHAPING_FLAG);
flags.insert(DISABLE_KERNING_SHAPING_FLAG)
}
let options = ShapingOptions {
letter_spacing: letter_spacing,
word_spacing: word_spacing,
script: Script::Common,
flags: flags,
};
// FIXME(https://github.com/rust-lang/rust/issues/23338)
run_info_list.into_iter().map(|run_info| {
let mut options = options;
options.script = run_info.script;
if is_rtl(run_info.bidi_level) {
options.flags.insert(RTL_FLAG);
}
let mut font = fontgroup.fonts.get(run_info.font_index).unwrap().borrow_mut();
ScannedTextRun {
run: Arc::new(TextRun::new(&mut *font,
run_info.text,
&options,
run_info.bidi_level)),
insertion_point: run_info.insertion_point,
}
}).collect::<Vec<_>>()
};
// Make new fragments with the runs and adjusted text indices.
debug!("TextRunScanner: pushing {} fragment(s)", self.clump.len());
let mut mappings = mappings.into_iter().peekable();
for (logical_offset, old_fragment) in
mem::replace(&mut self.clump, LinkedList::new()).into_iter().enumerate() {
loop {
match mappings.peek() {
Some(mapping) if mapping.old_fragment_index == logical_offset => {}
Some(_) | None => {
if let Some(ref mut last_fragment) = out_fragments.last_mut() {
last_fragment.meld_with_next_inline_fragment(&old_fragment);
}
break;
}
};
let mut mapping = mappings.next().unwrap();
let scanned_run = runs[mapping.text_run_index].clone();
let requires_line_break_afterward_if_wrapping_on_newlines =
scanned_run.run.text.char_at_reverse(mapping.byte_range.end()) == '\n';
if requires_line_break_afterward_if_wrapping_on_newlines {
mapping.char_range.extend_by(CharIndex(-1));
}
let text_size = old_fragment.border_box.size;
let mut new_text_fragment_info = box ScannedTextFragmentInfo::new(
scanned_run.run,
mapping.char_range,
text_size,
&scanned_run.insertion_point,
requires_line_break_afterward_if_wrapping_on_newlines);
let new_metrics = new_text_fragment_info.run.metrics_for_range(&mapping.char_range);
let writing_mode = old_fragment.style.writing_mode;
let bounding_box_size = bounding_box_for_run_metrics(&new_metrics, writing_mode);
new_text_fragment_info.content_size = bounding_box_size;
let new_fragment = old_fragment.transform(
bounding_box_size,
SpecificFragmentInfo::ScannedText(new_text_fragment_info));
out_fragments.push(new_fragment)
}
}
last_whitespace
}
}
#[inline]
fn bounding_box_for_run_metrics(metrics: &RunMetrics, writing_mode: WritingMode)
-> LogicalSize<Au> {
// This does nothing, but it will fail to build
// when more values are added to the `text-orientation` CSS property.
// This will be a reminder to update the code below.
let dummy: Option<text_orientation::T> = None;
match dummy {
Some(text_orientation::T::sideways_right) |
Some(text_orientation::T::sideways_left) |
Some(text_orientation::T::sideways) |
None => {}
}
// In vertical sideways or horizontal upright text,
// the "width" of text metrics is always inline
// This will need to be updated when other text orientations are supported.
LogicalSize::new(
writing_mode,
metrics.bounding_box.size.width,
metrics.bounding_box.size.height)
}
/// Returns the metrics of the font represented by the given `FontStyle`, respectively.
///
/// `#[inline]` because often the caller only needs a few fields from the font metrics.
#[inline]
pub fn font_metrics_for_style(font_context: &mut FontContext, font_style: Arc<FontStyle>)
-> FontMetrics {
let fontgroup = font_context.layout_font_group_for_style(font_style);
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let font = fontgroup.fonts[0].borrow();
font.metrics.clone()
}
/// Returns the line block-size needed by the given computed style and font size.
pub fn line_height_from_style(style: &ComputedValues, metrics: &FontMetrics) -> Au {
let font_size = style.get_font().font_size;
match style.get_inheritedbox().line_height {
line_height::T::Normal => metrics.line_gap,
line_height::T::Number(l) => font_size.scale_by(l),
line_height::T::Length(l) => l
}
}
fn split_first_fragment_at_newline_if_necessary(fragments: &mut LinkedList<Fragment>) {
if fragments.is_empty() {
return
}
let new_fragment = {
let mut first_fragment = fragments.front_mut().unwrap();
let string_before;
let insertion_point_before;
{
if!first_fragment.white_space().preserve_newlines() {
return;
}
let unscanned_text_fragment_info = match first_fragment.specific {
SpecificFragmentInfo::UnscannedText(ref mut unscanned_text_fragment_info) => {
unscanned_text_fragment_info
}
_ => return,
};
let position = match unscanned_text_fragment_info.text.find('\n') {
Some(position) if position < unscanned_text_fragment_info.text.len() - 1 => {
position
}
Some(_) | None => return,
};
string_before =
unscanned_text_fragment_info.text[..(position + 1)].to_owned();
unscanned_text_fragment_info.text =
unscanned_text_fragment_info.text[(position + 1)..].to_owned().into_boxed_str();
let offset = CharIndex(string_before.char_indices().count() as isize);
match unscanned_text_fragment_info.insertion_point {
Some(insertion_point) if insertion_point >= offset => {
insertion_point_before = None;
unscanned_text_fragment_info.insertion_point = Some(insertion_point - offset);
}
Some(_) | None => {
insertion_point_before = unscanned_text_fragment_info.insertion_point;
unscanned_text_fragment_info.insertion_point = None;
}
};
}
first_fragment.transform(first_fragment.border_box.size,
SpecificFragmentInfo::UnscannedText( | UnscannedTextFragmentInfo::new(string_before,
insertion_point_before))) | random_line_split |
|
text.rs | t_newline_if_necessary(&mut fragments);
self.clump.append(&mut split_off_head(&mut fragments));
}
// Flush that clump to the list of fragments we're building up.
last_whitespace = self.flush_clump_to_list(font_context,
&mut new_fragments,
&mut paragraph_bytes_processed,
bidi_levels,
last_whitespace);
}
debug!("TextRunScanner: complete.");
InlineFragments {
fragments: new_fragments,
}
}
/// A "clump" is a range of inline flow leaves that can be merged together into a single
/// fragment. Adjacent text with the same style can be merged, and nothing else can.
///
/// The flow keeps track of the fragments contained by all non-leaf DOM nodes. This is necessary
/// for correct painting order. Since we compress several leaf fragments here, the mapping must
/// be adjusted.
fn flush_clump_to_list(&mut self,
font_context: &mut FontContext,
out_fragments: &mut Vec<Fragment>,
paragraph_bytes_processed: &mut usize,
bidi_levels: Option<&[u8]>,
mut last_whitespace: bool)
-> bool {
debug!("TextRunScanner: flushing {} fragments in range", self.clump.len());
debug_assert!(!self.clump.is_empty());
match self.clump.front().unwrap().specific {
SpecificFragmentInfo::UnscannedText(_) => {}
_ => {
debug_assert!(self.clump.len() == 1,
"WAT: can't coalesce non-text nodes in flush_clump_to_list()!");
out_fragments.push(self.clump.pop_front().unwrap());
return false
}
}
// Concatenate all of the transformed strings together, saving the new character indices.
let mut mappings: Vec<RunMapping> = Vec::new();
let runs = {
let fontgroup;
let compression;
let text_transform;
let letter_spacing;
let word_spacing;
let text_rendering;
{
let in_fragment = self.clump.front().unwrap();
let font_style = in_fragment.style().get_font_arc();
let inherited_text_style = in_fragment.style().get_inheritedtext();
fontgroup = font_context.layout_font_group_for_style(font_style);
compression = match in_fragment.white_space() {
white_space::T::normal |
white_space::T::nowrap => CompressionMode::CompressWhitespaceNewline,
white_space::T::pre |
white_space::T::pre_wrap => CompressionMode::CompressNone,
white_space::T::pre_line => CompressionMode::CompressWhitespace,
};
text_transform = inherited_text_style.text_transform;
letter_spacing = inherited_text_style.letter_spacing.0;
word_spacing = inherited_text_style.word_spacing.0.unwrap_or(Au(0));
text_rendering = inherited_text_style.text_rendering;
}
// First, transform/compress text of all the nodes.
let (mut run_info_list, mut run_info) = (Vec::new(), RunInfo::new());
for (fragment_index, in_fragment) in self.clump.iter().enumerate() {
let mut mapping = RunMapping::new(&run_info_list[..], &run_info, fragment_index);
let text;
let insertion_point;
match in_fragment.specific {
SpecificFragmentInfo::UnscannedText(ref text_fragment_info) => {
text = &text_fragment_info.text;
insertion_point = text_fragment_info.insertion_point;
}
_ => panic!("Expected an unscanned text fragment!"),
};
let (mut start_position, mut end_position) = (0, 0);
for character in text.chars() {
// Search for the first font in this font group that contains a glyph for this
// character.
let mut font_index = 0;
// The following code panics one way or another if this condition isn't met.
assert!(fontgroup.fonts.len() > 0);
while font_index < fontgroup.fonts.len() - 1 {
if fontgroup.fonts.get(font_index).unwrap().borrow()
.glyph_index(character)
.is_some() {
break
}
font_index += 1;
}
let bidi_level = match bidi_levels {
Some(levels) => levels[*paragraph_bytes_processed],
None => 0
};
// Break the run if the new character has a different explicit script than the
// previous characters.
//
// TODO: Special handling of paired punctuation characters.
// http://www.unicode.org/reports/tr24/#Common
let script = get_script(character);
let compatible_script = is_compatible(script, run_info.script);
if compatible_script &&!is_specific(run_info.script) && is_specific(script) {
run_info.script = script;
}
// Now, if necessary, flush the mapping we were building up.
if run_info.font_index!= font_index ||
run_info.bidi_level!= bidi_level ||
!compatible_script
{
if end_position > start_position {
mapping.flush(&mut mappings,
&mut run_info,
&**text,
insertion_point,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
if run_info.text.len() > 0 {
run_info_list.push(run_info);
run_info = RunInfo::new();
mapping = RunMapping::new(&run_info_list[..],
&run_info,
fragment_index);
}
run_info.font_index = font_index;
run_info.bidi_level = bidi_level;
run_info.script = script;
}
// Consume this character.
end_position += character.len_utf8();
*paragraph_bytes_processed += character.len_utf8();
}
// If the mapping is zero-length, don't flush it.
if start_position == end_position {
continue
}
// Flush the last mapping we created for this fragment to the list.
mapping.flush(&mut mappings,
&mut run_info,
&**text,
insertion_point,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
// Push the final run info.
run_info_list.push(run_info);
// Per CSS 2.1 § 16.4, "when the resultant space between two characters is not the same
// as the default space, user agents should not use ligatures." This ensures that, for
// example, `finally` with a wide `letter-spacing` renders as `f i n a l l y` and not
// `fi n a l l y`.
let mut flags = ShapingFlags::empty();
match letter_spacing {
Some(Au(0)) | None => {}
Some(_) => flags.insert(IGNORE_LIGATURES_SHAPING_FLAG),
}
if text_rendering == text_rendering::T::optimizespeed {
flags.insert(IGNORE_LIGATURES_SHAPING_FLAG);
flags.insert(DISABLE_KERNING_SHAPING_FLAG)
}
let options = ShapingOptions {
letter_spacing: letter_spacing,
word_spacing: word_spacing,
script: Script::Common,
flags: flags,
};
// FIXME(https://github.com/rust-lang/rust/issues/23338)
run_info_list.into_iter().map(|run_info| {
let mut options = options;
options.script = run_info.script;
if is_rtl(run_info.bidi_level) {
options.flags.insert(RTL_FLAG);
}
let mut font = fontgroup.fonts.get(run_info.font_index).unwrap().borrow_mut();
ScannedTextRun {
run: Arc::new(TextRun::new(&mut *font,
run_info.text,
&options,
run_info.bidi_level)),
insertion_point: run_info.insertion_point,
}
}).collect::<Vec<_>>()
};
// Make new fragments with the runs and adjusted text indices.
debug!("TextRunScanner: pushing {} fragment(s)", self.clump.len());
let mut mappings = mappings.into_iter().peekable();
for (logical_offset, old_fragment) in
mem::replace(&mut self.clump, LinkedList::new()).into_iter().enumerate() {
loop {
match mappings.peek() {
Some(mapping) if mapping.old_fragment_index == logical_offset => {}
Some(_) | None => {
if let Some(ref mut last_fragment) = out_fragments.last_mut() {
last_fragment.meld_with_next_inline_fragment(&old_fragment);
}
break;
}
};
let mut mapping = mappings.next().unwrap();
let scanned_run = runs[mapping.text_run_index].clone();
let requires_line_break_afterward_if_wrapping_on_newlines =
scanned_run.run.text.char_at_reverse(mapping.byte_range.end()) == '\n';
if requires_line_break_afterward_if_wrapping_on_newlines {
mapping.char_range.extend_by(CharIndex(-1));
}
let text_size = old_fragment.border_box.size;
let mut new_text_fragment_info = box ScannedTextFragmentInfo::new(
scanned_run.run,
mapping.char_range,
text_size,
&scanned_run.insertion_point,
requires_line_break_afterward_if_wrapping_on_newlines);
let new_metrics = new_text_fragment_info.run.metrics_for_range(&mapping.char_range);
let writing_mode = old_fragment.style.writing_mode;
let bounding_box_size = bounding_box_for_run_metrics(&new_metrics, writing_mode);
new_text_fragment_info.content_size = bounding_box_size;
let new_fragment = old_fragment.transform(
bounding_box_size,
SpecificFragmentInfo::ScannedText(new_text_fragment_info));
out_fragments.push(new_fragment)
}
}
last_whitespace
}
}
#[inline]
fn bounding_box_for_run_metrics(metrics: &RunMetrics, writing_mode: WritingMode)
-> LogicalSize<Au> {
// This does nothing, but it will fail to build
// when more values are added to the `text-orientation` CSS property.
// This will be a reminder to update the code below.
let dummy: Option<text_orientation::T> = None;
match dummy {
Some(text_orientation::T::sideways_right) |
Some(text_orientation::T::sideways_left) |
Some(text_orientation::T::sideways) |
None => {}
}
// In vertical sideways or horizontal upright text,
// the "width" of text metrics is always inline
// This will need to be updated when other text orientations are supported.
LogicalSize::new(
writing_mode,
metrics.bounding_box.size.width,
metrics.bounding_box.size.height)
}
/// Returns the metrics of the font represented by the given `FontStyle`, respectively.
///
/// `#[inline]` because often the caller only needs a few fields from the font metrics.
#[inline]
pub fn font_metrics_for_style(font_context: &mut FontContext, font_style: Arc<FontStyle>)
-> FontMetrics {
let fontgroup = font_context.layout_font_group_for_style(font_style);
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let font = fontgroup.fonts[0].borrow();
font.metrics.clone()
}
/// Returns the line block-size needed by the given computed style and font size.
pub fn lin | yle: &ComputedValues, metrics: &FontMetrics) -> Au {
let font_size = style.get_font().font_size;
match style.get_inheritedbox().line_height {
line_height::T::Normal => metrics.line_gap,
line_height::T::Number(l) => font_size.scale_by(l),
line_height::T::Length(l) => l
}
}
fn split_first_fragment_at_newline_if_necessary(fragments: &mut LinkedList<Fragment>) {
if fragments.is_empty() {
return
}
let new_fragment = {
let mut first_fragment = fragments.front_mut().unwrap();
let string_before;
let insertion_point_before;
{
if!first_fragment.white_space().preserve_newlines() {
return;
}
let unscanned_text_fragment_info = match first_fragment.specific {
SpecificFragmentInfo::UnscannedText(ref mut unscanned_text_fragment_info) => {
unscanned_text_fragment_info
}
_ => return,
};
let position = match unscanned_text_fragment_info.text.find('\n') {
Some(position) if position < unscanned_text_fragment_info.text.len() - 1 => {
position
}
Some(_) | None => return,
};
string_before =
unscanned_text_fragment_info.text[..(position + 1)].to_owned();
unscanned_text_fragment_info.text =
unscanned_text_fragment_info.text[(position + 1)..].to_owned().into_boxed_str();
let offset = CharIndex(string_before.char_indices().count() as isize);
match unscanned_text_fragment_info.insertion_point {
Some(insertion_point) if insertion_point >= offset => {
insertion_point_before = None;
| e_height_from_style(st | identifier_name |
text.rs | at_newline_if_necessary(&mut fragments);
self.clump.append(&mut split_off_head(&mut fragments));
}
// Flush that clump to the list of fragments we're building up.
last_whitespace = self.flush_clump_to_list(font_context,
&mut new_fragments,
&mut paragraph_bytes_processed,
bidi_levels,
last_whitespace);
}
debug!("TextRunScanner: complete.");
InlineFragments {
fragments: new_fragments,
}
}
/// A "clump" is a range of inline flow leaves that can be merged together into a single
/// fragment. Adjacent text with the same style can be merged, and nothing else can.
///
/// The flow keeps track of the fragments contained by all non-leaf DOM nodes. This is necessary
/// for correct painting order. Since we compress several leaf fragments here, the mapping must
/// be adjusted.
fn flush_clump_to_list(&mut self,
font_context: &mut FontContext,
out_fragments: &mut Vec<Fragment>,
paragraph_bytes_processed: &mut usize,
bidi_levels: Option<&[u8]>,
mut last_whitespace: bool)
-> bool {
debug!("TextRunScanner: flushing {} fragments in range", self.clump.len());
debug_assert!(!self.clump.is_empty());
match self.clump.front().unwrap().specific {
SpecificFragmentInfo::UnscannedText(_) => {}
_ => {
debug_assert!(self.clump.len() == 1,
"WAT: can't coalesce non-text nodes in flush_clump_to_list()!");
out_fragments.push(self.clump.pop_front().unwrap());
return false
}
}
// Concatenate all of the transformed strings together, saving the new character indices.
let mut mappings: Vec<RunMapping> = Vec::new();
let runs = {
let fontgroup;
let compression;
let text_transform;
let letter_spacing;
let word_spacing;
let text_rendering;
{
let in_fragment = self.clump.front().unwrap();
let font_style = in_fragment.style().get_font_arc();
let inherited_text_style = in_fragment.style().get_inheritedtext();
fontgroup = font_context.layout_font_group_for_style(font_style);
compression = match in_fragment.white_space() {
white_space::T::normal |
white_space::T::nowrap => CompressionMode::CompressWhitespaceNewline,
white_space::T::pre |
white_space::T::pre_wrap => CompressionMode::CompressNone,
white_space::T::pre_line => CompressionMode::CompressWhitespace,
};
text_transform = inherited_text_style.text_transform;
letter_spacing = inherited_text_style.letter_spacing.0;
word_spacing = inherited_text_style.word_spacing.0.unwrap_or(Au(0));
text_rendering = inherited_text_style.text_rendering;
}
// First, transform/compress text of all the nodes.
let (mut run_info_list, mut run_info) = (Vec::new(), RunInfo::new());
for (fragment_index, in_fragment) in self.clump.iter().enumerate() {
let mut mapping = RunMapping::new(&run_info_list[..], &run_info, fragment_index);
let text;
let insertion_point;
match in_fragment.specific {
SpecificFragmentInfo::UnscannedText(ref text_fragment_info) => |
_ => panic!("Expected an unscanned text fragment!"),
};
let (mut start_position, mut end_position) = (0, 0);
for character in text.chars() {
// Search for the first font in this font group that contains a glyph for this
// character.
let mut font_index = 0;
// The following code panics one way or another if this condition isn't met.
assert!(fontgroup.fonts.len() > 0);
while font_index < fontgroup.fonts.len() - 1 {
if fontgroup.fonts.get(font_index).unwrap().borrow()
.glyph_index(character)
.is_some() {
break
}
font_index += 1;
}
let bidi_level = match bidi_levels {
Some(levels) => levels[*paragraph_bytes_processed],
None => 0
};
// Break the run if the new character has a different explicit script than the
// previous characters.
//
// TODO: Special handling of paired punctuation characters.
// http://www.unicode.org/reports/tr24/#Common
let script = get_script(character);
let compatible_script = is_compatible(script, run_info.script);
if compatible_script &&!is_specific(run_info.script) && is_specific(script) {
run_info.script = script;
}
// Now, if necessary, flush the mapping we were building up.
if run_info.font_index!= font_index ||
run_info.bidi_level!= bidi_level ||
!compatible_script
{
if end_position > start_position {
mapping.flush(&mut mappings,
&mut run_info,
&**text,
insertion_point,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
if run_info.text.len() > 0 {
run_info_list.push(run_info);
run_info = RunInfo::new();
mapping = RunMapping::new(&run_info_list[..],
&run_info,
fragment_index);
}
run_info.font_index = font_index;
run_info.bidi_level = bidi_level;
run_info.script = script;
}
// Consume this character.
end_position += character.len_utf8();
*paragraph_bytes_processed += character.len_utf8();
}
// If the mapping is zero-length, don't flush it.
if start_position == end_position {
continue
}
// Flush the last mapping we created for this fragment to the list.
mapping.flush(&mut mappings,
&mut run_info,
&**text,
insertion_point,
compression,
text_transform,
&mut last_whitespace,
&mut start_position,
end_position);
}
// Push the final run info.
run_info_list.push(run_info);
// Per CSS 2.1 § 16.4, "when the resultant space between two characters is not the same
// as the default space, user agents should not use ligatures." This ensures that, for
// example, `finally` with a wide `letter-spacing` renders as `f i n a l l y` and not
// `fi n a l l y`.
let mut flags = ShapingFlags::empty();
match letter_spacing {
Some(Au(0)) | None => {}
Some(_) => flags.insert(IGNORE_LIGATURES_SHAPING_FLAG),
}
if text_rendering == text_rendering::T::optimizespeed {
flags.insert(IGNORE_LIGATURES_SHAPING_FLAG);
flags.insert(DISABLE_KERNING_SHAPING_FLAG)
}
let options = ShapingOptions {
letter_spacing: letter_spacing,
word_spacing: word_spacing,
script: Script::Common,
flags: flags,
};
// FIXME(https://github.com/rust-lang/rust/issues/23338)
run_info_list.into_iter().map(|run_info| {
let mut options = options;
options.script = run_info.script;
if is_rtl(run_info.bidi_level) {
options.flags.insert(RTL_FLAG);
}
let mut font = fontgroup.fonts.get(run_info.font_index).unwrap().borrow_mut();
ScannedTextRun {
run: Arc::new(TextRun::new(&mut *font,
run_info.text,
&options,
run_info.bidi_level)),
insertion_point: run_info.insertion_point,
}
}).collect::<Vec<_>>()
};
// Make new fragments with the runs and adjusted text indices.
debug!("TextRunScanner: pushing {} fragment(s)", self.clump.len());
let mut mappings = mappings.into_iter().peekable();
for (logical_offset, old_fragment) in
mem::replace(&mut self.clump, LinkedList::new()).into_iter().enumerate() {
loop {
match mappings.peek() {
Some(mapping) if mapping.old_fragment_index == logical_offset => {}
Some(_) | None => {
if let Some(ref mut last_fragment) = out_fragments.last_mut() {
last_fragment.meld_with_next_inline_fragment(&old_fragment);
}
break;
}
};
let mut mapping = mappings.next().unwrap();
let scanned_run = runs[mapping.text_run_index].clone();
let requires_line_break_afterward_if_wrapping_on_newlines =
scanned_run.run.text.char_at_reverse(mapping.byte_range.end()) == '\n';
if requires_line_break_afterward_if_wrapping_on_newlines {
mapping.char_range.extend_by(CharIndex(-1));
}
let text_size = old_fragment.border_box.size;
let mut new_text_fragment_info = box ScannedTextFragmentInfo::new(
scanned_run.run,
mapping.char_range,
text_size,
&scanned_run.insertion_point,
requires_line_break_afterward_if_wrapping_on_newlines);
let new_metrics = new_text_fragment_info.run.metrics_for_range(&mapping.char_range);
let writing_mode = old_fragment.style.writing_mode;
let bounding_box_size = bounding_box_for_run_metrics(&new_metrics, writing_mode);
new_text_fragment_info.content_size = bounding_box_size;
let new_fragment = old_fragment.transform(
bounding_box_size,
SpecificFragmentInfo::ScannedText(new_text_fragment_info));
out_fragments.push(new_fragment)
}
}
last_whitespace
}
}
#[inline]
fn bounding_box_for_run_metrics(metrics: &RunMetrics, writing_mode: WritingMode)
-> LogicalSize<Au> {
// This does nothing, but it will fail to build
// when more values are added to the `text-orientation` CSS property.
// This will be a reminder to update the code below.
let dummy: Option<text_orientation::T> = None;
match dummy {
Some(text_orientation::T::sideways_right) |
Some(text_orientation::T::sideways_left) |
Some(text_orientation::T::sideways) |
None => {}
}
// In vertical sideways or horizontal upright text,
// the "width" of text metrics is always inline
// This will need to be updated when other text orientations are supported.
LogicalSize::new(
writing_mode,
metrics.bounding_box.size.width,
metrics.bounding_box.size.height)
}
/// Returns the metrics of the font represented by the given `FontStyle`, respectively.
///
/// `#[inline]` because often the caller only needs a few fields from the font metrics.
#[inline]
pub fn font_metrics_for_style(font_context: &mut FontContext, font_style: Arc<FontStyle>)
-> FontMetrics {
let fontgroup = font_context.layout_font_group_for_style(font_style);
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let font = fontgroup.fonts[0].borrow();
font.metrics.clone()
}
/// Returns the line block-size needed by the given computed style and font size.
pub fn line_height_from_style(style: &ComputedValues, metrics: &FontMetrics) -> Au {
let font_size = style.get_font().font_size;
match style.get_inheritedbox().line_height {
line_height::T::Normal => metrics.line_gap,
line_height::T::Number(l) => font_size.scale_by(l),
line_height::T::Length(l) => l
}
}
fn split_first_fragment_at_newline_if_necessary(fragments: &mut LinkedList<Fragment>) {
if fragments.is_empty() {
return
}
let new_fragment = {
let mut first_fragment = fragments.front_mut().unwrap();
let string_before;
let insertion_point_before;
{
if!first_fragment.white_space().preserve_newlines() {
return;
}
let unscanned_text_fragment_info = match first_fragment.specific {
SpecificFragmentInfo::UnscannedText(ref mut unscanned_text_fragment_info) => {
unscanned_text_fragment_info
}
_ => return,
};
let position = match unscanned_text_fragment_info.text.find('\n') {
Some(position) if position < unscanned_text_fragment_info.text.len() - 1 => {
position
}
Some(_) | None => return,
};
string_before =
unscanned_text_fragment_info.text[..(position + 1)].to_owned();
unscanned_text_fragment_info.text =
unscanned_text_fragment_info.text[(position + 1)..].to_owned().into_boxed_str();
let offset = CharIndex(string_before.char_indices().count() as isize);
match unscanned_text_fragment_info.insertion_point {
Some(insertion_point) if insertion_point >= offset => {
insertion_point_before = None;
| {
text = &text_fragment_info.text;
insertion_point = text_fragment_info.insertion_point;
} | conditional_block |
progress.rs | //! A simple progress meter.
//!
//! Records updates of number of files visited, and number of bytes
//! processed. When given an estimate, printes a simple periodic report of
//! how far along we think we are.
use env_logger::Builder;
use lazy_static::lazy_static;
use log::Log;
use std::{
io::{stdout, Write},
sync::Mutex,
};
use time::{Duration, OffsetDateTime};
// The Rust logging system (log crate) only allows a single logger to be
// logged once. If we want to capture this, it has to be done before any
// logger is initialized. Globally, within a mutex, we keep this simple
// state of what is happening.
struct State {
// The last message printed. Since an empty string an no message are
// the same thing, we don't worry about having an option here.
message: String,
// When we next expect to update the message.
next_update: OffsetDateTime,
// Set to true if the logging system has been initialized.
is_logging: bool,
}
// The SafeLogger wraps another logger, coordinating the logging with the
// state to properly interleave logs and messages.
struct SafeLogger {
inner: Box<dyn Log>,
}
/// Initialize the standard logger, based on `env_logger::init()`, but
/// coordinated with any progress meters. Like `init`, this will panic if
/// the logging system has already been initialized.
pub fn log_init() {
let mut st = STATE.lock().unwrap();
let inner = Builder::from_default_env().build();
let max_level = inner.filter();
let logger = SafeLogger {
inner: Box::new(inner),
};
log::set_boxed_logger(Box::new(logger)).expect("Set Logger");
log::set_max_level(max_level);
st.is_logging = true;
st.next_update = update_interval(true);
}
// There are two update intervals, depending on whether we are logging.
fn update_interval(is_logging: bool) -> OffsetDateTime {
if is_logging {
OffsetDateTime::now_utc() + Duration::milliseconds(250)
} else {
OffsetDateTime::now_utc() + Duration::seconds(5)
}
}
lazy_static! {
// The current global state.
static ref STATE: Mutex<State> = Mutex::new(State {
message: String::new(),
next_update: update_interval(false),
is_logging: false,
});
}
impl State {
/// Called to advance to the next message, sets the update time
/// appropriately.
fn next(&mut self) {
self.next_update = update_interval(self.is_logging);
}
/// Clears the visual text of the current message (but not the message
/// buffer itself, so that it can be redisplayed if needed).
fn clear(&self) {
for ch in self.message.chars() {
if ch == '\n' {
print!("\x1b[1A\x1b[2K");
}
}
stdout().flush().expect("safe stdout write");
}
/// Update the current message.
fn update(&mut self, message: String) {
self.clear();
self.message = message;
print!("{}", self.message);
stdout().flush().expect("safe stdout write");
self.next();
}
/// Indicates if the time has expired and another update should be
/// done. This can be used where the formatting/allocation of the
/// update message would be slower than the possible system call needed
/// to determine the current time.
fn need_update(&self) -> bool {
OffsetDateTime::now_utc() >= self.next_update
}
}
impl Log for SafeLogger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
self.inner.enabled(metadata)
}
fn log(&self, record: &log::Record) {
let enabled = self.inner.enabled(record.metadata());
if enabled {
let st = STATE.lock().unwrap();
st.clear();
self.inner.log(record);
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
fn flush(&self) {
let st = STATE.lock().unwrap();
st.clear();
self.inner.flush();
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
pub struct Progress {
cur_files: u64,
total_files: u64,
cur_bytes: u64,
total_bytes: u64,
}
impl Progress {
/// Construct a progress meter, with the given number of files and
/// bytes as an estimate.
pub fn new(files: u64, bytes: u64) -> Progress {
Progress {
cur_files: 0,
total_files: files,
cur_bytes: 0,
total_bytes: bytes,
}
}
/// Update the progress meter.
pub fn update(&mut self, files: u64, bytes: u64) {
self.cur_files += files;
self.cur_bytes += bytes;
let mut st = STATE.lock().unwrap();
if st.need_update() {
st.update(self.message());
}
}
/// Flush the output, regardless of if any update is needed.
pub fn flush(&mut self) {
let mut st = STATE.lock().unwrap();
st.update(self.message());
// Clear the current message so that we don't clear out the shown
// message.
st.message.clear();
}
pub fn message(&self) -> String {
format!(
"{:7}/{:7} ({:5.1}%) files, {}/{} ({:5.1}%) bytes\n",
self.cur_files,
self.total_files,
(self.cur_files as f64 * 100.0) / self.total_files as f64,
humanize(self.cur_bytes),
humanize(self.total_bytes),
(self.cur_bytes as f64 * 100.0) / self.total_bytes as f64
)
}
}
/// A progress meter used when initially scanning.
pub struct ScanProgress {
dirs: u64,
files: u64,
bytes: u64,
}
impl ScanProgress {
/// Construct a new scanning progress meter.
pub fn new() -> ScanProgress {
ScanProgress {
dirs: 0,
files: 0,
bytes: 0,
}
}
/// Update the meter.
pub fn update(&mut self, dirs: u64, files: u64, bytes: u64) {
self.dirs += dirs;
self.files += files;
self.bytes += bytes;
let mut st = STATE.lock().unwrap();
if st.need_update() {
st.update(self.message());
}
}
fn message(&self) -> String {
format!(
"scan: {} dirs {} files, {} bytes\n",
self.dirs,
self.files,
humanize(self.bytes)
)
}
}
impl Drop for ScanProgress {
fn drop(&mut self) {
let mut st = STATE.lock().unwrap();
st.update(self.message());
st.message.clear();
}
}
/// Print a size in a more human-friendly format.
pub fn humanize(value: u64) -> String |
format!("{:6.*}{}", precision, value, UNITS[unit])
}
| {
let mut value = value as f64;
let mut unit = 0;
while value > 1024.0 {
value /= 1024.0;
unit += 1;
}
static UNITS: [&str; 9] = [
"B ", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB",
];
let precision = if value < 10.0 {
3
} else if value < 100.0 {
2
} else {
1
}; | identifier_body |
progress.rs | //! A simple progress meter.
//!
//! Records updates of number of files visited, and number of bytes
//! processed. When given an estimate, printes a simple periodic report of
//! how far along we think we are.
use env_logger::Builder;
use lazy_static::lazy_static;
use log::Log;
use std::{
io::{stdout, Write},
sync::Mutex,
};
use time::{Duration, OffsetDateTime};
// The Rust logging system (log crate) only allows a single logger to be
// logged once. If we want to capture this, it has to be done before any
// logger is initialized. Globally, within a mutex, we keep this simple
// state of what is happening.
struct State {
// The last message printed. Since an empty string an no message are
// the same thing, we don't worry about having an option here.
message: String,
// When we next expect to update the message.
next_update: OffsetDateTime,
// Set to true if the logging system has been initialized.
is_logging: bool,
}
// The SafeLogger wraps another logger, coordinating the logging with the
// state to properly interleave logs and messages.
struct SafeLogger {
inner: Box<dyn Log>,
}
/// Initialize the standard logger, based on `env_logger::init()`, but
/// coordinated with any progress meters. Like `init`, this will panic if
/// the logging system has already been initialized.
pub fn log_init() {
let mut st = STATE.lock().unwrap();
let inner = Builder::from_default_env().build();
let max_level = inner.filter();
let logger = SafeLogger {
inner: Box::new(inner),
};
log::set_boxed_logger(Box::new(logger)).expect("Set Logger");
log::set_max_level(max_level);
st.is_logging = true;
st.next_update = update_interval(true);
}
// There are two update intervals, depending on whether we are logging.
fn update_interval(is_logging: bool) -> OffsetDateTime {
if is_logging {
OffsetDateTime::now_utc() + Duration::milliseconds(250)
} else {
OffsetDateTime::now_utc() + Duration::seconds(5)
}
}
lazy_static! {
// The current global state.
static ref STATE: Mutex<State> = Mutex::new(State { | next_update: update_interval(false),
is_logging: false,
});
}
impl State {
/// Called to advance to the next message, sets the update time
/// appropriately.
fn next(&mut self) {
self.next_update = update_interval(self.is_logging);
}
/// Clears the visual text of the current message (but not the message
/// buffer itself, so that it can be redisplayed if needed).
fn clear(&self) {
for ch in self.message.chars() {
if ch == '\n' {
print!("\x1b[1A\x1b[2K");
}
}
stdout().flush().expect("safe stdout write");
}
/// Update the current message.
fn update(&mut self, message: String) {
self.clear();
self.message = message;
print!("{}", self.message);
stdout().flush().expect("safe stdout write");
self.next();
}
/// Indicates if the time has expired and another update should be
/// done. This can be used where the formatting/allocation of the
/// update message would be slower than the possible system call needed
/// to determine the current time.
fn need_update(&self) -> bool {
OffsetDateTime::now_utc() >= self.next_update
}
}
impl Log for SafeLogger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
self.inner.enabled(metadata)
}
fn log(&self, record: &log::Record) {
let enabled = self.inner.enabled(record.metadata());
if enabled {
let st = STATE.lock().unwrap();
st.clear();
self.inner.log(record);
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
fn flush(&self) {
let st = STATE.lock().unwrap();
st.clear();
self.inner.flush();
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
pub struct Progress {
cur_files: u64,
total_files: u64,
cur_bytes: u64,
total_bytes: u64,
}
impl Progress {
/// Construct a progress meter, with the given number of files and
/// bytes as an estimate.
pub fn new(files: u64, bytes: u64) -> Progress {
Progress {
cur_files: 0,
total_files: files,
cur_bytes: 0,
total_bytes: bytes,
}
}
/// Update the progress meter.
pub fn update(&mut self, files: u64, bytes: u64) {
self.cur_files += files;
self.cur_bytes += bytes;
let mut st = STATE.lock().unwrap();
if st.need_update() {
st.update(self.message());
}
}
/// Flush the output, regardless of if any update is needed.
pub fn flush(&mut self) {
let mut st = STATE.lock().unwrap();
st.update(self.message());
// Clear the current message so that we don't clear out the shown
// message.
st.message.clear();
}
pub fn message(&self) -> String {
format!(
"{:7}/{:7} ({:5.1}%) files, {}/{} ({:5.1}%) bytes\n",
self.cur_files,
self.total_files,
(self.cur_files as f64 * 100.0) / self.total_files as f64,
humanize(self.cur_bytes),
humanize(self.total_bytes),
(self.cur_bytes as f64 * 100.0) / self.total_bytes as f64
)
}
}
/// A progress meter used when initially scanning.
pub struct ScanProgress {
dirs: u64,
files: u64,
bytes: u64,
}
impl ScanProgress {
/// Construct a new scanning progress meter.
pub fn new() -> ScanProgress {
ScanProgress {
dirs: 0,
files: 0,
bytes: 0,
}
}
/// Update the meter.
pub fn update(&mut self, dirs: u64, files: u64, bytes: u64) {
self.dirs += dirs;
self.files += files;
self.bytes += bytes;
let mut st = STATE.lock().unwrap();
if st.need_update() {
st.update(self.message());
}
}
fn message(&self) -> String {
format!(
"scan: {} dirs {} files, {} bytes\n",
self.dirs,
self.files,
humanize(self.bytes)
)
}
}
impl Drop for ScanProgress {
fn drop(&mut self) {
let mut st = STATE.lock().unwrap();
st.update(self.message());
st.message.clear();
}
}
/// Print a size in a more human-friendly format.
pub fn humanize(value: u64) -> String {
let mut value = value as f64;
let mut unit = 0;
while value > 1024.0 {
value /= 1024.0;
unit += 1;
}
static UNITS: [&str; 9] = [
"B ", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB",
];
let precision = if value < 10.0 {
3
} else if value < 100.0 {
2
} else {
1
};
format!("{:6.*}{}", precision, value, UNITS[unit])
} | message: String::new(), | random_line_split |
progress.rs | //! A simple progress meter.
//!
//! Records updates of number of files visited, and number of bytes
//! processed. When given an estimate, printes a simple periodic report of
//! how far along we think we are.
use env_logger::Builder;
use lazy_static::lazy_static;
use log::Log;
use std::{
io::{stdout, Write},
sync::Mutex,
};
use time::{Duration, OffsetDateTime};
// The Rust logging system (log crate) only allows a single logger to be
// logged once. If we want to capture this, it has to be done before any
// logger is initialized. Globally, within a mutex, we keep this simple
// state of what is happening.
struct State {
// The last message printed. Since an empty string an no message are
// the same thing, we don't worry about having an option here.
message: String,
// When we next expect to update the message.
next_update: OffsetDateTime,
// Set to true if the logging system has been initialized.
is_logging: bool,
}
// The SafeLogger wraps another logger, coordinating the logging with the
// state to properly interleave logs and messages.
struct SafeLogger {
inner: Box<dyn Log>,
}
/// Initialize the standard logger, based on `env_logger::init()`, but
/// coordinated with any progress meters. Like `init`, this will panic if
/// the logging system has already been initialized.
pub fn log_init() {
let mut st = STATE.lock().unwrap();
let inner = Builder::from_default_env().build();
let max_level = inner.filter();
let logger = SafeLogger {
inner: Box::new(inner),
};
log::set_boxed_logger(Box::new(logger)).expect("Set Logger");
log::set_max_level(max_level);
st.is_logging = true;
st.next_update = update_interval(true);
}
// There are two update intervals, depending on whether we are logging.
fn update_interval(is_logging: bool) -> OffsetDateTime {
if is_logging {
OffsetDateTime::now_utc() + Duration::milliseconds(250)
} else {
OffsetDateTime::now_utc() + Duration::seconds(5)
}
}
lazy_static! {
// The current global state.
static ref STATE: Mutex<State> = Mutex::new(State {
message: String::new(),
next_update: update_interval(false),
is_logging: false,
});
}
impl State {
/// Called to advance to the next message, sets the update time
/// appropriately.
fn | (&mut self) {
self.next_update = update_interval(self.is_logging);
}
/// Clears the visual text of the current message (but not the message
/// buffer itself, so that it can be redisplayed if needed).
fn clear(&self) {
for ch in self.message.chars() {
if ch == '\n' {
print!("\x1b[1A\x1b[2K");
}
}
stdout().flush().expect("safe stdout write");
}
/// Update the current message.
fn update(&mut self, message: String) {
self.clear();
self.message = message;
print!("{}", self.message);
stdout().flush().expect("safe stdout write");
self.next();
}
/// Indicates if the time has expired and another update should be
/// done. This can be used where the formatting/allocation of the
/// update message would be slower than the possible system call needed
/// to determine the current time.
fn need_update(&self) -> bool {
OffsetDateTime::now_utc() >= self.next_update
}
}
impl Log for SafeLogger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
self.inner.enabled(metadata)
}
fn log(&self, record: &log::Record) {
let enabled = self.inner.enabled(record.metadata());
if enabled {
let st = STATE.lock().unwrap();
st.clear();
self.inner.log(record);
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
fn flush(&self) {
let st = STATE.lock().unwrap();
st.clear();
self.inner.flush();
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
pub struct Progress {
cur_files: u64,
total_files: u64,
cur_bytes: u64,
total_bytes: u64,
}
impl Progress {
/// Construct a progress meter, with the given number of files and
/// bytes as an estimate.
pub fn new(files: u64, bytes: u64) -> Progress {
Progress {
cur_files: 0,
total_files: files,
cur_bytes: 0,
total_bytes: bytes,
}
}
/// Update the progress meter.
pub fn update(&mut self, files: u64, bytes: u64) {
self.cur_files += files;
self.cur_bytes += bytes;
let mut st = STATE.lock().unwrap();
if st.need_update() {
st.update(self.message());
}
}
/// Flush the output, regardless of if any update is needed.
pub fn flush(&mut self) {
let mut st = STATE.lock().unwrap();
st.update(self.message());
// Clear the current message so that we don't clear out the shown
// message.
st.message.clear();
}
pub fn message(&self) -> String {
format!(
"{:7}/{:7} ({:5.1}%) files, {}/{} ({:5.1}%) bytes\n",
self.cur_files,
self.total_files,
(self.cur_files as f64 * 100.0) / self.total_files as f64,
humanize(self.cur_bytes),
humanize(self.total_bytes),
(self.cur_bytes as f64 * 100.0) / self.total_bytes as f64
)
}
}
/// A progress meter used when initially scanning.
pub struct ScanProgress {
dirs: u64,
files: u64,
bytes: u64,
}
impl ScanProgress {
/// Construct a new scanning progress meter.
pub fn new() -> ScanProgress {
ScanProgress {
dirs: 0,
files: 0,
bytes: 0,
}
}
/// Update the meter.
pub fn update(&mut self, dirs: u64, files: u64, bytes: u64) {
self.dirs += dirs;
self.files += files;
self.bytes += bytes;
let mut st = STATE.lock().unwrap();
if st.need_update() {
st.update(self.message());
}
}
fn message(&self) -> String {
format!(
"scan: {} dirs {} files, {} bytes\n",
self.dirs,
self.files,
humanize(self.bytes)
)
}
}
impl Drop for ScanProgress {
fn drop(&mut self) {
let mut st = STATE.lock().unwrap();
st.update(self.message());
st.message.clear();
}
}
/// Print a size in a more human-friendly format.
pub fn humanize(value: u64) -> String {
let mut value = value as f64;
let mut unit = 0;
while value > 1024.0 {
value /= 1024.0;
unit += 1;
}
static UNITS: [&str; 9] = [
"B ", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB",
];
let precision = if value < 10.0 {
3
} else if value < 100.0 {
2
} else {
1
};
format!("{:6.*}{}", precision, value, UNITS[unit])
}
| next | identifier_name |
progress.rs | //! A simple progress meter.
//!
//! Records updates of number of files visited, and number of bytes
//! processed. When given an estimate, printes a simple periodic report of
//! how far along we think we are.
use env_logger::Builder;
use lazy_static::lazy_static;
use log::Log;
use std::{
io::{stdout, Write},
sync::Mutex,
};
use time::{Duration, OffsetDateTime};
// The Rust logging system (log crate) only allows a single logger to be
// logged once. If we want to capture this, it has to be done before any
// logger is initialized. Globally, within a mutex, we keep this simple
// state of what is happening.
struct State {
// The last message printed. Since an empty string an no message are
// the same thing, we don't worry about having an option here.
message: String,
// When we next expect to update the message.
next_update: OffsetDateTime,
// Set to true if the logging system has been initialized.
is_logging: bool,
}
// The SafeLogger wraps another logger, coordinating the logging with the
// state to properly interleave logs and messages.
struct SafeLogger {
inner: Box<dyn Log>,
}
/// Initialize the standard logger, based on `env_logger::init()`, but
/// coordinated with any progress meters. Like `init`, this will panic if
/// the logging system has already been initialized.
pub fn log_init() {
let mut st = STATE.lock().unwrap();
let inner = Builder::from_default_env().build();
let max_level = inner.filter();
let logger = SafeLogger {
inner: Box::new(inner),
};
log::set_boxed_logger(Box::new(logger)).expect("Set Logger");
log::set_max_level(max_level);
st.is_logging = true;
st.next_update = update_interval(true);
}
// There are two update intervals, depending on whether we are logging.
fn update_interval(is_logging: bool) -> OffsetDateTime {
if is_logging {
OffsetDateTime::now_utc() + Duration::milliseconds(250)
} else |
}
lazy_static! {
// The current global state.
static ref STATE: Mutex<State> = Mutex::new(State {
message: String::new(),
next_update: update_interval(false),
is_logging: false,
});
}
impl State {
/// Called to advance to the next message, sets the update time
/// appropriately.
fn next(&mut self) {
self.next_update = update_interval(self.is_logging);
}
/// Clears the visual text of the current message (but not the message
/// buffer itself, so that it can be redisplayed if needed).
fn clear(&self) {
for ch in self.message.chars() {
if ch == '\n' {
print!("\x1b[1A\x1b[2K");
}
}
stdout().flush().expect("safe stdout write");
}
/// Update the current message.
fn update(&mut self, message: String) {
self.clear();
self.message = message;
print!("{}", self.message);
stdout().flush().expect("safe stdout write");
self.next();
}
/// Indicates if the time has expired and another update should be
/// done. This can be used where the formatting/allocation of the
/// update message would be slower than the possible system call needed
/// to determine the current time.
fn need_update(&self) -> bool {
OffsetDateTime::now_utc() >= self.next_update
}
}
impl Log for SafeLogger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
self.inner.enabled(metadata)
}
fn log(&self, record: &log::Record) {
let enabled = self.inner.enabled(record.metadata());
if enabled {
let st = STATE.lock().unwrap();
st.clear();
self.inner.log(record);
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
fn flush(&self) {
let st = STATE.lock().unwrap();
st.clear();
self.inner.flush();
print!("{}", st.message);
stdout().flush().expect("safe stdout write");
}
}
pub struct Progress {
cur_files: u64,
total_files: u64,
cur_bytes: u64,
total_bytes: u64,
}
impl Progress {
/// Construct a progress meter, with the given number of files and
/// bytes as an estimate.
pub fn new(files: u64, bytes: u64) -> Progress {
Progress {
cur_files: 0,
total_files: files,
cur_bytes: 0,
total_bytes: bytes,
}
}
/// Update the progress meter.
pub fn update(&mut self, files: u64, bytes: u64) {
self.cur_files += files;
self.cur_bytes += bytes;
let mut st = STATE.lock().unwrap();
if st.need_update() {
st.update(self.message());
}
}
/// Flush the output, regardless of if any update is needed.
pub fn flush(&mut self) {
let mut st = STATE.lock().unwrap();
st.update(self.message());
// Clear the current message so that we don't clear out the shown
// message.
st.message.clear();
}
pub fn message(&self) -> String {
format!(
"{:7}/{:7} ({:5.1}%) files, {}/{} ({:5.1}%) bytes\n",
self.cur_files,
self.total_files,
(self.cur_files as f64 * 100.0) / self.total_files as f64,
humanize(self.cur_bytes),
humanize(self.total_bytes),
(self.cur_bytes as f64 * 100.0) / self.total_bytes as f64
)
}
}
/// A progress meter used when initially scanning.
pub struct ScanProgress {
    // Directories seen so far.
    dirs: u64,
    // Files seen so far.
    files: u64,
    // Bytes seen so far.
    bytes: u64,
}
impl ScanProgress {
    /// Construct a new scanning progress meter.
    pub fn new() -> ScanProgress {
        ScanProgress {
            dirs: 0,
            files: 0,
            bytes: 0,
        }
    }
    /// Advance the directory/file/byte counts, redrawing the shared status
    /// line if enough time has elapsed since the last draw.
    pub fn update(&mut self, dirs: u64, files: u64, bytes: u64) {
        self.dirs += dirs;
        self.files += files;
        self.bytes += bytes;
        let mut state = STATE.lock().unwrap();
        if state.need_update() {
            let msg = self.message();
            state.update(msg);
        }
    }
    /// Render the current scan counts as a single status line.
    fn message(&self) -> String {
        format!(
            "scan: {} dirs {} files, {} bytes\n",
            self.dirs,
            self.files,
            humanize(self.bytes)
        )
    }
}
impl Drop for ScanProgress {
    fn drop(&mut self) {
        // Draw the final scan totals when the meter goes away.
        let mut st = STATE.lock().unwrap();
        st.update(self.message());
        // Reset the stored message so later output does not erase the final
        // totals that were just drawn.
        st.message.clear();
    }
}
/// Print a size in a more human-friendly format.
///
/// The result is a fixed-width number followed by a power-of-1024 unit
/// suffix; fewer fractional digits are used as the magnitude grows so the
/// column width stays constant.
pub fn humanize(value: u64) -> String {
    static UNITS: [&str; 9] = [
        "B ", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB",
    ];
    // Scale down by 1024 until the value fits under one unit step.
    let mut scaled = value as f64;
    let mut idx = 0;
    while scaled > 1024.0 {
        scaled /= 1024.0;
        idx += 1;
    }
    let digits = if scaled < 10.0 {
        3
    } else if scaled < 100.0 {
        2
    } else {
        1
    };
    format!("{:6.*}{}", digits, scaled, UNITS[idx])
}
| {
OffsetDateTime::now_utc() + Duration::seconds(5)
} | conditional_block |
shared_lock.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Different objects protected by the same lock
use crate::str::{CssString, CssStringWriter};
use crate::stylesheets::Origin;
#[cfg(feature = "gecko")]
use atomic_refcell::{AtomicRef, AtomicRefCell, AtomicRefMut};
#[cfg(feature = "servo")]
use parking_lot::RwLock;
use servo_arc::Arc;
use std::cell::UnsafeCell;
use std::fmt;
#[cfg(feature = "servo")]
use std::mem;
#[cfg(feature = "gecko")]
use std::ptr;
use to_shmem::{SharedMemoryBuilder, ToShmem};
/// A shared read/write lock that can protect multiple objects.
///
/// In Gecko builds, we don't need the blocking behavior, just the safety. As
/// such we implement this with an AtomicRefCell instead in Gecko builds,
/// which is ~2x as fast, and panics (rather than deadlocking) when things go
/// wrong (which is much easier to debug on CI).
///
/// Servo needs the blocking behavior for its unsynchronized animation setup,
/// but that may not be web-compatible and may need to be changed (at which
/// point Servo could use AtomicRefCell too).
///
/// Gecko also needs the ability to have "read only" SharedRwLocks, which are
/// used for objects stored in (read only) shared memory. Attempting to acquire
/// write access to objects protected by a read only SharedRwLock will panic.
#[derive(Clone)]
#[cfg_attr(feature = "servo", derive(MallocSizeOf))]
pub struct SharedRwLock {
#[cfg(feature = "servo")]
#[cfg_attr(feature = "servo", ignore_malloc_size_of = "Arc")]
arc: Arc<RwLock<()>>,
#[cfg(feature = "gecko")]
cell: Option<Arc<AtomicRefCell<SomethingZeroSizedButTyped>>>,
}
#[cfg(feature = "gecko")]
struct SomethingZeroSizedButTyped;
impl fmt::Debug for SharedRwLock {
    /// Opaque debug output; the data the lock protects is not reachable here.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "SharedRwLock")
    }
}
impl SharedRwLock {
    /// Create a new shared lock (servo).
    #[cfg(feature = "servo")]
    pub fn new() -> Self {
        SharedRwLock {
            arc: Arc::new(RwLock::new(())),
        }
    }
    /// Create a new shared lock (gecko).
    #[cfg(feature = "gecko")]
    pub fn new() -> Self {
        SharedRwLock {
            cell: Some(Arc::new(AtomicRefCell::new(SomethingZeroSizedButTyped))),
        }
    }
    /// Create a new global shared lock (servo).
    #[cfg(feature = "servo")]
    pub fn new_leaked() -> Self {
        SharedRwLock {
            arc: Arc::new_leaked(RwLock::new(())),
        }
    }
    /// Create a new global shared lock (gecko).
    #[cfg(feature = "gecko")]
    pub fn new_leaked() -> Self {
        SharedRwLock {
            cell: Some(Arc::new_leaked(AtomicRefCell::new(
                SomethingZeroSizedButTyped,
            ))),
        }
    }
    /// Create a new read-only shared lock (gecko).
    ///
    /// `cell` is `None`: `read()` then yields an empty guard and `write()`
    /// panics (see the `unwrap` in `write` below).
    #[cfg(feature = "gecko")]
    pub fn read_only() -> Self {
        SharedRwLock { cell: None }
    }
    /// Wrap the given data to make its access protected by this lock.
    pub fn wrap<T>(&self, data: T) -> Locked<T> {
        Locked {
            shared_lock: self.clone(),
            data: UnsafeCell::new(data),
        }
    }
    /// Obtain the lock for reading (servo).
    #[cfg(feature = "servo")]
    pub fn read(&self) -> SharedRwLockReadGuard {
        // Leak the parking_lot guard on purpose; the matching
        // SharedRwLockReadGuard::drop releases it via force_unlock_read().
        mem::forget(self.arc.read());
        SharedRwLockReadGuard(self)
    }
    /// Obtain the lock for reading (gecko).
    #[cfg(feature = "gecko")]
    pub fn read(&self) -> SharedRwLockReadGuard {
        // Read-only locks (cell == None) produce an empty guard.
        SharedRwLockReadGuard(self.cell.as_ref().map(|cell| cell.borrow()))
    }
    /// Obtain the lock for writing (servo).
    #[cfg(feature = "servo")]
    pub fn write(&self) -> SharedRwLockWriteGuard {
        // Leak the parking_lot guard on purpose; the matching
        // SharedRwLockWriteGuard::drop releases it via force_unlock_write().
        mem::forget(self.arc.write());
        SharedRwLockWriteGuard(self)
    }
    /// Obtain the lock for writing (gecko).
    ///
    /// Panics (via `unwrap`) when called on a read-only lock.
    #[cfg(feature = "gecko")]
    pub fn write(&self) -> SharedRwLockWriteGuard {
        SharedRwLockWriteGuard(self.cell.as_ref().unwrap().borrow_mut())
    }
}
/// Proof that a shared lock was obtained for reading (servo).
#[cfg(feature = "servo")]
pub struct SharedRwLockReadGuard<'a>(&'a SharedRwLock);
/// Proof that a shared lock was obtained for reading (gecko).
#[cfg(feature = "gecko")]
pub struct SharedRwLockReadGuard<'a>(Option<AtomicRef<'a, SomethingZeroSizedButTyped>>);
#[cfg(feature = "servo")]
impl<'a> Drop for SharedRwLockReadGuard<'a> {
fn drop(&mut self) {
// Unsafe: self.lock is private to this module, only ever set after `read()`,
// and never copied or cloned (see `compile_time_assert` below).
unsafe { self.0.arc.force_unlock_read() }
}
}
/// Proof that a shared lock was obtained for writing (servo).
#[cfg(feature = "servo")]
pub struct SharedRwLockWriteGuard<'a>(&'a SharedRwLock);
/// Proof that a shared lock was obtained for writing (gecko).
#[cfg(feature = "gecko")]
pub struct SharedRwLockWriteGuard<'a>(AtomicRefMut<'a, SomethingZeroSizedButTyped>);
#[cfg(feature = "servo")]
impl<'a> Drop for SharedRwLockWriteGuard<'a> {
fn drop(&mut self) {
// Unsafe: self.lock is private to this module, only ever set after `write()`,
// and never copied or cloned (see `compile_time_assert` below).
unsafe { self.0.arc.force_unlock_write() }
}
}
/// Data protect by a shared lock.
pub struct Locked<T> {
shared_lock: SharedRwLock,
data: UnsafeCell<T>,
}
// Unsafe: the data inside `UnsafeCell` is only accessed in `read_with` and `write_with`,
// where guards ensure synchronization.
unsafe impl<T: Send> Send for Locked<T> {}
unsafe impl<T: Send + Sync> Sync for Locked<T> {}
impl<T: fmt::Debug> fmt::Debug for Locked<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let guard = self.shared_lock.read();
self.read_with(&guard).fmt(f)
}
}
impl<T> Locked<T> {
    /// Whether this lock is the read-only (shared-memory) kind, i.e. its
    /// `SharedRwLock` came from `SharedRwLock::read_only()`.
    #[cfg(feature = "gecko")]
    #[inline]
    fn is_read_only_lock(&self) -> bool {
        self.shared_lock.cell.is_none()
    }
    #[cfg(feature = "servo")]
    fn same_lock_as(&self, lock: &SharedRwLock) -> bool {
        // Two SharedRwLock handles denote the same lock iff they share the Arc.
        Arc::ptr_eq(&self.shared_lock.arc, &lock.arc)
    }
    #[cfg(feature = "gecko")]
    fn same_lock_as(&self, derefed_guard: Option<&SomethingZeroSizedButTyped>) -> bool {
        // Compare AtomicRefCell addresses; `None` (read-only lock / empty
        // guard) maps to null, so two read-only sides compare equal.
        ptr::eq(
            self.shared_lock
                .cell
                .as_ref()
                .map(|cell| cell.as_ptr())
                .unwrap_or(ptr::null_mut()),
            derefed_guard
                .map(|guard| guard as *const _ as *mut _)
                .unwrap_or(ptr::null_mut()),
        )
    }
    /// Access the data for reading.
    ///
    /// Panics if `guard` was obtained from an unrelated `SharedRwLock`.
    pub fn read_with<'a>(&'a self, guard: &'a SharedRwLockReadGuard) -> &'a T {
        #[cfg(feature = "gecko")]
        assert!(
            self.is_read_only_lock() || self.same_lock_as(guard.0.as_ref().map(|r| &**r)),
            "Locked::read_with called with a guard from an unrelated SharedRwLock"
        );
        #[cfg(not(feature = "gecko"))]
        assert!(self.same_lock_as(&guard.0));
        let ptr = self.data.get();
        // Unsafe:
        //
        // * The guard guarantees that the lock is taken for reading,
        //   and we’ve checked that it’s the correct lock.
        // * The returned reference borrows *both* the data and the guard,
        //   so that it can outlive neither.
        unsafe { &*ptr }
    }
    /// Access the data for reading without verifying the lock. Use with caution.
    ///
    /// # Safety
    /// The caller must guarantee no concurrent writer exists for the duration
    /// of the returned borrow; none of the usual lock checks are performed.
    #[cfg(feature = "gecko")]
    pub unsafe fn read_unchecked<'a>(&'a self) -> &'a T {
        let ptr = self.data.get();
        &*ptr
    }
    /// Access the data for writing.
    ///
    /// Panics if `guard` comes from a read-only or unrelated `SharedRwLock`.
    pub fn write_with<'a>(&'a self, guard: &'a mut SharedRwLockWriteGuard) -> &'a mut T {
        #[cfg(feature = "gecko")]
        assert!(
            !self.is_read_only_lock() && self.same_lock_as(Some(&guard.0)),
            "Locked::write_with called with a guard from a read only or unrelated SharedRwLock"
        );
        #[cfg(not(feature = "gecko"))]
        assert!(self.same_lock_as(&guard.0));
        let ptr = self.data.get();
        // Unsafe:
        //
        // * The guard guarantees that the lock is taken for writing,
        //   and we’ve checked that it’s the correct lock.
        // * The returned reference borrows *both* the data and the guard,
        //   so that it can outlive neither.
        // * We require a mutable borrow of the guard,
        //   so that one write guard can only be used once at a time.
        unsafe { &mut *ptr }
    }
}
#[cfg(feature = "gecko")]
impl<T: ToShmem> ToShmem for Locked<T> {
fn to_shmem(&self, builder: &mut SharedMemoryBuilder) -> to_shmem::Result<Self> {
use std::mem::ManuallyDrop;
let guard = self.shared_lock.read();
Ok(ManuallyDrop::new(Locked {
shared_lock: SharedRwLock::read_only(),
data: UnsafeCell::new(ManuallyDrop::into_inner(
self.read_with(&guard).to_shmem(builder)?,
)),
}))
}
}
#[cfg(feature = "servo")]
impl<T: ToShmem> ToShmem for Locked<T> {
fn to_shmem(&self, _builder: &mut SharedMemoryBuilder) -> to_shmem::Result<Self> {
panic!("ToShmem not supported in Servo currently")
}
}
#[allow(dead_code)]
mod compile_time_assert {
    //! Compile-time proofs that the guard types are neither `Clone` nor
    //! `Copy`: each explicit impl below would conflict with the blanket impl
    //! above it if the guard ever gained the corresponding trait, turning a
    //! soundness regression into a build error.
    use super::{SharedRwLockReadGuard, SharedRwLockWriteGuard};
    trait Marker1 {}
    impl<T: Clone> Marker1 for T {}
    impl<'a> Marker1 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard:!Clone
    impl<'a> Marker1 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard:!Clone
    trait Marker2 {}
    impl<T: Copy> Marker2 for T {}
    impl<'a> Marker2 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard:!Copy
    impl<'a> Marker2 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard:!Copy
}
/// Like ToCss, but with a lock guard given by the caller, and with the writer specified
/// concretely rather than with a parameter.
pub trait ToCssWithGuard {
/// Serialize `self` in CSS syntax, writing to `dest`, using the given lock guard.
fn to_css(&self, guard: &SharedRwLockReadGuard, dest: &mut CssStringWriter) -> fmt::Result;
/// Serialize `self` in CSS syntax using the given lock guard and return a string.
///
/// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
#[inline]
fn to_css_string(&self, guard: &SharedRwLockReadGuard) -> CssString {
let mut s = CssString::new();
self.to_css(guard, &mut s).unwrap();
s
}
}
/// Parameters needed for deep clones.
#[cfg(feature = "gecko")]
pub struct DeepCloneParams {
/// The new sheet we're cloning rules into.
pub reference_sheet: *const crate::gecko_bindings::structs::StyleSheet,
}
/// Parameters needed for deep clones.
#[cfg(feature = "servo")] | /// A trait to do a deep clone of a given CSS type. Gets a lock and a read
/// guard, in order to be able to read and clone nested structures.
pub trait DeepCloneWithLock: Sized {
/// Deep clones this object.
fn deep_clone_with_lock(
&self,
lock: &SharedRwLock,
guard: &SharedRwLockReadGuard,
params: &DeepCloneParams,
) -> Self;
}
/// Guards for a document
#[derive(Clone)]
pub struct StylesheetGuards<'a> {
/// For author-origin stylesheets.
pub author: &'a SharedRwLockReadGuard<'a>,
/// For user-agent-origin and user-origin stylesheets
pub ua_or_user: &'a SharedRwLockReadGuard<'a>,
}
impl<'a> StylesheetGuards<'a> {
/// Get the guard for a given stylesheet origin.
pub fn for_origin(&self, origin: Origin) -> &SharedRwLockReadGuard<'a> {
match origin {
Origin::Author => &self.author,
_ => &self.ua_or_user,
}
}
/// Same guard for all origins
pub fn same(guard: &'a SharedRwLockReadGuard<'a>) -> Self {
StylesheetGuards {
author: guard,
ua_or_user: guard,
}
}
} | pub struct DeepCloneParams;
| random_line_split |
shared_lock.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Different objects protected by the same lock
use crate::str::{CssString, CssStringWriter};
use crate::stylesheets::Origin;
#[cfg(feature = "gecko")]
use atomic_refcell::{AtomicRef, AtomicRefCell, AtomicRefMut};
#[cfg(feature = "servo")]
use parking_lot::RwLock;
use servo_arc::Arc;
use std::cell::UnsafeCell;
use std::fmt;
#[cfg(feature = "servo")]
use std::mem;
#[cfg(feature = "gecko")]
use std::ptr;
use to_shmem::{SharedMemoryBuilder, ToShmem};
/// A shared read/write lock that can protect multiple objects.
///
/// In Gecko builds, we don't need the blocking behavior, just the safety. As
/// such we implement this with an AtomicRefCell instead in Gecko builds,
/// which is ~2x as fast, and panics (rather than deadlocking) when things go
/// wrong (which is much easier to debug on CI).
///
/// Servo needs the blocking behavior for its unsynchronized animation setup,
/// but that may not be web-compatible and may need to be changed (at which
/// point Servo could use AtomicRefCell too).
///
/// Gecko also needs the ability to have "read only" SharedRwLocks, which are
/// used for objects stored in (read only) shared memory. Attempting to acquire
/// write access to objects protected by a read only SharedRwLock will panic.
#[derive(Clone)]
#[cfg_attr(feature = "servo", derive(MallocSizeOf))]
pub struct SharedRwLock {
#[cfg(feature = "servo")]
#[cfg_attr(feature = "servo", ignore_malloc_size_of = "Arc")]
arc: Arc<RwLock<()>>,
#[cfg(feature = "gecko")]
cell: Option<Arc<AtomicRefCell<SomethingZeroSizedButTyped>>>,
}
#[cfg(feature = "gecko")]
struct SomethingZeroSizedButTyped;
impl fmt::Debug for SharedRwLock {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("SharedRwLock")
}
}
impl SharedRwLock {
/// Create a new shared lock (servo).
#[cfg(feature = "servo")]
pub fn new() -> Self {
SharedRwLock {
arc: Arc::new(RwLock::new(())),
}
}
/// Create a new shared lock (gecko).
#[cfg(feature = "gecko")]
pub fn new() -> Self {
SharedRwLock {
cell: Some(Arc::new(AtomicRefCell::new(SomethingZeroSizedButTyped))),
}
}
/// Create a new global shared lock (servo).
#[cfg(feature = "servo")]
pub fn new_leaked() -> Self {
SharedRwLock {
arc: Arc::new_leaked(RwLock::new(())),
}
}
/// Create a new global shared lock (gecko).
#[cfg(feature = "gecko")]
pub fn new_leaked() -> Self {
SharedRwLock {
cell: Some(Arc::new_leaked(AtomicRefCell::new(
SomethingZeroSizedButTyped,
))),
}
}
/// Create a new read-only shared lock (gecko).
#[cfg(feature = "gecko")]
pub fn read_only() -> Self {
SharedRwLock { cell: None }
}
/// Wrap the given data to make its access protected by this lock.
pub fn wrap<T>(&self, data: T) -> Locked<T> {
Locked {
shared_lock: self.clone(),
data: UnsafeCell::new(data),
}
}
/// Obtain the lock for reading (servo).
#[cfg(feature = "servo")]
pub fn read(&self) -> SharedRwLockReadGuard {
mem::forget(self.arc.read());
SharedRwLockReadGuard(self)
}
/// Obtain the lock for reading (gecko).
#[cfg(feature = "gecko")]
pub fn read(&self) -> SharedRwLockReadGuard {
SharedRwLockReadGuard(self.cell.as_ref().map(|cell| cell.borrow()))
}
/// Obtain the lock for writing (servo).
#[cfg(feature = "servo")]
pub fn write(&self) -> SharedRwLockWriteGuard {
mem::forget(self.arc.write());
SharedRwLockWriteGuard(self)
}
/// Obtain the lock for writing (gecko).
#[cfg(feature = "gecko")]
pub fn write(&self) -> SharedRwLockWriteGuard {
SharedRwLockWriteGuard(self.cell.as_ref().unwrap().borrow_mut())
}
}
/// Proof that a shared lock was obtained for reading (servo).
#[cfg(feature = "servo")]
pub struct SharedRwLockReadGuard<'a>(&'a SharedRwLock);
/// Proof that a shared lock was obtained for reading (gecko).
#[cfg(feature = "gecko")]
pub struct SharedRwLockReadGuard<'a>(Option<AtomicRef<'a, SomethingZeroSizedButTyped>>);
#[cfg(feature = "servo")]
impl<'a> Drop for SharedRwLockReadGuard<'a> {
fn drop(&mut self) {
// Unsafe: self.lock is private to this module, only ever set after `read()`,
// and never copied or cloned (see `compile_time_assert` below).
unsafe { self.0.arc.force_unlock_read() }
}
}
/// Proof that a shared lock was obtained for writing (servo).
#[cfg(feature = "servo")]
pub struct SharedRwLockWriteGuard<'a>(&'a SharedRwLock);
/// Proof that a shared lock was obtained for writing (gecko).
#[cfg(feature = "gecko")]
pub struct SharedRwLockWriteGuard<'a>(AtomicRefMut<'a, SomethingZeroSizedButTyped>);
#[cfg(feature = "servo")]
impl<'a> Drop for SharedRwLockWriteGuard<'a> {
fn drop(&mut self) {
// Unsafe: self.lock is private to this module, only ever set after `write()`,
// and never copied or cloned (see `compile_time_assert` below).
unsafe { self.0.arc.force_unlock_write() }
}
}
/// Data protect by a shared lock.
pub struct Locked<T> {
shared_lock: SharedRwLock,
data: UnsafeCell<T>,
}
// Unsafe: the data inside `UnsafeCell` is only accessed in `read_with` and `write_with`,
// where guards ensure synchronization.
unsafe impl<T: Send> Send for Locked<T> {}
unsafe impl<T: Send + Sync> Sync for Locked<T> {}
impl<T: fmt::Debug> fmt::Debug for Locked<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let guard = self.shared_lock.read();
self.read_with(&guard).fmt(f)
}
}
impl<T> Locked<T> {
#[cfg(feature = "gecko")]
#[inline]
fn is_read_only_lock(&self) -> bool {
self.shared_lock.cell.is_none()
}
#[cfg(feature = "servo")]
fn same_lock_as(&self, lock: &SharedRwLock) -> bool {
Arc::ptr_eq(&self.shared_lock.arc, &lock.arc)
}
#[cfg(feature = "gecko")]
fn same_lock_as(&self, derefed_guard: Option<&SomethingZeroSizedButTyped>) -> bool {
ptr::eq(
self.shared_lock
.cell
.as_ref()
.map(|cell| cell.as_ptr())
.unwrap_or(ptr::null_mut()),
derefed_guard
.map(|guard| guard as *const _ as *mut _)
.unwrap_or(ptr::null_mut()),
)
}
/// Access the data for reading.
pub fn read_with<'a>(&'a self, guard: &'a SharedRwLockReadGuard) -> &'a T {
#[cfg(feature = "gecko")]
assert!(
self.is_read_only_lock() || self.same_lock_as(guard.0.as_ref().map(|r| &**r)),
"Locked::read_with called with a guard from an unrelated SharedRwLock"
);
#[cfg(not(feature = "gecko"))]
assert!(self.same_lock_as(&guard.0));
let ptr = self.data.get();
// Unsafe:
//
// * The guard guarantees that the lock is taken for reading,
// and we’ve checked that it’s the correct lock.
// * The returned reference borrows *both* the data and the guard,
// so that it can outlive neither.
unsafe { &*ptr }
}
/// Access the data for reading without verifying the lock. Use with caution.
#[cfg(feature = "gecko")]
pub unsafe fn read_unchecked<'a>(&'a self) -> &'a T {
let ptr = self.data.get();
&*ptr
}
/// Access the data for writing.
pub fn write_with<'a>(&'a self, guard: &'a mut SharedRwLockWriteGuard) -> &'a mut T {
#[cfg(feature = "gecko")]
assert!(
!self.is_read_only_lock() && self.same_lock_as(Some(&guard.0)),
"Locked::write_with called with a guard from a read only or unrelated SharedRwLock"
);
#[cfg(not(feature = "gecko"))]
assert!(self.same_lock_as(&guard.0));
let ptr = self.data.get();
// Unsafe:
//
// * The guard guarantees that the lock is taken for writing,
// and we’ve checked that it’s the correct lock.
// * The returned reference borrows *both* the data and the guard,
// so that it can outlive neither.
// * We require a mutable borrow of the guard,
// so that one write guard can only be used once at a time.
unsafe { &mut *ptr }
}
}
#[cfg(feature = "gecko")]
impl<T: ToShmem> ToShmem for Locked<T> {
fn to_shmem(&self, builder: &mut SharedMemoryBuilder) -> to_shmem::Result<Self> {
use std::mem::ManuallyDrop;
let guard = self.shared_lock.read();
Ok(ManuallyDrop::new(Locked {
shared_lock: SharedRwLock::read_only(),
data: UnsafeCell::new(ManuallyDrop::into_inner(
self.read_with(&guard).to_shmem(builder)?,
)),
}))
}
}
#[cfg(feature = "servo")]
impl<T: ToShmem> ToShmem for Locked<T> {
fn to_shmem(&self, _builder: &mut SharedMemoryBuilder) -> to_shmem::Result<Self> {
panic!("ToShmem not supported in Servo currently")
}
}
#[allow(dead_code)]
mod compile_time_assert {
use super::{SharedRwLockReadGuard, SharedRwLockWriteGuard};
trait Marker1 {}
impl<T: Clone> Marker1 for T {}
impl<'a> Marker1 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard:!Clone
impl<'a> Marker1 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard:!Clone
trait Marker2 {}
impl<T: Copy> Marker2 for T {}
impl<'a> Marker2 for SharedRwLockReadGuard<'a> {} // Assert SharedRwLockReadGuard:!Copy
impl<'a> Marker2 for SharedRwLockWriteGuard<'a> {} // Assert SharedRwLockWriteGuard:!Copy
}
/// Like ToCss, but with a lock guard given by the caller, and with the writer specified
/// concretely rather than with a parameter.
pub trait ToCssWithGuard {
/// Serialize `self` in CSS syntax, writing to `dest`, using the given lock guard.
fn to_css(&self, guard: &SharedRwLockReadGuard, dest: &mut CssStringWriter) -> fmt::Result;
/// Serialize `self` in CSS syntax using the given lock guard and return a string.
///
/// (This is a convenience wrapper for `to_css` and probably should not be overridden.)
#[inline]
fn to_css_string(&self, guard: &SharedRwLockReadGuard) -> CssString {
let mut s = CssString::new();
self.to_css(guard, &mut s).unwrap();
s
}
}
/// Parameters needed for deep clones.
#[cfg(feature = "gecko")]
pub struct DeepCloneParams {
/// The new sheet we're cloning rules into.
pub reference_sheet: *const crate::gecko_bindings::structs::StyleSheet,
}
/// Parameters needed for deep clones.
#[cfg(feature = "servo")]
pub struct DeepClon | trait to do a deep clone of a given CSS type. Gets a lock and a read
/// guard, in order to be able to read and clone nested structures.
pub trait DeepCloneWithLock: Sized {
/// Deep clones this object.
fn deep_clone_with_lock(
&self,
lock: &SharedRwLock,
guard: &SharedRwLockReadGuard,
params: &DeepCloneParams,
) -> Self;
}
/// Guards for a document
#[derive(Clone)]
pub struct StylesheetGuards<'a> {
/// For author-origin stylesheets.
pub author: &'a SharedRwLockReadGuard<'a>,
/// For user-agent-origin and user-origin stylesheets
pub ua_or_user: &'a SharedRwLockReadGuard<'a>,
}
impl<'a> StylesheetGuards<'a> {
/// Get the guard for a given stylesheet origin.
pub fn for_origin(&self, origin: Origin) -> &SharedRwLockReadGuard<'a> {
match origin {
Origin::Author => &self.author,
_ => &self.ua_or_user,
}
}
/// Same guard for all origins
pub fn same(guard: &'a SharedRwLockReadGuard<'a>) -> Self {
StylesheetGuards {
author: guard,
ua_or_user: guard,
}
}
}
| eParams;
/// A | identifier_name |
rotation.rs | // Copyright 2015 The CGMath Developers. For a full listing of the authors,
// refer to the Cargo.toml file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate cgmath;
use cgmath::*;
mod rotation {
use super::cgmath::*;
pub fn a2<R: Rotation2<f64>>() -> R {
Rotation2::from_angle(deg(30.0).to_rad())
}
pub fn a3<R: Rotation3<f64>>() -> R {
let axis = Vector3::new(1.0, 1.0, 0.0).normalize();
Rotation3::from_axis_angle(&axis, deg(30.0).to_rad())
}
}
#[test]
fn test_invert_basis2() {
let a: Basis2<_> = rotation::a2();
assert!(a.concat(&a.invert()).as_matrix2().is_identity());
}
#[test]
fn | () {
let a: Basis3<_> = rotation::a3();
assert!(a.concat(&a.invert()).as_matrix3().is_identity());
}
| test_invert_basis3 | identifier_name |
rotation.rs | // Copyright 2015 The CGMath Developers. For a full listing of the authors,
// refer to the Cargo.toml file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate cgmath;
use cgmath::*;
mod rotation {
use super::cgmath::*;
pub fn a2<R: Rotation2<f64>>() -> R {
Rotation2::from_angle(deg(30.0).to_rad())
}
pub fn a3<R: Rotation3<f64>>() -> R {
let axis = Vector3::new(1.0, 1.0, 0.0).normalize();
Rotation3::from_axis_angle(&axis, deg(30.0).to_rad())
}
}
#[test]
fn test_invert_basis2() { | }
#[test]
fn test_invert_basis3() {
let a: Basis3<_> = rotation::a3();
assert!(a.concat(&a.invert()).as_matrix3().is_identity());
} | let a: Basis2<_> = rotation::a2();
assert!(a.concat(&a.invert()).as_matrix2().is_identity()); | random_line_split |
rotation.rs | // Copyright 2015 The CGMath Developers. For a full listing of the authors,
// refer to the Cargo.toml file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate cgmath;
use cgmath::*;
mod rotation {
use super::cgmath::*;
pub fn a2<R: Rotation2<f64>>() -> R {
Rotation2::from_angle(deg(30.0).to_rad())
}
pub fn a3<R: Rotation3<f64>>() -> R {
let axis = Vector3::new(1.0, 1.0, 0.0).normalize();
Rotation3::from_axis_angle(&axis, deg(30.0).to_rad())
}
}
#[test]
fn test_invert_basis2() |
#[test]
fn test_invert_basis3() {
let a: Basis3<_> = rotation::a3();
assert!(a.concat(&a.invert()).as_matrix3().is_identity());
}
| {
let a: Basis2<_> = rotation::a2();
assert!(a.concat(&a.invert()).as_matrix2().is_identity());
} | identifier_body |
doc_test_lints.rs | //! This pass is overloaded and runs two different lints.
//!
//! - MISSING_DOC_CODE_EXAMPLES: this lint is **UNSTABLE** and looks for public items missing doctests
//! - PRIVATE_DOC_TESTS: this lint is **STABLE** and looks for private items with doctests.
use super::Pass;
use crate::clean;
use crate::clean::*;
use crate::core::DocContext;
use crate::fold::DocFolder;
use crate::html::markdown::{find_testable_code, ErrorCodes, Ignore, LangString};
use crate::visit_ast::inherits_doc_hidden;
use rustc_middle::lint::LintLevelSource;
use rustc_session::lint;
use rustc_span::symbol::sym;
crate const CHECK_PRIVATE_ITEMS_DOC_TESTS: Pass = Pass {
name: "check-private-items-doc-tests",
run: check_private_items_doc_tests,
description: "check private items doc tests",
};
struct PrivateItemDocTestLinter<'a, 'tcx> {
cx: &'a mut DocContext<'tcx>,
}
crate fn check_private_items_doc_tests(krate: Crate, cx: &mut DocContext<'_>) -> Crate {
let mut coll = PrivateItemDocTestLinter { cx };
coll.fold_crate(krate)
}
impl<'a, 'tcx> DocFolder for PrivateItemDocTestLinter<'a, 'tcx> {
fn fold_item(&mut self, item: Item) -> Option<Item> {
let dox = item.attrs.collapsed_doc_value().unwrap_or_else(String::new);
look_for_tests(self.cx, &dox, &item);
Some(self.fold_item_recur(item))
} | }
pub(crate) struct Tests {
pub(crate) found_tests: usize,
}
impl crate::doctest::Tester for Tests {
fn add_test(&mut self, _: String, config: LangString, _: usize) {
if config.rust && config.ignore == Ignore::None {
self.found_tests += 1;
}
}
}
crate fn should_have_doc_example(cx: &DocContext<'_>, item: &clean::Item) -> bool {
if!cx.cache.access_levels.is_public(item.def_id.expect_def_id())
|| matches!(
*item.kind,
clean::StructFieldItem(_)
| clean::VariantItem(_)
| clean::AssocConstItem(_, _)
| clean::AssocTypeItem(_, _)
| clean::TypedefItem(_, _)
| clean::StaticItem(_)
| clean::ConstantItem(_)
| clean::ExternCrateItem {.. }
| clean::ImportItem(_)
| clean::PrimitiveItem(_)
| clean::KeywordItem(_)
)
{
return false;
}
// The `expect_def_id()` should be okay because `local_def_id_to_hir_id`
// would presumably panic if a fake `DefIndex` were passed.
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(item.def_id.expect_def_id().expect_local());
if cx.tcx.hir().attrs(hir_id).lists(sym::doc).has_word(sym::hidden)
|| inherits_doc_hidden(cx.tcx, hir_id)
{
return false;
}
let (level, source) = cx.tcx.lint_level_at_node(crate::lint::MISSING_DOC_CODE_EXAMPLES, hir_id);
level!= lint::Level::Allow || matches!(source, LintLevelSource::Default)
}
crate fn look_for_tests<'tcx>(cx: &DocContext<'tcx>, dox: &str, item: &Item) {
let hir_id = match DocContext::as_local_hir_id(cx.tcx, item.def_id) {
Some(hir_id) => hir_id,
None => {
// If non-local, no need to check anything.
return;
}
};
let mut tests = Tests { found_tests: 0 };
find_testable_code(&dox, &mut tests, ErrorCodes::No, false, None);
if tests.found_tests == 0 && cx.tcx.sess.is_nightly_build() {
if should_have_doc_example(cx, &item) {
debug!("reporting error for {:?} (hir_id={:?})", item, hir_id);
let sp = item.attr_span(cx.tcx);
cx.tcx.struct_span_lint_hir(
crate::lint::MISSING_DOC_CODE_EXAMPLES,
hir_id,
sp,
|lint| lint.build("missing code example in this documentation").emit(),
);
}
} else if tests.found_tests > 0
&&!cx.cache.access_levels.is_public(item.def_id.expect_def_id())
{
cx.tcx.struct_span_lint_hir(
crate::lint::PRIVATE_DOC_TESTS,
hir_id,
item.attr_span(cx.tcx),
|lint| lint.build("documentation test in private item").emit(),
);
}
} | random_line_split |
|
doc_test_lints.rs | //! This pass is overloaded and runs two different lints.
//!
//! - MISSING_DOC_CODE_EXAMPLES: this lint is **UNSTABLE** and looks for public items missing doctests
//! - PRIVATE_DOC_TESTS: this lint is **STABLE** and looks for private items with doctests.
use super::Pass;
use crate::clean;
use crate::clean::*;
use crate::core::DocContext;
use crate::fold::DocFolder;
use crate::html::markdown::{find_testable_code, ErrorCodes, Ignore, LangString};
use crate::visit_ast::inherits_doc_hidden;
use rustc_middle::lint::LintLevelSource;
use rustc_session::lint;
use rustc_span::symbol::sym;
crate const CHECK_PRIVATE_ITEMS_DOC_TESTS: Pass = Pass {
name: "check-private-items-doc-tests",
run: check_private_items_doc_tests,
description: "check private items doc tests",
};
struct PrivateItemDocTestLinter<'a, 'tcx> {
cx: &'a mut DocContext<'tcx>,
}
crate fn check_private_items_doc_tests(krate: Crate, cx: &mut DocContext<'_>) -> Crate {
let mut coll = PrivateItemDocTestLinter { cx };
coll.fold_crate(krate)
}
impl<'a, 'tcx> DocFolder for PrivateItemDocTestLinter<'a, 'tcx> {
fn fold_item(&mut self, item: Item) -> Option<Item> {
let dox = item.attrs.collapsed_doc_value().unwrap_or_else(String::new);
look_for_tests(self.cx, &dox, &item);
Some(self.fold_item_recur(item))
}
}
pub(crate) struct Tests {
pub(crate) found_tests: usize,
}
impl crate::doctest::Tester for Tests {
fn add_test(&mut self, _: String, config: LangString, _: usize) {
if config.rust && config.ignore == Ignore::None {
self.found_tests += 1;
}
}
}
crate fn should_have_doc_example(cx: &DocContext<'_>, item: &clean::Item) -> bool {
if!cx.cache.access_levels.is_public(item.def_id.expect_def_id())
|| matches!(
*item.kind,
clean::StructFieldItem(_)
| clean::VariantItem(_)
| clean::AssocConstItem(_, _)
| clean::AssocTypeItem(_, _)
| clean::TypedefItem(_, _)
| clean::StaticItem(_)
| clean::ConstantItem(_)
| clean::ExternCrateItem {.. }
| clean::ImportItem(_)
| clean::PrimitiveItem(_)
| clean::KeywordItem(_)
)
{
return false;
}
// The `expect_def_id()` should be okay because `local_def_id_to_hir_id`
// would presumably panic if a fake `DefIndex` were passed.
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(item.def_id.expect_def_id().expect_local());
if cx.tcx.hir().attrs(hir_id).lists(sym::doc).has_word(sym::hidden)
|| inherits_doc_hidden(cx.tcx, hir_id)
{
return false;
}
let (level, source) = cx.tcx.lint_level_at_node(crate::lint::MISSING_DOC_CODE_EXAMPLES, hir_id);
level!= lint::Level::Allow || matches!(source, LintLevelSource::Default)
}
crate fn | <'tcx>(cx: &DocContext<'tcx>, dox: &str, item: &Item) {
let hir_id = match DocContext::as_local_hir_id(cx.tcx, item.def_id) {
Some(hir_id) => hir_id,
None => {
// If non-local, no need to check anything.
return;
}
};
let mut tests = Tests { found_tests: 0 };
find_testable_code(&dox, &mut tests, ErrorCodes::No, false, None);
if tests.found_tests == 0 && cx.tcx.sess.is_nightly_build() {
if should_have_doc_example(cx, &item) {
debug!("reporting error for {:?} (hir_id={:?})", item, hir_id);
let sp = item.attr_span(cx.tcx);
cx.tcx.struct_span_lint_hir(
crate::lint::MISSING_DOC_CODE_EXAMPLES,
hir_id,
sp,
|lint| lint.build("missing code example in this documentation").emit(),
);
}
} else if tests.found_tests > 0
&&!cx.cache.access_levels.is_public(item.def_id.expect_def_id())
{
cx.tcx.struct_span_lint_hir(
crate::lint::PRIVATE_DOC_TESTS,
hir_id,
item.attr_span(cx.tcx),
|lint| lint.build("documentation test in private item").emit(),
);
}
}
| look_for_tests | identifier_name |
doc_test_lints.rs | //! This pass is overloaded and runs two different lints.
//!
//! - MISSING_DOC_CODE_EXAMPLES: this lint is **UNSTABLE** and looks for public items missing doctests
//! - PRIVATE_DOC_TESTS: this lint is **STABLE** and looks for private items with doctests.
use super::Pass;
use crate::clean;
use crate::clean::*;
use crate::core::DocContext;
use crate::fold::DocFolder;
use crate::html::markdown::{find_testable_code, ErrorCodes, Ignore, LangString};
use crate::visit_ast::inherits_doc_hidden;
use rustc_middle::lint::LintLevelSource;
use rustc_session::lint;
use rustc_span::symbol::sym;
crate const CHECK_PRIVATE_ITEMS_DOC_TESTS: Pass = Pass {
name: "check-private-items-doc-tests",
run: check_private_items_doc_tests,
description: "check private items doc tests",
};
struct PrivateItemDocTestLinter<'a, 'tcx> {
cx: &'a mut DocContext<'tcx>,
}
crate fn check_private_items_doc_tests(krate: Crate, cx: &mut DocContext<'_>) -> Crate {
let mut coll = PrivateItemDocTestLinter { cx };
coll.fold_crate(krate)
}
impl<'a, 'tcx> DocFolder for PrivateItemDocTestLinter<'a, 'tcx> {
fn fold_item(&mut self, item: Item) -> Option<Item> |
}
pub(crate) struct Tests {
pub(crate) found_tests: usize,
}
impl crate::doctest::Tester for Tests {
fn add_test(&mut self, _: String, config: LangString, _: usize) {
if config.rust && config.ignore == Ignore::None {
self.found_tests += 1;
}
}
}
crate fn should_have_doc_example(cx: &DocContext<'_>, item: &clean::Item) -> bool {
if!cx.cache.access_levels.is_public(item.def_id.expect_def_id())
|| matches!(
*item.kind,
clean::StructFieldItem(_)
| clean::VariantItem(_)
| clean::AssocConstItem(_, _)
| clean::AssocTypeItem(_, _)
| clean::TypedefItem(_, _)
| clean::StaticItem(_)
| clean::ConstantItem(_)
| clean::ExternCrateItem {.. }
| clean::ImportItem(_)
| clean::PrimitiveItem(_)
| clean::KeywordItem(_)
)
{
return false;
}
// The `expect_def_id()` should be okay because `local_def_id_to_hir_id`
// would presumably panic if a fake `DefIndex` were passed.
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(item.def_id.expect_def_id().expect_local());
if cx.tcx.hir().attrs(hir_id).lists(sym::doc).has_word(sym::hidden)
|| inherits_doc_hidden(cx.tcx, hir_id)
{
return false;
}
let (level, source) = cx.tcx.lint_level_at_node(crate::lint::MISSING_DOC_CODE_EXAMPLES, hir_id);
level!= lint::Level::Allow || matches!(source, LintLevelSource::Default)
}
crate fn look_for_tests<'tcx>(cx: &DocContext<'tcx>, dox: &str, item: &Item) {
let hir_id = match DocContext::as_local_hir_id(cx.tcx, item.def_id) {
Some(hir_id) => hir_id,
None => {
// If non-local, no need to check anything.
return;
}
};
let mut tests = Tests { found_tests: 0 };
find_testable_code(&dox, &mut tests, ErrorCodes::No, false, None);
if tests.found_tests == 0 && cx.tcx.sess.is_nightly_build() {
if should_have_doc_example(cx, &item) {
debug!("reporting error for {:?} (hir_id={:?})", item, hir_id);
let sp = item.attr_span(cx.tcx);
cx.tcx.struct_span_lint_hir(
crate::lint::MISSING_DOC_CODE_EXAMPLES,
hir_id,
sp,
|lint| lint.build("missing code example in this documentation").emit(),
);
}
} else if tests.found_tests > 0
&&!cx.cache.access_levels.is_public(item.def_id.expect_def_id())
{
cx.tcx.struct_span_lint_hir(
crate::lint::PRIVATE_DOC_TESTS,
hir_id,
item.attr_span(cx.tcx),
|lint| lint.build("documentation test in private item").emit(),
);
}
}
| {
let dox = item.attrs.collapsed_doc_value().unwrap_or_else(String::new);
look_for_tests(self.cx, &dox, &item);
Some(self.fold_item_recur(item))
} | identifier_body |
doc_test_lints.rs | //! This pass is overloaded and runs two different lints.
//!
//! - MISSING_DOC_CODE_EXAMPLES: this lint is **UNSTABLE** and looks for public items missing doctests
//! - PRIVATE_DOC_TESTS: this lint is **STABLE** and looks for private items with doctests.
use super::Pass;
use crate::clean;
use crate::clean::*;
use crate::core::DocContext;
use crate::fold::DocFolder;
use crate::html::markdown::{find_testable_code, ErrorCodes, Ignore, LangString};
use crate::visit_ast::inherits_doc_hidden;
use rustc_middle::lint::LintLevelSource;
use rustc_session::lint;
use rustc_span::symbol::sym;
crate const CHECK_PRIVATE_ITEMS_DOC_TESTS: Pass = Pass {
name: "check-private-items-doc-tests",
run: check_private_items_doc_tests,
description: "check private items doc tests",
};
struct PrivateItemDocTestLinter<'a, 'tcx> {
cx: &'a mut DocContext<'tcx>,
}
crate fn check_private_items_doc_tests(krate: Crate, cx: &mut DocContext<'_>) -> Crate {
let mut coll = PrivateItemDocTestLinter { cx };
coll.fold_crate(krate)
}
impl<'a, 'tcx> DocFolder for PrivateItemDocTestLinter<'a, 'tcx> {
fn fold_item(&mut self, item: Item) -> Option<Item> {
let dox = item.attrs.collapsed_doc_value().unwrap_or_else(String::new);
look_for_tests(self.cx, &dox, &item);
Some(self.fold_item_recur(item))
}
}
pub(crate) struct Tests {
pub(crate) found_tests: usize,
}
impl crate::doctest::Tester for Tests {
fn add_test(&mut self, _: String, config: LangString, _: usize) {
if config.rust && config.ignore == Ignore::None |
}
}
crate fn should_have_doc_example(cx: &DocContext<'_>, item: &clean::Item) -> bool {
if!cx.cache.access_levels.is_public(item.def_id.expect_def_id())
|| matches!(
*item.kind,
clean::StructFieldItem(_)
| clean::VariantItem(_)
| clean::AssocConstItem(_, _)
| clean::AssocTypeItem(_, _)
| clean::TypedefItem(_, _)
| clean::StaticItem(_)
| clean::ConstantItem(_)
| clean::ExternCrateItem {.. }
| clean::ImportItem(_)
| clean::PrimitiveItem(_)
| clean::KeywordItem(_)
)
{
return false;
}
// The `expect_def_id()` should be okay because `local_def_id_to_hir_id`
// would presumably panic if a fake `DefIndex` were passed.
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(item.def_id.expect_def_id().expect_local());
if cx.tcx.hir().attrs(hir_id).lists(sym::doc).has_word(sym::hidden)
|| inherits_doc_hidden(cx.tcx, hir_id)
{
return false;
}
let (level, source) = cx.tcx.lint_level_at_node(crate::lint::MISSING_DOC_CODE_EXAMPLES, hir_id);
level!= lint::Level::Allow || matches!(source, LintLevelSource::Default)
}
crate fn look_for_tests<'tcx>(cx: &DocContext<'tcx>, dox: &str, item: &Item) {
let hir_id = match DocContext::as_local_hir_id(cx.tcx, item.def_id) {
Some(hir_id) => hir_id,
None => {
// If non-local, no need to check anything.
return;
}
};
let mut tests = Tests { found_tests: 0 };
find_testable_code(&dox, &mut tests, ErrorCodes::No, false, None);
if tests.found_tests == 0 && cx.tcx.sess.is_nightly_build() {
if should_have_doc_example(cx, &item) {
debug!("reporting error for {:?} (hir_id={:?})", item, hir_id);
let sp = item.attr_span(cx.tcx);
cx.tcx.struct_span_lint_hir(
crate::lint::MISSING_DOC_CODE_EXAMPLES,
hir_id,
sp,
|lint| lint.build("missing code example in this documentation").emit(),
);
}
} else if tests.found_tests > 0
&&!cx.cache.access_levels.is_public(item.def_id.expect_def_id())
{
cx.tcx.struct_span_lint_hir(
crate::lint::PRIVATE_DOC_TESTS,
hir_id,
item.attr_span(cx.tcx),
|lint| lint.build("documentation test in private item").emit(),
);
}
}
| {
self.found_tests += 1;
} | conditional_block |
mut_mut.rs | use clippy_utils::diagnostics::span_lint;
use clippy_utils::higher;
use rustc_hir as hir;
use rustc_hir::intravisit;
use rustc_lint::{LateContext, LateLintPass, LintContext};
use rustc_middle::hir::map::Map;
use rustc_middle::lint::in_external_macro;
use rustc_middle::ty;
use rustc_session::{declare_lint_pass, declare_tool_lint};
declare_clippy_lint! {
/// ### What it does
/// Checks for instances of `mut mut` references.
///
/// ### Why is this bad?
/// Multiple `mut`s don't add anything meaningful to the
/// source. This is either a copy'n'paste error, or it shows a fundamental
/// misunderstanding of references.
///
/// ### Example
/// ```rust
/// # let mut y = 1;
/// let x = &mut &mut y;
/// ```
pub MUT_MUT,
pedantic,
"usage of double-mut refs, e.g., `&mut &mut...`"
}
declare_lint_pass!(MutMut => [MUT_MUT]);
impl<'tcx> LateLintPass<'tcx> for MutMut {
fn check_block(&mut self, cx: &LateContext<'tcx>, block: &'tcx hir::Block<'_>) {
intravisit::walk_block(&mut MutVisitor { cx }, block);
}
fn check_ty(&mut self, cx: &LateContext<'tcx>, ty: &'tcx hir::Ty<'_>) {
use rustc_hir::intravisit::Visitor;
MutVisitor { cx }.visit_ty(ty);
}
}
pub struct MutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for MutVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'_>) {
if in_external_macro(self.cx.sess(), expr.span) {
return;
}
if let Some(higher::ForLoop { arg, body,.. }) = higher::ForLoop::hir(expr) {
// A `for` loop lowers to:
// ```rust
// match ::std::iter::Iterator::next(&mut iter) {
// // ^^^^
// ```
// Let's ignore the generated code.
intravisit::walk_expr(self, arg);
intravisit::walk_expr(self, body);
} else if let hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Mut, e) = expr.kind {
if let hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Mut, _) = e.kind {
span_lint(
self.cx,
MUT_MUT,
expr.span,
"generally you want to avoid `&mut &mut _` if possible",
);
} else if let ty::Ref(_, _, hir::Mutability::Mut) = self.cx.typeck_results().expr_ty(e).kind() {
span_lint(
self.cx,
MUT_MUT,
expr.span,
"this expression mutably borrows a mutable reference. Consider reborrowing",
);
}
}
}
fn visit_ty(&mut self, ty: &'tcx hir::Ty<'_>) | {
span_lint(
self.cx,
MUT_MUT,
ty.span,
"generally you want to avoid `&mut &mut _` if possible",
);
}
}
intravisit::walk_ty(self, ty);
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::None
}
}
| {
if in_external_macro(self.cx.sess(), ty.span) {
return;
}
if let hir::TyKind::Rptr(
_,
hir::MutTy {
ty: pty,
mutbl: hir::Mutability::Mut,
},
) = ty.kind
{
if let hir::TyKind::Rptr(
_,
hir::MutTy {
mutbl: hir::Mutability::Mut,
..
},
) = pty.kind | identifier_body |
mut_mut.rs | use clippy_utils::diagnostics::span_lint;
use clippy_utils::higher;
use rustc_hir as hir;
use rustc_hir::intravisit;
use rustc_lint::{LateContext, LateLintPass, LintContext};
use rustc_middle::hir::map::Map;
use rustc_middle::lint::in_external_macro;
use rustc_middle::ty;
use rustc_session::{declare_lint_pass, declare_tool_lint};
declare_clippy_lint! {
/// ### What it does
/// Checks for instances of `mut mut` references.
///
/// ### Why is this bad?
/// Multiple `mut`s don't add anything meaningful to the
/// source. This is either a copy'n'paste error, or it shows a fundamental
/// misunderstanding of references.
///
/// ### Example
/// ```rust
/// # let mut y = 1;
/// let x = &mut &mut y;
/// ```
pub MUT_MUT,
pedantic,
"usage of double-mut refs, e.g., `&mut &mut...`"
}
declare_lint_pass!(MutMut => [MUT_MUT]);
impl<'tcx> LateLintPass<'tcx> for MutMut {
fn check_block(&mut self, cx: &LateContext<'tcx>, block: &'tcx hir::Block<'_>) {
intravisit::walk_block(&mut MutVisitor { cx }, block);
}
fn check_ty(&mut self, cx: &LateContext<'tcx>, ty: &'tcx hir::Ty<'_>) {
use rustc_hir::intravisit::Visitor;
MutVisitor { cx }.visit_ty(ty);
}
}
pub struct MutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for MutVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'_>) {
if in_external_macro(self.cx.sess(), expr.span) {
return;
}
if let Some(higher::ForLoop { arg, body,.. }) = higher::ForLoop::hir(expr) {
// A `for` loop lowers to:
// ```rust
// match ::std::iter::Iterator::next(&mut iter) {
// // ^^^^
// ```
// Let's ignore the generated code.
intravisit::walk_expr(self, arg);
intravisit::walk_expr(self, body);
} else if let hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Mut, e) = expr.kind {
if let hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Mut, _) = e.kind {
span_lint(
self.cx,
MUT_MUT,
expr.span,
"generally you want to avoid `&mut &mut _` if possible",
);
} else if let ty::Ref(_, _, hir::Mutability::Mut) = self.cx.typeck_results().expr_ty(e).kind() {
span_lint(
self.cx,
MUT_MUT,
expr.span,
"this expression mutably borrows a mutable reference. Consider reborrowing",
);
}
}
}
fn visit_ty(&mut self, ty: &'tcx hir::Ty<'_>) {
if in_external_macro(self.cx.sess(), ty.span) {
return;
}
if let hir::TyKind::Rptr(
_,
hir::MutTy {
ty: pty,
mutbl: hir::Mutability::Mut,
},
) = ty.kind
{
if let hir::TyKind::Rptr(
_,
hir::MutTy {
mutbl: hir::Mutability::Mut,
..
},
) = pty.kind
{
span_lint(
self.cx,
MUT_MUT,
ty.span,
"generally you want to avoid `&mut &mut _` if possible",
);
} | }
intravisit::walk_ty(self, ty);
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::None
}
} | random_line_split |
|
mut_mut.rs | use clippy_utils::diagnostics::span_lint;
use clippy_utils::higher;
use rustc_hir as hir;
use rustc_hir::intravisit;
use rustc_lint::{LateContext, LateLintPass, LintContext};
use rustc_middle::hir::map::Map;
use rustc_middle::lint::in_external_macro;
use rustc_middle::ty;
use rustc_session::{declare_lint_pass, declare_tool_lint};
declare_clippy_lint! {
/// ### What it does
/// Checks for instances of `mut mut` references.
///
/// ### Why is this bad?
/// Multiple `mut`s don't add anything meaningful to the
/// source. This is either a copy'n'paste error, or it shows a fundamental
/// misunderstanding of references.
///
/// ### Example
/// ```rust
/// # let mut y = 1;
/// let x = &mut &mut y;
/// ```
pub MUT_MUT,
pedantic,
"usage of double-mut refs, e.g., `&mut &mut...`"
}
declare_lint_pass!(MutMut => [MUT_MUT]);
impl<'tcx> LateLintPass<'tcx> for MutMut {
fn check_block(&mut self, cx: &LateContext<'tcx>, block: &'tcx hir::Block<'_>) {
intravisit::walk_block(&mut MutVisitor { cx }, block);
}
fn check_ty(&mut self, cx: &LateContext<'tcx>, ty: &'tcx hir::Ty<'_>) {
use rustc_hir::intravisit::Visitor;
MutVisitor { cx }.visit_ty(ty);
}
}
pub struct | <'a, 'tcx> {
cx: &'a LateContext<'tcx>,
}
impl<'a, 'tcx> intravisit::Visitor<'tcx> for MutVisitor<'a, 'tcx> {
type Map = Map<'tcx>;
fn visit_expr(&mut self, expr: &'tcx hir::Expr<'_>) {
if in_external_macro(self.cx.sess(), expr.span) {
return;
}
if let Some(higher::ForLoop { arg, body,.. }) = higher::ForLoop::hir(expr) {
// A `for` loop lowers to:
// ```rust
// match ::std::iter::Iterator::next(&mut iter) {
// // ^^^^
// ```
// Let's ignore the generated code.
intravisit::walk_expr(self, arg);
intravisit::walk_expr(self, body);
} else if let hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Mut, e) = expr.kind {
if let hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Mut, _) = e.kind {
span_lint(
self.cx,
MUT_MUT,
expr.span,
"generally you want to avoid `&mut &mut _` if possible",
);
} else if let ty::Ref(_, _, hir::Mutability::Mut) = self.cx.typeck_results().expr_ty(e).kind() {
span_lint(
self.cx,
MUT_MUT,
expr.span,
"this expression mutably borrows a mutable reference. Consider reborrowing",
);
}
}
}
fn visit_ty(&mut self, ty: &'tcx hir::Ty<'_>) {
if in_external_macro(self.cx.sess(), ty.span) {
return;
}
if let hir::TyKind::Rptr(
_,
hir::MutTy {
ty: pty,
mutbl: hir::Mutability::Mut,
},
) = ty.kind
{
if let hir::TyKind::Rptr(
_,
hir::MutTy {
mutbl: hir::Mutability::Mut,
..
},
) = pty.kind
{
span_lint(
self.cx,
MUT_MUT,
ty.span,
"generally you want to avoid `&mut &mut _` if possible",
);
}
}
intravisit::walk_ty(self, ty);
}
fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
intravisit::NestedVisitorMap::None
}
}
| MutVisitor | identifier_name |
borrowed-unique-basic.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// min-lldb-version: 310
// Gdb doesn't know about UTF-32 character encoding and will print a rust char as only
// its numerical value.
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print *bool_ref
// gdb-check:$1 = true
// gdb-command:print *int_ref
// gdb-check:$2 = -1
// gdb-command:print *char_ref
// gdb-check:$3 = 97
// gdb-command:print/d *i8_ref
// gdb-check:$4 = 68
// gdb-command:print *i16_ref
// gdb-check:$5 = -16
// gdb-command:print *i32_ref
// gdb-check:$6 = -32
// gdb-command:print *i64_ref
// gdb-check:$7 = -64
// gdb-command:print *uint_ref
// gdb-check:$8 = 1
// gdb-command:print/d *u8_ref
// gdb-check:$9 = 100
// gdb-command:print *u16_ref
// gdb-check:$10 = 16
// gdb-command:print *u32_ref
// gdb-check:$11 = 32
// gdb-command:print *u64_ref
// gdb-check:$12 = 64
// gdb-command:print *f32_ref
// gdb-check:$13 = 2.5
// gdb-command:print *f64_ref
// gdb-check:$14 = 3.5
// === LLDB TESTS ==================================================================================
// lldb-command:type format add -f decimal char
// lldb-command:type format add -f decimal 'unsigned char'
// lldb-command:run
// lldb-command:print *bool_ref
// lldb-check:[...]$0 = true
// lldb-command:print *int_ref
// lldb-check:[...]$1 = -1
// d ebugger:print *char_ref
// c heck:[...]$3 = 97
// lldb-command:print *i8_ref
// lldb-check:[...]$2 = 68
// lldb-command:print *i16_ref
// lldb-check:[...]$3 = -16
// lldb-command:print *i32_ref
// lldb-check:[...]$4 = -32
// lldb-command:print *i64_ref
// lldb-check:[...]$5 = -64
// lldb-command:print *uint_ref
// lldb-check:[...]$6 = 1
// lldb-command:print *u8_ref
// lldb-check:[...]$7 = 100
// lldb-command:print *u16_ref
// lldb-check:[...]$8 = 16
// lldb-command:print *u32_ref
// lldb-check:[...]$9 = 32
// lldb-command:print *u64_ref
// lldb-check:[...]$10 = 64
// lldb-command:print *f32_ref
// lldb-check:[...]$11 = 2.5
// lldb-command:print *f64_ref
// lldb-check:[...]$12 = 3.5
#![allow(unused_variables)]
#![feature(box_syntax)]
#![omit_gdb_pretty_printer_section]
fn main() {
let bool_box: Box<bool> = box true;
let bool_ref: &bool = &*bool_box;
let int_box: Box<int> = box -1;
let int_ref: &int = &*int_box;
let char_box: Box<char> = box 'a';
let char_ref: &char = &*char_box;
let i8_box: Box<i8> = box 68;
let i8_ref: &i8 = &*i8_box;
let i16_box: Box<i16> = box -16;
let i16_ref: &i16 = &*i16_box;
let i32_box: Box<i32> = box -32;
let i32_ref: &i32 = &*i32_box;
let i64_box: Box<i64> = box -64;
let i64_ref: &i64 = &*i64_box;
let uint_box: Box<uint> = box 1;
let uint_ref: &uint = &*uint_box;
let u8_box: Box<u8> = box 100;
let u8_ref: &u8 = &*u8_box;
let u16_box: Box<u16> = box 16;
let u16_ref: &u16 = &*u16_box;
let u32_box: Box<u32> = box 32;
let u32_ref: &u32 = &*u32_box;
let u64_box: Box<u64> = box 64;
let u64_ref: &u64 = &*u64_box;
let f32_box: Box<f32> = box 2.5;
let f32_ref: &f32 = &*f32_box;
let f64_box: Box<f64> = box 3.5;
let f64_ref: &f64 = &*f64_box;
zzz(); // #break
}
fn | () {()}
| zzz | identifier_name |
borrowed-unique-basic.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// min-lldb-version: 310
// Gdb doesn't know about UTF-32 character encoding and will print a rust char as only
// its numerical value.
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print *bool_ref
// gdb-check:$1 = true
// gdb-command:print *int_ref
// gdb-check:$2 = -1 |
// gdb-command:print/d *i8_ref
// gdb-check:$4 = 68
// gdb-command:print *i16_ref
// gdb-check:$5 = -16
// gdb-command:print *i32_ref
// gdb-check:$6 = -32
// gdb-command:print *i64_ref
// gdb-check:$7 = -64
// gdb-command:print *uint_ref
// gdb-check:$8 = 1
// gdb-command:print/d *u8_ref
// gdb-check:$9 = 100
// gdb-command:print *u16_ref
// gdb-check:$10 = 16
// gdb-command:print *u32_ref
// gdb-check:$11 = 32
// gdb-command:print *u64_ref
// gdb-check:$12 = 64
// gdb-command:print *f32_ref
// gdb-check:$13 = 2.5
// gdb-command:print *f64_ref
// gdb-check:$14 = 3.5
// === LLDB TESTS ==================================================================================
// lldb-command:type format add -f decimal char
// lldb-command:type format add -f decimal 'unsigned char'
// lldb-command:run
// lldb-command:print *bool_ref
// lldb-check:[...]$0 = true
// lldb-command:print *int_ref
// lldb-check:[...]$1 = -1
// d ebugger:print *char_ref
// c heck:[...]$3 = 97
// lldb-command:print *i8_ref
// lldb-check:[...]$2 = 68
// lldb-command:print *i16_ref
// lldb-check:[...]$3 = -16
// lldb-command:print *i32_ref
// lldb-check:[...]$4 = -32
// lldb-command:print *i64_ref
// lldb-check:[...]$5 = -64
// lldb-command:print *uint_ref
// lldb-check:[...]$6 = 1
// lldb-command:print *u8_ref
// lldb-check:[...]$7 = 100
// lldb-command:print *u16_ref
// lldb-check:[...]$8 = 16
// lldb-command:print *u32_ref
// lldb-check:[...]$9 = 32
// lldb-command:print *u64_ref
// lldb-check:[...]$10 = 64
// lldb-command:print *f32_ref
// lldb-check:[...]$11 = 2.5
// lldb-command:print *f64_ref
// lldb-check:[...]$12 = 3.5
#![allow(unused_variables)]
#![feature(box_syntax)]
#![omit_gdb_pretty_printer_section]
fn main() {
let bool_box: Box<bool> = box true;
let bool_ref: &bool = &*bool_box;
let int_box: Box<int> = box -1;
let int_ref: &int = &*int_box;
let char_box: Box<char> = box 'a';
let char_ref: &char = &*char_box;
let i8_box: Box<i8> = box 68;
let i8_ref: &i8 = &*i8_box;
let i16_box: Box<i16> = box -16;
let i16_ref: &i16 = &*i16_box;
let i32_box: Box<i32> = box -32;
let i32_ref: &i32 = &*i32_box;
let i64_box: Box<i64> = box -64;
let i64_ref: &i64 = &*i64_box;
let uint_box: Box<uint> = box 1;
let uint_ref: &uint = &*uint_box;
let u8_box: Box<u8> = box 100;
let u8_ref: &u8 = &*u8_box;
let u16_box: Box<u16> = box 16;
let u16_ref: &u16 = &*u16_box;
let u32_box: Box<u32> = box 32;
let u32_ref: &u32 = &*u32_box;
let u64_box: Box<u64> = box 64;
let u64_ref: &u64 = &*u64_box;
let f32_box: Box<f32> = box 2.5;
let f32_ref: &f32 = &*f32_box;
let f64_box: Box<f64> = box 3.5;
let f64_ref: &f64 = &*f64_box;
zzz(); // #break
}
fn zzz() {()} |
// gdb-command:print *char_ref
// gdb-check:$3 = 97 | random_line_split |
borrowed-unique-basic.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// min-lldb-version: 310
// Gdb doesn't know about UTF-32 character encoding and will print a rust char as only
// its numerical value.
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print *bool_ref
// gdb-check:$1 = true
// gdb-command:print *int_ref
// gdb-check:$2 = -1
// gdb-command:print *char_ref
// gdb-check:$3 = 97
// gdb-command:print/d *i8_ref
// gdb-check:$4 = 68
// gdb-command:print *i16_ref
// gdb-check:$5 = -16
// gdb-command:print *i32_ref
// gdb-check:$6 = -32
// gdb-command:print *i64_ref
// gdb-check:$7 = -64
// gdb-command:print *uint_ref
// gdb-check:$8 = 1
// gdb-command:print/d *u8_ref
// gdb-check:$9 = 100
// gdb-command:print *u16_ref
// gdb-check:$10 = 16
// gdb-command:print *u32_ref
// gdb-check:$11 = 32
// gdb-command:print *u64_ref
// gdb-check:$12 = 64
// gdb-command:print *f32_ref
// gdb-check:$13 = 2.5
// gdb-command:print *f64_ref
// gdb-check:$14 = 3.5
// === LLDB TESTS ==================================================================================
// lldb-command:type format add -f decimal char
// lldb-command:type format add -f decimal 'unsigned char'
// lldb-command:run
// lldb-command:print *bool_ref
// lldb-check:[...]$0 = true
// lldb-command:print *int_ref
// lldb-check:[...]$1 = -1
// d ebugger:print *char_ref
// c heck:[...]$3 = 97
// lldb-command:print *i8_ref
// lldb-check:[...]$2 = 68
// lldb-command:print *i16_ref
// lldb-check:[...]$3 = -16
// lldb-command:print *i32_ref
// lldb-check:[...]$4 = -32
// lldb-command:print *i64_ref
// lldb-check:[...]$5 = -64
// lldb-command:print *uint_ref
// lldb-check:[...]$6 = 1
// lldb-command:print *u8_ref
// lldb-check:[...]$7 = 100
// lldb-command:print *u16_ref
// lldb-check:[...]$8 = 16
// lldb-command:print *u32_ref
// lldb-check:[...]$9 = 32
// lldb-command:print *u64_ref
// lldb-check:[...]$10 = 64
// lldb-command:print *f32_ref
// lldb-check:[...]$11 = 2.5
// lldb-command:print *f64_ref
// lldb-check:[...]$12 = 3.5
#![allow(unused_variables)]
#![feature(box_syntax)]
#![omit_gdb_pretty_printer_section]
fn main() {
let bool_box: Box<bool> = box true;
let bool_ref: &bool = &*bool_box;
let int_box: Box<int> = box -1;
let int_ref: &int = &*int_box;
let char_box: Box<char> = box 'a';
let char_ref: &char = &*char_box;
let i8_box: Box<i8> = box 68;
let i8_ref: &i8 = &*i8_box;
let i16_box: Box<i16> = box -16;
let i16_ref: &i16 = &*i16_box;
let i32_box: Box<i32> = box -32;
let i32_ref: &i32 = &*i32_box;
let i64_box: Box<i64> = box -64;
let i64_ref: &i64 = &*i64_box;
let uint_box: Box<uint> = box 1;
let uint_ref: &uint = &*uint_box;
let u8_box: Box<u8> = box 100;
let u8_ref: &u8 = &*u8_box;
let u16_box: Box<u16> = box 16;
let u16_ref: &u16 = &*u16_box;
let u32_box: Box<u32> = box 32;
let u32_ref: &u32 = &*u32_box;
let u64_box: Box<u64> = box 64;
let u64_ref: &u64 = &*u64_box;
let f32_box: Box<f32> = box 2.5;
let f32_ref: &f32 = &*f32_box;
let f64_box: Box<f64> = box 3.5;
let f64_ref: &f64 = &*f64_box;
zzz(); // #break
}
fn zzz() | {()} | identifier_body |
|
coherence-blanket-conflicts-with-specific-cross-crate.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:go_trait.rs
extern crate go_trait;
use go_trait::{Go,GoMut};
use std::fmt::Debug;
use std::default::Default;
struct MyThingy;
impl Go for MyThingy {
fn | (&self, arg: isize) { }
}
impl GoMut for MyThingy { //~ ERROR conflicting implementations
fn go_mut(&mut self, arg: isize) { }
}
fn main() { }
| go | identifier_name |
coherence-blanket-conflicts-with-specific-cross-crate.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:go_trait.rs
extern crate go_trait;
use go_trait::{Go,GoMut};
use std::fmt::Debug;
use std::default::Default;
struct MyThingy;
impl Go for MyThingy {
fn go(&self, arg: isize) { }
}
impl GoMut for MyThingy { //~ ERROR conflicting implementations | fn go_mut(&mut self, arg: isize) { }
}
fn main() { } | random_line_split |
|
coherence-blanket-conflicts-with-specific-cross-crate.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:go_trait.rs
extern crate go_trait;
use go_trait::{Go,GoMut};
use std::fmt::Debug;
use std::default::Default;
struct MyThingy;
impl Go for MyThingy {
fn go(&self, arg: isize) |
}
impl GoMut for MyThingy { //~ ERROR conflicting implementations
fn go_mut(&mut self, arg: isize) { }
}
fn main() { }
| { } | identifier_body |
can_read_log_from_rosout.rs | use crossbeam::channel::unbounded;
use std::collections::BTreeSet;
mod util;
mod msg {
rosrust::rosmsg_include!(rosgraph_msgs / Log);
}
#[test]
fn | () {
let _roscore = util::run_roscore_for(util::Language::None, util::Feature::Log);
rosrust::init("rosout_agg_listener");
let (tx, rx) = unbounded();
let _subscriber =
rosrust::subscribe::<msg::rosgraph_msgs::Log, _>("/rosout_agg", 100, move |data| {
tx.send((data.level, data.msg)).unwrap();
})
.unwrap();
let rate = rosrust::rate(1.0);
let mut expected_messages = BTreeSet::new();
expected_messages.insert((1, "debug message".to_owned()));
expected_messages.insert((2, "info message".to_owned()));
expected_messages.insert((4, "warn message".to_owned()));
expected_messages.insert((8, "err message".to_owned()));
expected_messages.insert((16, "fatal message".to_owned()));
for _ in 0..10 {
for item in rx.try_iter() {
println!("Received message at level {}: {}", item.0, item.1);
expected_messages.remove(&item);
}
if expected_messages.is_empty() {
return;
}
rosrust::ros_debug!("debug message");
rosrust::ros_info!("info message");
rosrust::ros_warn!("warn message");
rosrust::ros_err!("err message");
rosrust::ros_fatal!("fatal message");
rate.sleep();
}
panic!("Failed to receive data on /rosout_agg");
}
| can_read_log_from_rosout | identifier_name |
can_read_log_from_rosout.rs | use crossbeam::channel::unbounded;
use std::collections::BTreeSet;
mod util;
mod msg {
rosrust::rosmsg_include!(rosgraph_msgs / Log);
}
#[test]
fn can_read_log_from_rosout() {
let _roscore = util::run_roscore_for(util::Language::None, util::Feature::Log);
rosrust::init("rosout_agg_listener");
let (tx, rx) = unbounded();
let _subscriber =
rosrust::subscribe::<msg::rosgraph_msgs::Log, _>("/rosout_agg", 100, move |data| {
tx.send((data.level, data.msg)).unwrap();
})
.unwrap();
let rate = rosrust::rate(1.0);
let mut expected_messages = BTreeSet::new();
expected_messages.insert((1, "debug message".to_owned()));
expected_messages.insert((2, "info message".to_owned()));
expected_messages.insert((4, "warn message".to_owned()));
expected_messages.insert((8, "err message".to_owned()));
expected_messages.insert((16, "fatal message".to_owned()));
for _ in 0..10 {
for item in rx.try_iter() {
println!("Received message at level {}: {}", item.0, item.1);
expected_messages.remove(&item);
}
if expected_messages.is_empty() |
rosrust::ros_debug!("debug message");
rosrust::ros_info!("info message");
rosrust::ros_warn!("warn message");
rosrust::ros_err!("err message");
rosrust::ros_fatal!("fatal message");
rate.sleep();
}
panic!("Failed to receive data on /rosout_agg");
}
| {
return;
} | conditional_block |
can_read_log_from_rosout.rs | use crossbeam::channel::unbounded;
use std::collections::BTreeSet;
mod util;
mod msg {
rosrust::rosmsg_include!(rosgraph_msgs / Log);
}
#[test]
fn can_read_log_from_rosout() {
let _roscore = util::run_roscore_for(util::Language::None, util::Feature::Log);
rosrust::init("rosout_agg_listener");
let (tx, rx) = unbounded();
let _subscriber =
rosrust::subscribe::<msg::rosgraph_msgs::Log, _>("/rosout_agg", 100, move |data| {
tx.send((data.level, data.msg)).unwrap();
})
.unwrap();
let rate = rosrust::rate(1.0);
let mut expected_messages = BTreeSet::new();
expected_messages.insert((1, "debug message".to_owned()));
expected_messages.insert((2, "info message".to_owned()));
expected_messages.insert((4, "warn message".to_owned()));
expected_messages.insert((8, "err message".to_owned())); | expected_messages.insert((16, "fatal message".to_owned()));
for _ in 0..10 {
for item in rx.try_iter() {
println!("Received message at level {}: {}", item.0, item.1);
expected_messages.remove(&item);
}
if expected_messages.is_empty() {
return;
}
rosrust::ros_debug!("debug message");
rosrust::ros_info!("info message");
rosrust::ros_warn!("warn message");
rosrust::ros_err!("err message");
rosrust::ros_fatal!("fatal message");
rate.sleep();
}
panic!("Failed to receive data on /rosout_agg");
} | random_line_split |
|
can_read_log_from_rosout.rs | use crossbeam::channel::unbounded;
use std::collections::BTreeSet;
mod util;
mod msg {
rosrust::rosmsg_include!(rosgraph_msgs / Log);
}
#[test]
fn can_read_log_from_rosout() | expected_messages.insert((16, "fatal message".to_owned()));
for _ in 0..10 {
for item in rx.try_iter() {
println!("Received message at level {}: {}", item.0, item.1);
expected_messages.remove(&item);
}
if expected_messages.is_empty() {
return;
}
rosrust::ros_debug!("debug message");
rosrust::ros_info!("info message");
rosrust::ros_warn!("warn message");
rosrust::ros_err!("err message");
rosrust::ros_fatal!("fatal message");
rate.sleep();
}
panic!("Failed to receive data on /rosout_agg");
}
| {
let _roscore = util::run_roscore_for(util::Language::None, util::Feature::Log);
rosrust::init("rosout_agg_listener");
let (tx, rx) = unbounded();
let _subscriber =
rosrust::subscribe::<msg::rosgraph_msgs::Log, _>("/rosout_agg", 100, move |data| {
tx.send((data.level, data.msg)).unwrap();
})
.unwrap();
let rate = rosrust::rate(1.0);
let mut expected_messages = BTreeSet::new();
expected_messages.insert((1, "debug message".to_owned()));
expected_messages.insert((2, "info message".to_owned()));
expected_messages.insert((4, "warn message".to_owned()));
expected_messages.insert((8, "err message".to_owned())); | identifier_body |
origin.rs | extern crate regex;
use std::fmt;
use std::str::FromStr;
use std::string::ToString;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use regex::Regex;
use super::{ NetType, AddrType };
use super::{ ProtocolVersion, SessionVersion };
use error::Error;
// o=<username> <sess-id> <sess-version> <nettype> <addrtype> <unicast-address>
// o=mozilla...THIS_IS_SDPARTA-46.0.1 5381835512098962904 0 IN IP4 0.0.0.0
#[derive(Clone, Debug)]
pub struct Origin {
pub username : String, // username MUST NOT contain spaces
pub session_id: String,
pub session_version: SessionVersion,
pub nettype : NetType, // IN( IANA Registered, Meas `Internet` )
pub addrtype: AddrType, // IP4 | IP6
pub address : IpAddr,
}
impl ToString for Origin {
fn to_string(&self) -> String {
let origin = "o=".to_string()
+ self.username.as_ref() + " "
+ self.session_id.as_ref() + " "
+ self.session_version.to_string().as_ref() + " "
+ self.nettype.to_string().as_ref() + " "
+ self.addrtype.to_string().as_ref() + " "
+ self.address.to_string().as_ref();
origin
}
}
impl FromStr for Origin {
type Err = Error;
fn from_str(s: &str) -> Result<Origin, Error> | Err(_) => return Err(Error::SessionVersion)
},
None => return Err(Error::SessionVersion)
};
let nettype = match cap.at(4) {
Some(nettype) => {
match NetType::from_str(nettype) {
Ok(nettype) => nettype,
Err(_) => return Err(Error::NetType)
}
},
None => return Err(Error::NetType)
};
let addrtype = match cap.at(5) {
Some(addrtype) => match AddrType::from_str(addrtype) {
Ok(addrtype) => addrtype,
Err(_) => return Err(Error::AddrType)
},
None => return Err(Error::AddrType)
};
let address = match cap.at(6) {
Some(address) => {
match IpAddr::from_str(address) {
Ok(address) => address,
Err(e) => return Err(Error::IpAddress)
}
},
None => return Err(Error::IpAddress)
};
// check addrtype <-> address
match addrtype {
AddrType::Ip4 => {
match address {
IpAddr::V6(_) => return Err(Error::AddrType),
IpAddr::V4(_) => { }
};
},
AddrType::Ip6 => {
match address {
IpAddr::V4(_) => return Err(Error::AddrType),
IpAddr::V6(_) => { }
};
}
}
Ok(Origin {
username: username,
session_id : session_id,
session_version: session_version,
nettype : nettype,
addrtype: addrtype,
address : address
})
}
} | {
let re = match Regex::new(r"(\S+)\s(\S+)\s(\d+)\s(IN)\s(IP\d)\s(\d+\.\d+\.\d+\.\d+)") {
Ok(re) => re,
Err(e) => {
println!("[Regex] {:?}", e);
return Err(Error::Origin);
}
};
let cap = re.captures(s).unwrap();
let username = match cap.at(1) {
Some(username) => username.to_string(),
None => return Err(Error::SessionName)
};
let session_id = match cap.at(2) {
Some(session_id) => session_id.to_string(),
None => return Err(Error::SessionId)
};
let session_version = match cap.at(3) {
Some(session_version) => match SessionVersion::from_str(session_version) {
Ok(session_version) => session_version, | identifier_body |
origin.rs | extern crate regex;
use std::fmt;
use std::str::FromStr;
use std::string::ToString;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use regex::Regex;
use super::{ NetType, AddrType };
use super::{ ProtocolVersion, SessionVersion };
use error::Error;
// o=<username> <sess-id> <sess-version> <nettype> <addrtype> <unicast-address>
// o=mozilla...THIS_IS_SDPARTA-46.0.1 5381835512098962904 0 IN IP4 0.0.0.0
#[derive(Clone, Debug)]
pub struct Origin {
pub username : String, // username MUST NOT contain spaces
pub session_id: String,
pub session_version: SessionVersion,
pub nettype : NetType, // IN( IANA Registered, Meas `Internet` )
pub addrtype: AddrType, // IP4 | IP6
pub address : IpAddr,
}
impl ToString for Origin {
fn to_string(&self) -> String {
let origin = "o=".to_string()
+ self.username.as_ref() + " "
+ self.session_id.as_ref() + " "
+ self.session_version.to_string().as_ref() + " "
+ self.nettype.to_string().as_ref() + " "
+ self.addrtype.to_string().as_ref() + " "
+ self.address.to_string().as_ref();
origin
}
}
impl FromStr for Origin {
type Err = Error;
fn from_str(s: &str) -> Result<Origin, Error> {
let re = match Regex::new(r"(\S+)\s(\S+)\s(\d+)\s(IN)\s(IP\d)\s(\d+\.\d+\.\d+\.\d+)") { | };
let cap = re.captures(s).unwrap();
let username = match cap.at(1) {
Some(username) => username.to_string(),
None => return Err(Error::SessionName)
};
let session_id = match cap.at(2) {
Some(session_id) => session_id.to_string(),
None => return Err(Error::SessionId)
};
let session_version = match cap.at(3) {
Some(session_version) => match SessionVersion::from_str(session_version) {
Ok(session_version) => session_version,
Err(_) => return Err(Error::SessionVersion)
},
None => return Err(Error::SessionVersion)
};
let nettype = match cap.at(4) {
Some(nettype) => {
match NetType::from_str(nettype) {
Ok(nettype) => nettype,
Err(_) => return Err(Error::NetType)
}
},
None => return Err(Error::NetType)
};
let addrtype = match cap.at(5) {
Some(addrtype) => match AddrType::from_str(addrtype) {
Ok(addrtype) => addrtype,
Err(_) => return Err(Error::AddrType)
},
None => return Err(Error::AddrType)
};
let address = match cap.at(6) {
Some(address) => {
match IpAddr::from_str(address) {
Ok(address) => address,
Err(e) => return Err(Error::IpAddress)
}
},
None => return Err(Error::IpAddress)
};
// check addrtype <-> address
match addrtype {
AddrType::Ip4 => {
match address {
IpAddr::V6(_) => return Err(Error::AddrType),
IpAddr::V4(_) => { }
};
},
AddrType::Ip6 => {
match address {
IpAddr::V4(_) => return Err(Error::AddrType),
IpAddr::V6(_) => { }
};
}
}
Ok(Origin {
username: username,
session_id : session_id,
session_version: session_version,
nettype : nettype,
addrtype: addrtype,
address : address
})
}
} | Ok(re) => re,
Err(e) => {
println!("[Regex] {:?}", e);
return Err(Error::Origin);
} | random_line_split |
origin.rs | extern crate regex;
use std::fmt;
use std::str::FromStr;
use std::string::ToString;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use regex::Regex;
use super::{ NetType, AddrType };
use super::{ ProtocolVersion, SessionVersion };
use error::Error;
// o=<username> <sess-id> <sess-version> <nettype> <addrtype> <unicast-address>
// o=mozilla...THIS_IS_SDPARTA-46.0.1 5381835512098962904 0 IN IP4 0.0.0.0
#[derive(Clone, Debug)]
pub struct Origin {
pub username : String, // username MUST NOT contain spaces
pub session_id: String,
pub session_version: SessionVersion,
pub nettype : NetType, // IN( IANA Registered, Meas `Internet` )
pub addrtype: AddrType, // IP4 | IP6
pub address : IpAddr,
}
impl ToString for Origin {
fn | (&self) -> String {
let origin = "o=".to_string()
+ self.username.as_ref() + " "
+ self.session_id.as_ref() + " "
+ self.session_version.to_string().as_ref() + " "
+ self.nettype.to_string().as_ref() + " "
+ self.addrtype.to_string().as_ref() + " "
+ self.address.to_string().as_ref();
origin
}
}
impl FromStr for Origin {
type Err = Error;
fn from_str(s: &str) -> Result<Origin, Error> {
let re = match Regex::new(r"(\S+)\s(\S+)\s(\d+)\s(IN)\s(IP\d)\s(\d+\.\d+\.\d+\.\d+)") {
Ok(re) => re,
Err(e) => {
println!("[Regex] {:?}", e);
return Err(Error::Origin);
}
};
let cap = re.captures(s).unwrap();
let username = match cap.at(1) {
Some(username) => username.to_string(),
None => return Err(Error::SessionName)
};
let session_id = match cap.at(2) {
Some(session_id) => session_id.to_string(),
None => return Err(Error::SessionId)
};
let session_version = match cap.at(3) {
Some(session_version) => match SessionVersion::from_str(session_version) {
Ok(session_version) => session_version,
Err(_) => return Err(Error::SessionVersion)
},
None => return Err(Error::SessionVersion)
};
let nettype = match cap.at(4) {
Some(nettype) => {
match NetType::from_str(nettype) {
Ok(nettype) => nettype,
Err(_) => return Err(Error::NetType)
}
},
None => return Err(Error::NetType)
};
let addrtype = match cap.at(5) {
Some(addrtype) => match AddrType::from_str(addrtype) {
Ok(addrtype) => addrtype,
Err(_) => return Err(Error::AddrType)
},
None => return Err(Error::AddrType)
};
let address = match cap.at(6) {
Some(address) => {
match IpAddr::from_str(address) {
Ok(address) => address,
Err(e) => return Err(Error::IpAddress)
}
},
None => return Err(Error::IpAddress)
};
// check addrtype <-> address
match addrtype {
AddrType::Ip4 => {
match address {
IpAddr::V6(_) => return Err(Error::AddrType),
IpAddr::V4(_) => { }
};
},
AddrType::Ip6 => {
match address {
IpAddr::V4(_) => return Err(Error::AddrType),
IpAddr::V6(_) => { }
};
}
}
Ok(Origin {
username: username,
session_id : session_id,
session_version: session_version,
nettype : nettype,
addrtype: addrtype,
address : address
})
}
} | to_string | identifier_name |
functions.rs | /*
* MIT License
*
* Copyright (c) 2016 Johnathan Fercher
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
**/
// receives two u8 and returns the bigger
fn return_max(a: u8, b: u8) -> u8 {
if a > b {
a
}else{
b
}
}
// receives two u8 and prints the bigger, without any return
fn print_max(a: u8, b: u8) -> () {
let mut low;
if a > b {
low = a;
}else{
low = b;
}
println!("{}", low);
}
fn main() {
println!("{}", return_max(10, 50));
print_max(10, 50); | } | random_line_split |
|
functions.rs | /*
* MIT License
*
* Copyright (c) 2016 Johnathan Fercher
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
**/
// receives two u8 and returns the bigger
fn return_max(a: u8, b: u8) -> u8 |
// receives two u8 and prints the bigger, without any return
fn print_max(a: u8, b: u8) -> () {
let mut low;
if a > b {
low = a;
}else{
low = b;
}
println!("{}", low);
}
fn main() {
println!("{}", return_max(10, 50));
print_max(10, 50);
} | {
if a > b {
a
}else{
b
}
} | identifier_body |
functions.rs | /*
* MIT License
*
* Copyright (c) 2016 Johnathan Fercher
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
**/
// receives two u8 and returns the bigger
fn return_max(a: u8, b: u8) -> u8 {
if a > b {
a
}else{
b
}
}
// receives two u8 and prints the bigger, without any return
fn print_max(a: u8, b: u8) -> () {
let mut low;
if a > b | else{
low = b;
}
println!("{}", low);
}
fn main() {
println!("{}", return_max(10, 50));
print_max(10, 50);
} | {
low = a;
} | conditional_block |
functions.rs | /*
* MIT License
*
* Copyright (c) 2016 Johnathan Fercher
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
**/
// receives two u8 and returns the bigger
fn | (a: u8, b: u8) -> u8 {
if a > b {
a
}else{
b
}
}
// receives two u8 and prints the bigger, without any return
fn print_max(a: u8, b: u8) -> () {
let mut low;
if a > b {
low = a;
}else{
low = b;
}
println!("{}", low);
}
fn main() {
println!("{}", return_max(10, 50));
print_max(10, 50);
} | return_max | identifier_name |
issue-11881.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate rbml;
extern crate serialize;
use std::io;
use std::io::{IoError, IoResult, SeekStyle};
use std::slice;
use serialize::{Encodable, Encoder};
use serialize::json;
use rbml::writer;
use rbml::io::SeekableMemWriter;
#[deriving(Encodable)]
struct Foo {
baz: bool,
}
#[deriving(Encodable)]
struct Bar {
froboz: uint,
}
enum WireProtocol {
JSON,
RBML, | //...
}
fn encode_json<'a,
T: Encodable<json::Encoder<'a>,
std::io::IoError>>(val: &T,
wr: &'a mut SeekableMemWriter) {
let mut encoder = json::Encoder::new(wr);
val.encode(&mut encoder);
}
fn encode_rbml<'a,
T: Encodable<writer::Encoder<'a, SeekableMemWriter>,
std::io::IoError>>(val: &T,
wr: &'a mut SeekableMemWriter) {
let mut encoder = writer::Encoder::new(wr);
val.encode(&mut encoder);
}
pub fn main() {
let target = Foo{baz: false,};
let mut wr = SeekableMemWriter::new();
let proto = WireProtocol::JSON;
match proto {
WireProtocol::JSON => encode_json(&target, &mut wr),
WireProtocol::RBML => encode_rbml(&target, &mut wr)
}
} | random_line_split |
|
issue-11881.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate rbml;
extern crate serialize;
use std::io;
use std::io::{IoError, IoResult, SeekStyle};
use std::slice;
use serialize::{Encodable, Encoder};
use serialize::json;
use rbml::writer;
use rbml::io::SeekableMemWriter;
#[deriving(Encodable)]
struct | {
baz: bool,
}
#[deriving(Encodable)]
struct Bar {
froboz: uint,
}
enum WireProtocol {
JSON,
RBML,
//...
}
fn encode_json<'a,
T: Encodable<json::Encoder<'a>,
std::io::IoError>>(val: &T,
wr: &'a mut SeekableMemWriter) {
let mut encoder = json::Encoder::new(wr);
val.encode(&mut encoder);
}
fn encode_rbml<'a,
T: Encodable<writer::Encoder<'a, SeekableMemWriter>,
std::io::IoError>>(val: &T,
wr: &'a mut SeekableMemWriter) {
let mut encoder = writer::Encoder::new(wr);
val.encode(&mut encoder);
}
pub fn main() {
let target = Foo{baz: false,};
let mut wr = SeekableMemWriter::new();
let proto = WireProtocol::JSON;
match proto {
WireProtocol::JSON => encode_json(&target, &mut wr),
WireProtocol::RBML => encode_rbml(&target, &mut wr)
}
}
| Foo | identifier_name |
macro-doc-escapes.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// When expanding a macro, documentation attributes (including documentation comments) must be
// passed "as is" without being parsed. Otherwise, some text will be incorrectly interpreted as
// escape sequences, leading to an ICE.
//
// Related issues: #25929, #25943
macro_rules! homura {
(#[$x:meta]) => ()
}
homura! {
/// \madoka \x41
}
| fn main() { } | random_line_split |
|
macro-doc-escapes.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// When expanding a macro, documentation attributes (including documentation comments) must be
// passed "as is" without being parsed. Otherwise, some text will be incorrectly interpreted as
// escape sequences, leading to an ICE.
//
// Related issues: #25929, #25943
macro_rules! homura {
(#[$x:meta]) => ()
}
homura! {
/// \madoka \x41
}
fn | () { }
| main | identifier_name |
macro-doc-escapes.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// When expanding a macro, documentation attributes (including documentation comments) must be
// passed "as is" without being parsed. Otherwise, some text will be incorrectly interpreted as
// escape sequences, leading to an ICE.
//
// Related issues: #25929, #25943
macro_rules! homura {
(#[$x:meta]) => ()
}
homura! {
/// \madoka \x41
}
fn main() | { } | identifier_body |
|
mod.rs | //! The SMTP transport sends emails using the SMTP protocol.
//!
//! This SMTP client follows [RFC
//! 5321](https://tools.ietf.org/html/rfc5321), and is designed to efficiently send emails from an
//! application to a relay email server, as it relies as much as possible on the relay server
//! for sanity and RFC compliance checks.
//!
//! It implements the following extensions:
//!
//! * 8BITMIME ([RFC 6152](https://tools.ietf.org/html/rfc6152))
//! * AUTH ([RFC 4954](https://tools.ietf.org/html/rfc4954)) with PLAIN, LOGIN and XOAUTH2 mechanisms
//! * STARTTLS ([RFC 2487](https://tools.ietf.org/html/rfc2487))
//!
//! #### SMTP Transport
//!
//! This transport uses the SMTP protocol to send emails over the network (locally or remotely).
//!
//! It is designed to be:
//!
//! * Secured: connections are encrypted by default
//! * Modern: unicode support for email contents and sender/recipient addresses when compatible
//! * Fast: supports connection reuse and pooling
//!
//! This client is designed to send emails to a relay server, and should *not* be used to send
//! emails directly to the destination server.
//!
//! The relay server can be the local email server, a specific host or a third-party service.
//!
//! #### Simple example
//!
//! This is the most basic example of usage:
//!
//! ```rust,no_run
//! # #[cfg(all(feature = "builder", any(feature = "native-tls", feature = "rustls-tls")))]
//! # fn test() -> Result<(), Box<dyn std::error::Error>> {
//! use lettre::{Message, Transport, SmtpTransport};
//!
//! let email = Message::builder()
//! .from("NoBody <[email protected]>".parse()?)
//! .reply_to("Yuin <[email protected]>".parse()?)
//! .to("Hei <[email protected]>".parse()?)
//! .subject("Happy new year")
//! .body(String::from("Be happy!"))?;
//!
//! // Create TLS transport on port 465
//! let sender = SmtpTransport::relay("smtp.example.com")?
//! .build();
//! // Send the email via remote relay
//! let result = sender.send(&email);
//! assert!(result.is_ok());
//! # Ok(())
//! # }
//! ```
//!
//! #### Authentication
//!
//! Example with authentication and connection pool:
//!
//! ```rust,no_run
//! # #[cfg(all(feature = "builder", any(feature = "native-tls", feature = "rustls-tls")))]
//! # fn test() -> Result<(), Box<dyn std::error::Error>> {
//! use lettre::{Message, Transport, SmtpTransport, transport::smtp::{PoolConfig, authentication::{Credentials, Mechanism}}};
//!
//! let email = Message::builder()
//! .from("NoBody <[email protected]>".parse()?)
//! .reply_to("Yuin <[email protected]>".parse()?)
//! .to("Hei <[email protected]>".parse()?)
//! .subject("Happy new year")
//! .body(String::from("Be happy!"))?;
//!
//! // Create TLS transport on port 587 with STARTTLS
//! let sender = SmtpTransport::starttls_relay("smtp.example.com")?
//! // Add credentials for authentication
//! .credentials(Credentials::new("username".to_string(), "password".to_string()))
//! // Configure expected authentication mechanism
//! .authentication(vec![Mechanism::Plain])
//! // Connection pool settings
//! .pool_config( PoolConfig::new().max_size(20))
//! .build();
//!
//! // Send the email via remote relay
//! let result = sender.send(&email);
//! assert!(result.is_ok());
//! # Ok(())
//! # }
//! ```
//!
//! You can specify custom TLS settings:
//!
//! ```rust,no_run
//! # #[cfg(all(feature = "builder", any(feature = "native-tls", feature = "rustls-tls")))]
//! # fn test() -> Result<(), Box<dyn std::error::Error>> {
//! use lettre::{Message, Transport, SmtpTransport, transport::smtp::client::{TlsParameters, Tls}};
//!
//! let email = Message::builder()
//! .from("NoBody <[email protected]>".parse()?)
//! .reply_to("Yuin <[email protected]>".parse()?)
//! .to("Hei <[email protected]>".parse()?)
//! .subject("Happy new year")
//! .body(String::from("Be happy!"))?;
//!
//! // Custom TLS configuration
//! let tls = TlsParameters::builder("smtp.example.com".to_string())
//! .dangerous_accept_invalid_certs(true).build()?;
//!
//! // Create TLS transport on port 465
//! let sender = SmtpTransport::relay("smtp.example.com")?
//! // Custom TLS configuration
//! .tls(Tls::Required(tls))
//! .build();
//!
//! // Send the email via remote relay
//! let result = sender.send(&email);
//! assert!(result.is_ok());
//! # Ok(())
//! # }
//! ```
#[cfg(any(feature = "tokio1", feature = "async-std1"))]
pub use self::async_transport::{AsyncSmtpTransport, AsyncSmtpTransportBuilder};
#[cfg(feature = "pool")]
pub use self::pool::PoolConfig;
pub use self::{
error::Error,
transport::{SmtpTransport, SmtpTransportBuilder},
};
#[cfg(any(feature = "native-tls", feature = "rustls-tls"))]
use crate::transport::smtp::client::TlsParameters;
use crate::transport::smtp::{
authentication::{Credentials, Mechanism, DEFAULT_MECHANISMS},
client::SmtpConnection,
extension::ClientId,
response::Response,
};
use client::Tls;
use std::time::Duration;
#[cfg(any(feature = "tokio1", feature = "async-std1"))]
mod async_transport;
pub mod authentication;
pub mod client;
pub mod commands;
mod error;
pub mod extension;
#[cfg(feature = "pool")]
mod pool;
pub mod response;
mod transport;
pub(super) mod util;
// Registered port numbers:
// https://www.iana.
// org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml
/// Default smtp port
pub const SMTP_PORT: u16 = 25;
/// Default submission port
pub const SUBMISSION_PORT: u16 = 587;
/// Default submission over TLS port
///
/// Defined in [RFC8314](https://tools.ietf.org/html/rfc8314)
pub const SUBMISSIONS_PORT: u16 = 465;
/// Default timeout
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(60);
#[derive(Debug, Clone)]
struct SmtpInfo {
/// Name sent during EHLO
hello_name: ClientId,
/// Server we are connecting to
server: String,
/// Port to connect to
port: u16,
/// TLS security configuration
tls: Tls,
/// Optional enforced authentication mechanism
authentication: Vec<Mechanism>,
/// Credentials
credentials: Option<Credentials>,
/// Define network timeout
/// It can be changed later for specific needs (like a different timeout for each SMTP command)
timeout: Option<Duration>,
}
impl Default for SmtpInfo {
fn | () -> Self {
Self {
server: "localhost".to_string(),
port: SMTP_PORT,
hello_name: ClientId::default(),
credentials: None,
authentication: DEFAULT_MECHANISMS.into(),
timeout: Some(DEFAULT_TIMEOUT),
tls: Tls::None,
}
}
}
| default | identifier_name |
mod.rs | //! The SMTP transport sends emails using the SMTP protocol.
//!
//! This SMTP client follows [RFC
//! 5321](https://tools.ietf.org/html/rfc5321), and is designed to efficiently send emails from an
//! application to a relay email server, as it relies as much as possible on the relay server
//! for sanity and RFC compliance checks.
//!
//! It implements the following extensions:
//!
//! * 8BITMIME ([RFC 6152](https://tools.ietf.org/html/rfc6152))
//! * AUTH ([RFC 4954](https://tools.ietf.org/html/rfc4954)) with PLAIN, LOGIN and XOAUTH2 mechanisms
//! * STARTTLS ([RFC 2487](https://tools.ietf.org/html/rfc2487))
//!
//! #### SMTP Transport
//!
//! This transport uses the SMTP protocol to send emails over the network (locally or remotely).
//! | //! * Fast: supports connection reuse and pooling
//!
//! This client is designed to send emails to a relay server, and should *not* be used to send
//! emails directly to the destination server.
//!
//! The relay server can be the local email server, a specific host or a third-party service.
//!
//! #### Simple example
//!
//! This is the most basic example of usage:
//!
//! ```rust,no_run
//! # #[cfg(all(feature = "builder", any(feature = "native-tls", feature = "rustls-tls")))]
//! # fn test() -> Result<(), Box<dyn std::error::Error>> {
//! use lettre::{Message, Transport, SmtpTransport};
//!
//! let email = Message::builder()
//! .from("NoBody <[email protected]>".parse()?)
//! .reply_to("Yuin <[email protected]>".parse()?)
//! .to("Hei <[email protected]>".parse()?)
//! .subject("Happy new year")
//! .body(String::from("Be happy!"))?;
//!
//! // Create TLS transport on port 465
//! let sender = SmtpTransport::relay("smtp.example.com")?
//! .build();
//! // Send the email via remote relay
//! let result = sender.send(&email);
//! assert!(result.is_ok());
//! # Ok(())
//! # }
//! ```
//!
//! #### Authentication
//!
//! Example with authentication and connection pool:
//!
//! ```rust,no_run
//! # #[cfg(all(feature = "builder", any(feature = "native-tls", feature = "rustls-tls")))]
//! # fn test() -> Result<(), Box<dyn std::error::Error>> {
//! use lettre::{Message, Transport, SmtpTransport, transport::smtp::{PoolConfig, authentication::{Credentials, Mechanism}}};
//!
//! let email = Message::builder()
//! .from("NoBody <[email protected]>".parse()?)
//! .reply_to("Yuin <[email protected]>".parse()?)
//! .to("Hei <[email protected]>".parse()?)
//! .subject("Happy new year")
//! .body(String::from("Be happy!"))?;
//!
//! // Create TLS transport on port 587 with STARTTLS
//! let sender = SmtpTransport::starttls_relay("smtp.example.com")?
//! // Add credentials for authentication
//! .credentials(Credentials::new("username".to_string(), "password".to_string()))
//! // Configure expected authentication mechanism
//! .authentication(vec![Mechanism::Plain])
//! // Connection pool settings
//! .pool_config( PoolConfig::new().max_size(20))
//! .build();
//!
//! // Send the email via remote relay
//! let result = sender.send(&email);
//! assert!(result.is_ok());
//! # Ok(())
//! # }
//! ```
//!
//! You can specify custom TLS settings:
//!
//! ```rust,no_run
//! # #[cfg(all(feature = "builder", any(feature = "native-tls", feature = "rustls-tls")))]
//! # fn test() -> Result<(), Box<dyn std::error::Error>> {
//! use lettre::{Message, Transport, SmtpTransport, transport::smtp::client::{TlsParameters, Tls}};
//!
//! let email = Message::builder()
//! .from("NoBody <[email protected]>".parse()?)
//! .reply_to("Yuin <[email protected]>".parse()?)
//! .to("Hei <[email protected]>".parse()?)
//! .subject("Happy new year")
//! .body(String::from("Be happy!"))?;
//!
//! // Custom TLS configuration
//! let tls = TlsParameters::builder("smtp.example.com".to_string())
//! .dangerous_accept_invalid_certs(true).build()?;
//!
//! // Create TLS transport on port 465
//! let sender = SmtpTransport::relay("smtp.example.com")?
//! // Custom TLS configuration
//! .tls(Tls::Required(tls))
//! .build();
//!
//! // Send the email via remote relay
//! let result = sender.send(&email);
//! assert!(result.is_ok());
//! # Ok(())
//! # }
//! ```
#[cfg(any(feature = "tokio1", feature = "async-std1"))]
pub use self::async_transport::{AsyncSmtpTransport, AsyncSmtpTransportBuilder};
#[cfg(feature = "pool")]
pub use self::pool::PoolConfig;
pub use self::{
error::Error,
transport::{SmtpTransport, SmtpTransportBuilder},
};
#[cfg(any(feature = "native-tls", feature = "rustls-tls"))]
use crate::transport::smtp::client::TlsParameters;
use crate::transport::smtp::{
authentication::{Credentials, Mechanism, DEFAULT_MECHANISMS},
client::SmtpConnection,
extension::ClientId,
response::Response,
};
use client::Tls;
use std::time::Duration;
#[cfg(any(feature = "tokio1", feature = "async-std1"))]
mod async_transport;
pub mod authentication;
pub mod client;
pub mod commands;
mod error;
pub mod extension;
#[cfg(feature = "pool")]
mod pool;
pub mod response;
mod transport;
pub(super) mod util;
// Registered port numbers:
// https://www.iana.
// org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml
/// Default smtp port
pub const SMTP_PORT: u16 = 25;
/// Default submission port
pub const SUBMISSION_PORT: u16 = 587;
/// Default submission over TLS port
///
/// Defined in [RFC8314](https://tools.ietf.org/html/rfc8314)
pub const SUBMISSIONS_PORT: u16 = 465;
/// Default timeout
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(60);
#[derive(Debug, Clone)]
struct SmtpInfo {
/// Name sent during EHLO
hello_name: ClientId,
/// Server we are connecting to
server: String,
/// Port to connect to
port: u16,
/// TLS security configuration
tls: Tls,
/// Optional enforced authentication mechanism
authentication: Vec<Mechanism>,
/// Credentials
credentials: Option<Credentials>,
/// Define network timeout
/// It can be changed later for specific needs (like a different timeout for each SMTP command)
timeout: Option<Duration>,
}
impl Default for SmtpInfo {
fn default() -> Self {
Self {
server: "localhost".to_string(),
port: SMTP_PORT,
hello_name: ClientId::default(),
credentials: None,
authentication: DEFAULT_MECHANISMS.into(),
timeout: Some(DEFAULT_TIMEOUT),
tls: Tls::None,
}
}
} | //! It is designed to be:
//!
//! * Secured: connections are encrypted by default
//! * Modern: unicode support for email contents and sender/recipient addresses when compatible | random_line_split |
rec-align-u64.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #2303
#![feature(intrinsics)]
use std::mem;
mod rusti {
extern "rust-intrinsic" {
pub fn pref_align_of<T>() -> uint;
pub fn min_align_of<T>() -> uint;
}
}
// This is the type with the questionable alignment
#[derive(Debug)]
struct Inner {
c64: u64
}
// This is the type that contains the type with the
// questionable alignment, for testing
#[derive(Debug)]
struct Outer {
c8: u8,
t: Inner
}
#[cfg(any(target_os = "linux",
target_os = "macos",
target_os = "freebsd",
target_os = "dragonfly",
target_os = "openbsd"))]
mod m {
#[cfg(target_arch = "x86")]
pub mod m {
pub fn align() -> uint { 4 }
pub fn size() -> uint { 12 }
}
#[cfg(any(target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64"))]
pub mod m {
pub fn align() -> uint { 8 }
pub fn size() -> uint { 16 }
}
}
#[cfg(target_os = "bitrig")]
mod m {
#[cfg(target_arch = "x86_64")]
pub mod m {
pub fn align() -> uint { 8 }
pub fn size() -> uint { 16 }
}
}
#[cfg(target_os = "windows")]
mod m {
#[cfg(target_arch = "x86")]
pub mod m {
pub fn align() -> uint { 8 }
pub fn size() -> uint { 16 }
}
#[cfg(target_arch = "x86_64")]
pub mod m {
pub fn align() -> uint { 8 }
pub fn size() -> uint { 16 }
}
}
#[cfg(target_os = "android")]
mod m { | }
pub fn main() {
unsafe {
let x = Outer {c8: 22, t: Inner {c64: 44}};
let y = format!("{:?}", x);
println!("align inner = {:?}", rusti::min_align_of::<Inner>());
println!("size outer = {:?}", mem::size_of::<Outer>());
println!("y = {:?}", y);
// per clang/gcc the alignment of `Inner` is 4 on x86.
assert_eq!(rusti::min_align_of::<Inner>(), m::m::align());
// per clang/gcc the size of `Outer` should be 12
// because `Inner`s alignment was 4.
assert_eq!(mem::size_of::<Outer>(), m::m::size());
assert_eq!(y, "Outer { c8: 22, t: Inner { c64: 44 } }".to_string());
}
} | #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
pub mod m {
pub fn align() -> uint { 8 }
pub fn size() -> uint { 16 }
} | random_line_split |
rec-align-u64.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #2303
#![feature(intrinsics)]
use std::mem;
mod rusti {
extern "rust-intrinsic" {
pub fn pref_align_of<T>() -> uint;
pub fn min_align_of<T>() -> uint;
}
}
// This is the type with the questionable alignment
#[derive(Debug)]
struct Inner {
c64: u64
}
// This is the type that contains the type with the
// questionable alignment, for testing
#[derive(Debug)]
struct Outer {
c8: u8,
t: Inner
}
#[cfg(any(target_os = "linux",
target_os = "macos",
target_os = "freebsd",
target_os = "dragonfly",
target_os = "openbsd"))]
mod m {
#[cfg(target_arch = "x86")]
pub mod m {
pub fn align() -> uint { 4 }
pub fn size() -> uint { 12 }
}
#[cfg(any(target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64"))]
pub mod m {
pub fn align() -> uint { 8 }
pub fn size() -> uint { 16 }
}
}
#[cfg(target_os = "bitrig")]
mod m {
#[cfg(target_arch = "x86_64")]
pub mod m {
pub fn align() -> uint { 8 }
pub fn size() -> uint { 16 }
}
}
#[cfg(target_os = "windows")]
mod m {
#[cfg(target_arch = "x86")]
pub mod m {
pub fn align() -> uint { 8 }
pub fn size() -> uint { 16 }
}
#[cfg(target_arch = "x86_64")]
pub mod m {
pub fn | () -> uint { 8 }
pub fn size() -> uint { 16 }
}
}
#[cfg(target_os = "android")]
mod m {
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
pub mod m {
pub fn align() -> uint { 8 }
pub fn size() -> uint { 16 }
}
}
pub fn main() {
unsafe {
let x = Outer {c8: 22, t: Inner {c64: 44}};
let y = format!("{:?}", x);
println!("align inner = {:?}", rusti::min_align_of::<Inner>());
println!("size outer = {:?}", mem::size_of::<Outer>());
println!("y = {:?}", y);
// per clang/gcc the alignment of `Inner` is 4 on x86.
assert_eq!(rusti::min_align_of::<Inner>(), m::m::align());
// per clang/gcc the size of `Outer` should be 12
// because `Inner`s alignment was 4.
assert_eq!(mem::size_of::<Outer>(), m::m::size());
assert_eq!(y, "Outer { c8: 22, t: Inner { c64: 44 } }".to_string());
}
}
| align | identifier_name |
rec-align-u64.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #2303
#![feature(intrinsics)]
use std::mem;
mod rusti {
extern "rust-intrinsic" {
pub fn pref_align_of<T>() -> uint;
pub fn min_align_of<T>() -> uint;
}
}
// This is the type with the questionable alignment
#[derive(Debug)]
struct Inner {
c64: u64
}
// This is the type that contains the type with the
// questionable alignment, for testing
#[derive(Debug)]
struct Outer {
c8: u8,
t: Inner
}
#[cfg(any(target_os = "linux",
target_os = "macos",
target_os = "freebsd",
target_os = "dragonfly",
target_os = "openbsd"))]
mod m {
#[cfg(target_arch = "x86")]
pub mod m {
pub fn align() -> uint { 4 }
pub fn size() -> uint { 12 }
}
#[cfg(any(target_arch = "x86_64", target_arch = "arm", target_arch = "aarch64"))]
pub mod m {
pub fn align() -> uint |
pub fn size() -> uint { 16 }
}
}
#[cfg(target_os = "bitrig")]
mod m {
#[cfg(target_arch = "x86_64")]
pub mod m {
pub fn align() -> uint { 8 }
pub fn size() -> uint { 16 }
}
}
#[cfg(target_os = "windows")]
mod m {
#[cfg(target_arch = "x86")]
pub mod m {
pub fn align() -> uint { 8 }
pub fn size() -> uint { 16 }
}
#[cfg(target_arch = "x86_64")]
pub mod m {
pub fn align() -> uint { 8 }
pub fn size() -> uint { 16 }
}
}
#[cfg(target_os = "android")]
mod m {
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
pub mod m {
pub fn align() -> uint { 8 }
pub fn size() -> uint { 16 }
}
}
pub fn main() {
unsafe {
let x = Outer {c8: 22, t: Inner {c64: 44}};
let y = format!("{:?}", x);
println!("align inner = {:?}", rusti::min_align_of::<Inner>());
println!("size outer = {:?}", mem::size_of::<Outer>());
println!("y = {:?}", y);
// per clang/gcc the alignment of `Inner` is 4 on x86.
assert_eq!(rusti::min_align_of::<Inner>(), m::m::align());
// per clang/gcc the size of `Outer` should be 12
// because `Inner`s alignment was 4.
assert_eq!(mem::size_of::<Outer>(), m::m::size());
assert_eq!(y, "Outer { c8: 22, t: Inner { c64: 44 } }".to_string());
}
}
| { 8 } | identifier_body |
easy.rs | use std::sync::{Once, ONCE_INIT};
use std::c_vec::CVec;
use std::{io,mem};
use std::collections::HashMap;
use libc::{c_void,c_int,c_long,c_double,size_t};
use super::{consts,err,info,opt};
use super::err::ErrCode;
use http::body::Body;
use http::{header,Response};
type CURL = c_void;
pub type ProgressCb<'a> = |uint, uint, uint, uint|:'a -> ();
#[link(name = "curl")]
extern {
pub fn curl_easy_init() -> *mut CURL;
pub fn curl_easy_setopt(curl: *mut CURL, option: opt::Opt,...) -> ErrCode;
pub fn curl_easy_perform(curl: *mut CURL) -> ErrCode;
pub fn curl_easy_cleanup(curl: *mut CURL);
pub fn curl_easy_getinfo(curl: *const CURL, info: info::Key,...) -> ErrCode;
pub fn curl_global_cleanup();
}
pub struct Easy {
curl: *mut CURL
}
impl Easy {
pub fn new() -> Easy {
// Ensure that curl is globally initialized
global_init();
let handle = unsafe {
let p = curl_easy_init();
curl_easy_setopt(p, opt::NOPROGRESS, 0u);
p
};
Easy { curl: handle }
}
#[inline]
pub fn setopt<T: opt::OptVal>(&mut self, option: opt::Opt, val: T) -> Result<(), err::ErrCode> {
// TODO: Prevent setting callback related options
let mut res = err::OK;
unsafe {
val.with_c_repr(|repr| {
res = curl_easy_setopt(self.curl, option, repr);
})
}
if res.is_success() { Ok(()) } else { Err(res) }
}
#[inline]
pub fn perform(&mut self, body: Option<&mut Body>, progress: Option<ProgressCb>) -> Result<Response, err::ErrCode> {
let mut builder = ResponseBuilder::new();
unsafe {
let resp_p: uint = mem::transmute(&builder);
let body_p: uint = match body {
Some(b) => mem::transmute(b),
None => 0
};
let progress_p: uint = match progress.as_ref() {
Some(cb) => mem::transmute(cb),
None => 0
};
debug!("setting read fn: {}", body_p!= 0);
// Set callback options
curl_easy_setopt(self.curl, opt::READFUNCTION, curl_read_fn);
curl_easy_setopt(self.curl, opt::READDATA, body_p);
curl_easy_setopt(self.curl, opt::WRITEFUNCTION, curl_write_fn);
curl_easy_setopt(self.curl, opt::WRITEDATA, resp_p);
curl_easy_setopt(self.curl, opt::HEADERFUNCTION, curl_header_fn);
curl_easy_setopt(self.curl, opt::HEADERDATA, resp_p);
curl_easy_setopt(self.curl, opt::PROGRESSFUNCTION, curl_progress_fn);
curl_easy_setopt(self.curl, opt::PROGRESSDATA, progress_p);
}
let err = unsafe { curl_easy_perform(self.curl) };
// If the request failed, abort here
if!err.is_success() {
return Err(err);
}
// Try to get the response code
builder.code = try!(self.get_response_code());
Ok(builder.build())
}
pub fn get_response_code(&self) -> Result<uint, err::ErrCode> {
Ok(try!(self.get_info_long(info::RESPONSE_CODE)) as uint)
}
pub fn get_total_time(&self) -> Result<uint, err::ErrCode> {
Ok(try!(self.get_info_long(info::TOTAL_TIME)) as uint)
}
fn get_info_long(&self, key: info::Key) -> Result<c_long, err::ErrCode> {
let v: c_long = 0;
let res = unsafe {
curl_easy_getinfo(self.curl as *const CURL, key, &v)
};
if!res.is_success() {
return Err(res);
}
Ok(v)
}
}
#[inline]
fn | () {
// Schedule curl to be cleaned up after we're done with this whole process
static mut INIT: Once = ONCE_INIT;
unsafe {
INIT.doit(|| ::std::rt::at_exit(proc() curl_global_cleanup()))
}
}
impl Drop for Easy {
fn drop(&mut self) {
unsafe { curl_easy_cleanup(self.curl) }
}
}
/*
*
* TODO: Move this into handle
*
*/
struct ResponseBuilder {
code: uint,
hdrs: HashMap<String,Vec<String>>,
body: Vec<u8>
}
impl ResponseBuilder {
fn new() -> ResponseBuilder {
ResponseBuilder {
code: 0,
hdrs: HashMap::new(),
body: Vec::new()
}
}
fn add_header(&mut self, name: &str, val: &str) {
// TODO: Reduce allocations
use std::ascii::OwnedAsciiExt;
let name = name.to_string().into_ascii_lower();
let inserted = match self.hdrs.find_mut(&name) {
Some(vals) => {
vals.push(val.to_string());
true
}
None => false
};
if!inserted {
self.hdrs.insert(name, vec!(val.to_string()));
}
}
fn build(self) -> Response {
let ResponseBuilder { code, hdrs, body } = self;
Response::new(code, hdrs, body)
}
}
/*
*
* ===== Callbacks =====
*/
pub extern "C" fn curl_read_fn(p: *mut u8, size: size_t, nmemb: size_t, body: *mut Body) -> size_t {
if body.is_null() {
return 0;
}
let mut dst = unsafe { CVec::new(p, (size * nmemb) as uint) };
let body: &mut Body = unsafe { mem::transmute(body) };
match body.read(dst.as_mut_slice()) {
Ok(len) => len as size_t,
Err(e) => {
match e.kind {
io::EndOfFile => 0 as size_t,
_ => consts::CURL_READFUNC_ABORT as size_t
}
}
}
}
pub extern "C" fn curl_write_fn(p: *mut u8, size: size_t, nmemb: size_t, resp: *mut ResponseBuilder) -> size_t {
if!resp.is_null() {
let builder: &mut ResponseBuilder = unsafe { mem::transmute(resp) };
let chunk = unsafe { CVec::new(p, (size * nmemb) as uint) };
builder.body.push_all(chunk.as_slice());
}
size * nmemb
}
pub extern "C" fn curl_header_fn(p: *mut u8, size: size_t, nmemb: size_t, resp: &mut ResponseBuilder) -> size_t {
// TODO: Skip the first call (it seems to be the status line)
let vec = unsafe { CVec::new(p, (size * nmemb) as uint) };
match header::parse(vec.as_slice()) {
Some((name, val)) => {
resp.add_header(name, val);
}
None => {}
}
vec.len() as size_t
}
pub extern "C" fn curl_progress_fn(cb: *mut ProgressCb, dltotal: c_double, dlnow: c_double, ultotal: c_double, ulnow: c_double) -> c_int {
#[inline]
fn to_uint(v: c_double) -> uint {
if v > 0.0 { v as uint } else { 0 }
}
if!cb.is_null() {
let cb: &mut ProgressCb = unsafe { &mut *cb };
(*cb)(to_uint(dltotal), to_uint(dlnow), to_uint(ultotal), to_uint(ulnow));
}
0
}
| global_init | identifier_name |
easy.rs | use std::sync::{Once, ONCE_INIT};
use std::c_vec::CVec;
use std::{io,mem};
use std::collections::HashMap;
use libc::{c_void,c_int,c_long,c_double,size_t};
use super::{consts,err,info,opt};
use super::err::ErrCode;
use http::body::Body;
use http::{header,Response};
type CURL = c_void;
pub type ProgressCb<'a> = |uint, uint, uint, uint|:'a -> ();
#[link(name = "curl")]
extern {
pub fn curl_easy_init() -> *mut CURL;
pub fn curl_easy_setopt(curl: *mut CURL, option: opt::Opt,...) -> ErrCode;
pub fn curl_easy_perform(curl: *mut CURL) -> ErrCode;
pub fn curl_easy_cleanup(curl: *mut CURL);
pub fn curl_easy_getinfo(curl: *const CURL, info: info::Key,...) -> ErrCode;
pub fn curl_global_cleanup();
}
pub struct Easy {
curl: *mut CURL
}
impl Easy {
pub fn new() -> Easy {
// Ensure that curl is globally initialized
global_init();
let handle = unsafe {
let p = curl_easy_init();
curl_easy_setopt(p, opt::NOPROGRESS, 0u);
p
};
Easy { curl: handle }
}
#[inline]
pub fn setopt<T: opt::OptVal>(&mut self, option: opt::Opt, val: T) -> Result<(), err::ErrCode> {
// TODO: Prevent setting callback related options
let mut res = err::OK;
unsafe {
val.with_c_repr(|repr| {
res = curl_easy_setopt(self.curl, option, repr);
})
}
if res.is_success() { Ok(()) } else { Err(res) }
}
#[inline]
pub fn perform(&mut self, body: Option<&mut Body>, progress: Option<ProgressCb>) -> Result<Response, err::ErrCode> {
let mut builder = ResponseBuilder::new();
unsafe {
let resp_p: uint = mem::transmute(&builder);
let body_p: uint = match body {
Some(b) => mem::transmute(b),
None => 0
};
let progress_p: uint = match progress.as_ref() {
Some(cb) => mem::transmute(cb),
None => 0
};
debug!("setting read fn: {}", body_p!= 0);
// Set callback options
curl_easy_setopt(self.curl, opt::READFUNCTION, curl_read_fn);
curl_easy_setopt(self.curl, opt::READDATA, body_p);
curl_easy_setopt(self.curl, opt::WRITEFUNCTION, curl_write_fn);
curl_easy_setopt(self.curl, opt::WRITEDATA, resp_p);
curl_easy_setopt(self.curl, opt::HEADERFUNCTION, curl_header_fn);
curl_easy_setopt(self.curl, opt::HEADERDATA, resp_p);
curl_easy_setopt(self.curl, opt::PROGRESSFUNCTION, curl_progress_fn);
curl_easy_setopt(self.curl, opt::PROGRESSDATA, progress_p);
}
let err = unsafe { curl_easy_perform(self.curl) };
// If the request failed, abort here
if!err.is_success() {
return Err(err);
}
// Try to get the response code
builder.code = try!(self.get_response_code());
Ok(builder.build())
}
pub fn get_response_code(&self) -> Result<uint, err::ErrCode> {
Ok(try!(self.get_info_long(info::RESPONSE_CODE)) as uint)
}
pub fn get_total_time(&self) -> Result<uint, err::ErrCode> {
Ok(try!(self.get_info_long(info::TOTAL_TIME)) as uint)
}
fn get_info_long(&self, key: info::Key) -> Result<c_long, err::ErrCode> {
let v: c_long = 0;
let res = unsafe {
curl_easy_getinfo(self.curl as *const CURL, key, &v)
};
if!res.is_success() {
return Err(res);
}
Ok(v)
}
}
#[inline]
fn global_init() {
// Schedule curl to be cleaned up after we're done with this whole process
static mut INIT: Once = ONCE_INIT;
unsafe {
INIT.doit(|| ::std::rt::at_exit(proc() curl_global_cleanup()))
}
}
impl Drop for Easy {
fn drop(&mut self) {
unsafe { curl_easy_cleanup(self.curl) }
}
}
/*
*
* TODO: Move this into handle
*
*/
struct ResponseBuilder {
code: uint,
hdrs: HashMap<String,Vec<String>>,
body: Vec<u8>
}
impl ResponseBuilder {
fn new() -> ResponseBuilder {
ResponseBuilder {
code: 0,
hdrs: HashMap::new(),
body: Vec::new()
}
}
fn add_header(&mut self, name: &str, val: &str) {
// TODO: Reduce allocations
use std::ascii::OwnedAsciiExt;
let name = name.to_string().into_ascii_lower();
let inserted = match self.hdrs.find_mut(&name) {
Some(vals) => {
vals.push(val.to_string());
true
}
None => false
};
if!inserted {
self.hdrs.insert(name, vec!(val.to_string()));
}
}
fn build(self) -> Response {
let ResponseBuilder { code, hdrs, body } = self;
Response::new(code, hdrs, body)
}
}
/*
*
* ===== Callbacks =====
*/
pub extern "C" fn curl_read_fn(p: *mut u8, size: size_t, nmemb: size_t, body: *mut Body) -> size_t {
if body.is_null() {
return 0;
}
let mut dst = unsafe { CVec::new(p, (size * nmemb) as uint) };
let body: &mut Body = unsafe { mem::transmute(body) };
match body.read(dst.as_mut_slice()) {
Ok(len) => len as size_t,
Err(e) => {
match e.kind {
io::EndOfFile => 0 as size_t,
_ => consts::CURL_READFUNC_ABORT as size_t
}
}
}
}
pub extern "C" fn curl_write_fn(p: *mut u8, size: size_t, nmemb: size_t, resp: *mut ResponseBuilder) -> size_t |
pub extern "C" fn curl_header_fn(p: *mut u8, size: size_t, nmemb: size_t, resp: &mut ResponseBuilder) -> size_t {
// TODO: Skip the first call (it seems to be the status line)
let vec = unsafe { CVec::new(p, (size * nmemb) as uint) };
match header::parse(vec.as_slice()) {
Some((name, val)) => {
resp.add_header(name, val);
}
None => {}
}
vec.len() as size_t
}
pub extern "C" fn curl_progress_fn(cb: *mut ProgressCb, dltotal: c_double, dlnow: c_double, ultotal: c_double, ulnow: c_double) -> c_int {
#[inline]
fn to_uint(v: c_double) -> uint {
if v > 0.0 { v as uint } else { 0 }
}
if!cb.is_null() {
let cb: &mut ProgressCb = unsafe { &mut *cb };
(*cb)(to_uint(dltotal), to_uint(dlnow), to_uint(ultotal), to_uint(ulnow));
}
0
}
| {
if !resp.is_null() {
let builder: &mut ResponseBuilder = unsafe { mem::transmute(resp) };
let chunk = unsafe { CVec::new(p, (size * nmemb) as uint) };
builder.body.push_all(chunk.as_slice());
}
size * nmemb
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.