file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
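Each row below stores one fill-in-the-middle example: `prefix` and `suffix` are the surrounding code, `middle` is the held-out span, and `fim_type` names the split strategy (`random_line_split`, `identifier_name`, `identifier_body`, or `conditional_block`). As a minimal sketch of that reading (the `FimRow` struct and `reconstruct` helper are illustrative, not part of the dataset), the original file is recovered by concatenating `prefix + middle + suffix`:

```rust
/// Illustrative shape of one table row (field names mirror the columns above).
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String,
}

impl FimRow {
    /// Rebuild the original file text under the assumed FIM semantics:
    /// the middle is the span that was cut out between prefix and suffix.
    fn reconstruct(&self) -> String {
        let mut out = String::with_capacity(
            self.prefix.len() + self.middle.len() + self.suffix.len(),
        );
        out.push_str(&self.prefix);
        out.push_str(&self.middle);
        out.push_str(&self.suffix);
        out
    }
}

fn main() {
    // Toy row, not taken verbatim from the dataset.
    let row = FimRow {
        file_name: "example.rs".to_string(),
        prefix: "fn answer() -> u32 ".to_string(),
        middle: "{ 42 }".to_string(),
        suffix: "\n".to_string(),
        fim_type: "identifier_body".to_string(),
    };
    assert_eq!(row.reconstruct(), "fn answer() -> u32 { 42 }\n");
    println!("{} ({})", row.file_name, row.fim_type);
}
```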
diy_hints.rs
|
use std::collections::HashSet;
use rustyline::hint::{Hint, Hinter};
use rustyline::Context;
use rustyline::{Editor, Result};
use rustyline_derive::{Completer, Helper, Highlighter, Validator};
#[derive(Completer, Helper, Validator, Highlighter)]
struct DIYHinter {
// This is a simple rustyline example; for better efficiency, use a **radix trie**.
hints: HashSet<CommandHint>,
}
#[derive(Hash, Debug, PartialEq, Eq)]
struct CommandHint {
display: String,
complete_up_to: usize,
}
impl Hint for CommandHint {
fn display(&self) -> &str {
&self.display
}
fn completion(&self) -> Option<&str> {
if self.complete_up_to > 0 {
Some(&self.display[..self.complete_up_to])
} else {
None
}
}
}
impl CommandHint {
fn new(text: &str, complete_up_to: &str) -> CommandHint {
assert!(text.starts_with(complete_up_to));
CommandHint {
display: text.into(),
complete_up_to: complete_up_to.len(),
}
}
fn suffix(&self, strip_chars: usize) -> CommandHint {
CommandHint {
|
}
impl Hinter for DIYHinter {
type Hint = CommandHint;
fn hint(&self, line: &str, pos: usize, _ctx: &Context<'_>) -> Option<CommandHint> {
if line.is_empty() || pos < line.len() {
return None;
}
self.hints
.iter()
.filter_map(|hint| {
// To hint only after a complete word (as the redis CLI does), add the condition:
// line.ends_with(" ")
if hint.display.starts_with(line) {
Some(hint.suffix(pos))
} else {
None
}
})
.next()
}
}
fn diy_hints() -> HashSet<CommandHint> {
let mut set = HashSet::new();
set.insert(CommandHint::new("help", "help"));
set.insert(CommandHint::new("get key", "get "));
set.insert(CommandHint::new("set key value", "set "));
set.insert(CommandHint::new("hget key field", "hget "));
set.insert(CommandHint::new("hset key field value", "hset "));
set
}
fn main() -> Result<()> {
println!("This is a DIY hint hack of rustyline");
let h = DIYHinter { hints: diy_hints() };
let mut rl: Editor<DIYHinter> = Editor::new();
rl.set_helper(Some(h));
loop {
let input = rl.readline("> ")?;
println!("input: {}", input);
}
}
|
display: self.display[strip_chars..].to_owned(),
complete_up_to: self.complete_up_to.saturating_sub(strip_chars),
}
}
|
random_line_split
|
diy_hints.rs
|
use std::collections::HashSet;
use rustyline::hint::{Hint, Hinter};
use rustyline::Context;
use rustyline::{Editor, Result};
use rustyline_derive::{Completer, Helper, Highlighter, Validator};
#[derive(Completer, Helper, Validator, Highlighter)]
struct DIYHinter {
// This is a simple rustyline example; for better efficiency, use a **radix trie**.
hints: HashSet<CommandHint>,
}
#[derive(Hash, Debug, PartialEq, Eq)]
struct CommandHint {
display: String,
complete_up_to: usize,
}
impl Hint for CommandHint {
fn display(&self) -> &str {
&self.display
}
fn completion(&self) -> Option<&str> {
if self.complete_up_to > 0 {
Some(&self.display[..self.complete_up_to])
} else {
None
}
}
}
impl CommandHint {
fn new(text: &str, complete_up_to: &str) -> CommandHint {
assert!(text.starts_with(complete_up_to));
CommandHint {
display: text.into(),
complete_up_to: complete_up_to.len(),
}
}
fn suffix(&self, strip_chars: usize) -> CommandHint {
CommandHint {
display: self.display[strip_chars..].to_owned(),
complete_up_to: self.complete_up_to.saturating_sub(strip_chars),
}
}
}
impl Hinter for DIYHinter {
type Hint = CommandHint;
fn
|
(&self, line: &str, pos: usize, _ctx: &Context<'_>) -> Option<CommandHint> {
if line.is_empty() || pos < line.len() {
return None;
}
self.hints
.iter()
.filter_map(|hint| {
// To hint only after a complete word (as the redis CLI does), add the condition:
// line.ends_with(" ")
if hint.display.starts_with(line) {
Some(hint.suffix(pos))
} else {
None
}
})
.next()
}
}
fn diy_hints() -> HashSet<CommandHint> {
let mut set = HashSet::new();
set.insert(CommandHint::new("help", "help"));
set.insert(CommandHint::new("get key", "get "));
set.insert(CommandHint::new("set key value", "set "));
set.insert(CommandHint::new("hget key field", "hget "));
set.insert(CommandHint::new("hset key field value", "hset "));
set
}
fn main() -> Result<()> {
println!("This is a DIY hint hack of rustyline");
let h = DIYHinter { hints: diy_hints() };
let mut rl: Editor<DIYHinter> = Editor::new();
rl.set_helper(Some(h));
loop {
let input = rl.readline("> ")?;
println!("input: {}", input);
}
}
|
hint
|
identifier_name
|
diy_hints.rs
|
use std::collections::HashSet;
use rustyline::hint::{Hint, Hinter};
use rustyline::Context;
use rustyline::{Editor, Result};
use rustyline_derive::{Completer, Helper, Highlighter, Validator};
#[derive(Completer, Helper, Validator, Highlighter)]
struct DIYHinter {
// This is a simple rustyline example; for better efficiency, use a **radix trie**.
hints: HashSet<CommandHint>,
}
#[derive(Hash, Debug, PartialEq, Eq)]
struct CommandHint {
display: String,
complete_up_to: usize,
}
impl Hint for CommandHint {
fn display(&self) -> &str {
&self.display
}
fn completion(&self) -> Option<&str> {
if self.complete_up_to > 0 {
Some(&self.display[..self.complete_up_to])
} else {
None
}
}
}
impl CommandHint {
fn new(text: &str, complete_up_to: &str) -> CommandHint {
assert!(text.starts_with(complete_up_to));
CommandHint {
display: text.into(),
complete_up_to: complete_up_to.len(),
}
}
fn suffix(&self, strip_chars: usize) -> CommandHint {
CommandHint {
display: self.display[strip_chars..].to_owned(),
complete_up_to: self.complete_up_to.saturating_sub(strip_chars),
}
}
}
impl Hinter for DIYHinter {
type Hint = CommandHint;
fn hint(&self, line: &str, pos: usize, _ctx: &Context<'_>) -> Option<CommandHint> {
if line.is_empty() || pos < line.len() {
return None;
}
self.hints
.iter()
.filter_map(|hint| {
// To hint only after a complete word (as the redis CLI does), add the condition:
// line.ends_with(" ")
if hint.display.starts_with(line) {
Some(hint.suffix(pos))
} else {
None
}
})
.next()
}
}
fn diy_hints() -> HashSet<CommandHint>
|
fn main() -> Result<()> {
println!("This is a DIY hint hack of rustyline");
let h = DIYHinter { hints: diy_hints() };
let mut rl: Editor<DIYHinter> = Editor::new();
rl.set_helper(Some(h));
loop {
let input = rl.readline("> ")?;
println!("input: {}", input);
}
}
|
{
let mut set = HashSet::new();
set.insert(CommandHint::new("help", "help"));
set.insert(CommandHint::new("get key", "get "));
set.insert(CommandHint::new("set key value", "set "));
set.insert(CommandHint::new("hget key field", "hget "));
set.insert(CommandHint::new("hset key field value", "hset "));
set
}
|
identifier_body
|
diy_hints.rs
|
use std::collections::HashSet;
use rustyline::hint::{Hint, Hinter};
use rustyline::Context;
use rustyline::{Editor, Result};
use rustyline_derive::{Completer, Helper, Highlighter, Validator};
#[derive(Completer, Helper, Validator, Highlighter)]
struct DIYHinter {
// This is a simple rustyline example; for better efficiency, use a **radix trie**.
hints: HashSet<CommandHint>,
}
#[derive(Hash, Debug, PartialEq, Eq)]
struct CommandHint {
display: String,
complete_up_to: usize,
}
impl Hint for CommandHint {
fn display(&self) -> &str {
&self.display
}
fn completion(&self) -> Option<&str> {
if self.complete_up_to > 0 {
Some(&self.display[..self.complete_up_to])
} else {
None
}
}
}
impl CommandHint {
fn new(text: &str, complete_up_to: &str) -> CommandHint {
assert!(text.starts_with(complete_up_to));
CommandHint {
display: text.into(),
complete_up_to: complete_up_to.len(),
}
}
fn suffix(&self, strip_chars: usize) -> CommandHint {
CommandHint {
display: self.display[strip_chars..].to_owned(),
complete_up_to: self.complete_up_to.saturating_sub(strip_chars),
}
}
}
impl Hinter for DIYHinter {
type Hint = CommandHint;
fn hint(&self, line: &str, pos: usize, _ctx: &Context<'_>) -> Option<CommandHint> {
if line.is_empty() || pos < line.len() {
return None;
}
self.hints
.iter()
.filter_map(|hint| {
// To hint only after a complete word (as the redis CLI does), add the condition:
// line.ends_with(" ")
if hint.display.starts_with(line) {
Some(hint.suffix(pos))
} else
|
})
.next()
}
}
fn diy_hints() -> HashSet<CommandHint> {
let mut set = HashSet::new();
set.insert(CommandHint::new("help", "help"));
set.insert(CommandHint::new("get key", "get "));
set.insert(CommandHint::new("set key value", "set "));
set.insert(CommandHint::new("hget key field", "hget "));
set.insert(CommandHint::new("hset key field value", "hset "));
set
}
fn main() -> Result<()> {
println!("This is a DIY hint hack of rustyline");
let h = DIYHinter { hints: diy_hints() };
let mut rl: Editor<DIYHinter> = Editor::new();
rl.set_helper(Some(h));
loop {
let input = rl.readline("> ")?;
println!("input: {}", input);
}
}
|
{
None
}
|
conditional_block
|
complex_query.rs
|
extern crate rustorm;
extern crate uuid;
extern crate chrono;
extern crate rustc_serialize;
use uuid::Uuid;
use rustorm::query::Query;
use rustorm::query::Equality;
use rustorm::dao::{Dao, IsDao};
use rustorm::pool::ManagedPool;
#[derive(Debug, Clone)]
pub struct
|
{
pub photo_id: Uuid,
pub url: Option<String>,
}
impl IsDao for Photo {
fn from_dao(dao: &Dao) -> Self {
Photo {
photo_id: dao.get("photo_id"),
url: dao.get_opt("url"),
}
}
fn to_dao(&self) -> Dao {
let mut dao = Dao::new();
dao.set("photo_id", &self.photo_id);
match self.url {
Some(ref _value) => dao.set("url", _value),
None => dao.set_null("url"),
}
dao
}
}
fn main() {
let url = "postgres://postgres:p0stgr3s@localhost/bazaar_v6";
let pool = ManagedPool::init(&url, 1).unwrap();
let db = pool.connect().unwrap();
let mut query = Query::select_all();
query.from_table("bazaar.product")
.left_join_table("bazaar.product_category",
"product_category.product_id",
"product.product_id")
.left_join_table("bazaar.category",
"category.category_id",
"product_category.category_id")
.left_join_table("product_photo",
"product.product_id",
"product_photo.product_id")
.left_join_table("bazaar.photo", "product_photo.photo_id", "photo.photo_id")
.filter("product.name", Equality::EQ, &"GTX660 Ti videocard")
.filter("category.name", Equality::EQ, &"Electronic")
.group_by(vec!["category.name"])
.having("count(*)", Equality::GT, &1)
.asc("product.name")
.desc("product.created");
let frag = query.build(db.as_ref());
let expected = "
SELECT *
FROM bazaar.product
LEFT JOIN bazaar.product_category\x20
ON product_category.product_id = product.product_id\x20
LEFT JOIN bazaar.category\x20
ON category.category_id = product_category.category_id\x20
LEFT JOIN product_photo\x20
ON product.product_id = product_photo.product_id\x20
LEFT JOIN bazaar.photo\x20
ON product_photo.photo_id = photo.photo_id\x20
WHERE product.name = $1\x20
AND category.name = $2\x20
GROUP BY category.name\x20
HAVING count(*) > $3\x20
ORDER BY product.name ASC, product.created DESC".to_string();
println!("actual: {{\n{}}} [{}]", frag.sql, frag.sql.len());
println!("expected: {{{}}} [{}]", expected, expected.len());
assert!(frag.sql.trim() == expected.trim());
}
|
Photo
|
identifier_name
|
complex_query.rs
|
extern crate rustorm;
extern crate uuid;
extern crate chrono;
extern crate rustc_serialize;
use uuid::Uuid;
use rustorm::query::Query;
use rustorm::query::Equality;
use rustorm::dao::{Dao, IsDao};
use rustorm::pool::ManagedPool;
#[derive(Debug, Clone)]
pub struct Photo {
pub photo_id: Uuid,
pub url: Option<String>,
}
impl IsDao for Photo {
fn from_dao(dao: &Dao) -> Self
|
fn to_dao(&self) -> Dao {
let mut dao = Dao::new();
dao.set("photo_id", &self.photo_id);
match self.url {
Some(ref _value) => dao.set("url", _value),
None => dao.set_null("url"),
}
dao
}
}
fn main() {
let url = "postgres://postgres:p0stgr3s@localhost/bazaar_v6";
let pool = ManagedPool::init(&url, 1).unwrap();
let db = pool.connect().unwrap();
let mut query = Query::select_all();
query.from_table("bazaar.product")
.left_join_table("bazaar.product_category",
"product_category.product_id",
"product.product_id")
.left_join_table("bazaar.category",
"category.category_id",
"product_category.category_id")
.left_join_table("product_photo",
"product.product_id",
"product_photo.product_id")
.left_join_table("bazaar.photo", "product_photo.photo_id", "photo.photo_id")
.filter("product.name", Equality::EQ, &"GTX660 Ti videocard")
.filter("category.name", Equality::EQ, &"Electronic")
.group_by(vec!["category.name"])
.having("count(*)", Equality::GT, &1)
.asc("product.name")
.desc("product.created");
let frag = query.build(db.as_ref());
let expected = "
SELECT *
FROM bazaar.product
LEFT JOIN bazaar.product_category\x20
ON product_category.product_id = product.product_id\x20
LEFT JOIN bazaar.category\x20
ON category.category_id = product_category.category_id\x20
LEFT JOIN product_photo\x20
ON product.product_id = product_photo.product_id\x20
LEFT JOIN bazaar.photo\x20
ON product_photo.photo_id = photo.photo_id\x20
WHERE product.name = $1\x20
AND category.name = $2\x20
GROUP BY category.name\x20
HAVING count(*) > $3\x20
ORDER BY product.name ASC, product.created DESC".to_string();
println!("actual: {{\n{}}} [{}]", frag.sql, frag.sql.len());
println!("expected: {{{}}} [{}]", expected, expected.len());
assert!(frag.sql.trim() == expected.trim());
}
|
{
Photo {
photo_id: dao.get("photo_id"),
url: dao.get_opt("url"),
}
}
|
identifier_body
|
complex_query.rs
|
extern crate rustorm;
extern crate uuid;
extern crate chrono;
|
use uuid::Uuid;
use rustorm::query::Query;
use rustorm::query::Equality;
use rustorm::dao::{Dao, IsDao};
use rustorm::pool::ManagedPool;
#[derive(Debug, Clone)]
pub struct Photo {
pub photo_id: Uuid,
pub url: Option<String>,
}
impl IsDao for Photo {
fn from_dao(dao: &Dao) -> Self {
Photo {
photo_id: dao.get("photo_id"),
url: dao.get_opt("url"),
}
}
fn to_dao(&self) -> Dao {
let mut dao = Dao::new();
dao.set("photo_id", &self.photo_id);
match self.url {
Some(ref _value) => dao.set("url", _value),
None => dao.set_null("url"),
}
dao
}
}
fn main() {
let url = "postgres://postgres:p0stgr3s@localhost/bazaar_v6";
let pool = ManagedPool::init(&url, 1).unwrap();
let db = pool.connect().unwrap();
let mut query = Query::select_all();
query.from_table("bazaar.product")
.left_join_table("bazaar.product_category",
"product_category.product_id",
"product.product_id")
.left_join_table("bazaar.category",
"category.category_id",
"product_category.category_id")
.left_join_table("product_photo",
"product.product_id",
"product_photo.product_id")
.left_join_table("bazaar.photo", "product_photo.photo_id", "photo.photo_id")
.filter("product.name", Equality::EQ, &"GTX660 Ti videocard")
.filter("category.name", Equality::EQ, &"Electronic")
.group_by(vec!["category.name"])
.having("count(*)", Equality::GT, &1)
.asc("product.name")
.desc("product.created");
let frag = query.build(db.as_ref());
let expected = "
SELECT *
FROM bazaar.product
LEFT JOIN bazaar.product_category\x20
ON product_category.product_id = product.product_id\x20
LEFT JOIN bazaar.category\x20
ON category.category_id = product_category.category_id\x20
LEFT JOIN product_photo\x20
ON product.product_id = product_photo.product_id\x20
LEFT JOIN bazaar.photo\x20
ON product_photo.photo_id = photo.photo_id\x20
WHERE product.name = $1\x20
AND category.name = $2\x20
GROUP BY category.name\x20
HAVING count(*) > $3\x20
ORDER BY product.name ASC, product.created DESC".to_string();
println!("actual: {{\n{}}} [{}]", frag.sql, frag.sql.len());
println!("expected: {{{}}} [{}]", expected, expected.len());
assert!(frag.sql.trim() == expected.trim());
}
|
extern crate rustc_serialize;
|
random_line_split
|
extern-pass-TwoU32s.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test a foreign function that accepts and returns a struct
// by value.
#[deriving(PartialEq, Show)]
pub struct TwoU32s {
one: u32, two: u32
}
impl Copy for TwoU32s {}
#[link(name = "rust_test_helpers")]
extern {
pub fn rust_dbg_extern_identity_TwoU32s(v: TwoU32s) -> TwoU32s;
}
pub fn
|
() {
unsafe {
let x = TwoU32s {one: 22, two: 23};
let y = rust_dbg_extern_identity_TwoU32s(x);
assert_eq!(x, y);
}
}
|
main
|
identifier_name
|
extern-pass-TwoU32s.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test a foreign function that accepts and returns a struct
// by value.
#[deriving(PartialEq, Show)]
pub struct TwoU32s {
one: u32, two: u32
}
impl Copy for TwoU32s {}
#[link(name = "rust_test_helpers")]
extern {
pub fn rust_dbg_extern_identity_TwoU32s(v: TwoU32s) -> TwoU32s;
}
pub fn main() {
unsafe {
let x = TwoU32s {one: 22, two: 23};
let y = rust_dbg_extern_identity_TwoU32s(x);
assert_eq!(x, y);
}
}
|
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
|
random_line_split
|
extern-pass-TwoU32s.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test a foreign function that accepts and returns a struct
// by value.
#[deriving(PartialEq, Show)]
pub struct TwoU32s {
one: u32, two: u32
}
impl Copy for TwoU32s {}
#[link(name = "rust_test_helpers")]
extern {
pub fn rust_dbg_extern_identity_TwoU32s(v: TwoU32s) -> TwoU32s;
}
pub fn main()
|
{
unsafe {
let x = TwoU32s {one: 22, two: 23};
let y = rust_dbg_extern_identity_TwoU32s(x);
assert_eq!(x, y);
}
}
|
identifier_body
|
|
lex_rule_loader.rs
|
use common::lexer::{Ruleset, LexRule};
use walkdir::{WalkDir, DirEntry};
use std::io::prelude::*;
use std::io;
use std::fs;
pub fn ruleset_from_dir (dir_path: String) -> io::Result<Ruleset> {
ruleset_from_dir_v(dir_path, false)
}
pub fn
|
(dir_path: String, verbose: bool) -> io::Result<Ruleset> {
let mut lex_list: Vec<DirEntry> = Vec::new();
for entry in WalkDir::new(dir_path) {
let entry = entry.unwrap();
let ent_path = &entry.path();
match ent_path.extension() {
Some(ext) => {
if ext == "lex" {
lex_list.push(entry.clone());
}
},
None => ()
}
}
let mut rules = Ruleset::new();
for entry in lex_list {
if verbose {
println!("Loading rules from file [{}]", &entry.path().display());
}
let mut file = fs::File::open(entry.path())?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
for line in contents.lines() {
if !((line.trim() == "") || (line.starts_with("#"))) {
rules.add(LexRule::from_string(String::from(line)).unwrap());
}
}
}
Ok(rules.clone())
}
|
ruleset_from_dir_v
|
identifier_name
|
lex_rule_loader.rs
|
use common::lexer::{Ruleset, LexRule};
use walkdir::{WalkDir, DirEntry};
use std::io::prelude::*;
use std::io;
use std::fs;
pub fn ruleset_from_dir (dir_path: String) -> io::Result<Ruleset> {
ruleset_from_dir_v(dir_path, false)
}
pub fn ruleset_from_dir_v (dir_path: String, verbose: bool) -> io::Result<Ruleset>
|
}
let mut file = fs::File::open(entry.path())?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
for line in contents.lines() {
if !((line.trim() == "") || (line.starts_with("#"))) {
rules.add(LexRule::from_string(String::from(line)).unwrap());
}
}
}
Ok(rules.clone())
}
|
{
let mut lex_list: Vec<DirEntry> = Vec::new();
for entry in WalkDir::new(dir_path) {
let entry = entry.unwrap();
let ent_path = &entry.path();
match ent_path.extension() {
Some(ext) => {
if ext == "lex" {
lex_list.push(entry.clone());
}
},
None => ()
}
}
let mut rules = Ruleset::new();
for entry in lex_list {
if verbose {
println!("Loading rules from file [{}]", &entry.path().display());
|
identifier_body
|
lex_rule_loader.rs
|
use common::lexer::{Ruleset, LexRule};
use walkdir::{WalkDir, DirEntry};
use std::io::prelude::*;
use std::io;
use std::fs;
pub fn ruleset_from_dir (dir_path: String) -> io::Result<Ruleset> {
ruleset_from_dir_v(dir_path, false)
}
pub fn ruleset_from_dir_v (dir_path: String, verbose: bool) -> io::Result<Ruleset> {
let mut lex_list: Vec<DirEntry> = Vec::new();
for entry in WalkDir::new(dir_path) {
let entry = entry.unwrap();
let ent_path = &entry.path();
match ent_path.extension() {
|
lex_list.push(entry.clone());
}
},
None => ()
}
}
let mut rules = Ruleset::new();
for entry in lex_list {
if verbose {
println!("Loading rules from file [{}]", &entry.path().display());
}
let mut file = fs::File::open(entry.path())?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
for line in contents.lines() {
if !((line.trim() == "") || (line.starts_with("#"))) {
rules.add(LexRule::from_string(String::from(line)).unwrap());
}
}
}
Ok(rules.clone())
}
|
Some(ext) => {
if ext == "lex" {
|
random_line_split
|
lib.rs
|
//! (Hopefully) Safe fork-join parallelism abstractions
//!
//! Implements the [`divide`](fn.divide.html) and
//! [`execute!`](../parallel_macros/macro.execute!.html) functions proposed in Niko's
//! [blog](http://smallcultfollowing.com/babysteps/blog/2013/06/11/data-parallelism-in-rust/).
//!
//! # Cargo
//!
//! ``` text
//! # Cargo.toml
//! [dependencies.parallel]
//! git = "https://github.com/japaric/parallel.rs"
//!
|
#![allow(unused_features)]
#![cfg_attr(test, plugin(quickcheck_macros))]
#![deny(warnings)]
#![feature(core)]
#![feature(os)]
#![feature(plugin)]
#![feature(std_misc)]
#[cfg(test)]
extern crate quickcheck;
#[cfg(test)]
extern crate rand;
pub use divide::divide;
pub use apply::apply;
mod divide;
mod apply;
|
//! [dependencies.parallel_macros]
//! git = "https://github.com/japaric/parallel.rs"
//! ```
|
random_line_split
|
data.rs
|
// Copyright 2014 The html5ever Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::prelude::*;
use tokenizer::Doctype;
use tree_builder::interface::{QuirksMode, Quirks, LimitedQuirks, NoQuirks};
use util::str::AsciiExt;
use collections::string::String;
// These should all be lowercase, for ASCII-case-insensitive matching.
static QUIRKY_PUBLIC_PREFIXES: &'static [&'static str] = &[
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//",
];
static QUIRKY_PUBLIC_MATCHES: &'static [&'static str] = &[
"-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html",
];
static QUIRKY_SYSTEM_MATCHES: &'static [&'static str] = &[
"http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd",
];
static LIMITED_QUIRKY_PUBLIC_PREFIXES: &'static [&'static str] = &[
"-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//",
];
static HTML4_PUBLIC_PREFIXES: &'static [&'static str] = &[
"-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//",
];
pub fn doctype_error_and_quirks(doctype: &Doctype, iframe_srcdoc: bool) -> (bool, QuirksMode) {
fn opt_as_slice<'t>(x: &'t Option<String>) -> Option<&'t str> {
x.as_ref().map(|y| y.as_slice())
}
fn opt_to_ascii_lower(x: Option<&str>) -> Option<String> {
x.map(|y| y.to_ascii_lower())
}
let name = opt_as_slice(&doctype.name);
let public = opt_as_slice(&doctype.public_id);
let system = opt_as_slice(&doctype.system_id);
let err = match (name, public, system) {
(Some("html"), None, None)
| (Some("html"), None, Some("about:legacy-compat"))
| (Some("html"), Some("-//W3C//DTD HTML 4.0//EN"), None)
| (Some("html"), Some("-//W3C//DTD HTML 4.0//EN"), Some("http://www.w3.org/TR/REC-html40/strict.dtd"))
| (Some("html"), Some("-//W3C//DTD HTML 4.01//EN"), None)
| (Some("html"), Some("-//W3C//DTD HTML 4.01//EN"), Some("http://www.w3.org/TR/html4/strict.dtd"))
| (Some("html"), Some("-//W3C//DTD XHTML 1.0 Strict//EN"), Some("http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"))
| (Some("html"), Some("-//W3C//DTD XHTML 1.1//EN"), Some("http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"))
=> false,
_ => true,
};
|
fn contains_pfx(haystack: &[&str], needle: &str) -> bool {
haystack.iter().any(|&x| needle.starts_with(x))
}
// Quirks-mode matches are case-insensitive.
let public = opt_to_ascii_lower(public);
let system = opt_to_ascii_lower(system);
let quirk = match (opt_as_slice(&public), opt_as_slice(&system)) {
_ if doctype.force_quirks => Quirks,
_ if name != Some("html") => Quirks,
_ if iframe_srcdoc => NoQuirks,
(Some(ref p), _) if QUIRKY_PUBLIC_MATCHES.contains(p) => Quirks,
(_, Some(ref s)) if QUIRKY_SYSTEM_MATCHES.contains(s) => Quirks,
(Some(p), _) if contains_pfx(QUIRKY_PUBLIC_PREFIXES, p) => Quirks,
(Some(p), _) if contains_pfx(LIMITED_QUIRKY_PUBLIC_PREFIXES, p) => LimitedQuirks,
(Some(p), s) if contains_pfx(HTML4_PUBLIC_PREFIXES, p) => match s {
None => Quirks,
Some(_) => LimitedQuirks,
},
_ => NoQuirks,
};
(err, quirk)
}
|
// FIXME: We could do something asymptotically faster here.
// But there aren't many strings, and this happens at most once per parse.
|
random_line_split
|
data.rs
|
// Copyright 2014 The html5ever Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::prelude::*;
use tokenizer::Doctype;
use tree_builder::interface::{QuirksMode, Quirks, LimitedQuirks, NoQuirks};
use util::str::AsciiExt;
use collections::string::String;
// These should all be lowercase, for ASCII-case-insensitive matching.
static QUIRKY_PUBLIC_PREFIXES: &'static [&'static str] = &[
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//",
];
static QUIRKY_PUBLIC_MATCHES: &'static [&'static str] = &[
"-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html",
];
static QUIRKY_SYSTEM_MATCHES: &'static [&'static str] = &[
"http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd",
];
static LIMITED_QUIRKY_PUBLIC_PREFIXES: &'static [&'static str] = &[
"-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//",
];
static HTML4_PUBLIC_PREFIXES: &'static [&'static str] = &[
"-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//",
];
pub fn doctype_error_and_quirks(doctype: &Doctype, iframe_srcdoc: bool) -> (bool, QuirksMode) {
fn opt_as_slice<'t>(x: &'t Option<String>) -> Option<&'t str> {
x.as_ref().map(|y| y.as_slice())
}
fn
|
(x: Option<&str>) -> Option<String> {
x.map(|y| y.to_ascii_lower())
}
let name = opt_as_slice(&doctype.name);
let public = opt_as_slice(&doctype.public_id);
let system = opt_as_slice(&doctype.system_id);
let err = match (name, public, system) {
(Some("html"), None, None)
| (Some("html"), None, Some("about:legacy-compat"))
| (Some("html"), Some("-//W3C//DTD HTML 4.0//EN"), None)
| (Some("html"), Some("-//W3C//DTD HTML 4.0//EN"), Some("http://www.w3.org/TR/REC-html40/strict.dtd"))
| (Some("html"), Some("-//W3C//DTD HTML 4.01//EN"), None)
| (Some("html"), Some("-//W3C//DTD HTML 4.01//EN"), Some("http://www.w3.org/TR/html4/strict.dtd"))
| (Some("html"), Some("-//W3C//DTD XHTML 1.0 Strict//EN"), Some("http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"))
| (Some("html"), Some("-//W3C//DTD XHTML 1.1//EN"), Some("http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"))
=> false,
_ => true,
};
// FIXME: We could do something asymptotically faster here.
// But there aren't many strings, and this happens at most once per parse.
fn contains_pfx(haystack: &[&str], needle: &str) -> bool {
haystack.iter().any(|&x| needle.starts_with(x))
}
// Quirks-mode matches are case-insensitive.
let public = opt_to_ascii_lower(public);
let system = opt_to_ascii_lower(system);
let quirk = match (opt_as_slice(&public), opt_as_slice(&system)) {
_ if doctype.force_quirks => Quirks,
_ if name != Some("html") => Quirks,
_ if iframe_srcdoc => NoQuirks,
(Some(ref p), _) if QUIRKY_PUBLIC_MATCHES.contains(p) => Quirks,
(_, Some(ref s)) if QUIRKY_SYSTEM_MATCHES.contains(s) => Quirks,
(Some(p), _) if contains_pfx(QUIRKY_PUBLIC_PREFIXES, p) => Quirks,
(Some(p), _) if contains_pfx(LIMITED_QUIRKY_PUBLIC_PREFIXES, p) => LimitedQuirks,
(Some(p), s) if contains_pfx(HTML4_PUBLIC_PREFIXES, p) => match s {
None => Quirks,
Some(_) => LimitedQuirks,
},
_ => NoQuirks,
};
(err, quirk)
}
|
opt_to_ascii_lower
|
identifier_name
|
data.rs
|
// Copyright 2014 The html5ever Project Developers. See the
// COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::prelude::*;
use tokenizer::Doctype;
use tree_builder::interface::{QuirksMode, Quirks, LimitedQuirks, NoQuirks};
use util::str::AsciiExt;
use collections::string::String;
// These should all be lowercase, for ASCII-case-insensitive matching.
static QUIRKY_PUBLIC_PREFIXES: &'static [&'static str] = &[
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//",
];
static QUIRKY_PUBLIC_MATCHES: &'static [&'static str] = &[
"-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html",
];
static QUIRKY_SYSTEM_MATCHES: &'static [&'static str] = &[
"http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd",
];
static LIMITED_QUIRKY_PUBLIC_PREFIXES: &'static [&'static str] = &[
"-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//",
];
static HTML4_PUBLIC_PREFIXES: &'static [&'static str] = &[
"-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//",
];
pub fn doctype_error_and_quirks(doctype: &Doctype, iframe_srcdoc: bool) -> (bool, QuirksMode) {
fn opt_as_slice<'t>(x: &'t Option<String>) -> Option<&'t str> {
x.as_ref().map(|y| y.as_slice())
}
fn opt_to_ascii_lower(x: Option<&str>) -> Option<String> {
x.map(|y| y.to_ascii_lower())
}
let name = opt_as_slice(&doctype.name);
let public = opt_as_slice(&doctype.public_id);
let system = opt_as_slice(&doctype.system_id);
let err = match (name, public, system) {
(Some("html"), None, None)
| (Some("html"), None, Some("about:legacy-compat"))
| (Some("html"), Some("-//W3C//DTD HTML 4.0//EN"), None)
| (Some("html"), Some("-//W3C//DTD HTML 4.0//EN"), Some("http://www.w3.org/TR/REC-html40/strict.dtd"))
| (Some("html"), Some("-//W3C//DTD HTML 4.01//EN"), None)
| (Some("html"), Some("-//W3C//DTD HTML 4.01//EN"), Some("http://www.w3.org/TR/html4/strict.dtd"))
| (Some("html"), Some("-//W3C//DTD XHTML 1.0 Strict//EN"), Some("http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"))
| (Some("html"), Some("-//W3C//DTD XHTML 1.1//EN"), Some("http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"))
=> false,
_ => true,
};
// FIXME: We could do something asymptotically faster here.
// But there aren't many strings, and this happens at most once per parse.
fn contains_pfx(haystack: &[&str], needle: &str) -> bool
|
// Quirks-mode matches are case-insensitive.
let public = opt_to_ascii_lower(public);
let system = opt_to_ascii_lower(system);
let quirk = match (opt_as_slice(&public), opt_as_slice(&system)) {
_ if doctype.force_quirks => Quirks,
_ if name != Some("html") => Quirks,
_ if iframe_srcdoc => NoQuirks,
(Some(ref p), _) if QUIRKY_PUBLIC_MATCHES.contains(p) => Quirks,
(_, Some(ref s)) if QUIRKY_SYSTEM_MATCHES.contains(s) => Quirks,
(Some(p), _) if contains_pfx(QUIRKY_PUBLIC_PREFIXES, p) => Quirks,
(Some(p), _) if contains_pfx(LIMITED_QUIRKY_PUBLIC_PREFIXES, p) => LimitedQuirks,
(Some(p), s) if contains_pfx(HTML4_PUBLIC_PREFIXES, p) => match s {
None => Quirks,
Some(_) => LimitedQuirks,
},
_ => NoQuirks,
};
(err, quirk)
}
|
{
haystack.iter().any(|&x| needle.starts_with(x))
}
|
identifier_body
|
raw_block.rs
|
use Renderable;
use context::Context;
use LiquidOptions;
use token::Token;
use lexer::Element::{self, Expression, Tag, Raw};
use error::Result;
struct RawT {
content: String,
}
impl Renderable for RawT {
fn
|
(&self, _context: &mut Context) -> Result<Option<String>> {
Ok(Some(self.content.to_owned()))
}
}
pub fn raw_block(_tag_name: &str,
_arguments: &[Token],
tokens: &[Element],
_options: &LiquidOptions)
-> Result<Box<Renderable>> {
let content = tokens.iter().fold("".to_owned(), |a, b| {
match *b {
Expression(_, ref text) |
Tag(_, ref text) |
Raw(ref text) => text,
}.to_owned() + &a
});
Ok(Box::new(RawT { content: content }))
}
#[test]
fn test_raw() {
use std::default::Default;
let options: LiquidOptions = Default::default();
let raw = raw_block("raw",
&[],
&vec![Expression(vec![], "This is a test".to_owned())],
&options);
assert_eq!(raw.unwrap().render(&mut Default::default()).unwrap(),
Some("This is a test".to_owned()));
}
|
render
|
identifier_name
|
raw_block.rs
|
use Renderable;
use context::Context;
use LiquidOptions;
use token::Token;
use lexer::Element::{self, Expression, Tag, Raw};
use error::Result;
struct RawT {
content: String,
}
impl Renderable for RawT {
fn render(&self, _context: &mut Context) -> Result<Option<String>>
|
}
pub fn raw_block(_tag_name: &str,
_arguments: &[Token],
tokens: &[Element],
_options: &LiquidOptions)
-> Result<Box<Renderable>> {
let content = tokens.iter().fold("".to_owned(), |a, b| {
match *b {
Expression(_, ref text) |
Tag(_, ref text) |
Raw(ref text) => text,
}.to_owned() + &a
});
Ok(Box::new(RawT { content: content }))
}
#[test]
fn test_raw() {
use std::default::Default;
let options: LiquidOptions = Default::default();
let raw = raw_block("raw",
&[],
&vec![Expression(vec![], "This is a test".to_owned())],
&options);
assert_eq!(raw.unwrap().render(&mut Default::default()).unwrap(),
Some("This is a test".to_owned()));
}
|
{
Ok(Some(self.content.to_owned()))
}
|
identifier_body
|
raw_block.rs
|
use Renderable;
use context::Context;
use LiquidOptions;
use token::Token;
use lexer::Element::{self, Expression, Tag, Raw};
use error::Result;
struct RawT {
content: String,
|
impl Renderable for RawT {
fn render(&self, _context: &mut Context) -> Result<Option<String>> {
Ok(Some(self.content.to_owned()))
}
}
pub fn raw_block(_tag_name: &str,
_arguments: &[Token],
tokens: &[Element],
_options: &LiquidOptions)
-> Result<Box<Renderable>> {
let content = tokens.iter().fold("".to_owned(), |a, b| {
match *b {
Expression(_, ref text) |
Tag(_, ref text) |
Raw(ref text) => text,
}.to_owned() + &a
});
Ok(Box::new(RawT { content: content }))
}
#[test]
fn test_raw() {
use std::default::Default;
let options: LiquidOptions = Default::default();
let raw = raw_block("raw",
&[],
&vec![Expression(vec![], "This is a test".to_owned())],
&options);
assert_eq!(raw.unwrap().render(&mut Default::default()).unwrap(),
Some("This is a test".to_owned()));
}
|
}
|
random_line_split
|
local-generic.rs
|
// We specify -C incremental here because we want to test the partitioning for
// incremental compilation
// compile-flags:-Zprint-mono-items=eager -Cincremental=tmp/partitioning-tests/local-generic
#![allow(dead_code)]
#![crate_type="lib"]
//~ MONO_ITEM fn generic::<u32> @@ local_generic.volatile[External]
//~ MONO_ITEM fn generic::<u64> @@ local_generic.volatile[External]
//~ MONO_ITEM fn generic::<char> @@ local_generic.volatile[External]
//~ MONO_ITEM fn generic::<&str> @@ local_generic.volatile[External]
pub fn generic<T>(x: T) -> T { x }
//~ MONO_ITEM fn user @@ local_generic[Internal]
fn user() {
let _ = generic(0u32);
}
mod mod1 {
pub use super::generic;
//~ MONO_ITEM fn mod1::user @@ local_generic-mod1[Internal]
fn user() {
let _ = generic(0u64);
}
mod mod1 {
use super::generic;
//~ MONO_ITEM fn mod1::mod1::user @@ local_generic-mod1-mod1[Internal]
fn user() {
let _ = generic('c');
}
}
}
mod mod2 {
use super::generic;
//~ MONO_ITEM fn mod2::user @@ local_generic-mod2[Internal]
fn
|
() {
let _ = generic("abc");
}
}
|
user
|
identifier_name
|
local-generic.rs
|
// We specify -C incremental here because we want to test the partitioning for
// incremental compilation
// compile-flags:-Zprint-mono-items=eager -Cincremental=tmp/partitioning-tests/local-generic
#![allow(dead_code)]
#![crate_type="lib"]
//~ MONO_ITEM fn generic::<u32> @@ local_generic.volatile[External]
//~ MONO_ITEM fn generic::<u64> @@ local_generic.volatile[External]
//~ MONO_ITEM fn generic::<char> @@ local_generic.volatile[External]
//~ MONO_ITEM fn generic::<&str> @@ local_generic.volatile[External]
pub fn generic<T>(x: T) -> T { x }
//~ MONO_ITEM fn user @@ local_generic[Internal]
fn user()
|
mod mod1 {
pub use super::generic;
//~ MONO_ITEM fn mod1::user @@ local_generic-mod1[Internal]
fn user() {
let _ = generic(0u64);
}
mod mod1 {
use super::generic;
//~ MONO_ITEM fn mod1::mod1::user @@ local_generic-mod1-mod1[Internal]
fn user() {
let _ = generic('c');
}
}
}
mod mod2 {
use super::generic;
//~ MONO_ITEM fn mod2::user @@ local_generic-mod2[Internal]
fn user() {
let _ = generic("abc");
}
}
|
{
let _ = generic(0u32);
}
|
identifier_body
|
local-generic.rs
|
// We specify -C incremental here because we want to test the partitioning for
// incremental compilation
// compile-flags:-Zprint-mono-items=eager -Cincremental=tmp/partitioning-tests/local-generic
#![allow(dead_code)]
#![crate_type="lib"]
//~ MONO_ITEM fn generic::<u32> @@ local_generic.volatile[External]
//~ MONO_ITEM fn generic::<u64> @@ local_generic.volatile[External]
|
fn user() {
let _ = generic(0u32);
}
mod mod1 {
pub use super::generic;
//~ MONO_ITEM fn mod1::user @@ local_generic-mod1[Internal]
fn user() {
let _ = generic(0u64);
}
mod mod1 {
use super::generic;
//~ MONO_ITEM fn mod1::mod1::user @@ local_generic-mod1-mod1[Internal]
fn user() {
let _ = generic('c');
}
}
}
mod mod2 {
use super::generic;
//~ MONO_ITEM fn mod2::user @@ local_generic-mod2[Internal]
fn user() {
let _ = generic("abc");
}
}
|
//~ MONO_ITEM fn generic::<char> @@ local_generic.volatile[External]
//~ MONO_ITEM fn generic::<&str> @@ local_generic.volatile[External]
pub fn generic<T>(x: T) -> T { x }
//~ MONO_ITEM fn user @@ local_generic[Internal]
|
random_line_split
|
error.rs
|
use std::error;
use std::fmt;
use std::io::{self, Error};
#[derive(Debug)]
pub struct TarError {
desc: String,
io: io::Error,
}
impl TarError {
pub fn new(desc: &str, err: Error) -> TarError {
TarError {
desc: desc.to_string(),
io: err,
}
}
}
impl error::Error for TarError {
fn description(&self) -> &str {
&self.desc
}
fn cause(&self) -> Option<&error::Error> {
Some(&self.io)
}
}
impl fmt::Display for TarError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.desc.fmt(f)
}
}
impl From<TarError> for Error {
fn from(t: TarError) -> Error {
Error::new(t.io.kind(), t)
}
|
}
|
random_line_split
|
|
error.rs
|
use std::error;
use std::fmt;
use std::io::{self, Error};
#[derive(Debug)]
pub struct TarError {
desc: String,
io: io::Error,
}
impl TarError {
pub fn new(desc: &str, err: Error) -> TarError {
TarError {
desc: desc.to_string(),
io: err,
}
}
}
impl error::Error for TarError {
fn description(&self) -> &str {
&self.desc
}
fn cause(&self) -> Option<&error::Error>
|
}
impl fmt::Display for TarError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.desc.fmt(f)
}
}
impl From<TarError> for Error {
fn from(t: TarError) -> Error {
Error::new(t.io.kind(), t)
}
}
|
{
Some(&self.io)
}
|
identifier_body
|
error.rs
|
use std::error;
use std::fmt;
use std::io::{self, Error};
#[derive(Debug)]
pub struct
|
{
desc: String,
io: io::Error,
}
impl TarError {
pub fn new(desc: &str, err: Error) -> TarError {
TarError {
desc: desc.to_string(),
io: err,
}
}
}
impl error::Error for TarError {
fn description(&self) -> &str {
&self.desc
}
fn cause(&self) -> Option<&error::Error> {
Some(&self.io)
}
}
impl fmt::Display for TarError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.desc.fmt(f)
}
}
impl From<TarError> for Error {
fn from(t: TarError) -> Error {
Error::new(t.io.kind(), t)
}
}
|
TarError
|
identifier_name
|
htmltableheadercellelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTableHeaderCellElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLTableHeaderCellElementDerived;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::HTMLElementTypeId;
use dom::htmltablecellelement::{HTMLTableCellElement, HTMLTableCellElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTableHeaderCellElement {
htmltablecellelement: HTMLTableCellElement,
}
impl HTMLTableHeaderCellElementDerived for EventTarget {
fn is_htmltableheadercellelement(&self) -> bool {
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(
ElementTypeId::HTMLElement(
HTMLElementTypeId::HTMLTableCellElement(
HTMLTableCellElementTypeId::HTMLTableHeaderCellElement))))
}
}
impl HTMLTableHeaderCellElement {
fn new_inherited(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLTableHeaderCellElement {
HTMLTableHeaderCellElement {
htmltablecellelement: HTMLTableCellElement::new_inherited(
HTMLTableCellElementTypeId::HTMLTableHeaderCellElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn
|
(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLTableHeaderCellElement> {
let element = HTMLTableHeaderCellElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTableHeaderCellElementBinding::Wrap)
}
}
|
new
|
identifier_name
|
htmltableheadercellelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTableHeaderCellElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLTableHeaderCellElementDerived;
use dom::bindings::js::Root;
use dom::document::Document;
use dom::element::ElementTypeId;
|
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::htmlelement::HTMLElementTypeId;
use dom::htmltablecellelement::{HTMLTableCellElement, HTMLTableCellElementTypeId};
use dom::node::{Node, NodeTypeId};
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTableHeaderCellElement {
htmltablecellelement: HTMLTableCellElement,
}
impl HTMLTableHeaderCellElementDerived for EventTarget {
fn is_htmltableheadercellelement(&self) -> bool {
*self.type_id() == EventTargetTypeId::Node(NodeTypeId::Element(
ElementTypeId::HTMLElement(
HTMLElementTypeId::HTMLTableCellElement(
HTMLTableCellElementTypeId::HTMLTableHeaderCellElement))))
}
}
impl HTMLTableHeaderCellElement {
fn new_inherited(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> HTMLTableHeaderCellElement {
HTMLTableHeaderCellElement {
htmltablecellelement: HTMLTableCellElement::new_inherited(
HTMLTableCellElementTypeId::HTMLTableHeaderCellElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLTableHeaderCellElement> {
let element = HTMLTableHeaderCellElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTableHeaderCellElementBinding::Wrap)
}
}
|
random_line_split
|
|
composite_shape_against_any.rs
|
use std::marker::PhantomData;
use na::{Identity, Translate, Translation};
use na;
use entities::bounding_volume::{HasAABB, AABB};
use entities::partitioning::BVTCostFn;
use entities::shape::CompositeShape;
use entities::inspection::Repr;
use point::PointQuery;
use geometry::distance_internal;
use math::{Scalar, Point, Vect, Isometry};
/// Smallest distance between a composite shape and any other shape.
pub fn composite_shape_against_any<P, M, G1: ?Sized, G2: ?Sized>(m1: &M, g1: &G1, m2: &M, g2: &G2) -> <P::Vect as Vect>::Scalar
where P: Point,
P::Vect: Translate<P>,
M: Isometry<P, P::Vect> + Translation<P::Vect>,
G1: CompositeShape<P, M>,
G2: Repr<P, M> + HasAABB<P, M> {
let mut cost_fn = CompositeShapeAgainstAnyDistCostFn::new(m1, g1, m2, g2);
g1.bvt().best_first_search(&mut cost_fn).map(|(_, res)| res).expect("The composite shape must not be empty.")
}
/// Smallest distance between a shape and a composite shape.
pub fn any_against_composite_shape<P, M, G1: ?Sized, G2: ?Sized>(m1: &M, g1: &G1, m2: &M, g2: &G2) -> <P::Vect as Vect>::Scalar
where P: Point,
P::Vect: Translate<P>,
M: Isometry<P, P::Vect> + Translation<P::Vect>,
G1: Repr<P, M> + HasAABB<P, M>,
G2: CompositeShape<P, M> {
composite_shape_against_any(m2, g2, m1, g1)
}
struct CompositeShapeAgainstAnyDistCostFn<'a, P: 'a + Point, M: 'a, G1: ?Sized + 'a, G2: ?Sized + 'a> {
msum_shift: P::Vect,
msum_margin: P::Vect,
m1: &'a M,
g1: &'a G1,
m2: &'a M,
g2: &'a G2,
point_type: PhantomData<P>
}
impl<'a, P, M, G1: ?Sized, G2: ?Sized> CompositeShapeAgainstAnyDistCostFn<'a, P, M, G1, G2>
where P: Point,
M: Isometry<P, P::Vect>,
G1: CompositeShape<P, M>,
G2: Repr<P, M> + HasAABB<P, M> {
pub fn new(m1: &'a M, g1: &'a G1, m2: &'a M, g2: &'a G2)
-> CompositeShapeAgainstAnyDistCostFn<'a, P, M, G1, G2> {
let ls_m2 = na::inv(m1).expect("The transformation `m1` must be inversible.") * *m2;
let ls_aabb2 = g2.aabb(&ls_m2);
CompositeShapeAgainstAnyDistCostFn {
msum_shift: -ls_aabb2.center().to_vec(),
msum_margin: ls_aabb2.half_extents(),
m1: m1,
g1: g1,
m2: m2,
g2: g2,
point_type: PhantomData
}
}
}
impl<'a, P, M, G1: ?Sized, G2: ?Sized>
BVTCostFn<<P::Vect as Vect>::Scalar, usize, AABB<P>, <P::Vect as Vect>::Scalar>
for CompositeShapeAgainstAnyDistCostFn<'a, P, M, G1, G2>
where P: Point,
P::Vect: Translate<P>,
M: Isometry<P, P::Vect> + Translation<P::Vect>,
G1: CompositeShape<P, M>,
G2: Repr<P, M> + HasAABB<P, M> {
#[inline]
fn
|
(&mut self, bv: &AABB<P>) -> Option<<P::Vect as Vect>::Scalar> {
// Compute the minkowski sum of the two AABBs.
let msum = AABB::new(*bv.mins() + self.msum_shift + (-self.msum_margin),
*bv.maxs() + self.msum_shift + self.msum_margin);
// Compute the distance to the origin.
Some(msum.distance_to_point(&Identity::new(), &na::orig()))
}
#[inline]
fn compute_b_cost(&mut self, b: &usize) -> Option<(<P::Vect as Vect>::Scalar, <P::Vect as Vect>::Scalar)> {
let mut res = None;
self.g1.map_transformed_part_at(self.m1, *b, &mut |m1, g1| {
let dist = distance_internal::any_against_any(m1, g1, self.m2, self.g2);
res = Some((dist, dist))
});
res
}
}
|
compute_bv_cost
|
identifier_name
|
composite_shape_against_any.rs
|
use std::marker::PhantomData;
use na::{Identity, Translate, Translation};
use na;
use entities::bounding_volume::{HasAABB, AABB};
use entities::partitioning::BVTCostFn;
use entities::shape::CompositeShape;
use entities::inspection::Repr;
use point::PointQuery;
use geometry::distance_internal;
use math::{Scalar, Point, Vect, Isometry};
/// Smallest distance between a composite shape and any other shape.
pub fn composite_shape_against_any<P, M, G1: ?Sized, G2: ?Sized>(m1: &M, g1: &G1, m2: &M, g2: &G2) -> <P::Vect as Vect>::Scalar
where P: Point,
P::Vect: Translate<P>,
M: Isometry<P, P::Vect> + Translation<P::Vect>,
G1: CompositeShape<P, M>,
G2: Repr<P, M> + HasAABB<P, M> {
let mut cost_fn = CompositeShapeAgainstAnyDistCostFn::new(m1, g1, m2, g2);
g1.bvt().best_first_search(&mut cost_fn).map(|(_, res)| res).expect("The composite shape must not be empty.")
}
/// Smallest distance between a shape and a composite shape.
pub fn any_against_composite_shape<P, M, G1: ?Sized, G2: ?Sized>(m1: &M, g1: &G1, m2: &M, g2: &G2) -> <P::Vect as Vect>::Scalar
where P: Point,
P::Vect: Translate<P>,
M: Isometry<P, P::Vect> + Translation<P::Vect>,
G1: Repr<P, M> + HasAABB<P, M>,
G2: CompositeShape<P, M> {
composite_shape_against_any(m2, g2, m1, g1)
}
struct CompositeShapeAgainstAnyDistCostFn<'a, P: 'a + Point, M: 'a, G1: ?Sized + 'a, G2: ?Sized + 'a> {
msum_shift: P::Vect,
msum_margin: P::Vect,
m1: &'a M,
g1: &'a G1,
m2: &'a M,
g2: &'a G2,
point_type: PhantomData<P>
}
impl<'a, P, M, G1: ?Sized, G2: ?Sized> CompositeShapeAgainstAnyDistCostFn<'a, P, M, G1, G2>
where P: Point,
M: Isometry<P, P::Vect>,
G1: CompositeShape<P, M>,
G2: Repr<P, M> + HasAABB<P, M> {
pub fn new(m1: &'a M, g1: &'a G1, m2: &'a M, g2: &'a G2)
-> CompositeShapeAgainstAnyDistCostFn<'a, P, M, G1, G2>
|
}
impl<'a, P, M, G1: ?Sized, G2: ?Sized>
BVTCostFn<<P::Vect as Vect>::Scalar, usize, AABB<P>, <P::Vect as Vect>::Scalar>
for CompositeShapeAgainstAnyDistCostFn<'a, P, M, G1, G2>
where P: Point,
P::Vect: Translate<P>,
M: Isometry<P, P::Vect> + Translation<P::Vect>,
G1: CompositeShape<P, M>,
G2: Repr<P, M> + HasAABB<P, M> {
#[inline]
fn compute_bv_cost(&mut self, bv: &AABB<P>) -> Option<<P::Vect as Vect>::Scalar> {
// Compute the minkowski sum of the two AABBs.
let msum = AABB::new(*bv.mins() + self.msum_shift + (-self.msum_margin),
*bv.maxs() + self.msum_shift + self.msum_margin);
// Compute the distance to the origin.
Some(msum.distance_to_point(&Identity::new(), &na::orig()))
}
#[inline]
fn compute_b_cost(&mut self, b: &usize) -> Option<(<P::Vect as Vect>::Scalar, <P::Vect as Vect>::Scalar)> {
let mut res = None;
self.g1.map_transformed_part_at(self.m1, *b, &mut |m1, g1| {
let dist = distance_internal::any_against_any(m1, g1, self.m2, self.g2);
res = Some((dist, dist))
});
res
}
}
|
{
let ls_m2 = na::inv(m1).expect("The transformation `m1` must be inversible.") * *m2;
let ls_aabb2 = g2.aabb(&ls_m2);
CompositeShapeAgainstAnyDistCostFn {
msum_shift: -ls_aabb2.center().to_vec(),
msum_margin: ls_aabb2.half_extents(),
m1: m1,
g1: g1,
m2: m2,
g2: g2,
point_type: PhantomData
}
}
|
identifier_body
|
composite_shape_against_any.rs
|
use std::marker::PhantomData;
use na::{Identity, Translate, Translation};
use na;
use entities::bounding_volume::{HasAABB, AABB};
use entities::partitioning::BVTCostFn;
use entities::shape::CompositeShape;
use entities::inspection::Repr;
use point::PointQuery;
use geometry::distance_internal;
use math::{Scalar, Point, Vect, Isometry};
/// Smallest distance between a composite shape and any other shape.
pub fn composite_shape_against_any<P, M, G1:?Sized, G2:?Sized>(m1: &M, g1: &G1, m2: &M, g2: &G2) -> <P::Vect as Vect>::Scalar
where P: Point,
P::Vect: Translate<P>,
M: Isometry<P, P::Vect> + Translation<P::Vect>,
G1: CompositeShape<P, M>,
G2: Repr<P, M> + HasAABB<P, M> {
let mut cost_fn = CompositeShapeAgainstAnyDistCostFn::new(m1, g1, m2, g2);
|
g1.bvt().best_first_search(&mut cost_fn).map(|(_, res)| res).expect("The composite shape must not be empty.")
}
/// Smallest distance between a shape and a composite shape.
pub fn any_against_composite_shape<P, M, G1:?Sized, G2:?Sized>(m1: &M, g1: &G1, m2: &M, g2: &G2) -> <P::Vect as Vect>::Scalar
where P: Point,
P::Vect: Translate<P>,
M: Isometry<P, P::Vect> + Translation<P::Vect>,
G1: Repr<P, M> + HasAABB<P, M>,
G2: CompositeShape<P, M> {
composite_shape_against_any(m2, g2, m1, g1)
}
struct CompositeShapeAgainstAnyDistCostFn<'a, P: 'a + Point, M: 'a, G1:?Sized + 'a, G2:?Sized + 'a> {
msum_shift: P::Vect,
msum_margin: P::Vect,
m1: &'a M,
g1: &'a G1,
m2: &'a M,
g2: &'a G2,
point_type: PhantomData<P>
}
impl<'a, P, M, G1:?Sized, G2:?Sized> CompositeShapeAgainstAnyDistCostFn<'a, P, M, G1, G2>
where P: Point,
M: Isometry<P, P::Vect>,
G1: CompositeShape<P, M>,
G2: Repr<P, M> + HasAABB<P, M> {
pub fn new(m1: &'a M, g1: &'a G1, m2: &'a M, g2: &'a G2)
-> CompositeShapeAgainstAnyDistCostFn<'a, P, M, G1, G2> {
let ls_m2 = na::inv(m1).expect("The transformation `m1` must be invertible.") * *m2;
let ls_aabb2 = g2.aabb(&ls_m2);
CompositeShapeAgainstAnyDistCostFn {
msum_shift: -ls_aabb2.center().to_vec(),
msum_margin: ls_aabb2.half_extents(),
m1: m1,
g1: g1,
m2: m2,
g2: g2,
point_type: PhantomData
}
}
}
impl<'a, P, M, G1:?Sized, G2:?Sized>
BVTCostFn<<P::Vect as Vect>::Scalar, usize, AABB<P>, <P::Vect as Vect>::Scalar>
for CompositeShapeAgainstAnyDistCostFn<'a, P, M, G1, G2>
where P: Point,
P::Vect: Translate<P>,
M: Isometry<P, P::Vect> + Translation<P::Vect>,
G1: CompositeShape<P, M>,
G2: Repr<P, M> + HasAABB<P, M> {
#[inline]
fn compute_bv_cost(&mut self, bv: &AABB<P>) -> Option<<P::Vect as Vect>::Scalar> {
// Compute the minkowski sum of the two AABBs.
let msum = AABB::new(*bv.mins() + self.msum_shift + (-self.msum_margin),
*bv.maxs() + self.msum_shift + self.msum_margin);
// Compute the distance to the origin.
Some(msum.distance_to_point(&Identity::new(), &na::orig()))
}
#[inline]
fn compute_b_cost(&mut self, b: &usize) -> Option<(<P::Vect as Vect>::Scalar, <P::Vect as Vect>::Scalar)> {
let mut res = None;
self.g1.map_transformed_part_at(self.m1, *b, &mut |m1, g1| {
let dist = distance_internal::any_against_any(m1, g1, self.m2, self.g2);
res = Some((dist, dist))
});
res
}
}
|
random_line_split
|
|
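The cost function above reduces the pruning step to a point query: each candidate AABB is shifted by the negated center of the other shape's AABB (taken in the composite's local frame) and inflated by that AABB's half-extents, so the distance between the two boxes equals the distance from the shifted box to the origin. Below is a minimal standalone 2D sketch of that idea using plain arrays; the Aabb2 type and the function names are invented for illustration and are not ncollide's API.

// Axis-aligned box in 2D, illustration only (not ncollide's AABB type).
#[derive(Clone, Copy, Debug)]
struct Aabb2 {
    mins: [f32; 2],
    maxs: [f32; 2],
}

impl Aabb2 {
    fn center(&self) -> [f32; 2] {
        [(self.mins[0] + self.maxs[0]) * 0.5, (self.mins[1] + self.maxs[1]) * 0.5]
    }
    fn half_extents(&self) -> [f32; 2] {
        [(self.maxs[0] - self.mins[0]) * 0.5, (self.maxs[1] - self.mins[1]) * 0.5]
    }
    // Distance from this box to a point (0 when the point is inside).
    fn distance_to_point(&self, p: [f32; 2]) -> f32 {
        let mut d2 = 0.0;
        for i in 0..2 {
            let gap = (self.mins[i] - p[i]).max(0.0).max(p[i] - self.maxs[i]);
            d2 += gap * gap;
        }
        d2.sqrt()
    }
}

// Distance between two AABBs, computed the same way as the BVT cost function:
// translate `a` by the negated center of `b`, inflate it by `b`'s half-extents,
// then measure the distance from the resulting box to the origin.
fn aabb_distance(a: &Aabb2, b: &Aabb2) -> f32 {
    let shift = b.center();
    let margin = b.half_extents();
    let msum = Aabb2 {
        mins: [a.mins[0] - shift[0] - margin[0], a.mins[1] - shift[1] - margin[1]],
        maxs: [a.maxs[0] - shift[0] + margin[0], a.maxs[1] - shift[1] + margin[1]],
    };
    msum.distance_to_point([0.0, 0.0])
}

fn main() {
    let a = Aabb2 { mins: [0.0, 0.0], maxs: [1.0, 1.0] };
    let b = Aabb2 { mins: [3.0, 0.0], maxs: [4.0, 1.0] };
    // The boxes are separated by 2.0 along x.
    println!("distance = {}", aabb_distance(&a, &b));
}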
issue-5100.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
|
enum A { B, C }
fn main() {
match (true, false) {
B => (), //~ ERROR expected `(bool,bool)` but found an enum or structure pattern
_ => ()
}
match (true, false) {
(true, false, false) => () //~ ERROR mismatched types: expected `(bool,bool)` but found tuple (expected a tuple with 2 elements but found one with 3 elements)
}
match (true, false) {
@(true, false) => () //~ ERROR mismatched types: expected `(bool,bool)` but found an @-box pattern
}
match (true, false) {
~(true, false) => () //~ ERROR mismatched types: expected `(bool,bool)` but found a ~-box pattern
}
match (true, false) {
&(true, false) => () //~ ERROR mismatched types: expected `(bool,bool)` but found an &-pointer pattern
}
let v = [('a', 'b') //~ ERROR expected function but found `(char,char)`
('c', 'd'),
('e', 'f')];
for v.each |&(x,y)| {} // should be OK
// Make sure none of the errors above were fatal
let x: char = true; //~ ERROR expected `char` but found `bool`
}
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
random_line_split
|
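issue-5100.rs is deliberately malformed: every arm uses a pattern whose shape does not match the (bool, bool) scrutinee, and the @- and ~-box patterns no longer exist in modern Rust. For contrast, here is a small version of the same matches that does type-check; it only illustrates well-formed patterns and is not part of the compile-fail test.

enum A { B, C }

fn main() {
    let a = A::B;
    // Match the enum, not a (bool, bool) tuple, so the pattern shapes line up.
    match a {
        A::B => println!("got B"),
        A::C => println!("got C"),
    }
    // Tuple patterns must have the same arity as the scrutinee.
    match (true, false) {
        (true, false) => println!("matched"),
        _ => println!("anything else"),
    }
}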
issue-5100.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum A { B, C }
fn
|
() {
match (true, false) {
B => (), //~ ERROR expected `(bool,bool)` but found an enum or structure pattern
_ => ()
}
match (true, false) {
(true, false, false) => () //~ ERROR mismatched types: expected `(bool,bool)` but found tuple (expected a tuple with 2 elements but found one with 3 elements)
}
match (true, false) {
@(true, false) => () //~ ERROR mismatched types: expected `(bool,bool)` but found an @-box pattern
}
match (true, false) {
~(true, false) => () //~ ERROR mismatched types: expected `(bool,bool)` but found a ~-box pattern
}
match (true, false) {
&(true, false) => () //~ ERROR mismatched types: expected `(bool,bool)` but found an &-pointer pattern
}
let v = [('a', 'b') //~ ERROR expected function but found `(char,char)`
('c', 'd'),
('e', 'f')];
for v.each |&(x,y)| {} // should be OK
// Make sure none of the errors above were fatal
let x: char = true; //~ ERROR expected `char` but found `bool`
}
|
main
|
identifier_name
|
domexception.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::DOMExceptionBinding;
use dom::bindings::codegen::Bindings::DOMExceptionBinding::DOMExceptionConstants;
use dom::bindings::codegen::Bindings::DOMExceptionBinding::DOMExceptionMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::Root;
use dom::bindings::utils::{Reflector, reflect_dom_object};
use util::str::DOMString;
use std::borrow::ToOwned;
#[repr(u16)]
#[derive(JSTraceable, Copy, Clone, Debug)]
pub enum DOMErrorName {
IndexSizeError = DOMExceptionConstants::INDEX_SIZE_ERR,
HierarchyRequestError = DOMExceptionConstants::HIERARCHY_REQUEST_ERR,
WrongDocumentError = DOMExceptionConstants::WRONG_DOCUMENT_ERR,
InvalidCharacterError = DOMExceptionConstants::INVALID_CHARACTER_ERR,
NoModificationAllowedError = DOMExceptionConstants::NO_MODIFICATION_ALLOWED_ERR,
NotFoundError = DOMExceptionConstants::NOT_FOUND_ERR,
NotSupportedError = DOMExceptionConstants::NOT_SUPPORTED_ERR,
InUseAttributeError = DOMExceptionConstants::INUSE_ATTRIBUTE_ERR,
InvalidStateError = DOMExceptionConstants::INVALID_STATE_ERR,
SyntaxError = DOMExceptionConstants::SYNTAX_ERR,
InvalidModificationError = DOMExceptionConstants::INVALID_MODIFICATION_ERR,
NamespaceError = DOMExceptionConstants::NAMESPACE_ERR,
InvalidAccessError = DOMExceptionConstants::INVALID_ACCESS_ERR,
SecurityError = DOMExceptionConstants::SECURITY_ERR,
NetworkError = DOMExceptionConstants::NETWORK_ERR,
AbortError = DOMExceptionConstants::ABORT_ERR,
URLMismatchError = DOMExceptionConstants::URL_MISMATCH_ERR,
TypeMismatchError = DOMExceptionConstants::TYPE_MISMATCH_ERR,
QuotaExceededError = DOMExceptionConstants::QUOTA_EXCEEDED_ERR,
TimeoutError = DOMExceptionConstants::TIMEOUT_ERR,
InvalidNodeTypeError = DOMExceptionConstants::INVALID_NODE_TYPE_ERR,
DataCloneError = DOMExceptionConstants::DATA_CLONE_ERR,
EncodingError
}
#[dom_struct]
pub struct DOMException {
reflector_: Reflector,
code: DOMErrorName,
}
impl DOMException {
fn new_inherited(code: DOMErrorName) -> DOMException {
DOMException {
reflector_: Reflector::new(),
code: code,
}
}
pub fn new(global: GlobalRef, code: DOMErrorName) -> Root<DOMException> {
reflect_dom_object(box DOMException::new_inherited(code), global, DOMExceptionBinding::Wrap)
}
}
impl<'a> DOMExceptionMethods for &'a DOMException {
// https://heycam.github.io/webidl/#dfn-DOMException
fn Code(self) -> u16 {
match self.code {
// https://heycam.github.io/webidl/#dfn-throw
DOMErrorName::EncodingError => 0,
code => code as u16
}
}
// https://heycam.github.io/webidl/#idl-DOMException-error-names
fn Name(self) -> DOMString {
|
format!("{:?}", self.code)
}
// https://heycam.github.io/webidl/#error-names
fn Message(self) -> DOMString {
let message = match self.code {
DOMErrorName::IndexSizeError => "The index is not in the allowed range.",
DOMErrorName::HierarchyRequestError => "The operation would yield an incorrect node tree.",
DOMErrorName::WrongDocumentError => "The object is in the wrong document.",
DOMErrorName::InvalidCharacterError => "The string contains invalid characters.",
DOMErrorName::NoModificationAllowedError => "The object can not be modified.",
DOMErrorName::NotFoundError => "The object can not be found here.",
DOMErrorName::NotSupportedError => "The operation is not supported.",
DOMErrorName::InUseAttributeError => "The attribute is already in use.",
DOMErrorName::InvalidStateError => "The object is in an invalid state.",
DOMErrorName::SyntaxError => "The string did not match the expected pattern.",
DOMErrorName::InvalidModificationError => "The object can not be modified in this way.",
DOMErrorName::NamespaceError => "The operation is not allowed by Namespaces in XML.",
DOMErrorName::InvalidAccessError => "The object does not support the operation or argument.",
DOMErrorName::SecurityError => "The operation is insecure.",
DOMErrorName::NetworkError => "A network error occurred.",
DOMErrorName::AbortError => "The operation was aborted.",
DOMErrorName::URLMismatchError => "The given URL does not match another URL.",
DOMErrorName::TypeMismatchError => "The given type does not match any expected type.",
DOMErrorName::QuotaExceededError => "The quota has been exceeded.",
DOMErrorName::TimeoutError => "The operation timed out.",
DOMErrorName::InvalidNodeTypeError =>
"The supplied node is incorrect or has an incorrect ancestor for this operation.",
DOMErrorName::DataCloneError => "The object can not be cloned.",
DOMErrorName::EncodingError => "The encoding operation (either encoding or decoding) failed."
};
message.to_owned()
}
}
|
random_line_split
|
|
domexception.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::DOMExceptionBinding;
use dom::bindings::codegen::Bindings::DOMExceptionBinding::DOMExceptionConstants;
use dom::bindings::codegen::Bindings::DOMExceptionBinding::DOMExceptionMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::Root;
use dom::bindings::utils::{Reflector, reflect_dom_object};
use util::str::DOMString;
use std::borrow::ToOwned;
#[repr(u16)]
#[derive(JSTraceable, Copy, Clone, Debug)]
pub enum DOMErrorName {
IndexSizeError = DOMExceptionConstants::INDEX_SIZE_ERR,
HierarchyRequestError = DOMExceptionConstants::HIERARCHY_REQUEST_ERR,
WrongDocumentError = DOMExceptionConstants::WRONG_DOCUMENT_ERR,
InvalidCharacterError = DOMExceptionConstants::INVALID_CHARACTER_ERR,
NoModificationAllowedError = DOMExceptionConstants::NO_MODIFICATION_ALLOWED_ERR,
NotFoundError = DOMExceptionConstants::NOT_FOUND_ERR,
NotSupportedError = DOMExceptionConstants::NOT_SUPPORTED_ERR,
InUseAttributeError = DOMExceptionConstants::INUSE_ATTRIBUTE_ERR,
InvalidStateError = DOMExceptionConstants::INVALID_STATE_ERR,
SyntaxError = DOMExceptionConstants::SYNTAX_ERR,
InvalidModificationError = DOMExceptionConstants::INVALID_MODIFICATION_ERR,
NamespaceError = DOMExceptionConstants::NAMESPACE_ERR,
InvalidAccessError = DOMExceptionConstants::INVALID_ACCESS_ERR,
SecurityError = DOMExceptionConstants::SECURITY_ERR,
NetworkError = DOMExceptionConstants::NETWORK_ERR,
AbortError = DOMExceptionConstants::ABORT_ERR,
URLMismatchError = DOMExceptionConstants::URL_MISMATCH_ERR,
TypeMismatchError = DOMExceptionConstants::TYPE_MISMATCH_ERR,
QuotaExceededError = DOMExceptionConstants::QUOTA_EXCEEDED_ERR,
TimeoutError = DOMExceptionConstants::TIMEOUT_ERR,
InvalidNodeTypeError = DOMExceptionConstants::INVALID_NODE_TYPE_ERR,
DataCloneError = DOMExceptionConstants::DATA_CLONE_ERR,
EncodingError
}
#[dom_struct]
pub struct DOMException {
reflector_: Reflector,
code: DOMErrorName,
}
impl DOMException {
fn new_inherited(code: DOMErrorName) -> DOMException {
DOMException {
reflector_: Reflector::new(),
code: code,
}
}
pub fn new(global: GlobalRef, code: DOMErrorName) -> Root<DOMException> {
reflect_dom_object(box DOMException::new_inherited(code), global, DOMExceptionBinding::Wrap)
}
}
impl<'a> DOMExceptionMethods for &'a DOMException {
// https://heycam.github.io/webidl/#dfn-DOMException
fn Code(self) -> u16 {
match self.code {
// https://heycam.github.io/webidl/#dfn-throw
DOMErrorName::EncodingError => 0,
code => code as u16
}
}
// https://heycam.github.io/webidl/#idl-DOMException-error-names
fn
|
(self) -> DOMString {
format!("{:?}", self.code)
}
// https://heycam.github.io/webidl/#error-names
fn Message(self) -> DOMString {
let message = match self.code {
DOMErrorName::IndexSizeError => "The index is not in the allowed range.",
DOMErrorName::HierarchyRequestError => "The operation would yield an incorrect node tree.",
DOMErrorName::WrongDocumentError => "The object is in the wrong document.",
DOMErrorName::InvalidCharacterError => "The string contains invalid characters.",
DOMErrorName::NoModificationAllowedError => "The object can not be modified.",
DOMErrorName::NotFoundError => "The object can not be found here.",
DOMErrorName::NotSupportedError => "The operation is not supported.",
DOMErrorName::InUseAttributeError => "The attribute is already in use.",
DOMErrorName::InvalidStateError => "The object is in an invalid state.",
DOMErrorName::SyntaxError => "The string did not match the expected pattern.",
DOMErrorName::InvalidModificationError => "The object can not be modified in this way.",
DOMErrorName::NamespaceError => "The operation is not allowed by Namespaces in XML.",
DOMErrorName::InvalidAccessError => "The object does not support the operation or argument.",
DOMErrorName::SecurityError => "The operation is insecure.",
DOMErrorName::NetworkError => "A network error occurred.",
DOMErrorName::AbortError => "The operation was aborted.",
DOMErrorName::URLMismatchError => "The given URL does not match another URL.",
DOMErrorName::TypeMismatchError => "The given type does not match any expected type.",
DOMErrorName::QuotaExceededError => "The quota has been exceeded.",
DOMErrorName::TimeoutError => "The operation timed out.",
DOMErrorName::InvalidNodeTypeError =>
"The supplied node is incorrect or has an incorrect ancestor for this operation.",
DOMErrorName::DataCloneError => "The object can not be cloned.",
DOMErrorName::EncodingError => "The encoding operation (either encoding or decoding) failed."
};
message.to_owned()
}
}
|
Name
|
identifier_name
|
domexception.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::DOMExceptionBinding;
use dom::bindings::codegen::Bindings::DOMExceptionBinding::DOMExceptionConstants;
use dom::bindings::codegen::Bindings::DOMExceptionBinding::DOMExceptionMethods;
use dom::bindings::global::GlobalRef;
use dom::bindings::js::Root;
use dom::bindings::utils::{Reflector, reflect_dom_object};
use util::str::DOMString;
use std::borrow::ToOwned;
#[repr(u16)]
#[derive(JSTraceable, Copy, Clone, Debug)]
pub enum DOMErrorName {
IndexSizeError = DOMExceptionConstants::INDEX_SIZE_ERR,
HierarchyRequestError = DOMExceptionConstants::HIERARCHY_REQUEST_ERR,
WrongDocumentError = DOMExceptionConstants::WRONG_DOCUMENT_ERR,
InvalidCharacterError = DOMExceptionConstants::INVALID_CHARACTER_ERR,
NoModificationAllowedError = DOMExceptionConstants::NO_MODIFICATION_ALLOWED_ERR,
NotFoundError = DOMExceptionConstants::NOT_FOUND_ERR,
NotSupportedError = DOMExceptionConstants::NOT_SUPPORTED_ERR,
InUseAttributeError = DOMExceptionConstants::INUSE_ATTRIBUTE_ERR,
InvalidStateError = DOMExceptionConstants::INVALID_STATE_ERR,
SyntaxError = DOMExceptionConstants::SYNTAX_ERR,
InvalidModificationError = DOMExceptionConstants::INVALID_MODIFICATION_ERR,
NamespaceError = DOMExceptionConstants::NAMESPACE_ERR,
InvalidAccessError = DOMExceptionConstants::INVALID_ACCESS_ERR,
SecurityError = DOMExceptionConstants::SECURITY_ERR,
NetworkError = DOMExceptionConstants::NETWORK_ERR,
AbortError = DOMExceptionConstants::ABORT_ERR,
URLMismatchError = DOMExceptionConstants::URL_MISMATCH_ERR,
TypeMismatchError = DOMExceptionConstants::TYPE_MISMATCH_ERR,
QuotaExceededError = DOMExceptionConstants::QUOTA_EXCEEDED_ERR,
TimeoutError = DOMExceptionConstants::TIMEOUT_ERR,
InvalidNodeTypeError = DOMExceptionConstants::INVALID_NODE_TYPE_ERR,
DataCloneError = DOMExceptionConstants::DATA_CLONE_ERR,
EncodingError
}
#[dom_struct]
pub struct DOMException {
reflector_: Reflector,
code: DOMErrorName,
}
impl DOMException {
fn new_inherited(code: DOMErrorName) -> DOMException {
DOMException {
reflector_: Reflector::new(),
code: code,
}
}
pub fn new(global: GlobalRef, code: DOMErrorName) -> Root<DOMException> {
reflect_dom_object(box DOMException::new_inherited(code), global, DOMExceptionBinding::Wrap)
}
}
impl<'a> DOMExceptionMethods for &'a DOMException {
// https://heycam.github.io/webidl/#dfn-DOMException
fn Code(self) -> u16 {
match self.code {
// https://heycam.github.io/webidl/#dfn-throw
DOMErrorName::EncodingError => 0,
code => code as u16
}
}
// https://heycam.github.io/webidl/#idl-DOMException-error-names
fn Name(self) -> DOMString
|
// https://heycam.github.io/webidl/#error-names
fn Message(self) -> DOMString {
let message = match self.code {
DOMErrorName::IndexSizeError => "The index is not in the allowed range.",
DOMErrorName::HierarchyRequestError => "The operation would yield an incorrect node tree.",
DOMErrorName::WrongDocumentError => "The object is in the wrong document.",
DOMErrorName::InvalidCharacterError => "The string contains invalid characters.",
DOMErrorName::NoModificationAllowedError => "The object can not be modified.",
DOMErrorName::NotFoundError => "The object can not be found here.",
DOMErrorName::NotSupportedError => "The operation is not supported.",
DOMErrorName::InUseAttributeError => "The attribute is already in use.",
DOMErrorName::InvalidStateError => "The object is in an invalid state.",
DOMErrorName::SyntaxError => "The string did not match the expected pattern.",
DOMErrorName::InvalidModificationError => "The object can not be modified in this way.",
DOMErrorName::NamespaceError => "The operation is not allowed by Namespaces in XML.",
DOMErrorName::InvalidAccessError => "The object does not support the operation or argument.",
DOMErrorName::SecurityError => "The operation is insecure.",
DOMErrorName::NetworkError => "A network error occurred.",
DOMErrorName::AbortError => "The operation was aborted.",
DOMErrorName::URLMismatchError => "The given URL does not match another URL.",
DOMErrorName::TypeMismatchError => "The given type does not match any expected type.",
DOMErrorName::QuotaExceededError => "The quota has been exceeded.",
DOMErrorName::TimeoutError => "The operation timed out.",
DOMErrorName::InvalidNodeTypeError =>
"The supplied node is incorrect or has an incorrect ancestor for this operation.",
DOMErrorName::DataCloneError => "The object can not be cloned.",
DOMErrorName::EncodingError => "The encoding operation (either encoding or decoding) failed."
};
message.to_owned()
}
}
|
{
format!("{:?}", self.code)
}
|
identifier_body
|
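The Name method above simply reuses the enum's Debug representation, which works because every variant is spelled exactly like the WebIDL error name it stands for. A minimal standalone sketch of the same trick, using a hypothetical ErrorName enum rather than Servo's real type:

// Deriving Debug gives the variant name for free; since the variants are spelled
// exactly like the names we want to expose, `format!("{:?}", ...)` is enough.
#[derive(Debug, Clone, Copy)]
enum ErrorName {
    IndexSizeError,
    NotFoundError,
    SyntaxError,
}

fn name(code: ErrorName) -> String {
    format!("{:?}", code)
}

fn main() {
    assert_eq!(name(ErrorName::NotFoundError), "NotFoundError");
    println!("{}", name(ErrorName::SyntaxError)); // prints "SyntaxError"
}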
unix.rs
|
#![cfg(any(
target_os = "linux",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd",
))]
use crate::platform::ContextTraitExt;
pub use crate::platform_impl::{HeadlessContextExt, RawContextExt, RawHandle};
use crate::{Context, ContextCurrentState};
pub use glutin_egl_sys::EGLContext;
#[cfg(feature = "x11")]
pub use glutin_glx_sys::GLXContext;
pub use winit::platform::unix::*;
use std::os::raw;
impl<T: ContextCurrentState> ContextTraitExt for Context<T> {
type Handle = RawHandle;
#[inline]
unsafe fn raw_handle(&self) -> Self::Handle
|
#[inline]
unsafe fn get_egl_display(&self) -> Option<*const raw::c_void> {
self.context.get_egl_display()
}
}
|
{
self.context.raw_handle()
}
|
identifier_body
|
unix.rs
|
#![cfg(any(
target_os = "linux",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd",
))]
use crate::platform::ContextTraitExt;
pub use crate::platform_impl::{HeadlessContextExt, RawContextExt, RawHandle};
use crate::{Context, ContextCurrentState};
pub use glutin_egl_sys::EGLContext;
#[cfg(feature = "x11")]
pub use glutin_glx_sys::GLXContext;
pub use winit::platform::unix::*;
use std::os::raw;
impl<T: ContextCurrentState> ContextTraitExt for Context<T> {
type Handle = RawHandle;
#[inline]
unsafe fn raw_handle(&self) -> Self::Handle {
self.context.raw_handle()
}
#[inline]
unsafe fn
|
(&self) -> Option<*const raw::c_void> {
self.context.get_egl_display()
}
}
|
get_egl_display
|
identifier_name
|
unix.rs
|
#![cfg(any(
target_os = "linux",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd",
))]
use crate::platform::ContextTraitExt;
pub use crate::platform_impl::{HeadlessContextExt, RawContextExt, RawHandle};
use crate::{Context, ContextCurrentState};
pub use glutin_egl_sys::EGLContext;
#[cfg(feature = "x11")]
pub use glutin_glx_sys::GLXContext;
pub use winit::platform::unix::*;
use std::os::raw;
impl<T: ContextCurrentState> ContextTraitExt for Context<T> {
type Handle = RawHandle;
#[inline]
unsafe fn raw_handle(&self) -> Self::Handle {
self.context.raw_handle()
}
|
self.context.get_egl_display()
}
}
|
#[inline]
unsafe fn get_egl_display(&self) -> Option<*const raw::c_void> {
|
random_line_split
|
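The impl above is pure forwarding: the platform extension trait exposes raw_handle and get_egl_display by delegating to the wrapped context. A dependency-free sketch of that wrapper-plus-extension-trait pattern; the names here (Context, ContextExt, Inner) are invented for illustration and are not glutin's API.

// A wrapper that hides an inner handle, plus an extension trait that exposes it.
struct Inner {
    raw: usize,
}

struct Context {
    inner: Inner,
}

trait ContextExt {
    type Handle;
    fn raw_handle(&self) -> Self::Handle;
}

impl ContextExt for Context {
    type Handle = usize;

    // Pure delegation, just like the glutin impl forwards to self.context.
    fn raw_handle(&self) -> Self::Handle {
        self.inner.raw
    }
}

fn main() {
    let ctx = Context { inner: Inner { raw: 0xdead_beef } };
    println!("raw handle = {:#x}", ctx.raw_handle());
}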
macro-interpolation.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[feature(macro_rules)];
macro_rules! overly_complicated (
($fnname:ident, $arg:ident, $ty:ty, $body:block, $val:expr, $pat:pat, $res:path) =>
({
fn $fnname($arg: $ty) -> Option<$ty> $body
match $fnname($val) {
Some($pat) => {
$res
}
_ => { fail2!(); }
}
})
)
pub fn main()
|
{
assert!(overly_complicated!(f, x, Option<uint>, { return Some(x); },
Some(8u), Some(y), y) == 8u)
}
|
identifier_body
|
|
macro-interpolation.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[feature(macro_rules)];
macro_rules! overly_complicated (
($fnname:ident, $arg:ident, $ty:ty, $body:block, $val:expr, $pat:pat, $res:path) =>
({
|
fn $fnname($arg: $ty) -> Option<$ty> $body
match $fnname($val) {
Some($pat) => {
$res
}
_ => { fail2!(); }
}
})
)
pub fn main() {
assert!(overly_complicated!(f, x, Option<uint>, { return Some(x); },
Some(8u), Some(y), y) == 8u)
}
|
random_line_split
|
|
macro-interpolation.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[feature(macro_rules)];
macro_rules! overly_complicated (
($fnname:ident, $arg:ident, $ty:ty, $body:block, $val:expr, $pat:pat, $res:path) =>
({
fn $fnname($arg: $ty) -> Option<$ty> $body
match $fnname($val) {
Some($pat) => {
$res
}
_ => { fail2!(); }
}
})
)
pub fn
|
() {
assert!(overly_complicated!(f, x, Option<uint>, { return Some(x); },
Some(8u), Some(y), y) == 8u)
}
|
main
|
identifier_name
|
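The macro above interpolates several fragment kinds at once (ident, ty, block, expr, pat, path). Here is a smaller sketch of the same interpolation idea written for current Rust, since the fail2! macro and the uint type used in the original no longer exist; the macro name make_and_call is invented.

// Declares a function whose name, argument, type and body are all supplied by
// the caller, then immediately calls it and matches on the result.
macro_rules! make_and_call {
    ($fnname:ident, $arg:ident, $ty:ty, $body:block, $val:expr, $pat:pat, $res:expr) => {{
        fn $fnname($arg: $ty) -> Option<$ty> $body
        match $fnname($val) {
            Some($pat) => $res,
            _ => panic!("unexpected result"),
        }
    }};
}

fn main() {
    let y = make_and_call!(double, x, u32, { Some(x * 2) }, 4, v, v + 1);
    assert_eq!(y, 9); // double(4) = 8, then the match arm adds 1
    println!("y = {}", y);
}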
root.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Liberally derived from the [Firefox JS implementation](http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/root.js).
/// Connection point for all new remote devtools interactions, providing lists of known actors
/// that perform more specific actions (tabs, addons, browser chrome, etc.)
use actor::{Actor, ActorRegistry};
use actors::tab::{TabActor, TabActorMsg};
use protocol::JsonPacketStream;
use rustc_serialize::json;
use std::net::TcpStream;
#[derive(RustcEncodable)]
struct ActorTraits {
sources: bool,
highlightable: bool,
customHighlighters: Vec<String>,
}
#[derive(RustcEncodable)]
struct ErrorReply {
from: String,
error: String,
message: String,
}
#[derive(RustcEncodable)]
struct ListTabsReply {
from: String,
selected: uint,
tabs: Vec<TabActorMsg>,
}
#[derive(RustcEncodable)]
struct RootActorMsg {
from: String,
applicationType: String,
traits: ActorTraits,
}
pub struct RootActor {
pub tabs: Vec<String>,
}
impl Actor for RootActor {
fn name(&self) -> String {
"root".to_string()
}
|
stream: &mut TcpStream) -> Result<bool, ()> {
Ok(match msg_type {
"listAddons" => {
let actor = ErrorReply {
from: "root".to_string(),
error: "noAddons".to_string(),
message: "This root actor has no browser addons.".to_string(),
};
stream.write_json_packet(&actor);
true
}
//https://wiki.mozilla.org/Remote_Debugging_Protocol#Listing_Browser_Tabs
"listTabs" => {
let actor = ListTabsReply {
from: "root".to_string(),
selected: 0,
tabs: self.tabs.iter().map(|tab| {
registry.find::<TabActor>(tab).encodable()
}).collect()
};
stream.write_json_packet(&actor);
true
}
_ => false
})
}
}
impl RootActor {
pub fn encodable(&self) -> RootActorMsg {
RootActorMsg {
from: "root".to_string(),
applicationType: "browser".to_string(),
traits: ActorTraits {
sources: true,
highlightable: true,
customHighlighters: vec!("BoxModelHighlighter".to_string()),
},
}
}
}
|
fn handle_message(&self,
registry: &ActorRegistry,
msg_type: &str,
_msg: &json::Object,
|
random_line_split
|
root.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Liberally derived from the [Firefox JS implementation](http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/root.js).
/// Connection point for all new remote devtools interactions, providing lists of known actors
/// that perform more specific actions (tabs, addons, browser chrome, etc.)
use actor::{Actor, ActorRegistry};
use actors::tab::{TabActor, TabActorMsg};
use protocol::JsonPacketStream;
use rustc_serialize::json;
use std::net::TcpStream;
#[derive(RustcEncodable)]
struct ActorTraits {
sources: bool,
highlightable: bool,
customHighlighters: Vec<String>,
}
#[derive(RustcEncodable)]
struct ErrorReply {
from: String,
error: String,
message: String,
}
#[derive(RustcEncodable)]
struct ListTabsReply {
from: String,
selected: uint,
tabs: Vec<TabActorMsg>,
}
#[derive(RustcEncodable)]
struct RootActorMsg {
from: String,
applicationType: String,
traits: ActorTraits,
}
pub struct RootActor {
pub tabs: Vec<String>,
}
impl Actor for RootActor {
fn name(&self) -> String {
"root".to_string()
}
fn handle_message(&self,
registry: &ActorRegistry,
msg_type: &str,
_msg: &json::Object,
stream: &mut TcpStream) -> Result<bool, ()>
|
};
stream.write_json_packet(&actor);
true
}
_ => false
})
}
}
impl RootActor {
pub fn encodable(&self) -> RootActorMsg {
RootActorMsg {
from: "root".to_string(),
applicationType: "browser".to_string(),
traits: ActorTraits {
sources: true,
highlightable: true,
customHighlighters: vec!("BoxModelHighlighter".to_string()),
},
}
}
}
|
{
Ok(match msg_type {
"listAddons" => {
let actor = ErrorReply {
from: "root".to_string(),
error: "noAddons".to_string(),
message: "This root actor has no browser addons.".to_string(),
};
stream.write_json_packet(&actor);
true
}
//https://wiki.mozilla.org/Remote_Debugging_Protocol#Listing_Browser_Tabs
"listTabs" => {
let actor = ListTabsReply {
from: "root".to_string(),
selected: 0,
tabs: self.tabs.iter().map(|tab| {
registry.find::<TabActor>(tab).encodable()
}).collect()
|
identifier_body
|
root.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Liberally derived from the [Firefox JS implementation](http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/root.js).
/// Connection point for all new remote devtools interactions, providing lists of known actors
/// that perform more specific actions (tabs, addons, browser chrome, etc.)
use actor::{Actor, ActorRegistry};
use actors::tab::{TabActor, TabActorMsg};
use protocol::JsonPacketStream;
use rustc_serialize::json;
use std::net::TcpStream;
#[derive(RustcEncodable)]
struct ActorTraits {
sources: bool,
highlightable: bool,
customHighlighters: Vec<String>,
}
#[derive(RustcEncodable)]
struct ErrorReply {
from: String,
error: String,
message: String,
}
#[derive(RustcEncodable)]
struct ListTabsReply {
from: String,
selected: uint,
tabs: Vec<TabActorMsg>,
}
#[derive(RustcEncodable)]
struct RootActorMsg {
from: String,
applicationType: String,
traits: ActorTraits,
}
pub struct RootActor {
pub tabs: Vec<String>,
}
impl Actor for RootActor {
fn name(&self) -> String {
"root".to_string()
}
fn handle_message(&self,
registry: &ActorRegistry,
msg_type: &str,
_msg: &json::Object,
stream: &mut TcpStream) -> Result<bool, ()> {
Ok(match msg_type {
"listAddons" => {
let actor = ErrorReply {
from: "root".to_string(),
error: "noAddons".to_string(),
message: "This root actor has no browser addons.".to_string(),
};
stream.write_json_packet(&actor);
true
}
//https://wiki.mozilla.org/Remote_Debugging_Protocol#Listing_Browser_Tabs
"listTabs" =>
|
_ => false
})
}
}
impl RootActor {
pub fn encodable(&self) -> RootActorMsg {
RootActorMsg {
from: "root".to_string(),
applicationType: "browser".to_string(),
traits: ActorTraits {
sources: true,
highlightable: true,
customHighlighters: vec!("BoxModelHighlighter".to_string()),
},
}
}
}
|
{
let actor = ListTabsReply {
from: "root".to_string(),
selected: 0,
tabs: self.tabs.iter().map(|tab| {
registry.find::<TabActor>(tab).encodable()
}).collect()
};
stream.write_json_packet(&actor);
true
}
|
conditional_block
|
root.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Liberally derived from the [Firefox JS implementation](http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/root.js).
/// Connection point for all new remote devtools interactions, providing lists of known actors
/// that perform more specific actions (tabs, addons, browser chrome, etc.)
use actor::{Actor, ActorRegistry};
use actors::tab::{TabActor, TabActorMsg};
use protocol::JsonPacketStream;
use rustc_serialize::json;
use std::net::TcpStream;
#[derive(RustcEncodable)]
struct ActorTraits {
sources: bool,
highlightable: bool,
customHighlighters: Vec<String>,
}
#[derive(RustcEncodable)]
struct ErrorReply {
from: String,
error: String,
message: String,
}
#[derive(RustcEncodable)]
struct ListTabsReply {
from: String,
selected: uint,
tabs: Vec<TabActorMsg>,
}
#[derive(RustcEncodable)]
struct RootActorMsg {
from: String,
applicationType: String,
traits: ActorTraits,
}
pub struct RootActor {
pub tabs: Vec<String>,
}
impl Actor for RootActor {
fn
|
(&self) -> String {
"root".to_string()
}
fn handle_message(&self,
registry: &ActorRegistry,
msg_type: &str,
_msg: &json::Object,
stream: &mut TcpStream) -> Result<bool, ()> {
Ok(match msg_type {
"listAddons" => {
let actor = ErrorReply {
from: "root".to_string(),
error: "noAddons".to_string(),
message: "This root actor has no browser addons.".to_string(),
};
stream.write_json_packet(&actor);
true
}
//https://wiki.mozilla.org/Remote_Debugging_Protocol#Listing_Browser_Tabs
"listTabs" => {
let actor = ListTabsReply {
from: "root".to_string(),
selected: 0,
tabs: self.tabs.iter().map(|tab| {
registry.find::<TabActor>(tab).encodable()
}).collect()
};
stream.write_json_packet(&actor);
true
}
_ => false
})
}
}
impl RootActor {
pub fn encodable(&self) -> RootActorMsg {
RootActorMsg {
from: "root".to_string(),
applicationType: "browser".to_string(),
traits: ActorTraits {
sources: true,
highlightable: true,
customHighlighters: vec!("BoxModelHighlighter".to_string()),
},
}
}
}
|
name
|
identifier_name
|
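handle_message above is a string dispatch: known message types produce a reply packet and return true, everything else returns false so another actor can try. A dependency-free sketch of that shape, with a hypothetical Reply enum standing in for the devtools reply structs:

#[derive(Debug, PartialEq)]
enum Reply {
    Error { error: String, message: String },
    Tabs { selected: usize, tabs: Vec<String> },
}

// Returns Ok((true, reply)) when the message type was recognised,
// Ok((false, None)) when it should be handled elsewhere.
fn handle_message(msg_type: &str, tabs: &[String]) -> Result<(bool, Option<Reply>), ()> {
    Ok(match msg_type {
        "listAddons" => (
            true,
            Some(Reply::Error {
                error: "noAddons".to_string(),
                message: "This root actor has no browser addons.".to_string(),
            }),
        ),
        "listTabs" => (
            true,
            Some(Reply::Tabs { selected: 0, tabs: tabs.to_vec() }),
        ),
        _ => (false, None),
    })
}

fn main() {
    let tabs = vec!["tab1".to_string(), "tab2".to_string()];
    let (handled, reply) = handle_message("listTabs", &tabs).unwrap();
    assert!(handled);
    println!("{:?}", reply);
}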
no_0037_sudoku_solver.rs
|
struct
|
;
impl Solution {
pub fn solve_sudoku(board: &mut Vec<Vec<char>>) {
// One u16 per row: a set bit means that digit is already present in the row.
let mut line = vec![0_u16; 9];
// One u16 per column.
let mut column = vec![0_u16; 9];
// One u16 per 3 x 3 block.
let mut block = vec![vec![0_u16; 3]; 3];
// For every cell that is not blank, set the corresponding bit.
for (i, row) in board.iter().enumerate() {
for (j, b) in row.iter().enumerate() {
if *b != '.' {
Self::flip(
&mut line,
&mut column,
&mut block,
i,
j,
*b as u8 - '1' as u8,
);
}
}
}
// First mark every cell that has only one possible digit.
loop {
let mut modified = false;
let m = board.len();
let n = board[0].len();
for i in 0..m {
for j in 0..n {
let b = board[i][j];
if b != '.' {
continue;
}
// Invert so that a 1 bit marks an available digit; & 0x1ff keeps only the low 9 bits.
let mask = !(line[i] | column[j] | block[i / 3][j / 3]) & 0x1ff;
// mask & (mask - 1) clears the lowest set bit; if the result is 0, mask has exactly
// one bit set, i.e. there is only one candidate digit.
if mask > 0 && mask & (mask - 1) == 0 {
// The number of trailing zeros is the position of that single bit.
let digit = mask.trailing_zeros() as u8;
// Place the digit at [i][j].
Self::flip(&mut line, &mut column, &mut block, i, j, digit);
board[i][j] = (digit + '1' as u8) as char;
modified = true;
}
}
}
if !modified {
break;
}
}
// Collect the remaining blank cells.
let mut spaces = Vec::new();
for (i, row) in board.iter().enumerate() {
for (j, b) in row.iter().enumerate() {
if *b == '.' {
spaces.push((i, j));
}
}
}
Self::dfs(&spaces, 0, &mut line, &mut column, &mut block, board);
}
// Toggle the bit corresponding to `digit`.
fn flip(
line: &mut Vec<u16>,
column: &mut Vec<u16>,
block: &mut Vec<Vec<u16>>,
i: usize,
j: usize,
digit: u8,
) {
line[i] ^= 1 << digit;
column[j] ^= 1 << digit;
block[i / 3][j / 3] ^= 1 << digit;
}
fn dfs(
spaces: &Vec<(usize, usize)>,
pos: usize,
line: &mut Vec<u16>,
column: &mut Vec<u16>,
block: &mut Vec<Vec<u16>>,
board: &mut Vec<Vec<char>>,
) -> bool {
if pos == spaces.len() {
return true;
}
let (i, j) = spaces[pos];
let mut mask = !(line[i] | column[j] | block[i / 3][j / 3]) & 0x1ff;
while mask > 0 {
let digit = mask.trailing_zeros() as u8;
Self::flip(line, column, block, i, j, digit);
board[i][j] = (digit + '1' as u8) as char;
if Self::dfs(spaces, pos + 1, line, column, block, board) {
return true;
}
Self::flip(line, column, block, i, j, digit);
mask &= mask - 1;
}
false
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_solve_sudoku() {
let mut board = vec![
vec!['5', '3', '.', '.', '7', '.', '.', '.', '.'],
vec!['6', '.', '.', '1', '9', '5', '.', '.', '.'],
vec!['.', '9', '8', '.', '.', '.', '.', '6', '.'],
vec!['8', '.', '.', '.', '6', '.', '.', '.', '3'],
vec!['4', '.', '.', '8', '.', '3', '.', '.', '1'],
vec!['7', '.', '.', '.', '2', '.', '.', '.', '6'],
vec!['.', '6', '.', '.', '.', '.', '2', '8', '.'],
vec!['.', '.', '.', '4', '1', '9', '.', '.', '5'],
vec!['.', '.', '.', '.', '8', '.', '.', '7', '9'],
];
Solution::solve_sudoku(&mut board);
let want = vec![
vec!['5', '3', '4', '6', '7', '8', '9', '1', '2'],
vec!['6', '7', '2', '1', '9', '5', '3', '4', '8'],
vec!['1', '9', '8', '3', '4', '2', '5', '6', '7'],
vec!['8', '5', '9', '7', '6', '1', '4', '2', '3'],
vec!['4', '2', '6', '8', '5', '3', '7', '9', '1'],
vec!['7', '1', '3', '9', '2', '4', '8', '5', '6'],
vec!['9', '6', '1', '5', '3', '7', '2', '8', '4'],
vec!['2', '8', '7', '4', '1', '9', '6', '3', '5'],
vec!['3', '4', '5', '2', '8', '6', '1', '7', '9'],
];
assert_eq!(board, want);
}
}
|
Solution
|
identifier_name
|
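The solver above packs candidate digits into a 9-bit mask per row, column and block and leans on two bit tricks: mask & (mask - 1) clears the lowest set bit (so a nonzero mask with that result equal to zero has exactly one candidate), and trailing_zeros() recovers which digit that candidate is. A tiny standalone illustration:

fn main() {
    // Bits 0..=8 stand for digits 1..=9; a set bit means "already placed".
    let used: u16 = 0b1_1011_1101;
    let candidates = !used & 0x1ff; // invert and keep only the low 9 bits
    println!("candidate mask = {:09b}", candidates); // 001000010 -> digits 2 and 7

    // An independent example: a mask with exactly one bit set
    // satisfies mask & (mask - 1) == 0.
    let single: u16 = 0b0_0010_0000;
    assert!(single > 0 && single & (single - 1) == 0);

    // trailing_zeros() gives the bit index, i.e. digit - 1.
    let digit = single.trailing_zeros() as u8 + 1;
    assert_eq!(digit, 6);

    // Iterating candidates: peel off the lowest bit each round, as dfs() does.
    let mut mask = candidates;
    while mask > 0 {
        let d = mask.trailing_zeros() + 1;
        println!("try digit {}", d);
        mask &= mask - 1; // clear the lowest set bit
    }
}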
no_0037_sudoku_solver.rs
|
struct Solution;
impl Solution {
pub fn solve_sudoku(board: &mut Vec<Vec<char>>) {
// One u16 per row: a set bit means that digit is already present in the row.
let mut line = vec![0_u16; 9];
// One u16 per column.
let mut column = vec![0_u16; 9];
// One u16 per 3 x 3 block.
let mut block = vec![vec![0_u16; 3]; 3];
// For every cell that is not blank, set the corresponding bit.
for (i, row) in board.iter().enumerate() {
for (j, b) in row.iter().enumerate() {
if *b != '.' {
Self::flip(
&mut line,
&mut column,
&mut block,
i,
j,
*b as u8 - '1' as u8,
);
}
}
}
// First mark every cell that has only one possible digit.
loop {
let mut modified = false;
let m = board.len();
let n = board[0].len();
for i in 0..m {
for j in 0..n {
let b = board[i][j];
if b != '.' {
continue;
}
// Invert so that a 1 bit marks an available digit; & 0x1ff keeps only the low 9 bits.
let mask = !(line[i] | column[j] | block[i / 3][j / 3]) & 0x1ff;
// mask & (mask - 1) clears the lowest set bit; if the result is 0, mask has exactly
// one bit set, i.e. there is only one candidate digit.
if mask > 0 && mask & (mask - 1) == 0 {
// The number of trailing zeros is the position of that single bit.
let digit = mask.trailing_zeros() as u8;
// Place the digit at [i][j].
Self::flip(&mut line, &mut column, &mut block, i, j, digit);
board[i][j] = (digit + '1' as u8) as char;
modified = true;
}
}
}
if !modified {
break;
}
}
// Collect the remaining blank cells.
let mut spaces = Vec::new();
for (i, row) in board.iter().enumerate() {
for (j, b) in row.iter().enumerate() {
if *b == '.' {
spaces.push((i, j));
}
}
}
Self::dfs(&spaces, 0, &mut line, &mut column, &mut block, board);
}
// Toggle the bit corresponding to `digit`.
fn flip(
line: &mut Vec<u16>,
column: &mut Vec<u16>,
block: &mut Vec<Vec<u16>>,
|
line[i] ^= 1 << digit;
column[j] ^= 1 << digit;
block[i / 3][j / 3] ^= 1 << digit;
}
fn dfs(
spaces: &Vec<(usize, usize)>,
pos: usize,
line: &mut Vec<u16>,
column: &mut Vec<u16>,
block: &mut Vec<Vec<u16>>,
board: &mut Vec<Vec<char>>,
) -> bool {
if pos == spaces.len() {
return true;
}
let (i, j) = spaces[pos];
let mut mask = !(line[i] | column[j] | block[i / 3][j / 3]) & 0x1ff;
while mask > 0 {
let digit = mask.trailing_zeros() as u8;
Self::flip(line, column, block, i, j, digit);
board[i][j] = (digit + '1' as u8) as char;
if Self::dfs(spaces, pos + 1, line, column, block, board) {
return true;
}
Self::flip(line, column, block, i, j, digit);
mask &= mask - 1;
}
false
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_solve_sudoku() {
let mut board = vec![
vec!['5', '3', '.', '.', '7', '.', '.', '.', '.'],
vec!['6', '.', '.', '1', '9', '5', '.', '.', '.'],
vec!['.', '9', '8', '.', '.', '.', '.', '6', '.'],
vec!['8', '.', '.', '.', '6', '.', '.', '.', '3'],
vec!['4', '.', '.', '8', '.', '3', '.', '.', '1'],
vec!['7', '.', '.', '.', '2', '.', '.', '.', '6'],
vec!['.', '6', '.', '.', '.', '.', '2', '8', '.'],
vec!['.', '.', '.', '4', '1', '9', '.', '.', '5'],
vec!['.', '.', '.', '.', '8', '.', '.', '7', '9'],
];
Solution::solve_sudoku(&mut board);
let want = vec![
vec!['5', '3', '4', '6', '7', '8', '9', '1', '2'],
vec!['6', '7', '2', '1', '9', '5', '3', '4', '8'],
vec!['1', '9', '8', '3', '4', '2', '5', '6', '7'],
vec!['8', '5', '9', '7', '6', '1', '4', '2', '3'],
vec!['4', '2', '6', '8', '5', '3', '7', '9', '1'],
vec!['7', '1', '3', '9', '2', '4', '8', '5', '6'],
vec!['9', '6', '1', '5', '3', '7', '2', '8', '4'],
vec!['2', '8', '7', '4', '1', '9', '6', '3', '5'],
vec!['3', '4', '5', '2', '8', '6', '1', '7', '9'],
];
assert_eq!(board, want);
}
}
|
i: usize,
j: usize,
digit: u8,
) {
|
conditional_block
|
no_0037_sudoku_solver.rs
|
struct Solution;
impl Solution {
pub fn solve_sudoku(board: &mut Vec<Vec<char>>) {
// One u16 per row: a set bit means that digit is already present in the row.
let mut line = vec![0_u16; 9];
// One u16 per column.
let mut column = vec![0_u16; 9];
// One u16 per 3 x 3 block.
let mut block = vec![vec![0_u16; 3]; 3];
// For every cell that is not blank, set the corresponding bit.
for (i, row) in board.iter().enumerate() {
for (j, b) in row.iter().enumerate() {
if *b != '.' {
Self::flip(
&mut line,
&mut column,
&mut block,
i,
j,
*b as u8 - '1' as u8,
);
}
}
}
// First mark every cell that has only one possible digit.
loop {
let mut modified = false;
let m = board.len();
let n = board[0].len();
for i in 0..m {
for j in 0..n {
let b = board[i][j];
if b != '.' {
continue;
}
// Invert so that a 1 bit marks an available digit; & 0x1ff keeps only the low 9 bits.
let mask = !(line[i] | column[j] | block[i / 3][j / 3]) & 0x1ff;
// mask & (mask - 1) clears the lowest set bit; if the result is 0, mask has exactly
// one bit set, i.e. there is only one candidate digit.
if mask > 0 && mask & (mask - 1) == 0 {
// The number of trailing zeros is the position of that single bit.
let digit = mask.trailing_zeros() as u8;
// Place the digit at [i][j].
Self::flip(&mut line, &mut column, &mut block, i, j, digit);
board[i][j] = (digit + '1' as u8) as char;
modified = true;
}
}
}
if !modified {
break;
}
}
// Collect the remaining blank cells.
let mut spaces = Vec::new();
for (i, row) in board.iter().enumerate() {
for (j, b) in row.iter().enumerate() {
if *b == '.' {
spaces.push((i, j));
}
}
}
Self::dfs(&spaces, 0, &mut line, &mut column, &mut block, board);
}
// Toggle the bit corresponding to `digit`.
fn flip(
line: &mut Vec<u16>,
column: &mut Vec<u16>,
block: &mut Vec<Vec<u16>>,
i: usize,
j: usize,
digit: u8,
) {
line[i] ^= 1 << digit;
column[j] ^= 1 << digit;
block[i / 3][j / 3] ^= 1 << digit;
}
fn dfs(
spaces: &Vec<(usize, usize)>,
pos: usize,
line: &mut Vec<u16>,
column: &mut Vec<u16>,
block: &mut Vec<Vec<u16>>,
board: &mut Vec<Vec<char>>,
) -> bool {
if pos == spaces.len() {
return true;
}
let (i, j) = spaces[pos];
let mut mask = !(line[i] | column[j] | block[i / 3][j / 3]) & 0x1ff;
while mask > 0 {
let digit = mask.trailing_zeros() as u8;
Self::flip(line, column, block, i, j, digit);
board[i][j] = (digit + '1' as u8) as char;
if Self::dfs(spaces, pos + 1, line, column, block, board) {
return true;
}
Self::flip(line, column, block, i, j, digit);
mask &= mask - 1;
}
false
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_solve_sudoku() {
let mut board = vec![
vec!['5', '3', '.', '.', '7', '.', '.', '.', '.'],
vec!['6', '.', '.', '1', '9', '5', '.', '.', '.'],
vec!['.', '9', '8', '.', '.', '.', '.', '6', '.'],
vec!['8', '.', '.', '.', '6', '.', '.', '.', '3'],
vec!['4', '.', '.', '8',
|
}
|
'.', '3', '.', '.', '1'],
vec!['7', '.', '.', '.', '2', '.', '.', '.', '6'],
vec!['.', '6', '.', '.', '.', '.', '2', '8', '.'],
vec!['.', '.', '.', '4', '1', '9', '.', '.', '5'],
vec!['.', '.', '.', '.', '8', '.', '.', '7', '9'],
];
Solution::solve_sudoku(&mut board);
let want = vec![
vec!['5', '3', '4', '6', '7', '8', '9', '1', '2'],
vec!['6', '7', '2', '1', '9', '5', '3', '4', '8'],
vec!['1', '9', '8', '3', '4', '2', '5', '6', '7'],
vec!['8', '5', '9', '7', '6', '1', '4', '2', '3'],
vec!['4', '2', '6', '8', '5', '3', '7', '9', '1'],
vec!['7', '1', '3', '9', '2', '4', '8', '5', '6'],
vec!['9', '6', '1', '5', '3', '7', '2', '8', '4'],
vec!['2', '8', '7', '4', '1', '9', '6', '3', '5'],
vec!['3', '4', '5', '2', '8', '6', '1', '7', '9'],
];
assert_eq!(board, want);
}
|
identifier_body
|
no_0037_sudoku_solver.rs
|
struct Solution;
impl Solution {
pub fn solve_sudoku(board: &mut Vec<Vec<char>>) {
// One u16 per row: a set bit means that digit is already present in the row.
let mut line = vec![0_u16; 9];
// One u16 per column.
let mut column = vec![0_u16; 9];
// One u16 per 3 x 3 block.
let mut block = vec![vec![0_u16; 3]; 3];
// For every cell that is not blank, set the corresponding bit.
for (i, row) in board.iter().enumerate() {
for (j, b) in row.iter().enumerate() {
if *b != '.' {
Self::flip(
&mut line,
&mut column,
&mut block,
i,
j,
*b as u8 - '1' as u8,
);
}
}
}
// First mark every cell that has only one possible digit.
loop {
let mut modified = false;
let m = board.len();
let n = board[0].len();
for i in 0..m {
for j in 0..n {
let b = board[i][j];
if b != '.' {
continue;
}
// Invert so that a 1 bit marks an available digit; & 0x1ff keeps only the low 9 bits.
let mask = !(line[i] | column[j] | block[i / 3][j / 3]) & 0x1ff;
// mask & (mask - 1) clears the lowest set bit; if the result is 0, mask has exactly
// one bit set, i.e. there is only one candidate digit.
if mask > 0 && mask & (mask - 1) == 0 {
// The number of trailing zeros is the position of that single bit.
let digit = mask.trailing_zeros() as u8;
// Place the digit at [i][j].
Self::flip(&mut line, &mut column, &mut block, i, j, digit);
board[i][j] = (digit + '1' as u8) as char;
modified = true;
}
}
}
if !modified {
break;
}
}
// Collect the remaining blank cells.
let mut spaces = Vec::new();
for (i, row) in board.iter().enumerate() {
for (j, b) in row.iter().enumerate() {
if *b == '.' {
spaces.push((i, j));
}
}
}
Self::dfs(&spaces, 0, &mut line, &mut column, &mut block, board);
}
// Toggle the bit corresponding to `digit`.
fn flip(
line: &mut Vec<u16>,
column: &mut Vec<u16>,
block: &mut Vec<Vec<u16>>,
i: usize,
j: usize,
digit: u8,
) {
line[i] ^= 1 << digit;
column[j] ^= 1 << digit;
block[i / 3][j / 3] ^= 1 << digit;
}
fn dfs(
spaces: &Vec<(usize, usize)>,
pos: usize,
line: &mut Vec<u16>,
column: &mut Vec<u16>,
block: &mut Vec<Vec<u16>>,
board: &mut Vec<Vec<char>>,
) -> bool {
if pos == spaces.len() {
return true;
}
let (i, j) = spaces[pos];
let mut mask = !(line[i] | column[j] | block[i / 3][j / 3]) & 0x1ff;
while mask > 0 {
let digit = mask.trailing_zeros() as u8;
Self::flip(line, column, block, i, j, digit);
board[i][j] = (digit + '1' as u8) as char;
if Self::dfs(spaces, pos + 1, line, column, block, board) {
return true;
}
Self::flip(line, column, block, i, j, digit);
mask &= mask - 1;
}
false
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_solve_sudoku() {
let mut board = vec![
vec!['5', '3', '.', '.', '7', '.', '.', '.', '.'],
vec!['6', '.', '.', '1', '9', '5', '.', '.', '.'],
vec!['.', '9', '8', '.', '.', '.', '.', '6', '.'],
vec!['8', '.', '.', '.', '6', '.', '.', '.', '3'],
vec!['4', '.', '.', '8', '.', '3', '.', '.', '1'],
vec!['7', '.', '.', '.', '2', '.', '.', '.', '6'],
vec!['.', '6', '.', '.', '.', '.', '2', '8', '.'],
vec!['.', '.', '.', '4', '1', '9', '.', '.', '5'],
vec!['.', '.', '.', '.', '8', '.', '.', '7', '9'],
];
Solution::solve_sudoku(&mut board);
let want = vec![
vec!['5', '3', '4', '6', '7', '8', '9', '1', '2'],
vec!['6', '7', '2', '1', '9', '5', '3', '4', '8'],
vec!['1', '9', '8', '3', '4', '2', '5', '6', '7'],
vec!['8', '5', '9', '7', '6', '1', '4', '2', '3'],
vec!['4', '2', '6', '8', '5', '3', '7', '9', '1'],
vec!['7', '1', '3', '9', '2', '4', '8', '5', '6'],
|
vec!['3', '4', '5', '2', '8', '6', '1', '7', '9'],
];
assert_eq!(board, want);
}
}
|
vec!['9', '6', '1', '5', '3', '7', '2', '8', '4'],
vec!['2', '8', '7', '4', '1', '9', '6', '3', '5'],
|
random_line_split
|
lib.rs
|
#[derive(Debug, PartialEq)]
pub enum Comparison {
Equal,
Sublist,
Superlist,
Unequal,
}
pub fn sublist<T: PartialEq>(list1: &[T], list2: &[T]) -> Comparison {
if list1.len() == list2.len() {
if list1 == list2 {
return Comparison::Equal;
} else {
return Comparison::Unequal;
}
} else if list1.len() < list2.len() {
let mut slice = &list2[..];
while slice.len() >= list1.len() {
if list1 == &slice[..list1.len()] {
return Comparison::Sublist;
}
slice = &slice[1..];
}
} else {
let mut slice = &list1[..];
while slice.len() >= list2.len() {
if list2 == &slice[..list2.len()] {
return Comparison::Superlist;
}
slice = &slice[1..];
}
}
Comparison::Unequal
|
}
|
random_line_split
|
|
lib.rs
|
#[derive(Debug, PartialEq)]
pub enum Comparison {
Equal,
Sublist,
Superlist,
Unequal,
}
pub fn sublist<T: PartialEq>(list1: &[T], list2: &[T]) -> Comparison {
if list1.len() == list2.len() {
if list1 == list2 {
return Comparison::Equal;
} else {
return Comparison::Unequal;
}
} else if list1.len() < list2.len() {
let mut slice = &list2[..];
while slice.len() >= list1.len() {
if list1 == &slice[..list1.len()] {
return Comparison::Sublist;
}
slice = &slice[1..];
}
} else
|
Comparison::Unequal
}
|
{
let mut slice = &list1[..];
while slice.len() >= list2.len() {
if list2 == &slice[..list2.len()] {
return Comparison::Superlist;
}
slice = &slice[1..];
}
}
|
conditional_block
|
lib.rs
|
#[derive(Debug, PartialEq)]
pub enum
|
{
Equal,
Sublist,
Superlist,
Unequal,
}
pub fn sublist<T: PartialEq>(list1: &[T], list2: &[T]) -> Comparison {
if list1.len() == list2.len() {
if list1 == list2 {
return Comparison::Equal;
} else {
return Comparison::Unequal;
}
} else if list1.len() < list2.len() {
let mut slice = &list2[..];
while slice.len() >= list1.len() {
if list1 == &slice[..list1.len()] {
return Comparison::Sublist;
}
slice = &slice[1..];
}
} else {
let mut slice = &list1[..];
while slice.len() >= list2.len() {
if list2 == &slice[..list2.len()] {
return Comparison::Superlist;
}
slice = &slice[1..];
}
}
Comparison::Unequal
}
|
Comparison
|
identifier_name
|
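The manual slice-walking loop above can also be expressed with windows(), which yields every contiguous subslice of the longer list. The sketch below is an alternative formulation of the same comparison, not the exercise's reference solution.

#[derive(Debug, PartialEq)]
pub enum Comparison {
    Equal,
    Sublist,
    Superlist,
    Unequal,
}

// `needle` is contained in `haystack` if any window of the same length matches;
// an empty needle is trivially contained.
fn contains<T: PartialEq>(haystack: &[T], needle: &[T]) -> bool {
    needle.is_empty() || haystack.windows(needle.len()).any(|w| w == needle)
}

pub fn sublist<T: PartialEq>(list1: &[T], list2: &[T]) -> Comparison {
    match (list1.len(), list2.len()) {
        (a, b) if a == b => {
            if list1 == list2 { Comparison::Equal } else { Comparison::Unequal }
        }
        (a, b) if a < b && contains(list2, list1) => Comparison::Sublist,
        (a, b) if a > b && contains(list1, list2) => Comparison::Superlist,
        _ => Comparison::Unequal,
    }
}

fn main() {
    assert_eq!(sublist(&[1, 2], &[1, 2, 3]), Comparison::Sublist);
    assert_eq!(sublist(&[1, 2, 3], &[2, 3]), Comparison::Superlist);
    assert_eq!(sublist::<i32>(&[], &[]), Comparison::Equal);
}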
server.rs
|
extern crate serde_json;
use std::net::TcpListener;
use std::io::{BufReader, BufRead};
use std::thread;
use chat::ChatMessage;
use chat::ReplyMessage;
use std::sync::mpsc::Sender;
pub fn listen(sender: Sender<ReplyMessage>) {
let listener = TcpListener::bind("127.0.0.1:2933").expect("Failed to bind port");
for stream in listener.incoming() {
match stream {
Ok(stream) =>
|
Err(e) => {
println!("{:?}", e);
}
}
}
}
|
{
let sender = sender.clone();
thread::spawn(move || {
let reader = BufReader::new(stream);
for line in reader.lines() {
let message: Result<ChatMessage, _> = serde_json::from_str(line.unwrap().as_str());
match message {
Ok(message) => {
println!("{:?}", message);
let _ = sender.send(ReplyMessage(message, None));
},
Err(e) => println!("Error parsing json: {:?}", e),
}
}
});
}
|
conditional_block
|
server.rs
|
extern crate serde_json;
use std::net::TcpListener;
use std::io::{BufReader, BufRead};
use std::thread;
use chat::ChatMessage;
use chat::ReplyMessage;
use std::sync::mpsc::Sender;
|
pub fn listen(sender: Sender<ReplyMessage>) {
let listener = TcpListener::bind("127.0.0.1:2933").expect("Failed to bind port");
for stream in listener.incoming() {
match stream {
Ok(stream) => {
let sender = sender.clone();
thread::spawn(move || {
let reader = BufReader::new(stream);
for line in reader.lines() {
let message: Result<ChatMessage, _> = serde_json::from_str(line.unwrap().as_str());
match message {
Ok(message) => {
println!("{:?}", message);
let _ = sender.send(ReplyMessage(message, None));
},
Err(e) => println!("Error parsing json: {:?}", e),
}
}
});
}
Err(e) => {
println!("{:?}", e);
}
}
}
}
|
random_line_split
|
|
server.rs
|
extern crate serde_json;
use std::net::TcpListener;
use std::io::{BufReader, BufRead};
use std::thread;
use chat::ChatMessage;
use chat::ReplyMessage;
use std::sync::mpsc::Sender;
pub fn
|
(sender: Sender<ReplyMessage>) {
let listener = TcpListener::bind("127.0.0.1:2933").expect("Failed to bind port");
for stream in listener.incoming() {
match stream {
Ok(stream) => {
let sender = sender.clone();
thread::spawn(move || {
let reader = BufReader::new(stream);
for line in reader.lines() {
let message: Result<ChatMessage, _> = serde_json::from_str(line.unwrap().as_str());
match message {
Ok(message) => {
println!("{:?}", message);
let _ = sender.send(ReplyMessage(message, None));
},
Err(e) => println!("Error parsing json: {:?}", e),
}
}
});
}
Err(e) => {
println!("{:?}", e);
}
}
}
}
|
listen
|
identifier_name
|
server.rs
|
extern crate serde_json;
use std::net::TcpListener;
use std::io::{BufReader, BufRead};
use std::thread;
use chat::ChatMessage;
use chat::ReplyMessage;
use std::sync::mpsc::Sender;
pub fn listen(sender: Sender<ReplyMessage>)
|
Err(e) => {
println!("{:?}", e);
}
}
}
}
|
{
let listener = TcpListener::bind("127.0.0.1:2933").expect("Failed to bind port");
for stream in listener.incoming() {
match stream {
Ok(stream) => {
let sender = sender.clone();
thread::spawn(move || {
let reader = BufReader::new(stream);
for line in reader.lines() {
let message: Result<ChatMessage, _> = serde_json::from_str(line.unwrap().as_str());
match message {
Ok(message) => {
println!("{:?}", message);
let _ = sender.send(ReplyMessage(message, None));
},
Err(e) => println!("Error parsing json: {:?}", e),
}
}
});
}
|
identifier_body
|
packet.rs
|
//! SIMD intersection result
use crate::geometry::{f32xN, m32xN, V3DxN};
use crate::intersection::Isect;
/// Intersection result
#[derive(Copy, Clone, Debug)]
pub struct IsectxN {
pub t: f32xN,
pub p: V3DxN,
pub n: V3DxN,
pub hit: m32xN,
}
impl Default for IsectxN {
#[inline]
fn
|
() -> Self {
Self {
t: f32xN::splat(1e17),
hit: m32xN::splat(false),
p: V3DxN::default(),
n: V3DxN::default(),
}
}
}
impl IsectxN {
pub fn get(&self, idx: usize) -> Isect {
Isect {
t: self.t.extract(idx),
p: self.p.get(idx),
n: self.n.get(idx),
hit: self.hit.extract(idx),
}
}
}
|
default
|
identifier_name
|
packet.rs
|
//! SIMD intersection result
use crate::geometry::{f32xN, m32xN, V3DxN};
use crate::intersection::Isect;
/// Intersection result
#[derive(Copy, Clone, Debug)]
pub struct IsectxN {
pub t: f32xN,
pub p: V3DxN,
pub n: V3DxN,
pub hit: m32xN,
}
|
impl Default for IsectxN {
#[inline]
fn default() -> Self {
Self {
t: f32xN::splat(1e17),
hit: m32xN::splat(false),
p: V3DxN::default(),
n: V3DxN::default(),
}
}
}
impl IsectxN {
pub fn get(&self, idx: usize) -> Isect {
Isect {
t: self.t.extract(idx),
p: self.p.get(idx),
n: self.n.get(idx),
hit: self.hit.extract(idx),
}
}
}
|
random_line_split
|
|
packet.rs
|
//! SIMD intersection result
use crate::geometry::{f32xN, m32xN, V3DxN};
use crate::intersection::Isect;
/// Intersection result
#[derive(Copy, Clone, Debug)]
pub struct IsectxN {
pub t: f32xN,
pub p: V3DxN,
pub n: V3DxN,
pub hit: m32xN,
}
impl Default for IsectxN {
#[inline]
fn default() -> Self
|
}
impl IsectxN {
pub fn get(&self, idx: usize) -> Isect {
Isect {
t: self.t.extract(idx),
p: self.p.get(idx),
n: self.n.get(idx),
hit: self.hit.extract(idx),
}
}
}
|
{
Self {
t: f32xN::splat(1e17),
hit: m32xN::splat(false),
p: V3DxN::default(),
n: V3DxN::default(),
}
}
|
identifier_body
|
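IsectxN stores one SIMD lane per ray, and get(idx) extracts a scalar Isect for lane idx. Below is a dependency-free stand-in for the splat/extract surface it relies on, backed by a plain array instead of a real SIMD vector; F32x4 and IsectX4 are illustrative names, not the crate's types.

// Minimal stand-in for a 4-lane SIMD float: same splat/extract surface,
// scalar implementation underneath.
#[derive(Copy, Clone, Debug)]
struct F32x4([f32; 4]);

impl F32x4 {
    fn splat(v: f32) -> Self {
        F32x4([v; 4])
    }
    fn extract(&self, idx: usize) -> f32 {
        self.0[idx]
    }
}

#[derive(Copy, Clone, Debug)]
struct IsectX4 {
    t: F32x4,
    hit: [bool; 4],
}

impl IsectX4 {
    // Pull one lane out as a scalar result, like IsectxN::get.
    fn get(&self, idx: usize) -> (f32, bool) {
        (self.t.extract(idx), self.hit[idx])
    }
}

fn main() {
    let isect = IsectX4 { t: F32x4::splat(1e17), hit: [false; 4] };
    println!("lane 2 = {:?}", isect.get(2));
}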
stats.rs
|
// Copyright (c) 2014 Richard Diamond & contributors.
//
// This file is part of Rust Rocket.
//
// Rust Rocket is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Rust Rocket is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with Rust Rocket. If not, see <http://www.gnu.org/licenses/>.
//! Stat collection, accounting, and consideration.
//! FIXME(diamond): collect memory consumption and hdd activity stats.
|
use std::collections::HashMap;
use address::Address;
enum Message {
}
#[deriving(Clone, Encodable, Decodable)]
pub struct Router {
/// Every train yard has its own entry here:
time: Vec<u64>,
}
#[deriving(Encodable, Decodable)]
struct Session {
times: HashMap<Address, u64>,
}
#[deriving(Clone)]
pub struct SessionIf {
chan: Sender<Message>,
}
|
use std::comm::Sender;
|
random_line_split
|
stats.rs
|
// Copyright (c) 2014 Richard Diamond & contributors.
//
// This file is part of Rust Rocket.
//
// Rust Rocket is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Rust Rocket is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with Rust Rocket. If not, see <http://www.gnu.org/licenses/>.
//! Stat collection, accounting, and consideration.
//! FIXME(diamond): collect memory consumption and hdd activity stats.
use std::comm::Sender;
use std::collections::HashMap;
use address::Address;
enum Message {
}
#[deriving(Clone, Encodable, Decodable)]
pub struct Router {
/// Every train yard has its own entry here:
time: Vec<u64>,
}
#[deriving(Encodable, Decodable)]
struct
|
{
times: HashMap<Address, u64>,
}
#[deriving(Clone)]
pub struct SessionIf {
chan: Sender<Message>,
}
|
Session
|
identifier_name
|
bounds.rs
|
// This trait implements the print marker: `{:?}`.
use std::fmt::Debug;
trait HasArea {
fn area(&self) -> f64;
}
impl HasArea for Rectangle {
fn area(&self) -> f64 { self.length * self.height }
}
#[derive(Debug)]
struct Rectangle { length: f64, height: f64 }
#[allow(dead_code)]
struct Triangle { length: f64, height: f64 }
// The generic `T` must implement `Debug`. Regardless of the type, this works properly.
fn print_debug<T: Debug>(t: &T) {
println!("{:?}", t);
}
// `T` must implement `HasArea`. Any function which meets
// the bound can access `HasArea`'s function `area`.
fn area<T: HasArea>(t: &T) -> f64 { t.area() }
fn main() {
let rectangle = Rectangle { length: 3.0, height: 4.0 };
let _triangle = Triangle { length: 3.0, heigh
|
t: 4.0 };
print_debug(&rectangle);
println!("Area: {}", area(&rectangle));
//print_debug(&_triangle);
//println!("Area: {}", area(&_triangle));
    // ^ TODO: Try uncommenting these statements.
    // | Error: does not implement either `Debug` or `HasArea`.
}
|
identifier_body
|
|
bounds.rs
|
// This trait implements the print marker: `{:?}`.
use std::fmt::Debug;
trait HasArea {
fn area(&self) -> f64;
}
impl HasArea for Rectangle {
fn area(&self) -> f64 { self.length * self.height }
}
#[derive(Debug)]
struct Rectangle { length: f64, height: f64 }
#[allow(dead_code)]
struct Triangle { length: f6
|
t: f64 }
// The generic `T` must implement `Debug`. Regardless of the type, this works properly.
fn print_debug<T: Debug>(t: &T) {
println!("{:?}", t);
}
// `T` must implement `HasArea`. Any function which meets
// the bound can access `HasArea`'s function `area`.
fn area<T: HasArea>(t: &T) -> f64 { t.area() }
fn main() {
let rectangle = Rectangle { length: 3.0, height: 4.0 };
let _triangle = Triangle { length: 3.0, height: 4.0 };
print_debug(&rectangle);
println!("Area: {}", area(&rectangle));
//print_debug(&_triangle);
//println!("Area: {}", area(&_triangle));
    // ^ TODO: Try uncommenting these statements.
    // | Error: does not implement either `Debug` or `HasArea`.
}
|
4, heigh
|
identifier_name
|
bounds.rs
|
// This trait implements the print marker: `{:?}`.
use std::fmt::Debug;
trait HasArea {
fn area(&self) -> f64;
}
impl HasArea for Rectangle {
fn area(&self) -> f64 { self.length * self.height }
}
#[derive(Debug)]
struct Rectangle { length: f64, height: f64 }
#[allow(dead_code)]
|
// The generic `T` must implement `Debug`. Regardless of the type, this works properly.
fn print_debug<T: Debug>(t: &T) {
println!("{:?}", t);
}
// `T` must implement `HasArea`. Any function which meets
// the bound can access `HasArea`'s function `area`.
fn area<T: HasArea>(t: &T) -> f64 { t.area() }
fn main() {
let rectangle = Rectangle { length: 3.0, height: 4.0 };
let _triangle = Triangle { length: 3.0, height: 4.0 };
print_debug(&rectangle);
println!("Area: {}", area(&rectangle));
//print_debug(&_triangle);
//println!("Area: {}", area(&_triangle));
    // ^ TODO: Try uncommenting these statements.
    // | Error: does not implement either `Debug` or `HasArea`.
}
|
struct Triangle { length: f64, height: f64 }
|
random_line_split
|
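The bounds.rs samples above demonstrate trait bounds on generic functions. As a small side sketch (not part of the original sample), the same kind of constraint can also be written with a `where` clause, which tends to read better once several bounds pile up:

use std::fmt::Debug;

trait HasArea {
    fn area(&self) -> f64;
}

#[derive(Debug)]
struct Square { side: f64 }

impl HasArea for Square {
    fn area(&self) -> f64 { self.side * self.side }
}

// Equivalent to `fn describe<T: Debug + HasArea>(t: &T)`, written with `where`.
fn describe<T>(t: &T)
where
    T: Debug + HasArea,
{
    println!("{:?} has area {}", t, t.area());
}

fn main() {
    describe(&Square { side: 2.0 });
}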
is_ascii.rs
|
use super::{LONG, MEDIUM, SHORT};
use test::black_box;
use test::Bencher;
macro_rules! benches {
($( fn $name: ident($arg: ident: &[u8]) $body: block )+) => {
benches!(mod short SHORT[..] $($name $arg $body)+);
benches!(mod medium MEDIUM[..] $($name $arg $body)+);
benches!(mod long LONG[..] $($name $arg $body)+);
// Ensure we benchmark cases where the functions are called with strings
// that are not perfectly aligned or have a length which is not a
// multiple of size_of::<usize>() (or both)
benches!(mod unaligned_head MEDIUM[1..] $($name $arg $body)+);
benches!(mod unaligned_tail MEDIUM[..(MEDIUM.len() - 1)] $($name $arg $body)+);
benches!(mod unaligned_both MEDIUM[1..(MEDIUM.len() - 1)] $($name $arg $body)+);
};
(mod $mod_name: ident $input: ident [$range: expr] $($name: ident $arg: ident $body: block)+) => {
mod $mod_name {
use super::*;
$(
#[bench]
fn $name(bencher: &mut Bencher) {
bencher.bytes = $input[$range].len() as u64;
let mut vec = $input.as_bytes().to_vec();
bencher.iter(|| {
let $arg: &[u8] = &black_box(&mut vec)[$range];
black_box($body)
})
}
)+
}
};
}
benches! {
fn case00_libcore(bytes: &[u8]) {
bytes.is_ascii()
}
fn case01_iter_all(bytes: &[u8]) {
bytes.iter().all(|b| b.is_ascii())
}
fn case02_align_to(bytes: &[u8]) {
is_ascii_align_to(bytes)
}
fn case03_align_to_unrolled(bytes: &[u8]) {
is_ascii_align_to_unrolled(bytes)
}
}
|
return bytes.iter().all(|b| b.is_ascii());
}
// SAFETY: transmuting a sequence of `u8` to `usize` is always fine
let (head, body, tail) = unsafe { bytes.align_to::<usize>() };
head.iter().all(|b| b.is_ascii())
        && body.iter().all(|w| !contains_nonascii(*w))
&& tail.iter().all(|b| b.is_ascii())
}
fn is_ascii_align_to_unrolled(bytes: &[u8]) -> bool {
if bytes.len() < core::mem::size_of::<usize>() {
return bytes.iter().all(|b| b.is_ascii());
}
// SAFETY: transmuting a sequence of `u8` to `[usize; 2]` is always fine
let (head, body, tail) = unsafe { bytes.align_to::<[usize; 2]>() };
head.iter().all(|b| b.is_ascii())
        && body.iter().all(|w| !contains_nonascii(w[0] | w[1]))
&& tail.iter().all(|b| b.is_ascii())
}
#[inline]
fn contains_nonascii(v: usize) -> bool {
const NONASCII_MASK: usize = 0x80808080_80808080u64 as usize;
    (NONASCII_MASK & v) != 0
}
|
// These are separate since it's easier to debug errors if they don't go through
// macro expansion first.
fn is_ascii_align_to(bytes: &[u8]) -> bool {
if bytes.len() < core::mem::size_of::<usize>() {
|
random_line_split
|
is_ascii.rs
|
use super::{LONG, MEDIUM, SHORT};
use test::black_box;
use test::Bencher;
macro_rules! benches {
($( fn $name: ident($arg: ident: &[u8]) $body: block )+) => {
benches!(mod short SHORT[..] $($name $arg $body)+);
benches!(mod medium MEDIUM[..] $($name $arg $body)+);
benches!(mod long LONG[..] $($name $arg $body)+);
// Ensure we benchmark cases where the functions are called with strings
// that are not perfectly aligned or have a length which is not a
// multiple of size_of::<usize>() (or both)
benches!(mod unaligned_head MEDIUM[1..] $($name $arg $body)+);
benches!(mod unaligned_tail MEDIUM[..(MEDIUM.len() - 1)] $($name $arg $body)+);
benches!(mod unaligned_both MEDIUM[1..(MEDIUM.len() - 1)] $($name $arg $body)+);
};
(mod $mod_name: ident $input: ident [$range: expr] $($name: ident $arg: ident $body: block)+) => {
mod $mod_name {
use super::*;
$(
#[bench]
fn $name(bencher: &mut Bencher) {
bencher.bytes = $input[$range].len() as u64;
let mut vec = $input.as_bytes().to_vec();
bencher.iter(|| {
let $arg: &[u8] = &black_box(&mut vec)[$range];
black_box($body)
})
}
)+
}
};
}
benches! {
fn case00_libcore(bytes: &[u8]) {
bytes.is_ascii()
}
fn case01_iter_all(bytes: &[u8]) {
bytes.iter().all(|b| b.is_ascii())
}
fn case02_align_to(bytes: &[u8]) {
is_ascii_align_to(bytes)
}
fn case03_align_to_unrolled(bytes: &[u8]) {
is_ascii_align_to_unrolled(bytes)
}
}
// These are separate since it's easier to debug errors if they don't go through
// macro expansion first.
fn is_ascii_align_to(bytes: &[u8]) -> bool {
if bytes.len() < core::mem::size_of::<usize>()
|
// SAFETY: transmuting a sequence of `u8` to `usize` is always fine
let (head, body, tail) = unsafe { bytes.align_to::<usize>() };
head.iter().all(|b| b.is_ascii())
        && body.iter().all(|w| !contains_nonascii(*w))
&& tail.iter().all(|b| b.is_ascii())
}
fn is_ascii_align_to_unrolled(bytes: &[u8]) -> bool {
if bytes.len() < core::mem::size_of::<usize>() {
return bytes.iter().all(|b| b.is_ascii());
}
// SAFETY: transmuting a sequence of `u8` to `[usize; 2]` is always fine
let (head, body, tail) = unsafe { bytes.align_to::<[usize; 2]>() };
head.iter().all(|b| b.is_ascii())
        && body.iter().all(|w| !contains_nonascii(w[0] | w[1]))
&& tail.iter().all(|b| b.is_ascii())
}
#[inline]
fn contains_nonascii(v: usize) -> bool {
const NONASCII_MASK: usize = 0x80808080_80808080u64 as usize;
    (NONASCII_MASK & v) != 0
}
|
{
return bytes.iter().all(|b| b.is_ascii());
}
|
conditional_block
|
is_ascii.rs
|
use super::{LONG, MEDIUM, SHORT};
use test::black_box;
use test::Bencher;
macro_rules! benches {
($( fn $name: ident($arg: ident: &[u8]) $body: block )+) => {
benches!(mod short SHORT[..] $($name $arg $body)+);
benches!(mod medium MEDIUM[..] $($name $arg $body)+);
benches!(mod long LONG[..] $($name $arg $body)+);
// Ensure we benchmark cases where the functions are called with strings
// that are not perfectly aligned or have a length which is not a
// multiple of size_of::<usize>() (or both)
benches!(mod unaligned_head MEDIUM[1..] $($name $arg $body)+);
benches!(mod unaligned_tail MEDIUM[..(MEDIUM.len() - 1)] $($name $arg $body)+);
benches!(mod unaligned_both MEDIUM[1..(MEDIUM.len() - 1)] $($name $arg $body)+);
};
(mod $mod_name: ident $input: ident [$range: expr] $($name: ident $arg: ident $body: block)+) => {
mod $mod_name {
use super::*;
$(
#[bench]
fn $name(bencher: &mut Bencher) {
bencher.bytes = $input[$range].len() as u64;
let mut vec = $input.as_bytes().to_vec();
bencher.iter(|| {
let $arg: &[u8] = &black_box(&mut vec)[$range];
black_box($body)
})
}
)+
}
};
}
benches! {
fn case00_libcore(bytes: &[u8]) {
bytes.is_ascii()
}
fn case01_iter_all(bytes: &[u8]) {
bytes.iter().all(|b| b.is_ascii())
}
fn case02_align_to(bytes: &[u8]) {
is_ascii_align_to(bytes)
}
fn case03_align_to_unrolled(bytes: &[u8]) {
is_ascii_align_to_unrolled(bytes)
}
}
// These are separate since it's easier to debug errors if they don't go through
// macro expansion first.
fn
|
(bytes: &[u8]) -> bool {
if bytes.len() < core::mem::size_of::<usize>() {
return bytes.iter().all(|b| b.is_ascii());
}
// SAFETY: transmuting a sequence of `u8` to `usize` is always fine
let (head, body, tail) = unsafe { bytes.align_to::<usize>() };
head.iter().all(|b| b.is_ascii())
        && body.iter().all(|w| !contains_nonascii(*w))
&& tail.iter().all(|b| b.is_ascii())
}
fn is_ascii_align_to_unrolled(bytes: &[u8]) -> bool {
if bytes.len() < core::mem::size_of::<usize>() {
return bytes.iter().all(|b| b.is_ascii());
}
// SAFETY: transmuting a sequence of `u8` to `[usize; 2]` is always fine
let (head, body, tail) = unsafe { bytes.align_to::<[usize; 2]>() };
head.iter().all(|b| b.is_ascii())
        && body.iter().all(|w| !contains_nonascii(w[0] | w[1]))
&& tail.iter().all(|b| b.is_ascii())
}
#[inline]
fn contains_nonascii(v: usize) -> bool {
const NONASCII_MASK: usize = 0x80808080_80808080u64 as usize;
    (NONASCII_MASK & v) != 0
}
|
is_ascii_align_to
|
identifier_name
|
is_ascii.rs
|
use super::{LONG, MEDIUM, SHORT};
use test::black_box;
use test::Bencher;
macro_rules! benches {
($( fn $name: ident($arg: ident: &[u8]) $body: block )+) => {
benches!(mod short SHORT[..] $($name $arg $body)+);
benches!(mod medium MEDIUM[..] $($name $arg $body)+);
benches!(mod long LONG[..] $($name $arg $body)+);
// Ensure we benchmark cases where the functions are called with strings
// that are not perfectly aligned or have a length which is not a
// multiple of size_of::<usize>() (or both)
benches!(mod unaligned_head MEDIUM[1..] $($name $arg $body)+);
benches!(mod unaligned_tail MEDIUM[..(MEDIUM.len() - 1)] $($name $arg $body)+);
benches!(mod unaligned_both MEDIUM[1..(MEDIUM.len() - 1)] $($name $arg $body)+);
};
(mod $mod_name: ident $input: ident [$range: expr] $($name: ident $arg: ident $body: block)+) => {
mod $mod_name {
use super::*;
$(
#[bench]
fn $name(bencher: &mut Bencher) {
bencher.bytes = $input[$range].len() as u64;
let mut vec = $input.as_bytes().to_vec();
bencher.iter(|| {
let $arg: &[u8] = &black_box(&mut vec)[$range];
black_box($body)
})
}
)+
}
};
}
benches! {
fn case00_libcore(bytes: &[u8]) {
bytes.is_ascii()
}
fn case01_iter_all(bytes: &[u8]) {
bytes.iter().all(|b| b.is_ascii())
}
fn case02_align_to(bytes: &[u8]) {
is_ascii_align_to(bytes)
}
fn case03_align_to_unrolled(bytes: &[u8]) {
is_ascii_align_to_unrolled(bytes)
}
}
// These are separate since it's easier to debug errors if they don't go through
// macro expansion first.
fn is_ascii_align_to(bytes: &[u8]) -> bool
|
fn is_ascii_align_to_unrolled(bytes: &[u8]) -> bool {
if bytes.len() < core::mem::size_of::<usize>() {
return bytes.iter().all(|b| b.is_ascii());
}
// SAFETY: transmuting a sequence of `u8` to `[usize; 2]` is always fine
let (head, body, tail) = unsafe { bytes.align_to::<[usize; 2]>() };
head.iter().all(|b| b.is_ascii())
        && body.iter().all(|w| !contains_nonascii(w[0] | w[1]))
&& tail.iter().all(|b| b.is_ascii())
}
#[inline]
fn contains_nonascii(v: usize) -> bool {
const NONASCII_MASK: usize = 0x80808080_80808080u64 as usize;
    (NONASCII_MASK & v) != 0
}
|
{
if bytes.len() < core::mem::size_of::<usize>() {
return bytes.iter().all(|b| b.is_ascii());
}
// SAFETY: transmuting a sequence of `u8` to `usize` is always fine
let (head, body, tail) = unsafe { bytes.align_to::<usize>() };
head.iter().all(|b| b.is_ascii())
&& body.iter().all(|w| !contains_nonascii(*w))
&& tail.iter().all(|b| b.is_ascii())
}
|
identifier_body
|
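The is_ascii.rs samples above hinge on contains_nonascii: ASCII bytes never have their high bit set, so AND-ing a whole machine word against a repeated 0x80 mask detects any non-ASCII byte at once. A standalone sketch of just that check, using u64 for portability where the original uses usize words obtained via align_to (benchmark harness omitted):

fn contains_nonascii(word: u64) -> bool {
    // 0x80 repeated in every byte of the word.
    const NONASCII_MASK: u64 = 0x8080_8080_8080_8080;
    (NONASCII_MASK & word) != 0
}

fn main() {
    let ascii = u64::from_ne_bytes(*b"abcdefgh");        // every byte < 0x80
    let non_ascii = u64::from_ne_bytes(*b"abc\xC3efgh"); // one byte has the high bit set
    assert!(!contains_nonascii(ascii));
    assert!(contains_nonascii(non_ascii));
    println!("mask check works");
}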
game_state.rs
|
// See LICENSE file for copyright and license details.
use crate::core::core::{Event, ObjectTypes, Unit};
use crate::core::types::{MapPos, PlayerId, UnitId};
use std::collections::HashMap;
pub struct GameState {
pub units: HashMap<UnitId, Unit>,
}
impl<'a> GameState {
pub fn new() -> GameState {
GameState {
units: HashMap::new(),
}
}
pub fn units_at(&'a self, pos: MapPos) -> Vec<&'a Unit> {
let mut units = Vec::new();
for (_, unit) in self.units.iter() {
if unit.pos == pos {
units.push(unit);
}
}
units
}
fn refresh_units(&mut self, object_types: &ObjectTypes, player_id: PlayerId) {
for (_, unit) in self.units.iter_mut() {
if unit.player_id == player_id {
unit.move_points = object_types.get_unit_type(unit.type_id).move_points;
unit.attacked = false;
}
}
}
pub fn apply_event(&mut self, object_types: &ObjectTypes, event: &Event) {
match event {
Event::EventMove(id, ref path) => {
let pos = *path.last().unwrap();
|
unit.pos = pos;
assert!(unit.move_points > 0);
unit.move_points = 0;
}
Event::EventEndTurn(_, new_player_id) => {
self.refresh_units(object_types, new_player_id.clone());
}
Event::EventCreateUnit(id, pos, type_id, player_id) => {
assert!(self.units.get(&id).is_none());
let move_points = object_types.get_unit_type(type_id.clone()).move_points;
let _ = self.units.insert(
id.clone(),
Unit {
id: id.clone(),
pos: pos.clone(),
player_id: player_id.clone(),
type_id: type_id.clone(),
move_points: move_points.clone(),
attacked: false,
},
);
}
Event::EventAttackUnit(attacker_id, defender_id, killed) => {
if *killed {
assert!(self.units.get(&defender_id).is_some());
let _ = self.units.remove(&defender_id).unwrap();
}
let unit = self.units.get_mut(&attacker_id).unwrap();
assert!(!unit.attacked);
unit.attacked = true;
}
}
}
}
// vim: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
|
let unit = self.units.get_mut(&id).unwrap();
|
random_line_split
|
game_state.rs
|
// See LICENSE file for copyright and license details.
use crate::core::core::{Event, ObjectTypes, Unit};
use crate::core::types::{MapPos, PlayerId, UnitId};
use std::collections::HashMap;
pub struct GameState {
pub units: HashMap<UnitId, Unit>,
}
impl<'a> GameState {
pub fn new() -> GameState {
GameState {
units: HashMap::new(),
}
}
pub fn units_at(&'a self, pos: MapPos) -> Vec<&'a Unit> {
let mut units = Vec::new();
for (_, unit) in self.units.iter() {
if unit.pos == pos {
units.push(unit);
}
}
units
}
fn
|
(&mut self, object_types: &ObjectTypes, player_id: PlayerId) {
for (_, unit) in self.units.iter_mut() {
if unit.player_id == player_id {
unit.move_points = object_types.get_unit_type(unit.type_id).move_points;
unit.attacked = false;
}
}
}
pub fn apply_event(&mut self, object_types: &ObjectTypes, event: &Event) {
match event {
Event::EventMove(id, ref path) => {
let pos = *path.last().unwrap();
let unit = self.units.get_mut(&id).unwrap();
unit.pos = pos;
assert!(unit.move_points > 0);
unit.move_points = 0;
}
Event::EventEndTurn(_, new_player_id) => {
self.refresh_units(object_types, new_player_id.clone());
}
Event::EventCreateUnit(id, pos, type_id, player_id) => {
assert!(self.units.get(&id).is_none());
let move_points = object_types.get_unit_type(type_id.clone()).move_points;
let _ = self.units.insert(
id.clone(),
Unit {
id: id.clone(),
pos: pos.clone(),
player_id: player_id.clone(),
type_id: type_id.clone(),
move_points: move_points.clone(),
attacked: false,
},
);
}
Event::EventAttackUnit(attacker_id, defender_id, killed) => {
if *killed {
assert!(self.units.get(&defender_id).is_some());
let _ = self.units.remove(&defender_id).unwrap();
}
let unit = self.units.get_mut(&attacker_id).unwrap();
assert!(!unit.attacked);
unit.attacked = true;
}
}
}
}
// vim: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
|
refresh_units
|
identifier_name
|
game_state.rs
|
// See LICENSE file for copyright and license details.
use crate::core::core::{Event, ObjectTypes, Unit};
use crate::core::types::{MapPos, PlayerId, UnitId};
use std::collections::HashMap;
pub struct GameState {
pub units: HashMap<UnitId, Unit>,
}
impl<'a> GameState {
pub fn new() -> GameState {
GameState {
units: HashMap::new(),
}
}
pub fn units_at(&'a self, pos: MapPos) -> Vec<&'a Unit> {
let mut units = Vec::new();
for (_, unit) in self.units.iter() {
if unit.pos == pos {
units.push(unit);
}
}
units
}
fn refresh_units(&mut self, object_types: &ObjectTypes, player_id: PlayerId) {
for (_, unit) in self.units.iter_mut() {
if unit.player_id == player_id
|
}
}
pub fn apply_event(&mut self, object_types: &ObjectTypes, event: &Event) {
match event {
Event::EventMove(id, ref path) => {
let pos = *path.last().unwrap();
let unit = self.units.get_mut(&id).unwrap();
unit.pos = pos;
assert!(unit.move_points > 0);
unit.move_points = 0;
}
Event::EventEndTurn(_, new_player_id) => {
self.refresh_units(object_types, new_player_id.clone());
}
Event::EventCreateUnit(id, pos, type_id, player_id) => {
assert!(self.units.get(&id).is_none());
let move_points = object_types.get_unit_type(type_id.clone()).move_points;
let _ = self.units.insert(
id.clone(),
Unit {
id: id.clone(),
pos: pos.clone(),
player_id: player_id.clone(),
type_id: type_id.clone(),
move_points: move_points.clone(),
attacked: false,
},
);
}
Event::EventAttackUnit(attacker_id, defender_id, killed) => {
if *killed {
assert!(self.units.get(&defender_id).is_some());
let _ = self.units.remove(&defender_id).unwrap();
}
let unit = self.units.get_mut(&attacker_id).unwrap();
assert!(!unit.attacked);
unit.attacked = true;
}
}
}
}
// vim: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
|
{
unit.move_points = object_types.get_unit_type(unit.type_id).move_points;
unit.attacked = false;
}
|
conditional_block
|
game_state.rs
|
// See LICENSE file for copyright and license details.
use crate::core::core::{Event, ObjectTypes, Unit};
use crate::core::types::{MapPos, PlayerId, UnitId};
use std::collections::HashMap;
pub struct GameState {
pub units: HashMap<UnitId, Unit>,
}
impl<'a> GameState {
pub fn new() -> GameState {
GameState {
units: HashMap::new(),
}
}
pub fn units_at(&'a self, pos: MapPos) -> Vec<&'a Unit>
|
fn refresh_units(&mut self, object_types: &ObjectTypes, player_id: PlayerId) {
for (_, unit) in self.units.iter_mut() {
if unit.player_id == player_id {
unit.move_points = object_types.get_unit_type(unit.type_id).move_points;
unit.attacked = false;
}
}
}
pub fn apply_event(&mut self, object_types: &ObjectTypes, event: &Event) {
match event {
Event::EventMove(id, ref path) => {
let pos = *path.last().unwrap();
let unit = self.units.get_mut(&id).unwrap();
unit.pos = pos;
assert!(unit.move_points > 0);
unit.move_points = 0;
}
Event::EventEndTurn(_, new_player_id) => {
self.refresh_units(object_types, new_player_id.clone());
}
Event::EventCreateUnit(id, pos, type_id, player_id) => {
assert!(self.units.get(&id).is_none());
let move_points = object_types.get_unit_type(type_id.clone()).move_points;
let _ = self.units.insert(
id.clone(),
Unit {
id: id.clone(),
pos: pos.clone(),
player_id: player_id.clone(),
type_id: type_id.clone(),
move_points: move_points.clone(),
attacked: false,
},
);
}
Event::EventAttackUnit(attacker_id, defender_id, killed) => {
if *killed {
assert!(self.units.get(&defender_id).is_some());
let _ = self.units.remove(&defender_id).unwrap();
}
let unit = self.units.get_mut(&attacker_id).unwrap();
assert!(!unit.attacked);
unit.attacked = true;
}
}
}
}
// vim: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
|
{
let mut units = Vec::new();
for (_, unit) in self.units.iter() {
if unit.pos == pos {
units.push(unit);
}
}
units
}
|
identifier_body
|
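game_state.rs above applies events by mutating a HashMap of units keyed by id. The sketch below shows the same shape with stand-in types; UnitId, MapPos, and the full Event enum from the original crate are not available here, so everything below is illustrative only:

use std::collections::HashMap;

type UnitId = u32;
type MapPos = (i32, i32);

struct Unit { pos: MapPos, move_points: u32 }

enum Event {
    CreateUnit(UnitId, MapPos, u32),
    Move(UnitId, MapPos),
}

struct GameState { units: HashMap<UnitId, Unit> }

impl GameState {
    fn apply_event(&mut self, event: &Event) {
        match event {
            Event::CreateUnit(id, pos, move_points) => {
                assert!(self.units.get(id).is_none());
                self.units.insert(*id, Unit { pos: *pos, move_points: *move_points });
            }
            Event::Move(id, pos) => {
                let unit = self.units.get_mut(id).unwrap();
                assert!(unit.move_points > 0);
                unit.pos = *pos;
                unit.move_points = 0;
            }
        }
    }
}

fn main() {
    let mut state = GameState { units: HashMap::new() };
    state.apply_event(&Event::CreateUnit(1, (0, 0), 2));
    state.apply_event(&Event::Move(1, (3, 4)));
    assert_eq!(state.units[&1].pos, (3, 4));
}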
syntax_definition.rs
|
//! Data structures for representing syntax definitions
//!
//! Everything here is public because I want this library to be useful in super integrated cases
//! like text editors and I have no idea what kind of monkeying you might want to do with the data.
//! Perhaps parsing your own syntax format into this data structure?
use std::collections::{BTreeMap, HashMap};
use std::hash::Hash;
use super::scope::*;
use super::regex::{Regex, Region};
use regex_syntax::escape;
use serde::{Serialize, Serializer};
use crate::parsing::syntax_set::SyntaxSet;
pub type CaptureMapping = Vec<(usize, Vec<Scope>)>;
/// An opaque ID for a [`Context`].
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub struct ContextId {
/// Index into [`SyntaxSet::syntaxes`]
pub(crate) syntax_index: usize,
/// Index into [`crate::parsing::LazyContexts::contexts`] for the [`Self::syntax_index`] syntax
pub(crate) context_index: usize,
}
/// The main data structure representing a syntax definition loaded from a
/// `.sublime-syntax` file
///
/// You'll probably only need these as references to be passed around to parsing code.
///
/// Some useful public fields are the `name` field which is a human readable name to display in
/// syntax lists, and the `hidden` field which means hide this syntax from any lists because it is
/// for internal use.
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct SyntaxDefinition {
pub name: String,
pub file_extensions: Vec<String>,
pub scope: Scope,
pub first_line_match: Option<String>,
pub hidden: bool,
#[serde(serialize_with = "ordered_map")]
pub variables: HashMap<String, String>,
#[serde(serialize_with = "ordered_map")]
pub contexts: HashMap<String, Context>,
}
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct
|
{
pub meta_scope: Vec<Scope>,
pub meta_content_scope: Vec<Scope>,
/// This being set false in the syntax file implies this field being set false,
/// but it can also be set to false for contexts that don't include the prototype for other reasons
pub meta_include_prototype: bool,
pub clear_scopes: Option<ClearAmount>,
/// This is filled in by the linker at link time
/// for contexts that have `meta_include_prototype==true`
/// and are not included from the prototype.
pub prototype: Option<ContextId>,
pub uses_backrefs: bool,
pub patterns: Vec<Pattern>,
}
impl Context {
pub fn new(meta_include_prototype: bool) -> Context {
Context {
meta_scope: Vec::new(),
meta_content_scope: Vec::new(),
meta_include_prototype,
clear_scopes: None,
uses_backrefs: false,
patterns: Vec::new(),
prototype: None,
}
}
}
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub enum Pattern {
Match(MatchPattern),
Include(ContextReference),
}
/// Used to iterate over all the match patterns in a context
///
/// Basically walks the tree of patterns and include directives in the correct order.
#[derive(Debug)]
pub struct MatchIter<'a> {
syntax_set: &'a SyntaxSet,
ctx_stack: Vec<&'a Context>,
index_stack: Vec<usize>,
}
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct MatchPattern {
pub has_captures: bool,
pub regex: Regex,
pub scope: Vec<Scope>,
pub captures: Option<CaptureMapping>,
pub operation: MatchOperation,
pub with_prototype: Option<ContextReference>,
}
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[non_exhaustive]
pub enum ContextReference {
#[non_exhaustive]
Named(String),
#[non_exhaustive]
ByScope {
scope: Scope,
sub_context: Option<String>,
},
#[non_exhaustive]
File {
name: String,
sub_context: Option<String>,
},
#[non_exhaustive]
Inline(String),
#[non_exhaustive]
Direct(ContextId),
}
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub enum MatchOperation {
Push(Vec<ContextReference>),
Set(Vec<ContextReference>),
Pop,
None,
}
impl<'a> Iterator for MatchIter<'a> {
type Item = (&'a Context, usize);
fn next(&mut self) -> Option<(&'a Context, usize)> {
loop {
if self.ctx_stack.is_empty() {
return None;
}
// uncomment for debugging infinite recursion
// println!("{:?}", self.index_stack);
// use std::thread::sleep_ms;
// sleep_ms(500);
let last_index = self.ctx_stack.len() - 1;
let context = self.ctx_stack[last_index];
let index = self.index_stack[last_index];
self.index_stack[last_index] = index + 1;
if index < context.patterns.len() {
match context.patterns[index] {
Pattern::Match(_) => {
return Some((context, index));
},
Pattern::Include(ref ctx_ref) => {
let ctx_ptr = match *ctx_ref {
ContextReference::Direct(ref context_id) => {
self.syntax_set.get_context(context_id)
}
_ => return self.next(), // skip this and move onto the next one
};
self.ctx_stack.push(ctx_ptr);
self.index_stack.push(0);
}
}
} else {
self.ctx_stack.pop();
self.index_stack.pop();
}
}
}
}
/// Returns an iterator over all the match patterns in this context.
///
/// It recursively follows include directives. Can only be run on contexts that have already been
/// linked up.
pub fn context_iter<'a>(syntax_set: &'a SyntaxSet, context: &'a Context) -> MatchIter<'a> {
MatchIter {
syntax_set,
ctx_stack: vec![context],
index_stack: vec![0],
}
}
impl Context {
/// Returns the match pattern at an index, panics if the thing isn't a match pattern
pub fn match_at(&self, index: usize) -> &MatchPattern {
match self.patterns[index] {
Pattern::Match(ref match_pat) => match_pat,
_ => panic!("bad index to match_at"),
}
}
}
impl ContextReference {
/// find the pointed to context, panics if ref is not linked
pub fn resolve<'a>(&self, syntax_set: &'a SyntaxSet) -> &'a Context {
match *self {
ContextReference::Direct(ref context_id) => syntax_set.get_context(context_id),
_ => panic!("Can only call resolve on linked references: {:?}", self),
}
}
/// get the context ID this reference points to, panics if ref is not linked
pub fn id(&self) -> ContextId {
match *self {
ContextReference::Direct(ref context_id) => *context_id,
_ => panic!("Can only get ContextId of linked references: {:?}", self),
}
}
}
pub(crate) fn substitute_backrefs_in_regex<F>(regex_str: &str, substituter: F) -> String
where F: Fn(usize) -> Option<String>
{
let mut reg_str = String::with_capacity(regex_str.len());
let mut last_was_escape = false;
for c in regex_str.chars() {
if last_was_escape && c.is_digit(10) {
let val = c.to_digit(10).unwrap() as usize;
if let Some(sub) = substituter(val) {
reg_str.push_str(&sub);
}
} else if last_was_escape {
reg_str.push('\\');
reg_str.push(c);
        } else if c != '\\' {
reg_str.push(c);
}
        last_was_escape = c == '\\' && !last_was_escape;
}
reg_str
}
impl MatchPattern {
pub fn new(
has_captures: bool,
regex_str: String,
scope: Vec<Scope>,
captures: Option<CaptureMapping>,
operation: MatchOperation,
with_prototype: Option<ContextReference>,
) -> MatchPattern {
MatchPattern {
has_captures,
regex: Regex::new(regex_str),
scope,
captures,
operation,
with_prototype,
}
}
/// Used by the parser to compile a regex which needs to reference
/// regions from another matched pattern.
pub fn regex_with_refs(&self, region: &Region, text: &str) -> Regex {
let new_regex = substitute_backrefs_in_regex(self.regex.regex_str(), |i| {
region.pos(i).map(|(start, end)| escape(&text[start..end]))
});
Regex::new(new_regex)
}
pub fn regex(&self) -> &Regex {
&self.regex
}
}
/// Serialize the provided map in natural key order, so that it's deterministic when dumping.
pub(crate) fn ordered_map<K, V, S>(map: &HashMap<K, V>, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer, K: Eq + Hash + Ord + Serialize, V: Serialize
{
let ordered: BTreeMap<_, _> = map.iter().collect();
ordered.serialize(serializer)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn can_compile_refs() {
let pat = MatchPattern {
has_captures: true,
regex: Regex::new(r"lol \\ \2 \1 '\9' \wz".into()),
scope: vec![],
captures: None,
operation: MatchOperation::None,
with_prototype: None,
};
let r = Regex::new(r"(\\\[\]\(\))(b)(c)(d)(e)".into());
let s = r"\[]()bcde";
let mut region = Region::new();
let matched = r.search(s, 0, s.len(), Some(&mut region));
assert!(matched);
let regex_with_refs = pat.regex_with_refs(®ion, s);
assert_eq!(regex_with_refs.regex_str(), r"lol \\ b \\\[\]\(\) '' \wz");
}
}
|
Context
|
identifier_name
|
syntax_definition.rs
|
//! Data structures for representing syntax definitions
//!
//! Everything here is public because I want this library to be useful in super integrated cases
//! like text editors and I have no idea what kind of monkeying you might want to do with the data.
//! Perhaps parsing your own syntax format into this data structure?
use std::collections::{BTreeMap, HashMap};
use std::hash::Hash;
use super::scope::*;
use super::regex::{Regex, Region};
use regex_syntax::escape;
use serde::{Serialize, Serializer};
use crate::parsing::syntax_set::SyntaxSet;
pub type CaptureMapping = Vec<(usize, Vec<Scope>)>;
/// An opaque ID for a [`Context`].
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub struct ContextId {
/// Index into [`SyntaxSet::syntaxes`]
pub(crate) syntax_index: usize,
/// Index into [`crate::parsing::LazyContexts::contexts`] for the [`Self::syntax_index`] syntax
pub(crate) context_index: usize,
}
/// The main data structure representing a syntax definition loaded from a
/// `.sublime-syntax` file
///
/// You'll probably only need these as references to be passed around to parsing code.
///
/// Some useful public fields are the `name` field which is a human readable name to display in
/// syntax lists, and the `hidden` field which means hide this syntax from any lists because it is
/// for internal use.
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct SyntaxDefinition {
pub name: String,
pub file_extensions: Vec<String>,
pub scope: Scope,
pub first_line_match: Option<String>,
pub hidden: bool,
#[serde(serialize_with = "ordered_map")]
pub variables: HashMap<String, String>,
#[serde(serialize_with = "ordered_map")]
pub contexts: HashMap<String, Context>,
}
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct Context {
pub meta_scope: Vec<Scope>,
pub meta_content_scope: Vec<Scope>,
/// This being set false in the syntax file implies this field being set false,
/// but it can also be set to false for contexts that don't include the prototype for other reasons
pub meta_include_prototype: bool,
pub clear_scopes: Option<ClearAmount>,
/// This is filled in by the linker at link time
/// for contexts that have `meta_include_prototype==true`
/// and are not included from the prototype.
pub prototype: Option<ContextId>,
pub uses_backrefs: bool,
pub patterns: Vec<Pattern>,
}
impl Context {
pub fn new(meta_include_prototype: bool) -> Context {
Context {
meta_scope: Vec::new(),
meta_content_scope: Vec::new(),
meta_include_prototype,
clear_scopes: None,
uses_backrefs: false,
patterns: Vec::new(),
prototype: None,
}
}
}
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub enum Pattern {
Match(MatchPattern),
Include(ContextReference),
}
/// Used to iterate over all the match patterns in a context
///
/// Basically walks the tree of patterns and include directives in the correct order.
#[derive(Debug)]
pub struct MatchIter<'a> {
syntax_set: &'a SyntaxSet,
ctx_stack: Vec<&'a Context>,
index_stack: Vec<usize>,
}
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct MatchPattern {
pub has_captures: bool,
pub regex: Regex,
pub scope: Vec<Scope>,
pub captures: Option<CaptureMapping>,
pub operation: MatchOperation,
pub with_prototype: Option<ContextReference>,
}
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
#[non_exhaustive]
pub enum ContextReference {
#[non_exhaustive]
Named(String),
#[non_exhaustive]
ByScope {
scope: Scope,
sub_context: Option<String>,
},
#[non_exhaustive]
File {
name: String,
sub_context: Option<String>,
},
#[non_exhaustive]
Inline(String),
#[non_exhaustive]
Direct(ContextId),
}
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub enum MatchOperation {
Push(Vec<ContextReference>),
Set(Vec<ContextReference>),
Pop,
None,
}
impl<'a> Iterator for MatchIter<'a> {
type Item = (&'a Context, usize);
fn next(&mut self) -> Option<(&'a Context, usize)> {
loop {
if self.ctx_stack.is_empty() {
return None;
}
// uncomment for debugging infinite recursion
// println!("{:?}", self.index_stack);
// use std::thread::sleep_ms;
// sleep_ms(500);
let last_index = self.ctx_stack.len() - 1;
let context = self.ctx_stack[last_index];
let index = self.index_stack[last_index];
self.index_stack[last_index] = index + 1;
if index < context.patterns.len() {
match context.patterns[index] {
Pattern::Match(_) => {
return Some((context, index));
},
Pattern::Include(ref ctx_ref) => {
let ctx_ptr = match *ctx_ref {
ContextReference::Direct(ref context_id) => {
self.syntax_set.get_context(context_id)
}
_ => return self.next(), // skip this and move onto the next one
};
self.ctx_stack.push(ctx_ptr);
self.index_stack.push(0);
}
}
} else {
self.ctx_stack.pop();
self.index_stack.pop();
}
}
}
}
/// Returns an iterator over all the match patterns in this context.
///
/// It recursively follows include directives. Can only be run on contexts that have already been
/// linked up.
pub fn context_iter<'a>(syntax_set: &'a SyntaxSet, context: &'a Context) -> MatchIter<'a> {
MatchIter {
syntax_set,
ctx_stack: vec![context],
index_stack: vec![0],
}
}
impl Context {
/// Returns the match pattern at an index, panics if the thing isn't a match pattern
pub fn match_at(&self, index: usize) -> &MatchPattern {
match self.patterns[index] {
|
Pattern::Match(ref match_pat) => match_pat,
_ => panic!("bad index to match_at"),
}
}
}
impl ContextReference {
/// find the pointed to context, panics if ref is not linked
pub fn resolve<'a>(&self, syntax_set: &'a SyntaxSet) -> &'a Context {
match *self {
ContextReference::Direct(ref context_id) => syntax_set.get_context(context_id),
_ => panic!("Can only call resolve on linked references: {:?}", self),
}
}
/// get the context ID this reference points to, panics if ref is not linked
pub fn id(&self) -> ContextId {
match *self {
ContextReference::Direct(ref context_id) => *context_id,
_ => panic!("Can only get ContextId of linked references: {:?}", self),
}
}
}
pub(crate) fn substitute_backrefs_in_regex<F>(regex_str: &str, substituter: F) -> String
where F: Fn(usize) -> Option<String>
{
let mut reg_str = String::with_capacity(regex_str.len());
let mut last_was_escape = false;
for c in regex_str.chars() {
if last_was_escape && c.is_digit(10) {
let val = c.to_digit(10).unwrap() as usize;
if let Some(sub) = substituter(val) {
reg_str.push_str(&sub);
}
} else if last_was_escape {
reg_str.push('\\');
reg_str.push(c);
        } else if c != '\\' {
reg_str.push(c);
}
        last_was_escape = c == '\\' && !last_was_escape;
}
reg_str
}
impl MatchPattern {
pub fn new(
has_captures: bool,
regex_str: String,
scope: Vec<Scope>,
captures: Option<CaptureMapping>,
operation: MatchOperation,
with_prototype: Option<ContextReference>,
) -> MatchPattern {
MatchPattern {
has_captures,
regex: Regex::new(regex_str),
scope,
captures,
operation,
with_prototype,
}
}
/// Used by the parser to compile a regex which needs to reference
/// regions from another matched pattern.
pub fn regex_with_refs(&self, region: &Region, text: &str) -> Regex {
let new_regex = substitute_backrefs_in_regex(self.regex.regex_str(), |i| {
region.pos(i).map(|(start, end)| escape(&text[start..end]))
});
Regex::new(new_regex)
}
pub fn regex(&self) -> &Regex {
&self.regex
}
}
/// Serialize the provided map in natural key order, so that it's deterministic when dumping.
pub(crate) fn ordered_map<K, V, S>(map: &HashMap<K, V>, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer, K: Eq + Hash + Ord + Serialize, V: Serialize
{
let ordered: BTreeMap<_, _> = map.iter().collect();
ordered.serialize(serializer)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn can_compile_refs() {
let pat = MatchPattern {
has_captures: true,
regex: Regex::new(r"lol \\ \2 \1 '\9' \wz".into()),
scope: vec![],
captures: None,
operation: MatchOperation::None,
with_prototype: None,
};
let r = Regex::new(r"(\\\[\]\(\))(b)(c)(d)(e)".into());
let s = r"\[]()bcde";
let mut region = Region::new();
let matched = r.search(s, 0, s.len(), Some(&mut region));
assert!(matched);
let regex_with_refs = pat.regex_with_refs(®ion, s);
assert_eq!(regex_with_refs.regex_str(), r"lol \\ b \\\[\]\(\) '' \wz");
}
}
|
random_line_split
|
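substitute_backrefs_in_regex in the syntax_definition.rs samples rewrites \N backreferences by splicing in escaped text captured by an earlier match. The sketch below re-implements the same scanning loop standalone (the real function is crate-private in syntect, so it cannot be imported) and drives it with a hard-coded substituter instead of a Region:

fn substitute_backrefs<F>(regex_str: &str, substituter: F) -> String
where
    F: Fn(usize) -> Option<String>,
{
    let mut out = String::with_capacity(regex_str.len());
    let mut last_was_escape = false;
    for c in regex_str.chars() {
        if last_was_escape && c.is_ascii_digit() {
            // `\N` becomes whatever the substituter returns for group N (or nothing).
            if let Some(sub) = substituter(c.to_digit(10).unwrap() as usize) {
                out.push_str(&sub);
            }
        } else if last_was_escape {
            out.push('\\');
            out.push(c);
        } else if c != '\\' {
            out.push(c);
        }
        last_was_escape = c == '\\' && !last_was_escape;
    }
    out
}

fn main() {
    let rewritten = substitute_backrefs(r"start \1 end", |i| {
        if i == 1 { Some("captured".to_string()) } else { None }
    });
    assert_eq!(rewritten, "start captured end");
}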
|
roerrorapi.rs
|
// Copyright © 2017 winapi-rs developers
|
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
ENUM!{enum RO_ERROR_REPORTING_FLAGS {
RO_ERROR_REPORTING_NONE = 0x00000000,
RO_ERROR_REPORTING_SUPPRESSEXCEPTIONS = 0x00000001,
RO_ERROR_REPORTING_FORCEEXCEPTIONS = 0x00000002,
RO_ERROR_REPORTING_USESETERRORINFO = 0x00000004,
RO_ERROR_REPORTING_SUPPRESSSETERRORINFO = 0x00000008,
}}
FN!{stdcall PINSPECT_MEMORY_CALLBACK(
*const ::VOID,
::UINT_PTR,
::UINT32,
*mut ::BYTE,
) -> ::HRESULT}
|
random_line_split
|
|
xmldocument.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use document_loader::DocumentLoader;
use dom::bindings::codegen::Bindings::DocumentBinding::DocumentMethods;
use dom::bindings::codegen::Bindings::XMLDocumentBinding::{self, XMLDocumentMethods};
use dom::bindings::inheritance::Castable;
use dom::bindings::nonnull::NonNullJSObjectPtr;
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::root::DomRoot;
use dom::bindings::str::DOMString;
use dom::document::{Document, DocumentSource, HasBrowsingContext, IsHTMLDocument};
use dom::location::Location;
use dom::node::Node;
use dom::window::Window;
use dom_struct::dom_struct;
use js::jsapi::JSContext;
use script_traits::DocumentActivity;
use servo_url::{MutableOrigin, ServoUrl};
// https://dom.spec.whatwg.org/#xmldocument
#[dom_struct]
pub struct XMLDocument {
document: Document,
}
impl XMLDocument {
fn new_inherited(window: &Window,
has_browsing_context: HasBrowsingContext,
url: Option<ServoUrl>,
origin: MutableOrigin,
is_html_document: IsHTMLDocument,
content_type: Option<DOMString>,
last_modified: Option<String>,
activity: DocumentActivity,
source: DocumentSource,
doc_loader: DocumentLoader) -> XMLDocument {
XMLDocument {
document: Document::new_inherited(window,
has_browsing_context,
url,
origin,
is_html_document,
content_type,
last_modified,
activity,
source,
doc_loader,
None,
None,
Default::default()),
}
}
pub fn new(window: &Window,
has_browsing_context: HasBrowsingContext,
url: Option<ServoUrl>,
origin: MutableOrigin,
doctype: IsHTMLDocument,
content_type: Option<DOMString>,
last_modified: Option<String>,
activity: DocumentActivity,
source: DocumentSource,
doc_loader: DocumentLoader)
-> DomRoot<XMLDocument>
|
}
doc
}
}
impl XMLDocumentMethods for XMLDocument {
// https://html.spec.whatwg.org/multipage/#dom-document-location
fn GetLocation(&self) -> Option<DomRoot<Location>> {
self.upcast::<Document>().GetLocation()
}
// https://html.spec.whatwg.org/multipage/#dom-tree-accessors:supported-property-names
fn SupportedPropertyNames(&self) -> Vec<DOMString> {
self.upcast::<Document>().SupportedPropertyNames()
}
#[allow(unsafe_code)]
// https://html.spec.whatwg.org/multipage/#dom-tree-accessors:dom-document-nameditem-filter
unsafe fn NamedGetter(&self, _cx: *mut JSContext, name: DOMString) -> Option<NonNullJSObjectPtr> {
self.upcast::<Document>().NamedGetter(_cx, name)
}
}
|
{
let doc = reflect_dom_object(
Box::new(XMLDocument::new_inherited(
window,
has_browsing_context,
url,
origin,
doctype,
content_type,
last_modified,
activity,
source,
doc_loader
)),
window,
XMLDocumentBinding::Wrap
);
{
let node = doc.upcast::<Node>();
node.set_owner_doc(&doc.document);
|
identifier_body
|
xmldocument.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use document_loader::DocumentLoader;
use dom::bindings::codegen::Bindings::DocumentBinding::DocumentMethods;
use dom::bindings::codegen::Bindings::XMLDocumentBinding::{self, XMLDocumentMethods};
use dom::bindings::inheritance::Castable;
use dom::bindings::nonnull::NonNullJSObjectPtr;
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::root::DomRoot;
use dom::bindings::str::DOMString;
use dom::document::{Document, DocumentSource, HasBrowsingContext, IsHTMLDocument};
use dom::location::Location;
use dom::node::Node;
use dom::window::Window;
use dom_struct::dom_struct;
use js::jsapi::JSContext;
use script_traits::DocumentActivity;
use servo_url::{MutableOrigin, ServoUrl};
// https://dom.spec.whatwg.org/#xmldocument
#[dom_struct]
pub struct XMLDocument {
document: Document,
}
impl XMLDocument {
fn
|
(window: &Window,
has_browsing_context: HasBrowsingContext,
url: Option<ServoUrl>,
origin: MutableOrigin,
is_html_document: IsHTMLDocument,
content_type: Option<DOMString>,
last_modified: Option<String>,
activity: DocumentActivity,
source: DocumentSource,
doc_loader: DocumentLoader) -> XMLDocument {
XMLDocument {
document: Document::new_inherited(window,
has_browsing_context,
url,
origin,
is_html_document,
content_type,
last_modified,
activity,
source,
doc_loader,
None,
None,
Default::default()),
}
}
pub fn new(window: &Window,
has_browsing_context: HasBrowsingContext,
url: Option<ServoUrl>,
origin: MutableOrigin,
doctype: IsHTMLDocument,
content_type: Option<DOMString>,
last_modified: Option<String>,
activity: DocumentActivity,
source: DocumentSource,
doc_loader: DocumentLoader)
-> DomRoot<XMLDocument> {
let doc = reflect_dom_object(
Box::new(XMLDocument::new_inherited(
window,
has_browsing_context,
url,
origin,
doctype,
content_type,
last_modified,
activity,
source,
doc_loader
)),
window,
XMLDocumentBinding::Wrap
);
{
let node = doc.upcast::<Node>();
node.set_owner_doc(&doc.document);
}
doc
}
}
impl XMLDocumentMethods for XMLDocument {
// https://html.spec.whatwg.org/multipage/#dom-document-location
fn GetLocation(&self) -> Option<DomRoot<Location>> {
self.upcast::<Document>().GetLocation()
}
// https://html.spec.whatwg.org/multipage/#dom-tree-accessors:supported-property-names
fn SupportedPropertyNames(&self) -> Vec<DOMString> {
self.upcast::<Document>().SupportedPropertyNames()
}
#[allow(unsafe_code)]
// https://html.spec.whatwg.org/multipage/#dom-tree-accessors:dom-document-nameditem-filter
unsafe fn NamedGetter(&self, _cx: *mut JSContext, name: DOMString) -> Option<NonNullJSObjectPtr> {
self.upcast::<Document>().NamedGetter(_cx, name)
}
}
|
new_inherited
|
identifier_name
|
xmldocument.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use document_loader::DocumentLoader;
use dom::bindings::codegen::Bindings::DocumentBinding::DocumentMethods;
use dom::bindings::codegen::Bindings::XMLDocumentBinding::{self, XMLDocumentMethods};
use dom::bindings::inheritance::Castable;
use dom::bindings::nonnull::NonNullJSObjectPtr;
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::root::DomRoot;
use dom::bindings::str::DOMString;
use dom::document::{Document, DocumentSource, HasBrowsingContext, IsHTMLDocument};
use dom::location::Location;
use dom::node::Node;
use dom::window::Window;
use dom_struct::dom_struct;
use js::jsapi::JSContext;
use script_traits::DocumentActivity;
use servo_url::{MutableOrigin, ServoUrl};
// https://dom.spec.whatwg.org/#xmldocument
#[dom_struct]
pub struct XMLDocument {
document: Document,
}
impl XMLDocument {
fn new_inherited(window: &Window,
has_browsing_context: HasBrowsingContext,
url: Option<ServoUrl>,
origin: MutableOrigin,
is_html_document: IsHTMLDocument,
content_type: Option<DOMString>,
last_modified: Option<String>,
activity: DocumentActivity,
source: DocumentSource,
doc_loader: DocumentLoader) -> XMLDocument {
XMLDocument {
document: Document::new_inherited(window,
has_browsing_context,
url,
origin,
is_html_document,
content_type,
last_modified,
activity,
source,
doc_loader,
None,
None,
Default::default()),
}
}
pub fn new(window: &Window,
has_browsing_context: HasBrowsingContext,
url: Option<ServoUrl>,
origin: MutableOrigin,
doctype: IsHTMLDocument,
content_type: Option<DOMString>,
last_modified: Option<String>,
activity: DocumentActivity,
source: DocumentSource,
doc_loader: DocumentLoader)
-> DomRoot<XMLDocument> {
let doc = reflect_dom_object(
Box::new(XMLDocument::new_inherited(
window,
has_browsing_context,
url,
origin,
doctype,
content_type,
last_modified,
activity,
source,
doc_loader
)),
window,
XMLDocumentBinding::Wrap
);
{
let node = doc.upcast::<Node>();
node.set_owner_doc(&doc.document);
}
doc
}
}
|
// https://html.spec.whatwg.org/multipage/#dom-document-location
fn GetLocation(&self) -> Option<DomRoot<Location>> {
self.upcast::<Document>().GetLocation()
}
// https://html.spec.whatwg.org/multipage/#dom-tree-accessors:supported-property-names
fn SupportedPropertyNames(&self) -> Vec<DOMString> {
self.upcast::<Document>().SupportedPropertyNames()
}
#[allow(unsafe_code)]
// https://html.spec.whatwg.org/multipage/#dom-tree-accessors:dom-document-nameditem-filter
unsafe fn NamedGetter(&self, _cx: *mut JSContext, name: DOMString) -> Option<NonNullJSObjectPtr> {
self.upcast::<Document>().NamedGetter(_cx, name)
}
}
|
impl XMLDocumentMethods for XMLDocument {
|
random_line_split
|
wrap_unhygienic_example.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// ignore-pretty pretty-printing is unhygienic
// aux-build:my_crate.rs
// aux-build:unhygienic_example.rs
#![feature(decl_macro)]
extern crate unhygienic_example;
extern crate my_crate; // (b)
// Hygienic version of `unhygienic_macro`.
pub macro hygienic_macro() {
fn g()
|
// (c)
::unhygienic_example::unhygienic_macro!();
// ^ Even though we invoke an unhygienic macro, `hygienic_macro` remains hygienic.
// In the above expansion:
// (1) `my_crate` always resolves to (b) regardless of invocation site.
// (2) The defined function `f` is only usable inside this macro definition.
// (3) `g` always resolves to (c) regardless of invocation site.
// (4) `$crate::g` remains hygienic and continues to resolve to (a).
f();
}
#[allow(unused)]
fn test_hygienic_macro() {
hygienic_macro!();
fn f() {} // (d) no conflict
f(); // resolves to (d)
}
fn main() {}
|
{}
|
identifier_body
|
wrap_unhygienic_example.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// ignore-pretty pretty-printing is unhygienic
// aux-build:my_crate.rs
// aux-build:unhygienic_example.rs
#![feature(decl_macro)]
extern crate unhygienic_example;
extern crate my_crate; // (b)
// Hygienic version of `unhygienic_macro`.
pub macro hygienic_macro() {
fn g() {} // (c)
::unhygienic_example::unhygienic_macro!();
// ^ Even though we invoke an unhygienic macro, `hygienic_macro` remains hygienic.
// In the above expansion:
// (1) `my_crate` always resolves to (b) regardless of invocation site.
// (2) The defined function `f` is only usable inside this macro definition.
// (3) `g` always resolves to (c) regardless of invocation site.
// (4) `$crate::g` remains hygienic and continues to resolve to (a).
|
#[allow(unused)]
fn test_hygienic_macro() {
hygienic_macro!();
fn f() {} // (d) no conflict
f(); // resolves to (d)
}
fn main() {}
|
f();
}
|
random_line_split
|
wrap_unhygienic_example.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// ignore-pretty pretty-printing is unhygienic
// aux-build:my_crate.rs
// aux-build:unhygienic_example.rs
#![feature(decl_macro)]
extern crate unhygienic_example;
extern crate my_crate; // (b)
// Hygienic version of `unhygienic_macro`.
pub macro hygienic_macro() {
fn g() {} // (c)
::unhygienic_example::unhygienic_macro!();
// ^ Even though we invoke an unhygienic macro, `hygienic_macro` remains hygienic.
// In the above expansion:
// (1) `my_crate` always resolves to (b) regardless of invocation site.
// (2) The defined function `f` is only usable inside this macro definition.
// (3) `g` always resolves to (c) regardless of invocation site.
// (4) `$crate::g` remains hygienic and continues to resolve to (a).
f();
}
#[allow(unused)]
fn test_hygienic_macro() {
hygienic_macro!();
fn f() {} // (d) no conflict
f(); // resolves to (d)
}
fn
|
() {}
|
main
|
identifier_name
|
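The wrap_unhygienic_example.rs samples rely on the unstable decl_macro feature. As a stable-Rust illustration of the hygiene point they make (this is not the original aux crate), an identifier introduced inside a macro_rules! body lives in its own hygiene context and does not clash with a same-named binding at the call site:

macro_rules! shadow_test {
    () => {
        // This `x` is hygienic: it cannot capture or clash with the caller's `x`.
        let x = "inside the macro";
        println!("{}", x);
    };
}

fn main() {
    let x = "at the call site";
    shadow_test!();
    // The caller's binding is untouched by the macro's `x`.
    println!("{}", x); // prints "at the call site"
}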
texture.rs
|
use glium::{self, backend::Facade, texture::Texture2d};
use crate::backend::{Direction, Position, ASSETS};
pub struct Textures {
pub crate_: Texture2d,
pub floor: Texture2d,
pub goal: Texture2d,
pub wall: Texture2d,
pub worker: Texture2d,
}
impl Textures {
/// Load all textures.
pub fn new(factory: &dyn Facade) -> Self {
let crate_ = load(factory, "crate");
let floor = load(factory, "floor");
let goal = load(factory, "goal");
let wall = load(factory, "wall");
let worker = load(factory, "worker");
Textures {
crate_,
floor,
goal,
wall,
worker,
}
}
}
/// Load an image from the assets directory and turn it into a `Texture2d`.
pub fn
|
(display: &dyn Facade, name: &str) -> Texture2d {
let mut path = ASSETS.join("images");
path.push(name);
path.set_extension("png");
let image = image::open(path).unwrap().to_rgba();
let image_dimensions = image.dimensions();
let image =
glium::texture::RawImage2d::from_raw_rgba_reversed(&image.into_raw(), image_dimensions);
Texture2d::new(display, image).unwrap()
}
#[derive(Copy, Clone)]
pub struct Vertex {
pub position: [f32; 2],
pub tex_coords: [f32; 2],
}
implement_vertex!(Vertex, position, tex_coords);
/// Pass through coordinates and texture coordinates.
pub const VERTEX_SHADER: &str = r#"
#version 140
in vec2 position;
in vec2 tex_coords;
out vec2 v_tex_coords;
uniform mat4 matrix;
void main() {
v_tex_coords = tex_coords;
gl_Position = matrix * vec4(position, 0.0, 1.0);
}
"#;
/// Render texture on triangles.
pub const FRAGMENT_SHADER: &str = r#"
#version 140
in vec2 v_tex_coords;
out vec4 color;
uniform sampler2D tex;
void main() {
color = texture(tex, v_tex_coords);
}
"#;
/// Darken the screen
pub const DARKEN_SHADER: &str = r#"
#version 140
in vec2 v_tex_coords;
out vec4 color;
void main() {
color = vec4(0.0, 0.0, 0.0, 0.7);
}
"#;
#[derive(Clone, Copy, Debug)]
pub enum TileKind {
Crate,
Worker,
}
/// All tiles face left by default, so the worker has to be turned by 90 degrees (clockwise) to face
/// up instead of left, etc.
fn direction_to_index(dir: Direction) -> usize {
match dir {
Direction::Left => 0,
Direction::Down => 1,
Direction::Right => 2,
Direction::Up => 3,
}
}
/// Create a vector of vertices consisting of two triangles which together form a square with the
/// given coordinates, together with texture coordinates to fill that square with a texture.
pub fn lrtb_to_vertices(
left: f32,
right: f32,
top: f32,
bottom: f32,
dir: Direction,
) -> Vec<Vertex> {
let tex = [[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]];
let rot = direction_to_index(dir);
let a = Vertex {
position: [left, top],
tex_coords: tex[rot],
};
let b = Vertex {
position: [left, bottom],
tex_coords: tex[(rot + 1) % 4],
};
let c = Vertex {
position: [right, bottom],
tex_coords: tex[(rot + 2) % 4],
};
let d = Vertex {
position: [right, top],
tex_coords: tex[(rot + 3) % 4],
};
vec![a, b, c, c, d, a]
}
/// Create a bunch of vertices for rendering a textured square.
pub fn quad(pos: Position, columns: u32, rows: u32) -> Vec<Vertex> {
let left = 2.0 * pos.x as f32 / columns as f32 - 1.0;
let right = left + 2.0 / columns as f32;
let bottom = -2.0 * pos.y as f32 / rows as f32 + 1.0;
let top = bottom - 2.0 / rows as f32;
lrtb_to_vertices(left, right, top, bottom, Direction::Left)
}
/// Create a rectangle covering the entire viewport.
pub fn full_screen() -> Vec<Vertex> {
lrtb_to_vertices(-1.0, 1.0, -1.0, 1.0, Direction::Left)
}
|
load
|
identifier_name
|
texture.rs
|
use glium::{self, backend::Facade, texture::Texture2d};
use crate::backend::{Direction, Position, ASSETS};
pub struct Textures {
pub crate_: Texture2d,
pub floor: Texture2d,
pub goal: Texture2d,
pub wall: Texture2d,
pub worker: Texture2d,
}
impl Textures {
/// Load all textures.
pub fn new(factory: &dyn Facade) -> Self {
let crate_ = load(factory, "crate");
let floor = load(factory, "floor");
let goal = load(factory, "goal");
let wall = load(factory, "wall");
let worker = load(factory, "worker");
Textures {
crate_,
floor,
goal,
wall,
worker,
}
}
}
/// Load an image from the assets directory and turn it into a `Texture2d`.
pub fn load(display: &dyn Facade, name: &str) -> Texture2d {
let mut path = ASSETS.join("images");
path.push(name);
path.set_extension("png");
let image = image::open(path).unwrap().to_rgba();
let image_dimensions = image.dimensions();
let image =
glium::texture::RawImage2d::from_raw_rgba_reversed(&image.into_raw(), image_dimensions);
Texture2d::new(display, image).unwrap()
}
#[derive(Copy, Clone)]
pub struct Vertex {
pub position: [f32; 2],
pub tex_coords: [f32; 2],
}
implement_vertex!(Vertex, position, tex_coords);
/// Pass through coordinates and texture coordinates.
pub const VERTEX_SHADER: &str = r#"
#version 140
in vec2 position;
in vec2 tex_coords;
out vec2 v_tex_coords;
uniform mat4 matrix;
void main() {
v_tex_coords = tex_coords;
gl_Position = matrix * vec4(position, 0.0, 1.0);
}
"#;
/// Render texture on triangles.
pub const FRAGMENT_SHADER: &str = r#"
#version 140
in vec2 v_tex_coords;
out vec4 color;
uniform sampler2D tex;
void main() {
color = texture(tex, v_tex_coords);
}
"#;
/// Darken the screen
pub const DARKEN_SHADER: &str = r#"
#version 140
in vec2 v_tex_coords;
out vec4 color;
void main() {
color = vec4(0.0, 0.0, 0.0, 0.7);
}
"#;
#[derive(Clone, Copy, Debug)]
pub enum TileKind {
Crate,
Worker,
}
/// All tiles face left by default, so the worker has to be turned by 90 degrees (clockwise) to face
/// up instead of left, etc.
fn direction_to_index(dir: Direction) -> usize {
match dir {
Direction::Left => 0,
Direction::Down => 1,
Direction::Right => 2,
Direction::Up => 3,
}
}
/// Create a vector of vertices consisting of two triangles which together form a square with the
/// given coordinates, together with texture coordinates to fill that square with a texture.
pub fn lrtb_to_vertices(
left: f32,
right: f32,
top: f32,
bottom: f32,
dir: Direction,
) -> Vec<Vertex> {
let tex = [[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]];
let rot = direction_to_index(dir);
let a = Vertex {
position: [left, top],
tex_coords: tex[rot],
};
let b = Vertex {
position: [left, bottom],
tex_coords: tex[(rot + 1) % 4],
};
let c = Vertex {
position: [right, bottom],
tex_coords: tex[(rot + 2) % 4],
};
let d = Vertex {
position: [right, top],
tex_coords: tex[(rot + 3) % 4],
};
vec![a, b, c, c, d, a]
}
/// Create a bunch of vertices for rendering a textured square.
pub fn quad(pos: Position, columns: u32, rows: u32) -> Vec<Vertex> {
let left = 2.0 * pos.x as f32 / columns as f32 - 1.0;
let right = left + 2.0 / columns as f32;
let bottom = -2.0 * pos.y as f32 / rows as f32 + 1.0;
let top = bottom - 2.0 / rows as f32;
lrtb_to_vertices(left, right, top, bottom, Direction::Left)
|
}
/// Create a rectangle covering the entire viewport.
pub fn full_screen() -> Vec<Vertex> {
lrtb_to_vertices(-1.0, 1.0, -1.0, 1.0, Direction::Left)
}
|
random_line_split
|
|
texture.rs
|
use glium::{self, backend::Facade, texture::Texture2d};
use crate::backend::{Direction, Position, ASSETS};
pub struct Textures {
pub crate_: Texture2d,
pub floor: Texture2d,
pub goal: Texture2d,
pub wall: Texture2d,
pub worker: Texture2d,
}
impl Textures {
/// Load all textures.
pub fn new(factory: &dyn Facade) -> Self {
let crate_ = load(factory, "crate");
let floor = load(factory, "floor");
let goal = load(factory, "goal");
let wall = load(factory, "wall");
let worker = load(factory, "worker");
Textures {
crate_,
floor,
goal,
wall,
worker,
}
}
}
/// Load an image from the assets directory and turn it into a `Texture2d`.
pub fn load(display: &dyn Facade, name: &str) -> Texture2d {
let mut path = ASSETS.join("images");
path.push(name);
path.set_extension("png");
let image = image::open(path).unwrap().to_rgba();
let image_dimensions = image.dimensions();
let image =
glium::texture::RawImage2d::from_raw_rgba_reversed(&image.into_raw(), image_dimensions);
Texture2d::new(display, image).unwrap()
}
#[derive(Copy, Clone)]
pub struct Vertex {
pub position: [f32; 2],
pub tex_coords: [f32; 2],
}
implement_vertex!(Vertex, position, tex_coords);
/// Pass through coordinates and texture coordinates.
pub const VERTEX_SHADER: &str = r#"
#version 140
in vec2 position;
in vec2 tex_coords;
out vec2 v_tex_coords;
uniform mat4 matrix;
void main() {
v_tex_coords = tex_coords;
gl_Position = matrix * vec4(position, 0.0, 1.0);
}
"#;
/// Render texture on triangles.
pub const FRAGMENT_SHADER: &str = r#"
#version 140
in vec2 v_tex_coords;
out vec4 color;
uniform sampler2D tex;
void main() {
color = texture(tex, v_tex_coords);
}
"#;
/// Darken the screen
pub const DARKEN_SHADER: &str = r#"
#version 140
in vec2 v_tex_coords;
out vec4 color;
void main() {
color = vec4(0.0, 0.0, 0.0, 0.7);
}
"#;
#[derive(Clone, Copy, Debug)]
pub enum TileKind {
Crate,
Worker,
}
/// All tiles face left by default, so the worker has to be turned by 90 degrees (clockwise) to face
/// up instead of left, etc.
fn direction_to_index(dir: Direction) -> usize {
match dir {
Direction::Left => 0,
Direction::Down => 1,
Direction::Right => 2,
Direction::Up => 3,
}
}
/// Create a vector of vertices consisting of two triangles which together form a square with the
/// given coordinates, together with texture coordinates to fill that square with a texture.
pub fn lrtb_to_vertices(
left: f32,
right: f32,
top: f32,
bottom: f32,
dir: Direction,
) -> Vec<Vertex> {
let tex = [[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]];
let rot = direction_to_index(dir);
let a = Vertex {
position: [left, top],
tex_coords: tex[rot],
};
let b = Vertex {
position: [left, bottom],
tex_coords: tex[(rot + 1) % 4],
};
let c = Vertex {
position: [right, bottom],
tex_coords: tex[(rot + 2) % 4],
};
let d = Vertex {
position: [right, top],
tex_coords: tex[(rot + 3) % 4],
};
vec![a, b, c, c, d, a]
}
/// Create a bunch of vertices for rendering a textured square.
pub fn quad(pos: Position, columns: u32, rows: u32) -> Vec<Vertex>
|
/// Create a rectangle covering the entire viewport.
pub fn full_screen() -> Vec<Vertex> {
lrtb_to_vertices(-1.0, 1.0, -1.0, 1.0, Direction::Left)
}
|
{
let left = 2.0 * pos.x as f32 / columns as f32 - 1.0;
let right = left + 2.0 / columns as f32;
let bottom = -2.0 * pos.y as f32 / rows as f32 + 1.0;
let top = bottom - 2.0 / rows as f32;
lrtb_to_vertices(left, right, top, bottom, Direction::Left)
}
|
identifier_body
|
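The `quad` helper in the texture.rs rows above maps a grid cell to normalized device coordinates: cell (0, 0) hugs the left edge at x = -1 and the top of the screen at y = +1, every cell is 2/columns wide and 2/rows tall, and the whole grid exactly fills [-1, 1] × [-1, 1] (note that the `bottom`/`top` names in `quad` follow row order from the top of the screen, so `bottom` holds the numerically larger y). A minimal standalone sketch of that arithmetic follows; the `cell_bounds` helper is illustrative only and just repeats the math from `quad` without the glium `Vertex` plumbing:

```rust
// Standalone check of the NDC mapping used by `quad` (sketch, not the real types).
fn cell_bounds(x: u32, y: u32, columns: u32, rows: u32) -> (f32, f32, f32, f32) {
    let left = 2.0 * x as f32 / columns as f32 - 1.0;
    let right = left + 2.0 / columns as f32;
    let bottom = -2.0 * y as f32 / rows as f32 + 1.0;
    let top = bottom - 2.0 / rows as f32;
    (left, right, top, bottom)
}

fn main() {
    // Cell (0, 0) of a 4x2 grid occupies the top-left slice:
    // x from -1.0 to -0.5, y from 1.0 down to 0.0.
    let (l, r, t, b) = cell_bounds(0, 0, 4, 2);
    assert_eq!((l, r, t, b), (-1.0, -0.5, 0.0, 1.0));
    // The last cell reaches exactly the opposite corner of [-1, 1] x [-1, 1].
    let (_, r_last, t_last, _) = cell_bounds(3, 1, 4, 2);
    assert_eq!((r_last, t_last), (1.0, -1.0));
    println!("quad NDC mapping checks out");
}
```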
bench.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_snake_case)]
use std::iter::repeat;
use test::Bencher;
use rand::{Rng, thread_rng};
use regex::{Regex, NoExpand};
fn bench_assert_match(b: &mut Bencher, re: Regex, text: &str) {
b.iter(|| if !re.is_match(text) { panic!("no match") });
}
#[bench]
fn no_exponential(b: &mut Bencher) {
let n = 100;
let regex_string = format!(
"{}{}",
repeat("a?").take(n).collect::<String>(),
repeat("a").take(n).collect::<String>());
let re = Regex::new(®ex_string).unwrap();
let text: String = repeat("a").take(n).collect();
bench_assert_match(b, re, &text);
}
#[bench]
fn literal(b: &mut Bencher) {
let re = regex!("y");
let text = format!("{}y", repeat("x").take(50).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn not_literal(b: &mut Bencher) {
let re = regex!(".y");
let text = format!("{}y", repeat("x").take(50).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn match_class(b: &mut Bencher) {
let re = regex!("[abcdw]");
let text = format!("{}w", repeat("xxxx").take(20).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn match_class_in_range(b: &mut Bencher) {
// 'b' is between 'a' and 'c', so the class range checking doesn't help.
let re = regex!("[ac]");
let text = format!("{}c", repeat("bbbb").take(20).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn match_class_unicode(b: &mut Bencher) {
let re = regex!(r"\pL");
let text = format!("{}a", repeat("☃5☃5").take(20).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn replace_all(b: &mut Bencher) {
let re = regex!("[cjrw]");
let text = "abcdefghijklmnopqrstuvwxyz";
// FIXME: This isn't using the $name expand stuff.
// It's possible RE2/Go is using it, but currently, the expand in this
// crate is actually compiling a regex, so it's incredibly slow.
b.iter(|| re.replace_all(text, NoExpand("")));
}
#[bench]
fn anchored_literal_short_non_match(b: &mut Bencher) {
let re = regex!("^zbc(d|e)");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
#[bench]
fn anchored_literal_long_non_match(b: &mut Bencher) {
let re = regex!("^zbc(d|e)");
let text: String = repeat("abcdefghijklmnopqrstuvwxyz").take(15).collect();
b.iter(|| re.is_match(&text));
}
#[bench]
fn anchored_literal_short_match(b: &mut Bencher) {
let re = regex!("^.bc(d|e)");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
#[bench]
fn anchored_literal_long_match(b: &mut Bencher) {
let re = regex!("^.bc(d|e)");
let text: String = repeat("abcdefghijklmnopqrstuvwxyz").take(15).collect();
b.iter(|| re.is_match(&text));
}
#[bench]
fn one_pass_short_a(b: &mut Bencher) {
let re = regex!("^.bc(d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_short_a_not(b: &mut Bencher) {
let re = regex!(".bc(d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_short_b(b: &mut Bencher) {
let re = regex!("^.bc(?:d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_short_b_not(b: &mut Bencher) {
let re = regex!(".bc(?:d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_long_prefix(b: &mut Bencher) {
let re = regex!("^abcdefghijklmnopqrstuvwxyz.*$");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_long_prefix_not(b: &mut Bencher) {
let re = regex!("^.bcdefghijklmnopqrstuvwxyz.*$");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
macro_rules! throughput(
($name:ident, $regex:expr, $size:expr) => (
#[bench]
fn $name(b: &mut Bencher) {
let text = gen_text($size);
b.bytes = $size;
let re = $regex;
b.iter(|| if re.is_match(&text) { panic!("match") });
}
);
);
fn easy0() -> Regex { regex!("ABCDEFGHIJKLMNOPQRSTUVWXYZ$") }
fn easy1() -> Regex { regex!("A[AB]B[BC]C[CD]D[DE]E[EF]F[FG]G[GH]H[HI]I[IJ]J$") }
fn medium() -> Regex { regex!("[XYZ]ABCDEFGHIJKLMNOPQRSTUVWXYZ$") }
fn hard() -> Regex { regex!("[ -~]*ABCDEFGHIJKLMNOPQRSTUVWXYZ$") }
fn gen_text(n: usize) -> String {
|
roughput!(easy0_32, easy0(), 32);
throughput!(easy0_1K, easy0(), 1<<10);
throughput!(easy0_32K, easy0(), 32<<10);
throughput!(easy0_1MB, easy0(), 1<<20);
throughput!(easy1_32, easy1(), 32);
throughput!(easy1_1K, easy1(), 1<<10);
throughput!(easy1_32K, easy1(), 32<<10);
throughput!(easy1_1MB, easy1(), 1<<20);
throughput!(medium_32, medium(), 32);
throughput!(medium_1K, medium(), 1<<10);
throughput!(medium_32K,medium(), 32<<10);
throughput!(medium_1MB, medium(), 1<<20);
throughput!(hard_32, hard(), 32);
throughput!(hard_1K, hard(), 1<<10);
throughput!(hard_32K,hard(), 32<<10);
throughput!(hard_1MB, hard(), 1<<20);
|
let mut rng = thread_rng();
let mut bytes = rng.gen_ascii_chars().map(|n| n as u8).take(n)
.collect::<Vec<u8>>();
for (i, b) in bytes.iter_mut().enumerate() {
if i % 20 == 0 {
*b = b'\n'
}
}
String::from_utf8(bytes).unwrap()
}
th
|
identifier_body
|
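The `throughput!` macro in the bench.rs rows above stamps out one `#[bench]` function per (name, regex, input size) triple. For illustration, `throughput!(easy0_32, easy0(), 32)` expands to roughly the following; this is a hand expansion that leans on the `gen_text`, `easy0`, and `Bencher` items already shown in the file, not code the file itself contains:

```rust
#[bench]
fn easy0_32(b: &mut Bencher) {
    // 32 bytes of random ASCII with a '\n' forced in at every index divisible by 20.
    let text = gen_text(32);
    // Report throughput (bytes per iteration) rather than raw time.
    b.bytes = 32;
    let re = easy0();
    // The random haystack should never contain the anchored uppercase literal,
    // so an actual match means the regex engine is broken.
    b.iter(|| if re.is_match(&text) { panic!("match") });
}
```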
bench.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_snake_case)]
use std::iter::repeat;
use test::Bencher;
use rand::{Rng, thread_rng};
use regex::{Regex, NoExpand};
fn bench_assert_match(b: &mut Bencher, re: Regex, text: &str) {
b.iter(|| if !re.is_match(text) { panic!("no match") });
}
#[bench]
fn no_exponential(b: &mut Bencher) {
let n = 100;
let regex_string = format!(
"{}{}",
repeat("a?").take(n).collect::<String>(),
repeat("a").take(n).collect::<String>());
let re = Regex::new(®ex_string).unwrap();
let text: String = repeat("a").take(n).collect();
bench_assert_match(b, re, &text);
}
#[bench]
fn literal(b: &mut Bencher) {
let re = regex!("y");
let text = format!("{}y", repeat("x").take(50).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn not_literal(b: &mut Bencher) {
let re = regex!(".y");
let text = format!("{}y", repeat("x").take(50).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn match_class(b: &mut Bencher) {
let re = regex!("[abcdw]");
let text = format!("{}w", repeat("xxxx").take(20).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn match_class_in_range(b: &mut Bencher) {
// 'b' is between 'a' and 'c', so the class range checking doesn't help.
let re = regex!("[ac]");
let text = format!("{}c", repeat("bbbb").take(20).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn match_class_unicode(b: &mut Bencher) {
let re = regex!(r"\pL");
let text = format!("{}a", repeat("☃5☃5").take(20).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn replace_all(b: &mut Bencher) {
let re = regex!("[cjrw]");
let text = "abcdefghijklmnopqrstuvwxyz";
// FIXME: This isn't using the $name expand stuff.
// It's possible RE2/Go is using it, but currently, the expand in this
// crate is actually compiling a regex, so it's incredibly slow.
b.iter(|| re.replace_all(text, NoExpand("")));
}
#[bench]
fn anchored_literal_short_non_match(b: &mut Bencher) {
let re = regex!("^zbc(d|e)");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
#[bench]
|
}
#[bench]
fn anchored_literal_short_match(b: &mut Bencher) {
let re = regex!("^.bc(d|e)");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
#[bench]
fn anchored_literal_long_match(b: &mut Bencher) {
let re = regex!("^.bc(d|e)");
let text: String = repeat("abcdefghijklmnopqrstuvwxyz").take(15).collect();
b.iter(|| re.is_match(&text));
}
#[bench]
fn one_pass_short_a(b: &mut Bencher) {
let re = regex!("^.bc(d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_short_a_not(b: &mut Bencher) {
let re = regex!(".bc(d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_short_b(b: &mut Bencher) {
let re = regex!("^.bc(?:d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_short_b_not(b: &mut Bencher) {
let re = regex!(".bc(?:d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_long_prefix(b: &mut Bencher) {
let re = regex!("^abcdefghijklmnopqrstuvwxyz.*$");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_long_prefix_not(b: &mut Bencher) {
let re = regex!("^.bcdefghijklmnopqrstuvwxyz.*$");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
macro_rules! throughput(
($name:ident, $regex:expr, $size:expr) => (
#[bench]
fn $name(b: &mut Bencher) {
let text = gen_text($size);
b.bytes = $size;
let re = $regex;
b.iter(|| if re.is_match(&text) { panic!("match") });
}
);
);
fn easy0() -> Regex { regex!("ABCDEFGHIJKLMNOPQRSTUVWXYZ$") }
fn easy1() -> Regex { regex!("A[AB]B[BC]C[CD]D[DE]E[EF]F[FG]G[GH]H[HI]I[IJ]J$") }
fn medium() -> Regex { regex!("[XYZ]ABCDEFGHIJKLMNOPQRSTUVWXYZ$") }
fn hard() -> Regex { regex!("[ -~]*ABCDEFGHIJKLMNOPQRSTUVWXYZ$") }
fn gen_text(n: usize) -> String {
let mut rng = thread_rng();
let mut bytes = rng.gen_ascii_chars().map(|n| n as u8).take(n)
.collect::<Vec<u8>>();
for (i, b) in bytes.iter_mut().enumerate() {
if i % 20 == 0 {
*b = b'\n'
}
}
String::from_utf8(bytes).unwrap()
}
throughput!(easy0_32, easy0(), 32);
throughput!(easy0_1K, easy0(), 1<<10);
throughput!(easy0_32K, easy0(), 32<<10);
throughput!(easy0_1MB, easy0(), 1<<20);
throughput!(easy1_32, easy1(), 32);
throughput!(easy1_1K, easy1(), 1<<10);
throughput!(easy1_32K, easy1(), 32<<10);
throughput!(easy1_1MB, easy1(), 1<<20);
throughput!(medium_32, medium(), 32);
throughput!(medium_1K, medium(), 1<<10);
throughput!(medium_32K,medium(), 32<<10);
throughput!(medium_1MB, medium(), 1<<20);
throughput!(hard_32, hard(), 32);
throughput!(hard_1K, hard(), 1<<10);
throughput!(hard_32K,hard(), 32<<10);
throughput!(hard_1MB, hard(), 1<<20);
|
fn anchored_literal_long_non_match(b: &mut Bencher) {
let re = regex!("^zbc(d|e)");
let text: String = repeat("abcdefghijklmnopqrstuvwxyz").take(15).collect();
b.iter(|| re.is_match(&text));
|
random_line_split
|
bench.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_snake_case)]
use std::iter::repeat;
use test::Bencher;
use rand::{Rng, thread_rng};
use regex::{Regex, NoExpand};
fn
|
(b: &mut Bencher, re: Regex, text: &str) {
b.iter(|| if !re.is_match(text) { panic!("no match") });
}
#[bench]
fn no_exponential(b: &mut Bencher) {
let n = 100;
let regex_string = format!(
"{}{}",
repeat("a?").take(n).collect::<String>(),
repeat("a").take(n).collect::<String>());
let re = Regex::new(®ex_string).unwrap();
let text: String = repeat("a").take(n).collect();
bench_assert_match(b, re, &text);
}
#[bench]
fn literal(b: &mut Bencher) {
let re = regex!("y");
let text = format!("{}y", repeat("x").take(50).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn not_literal(b: &mut Bencher) {
let re = regex!(".y");
let text = format!("{}y", repeat("x").take(50).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn match_class(b: &mut Bencher) {
let re = regex!("[abcdw]");
let text = format!("{}w", repeat("xxxx").take(20).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn match_class_in_range(b: &mut Bencher) {
// 'b' is between 'a' and 'c', so the class range checking doesn't help.
let re = regex!("[ac]");
let text = format!("{}c", repeat("bbbb").take(20).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn match_class_unicode(b: &mut Bencher) {
let re = regex!(r"\pL");
let text = format!("{}a", repeat("☃5☃5").take(20).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn replace_all(b: &mut Bencher) {
let re = regex!("[cjrw]");
let text = "abcdefghijklmnopqrstuvwxyz";
// FIXME: This isn't using the $name expand stuff.
// It's possible RE2/Go is using it, but currently, the expand in this
// crate is actually compiling a regex, so it's incredibly slow.
b.iter(|| re.replace_all(text, NoExpand("")));
}
#[bench]
fn anchored_literal_short_non_match(b: &mut Bencher) {
let re = regex!("^zbc(d|e)");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
#[bench]
fn anchored_literal_long_non_match(b: &mut Bencher) {
let re = regex!("^zbc(d|e)");
let text: String = repeat("abcdefghijklmnopqrstuvwxyz").take(15).collect();
b.iter(|| re.is_match(&text));
}
#[bench]
fn anchored_literal_short_match(b: &mut Bencher) {
let re = regex!("^.bc(d|e)");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
#[bench]
fn anchored_literal_long_match(b: &mut Bencher) {
let re = regex!("^.bc(d|e)");
let text: String = repeat("abcdefghijklmnopqrstuvwxyz").take(15).collect();
b.iter(|| re.is_match(&text));
}
#[bench]
fn one_pass_short_a(b: &mut Bencher) {
let re = regex!("^.bc(d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_short_a_not(b: &mut Bencher) {
let re = regex!(".bc(d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_short_b(b: &mut Bencher) {
let re = regex!("^.bc(?:d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_short_b_not(b: &mut Bencher) {
let re = regex!(".bc(?:d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_long_prefix(b: &mut Bencher) {
let re = regex!("^abcdefghijklmnopqrstuvwxyz.*$");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_long_prefix_not(b: &mut Bencher) {
let re = regex!("^.bcdefghijklmnopqrstuvwxyz.*$");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
macro_rules! throughput(
($name:ident, $regex:expr, $size:expr) => (
#[bench]
fn $name(b: &mut Bencher) {
let text = gen_text($size);
b.bytes = $size;
let re = $regex;
b.iter(|| if re.is_match(&text) { panic!("match") });
}
);
);
fn easy0() -> Regex { regex!("ABCDEFGHIJKLMNOPQRSTUVWXYZ$") }
fn easy1() -> Regex { regex!("A[AB]B[BC]C[CD]D[DE]E[EF]F[FG]G[GH]H[HI]I[IJ]J$") }
fn medium() -> Regex { regex!("[XYZ]ABCDEFGHIJKLMNOPQRSTUVWXYZ$") }
fn hard() -> Regex { regex!("[ -~]*ABCDEFGHIJKLMNOPQRSTUVWXYZ$") }
fn gen_text(n: usize) -> String {
let mut rng = thread_rng();
let mut bytes = rng.gen_ascii_chars().map(|n| n as u8).take(n)
.collect::<Vec<u8>>();
for (i, b) in bytes.iter_mut().enumerate() {
if i % 20 == 0 {
*b = b'\n'
}
}
String::from_utf8(bytes).unwrap()
}
throughput!(easy0_32, easy0(), 32);
throughput!(easy0_1K, easy0(), 1<<10);
throughput!(easy0_32K, easy0(), 32<<10);
throughput!(easy0_1MB, easy0(), 1<<20);
throughput!(easy1_32, easy1(), 32);
throughput!(easy1_1K, easy1(), 1<<10);
throughput!(easy1_32K, easy1(), 32<<10);
throughput!(easy1_1MB, easy1(), 1<<20);
throughput!(medium_32, medium(), 32);
throughput!(medium_1K, medium(), 1<<10);
throughput!(medium_32K,medium(), 32<<10);
throughput!(medium_1MB, medium(), 1<<20);
throughput!(hard_32, hard(), 32);
throughput!(hard_1K, hard(), 1<<10);
throughput!(hard_32K,hard(), 32<<10);
throughput!(hard_1MB, hard(), 1<<20);
|
bench_assert_match
|
identifier_name
|
bench.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_snake_case)]
use std::iter::repeat;
use test::Bencher;
use rand::{Rng, thread_rng};
use regex::{Regex, NoExpand};
fn bench_assert_match(b: &mut Bencher, re: Regex, text: &str) {
b.iter(|| if !re.is_match(text)
|
);
}
#[bench]
fn no_exponential(b: &mut Bencher) {
let n = 100;
let regex_string = format!(
"{}{}",
repeat("a?").take(n).collect::<String>(),
repeat("a").take(n).collect::<String>());
let re = Regex::new(®ex_string).unwrap();
let text: String = repeat("a").take(n).collect();
bench_assert_match(b, re, &text);
}
#[bench]
fn literal(b: &mut Bencher) {
let re = regex!("y");
let text = format!("{}y", repeat("x").take(50).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn not_literal(b: &mut Bencher) {
let re = regex!(".y");
let text = format!("{}y", repeat("x").take(50).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn match_class(b: &mut Bencher) {
let re = regex!("[abcdw]");
let text = format!("{}w", repeat("xxxx").take(20).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn match_class_in_range(b: &mut Bencher) {
// 'b' is between 'a' and 'c', so the class range checking doesn't help.
let re = regex!("[ac]");
let text = format!("{}c", repeat("bbbb").take(20).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn match_class_unicode(b: &mut Bencher) {
let re = regex!(r"\pL");
let text = format!("{}a", repeat("☃5☃5").take(20).collect::<String>());
bench_assert_match(b, re, &text);
}
#[bench]
fn replace_all(b: &mut Bencher) {
let re = regex!("[cjrw]");
let text = "abcdefghijklmnopqrstuvwxyz";
// FIXME: This isn't using the $name expand stuff.
// It's possible RE2/Go is using it, but currently, the expand in this
// crate is actually compiling a regex, so it's incredibly slow.
b.iter(|| re.replace_all(text, NoExpand("")));
}
#[bench]
fn anchored_literal_short_non_match(b: &mut Bencher) {
let re = regex!("^zbc(d|e)");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
#[bench]
fn anchored_literal_long_non_match(b: &mut Bencher) {
let re = regex!("^zbc(d|e)");
let text: String = repeat("abcdefghijklmnopqrstuvwxyz").take(15).collect();
b.iter(|| re.is_match(&text));
}
#[bench]
fn anchored_literal_short_match(b: &mut Bencher) {
let re = regex!("^.bc(d|e)");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
#[bench]
fn anchored_literal_long_match(b: &mut Bencher) {
let re = regex!("^.bc(d|e)");
let text: String = repeat("abcdefghijklmnopqrstuvwxyz").take(15).collect();
b.iter(|| re.is_match(&text));
}
#[bench]
fn one_pass_short_a(b: &mut Bencher) {
let re = regex!("^.bc(d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_short_a_not(b: &mut Bencher) {
let re = regex!(".bc(d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_short_b(b: &mut Bencher) {
let re = regex!("^.bc(?:d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_short_b_not(b: &mut Bencher) {
let re = regex!(".bc(?:d|e)*$");
let text = "abcddddddeeeededd";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_long_prefix(b: &mut Bencher) {
let re = regex!("^abcdefghijklmnopqrstuvwxyz.*$");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
#[bench]
fn one_pass_long_prefix_not(b: &mut Bencher) {
let re = regex!("^.bcdefghijklmnopqrstuvwxyz.*$");
let text = "abcdefghijklmnopqrstuvwxyz";
b.iter(|| re.is_match(text));
}
macro_rules! throughput(
($name:ident, $regex:expr, $size:expr) => (
#[bench]
fn $name(b: &mut Bencher) {
let text = gen_text($size);
b.bytes = $size;
let re = $regex;
b.iter(|| if re.is_match(&text) { panic!("match") });
}
);
);
fn easy0() -> Regex { regex!("ABCDEFGHIJKLMNOPQRSTUVWXYZ$") }
fn easy1() -> Regex { regex!("A[AB]B[BC]C[CD]D[DE]E[EF]F[FG]G[GH]H[HI]I[IJ]J$") }
fn medium() -> Regex { regex!("[XYZ]ABCDEFGHIJKLMNOPQRSTUVWXYZ$") }
fn hard() -> Regex { regex!("[ -~]*ABCDEFGHIJKLMNOPQRSTUVWXYZ$") }
fn gen_text(n: usize) -> String {
let mut rng = thread_rng();
let mut bytes = rng.gen_ascii_chars().map(|n| n as u8).take(n)
.collect::<Vec<u8>>();
for (i, b) in bytes.iter_mut().enumerate() {
if i % 20 == 0 {
*b = b'\n'
}
}
String::from_utf8(bytes).unwrap()
}
throughput!(easy0_32, easy0(), 32);
throughput!(easy0_1K, easy0(), 1<<10);
throughput!(easy0_32K, easy0(), 32<<10);
throughput!(easy0_1MB, easy0(), 1<<20);
throughput!(easy1_32, easy1(), 32);
throughput!(easy1_1K, easy1(), 1<<10);
throughput!(easy1_32K, easy1(), 32<<10);
throughput!(easy1_1MB, easy1(), 1<<20);
throughput!(medium_32, medium(), 32);
throughput!(medium_1K, medium(), 1<<10);
throughput!(medium_32K,medium(), 32<<10);
throughput!(medium_1MB, medium(), 1<<20);
throughput!(hard_32, hard(), 32);
throughput!(hard_1K, hard(), 1<<10);
throughput!(hard_32K,hard(), 32<<10);
throughput!(hard_1MB, hard(), 1<<20);
|
{ panic!("no match") }
|
conditional_block
|
mutex.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use cell::UnsafeCell;
use fmt;
use marker;
use ops::{Deref, DerefMut};
use sys_common::mutex as sys;
use sys_common::poison::{self, TryLockError, TryLockResult, LockResult};
/// A mutual exclusion primitive useful for protecting shared data
///
/// This mutex will block threads waiting for the lock to become available. The
/// mutex can also be statically initialized or created via a `new`
/// constructor. Each mutex has a type parameter which represents the data that
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from `lock` and `try_lock`, which guarantees that the data is only
/// ever accessed when the mutex is locked.
///
/// # Poisoning
///
/// The mutexes in this module implement a strategy called "poisoning" where a
/// mutex is considered poisoned whenever a thread panics while holding the
/// lock. Once a mutex is poisoned, all other threads are unable to access the
/// data by default as it is likely tainted (some invariant is not being
/// upheld).
///
/// For a mutex, this means that the `lock` and `try_lock` methods return a
/// `Result` which indicates whether a mutex has been poisoned or not. Most
/// usage of a mutex will simply `unwrap()` these results, propagating panics
/// among threads to ensure that a possibly invalid invariant is not witnessed.
///
/// A poisoned mutex, however, does not prevent all access to the underlying
/// data. The `PoisonError` type has an `into_inner` method which will return
/// the guard that would have otherwise been returned on a successful lock. This
/// allows access to the data, despite the lock being poisoned.
///
/// # Examples
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
/// use std::sync::mpsc::channel;
///
/// const N: usize = 10;
///
/// // Spawn a few threads to increment a shared variable (non-atomically), and
/// // let the main thread know once all increments are done.
/// //
/// // Here we're using an Arc to share memory among threads, and the data inside
/// // the Arc is protected with a mutex.
/// let data = Arc::new(Mutex::new(0));
///
/// let (tx, rx) = channel();
/// for _ in 0..10 {
/// let (data, tx) = (data.clone(), tx.clone());
/// thread::spawn(move || {
/// // The shared static can only be accessed once the lock is held.
/// // Our non-atomic increment is safe because we're the only thread
/// // which can access the shared state when the lock is held.
/// //
/// // We unwrap() the return value to assert that we are not expecting
/// // threads to ever fail while holding the lock.
/// let mut data = data.lock().unwrap();
/// *data += 1;
/// if *data == N {
/// tx.send(()).unwrap();
/// }
/// // the lock is unlocked here when `data` goes out of scope.
/// });
/// }
///
/// rx.recv().unwrap();
/// ```
///
/// To recover from a poisoned mutex:
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let lock = Arc::new(Mutex::new(0_u32));
/// let lock2 = lock.clone();
///
/// let _ = thread::spawn(move || -> () {
/// // This thread will acquire the mutex first, unwrapping the result of
/// // `lock` because the lock has not been poisoned.
/// let _lock = lock2.lock().unwrap();
///
/// // This panic while holding the lock (`_lock` is in scope) will poison
/// // the mutex.
/// panic!();
/// }).join();
///
/// // The lock is poisoned by this point, but the returned result can be
/// // pattern matched on to return the underlying guard on both branches.
/// let mut guard = match lock.lock() {
/// Ok(guard) => guard,
/// Err(poisoned) => poisoned.into_inner(),
/// };
///
/// *guard += 1;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Mutex<T:?Sized> {
// Note that this static mutex is in a *box*, not inlined into the struct
// itself. Once a native mutex has been used once, its address can never
// change (it can't be moved). This mutex type can be safely moved at any
// time, so to ensure that the native mutex is used correctly we box the
// inner lock to give it a constant address.
inner: Box<StaticMutex>,
data: UnsafeCell<T>,
}
// these are the only places where `T: Send` matters; all other
// functionality works fine on a single thread.
unsafe impl<T:?Sized + Send> Send for Mutex<T> { }
unsafe impl<T:?Sized + Send> Sync for Mutex<T> { }
/// The static mutex type is provided to allow for static allocation of mutexes.
///
/// Note that this is a separate type because using a Mutex correctly means that
/// it needs to have a destructor run. In Rust, statics are not allowed to have
/// destructors. As a result, a `StaticMutex` has one extra method when compared
/// to a `Mutex`, a `destroy` method. This method is unsafe to call, and
/// documentation can be found directly on the method.
///
/// # Examples
///
/// ```
/// # #![feature(static_mutex)]
/// use std::sync::{StaticMutex, MUTEX_INIT};
///
/// static LOCK: StaticMutex = MUTEX_INIT;
///
/// {
/// let _g = LOCK.lock().unwrap();
/// // do some productive work
/// }
/// // lock is unlocked here.
/// ```
#[unstable(feature = "static_mutex",
reason = "may be merged with Mutex in the future")]
pub struct StaticMutex {
lock: sys::Mutex,
poison: poison::Flag,
}
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` and `DerefMut` implementations.
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct MutexGuard<'a, T:?Sized + 'a> {
// funny underscores due to how Deref/DerefMut currently work (they
// disregard field privacy).
__lock: &'a StaticMutex,
__data: &'a UnsafeCell<T>,
__poison: poison::Guard,
}
impl<'a, T:?Sized>!marker::Send for MutexGuard<'a, T> {}
/// Static initialization of a mutex. This constant can be used to initialize
/// other mutex constants.
#[unstable(feature = "static_mutex",
reason = "may be merged with Mutex in the future")]
pub const MUTEX_INIT: StaticMutex = StaticMutex::new();
impl<T> Mutex<T> {
/// Creates a new mutex in an unlocked state ready for use.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(t: T) -> Mutex<T> {
Mutex {
inner: box StaticMutex::new(),
data: UnsafeCell::new(t),
}
}
}
impl<T:?Sized> Mutex<T> {
/// Acquires a mutex, blocking the current thread until it is able to do so.
///
/// This function will block the local thread until it is available to acquire
/// the mutex. Upon returning, the thread is the only thread with the mutex
/// held. An RAII guard is returned to allow scoped unlock of the lock. When
/// the guard goes out of scope, the mutex will be unlocked.
///
/// # Failure
///
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return an error once the mutex is acquired.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn lock(&self) -> LockResult<MutexGuard<T>> {
unsafe { self.inner.lock.lock() }
MutexGuard::new(&*self.inner, &self.data)
}
/// Attempts to acquire this lock.
///
/// If the lock could not be acquired at this time, then `Err` is returned.
/// Otherwise, an RAII guard is returned. The lock will be unlocked when the
/// guard is dropped.
///
/// This function does not block.
///
/// # Failure
///
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return failure if the mutex would otherwise be
/// acquired.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn try_lock(&self) -> TryLockResult<MutexGuard<T>> {
if unsafe { self.inner.lock.try_lock() } {
Ok(try!(MutexGuard::new(&*self.inner, &self.data)))
} else {
Err(TryLockError::WouldBlock)
}
}
/// Determines whether the lock is poisoned.
///
/// If another thread is active, the lock can still become poisoned at any
/// time. You should not trust a `false` value for program correctness
/// without additional synchronization.
#[inline]
#[stable(feature = "sync_poison", since = "1.2.0")]
pub fn is_poisoned(&self) -> bool {
self.inner.poison.get()
}
}
|
// dropped, that's not our job)
unsafe { self.inner.lock.destroy() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized + fmt::Debug +'static> fmt::Debug for Mutex<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_lock() {
Ok(guard) => write!(f, "Mutex {{ data: {:?} }}", &*guard),
Err(TryLockError::Poisoned(err)) => {
write!(f, "Mutex {{ data: Poisoned({:?}) }}", &**err.get_ref())
},
Err(TryLockError::WouldBlock) => write!(f, "Mutex {{ <locked> }}")
}
}
}
struct Dummy(UnsafeCell<()>);
unsafe impl Sync for Dummy {}
static DUMMY: Dummy = Dummy(UnsafeCell::new(()));
#[unstable(feature = "static_mutex",
reason = "may be merged with Mutex in the future")]
impl StaticMutex {
/// Creates a new mutex in an unlocked state ready for use.
pub const fn new() -> StaticMutex {
StaticMutex {
lock: sys::Mutex::new(),
poison: poison::Flag::new(),
}
}
/// Acquires this lock, see `Mutex::lock`
#[inline]
pub fn lock(&'static self) -> LockResult<MutexGuard<()>> {
unsafe { self.lock.lock() }
MutexGuard::new(self, &DUMMY.0)
}
/// Attempts to grab this lock, see `Mutex::try_lock`
#[inline]
pub fn try_lock(&'static self) -> TryLockResult<MutexGuard<()>> {
if unsafe { self.lock.try_lock() } {
Ok(try!(MutexGuard::new(self, &DUMMY.0)))
} else {
Err(TryLockError::WouldBlock)
}
}
/// Deallocates resources associated with this static mutex.
///
/// This method is unsafe because it provides no guarantees that there are
/// no active users of this mutex, and safety is not guaranteed if there are
/// active users of this mutex.
///
/// This method is required to ensure that there are no memory leaks on
/// *all* platforms. It may be the case that some platforms do not leak
/// memory if this method is not called, but this is not guaranteed to be
/// true on all platforms.
pub unsafe fn destroy(&'static self) {
self.lock.destroy()
}
}
impl<'mutex, T:?Sized> MutexGuard<'mutex, T> {
fn new(lock: &'mutex StaticMutex, data: &'mutex UnsafeCell<T>)
-> LockResult<MutexGuard<'mutex, T>> {
poison::map_result(lock.poison.borrow(), |guard| {
MutexGuard {
__lock: lock,
__data: data,
__poison: guard,
}
})
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'mutex, T:?Sized> Deref for MutexGuard<'mutex, T> {
type Target = T;
fn deref<'a>(&'a self) -> &'a T {
unsafe { &*self.__data.get() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'mutex, T:?Sized> DerefMut for MutexGuard<'mutex, T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut T {
unsafe { &mut *self.__data.get() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T:?Sized> Drop for MutexGuard<'a, T> {
#[inline]
fn drop(&mut self) {
unsafe {
self.__lock.poison.done(&self.__poison);
self.__lock.lock.unlock();
}
}
}
pub fn guard_lock<'a, T:?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::Mutex {
&guard.__lock.lock
}
pub fn guard_poison<'a, T:?Sized>(guard: &MutexGuard<'a, T>) -> &'a poison::Flag {
&guard.__lock.poison
}
#[cfg(test)]
mod tests {
use prelude::v1::*;
use sync::mpsc::channel;
use sync::{Arc, Mutex, StaticMutex, Condvar};
use thread;
struct Packet<T: Send>(Arc<(Mutex<T>, Condvar)>);
unsafe impl<T: Send> Send for Packet<T> {}
unsafe impl<T> Sync for Packet<T> {}
#[test]
fn smoke() {
let m = Mutex::new(());
drop(m.lock().unwrap());
drop(m.lock().unwrap());
}
#[test]
fn smoke_static() {
static M: StaticMutex = StaticMutex::new();
unsafe {
drop(M.lock().unwrap());
drop(M.lock().unwrap());
M.destroy();
}
}
#[test]
fn lots_and_lots() {
static M: StaticMutex = StaticMutex::new();
static mut CNT: u32 = 0;
const J: u32 = 1000;
const K: u32 = 3;
fn inc() {
for _ in 0..J {
unsafe {
let _g = M.lock().unwrap();
CNT += 1;
}
}
}
let (tx, rx) = channel();
for _ in 0..K {
let tx2 = tx.clone();
thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
let tx2 = tx.clone();
thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
}
drop(tx);
for _ in 0..2 * K {
rx.recv().unwrap();
}
assert_eq!(unsafe {CNT}, J * K * 2);
unsafe {
M.destroy();
}
}
#[test]
fn try_lock() {
let m = Mutex::new(());
*m.try_lock().unwrap() = ();
}
#[test]
fn test_mutex_arc_condvar() {
let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
let packet2 = Packet(packet.0.clone());
let (tx, rx) = channel();
let _t = thread::spawn(move|| {
// wait until parent gets in
rx.recv().unwrap();
let &(ref lock, ref cvar) = &*packet2.0;
let mut lock = lock.lock().unwrap();
*lock = true;
cvar.notify_one();
});
let &(ref lock, ref cvar) = &*packet.0;
let mut lock = lock.lock().unwrap();
tx.send(()).unwrap();
assert!(!*lock);
while !*lock {
lock = cvar.wait(lock).unwrap();
}
}
#[test]
fn test_arc_condvar_poison() {
let packet = Packet(Arc::new((Mutex::new(1), Condvar::new())));
let packet2 = Packet(packet.0.clone());
let (tx, rx) = channel();
let _t = thread::spawn(move || -> () {
rx.recv().unwrap();
let &(ref lock, ref cvar) = &*packet2.0;
let _g = lock.lock().unwrap();
cvar.notify_one();
// Parent should fail when it wakes up.
panic!();
});
let &(ref lock, ref cvar) = &*packet.0;
let mut lock = lock.lock().unwrap();
tx.send(()).unwrap();
while *lock == 1 {
match cvar.wait(lock) {
Ok(l) => {
lock = l;
assert_eq!(*lock, 1);
}
Err(..) => break,
}
}
}
#[test]
fn test_mutex_arc_poison() {
let arc = Arc::new(Mutex::new(1));
assert!(!arc.is_poisoned());
let arc2 = arc.clone();
let _ = thread::spawn(move|| {
let lock = arc2.lock().unwrap();
assert_eq!(*lock, 2);
}).join();
assert!(arc.lock().is_err());
assert!(arc.is_poisoned());
}
#[test]
fn test_mutex_arc_nested() {
// Tests nested mutexes and access
// to underlying data.
let arc = Arc::new(Mutex::new(1));
let arc2 = Arc::new(Mutex::new(arc));
let (tx, rx) = channel();
let _t = thread::spawn(move|| {
let lock = arc2.lock().unwrap();
let lock2 = lock.lock().unwrap();
assert_eq!(*lock2, 1);
tx.send(()).unwrap();
});
rx.recv().unwrap();
}
#[test]
fn test_mutex_arc_access_in_unwind() {
let arc = Arc::new(Mutex::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move|| -> () {
struct Unwinder {
i: Arc<Mutex<i32>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
*self.i.lock().unwrap() += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
}).join();
let lock = arc.lock().unwrap();
assert_eq!(*lock, 2);
}
// FIXME(#25351) needs deeply nested coercions of DST structs.
// #[test]
// fn test_mutex_unsized() {
// let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
// {
// let b = &mut *mutex.lock().unwrap();
// b[0] = 4;
// b[2] = 5;
// }
// let comp: &[i32] = &[4, 2, 5];
// assert_eq!(&*mutex.lock().unwrap(), comp);
// }
}
|
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for Mutex<T> {
fn drop(&mut self) {
// This is actually safe b/c we know that there is no further usage of
// this mutex (it's up to the user to arrange for a mutex to get
|
random_line_split
|
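The poisoning and `try_lock` behaviour documented in the mutex.rs rows above can be exercised end to end with the public `std::sync` API. The sketch below is illustrative only: it uses today's public `Mutex`, `TryLockError`, and `PoisonError::into_inner` (whose semantics match what is described above) rather than the internal `StaticMutex` machinery shown in the file:

```rust
use std::sync::{Arc, Mutex, TryLockError};
use std::thread;

fn main() {
    let lock = Arc::new(Mutex::new(0_u32));

    // Poison the mutex: a child thread panics on purpose while holding the guard
    // (its panic message on stderr is expected).
    let lock2 = Arc::clone(&lock);
    let _ = thread::spawn(move || -> () {
        let _guard = lock2.lock().unwrap();
        panic!("poisoning the lock on purpose");
    })
    .join();

    // The poison flag is now set, and both lock() and try_lock() surface it.
    assert!(lock.is_poisoned());
    match lock.try_lock() {
        Err(TryLockError::Poisoned(poisoned)) => {
            // The data is still reachable through the error value.
            let guard = poisoned.into_inner();
            println!("recovered value despite poisoning: {}", *guard);
        }
        Err(TryLockError::WouldBlock) => println!("lock is currently held elsewhere"),
        Ok(guard) => println!("lock was clean: {}", *guard),
    }
}
```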
mutex.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use prelude::v1::*;
use cell::UnsafeCell;
use fmt;
use marker;
use ops::{Deref, DerefMut};
use sys_common::mutex as sys;
use sys_common::poison::{self, TryLockError, TryLockResult, LockResult};
/// A mutual exclusion primitive useful for protecting shared data
///
/// This mutex will block threads waiting for the lock to become available. The
/// mutex can also be statically initialized or created via a `new`
/// constructor. Each mutex has a type parameter which represents the data that
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from `lock` and `try_lock`, which guarantees that the data is only
/// ever accessed when the mutex is locked.
///
/// # Poisoning
///
/// The mutexes in this module implement a strategy called "poisoning" where a
/// mutex is considered poisoned whenever a thread panics while holding the
/// lock. Once a mutex is poisoned, all other threads are unable to access the
/// data by default as it is likely tainted (some invariant is not being
/// upheld).
///
/// For a mutex, this means that the `lock` and `try_lock` methods return a
/// `Result` which indicates whether a mutex has been poisoned or not. Most
/// usage of a mutex will simply `unwrap()` these results, propagating panics
/// among threads to ensure that a possibly invalid invariant is not witnessed.
///
/// A poisoned mutex, however, does not prevent all access to the underlying
/// data. The `PoisonError` type has an `into_inner` method which will return
/// the guard that would have otherwise been returned on a successful lock. This
/// allows access to the data, despite the lock being poisoned.
///
/// # Examples
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
/// use std::sync::mpsc::channel;
///
/// const N: usize = 10;
///
/// // Spawn a few threads to increment a shared variable (non-atomically), and
/// // let the main thread know once all increments are done.
/// //
/// // Here we're using an Arc to share memory among threads, and the data inside
/// // the Arc is protected with a mutex.
/// let data = Arc::new(Mutex::new(0));
///
/// let (tx, rx) = channel();
/// for _ in 0..10 {
/// let (data, tx) = (data.clone(), tx.clone());
/// thread::spawn(move || {
/// // The shared static can only be accessed once the lock is held.
/// // Our non-atomic increment is safe because we're the only thread
/// // which can access the shared state when the lock is held.
/// //
/// // We unwrap() the return value to assert that we are not expecting
/// // threads to ever fail while holding the lock.
/// let mut data = data.lock().unwrap();
/// *data += 1;
/// if *data == N {
/// tx.send(()).unwrap();
/// }
/// // the lock is unlocked here when `data` goes out of scope.
/// });
/// }
///
/// rx.recv().unwrap();
/// ```
///
/// To recover from a poisoned mutex:
///
/// ```
/// use std::sync::{Arc, Mutex};
/// use std::thread;
///
/// let lock = Arc::new(Mutex::new(0_u32));
/// let lock2 = lock.clone();
///
/// let _ = thread::spawn(move || -> () {
/// // This thread will acquire the mutex first, unwrapping the result of
/// // `lock` because the lock has not been poisoned.
/// let _lock = lock2.lock().unwrap();
///
/// // This panic while holding the lock (`_lock` is in scope) will poison
/// // the mutex.
/// panic!();
/// }).join();
///
/// // The lock is poisoned by this point, but the returned result can be
/// // pattern matched on to return the underlying guard on both branches.
/// let mut guard = match lock.lock() {
/// Ok(guard) => guard,
/// Err(poisoned) => poisoned.into_inner(),
/// };
///
/// *guard += 1;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Mutex<T:?Sized> {
// Note that this static mutex is in a *box*, not inlined into the struct
// itself. Once a native mutex has been used once, its address can never
// change (it can't be moved). This mutex type can be safely moved at any
// time, so to ensure that the native mutex is used correctly we box the
// inner lock to give it a constant address.
inner: Box<StaticMutex>,
data: UnsafeCell<T>,
}
// these are the only places where `T: Send` matters; all other
// functionality works fine on a single thread.
unsafe impl<T:?Sized + Send> Send for Mutex<T> { }
unsafe impl<T:?Sized + Send> Sync for Mutex<T> { }
/// The static mutex type is provided to allow for static allocation of mutexes.
///
/// Note that this is a separate type because using a Mutex correctly means that
/// it needs to have a destructor run. In Rust, statics are not allowed to have
/// destructors. As a result, a `StaticMutex` has one extra method when compared
/// to a `Mutex`, a `destroy` method. This method is unsafe to call, and
/// documentation can be found directly on the method.
///
/// # Examples
///
/// ```
/// # #![feature(static_mutex)]
/// use std::sync::{StaticMutex, MUTEX_INIT};
///
/// static LOCK: StaticMutex = MUTEX_INIT;
///
/// {
/// let _g = LOCK.lock().unwrap();
/// // do some productive work
/// }
/// // lock is unlocked here.
/// ```
#[unstable(feature = "static_mutex",
reason = "may be merged with Mutex in the future")]
pub struct StaticMutex {
lock: sys::Mutex,
poison: poison::Flag,
}
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` and `DerefMut` implementations.
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct MutexGuard<'a, T:?Sized + 'a> {
// funny underscores due to how Deref/DerefMut currently work (they
// disregard field privacy).
__lock: &'a StaticMutex,
__data: &'a UnsafeCell<T>,
__poison: poison::Guard,
}
impl<'a, T:?Sized>!marker::Send for MutexGuard<'a, T> {}
/// Static initialization of a mutex. This constant can be used to initialize
/// other mutex constants.
#[unstable(feature = "static_mutex",
reason = "may be merged with Mutex in the future")]
pub const MUTEX_INIT: StaticMutex = StaticMutex::new();
impl<T> Mutex<T> {
/// Creates a new mutex in an unlocked state ready for use.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new(t: T) -> Mutex<T> {
Mutex {
inner: box StaticMutex::new(),
data: UnsafeCell::new(t),
}
}
}
impl<T:?Sized> Mutex<T> {
/// Acquires a mutex, blocking the current thread until it is able to do so.
///
/// This function will block the local thread until it is available to acquire
/// the mutex. Upon returning, the thread is the only thread with the mutex
/// held. An RAII guard is returned to allow scoped unlock of the lock. When
/// the guard goes out of scope, the mutex will be unlocked.
///
/// # Failure
///
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return an error once the mutex is acquired.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn lock(&self) -> LockResult<MutexGuard<T>> {
unsafe { self.inner.lock.lock() }
MutexGuard::new(&*self.inner, &self.data)
}
/// Attempts to acquire this lock.
///
/// If the lock could not be acquired at this time, then `Err` is returned.
/// Otherwise, an RAII guard is returned. The lock will be unlocked when the
/// guard is dropped.
///
/// This function does not block.
///
/// # Failure
///
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return failure if the mutex would otherwise be
/// acquired.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn try_lock(&self) -> TryLockResult<MutexGuard<T>> {
if unsafe { self.inner.lock.try_lock() } {
Ok(try!(MutexGuard::new(&*self.inner, &self.data)))
} else {
Err(TryLockError::WouldBlock)
}
}
/// Determines whether the lock is poisoned.
///
/// If another thread is active, the lock can still become poisoned at any
/// time. You should not trust a `false` value for program correctness
/// without additional synchronization.
#[inline]
#[stable(feature = "sync_poison", since = "1.2.0")]
pub fn is_poisoned(&self) -> bool {
self.inner.poison.get()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Drop for Mutex<T> {
fn drop(&mut self) {
// This is actually safe b/c we know that there is no further usage of
// this mutex (it's up to the user to arrange for a mutex to get
// dropped, that's not our job)
unsafe { self.inner.lock.destroy() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized + fmt::Debug +'static> fmt::Debug for Mutex<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_lock() {
Ok(guard) => write!(f, "Mutex {{ data: {:?} }}", &*guard),
Err(TryLockError::Poisoned(err)) => {
write!(f, "Mutex {{ data: Poisoned({:?}) }}", &**err.get_ref())
},
Err(TryLockError::WouldBlock) => write!(f, "Mutex {{ <locked> }}")
}
}
}
struct Dummy(UnsafeCell<()>);
unsafe impl Sync for Dummy {}
static DUMMY: Dummy = Dummy(UnsafeCell::new(()));
#[unstable(feature = "static_mutex",
reason = "may be merged with Mutex in the future")]
impl StaticMutex {
/// Creates a new mutex in an unlocked state ready for use.
pub const fn new() -> StaticMutex {
StaticMutex {
lock: sys::Mutex::new(),
poison: poison::Flag::new(),
}
}
/// Acquires this lock, see `Mutex::lock`
#[inline]
pub fn lock(&'static self) -> LockResult<MutexGuard<()>> {
unsafe { self.lock.lock() }
MutexGuard::new(self, &DUMMY.0)
}
/// Attempts to grab this lock, see `Mutex::try_lock`
#[inline]
pub fn try_lock(&'static self) -> TryLockResult<MutexGuard<()>> {
if unsafe { self.lock.try_lock() } {
Ok(try!(MutexGuard::new(self, &DUMMY.0)))
} else {
Err(TryLockError::WouldBlock)
}
}
/// Deallocates resources associated with this static mutex.
///
/// This method is unsafe because it provides no guarantees that there are
/// no active users of this mutex, and safety is not guaranteed if there are
/// active users of this mutex.
///
/// This method is required to ensure that there are no memory leaks on
/// *all* platforms. It may be the case that some platforms do not leak
/// memory if this method is not called, but this is not guaranteed to be
/// true on all platforms.
pub unsafe fn destroy(&'static self) {
self.lock.destroy()
}
}
impl<'mutex, T:?Sized> MutexGuard<'mutex, T> {
fn new(lock: &'mutex StaticMutex, data: &'mutex UnsafeCell<T>)
-> LockResult<MutexGuard<'mutex, T>> {
poison::map_result(lock.poison.borrow(), |guard| {
MutexGuard {
__lock: lock,
__data: data,
__poison: guard,
}
})
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'mutex, T:?Sized> Deref for MutexGuard<'mutex, T> {
type Target = T;
fn deref<'a>(&'a self) -> &'a T {
unsafe { &*self.__data.get() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'mutex, T:?Sized> DerefMut for MutexGuard<'mutex, T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut T {
unsafe { &mut *self.__data.get() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T:?Sized> Drop for MutexGuard<'a, T> {
#[inline]
fn drop(&mut self) {
unsafe {
self.__lock.poison.done(&self.__poison);
self.__lock.lock.unlock();
}
}
}
pub fn guard_lock<'a, T:?Sized>(guard: &MutexGuard<'a, T>) -> &'a sys::Mutex {
&guard.__lock.lock
}
pub fn guard_poison<'a, T:?Sized>(guard: &MutexGuard<'a, T>) -> &'a poison::Flag {
&guard.__lock.poison
}
#[cfg(test)]
mod tests {
use prelude::v1::*;
use sync::mpsc::channel;
use sync::{Arc, Mutex, StaticMutex, Condvar};
use thread;
struct Packet<T: Send>(Arc<(Mutex<T>, Condvar)>);
unsafe impl<T: Send> Send for Packet<T> {}
unsafe impl<T> Sync for Packet<T> {}
#[test]
fn smoke() {
let m = Mutex::new(());
drop(m.lock().unwrap());
drop(m.lock().unwrap());
}
#[test]
fn smoke_static() {
static M: StaticMutex = StaticMutex::new();
unsafe {
drop(M.lock().unwrap());
drop(M.lock().unwrap());
M.destroy();
}
}
#[test]
fn lots_and_lots() {
static M: StaticMutex = StaticMutex::new();
static mut CNT: u32 = 0;
const J: u32 = 1000;
const K: u32 = 3;
fn inc() {
for _ in 0..J {
unsafe {
let _g = M.lock().unwrap();
CNT += 1;
}
}
}
let (tx, rx) = channel();
for _ in 0..K {
let tx2 = tx.clone();
thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
let tx2 = tx.clone();
thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
}
drop(tx);
for _ in 0..2 * K {
rx.recv().unwrap();
}
assert_eq!(unsafe {CNT}, J * K * 2);
unsafe {
M.destroy();
}
}
#[test]
fn try_lock() {
let m = Mutex::new(());
*m.try_lock().unwrap() = ();
}
#[test]
fn test_mutex_arc_condvar() {
let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
let packet2 = Packet(packet.0.clone());
let (tx, rx) = channel();
let _t = thread::spawn(move|| {
// wait until parent gets in
rx.recv().unwrap();
let &(ref lock, ref cvar) = &*packet2.0;
let mut lock = lock.lock().unwrap();
*lock = true;
cvar.notify_one();
});
let &(ref lock, ref cvar) = &*packet.0;
let mut lock = lock.lock().unwrap();
tx.send(()).unwrap();
assert!(!*lock);
while !*lock {
lock = cvar.wait(lock).unwrap();
}
}
#[test]
fn test_arc_condvar_poison() {
let packet = Packet(Arc::new((Mutex::new(1), Condvar::new())));
let packet2 = Packet(packet.0.clone());
let (tx, rx) = channel();
let _t = thread::spawn(move || -> () {
rx.recv().unwrap();
let &(ref lock, ref cvar) = &*packet2.0;
let _g = lock.lock().unwrap();
cvar.notify_one();
// Parent should fail when it wakes up.
panic!();
});
let &(ref lock, ref cvar) = &*packet.0;
let mut lock = lock.lock().unwrap();
tx.send(()).unwrap();
while *lock == 1 {
match cvar.wait(lock) {
Ok(l) => {
lock = l;
assert_eq!(*lock, 1);
}
Err(..) => break,
}
}
}
#[test]
fn test_mutex_arc_poison() {
let arc = Arc::new(Mutex::new(1));
assert!(!arc.is_poisoned());
let arc2 = arc.clone();
let _ = thread::spawn(move|| {
let lock = arc2.lock().unwrap();
assert_eq!(*lock, 2);
}).join();
assert!(arc.lock().is_err());
assert!(arc.is_poisoned());
}
#[test]
fn test_mutex_arc_nested() {
// Tests nested mutexes and access
// to underlying data.
let arc = Arc::new(Mutex::new(1));
let arc2 = Arc::new(Mutex::new(arc));
let (tx, rx) = channel();
let _t = thread::spawn(move|| {
let lock = arc2.lock().unwrap();
let lock2 = lock.lock().unwrap();
assert_eq!(*lock2, 1);
tx.send(()).unwrap();
});
rx.recv().unwrap();
}
#[test]
fn test_mutex_arc_access_in_unwind() {
let arc = Arc::new(Mutex::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move|| -> () {
struct
|
{
i: Arc<Mutex<i32>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
*self.i.lock().unwrap() += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
}).join();
let lock = arc.lock().unwrap();
assert_eq!(*lock, 2);
}
// FIXME(#25351) needs deeply nested coercions of DST structs.
// #[test]
// fn test_mutex_unsized() {
// let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
// {
// let b = &mut *mutex.lock().unwrap();
// b[0] = 4;
// b[2] = 5;
// }
// let comp: &[i32] = &[4, 2, 5];
// assert_eq!(&*mutex.lock().unwrap(), comp);
// }
}
|
Unwinder
|
identifier_name
|
cgs.rs
|
use super::*;
pub fn new() -> System {
System {
name: "CGS",
module: "cgs",
doc_prelude: "The Gaussian CGS unit system
Note: this system is incomplete. More derived units and constants are coming.
",
base: base_units!(
SQRTCM: SqrtCentimeter, sqrtcm;
SQRTG: SqrtGram, sqrtg;
S: Second, s, Time;
),
derived: derived_units!(
|
S2: Second2 = Second * Second;
S3: Second3 = Second2 * Second;
S4: Second4 = Second3 * Second;
CMPS: CentimeterPerSecond = Centimeter / Second, Velocity;
CMPS3: CentimeterPerSecond3 = Centimeter / Second3, Jerk;
CMPS4: CentimeterPerSecond4 = Centimeter / Second4;
CM2PS: Centimeter2PerSecond = Centimeter2 / Second;
CM2PS2: Centimeter2PerSecond2 = Centimeter2 / Second2;
CM2PS3: Centimeter2PerSecond3 = Centimeter2 / Second3;
CM3PS: Centimeter3PerSecond = Centimeter3 / Second;
CM3PS2: Centimeter3PerSecond2 = Centimeter3 / Second2;
CM3PS3: Centimeter3PerSecond3 = Centimeter3 / Second3;
GAL: Gal = Centimeter / Second2, Acceleration;
DYN: Dyne = Gram * Gal, Force;
ERG: Erg = Dyne * Centimeter, Energy;
ERGPS: ErgPerSecond = Erg / Second, Power;
BA: Barye = Dyne / Centimeter2, Pressure;
P: Poise = Gram / Centimeter / Second;
ST: Stokes = Centimeter2 / Second;
K: Kayser = Unitless / Centimeter, ReciprocalLength;
STATC: StatCoulomb = SqrtGram * SqrtCentimeter * Centimeter / Second;
STATA: StatAmpere = StatCoulomb / Second;
STATV: StatVolt = Erg / StatCoulomb;
),
constants: constants!(
M: Centimeter = HECTO * CM.value_unsafe, "Meter";
),
fmt: false,
from: vec!["SI", "MKS"],
refl_blacklist: Vec::new(),
}
}
|
CM: Centimeter = SqrtCentimeter * SqrtCentimeter, Length;
G: Gram = SqrtGram * SqrtGram, Mass;
CM2: Centimeter2 = Centimeter * Centimeter, Area;
CM3: Centimeter3 = Centimeter2 * Centimeter, Volume;
|
random_line_split
|
cgs.rs
|
use super::*;
pub fn new() -> System
|
S2: Second2 = Second * Second;
S3: Second3 = Second2 * Second;
S4: Second4 = Second3 * Second;
CMPS: CentimeterPerSecond = Centimeter / Second, Velocity;
CMPS3: CentimeterPerSecond3 = Centimeter / Second3, Jerk;
CMPS4: CentimeterPerSecond4 = Centimeter / Second4;
CM2PS: Centimeter2PerSecond = Centimeter2 / Second;
CM2PS2: Centimeter2PerSecond2 = Centimeter2 / Second2;
CM2PS3: Centimeter2PerSecond3 = Centimeter2 / Second3;
CM3PS: Centimeter3PerSecond = Centimeter3 / Second;
CM3PS2: Centimeter3PerSecond2 = Centimeter3 / Second2;
CM3PS3: Centimeter3PerSecond3 = Centimeter3 / Second3;
GAL: Gal = Centimeter / Second2, Acceleration;
DYN: Dyne = Gram * Gal, Force;
ERG: Erg = Dyne * Centimeter, Energy;
ERGPS: ErgPerSecond = Erg / Second, Power;
BA: Barye = Dyne / Centimeter2, Pressure;
P: Poise = Gram / Centimeter / Second;
ST: Stokes = Centimeter2 / Second;
K: Kayser = Unitless / Centimeter, ReciprocalLength;
STATC: StatCoulomb = SqrtGram * SqrtCentimeter * Centimeter / Second;
STATA: StatAmpere = StatCoulomb / Second;
STATV: StatVolt = Erg / StatCoulomb;
),
constants: constants!(
M: Centimeter = HECTO * CM.value_unsafe, "Meter";
),
fmt: false,
from: vec!["SI", "MKS"],
refl_blacklist: Vec::new(),
}
}
|
{
System {
name: "CGS",
module: "cgs",
doc_prelude: "The Gaussian CGS unit system
Note: this system is incomplete. More derived units and constants are coming.
",
base: base_units!(
SQRTCM: SqrtCentimeter, sqrtcm;
SQRTG: SqrtGram, sqrtg;
S: Second, s, Time;
),
derived: derived_units!(
CM: Centimeter = SqrtCentimeter * SqrtCentimeter, Length;
G: Gram = SqrtGram * SqrtGram, Mass;
CM2: Centimeter2 = Centimeter * Centimeter, Area;
CM3: Centimeter3 = Centimeter2 * Centimeter, Volume;
|
identifier_body
|
cgs.rs
|
use super::*;
pub fn
|
() -> System {
System {
name: "CGS",
module: "cgs",
doc_prelude: "The Gaussian CGS unit system
Note: this system is incomplete. More derived units and constants are coming.
",
base: base_units!(
SQRTCM: SqrtCentimeter, sqrtcm;
SQRTG: SqrtGram, sqrtg;
S: Second, s, Time;
),
derived: derived_units!(
CM: Centimeter = SqrtCentimeter * SqrtCentimeter, Length;
G: Gram = SqrtGram * SqrtGram, Mass;
CM2: Centimeter2 = Centimeter * Centimeter, Area;
CM3: Centimeter3 = Centimeter2 * Centimeter, Volume;
S2: Second2 = Second * Second;
S3: Second3 = Second2 * Second;
S4: Second4 = Second3 * Second;
CMPS: CentimeterPerSecond = Centimeter / Second, Velocity;
CMPS3: CentimeterPerSecond3 = Centimeter / Second3, Jerk;
CMPS4: CentimeterPerSecond4 = Centimeter / Second4;
CM2PS: Centimeter2PerSecond = Centimeter2 / Second;
CM2PS2: Centimeter2PerSecond2 = Centimeter2 / Second2;
CM2PS3: Centimeter2PerSecond3 = Centimeter2 / Second3;
CM3PS: Centimeter3PerSecond = Centimeter3 / Second;
CM3PS2: Centimeter3PerSecond2 = Centimeter3 / Second2;
CM3PS3: Centimeter3PerSecond3 = Centimeter3 / Second3;
GAL: Gal = Centimeter / Second2, Acceleration;
DYN: Dyne = Gram * Gal, Force;
ERG: Erg = Dyne * Centimeter, Energy;
ERGPS: ErgPerSecond = Erg / Second, Power;
BA: Barye = Dyne / Centimeter2, Pressure;
P: Poise = Gram / Centimeter / Second;
ST: Stokes = Centimeter2 / Second;
K: Kayser = Unitless / Centimeter, ReciprocalLength;
STATC: StatCoulomb = SqrtGram * SqrtCentimeter * Centimeter / Second;
STATA: StatAmpere = StatCoulomb / Second;
STATV: StatVolt = Erg / StatCoulomb;
),
constants: constants!(
M: Centimeter = HECTO * CM.value_unsafe, "Meter";
),
fmt: false,
from: vec!["SI", "MKS"],
refl_blacklist: Vec::new(),
}
}
|
new
|
identifier_name
|
no_0491_increasing_subsequences.rs
|
struct Solution;
impl Solution {
pub fn find_subsequences(nums: Vec<i32>) -> Vec<Vec<i32>>
|
fn dfs(cur: usize, last: i32, nums: &Vec<i32>, temp: &mut Vec<i32>, ans: &mut Vec<Vec<i32>>) {
if cur == nums.len() {
if temp.len() >= 2 {
ans.push(temp.clone());
}
return;
}
let cur_val = nums[cur];
if cur_val >= last {
temp.push(cur_val);
Self::dfs(cur + 1, cur_val, nums, temp, ans);
temp.pop();
}
if cur_val != last {
Self::dfs(cur + 1, last, nums, temp, ans);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_find_subsequences() {
let ans = Solution::find_subsequences(vec![4, 6, 7, 7]);
let want = vec![
vec![4, 6, 7, 7],
vec![4, 6, 7],
vec![4, 6],
vec![4, 7, 7],
vec![4, 7],
vec![6, 7, 7],
vec![6, 7],
vec![7, 7],
];
assert_eq!(ans, want);
}
}
|
{
let mut temp = Vec::new();
let mut ans = Vec::new();
Self::dfs(0, std::i32::MIN, &nums, &mut temp, &mut ans);
ans
}
|
identifier_body
|
no_0491_increasing_subsequences.rs
|
struct Solution;
impl Solution {
pub fn find_subsequences(nums: Vec<i32>) -> Vec<Vec<i32>> {
let mut temp = Vec::new();
let mut ans = Vec::new();
Self::dfs(0, std::i32::MIN, &nums, &mut temp, &mut ans);
ans
}
fn dfs(cur: usize, last: i32, nums: &Vec<i32>, temp: &mut Vec<i32>, ans: &mut Vec<Vec<i32>>) {
if cur == nums.len() {
if temp.len() >= 2
|
return;
}
let cur_val = nums[cur];
if cur_val >= last {
temp.push(cur_val);
Self::dfs(cur + 1, cur_val, nums, temp, ans);
temp.pop();
}
if cur_val != last {
Self::dfs(cur + 1, last, nums, temp, ans);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_find_subsequences() {
let ans = Solution::find_subsequences(vec![4, 6, 7, 7]);
let want = vec![
vec![4, 6, 7, 7],
vec![4, 6, 7],
vec![4, 6],
vec![4, 7, 7],
vec![4, 7],
vec![6, 7, 7],
vec![6, 7],
vec![7, 7],
];
assert_eq!(ans, want);
}
}
|
{
ans.push(temp.clone());
}
|
conditional_block
|
no_0491_increasing_subsequences.rs
|
struct Solution;
impl Solution {
pub fn find_subsequences(nums: Vec<i32>) -> Vec<Vec<i32>> {
let mut temp = Vec::new();
let mut ans = Vec::new();
Self::dfs(0, std::i32::MIN, &nums, &mut temp, &mut ans);
ans
}
fn
|
(cur: usize, last: i32, nums: &Vec<i32>, temp: &mut Vec<i32>, ans: &mut Vec<Vec<i32>>) {
if cur == nums.len() {
if temp.len() >= 2 {
ans.push(temp.clone());
}
return;
}
let cur_val = nums[cur];
if cur_val >= last {
temp.push(cur_val);
Self::dfs(cur + 1, cur_val, nums, temp, ans);
temp.pop();
}
if cur_val != last {
Self::dfs(cur + 1, last, nums, temp, ans);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_find_subsequences() {
let ans = Solution::find_subsequences(vec![4, 6, 7, 7]);
let want = vec![
vec![4, 6, 7, 7],
vec![4, 6, 7],
vec![4, 6],
vec![4, 7, 7],
vec![4, 7],
vec![6, 7, 7],
vec![6, 7],
vec![7, 7],
];
assert_eq!(ans, want);
}
}
|
dfs
|
identifier_name
|
no_0491_increasing_subsequences.rs
|
struct Solution;
impl Solution {
pub fn find_subsequences(nums: Vec<i32>) -> Vec<Vec<i32>> {
let mut temp = Vec::new();
let mut ans = Vec::new();
Self::dfs(0, std::i32::MIN, &nums, &mut temp, &mut ans);
ans
}
fn dfs(cur: usize, last: i32, nums: &Vec<i32>, temp: &mut Vec<i32>, ans: &mut Vec<Vec<i32>>) {
if cur == nums.len() {
if temp.len() >= 2 {
ans.push(temp.clone());
}
return;
}
let cur_val = nums[cur];
if cur_val >= last {
temp.push(cur_val);
Self::dfs(cur + 1, cur_val, nums, temp, ans);
temp.pop();
}
if cur_val != last {
Self::dfs(cur + 1, last, nums, temp, ans);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_find_subsequences() {
let ans = Solution::find_subsequences(vec![4, 6, 7, 7]);
let want = vec![
vec![4, 6, 7, 7],
vec![4, 6, 7],
vec![4, 6],
vec![4, 7, 7],
vec![4, 7],
vec![6, 7, 7],
vec![6, 7],
vec![7, 7],
];
assert_eq!(ans, want);
|
}
}
|
|
random_line_split
|
index.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use serde::{Deserialize, Deserializer, Error};
use serde::de::Visitor;
/// Represents usize.
#[derive(Debug, PartialEq)]
pub struct Index(usize);
impl Index {
/// Convert to usize
pub fn value(&self) -> usize {
self.0
}
}
impl Deserialize for Index {
fn deserialize<D>(deserializer: &mut D) -> Result<Index, D::Error>
where D: Deserializer {
deserializer.deserialize(IndexVisitor)
}
}
struct IndexVisitor;
impl Visitor for IndexVisitor {
type Value = Index;
fn visit_str<E>(&mut self, value: &str) -> Result<Self::Value, E> where E: Error {
match value {
_ if value.starts_with("0x") => usize::from_str_radix(&value[2..], 16).map(Index).map_err(|_| Error::custom("invalid index")),
_ => value.parse::<usize>().map(Index).map_err(|_| Error::custom("invalid index"))
}
}
fn
|
<E>(&mut self, value: String) -> Result<Self::Value, E> where E: Error {
self.visit_str(value.as_ref())
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
#[test]
fn block_number_deserialization() {
let s = r#"["0xa", "10"]"#;
let deserialized: Vec<Index> = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, vec![Index(10), Index(10)]);
}
}
|
visit_string
|
identifier_name
|
index.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use serde::{Deserialize, Deserializer, Error};
use serde::de::Visitor;
/// Represents usize.
#[derive(Debug, PartialEq)]
pub struct Index(usize);
impl Index {
/// Convert to usize
pub fn value(&self) -> usize {
self.0
}
}
impl Deserialize for Index {
fn deserialize<D>(deserializer: &mut D) -> Result<Index, D::Error>
where D: Deserializer {
deserializer.deserialize(IndexVisitor)
}
}
struct IndexVisitor;
impl Visitor for IndexVisitor {
type Value = Index;
fn visit_str<E>(&mut self, value: &str) -> Result<Self::Value, E> where E: Error {
match value {
_ if value.starts_with("0x") => usize::from_str_radix(&value[2..], 16).map(Index).map_err(|_| Error::custom("invalid index")),
_ => value.parse::<usize>().map(Index).map_err(|_| Error::custom("invalid index"))
}
}
fn visit_string<E>(&mut self, value: String) -> Result<Self::Value, E> where E: Error {
self.visit_str(value.as_ref())
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json;
#[test]
fn block_number_deserialization()
|
}
|
{
let s = r#"["0xa", "10"]"#;
let deserialized: Vec<Index> = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, vec![Index(10), Index(10)]);
}
|
identifier_body
|