file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
main.rs
|
// Copyright (C) 2015, Alberto Corona <[email protected]>
// All rights reserved. This file is part of core-utils, distributed under the
// GPL v3 license. For full terms please see the LICENSE file.
#![crate_type = "bin"]
#![crate_name = "nproc"]
static UTIL: Prog = Prog { name: "nproc", vers: "0.1.0", yr: "2015" };
extern crate pgetopts;
extern crate num_cpus;
extern crate rpf;
use pgetopts::{Options};
use rpf::*;
use std::env;
fn
|
(opts: Options) {
print!("{}: {} {}", UTIL.name.bold(), "Usage".bold(), "[OPTION]".underline());
println!("{}", opts.options());
}
fn main() {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
opts.optflag("h", "help", "Print help information");
opts.optflag("", "version", "Print version information");
opts.optopt("i", "ignore", "Number of processing units to ignore", "NUM");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(e) => {
UTIL.error(e.to_string(), ExitStatus::OptError);
panic!(e.to_string())
}
};
let num: u32 = num_cpus::get() as u32;
if matches.opt_present("h") {
print_usage(opts);
} else if matches.opt_present("version") {
UTIL.copyright("Copyright (C) 2015 core-utils developers\n\
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.\n\
This is free software: you are free to change and redistribute it.\n\
There is NO WARRANTY, to the extent permitted by law.\n\n",
&["Alberto Corona"]);
} else if matches.opt_present("i") {
let ignore = match matches.opts_str(&[String::from("i")]) {
Some(s) => { s.parse::<u32>().unwrap_or(0) },
None => { panic!() },
};
if ignore > num {
println!("0");
} else {
println!("{}", num - ignore);
}
} else {
println!("{}", num);
}
}
|
print_usage
|
identifier_name
|
main.rs
|
// Copyright (C) 2015, Alberto Corona <[email protected]>
// All rights reserved. This file is part of core-utils, distributed under the
// GPL v3 license. For full terms please see the LICENSE file.
#![crate_type = "bin"]
#![crate_name = "nproc"]
static UTIL: Prog = Prog { name: "nproc", vers: "0.1.0", yr: "2015" };
extern crate pgetopts;
extern crate num_cpus;
extern crate rpf;
use pgetopts::{Options};
use rpf::*;
use std::env;
fn print_usage(opts: Options) {
print!("{}: {} {}", UTIL.name.bold(), "Usage".bold(), "[OPTION]".underline());
println!("{}", opts.options());
}
fn main() {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
opts.optflag("h", "help", "Print help information");
opts.optflag("", "version", "Print version information");
opts.optopt("i", "ignore", "Number of processing units to ignore", "NUM");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(e) => {
UTIL.error(e.to_string(), ExitStatus::OptError);
panic!(e.to_string())
}
};
let num: u32 = num_cpus::get() as u32;
if matches.opt_present("h") {
print_usage(opts);
} else if matches.opt_present("version") {
UTIL.copyright("Copyright (C) 2015 core-utils developers\n\
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.\n\
This is free software: you are free to change and redistribute it.\n\
There is NO WARRANTY, to the extent permitted by law.\n\n",
&["Alberto Corona"]);
} else if matches.opt_present("i") {
let ignore = match matches.opts_str(&[String::from("i")]) {
Some(s) => { s.parse::<u32>().unwrap_or(0) },
None => { panic!() },
};
if ignore > num
|
else {
println!("{}", num - ignore);
}
} else {
println!("{}", num);
}
}
|
{
println!("0");
}
|
conditional_block
|
main.rs
|
// Copyright (C) 2015, Alberto Corona <[email protected]>
// All rights reserved. This file is part of core-utils, distributed under the
// GPL v3 license. For full terms please see the LICENSE file.
#![crate_type = "bin"]
#![crate_name = "nproc"]
static UTIL: Prog = Prog { name: "nproc", vers: "0.1.0", yr: "2015" };
extern crate pgetopts;
extern crate num_cpus;
extern crate rpf;
use pgetopts::{Options};
use rpf::*;
use std::env;
fn print_usage(opts: Options) {
print!("{}: {} {}", UTIL.name.bold(), "Usage".bold(), "[OPTION]".underline());
println!("{}", opts.options());
}
fn main() {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
opts.optflag("h", "help", "Print help information");
opts.optflag("", "version", "Print version information");
opts.optopt("i", "ignore", "Number of processing units to ignore", "NUM");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(e) => {
UTIL.error(e.to_string(), ExitStatus::OptError);
panic!(e.to_string())
}
};
let num: u32 = num_cpus::get() as u32;
if matches.opt_present("h") {
print_usage(opts);
} else if matches.opt_present("version") {
UTIL.copyright("Copyright (C) 2015 core-utils developers\n\
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.\n\
This is free software: you are free to change and redistribute it.\n\
There is NO WARRANTY, to the extent permitted by law.\n\n",
|
};
if ignore > num {
println!("0");
} else {
println!("{}", num - ignore);
}
} else {
println!("{}", num);
}
}
|
&["Alberto Corona"]);
} else if matches.opt_present("i") {
let ignore = match matches.opts_str(&[String::from("i")]) {
Some(s) => { s.parse::<u32>().unwrap_or(0) },
None => { panic!() },
|
random_line_split
|
main.rs
|
// Copyright (C) 2015, Alberto Corona <[email protected]>
// All rights reserved. This file is part of core-utils, distributed under the
// GPL v3 license. For full terms please see the LICENSE file.
#![crate_type = "bin"]
#![crate_name = "nproc"]
static UTIL: Prog = Prog { name: "nproc", vers: "0.1.0", yr: "2015" };
extern crate pgetopts;
extern crate num_cpus;
extern crate rpf;
use pgetopts::{Options};
use rpf::*;
use std::env;
fn print_usage(opts: Options) {
print!("{}: {} {}", UTIL.name.bold(), "Usage".bold(), "[OPTION]".underline());
println!("{}", opts.options());
}
fn main()
|
UTIL.copyright("Copyright (C) 2015 core-utils developers\n\
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.\n\
This is free software: you are free to change and redistribute it.\n\
There is NO WARRANTY, to the extent permitted by law.\n\n",
&["Alberto Corona"]);
} else if matches.opt_present("i") {
let ignore = match matches.opts_str(&[String::from("i")]) {
Some(s) => { s.parse::<u32>().unwrap_or(0) },
None => { panic!() },
};
if ignore > num {
println!("0");
} else {
println!("{}", num - ignore);
}
} else {
println!("{}", num);
}
}
|
{
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
opts.optflag("h", "help", "Print help information");
opts.optflag("", "version", "Print version information");
opts.optopt("i", "ignore", "Number of processing units to ignore", "NUM");
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(e) => {
UTIL.error(e.to_string(), ExitStatus::OptError);
panic!(e.to_string())
}
};
let num: u32 = num_cpus::get() as u32;
if matches.opt_present("h") {
print_usage(opts);
} else if matches.opt_present("version") {
|
identifier_body
|
song_map.rs
|
use radio::song::Song;
use serde_json;
use std::collections::HashMap;
use std::fs::File;
use std::fs;
use std::io::prelude::*;
use std::io::{BufReader, BufWriter};
use util::logger::log;
/// A map containing songs and their times played,
/// Inserting a song already known will increase its counter
#[derive(Debug)]
pub struct SongMap(HashMap<Song, u16>);
impl SongMap {
/// Creates a new empty songmap
fn new() -> Self {
SongMap(HashMap::new())
}
/// Inserts a song into the map
fn insert(&mut self, key: Song, value: u16) {
self.0.insert(key, value);
}
/// Inserts a song into the map
/// If known increases its counter instead
pub fn insert_song(&mut self, song: Song) {
let counter = self.0.entry(song).or_insert(0);
*counter += 1;
}
/// Saves a map to json
pub fn save_to_file(&self, dir: &str, name: &str) {
/// Logs possible errors
fn save_error(dir: &str, name: &str) {
log(&format!("ERROR: Could not save {}\\{}.json",dir, name));
}
// Create file and directory
let _ = fs::create_dir_all(dir);
let to = match File::create(format!("{}\\{}.json",dir, name)) {
Ok(file) => file,
Err(_) => {
save_error(dir, name);
return
}
};
let mut to = BufWriter::new(to);
// Since the map contains a non-string key we cannot save it to json
// straight away, convert it to a save-able format
let mut string_vec: Vec<SongMapHelper> = Vec::new();
for (key, value) in self.0.iter() {
string_vec.push(
SongMapHelper {
artist: key.artist.clone(),
title: key.title.clone(),
count: *value,
}
);
}
|
// Write json to file
match serde_json::to_writer_pretty(&mut to, &string_vec) {
Err(_) => {
save_error(dir, name);
return
}
_ => {},
}
// Add a newline, otherwise the json might be invalid
let _ = to.write(b"\n");
let _ = to.flush();
}
/// Loads a map from json
pub fn load_from_file(dir: &str, name: &'static str) -> Self {
/// Logs possible errors
fn load_error(dir: &str, name: &'static str) {
log(&format!("ERROR: Could not load {}\\{}.json",dir, name));
}
// Create an empty map
let mut song_map = SongMap::new();
// Try to open
let from = match File::open(format!("{}\\{}.json",dir, name)) {
Ok(file) => file,
Err(_) => {
load_error(dir, name);
return song_map
}
};
let from = BufReader::new(from);
// Save into a vector of helper
let string_vec: Vec<SongMapHelper> = match serde_json::from_reader(from) {
Ok(vec) => vec,
Err(_) => {
load_error(dir, name);
return song_map
}
};
// Convert helper to the actual songmap
for song_helper in string_vec {
song_map.insert( Song {
artist: song_helper.artist,
title: song_helper.title
}, song_helper.count);
}
song_map
}
}
/// This is a helper so that the songmap may be saved to json with serde
#[derive(Debug, Serialize, Deserialize)]
struct SongMapHelper {
artist: String,
title: String,
count: u16,
}
|
random_line_split
|
|
song_map.rs
|
use radio::song::Song;
use serde_json;
use std::collections::HashMap;
use std::fs::File;
use std::fs;
use std::io::prelude::*;
use std::io::{BufReader, BufWriter};
use util::logger::log;
/// A map containing songs and their times played,
/// Inserting a song already known will increase its counter
#[derive(Debug)]
pub struct SongMap(HashMap<Song, u16>);
impl SongMap {
/// Creates a new empty songmap
fn new() -> Self {
SongMap(HashMap::new())
}
/// Inserts a song into the map
fn insert(&mut self, key: Song, value: u16) {
self.0.insert(key, value);
}
/// Inserts a song into the map
/// If known increases its counter instead
pub fn insert_song(&mut self, song: Song) {
let counter = self.0.entry(song).or_insert(0);
*counter += 1;
}
/// Saves a map to json
pub fn save_to_file(&self, dir: &str, name: &str) {
/// Logs possible errors
fn save_error(dir: &str, name: &str) {
log(&format!("ERROR: Could not save {}\\{}.json",dir, name));
}
// Create file and directory
let _ = fs::create_dir_all(dir);
let to = match File::create(format!("{}\\{}.json",dir, name)) {
Ok(file) => file,
Err(_) => {
save_error(dir, name);
return
}
};
let mut to = BufWriter::new(to);
// Since the map contains a non-string key we cannot save it to json
// straight away, convert it to a save-able format
let mut string_vec: Vec<SongMapHelper> = Vec::new();
for (key, value) in self.0.iter() {
string_vec.push(
SongMapHelper {
artist: key.artist.clone(),
title: key.title.clone(),
count: *value,
}
);
}
// Write json to file
match serde_json::to_writer_pretty(&mut to, &string_vec) {
Err(_) => {
save_error(dir, name);
return
}
_ => {},
}
// Add a newline, otherwise the json might be invalid
let _ = to.write(b"\n");
let _ = to.flush();
}
/// Loads a map from json
pub fn
|
(dir: &str, name: &'static str) -> Self {
/// Logs possible errors
fn load_error(dir: &str, name: &'static str) {
log(&format!("ERROR: Could not load {}\\{}.json",dir, name));
}
// Create an empty map
let mut song_map = SongMap::new();
// Try to open
let from = match File::open(format!("{}\\{}.json",dir, name)) {
Ok(file) => file,
Err(_) => {
load_error(dir, name);
return song_map
}
};
let from = BufReader::new(from);
// Save into a vector of helper
let string_vec: Vec<SongMapHelper> = match serde_json::from_reader(from) {
Ok(vec) => vec,
Err(_) => {
load_error(dir, name);
return song_map
}
};
// Convert helper to the actual songmap
for song_helper in string_vec {
song_map.insert( Song {
artist: song_helper.artist,
title: song_helper.title
}, song_helper.count);
}
song_map
}
}
/// This is a helper so that the songmap may be saved to json with serde
#[derive(Debug, Serialize, Deserialize)]
struct SongMapHelper {
artist: String,
title: String,
count: u16,
}
|
load_from_file
|
identifier_name
|
song_map.rs
|
use radio::song::Song;
use serde_json;
use std::collections::HashMap;
use std::fs::File;
use std::fs;
use std::io::prelude::*;
use std::io::{BufReader, BufWriter};
use util::logger::log;
/// A map containing songs and their times played,
/// Inserting a song already known will increase its counter
#[derive(Debug)]
pub struct SongMap(HashMap<Song, u16>);
impl SongMap {
/// Creates a new empty songmap
fn new() -> Self
|
/// Inserts a song into the map
fn insert(&mut self, key: Song, value: u16) {
self.0.insert(key, value);
}
/// Inserts a song into the map
/// If known increases its counter instead
pub fn insert_song(&mut self, song: Song) {
let counter = self.0.entry(song).or_insert(0);
*counter += 1;
}
/// Saves a map to json
pub fn save_to_file(&self, dir: &str, name: &str) {
/// Logs possible errors
fn save_error(dir: &str, name: &str) {
log(&format!("ERROR: Could not save {}\\{}.json",dir, name));
}
// Create file and directory
let _ = fs::create_dir_all(dir);
let to = match File::create(format!("{}\\{}.json",dir, name)) {
Ok(file) => file,
Err(_) => {
save_error(dir, name);
return
}
};
let mut to = BufWriter::new(to);
// Since the map contains a non-string key we cannot save it to json
// straight away, convert it to a save-able format
let mut string_vec: Vec<SongMapHelper> = Vec::new();
for (key, value) in self.0.iter() {
string_vec.push(
SongMapHelper {
artist: key.artist.clone(),
title: key.title.clone(),
count: *value,
}
);
}
// Write json to file
match serde_json::to_writer_pretty(&mut to, &string_vec) {
Err(_) => {
save_error(dir, name);
return
}
_ => {},
}
// Add a newline, otherwise the json might be invalid
let _ = to.write(b"\n");
let _ = to.flush();
}
/// Loads a map from json
pub fn load_from_file(dir: &str, name: &'static str) -> Self {
/// Logs possible errors
fn load_error(dir: &str, name: &'static str) {
log(&format!("ERROR: Could not load {}\\{}.json",dir, name));
}
// Create an empty map
let mut song_map = SongMap::new();
// Try to open
let from = match File::open(format!("{}\\{}.json",dir, name)) {
Ok(file) => file,
Err(_) => {
load_error(dir, name);
return song_map
}
};
let from = BufReader::new(from);
// Save into a vector of helper
let string_vec: Vec<SongMapHelper> = match serde_json::from_reader(from) {
Ok(vec) => vec,
Err(_) => {
load_error(dir, name);
return song_map
}
};
// Convert helper to the actual songmap
for song_helper in string_vec {
song_map.insert( Song {
artist: song_helper.artist,
title: song_helper.title
}, song_helper.count);
}
song_map
}
}
/// This is a helper so that the songmap may be saved to json with serde
#[derive(Debug, Serialize, Deserialize)]
struct SongMapHelper {
artist: String,
title: String,
count: u16,
}
|
{
SongMap(HashMap::new())
}
|
identifier_body
|
lib.rs
|
pub extern crate piston_window;
mod printer;
pub use printer::*;
pub mod colors;
#[cfg(test)]
extern crate float_cmp;
#[cfg(test)]
use float_cmp::ApproxEqUlps;
#[cfg(test)]
/// Check if two floats are approximately equal
macro_rules! assert_float_eq {
($left: expr, $right: expr, $precision: expr) => {
assert!($left.approx_eq_ulps(&$right, $precision));
}
}
#[cfg(test)]
/// Check if two colors are equal, taking care of float comparison
macro_rules! assert_color_eq {
($left: expr, $right: expr) => {
assert_float_eq!($left[0], $right[0], 3);
assert_float_eq!($left[1], $right[1], 3);
assert_float_eq!($left[2], $right[2], 3);
assert_float_eq!($left[3], $right[3], 3);
}
}
#[test]
fn color_multiply() {
use colors::multiply_color;
let color = [0.2, 0.4, 0.8, 1.0];
assert_color_eq!([0.1, 0.2, 0.4, 0.5], multiply_color(color, 0.5, true));
assert_color_eq!([0.1, 0.2, 0.4, 1.0], multiply_color(color, 0.5, false));
assert_color_eq!([0.4, 0.8, 1.6, 2.0], multiply_color(color, 2.0, true));
assert_color_eq!([0.4, 0.8, 1.6, 1.0], multiply_color(color, 2.0, false));
}
#[test]
fn color_bounds() {
use colors::check_color_bounds;
let color = [0.2, -0.7, 1.3, 0.0];
assert_color_eq!([0.2, 0.0, 1.0, 0.0], check_color_bounds(color));
}
#[test]
fn test_darker() {
use colors::darker;
let color = [0.3, 0.6, 0.9, 1.0];
assert_color_eq!([0.45, 0.9, 1.0, 1.0], darker(color, 0.5, true));
assert_color_eq!([0.45, 0.9, 1.0, 1.0], darker(color, 0.5, false));
}
#[test]
fn test_lighter() {
use colors::lighter;
let color = [0.3, 0.6, 0.9, 1.0];
assert_color_eq!([0.15, 0.3, 0.45, 0.5], lighter(color, 0.5, true));
|
assert_color_eq!([0.15, 0.3, 0.45, 1.0], lighter(color, 0.5, false));
}
|
random_line_split
|
|
lib.rs
|
pub extern crate piston_window;
mod printer;
pub use printer::*;
pub mod colors;
#[cfg(test)]
extern crate float_cmp;
#[cfg(test)]
use float_cmp::ApproxEqUlps;
#[cfg(test)]
/// Check if two floats are approximately equal
macro_rules! assert_float_eq {
($left: expr, $right: expr, $precision: expr) => {
assert!($left.approx_eq_ulps(&$right, $precision));
}
}
#[cfg(test)]
/// Check if two colors are equal, taking care of float comparison
macro_rules! assert_color_eq {
($left: expr, $right: expr) => {
assert_float_eq!($left[0], $right[0], 3);
assert_float_eq!($left[1], $right[1], 3);
assert_float_eq!($left[2], $right[2], 3);
assert_float_eq!($left[3], $right[3], 3);
}
}
#[test]
fn color_multiply() {
use colors::multiply_color;
let color = [0.2, 0.4, 0.8, 1.0];
assert_color_eq!([0.1, 0.2, 0.4, 0.5], multiply_color(color, 0.5, true));
assert_color_eq!([0.1, 0.2, 0.4, 1.0], multiply_color(color, 0.5, false));
assert_color_eq!([0.4, 0.8, 1.6, 2.0], multiply_color(color, 2.0, true));
assert_color_eq!([0.4, 0.8, 1.6, 1.0], multiply_color(color, 2.0, false));
}
#[test]
fn color_bounds() {
use colors::check_color_bounds;
let color = [0.2, -0.7, 1.3, 0.0];
assert_color_eq!([0.2, 0.0, 1.0, 0.0], check_color_bounds(color));
}
#[test]
fn test_darker()
|
#[test]
fn test_lighter() {
use colors::lighter;
let color = [0.3, 0.6, 0.9, 1.0];
assert_color_eq!([0.15, 0.3, 0.45, 0.5], lighter(color, 0.5, true));
assert_color_eq!([0.15, 0.3, 0.45, 1.0], lighter(color, 0.5, false));
}
|
{
use colors::darker;
let color = [0.3, 0.6, 0.9, 1.0];
assert_color_eq!([0.45, 0.9, 1.0, 1.0], darker(color, 0.5, true));
assert_color_eq!([0.45, 0.9, 1.0, 1.0], darker(color, 0.5, false));
}
|
identifier_body
|
lib.rs
|
pub extern crate piston_window;
mod printer;
pub use printer::*;
pub mod colors;
#[cfg(test)]
extern crate float_cmp;
#[cfg(test)]
use float_cmp::ApproxEqUlps;
#[cfg(test)]
/// Check if two floats are approximately equal
macro_rules! assert_float_eq {
($left: expr, $right: expr, $precision: expr) => {
assert!($left.approx_eq_ulps(&$right, $precision));
}
}
#[cfg(test)]
/// Check if two colors are equal, taking care of float comparison
macro_rules! assert_color_eq {
($left: expr, $right: expr) => {
assert_float_eq!($left[0], $right[0], 3);
assert_float_eq!($left[1], $right[1], 3);
assert_float_eq!($left[2], $right[2], 3);
assert_float_eq!($left[3], $right[3], 3);
}
}
#[test]
fn color_multiply() {
use colors::multiply_color;
let color = [0.2, 0.4, 0.8, 1.0];
assert_color_eq!([0.1, 0.2, 0.4, 0.5], multiply_color(color, 0.5, true));
assert_color_eq!([0.1, 0.2, 0.4, 1.0], multiply_color(color, 0.5, false));
assert_color_eq!([0.4, 0.8, 1.6, 2.0], multiply_color(color, 2.0, true));
assert_color_eq!([0.4, 0.8, 1.6, 1.0], multiply_color(color, 2.0, false));
}
#[test]
fn color_bounds() {
use colors::check_color_bounds;
let color = [0.2, -0.7, 1.3, 0.0];
assert_color_eq!([0.2, 0.0, 1.0, 0.0], check_color_bounds(color));
}
#[test]
fn test_darker() {
use colors::darker;
let color = [0.3, 0.6, 0.9, 1.0];
assert_color_eq!([0.45, 0.9, 1.0, 1.0], darker(color, 0.5, true));
assert_color_eq!([0.45, 0.9, 1.0, 1.0], darker(color, 0.5, false));
}
#[test]
fn
|
() {
use colors::lighter;
let color = [0.3, 0.6, 0.9, 1.0];
assert_color_eq!([0.15, 0.3, 0.45, 0.5], lighter(color, 0.5, true));
assert_color_eq!([0.15, 0.3, 0.45, 1.0], lighter(color, 0.5, false));
}
|
test_lighter
|
identifier_name
|
lib.rs
|
//! This crate provides an interface, `Shifter` that makes it trivially easy to
//! manipulate [shift registers][4] with a Raspberry Pi (thanks to [CuPi][1]).
//! Internally it keeps track of each shift register's state, allowing you to
//! manipulate each pin individually as if it were a regular GPIO pin!
//!
//! Why would you want to do this? **The Raspberry Pi only has 17 usable GPIO
//! pins**. Pin expanders like the [MCP23017][2] can add up to 16 more per chip
//! (at a cost of about ~$2-3/each) but they work over I2C which is *slow* (on
//! the Raspberry Pi anyway). With shift registers like the [74HC595][3]
//! (~$0.05-0.10/each) you can add a *nearly infinite* amount of output pins and
//! *refresh them as fast as the hardware supports*. You can even use many
//! sets of 3 pins to run multiple chains of shift registers in parallel.
//!
//! Realize your dream of controlling an enormous holiday lights display with a
//! single Raspberry Pi using cupi_shift!
//!
//! # Example
//!
//! ```
//! extern crate cupi_shift;
//! use cupi_shift::Shifter;
//!
//! fn main() {
//! // First define which pins you're using for your shift register(s)
//! let (data_pin, latch_pin, clock_pin) = (29, 28, 27);
//!
//! // Now create a new Shifter instance using those pins
//! let mut shifter = Shifter::new(data_pin, latch_pin, clock_pin);
//!
//! // Next we need to call `add()` for each shift register and tell it how
//! // many pins they have
//! let pins = 8;
//! let sr0 = shifter.add(pins); // Starts tracking a new shift register
//!
//! // Now we can set the state (aka data) of our shift register
//! shifter.set(sr0, 0b11111111, true); // Set all pins HIGH
//! }
//!
//! ```
//! # Note about pin numbering
//!
//! [CuPi][1] currently uses GPIO pin numbering. So pin 40 (very last pin on
//! the Raspberry Pi 2) is actually pin 29. You can refer to this image to
//! figure out which pin is which:
//!
//! http://pi4j.com/images/j8header-2b-large.png
//!
//! # Controlling individual pins
//!
//! That's all well and good (setting the state of all pins at once) but what if
//! you want to control just one pin at a time? You can do that too:
//!
//! ```
//! // Set the 8th pin (aka pin 7) HIGH and apply this change immediately
//! shifter.set_pin_high(sr0, 7, true); // NOTE: 3rd arg is 'apply'
//! // Set the first pin (aka pin 0) LOW but don't apply just yet
//! shifter.set_pin_low(sr0, 0, false);
//! shifter.apply(); // Apply the change (the other way to apply changes)
//! ```
//!
//! # Controlling multiple shift registers
//!
//! Every time you call `Shifter.add()` it will start tracking/controlling an
//! additional shift register. So if you have two shift registers chained
//! together you can add and control them individually like so:
//!
//! ```
//! let last = shifter.add(8); // Add an 8-pin shift register (sr_index: 0)
//! let first = shifter.add(8); // Add another (sr_index: 1)
//! // Set pin 0 HIGH on shift register 0 (all others LOW) but don't apply the change yet
//! shifter.set(last, 0b00000001, false);
//! // Set pin 7 HIGH on shift register 1 (all others LOW) and apply the change
//! shifter.set(first, 0b10000000, true);
//! ```
//!
//! **Note:** Shift registers need to be added in the order in which they are
//! chained with the *last* shift register being added first. Why is the order
//! reversed like this? That's how the logic of shift registers works: Every
//! time data is "shifted out" to a shift register it dumps its memory to the
//! the next shift register in the chain.
//!
//! You can also apply changes to individual pins on individual shift registers:
//!
//! ```
//! shifter.set_pin_high(sr1, 2, false); // Set pin 2 HIGH on shift register 1
//! shifter.set_pin_low(sr0, 3, true); // Set pin 3 LOW on shift register 0 (and apply)
//! ```
//!
//! In the above example we didn't set the *apply* (3rd) argument to `true`
//! until the we were done making our changes. If we set *apply* to `true` on
//! each we could wind up with some flickering. The more shift registers you
//! have in your chain the more flickering you can get if you call `apply()`
//! with every state (aka data) change.
//!
//!
//! [1]: https://crates.io/crates/cupi
//! [2]: https://www.adafruit.com/product/732
//! [3]: https://www.sparkfun.com/datasheets/IC/SN74HC595.pdf
//! [4]: https://en.wikipedia.org/wiki/Shift_register
#![allow(dead_code, unused_variables)]
extern crate cupi;
// Using a singly-linked list to represent the chain of shift registers since
// it accurately represents how they're physically linked together.
use std::collections::LinkedList;
use std::cell::RefCell;
use cupi::{CuPi, PinOutput, DigitalWrite};
struct ShiftRegister {
data: usize, // e.g. 0b01010101
pins: u8, // Not aware of any shift registers that have more than 255 output pins
}
// This is great for debugging; displays the Shift Register data in binary:
impl std::fmt::Display for ShiftRegister {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let string = format!("{:b}", self.data);
let pad = (self.pins as usize) - string.len();
let _ = f.write_str("0b");
for _ in 0..pad { let _ = f.write_str("0").unwrap(); }
f.pad_integral(true, "", &string)
}
}
impl ShiftRegister {
fn set(&mut self, data: usize) {
self.data = data;
}
fn get_ref(self) -> RefCell<ShiftRegister> {
RefCell::new(self)
}
}
pub struct Shifter {
pub data: PinOutput,
pub latch: PinOutput,
pub clock: PinOutput,
shift_registers: LinkedList<ShiftRegister>,
invert: bool,
}
impl Shifter {
/// Returns a new `Shifter` object that will shift out data using the given
/// *data_pin*, *latch_pin*, and *clock_pin*. To use a `Shifter` instance
/// you must first call the `add()` method for each shift register you
/// have connected in sequence.
///
/// # Note about pin numbering
///
/// `cupi` currently uses GPIO pin numbering. So pin 40 (very last pin on
/// the Raspberry Pi 2) is actually pin 29. You can refer to this image to
/// figure out which pin is which:
///
/// http://pi4j.com/images/j8header-2b-large.png
pub fn new(data_pin: usize, latch_pin: usize, clock_pin: usize) -> Shifter {
let cupi = CuPi::new().unwrap();
let shift_registers: LinkedList<ShiftRegister> = LinkedList::new();
Shifter {
data: cupi.pin(data_pin).unwrap().output(),
latch: cupi.pin(latch_pin).unwrap().output(),
clock: cupi.pin(clock_pin).unwrap().output(),
shift_registers: shift_registers,
invert: false,
}
}
/// Adds a new shift register to this Shifter and returns a reference to it.
/// You must specify the number of pins.
pub fn add(&mut self, pins: u8) -> usize {
let sr = ShiftRegister { data: 0, pins: pins };
self.shift_registers.push_back(sr);
self.shift_registers.len() - 1
}
/// Sets the *data* on the shift register at the given *sr_index*.
/// If *apply* is `true` the change will be applied immediately.
pub fn set(&mut self, sr_index: usize, data: usize, apply: bool) {
for (i, sr) in self.shift_registers.iter_mut().enumerate() {
if i == sr_index {
sr.set(data);
break;
}
}
if apply { self.apply(); }
}
/// Sets the given *pin* HIGH on the shift register at the given *sr_index*.
/// If *apply* is `true` the change will be applied immediately.
pub fn set_pin_high(&mut self, sr_index: usize, pin: u8, apply: bool) {
for (i, sr) in self.shift_registers.iter_mut().enumerate() {
if i == sr_index {
let new_state = sr.data | 1 << pin;
sr.set(new_state);
break;
}
}
if apply { self.apply(); }
}
/// Sets the given *pin* LOW on the shift register at the given *sr_index*.
/// If *apply* is `true` the change will be applied immediately.
pub fn set_pin_low(&mut self, sr_index: usize, pin: u8, apply: bool) {
for (i, sr) in self.shift_registers.iter_mut().enumerate() {
if i == sr_index {
let new_state = sr.data &!(1 << pin);
sr.set(new_state);
break;
}
}
if apply { self.apply(); }
}
/// This function will invert all logic so that HIGH is LOW and LOW is HIGH.
/// Very convenient if you made a (very common) mistake in your wiring or
/// you need reversed logic for other reasons.
pub fn invert(&mut self) {
match self.invert {
true => self.invert = false,
false => self.invert = true,
}
}
/// Applies all current shift register states by shifting out all the stored
/// data in each ShiftRegister object.
pub fn apply(&mut self) {
self.latch.low().unwrap();
for sr in self.shift_registers.iter() {
for n in 0..sr.pins {
self.clock.low().unwrap();
if self.invert {
match sr.data >> n & 1 {
1 => self.data.low().unwrap(),
0 => self.data.high().unwrap(),
_ => unreachable!(),
}
} else
|
self.clock.high().unwrap();
}
}
self.latch.high().unwrap();
}
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
}
}
|
{
match sr.data >> n & 1 {
0 => self.data.low().unwrap(),
1 => self.data.high().unwrap(),
_ => unreachable!(),
}
}
|
conditional_block
|
lib.rs
|
//! This crate provides an interface, `Shifter` that makes it trivially easy to
//! manipulate [shift registers][4] with a Raspberry Pi (thanks to [CuPi][1]).
//! Internally it keeps track of each shift register's state, allowing you to
//! manipulate each pin individually as if it were a regular GPIO pin!
//!
//! Why would you want to do this? **The Raspberry Pi only has 17 usable GPIO
//! pins**. Pin expanders like the [MCP23017][2] can add up to 16 more per chip
//! (at a cost of about ~$2-3/each) but they work over I2C which is *slow* (on
//! the Raspberry Pi anyway). With shift registers like the [74HC595][3]
//! (~$0.05-0.10/each) you can add a *nearly infinite* amount of output pins and
//! *refresh them as fast as the hardware supports*. You can even use many
//! sets of 3 pins to run multiple chains of shift registers in parallel.
//!
//! Realize your dream of controlling an enormous holiday lights display with a
//! single Raspberry Pi using cupi_shift!
//!
//! # Example
//!
//! ```
//! extern crate cupi_shift;
//! use cupi_shift::Shifter;
//!
//! fn main() {
//! // First define which pins you're using for your shift register(s)
//! let (data_pin, latch_pin, clock_pin) = (29, 28, 27);
//!
//! // Now create a new Shifter instance using those pins
//! let mut shifter = Shifter::new(data_pin, latch_pin, clock_pin);
//!
//! // Next we need to call `add()` for each shift register and tell it how
//! // many pins they have
//! let pins = 8;
//! let sr0 = shifter.add(pins); // Starts tracking a new shift register
//!
//! // Now we can set the state (aka data) of our shift register
//! shifter.set(sr0, 0b11111111, true); // Set all pins HIGH
//! }
//!
//! ```
//! # Note about pin numbering
//!
//! [CuPi][1] currently uses GPIO pin numbering. So pin 40 (very last pin on
//! the Raspberry Pi 2) is actually pin 29. You can refer to this image to
//! figure out which pin is which:
//!
//! http://pi4j.com/images/j8header-2b-large.png
//!
//! # Controlling individual pins
//!
//! That's all well and good (setting the state of all pins at once) but what if
//! you want to control just one pin at a time? You can do that too:
//!
//! ```
//! // Set the 8th pin (aka pin 7) HIGH and apply this change immediately
//! shifter.set_pin_high(sr0, 7, true); // NOTE: 3rd arg is 'apply'
//! // Set the first pin (aka pin 0) LOW but don't apply just yet
//! shifter.set_pin_low(sr0, 0, false);
//! shifter.apply(); // Apply the change (the other way to apply changes)
//! ```
//!
//! # Controlling multiple shift registers
//!
//! Every time you call `Shifter.add()` it will start tracking/controlling an
//! additional shift register. So if you have two shift registers chained
//! together you can add and control them individually like so:
//!
//! ```
//! let last = shifter.add(8); // Add an 8-pin shift register (sr_index: 0)
//! let first = shifter.add(8); // Add another (sr_index: 1)
//! // Set pin 0 HIGH on shift register 0 (all others LOW) but don't apply the change yet
//! shifter.set(last, 0b00000001, false);
//! // Set pin 7 HIGH on shift register 1 (all others LOW) and apply the change
//! shifter.set(first, 0b10000000, true);
//! ```
//!
//! **Note:** Shift registers need to be added in the order in which they are
//! chained with the *last* shift register being added first. Why is the order
//! reversed like this? That's how the logic of shift registers works: Every
//! time data is "shifted out" to a shift register it dumps its memory to the
//! the next shift register in the chain.
//!
//! You can also apply changes to individual pins on individual shift registers:
//!
//! ```
//! shifter.set_pin_high(sr1, 2, false); // Set pin 2 HIGH on shift register 1
//! shifter.set_pin_low(sr0, 3, true); // Set pin 3 LOW on shift register 0 (and apply)
//! ```
//!
//! In the above example we didn't set the *apply* (3rd) argument to `true`
//! until the we were done making our changes. If we set *apply* to `true` on
//! each we could wind up with some flickering. The more shift registers you
//! have in your chain the more flickering you can get if you call `apply()`
//! with every state (aka data) change.
//!
//!
//! [1]: https://crates.io/crates/cupi
//! [2]: https://www.adafruit.com/product/732
//! [3]: https://www.sparkfun.com/datasheets/IC/SN74HC595.pdf
//! [4]: https://en.wikipedia.org/wiki/Shift_register
#![allow(dead_code, unused_variables)]
extern crate cupi;
// Using a singly-linked list to represent the chain of shift registers since
// it accurately represents how they're physically linked together.
use std::collections::LinkedList;
use std::cell::RefCell;
use cupi::{CuPi, PinOutput, DigitalWrite};
struct ShiftRegister {
data: usize, // e.g. 0b01010101
pins: u8, // Not aware of any shift registers that have more than 255 output pins
}
// This is great for debugging; displays the Shift Register data in binary:
impl std::fmt::Display for ShiftRegister {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let string = format!("{:b}", self.data);
let pad = (self.pins as usize) - string.len();
let _ = f.write_str("0b");
for _ in 0..pad { let _ = f.write_str("0").unwrap(); }
f.pad_integral(true, "", &string)
}
}
impl ShiftRegister {
fn set(&mut self, data: usize) {
self.data = data;
}
fn get_ref(self) -> RefCell<ShiftRegister> {
RefCell::new(self)
}
}
pub struct Shifter {
pub data: PinOutput,
pub latch: PinOutput,
pub clock: PinOutput,
shift_registers: LinkedList<ShiftRegister>,
invert: bool,
}
impl Shifter {
/// Returns a new `Shifter` object that will shift out data using the given
/// *data_pin*, *latch_pin*, and *clock_pin*. To use a `Shifter` instance
/// you must first call the `add()` method for each shift register you
/// have connected in sequence.
///
/// # Note about pin numbering
///
/// `cupi` currently uses GPIO pin numbering. So pin 40 (very last pin on
/// the Raspberry Pi 2) is actually pin 29. You can refer to this image to
/// figure out which pin is which:
///
/// http://pi4j.com/images/j8header-2b-large.png
pub fn new(data_pin: usize, latch_pin: usize, clock_pin: usize) -> Shifter {
let cupi = CuPi::new().unwrap();
let shift_registers: LinkedList<ShiftRegister> = LinkedList::new();
Shifter {
data: cupi.pin(data_pin).unwrap().output(),
latch: cupi.pin(latch_pin).unwrap().output(),
clock: cupi.pin(clock_pin).unwrap().output(),
shift_registers: shift_registers,
invert: false,
}
}
/// Adds a new shift register to this Shifter and returns a reference to it.
/// You must specify the number of pins.
pub fn add(&mut self, pins: u8) -> usize {
let sr = ShiftRegister { data: 0, pins: pins };
self.shift_registers.push_back(sr);
self.shift_registers.len() - 1
}
/// Sets the *data* on the shift register at the given *sr_index*.
/// If *apply* is `true` the change will be applied immediately.
pub fn set(&mut self, sr_index: usize, data: usize, apply: bool) {
for (i, sr) in self.shift_registers.iter_mut().enumerate() {
if i == sr_index {
sr.set(data);
break;
}
}
if apply { self.apply(); }
}
/// Sets the given *pin* HIGH on the shift register at the given *sr_index*.
/// If *apply* is `true` the change will be applied immediately.
pub fn set_pin_high(&mut self, sr_index: usize, pin: u8, apply: bool) {
for (i, sr) in self.shift_registers.iter_mut().enumerate() {
if i == sr_index {
let new_state = sr.data | 1 << pin;
sr.set(new_state);
break;
}
}
if apply { self.apply(); }
}
/// Sets the given *pin* LOW on the shift register at the given *sr_index*.
/// If *apply* is `true` the change will be applied immediately.
pub fn set_pin_low(&mut self, sr_index: usize, pin: u8, apply: bool) {
for (i, sr) in self.shift_registers.iter_mut().enumerate() {
if i == sr_index {
let new_state = sr.data &!(1 << pin);
sr.set(new_state);
break;
}
}
if apply { self.apply(); }
}
/// This function will invert all logic so that HIGH is LOW and LOW is HIGH.
/// Very convenient if you made a (very common) mistake in your wiring or
/// you need reversed logic for other reasons.
pub fn invert(&mut self) {
match self.invert {
true => self.invert = false,
false => self.invert = true,
}
}
/// Applies all current shift register states by shifting out all the stored
/// data in each ShiftRegister object.
pub fn apply(&mut self) {
self.latch.low().unwrap();
for sr in self.shift_registers.iter() {
for n in 0..sr.pins {
self.clock.low().unwrap();
if self.invert {
match sr.data >> n & 1 {
1 => self.data.low().unwrap(),
0 => self.data.high().unwrap(),
_ => unreachable!(),
}
} else {
match sr.data >> n & 1 {
0 => self.data.low().unwrap(),
1 => self.data.high().unwrap(),
_ => unreachable!(),
}
}
self.clock.high().unwrap();
}
}
self.latch.high().unwrap();
}
}
#[cfg(test)]
mod tests {
#[test]
fn
|
() {
}
}
|
it_works
|
identifier_name
|
lib.rs
|
//! This crate provides an interface, `Shifter` that makes it trivially easy to
//! manipulate [shift registers][4] with a Raspberry Pi (thanks to [CuPi][1]).
//! Internally it keeps track of each shift register's state, allowing you to
//! manipulate each pin individually as if it were a regular GPIO pin!
//!
//! Why would you want to do this? **The Raspberry Pi only has 17 usable GPIO
//! pins**. Pin expanders like the [MCP23017][2] can add up to 16 more per chip
//! (at a cost of about ~$2-3/each) but they work over I2C which is *slow* (on
//! the Raspberry Pi anyway). With shift registers like the [74HC595][3]
//! (~$0.05-0.10/each) you can add a *nearly infinite* amount of output pins and
//! *refresh them as fast as the hardware supports*. You can even use many
//! sets of 3 pins to run multiple chains of shift registers in parallel.
//!
//! Realize your dream of controlling an enormous holiday lights display with a
//! single Raspberry Pi using cupi_shift!
//!
//! # Example
//!
//! ```
//! extern crate cupi_shift;
//! use cupi_shift::Shifter;
//!
//! fn main() {
//! // First define which pins you're using for your shift register(s)
//! let (data_pin, latch_pin, clock_pin) = (29, 28, 27);
//!
//! // Now create a new Shifter instance using those pins
//! let mut shifter = Shifter::new(data_pin, latch_pin, clock_pin);
//!
//! // Next we need to call `add()` for each shift register and tell it how
//! // many pins they have
//! let pins = 8;
//! let sr0 = shifter.add(pins); // Starts tracking a new shift register
//!
//! // Now we can set the state (aka data) of our shift register
//! shifter.set(sr0, 0b11111111, true); // Set all pins HIGH
//! }
//!
//! ```
//! # Note about pin numbering
//!
//! [CuPi][1] currently uses GPIO pin numbering. So pin 40 (very last pin on
//! the Raspberry Pi 2) is actually pin 29. You can refer to this image to
//! figure out which pin is which:
//!
//! http://pi4j.com/images/j8header-2b-large.png
//!
//! # Controlling individual pins
//!
//! That's all well and good (setting the state of all pins at once) but what if
//! you want to control just one pin at a time? You can do that too:
//!
//! ```
//! // Set the 8th pin (aka pin 7) HIGH and apply this change immediately
//! shifter.set_pin_high(sr0, 7, true); // NOTE: 3rd arg is 'apply'
//! // Set the first pin (aka pin 0) LOW but don't apply just yet
//! shifter.set_pin_low(sr0, 0, false);
//! shifter.apply(); // Apply the change (the other way to apply changes)
//! ```
//!
//! # Controlling multiple shift registers
//!
//! Every time you call `Shifter.add()` it will start tracking/controlling an
//! additional shift register. So if you have two shift registers chained
//! together you can add and control them individually like so:
//!
//! ```
//! let last = shifter.add(8); // Add an 8-pin shift register (sr_index: 0)
//! let first = shifter.add(8); // Add another (sr_index: 1)
//! // Set pin 0 HIGH on shift register 0 (all others LOW) but don't apply the change yet
//! shifter.set(last, 0b00000001, false);
//! // Set pin 7 HIGH on shift register 1 (all others LOW) and apply the change
//! shifter.set(first, 0b10000000, true);
//! ```
//!
//! **Note:** Shift registers need to be added in the order in which they are
//! chained with the *last* shift register being added first. Why is the order
//! reversed like this? That's how the logic of shift registers works: Every
//! time data is "shifted out" to a shift register it dumps its memory to the
//! the next shift register in the chain.
//!
//! You can also apply changes to individual pins on individual shift registers:
//!
//! ```
//! shifter.set_pin_high(sr1, 2, false); // Set pin 2 HIGH on shift register 1
//! shifter.set_pin_low(sr0, 3, true); // Set pin 3 LOW on shift register 0 (and apply)
//! ```
//!
//! In the above example we didn't set the *apply* (3rd) argument to `true`
//! until the we were done making our changes. If we set *apply* to `true` on
//! each we could wind up with some flickering. The more shift registers you
//! have in your chain the more flickering you can get if you call `apply()`
//! with every state (aka data) change.
//!
//!
//! [1]: https://crates.io/crates/cupi
|
#![allow(dead_code, unused_variables)]
extern crate cupi;
// Using a singly-linked list to represent the chain of shift registers since
// it accurately represents how they're physically linked together.
use std::collections::LinkedList;
use std::cell::RefCell;
use cupi::{CuPi, PinOutput, DigitalWrite};
struct ShiftRegister {
data: usize, // e.g. 0b01010101
pins: u8, // Not aware of any shift registers that have more than 255 output pins
}
// This is great for debugging; displays the Shift Register data in binary:
impl std::fmt::Display for ShiftRegister {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let string = format!("{:b}", self.data);
let pad = (self.pins as usize) - string.len();
let _ = f.write_str("0b");
for _ in 0..pad { let _ = f.write_str("0").unwrap(); }
f.pad_integral(true, "", &string)
}
}
impl ShiftRegister {
fn set(&mut self, data: usize) {
self.data = data;
}
fn get_ref(self) -> RefCell<ShiftRegister> {
RefCell::new(self)
}
}
pub struct Shifter {
pub data: PinOutput,
pub latch: PinOutput,
pub clock: PinOutput,
shift_registers: LinkedList<ShiftRegister>,
invert: bool,
}
impl Shifter {
/// Returns a new `Shifter` object that will shift out data using the given
/// *data_pin*, *latch_pin*, and *clock_pin*. To use a `Shifter` instance
/// you must first call the `add()` method for each shift register you
/// have connected in sequence.
///
/// # Note about pin numbering
///
/// `cupi` currently uses GPIO pin numbering. So pin 40 (very last pin on
/// the Raspberry Pi 2) is actually pin 29. You can refer to this image to
/// figure out which pin is which:
///
/// http://pi4j.com/images/j8header-2b-large.png
pub fn new(data_pin: usize, latch_pin: usize, clock_pin: usize) -> Shifter {
let cupi = CuPi::new().unwrap();
let shift_registers: LinkedList<ShiftRegister> = LinkedList::new();
Shifter {
data: cupi.pin(data_pin).unwrap().output(),
latch: cupi.pin(latch_pin).unwrap().output(),
clock: cupi.pin(clock_pin).unwrap().output(),
shift_registers: shift_registers,
invert: false,
}
}
/// Adds a new shift register to this Shifter and returns a reference to it.
/// You must specify the number of pins.
pub fn add(&mut self, pins: u8) -> usize {
let sr = ShiftRegister { data: 0, pins: pins };
self.shift_registers.push_back(sr);
self.shift_registers.len() - 1
}
/// Sets the *data* on the shift register at the given *sr_index*.
/// If *apply* is `true` the change will be applied immediately.
pub fn set(&mut self, sr_index: usize, data: usize, apply: bool) {
for (i, sr) in self.shift_registers.iter_mut().enumerate() {
if i == sr_index {
sr.set(data);
break;
}
}
if apply { self.apply(); }
}
/// Sets the given *pin* HIGH on the shift register at the given *sr_index*.
/// If *apply* is `true` the change will be applied immediately.
pub fn set_pin_high(&mut self, sr_index: usize, pin: u8, apply: bool) {
for (i, sr) in self.shift_registers.iter_mut().enumerate() {
if i == sr_index {
let new_state = sr.data | 1 << pin;
sr.set(new_state);
break;
}
}
if apply { self.apply(); }
}
/// Sets the given *pin* LOW on the shift register at the given *sr_index*.
/// If *apply* is `true` the change will be applied immediately.
pub fn set_pin_low(&mut self, sr_index: usize, pin: u8, apply: bool) {
for (i, sr) in self.shift_registers.iter_mut().enumerate() {
if i == sr_index {
let new_state = sr.data &!(1 << pin);
sr.set(new_state);
break;
}
}
if apply { self.apply(); }
}
/// This function will invert all logic so that HIGH is LOW and LOW is HIGH.
/// Very convenient if you made a (very common) mistake in your wiring or
/// you need reversed logic for other reasons.
pub fn invert(&mut self) {
match self.invert {
true => self.invert = false,
false => self.invert = true,
}
}
/// Applies all current shift register states by shifting out all the stored
/// data in each ShiftRegister object.
pub fn apply(&mut self) {
self.latch.low().unwrap();
for sr in self.shift_registers.iter() {
for n in 0..sr.pins {
self.clock.low().unwrap();
if self.invert {
match sr.data >> n & 1 {
1 => self.data.low().unwrap(),
0 => self.data.high().unwrap(),
_ => unreachable!(),
}
} else {
match sr.data >> n & 1 {
0 => self.data.low().unwrap(),
1 => self.data.high().unwrap(),
_ => unreachable!(),
}
}
self.clock.high().unwrap();
}
}
self.latch.high().unwrap();
}
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
}
}
|
//! [2]: https://www.adafruit.com/product/732
//! [3]: https://www.sparkfun.com/datasheets/IC/SN74HC595.pdf
//! [4]: https://en.wikipedia.org/wiki/Shift_register
|
random_line_split
|
lib.rs
|
//! This crate provides an interface, `Shifter` that makes it trivially easy to
//! manipulate [shift registers][4] with a Raspberry Pi (thanks to [CuPi][1]).
//! Internally it keeps track of each shift register's state, allowing you to
//! manipulate each pin individually as if it were a regular GPIO pin!
//!
//! Why would you want to do this? **The Raspberry Pi only has 17 usable GPIO
//! pins**. Pin expanders like the [MCP23017][2] can add up to 16 more per chip
//! (at a cost of about ~$2-3/each) but they work over I2C which is *slow* (on
//! the Raspberry Pi anyway). With shift registers like the [74HC595][3]
//! (~$0.05-0.10/each) you can add a *nearly infinite* amount of output pins and
//! *refresh them as fast as the hardware supports*. You can even use many
//! sets of 3 pins to run multiple chains of shift registers in parallel.
//!
//! Realize your dream of controlling an enormous holiday lights display with a
//! single Raspberry Pi using cupi_shift!
//!
//! # Example
//!
//! ```
//! extern crate cupi_shift;
//! use cupi_shift::Shifter;
//!
//! fn main() {
//! // First define which pins you're using for your shift register(s)
//! let (data_pin, latch_pin, clock_pin) = (29, 28, 27);
//!
//! // Now create a new Shifter instance using those pins
//! let mut shifter = Shifter::new(data_pin, latch_pin, clock_pin);
//!
//! // Next we need to call `add()` for each shift register and tell it how
//! // many pins they have
//! let pins = 8;
//! let sr0 = shifter.add(pins); // Starts tracking a new shift register
//!
//! // Now we can set the state (aka data) of our shift register
//! shifter.set(sr0, 0b11111111, true); // Set all pins HIGH
//! }
//!
//! ```
//! # Note about pin numbering
//!
//! [CuPi][1] currently uses GPIO pin numbering. So pin 40 (very last pin on
//! the Raspberry Pi 2) is actually pin 29. You can refer to this image to
//! figure out which pin is which:
//!
//! http://pi4j.com/images/j8header-2b-large.png
//!
//! # Controlling individual pins
//!
//! That's all well and good (setting the state of all pins at once) but what if
//! you want to control just one pin at a time? You can do that too:
//!
//! ```
//! // Set the 8th pin (aka pin 7) HIGH and apply this change immediately
//! shifter.set_pin_high(sr0, 7, true); // NOTE: 3rd arg is 'apply'
//! // Set the first pin (aka pin 0) LOW but don't apply just yet
//! shifter.set_pin_low(sr0, 0, false);
//! shifter.apply(); // Apply the change (the other way to apply changes)
//! ```
//!
//! # Controlling multiple shift registers
//!
//! Every time you call `Shifter.add()` it will start tracking/controlling an
//! additional shift register. So if you have two shift registers chained
//! together you can add and control them individually like so:
//!
//! ```
//! let last = shifter.add(8); // Add an 8-pin shift register (sr_index: 0)
//! let first = shifter.add(8); // Add another (sr_index: 1)
//! // Set pin 0 HIGH on shift register 0 (all others LOW) but don't apply the change yet
//! shifter.set(last, 0b00000001, false);
//! // Set pin 7 HIGH on shift register 1 (all others LOW) and apply the change
//! shifter.set(first, 0b10000000, true);
//! ```
//!
//! **Note:** Shift registers need to be added in the order in which they are
//! chained with the *last* shift register being added first. Why is the order
//! reversed like this? That's how the logic of shift registers works: Every
//! time data is "shifted out" to a shift register it dumps its memory to the
//! the next shift register in the chain.
//!
//! You can also apply changes to individual pins on individual shift registers:
//!
//! ```
//! shifter.set_pin_high(sr1, 2, false); // Set pin 2 HIGH on shift register 1
//! shifter.set_pin_low(sr0, 3, true); // Set pin 3 LOW on shift register 0 (and apply)
//! ```
//!
//! In the above example we didn't set the *apply* (3rd) argument to `true`
//! until the we were done making our changes. If we set *apply* to `true` on
//! each we could wind up with some flickering. The more shift registers you
//! have in your chain the more flickering you can get if you call `apply()`
//! with every state (aka data) change.
//!
//!
//! [1]: https://crates.io/crates/cupi
//! [2]: https://www.adafruit.com/product/732
//! [3]: https://www.sparkfun.com/datasheets/IC/SN74HC595.pdf
//! [4]: https://en.wikipedia.org/wiki/Shift_register
#![allow(dead_code, unused_variables)]
extern crate cupi;
// Using a singly-linked list to represent the chain of shift registers since
// it accurately represents how they're physically linked together.
use std::collections::LinkedList;
use std::cell::RefCell;
use cupi::{CuPi, PinOutput, DigitalWrite};
struct ShiftRegister {
data: usize, // e.g. 0b01010101
pins: u8, // Not aware of any shift registers that have more than 255 output pins
}
// This is great for debugging; displays the Shift Register data in binary:
impl std::fmt::Display for ShiftRegister {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let string = format!("{:b}", self.data);
let pad = (self.pins as usize) - string.len();
let _ = f.write_str("0b");
for _ in 0..pad { let _ = f.write_str("0").unwrap(); }
f.pad_integral(true, "", &string)
}
}
impl ShiftRegister {
fn set(&mut self, data: usize) {
self.data = data;
}
fn get_ref(self) -> RefCell<ShiftRegister> {
RefCell::new(self)
}
}
pub struct Shifter {
pub data: PinOutput,
pub latch: PinOutput,
pub clock: PinOutput,
shift_registers: LinkedList<ShiftRegister>,
invert: bool,
}
impl Shifter {
/// Returns a new `Shifter` object that will shift out data using the given
/// *data_pin*, *latch_pin*, and *clock_pin*. To use a `Shifter` instance
/// you must first call the `add()` method for each shift register you
/// have connected in sequence.
///
/// # Note about pin numbering
///
/// `cupi` currently uses GPIO pin numbering. So pin 40 (very last pin on
/// the Raspberry Pi 2) is actually pin 29. You can refer to this image to
/// figure out which pin is which:
///
/// http://pi4j.com/images/j8header-2b-large.png
pub fn new(data_pin: usize, latch_pin: usize, clock_pin: usize) -> Shifter {
let cupi = CuPi::new().unwrap();
let shift_registers: LinkedList<ShiftRegister> = LinkedList::new();
Shifter {
data: cupi.pin(data_pin).unwrap().output(),
latch: cupi.pin(latch_pin).unwrap().output(),
clock: cupi.pin(clock_pin).unwrap().output(),
shift_registers: shift_registers,
invert: false,
}
}
/// Adds a new shift register to this Shifter and returns a reference to it.
/// You must specify the number of pins.
pub fn add(&mut self, pins: u8) -> usize {
let sr = ShiftRegister { data: 0, pins: pins };
self.shift_registers.push_back(sr);
self.shift_registers.len() - 1
}
/// Sets the *data* on the shift register at the given *sr_index*.
/// If *apply* is `true` the change will be applied immediately.
pub fn set(&mut self, sr_index: usize, data: usize, apply: bool) {
for (i, sr) in self.shift_registers.iter_mut().enumerate() {
if i == sr_index {
sr.set(data);
break;
}
}
if apply { self.apply(); }
}
/// Sets the given *pin* HIGH on the shift register at the given *sr_index*.
/// If *apply* is `true` the change will be applied immediately.
pub fn set_pin_high(&mut self, sr_index: usize, pin: u8, apply: bool) {
for (i, sr) in self.shift_registers.iter_mut().enumerate() {
if i == sr_index {
let new_state = sr.data | 1 << pin;
sr.set(new_state);
break;
}
}
if apply { self.apply(); }
}
/// Sets the given *pin* LOW on the shift register at the given *sr_index*.
/// If *apply* is `true` the change will be applied immediately.
pub fn set_pin_low(&mut self, sr_index: usize, pin: u8, apply: bool) {
for (i, sr) in self.shift_registers.iter_mut().enumerate() {
if i == sr_index {
let new_state = sr.data &!(1 << pin);
sr.set(new_state);
break;
}
}
if apply { self.apply(); }
}
/// This function will invert all logic so that HIGH is LOW and LOW is HIGH.
/// Very convenient if you made a (very common) mistake in your wiring or
/// you need reversed logic for other reasons.
pub fn invert(&mut self) {
match self.invert {
true => self.invert = false,
false => self.invert = true,
}
}
/// Applies all current shift register states by shifting out all the stored
/// data in each ShiftRegister object.
pub fn apply(&mut self)
|
}
self.latch.high().unwrap();
}
}
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
}
}
|
{
self.latch.low().unwrap();
for sr in self.shift_registers.iter() {
for n in 0..sr.pins {
self.clock.low().unwrap();
if self.invert {
match sr.data >> n & 1 {
1 => self.data.low().unwrap(),
0 => self.data.high().unwrap(),
_ => unreachable!(),
}
} else {
match sr.data >> n & 1 {
0 => self.data.low().unwrap(),
1 => self.data.high().unwrap(),
_ => unreachable!(),
}
}
self.clock.high().unwrap();
}
|
identifier_body
|
coherence-tuple-conflict.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt::Debug;
use std::default::Default;
// Test that a blank impl for all T conflicts with an impl for some
// specific T.
trait MyTrait {
fn get(&self) -> usize;
}
impl<T> MyTrait for (T,T) { //~ ERROR E0119
fn get(&self) -> usize { 0 }
}
impl<A,B> MyTrait for (A,B) {
fn
|
(&self) -> usize { self.dummy }
}
fn main() { }
|
get
|
identifier_name
|
coherence-tuple-conflict.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
use std::fmt::Debug;
use std::default::Default;
// Test that a blank impl for all T conflicts with an impl for some
// specific T.
trait MyTrait {
fn get(&self) -> usize;
}
impl<T> MyTrait for (T,T) { //~ ERROR E0119
fn get(&self) -> usize { 0 }
}
impl<A,B> MyTrait for (A,B) {
fn get(&self) -> usize { self.dummy }
}
fn main() { }
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
random_line_split
|
coherence-tuple-conflict.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt::Debug;
use std::default::Default;
// Test that a blank impl for all T conflicts with an impl for some
// specific T.
trait MyTrait {
fn get(&self) -> usize;
}
impl<T> MyTrait for (T,T) { //~ ERROR E0119
fn get(&self) -> usize { 0 }
}
impl<A,B> MyTrait for (A,B) {
fn get(&self) -> usize { self.dummy }
}
fn main()
|
{ }
|
identifier_body
|
|
main.rs
|
extern crate curl;
extern crate tokio_core;
extern crate tokio_curl;
extern crate fibers;
extern crate futures;
extern crate futures_cpupool;
use std::io::{self, Write, BufWriter};
use curl::easy::Easy;
use futures::future::*;
use std::fs::File;
use futures_cpupool::CpuPool;
use std::sync::{Mutex, Arc};
use futures::{Future, Stream, Async};
use futures::stream::futures_unordered;
use tokio_core::reactor::Core;
use tokio_curl::{Session, Perform};
fn make_file(x: i32, data: &mut Vec<u8>) -> usize
|
fn collect_request(x: i32, url: &str, sess: &Session) -> FutureResult<Perform, ()> {
let mut data = Vec::new();
let mut easy = Easy::new();
easy.get(true).unwrap();
easy.url("https://www.rust-lang.org").unwrap();
easy.write_function(|data| Ok(data.len())).unwrap();
make_file(x, &mut data);
ok(sess.perform(easy))
}
fn main() {
let url = "https://en.wikipedia.org/wiki/Immanuel_Kant";
let mut core = Core::new().unwrap();
let handle = core.handle();
let pool = CpuPool::new_num_cpus();
let session = Session::new(handle);
let requests = (0..20).into_iter().map(|x| {
pool.spawn(collect_request(x, url, &session))
});
let performed = futures_unordered(requests).into_future();
}
// let out = requests.into_stream().wait();
|
{
let f = File::create(format!("./data/{}.txt", x)).expect("Unable to open file");
let mut writer = BufWriter::new(&f);
writer.write_all(data.as_mut_slice()).unwrap();
data.len()
}
|
identifier_body
|
main.rs
|
extern crate curl;
extern crate tokio_core;
extern crate tokio_curl;
extern crate fibers;
extern crate futures;
extern crate futures_cpupool;
use std::io::{self, Write, BufWriter};
use curl::easy::Easy;
use futures::future::*;
use std::fs::File;
use futures_cpupool::CpuPool;
use std::sync::{Mutex, Arc};
use futures::{Future, Stream, Async};
use futures::stream::futures_unordered;
use tokio_core::reactor::Core;
use tokio_curl::{Session, Perform};
fn make_file(x: i32, data: &mut Vec<u8>) -> usize {
let f = File::create(format!("./data/{}.txt", x)).expect("Unable to open file");
let mut writer = BufWriter::new(&f);
writer.write_all(data.as_mut_slice()).unwrap();
data.len()
}
fn collect_request(x: i32, url: &str, sess: &Session) -> FutureResult<Perform, ()> {
let mut data = Vec::new();
let mut easy = Easy::new();
easy.get(true).unwrap();
easy.url("https://www.rust-lang.org").unwrap();
easy.write_function(|data| Ok(data.len())).unwrap();
make_file(x, &mut data);
ok(sess.perform(easy))
}
fn main() {
|
let session = Session::new(handle);
let requests = (0..20).into_iter().map(|x| {
pool.spawn(collect_request(x, url, &session))
});
let performed = futures_unordered(requests).into_future();
}
// let out = requests.into_stream().wait();
|
let url = "https://en.wikipedia.org/wiki/Immanuel_Kant";
let mut core = Core::new().unwrap();
let handle = core.handle();
let pool = CpuPool::new_num_cpus();
|
random_line_split
|
main.rs
|
extern crate curl;
extern crate tokio_core;
extern crate tokio_curl;
extern crate fibers;
extern crate futures;
extern crate futures_cpupool;
use std::io::{self, Write, BufWriter};
use curl::easy::Easy;
use futures::future::*;
use std::fs::File;
use futures_cpupool::CpuPool;
use std::sync::{Mutex, Arc};
use futures::{Future, Stream, Async};
use futures::stream::futures_unordered;
use tokio_core::reactor::Core;
use tokio_curl::{Session, Perform};
fn make_file(x: i32, data: &mut Vec<u8>) -> usize {
let f = File::create(format!("./data/{}.txt", x)).expect("Unable to open file");
let mut writer = BufWriter::new(&f);
writer.write_all(data.as_mut_slice()).unwrap();
data.len()
}
fn collect_request(x: i32, url: &str, sess: &Session) -> FutureResult<Perform, ()> {
let mut data = Vec::new();
let mut easy = Easy::new();
easy.get(true).unwrap();
easy.url("https://www.rust-lang.org").unwrap();
easy.write_function(|data| Ok(data.len())).unwrap();
make_file(x, &mut data);
ok(sess.perform(easy))
}
fn
|
() {
let url = "https://en.wikipedia.org/wiki/Immanuel_Kant";
let mut core = Core::new().unwrap();
let handle = core.handle();
let pool = CpuPool::new_num_cpus();
let session = Session::new(handle);
let requests = (0..20).into_iter().map(|x| {
pool.spawn(collect_request(x, url, &session))
});
let performed = futures_unordered(requests).into_future();
}
// let out = requests.into_stream().wait();
|
main
|
identifier_name
|
mod.rs
|
// Copyright (c) 2015 Daniel Grunwald
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this
// software and associated documentation files (the "Software"), to deal in the Software
// without restriction, including without limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
// to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
// FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
pub use self::object::PyObject;
pub use self::typeobject::PyType;
pub use self::module::PyModule;
pub use self::string::{PyBytes, PyString, PyStringData};
#[cfg(feature="python27-sys")]
pub use self::string::PyUnicode;
#[cfg(feature="python3-sys")]
pub use self::string::PyString as PyUnicode;
pub use self::iterator::PyIterator;
pub use self::boolobject::PyBool;
pub use self::tuple::{PyTuple, NoArgs};
pub use self::dict::PyDict;
pub use self::list::PyList;
#[cfg(feature="python27-sys")]
pub use self::num::PyInt;
#[cfg(feature="python3-sys")]
pub use self::num::PyLong as PyInt;
pub use self::num::{PyLong, PyFloat};
pub use self::sequence::PySequence;
#[macro_export]
macro_rules! pyobject_newtype(
($name: ident) => (
py_impl_to_py_object_for_python_object!($name);
py_impl_from_py_object_for_python_object!($name);
impl $crate::PythonObject for $name {
#[inline]
fn as_object(&self) -> &$crate::PyObject {
&self.0
}
#[inline]
fn into_object(self) -> $crate::PyObject {
self.0
}
/// Unchecked downcast from PyObject to Self.
/// Undefined behavior if the input object does not have the expected type.
#[inline]
unsafe fn unchecked_downcast_from(obj: $crate::PyObject) -> Self {
$name(obj)
}
/// Unchecked downcast from PyObject to Self.
/// Undefined behavior if the input object does not have the expected type.
#[inline]
unsafe fn unchecked_downcast_borrow_from<'a>(obj: &'a $crate::PyObject) -> &'a Self {
::std::mem::transmute(obj)
}
}
);
($name: ident, $checkfunction: ident) => (
pyobject_newtype!($name);
impl ::python::PythonObjectWithCheckedDowncast for $name {
#[inline]
fn downcast_from<'p>(py: ::python::Python<'p>, obj: ::objects::object::PyObject) -> Result<$name, ::python::PythonObjectDowncastError<'p>> {
unsafe {
if ::ffi::$checkfunction(obj.as_ptr())!= 0 {
Ok($name(obj))
} else {
Err(::python::PythonObjectDowncastError(py))
}
}
}
#[inline]
fn downcast_borrow_from<'a, 'p>(py: ::python::Python<'p>, obj: &'a ::objects::object::PyObject) -> Result<&'a $name, ::python::PythonObjectDowncastError<'p>> {
unsafe {
if ::ffi::$checkfunction(obj.as_ptr())!= 0 {
Ok(::std::mem::transmute(obj))
} else {
Err(::python::PythonObjectDowncastError(py))
}
}
}
}
);
|
fn type_object(py: ::python::Python) -> ::objects::typeobject::PyType {
unsafe { ::objects::typeobject::PyType::from_type_ptr(py, &mut ::ffi::$typeobject) }
}
}
);
);
macro_rules! extract(
($obj:ident to $t:ty; $py:ident => $body: block) => {
impl <'source> ::conversion::FromPyObject<'source>
for $t
{
fn extract($py: Python, $obj: &'source PyObject) -> PyResult<Self> {
$body
}
}
}
);
mod object;
mod typeobject;
mod module;
mod string;
mod dict;
mod iterator;
mod boolobject;
mod tuple;
mod list;
mod num;
mod sequence;
pub mod exc;
#[cfg(feature="python27-sys")]
pub mod oldstyle;
mod tests;
|
($name: ident, $checkfunction: ident, $typeobject: ident) => (
pyobject_newtype!($name, $checkfunction);
impl ::python::PythonObjectWithTypeObject for $name {
#[inline]
|
random_line_split
|
simple.rs
|
extern crate readline as rl;
extern crate libc;
use std::io::{BufRead,BufReader};
use std::fs::File;
use std::path::Path;
use std::ffi::CStr;
use std::str;
fn
|
(text: String) -> Vec<String> {
let path = Path::new("/usr/share/dict/words");
let file = BufReader::new(File::open(&path).unwrap());
let mut entries: Vec<String> = Vec::new();
for line in file.lines() {
let word = line.unwrap();
if (&word).starts_with(&text) {
entries.push(word);
}
}
return entries;
}
/// Readline completion-entry generator (C callback).
///
/// Readline calls this repeatedly for one completion attempt: `state == 0`
/// marks the first call, so the candidate list is (re)built then; every call
/// returns the `state`-th candidate from the `rl` crate's stored list.
extern fn rl_compentry_func(text: *const i8, state: i32) -> *const i8 {
    if state == 0 {
        // Relies on readline passing a valid NUL-terminated C string here.
        let txt = unsafe { CStr::from_ptr(text).to_bytes() };
        // Panics if the prefix is not valid UTF-8 — input is assumed UTF-8.
        let entries = complete(str::from_utf8(txt).unwrap().to_string());
        rl::set_compentries(entries);
    }
    rl::get_compentry(state as usize)
}
/// Entry point readline invokes to attempt completion; delegates candidate
/// generation to `rl_compentry_func`. `_start`/`_end` (word bounds) are unused.
extern fn my_attempted_completion_function(text: *const i8, _start: i32, _end: i32) -> *mut *const i8 {
    rl::rl_completion_matches(text, rl_compentry_func)
}
// cargo run --example simple
/// Interactive demo: initialize readline, install the dictionary-backed
/// completer, then echo each entered line until EOF (Ctrl-D).
pub fn main() {
    rl::rl_initialize().unwrap();
    //println!("{}", rl::rl_readline_version())
    println!("{}", rl::rl_library_version());
    // Route tab-completion through our custom completer.
    rl::set_rl_attempted_completion_function(Some(my_attempted_completion_function));
    loop {
        match rl::readline("> ") {
            // A line was read: record it in history and echo it back.
            Some(line) => {
                let l = line.as_ref();
                rl::add_history(l);
                println!("{}", l);
                //println!("{}", rl::history_get(-2));
            },
            // EOF / no more input: print a trailing newline and stop.
            _ => {
                println!("");
                break
            }
        }
    }
}
|
complete
|
identifier_name
|
simple.rs
|
extern crate readline as rl;
extern crate libc;
use std::io::{BufRead,BufReader};
use std::fs::File;
use std::path::Path;
use std::ffi::CStr;
use std::str;
/// Return every word from the system dictionary that starts with `text`.
///
/// This is the data source for readline tab-completion. A completion
/// callback should never abort the process, so instead of panicking when
/// the dictionary is missing or a line is not valid UTF-8, this degrades
/// gracefully: an unreadable file yields no candidates and undecodable
/// lines are skipped.
fn complete(text: String) -> Vec<String> {
    let path = Path::new("/usr/share/dict/words");
    let file = match File::open(&path) {
        Ok(f) => f,
        // No dictionary available: offer no completions rather than panic.
        Err(_) => return Vec::new(),
    };
    BufReader::new(file)
        .lines()
        .filter_map(|line| line.ok())           // skip unreadable lines
        .filter(|word| word.starts_with(&text)) // keep matching prefixes
        .collect()
}
/// Readline completion-entry generator (C callback): rebuilds the candidate
/// list on the first call of an attempt (`state == 0`), then returns the
/// `state`-th stored candidate on each call.
extern fn rl_compentry_func(text: *const i8, state: i32) -> *const i8 {
    if state == 0 {
        // Relies on readline passing a valid NUL-terminated C string.
        let txt = unsafe { CStr::from_ptr(text).to_bytes() };
        let entries = complete(str::from_utf8(txt).unwrap().to_string());
        rl::set_compentries(entries);
    }
    rl::get_compentry(state as usize)
}
/// Readline's attempted-completion hook; delegates to `rl_compentry_func`.
extern fn my_attempted_completion_function(text: *const i8, _start: i32, _end: i32) -> *mut *const i8 {
    return rl::rl_completion_matches(text, rl_compentry_func)
}
// cargo run --example simple
pub fn main() {
rl::rl_initialize().unwrap();
//println!("{}", rl::rl_readline_version())
println!("{}", rl::rl_library_version());
rl::set_rl_attempted_completion_function(Some(my_attempted_completion_function));
loop {
match rl::readline("> ") {
Some(line) => {
let l = line.as_ref();
rl::add_history(l);
|
println!("");
break
}
}
}
}
|
println!("{}", l);
//println!("{}", rl::history_get(-2));
},
_ => {
|
random_line_split
|
simple.rs
|
extern crate readline as rl;
extern crate libc;
use std::io::{BufRead,BufReader};
use std::fs::File;
use std::path::Path;
use std::ffi::CStr;
use std::str;
fn complete(text: String) -> Vec<String> {
let path = Path::new("/usr/share/dict/words");
let file = BufReader::new(File::open(&path).unwrap());
let mut entries: Vec<String> = Vec::new();
for line in file.lines() {
let word = line.unwrap();
if (&word).starts_with(&text)
|
}
return entries;
}
/// Readline completion-entry generator (C callback): rebuilds the candidate
/// list on the first call of an attempt (`state == 0`), then returns the
/// `state`-th stored candidate on each call.
extern fn rl_compentry_func(text: *const i8, state: i32) -> *const i8 {
    if state == 0 {
        // Relies on readline passing a valid NUL-terminated C string.
        let txt = unsafe { CStr::from_ptr(text).to_bytes() };
        let entries = complete(str::from_utf8(txt).unwrap().to_string());
        rl::set_compentries(entries);
    }
    rl::get_compentry(state as usize)
}
/// Readline's attempted-completion hook; delegates to `rl_compentry_func`.
extern fn my_attempted_completion_function(text: *const i8, _start: i32, _end: i32) -> *mut *const i8 {
    return rl::rl_completion_matches(text, rl_compentry_func)
}
// cargo run --example simple
/// Interactive demo: initialize readline, install the dictionary-backed
/// completer, then echo each entered line until EOF (Ctrl-D).
pub fn main() {
    rl::rl_initialize().unwrap();
    //println!("{}", rl::rl_readline_version())
    println!("{}", rl::rl_library_version());
    // Route tab-completion through our custom completer.
    rl::set_rl_attempted_completion_function(Some(my_attempted_completion_function));
    loop {
        match rl::readline("> ") {
            // A line was read: record it in history and echo it back.
            Some(line) => {
                let l = line.as_ref();
                rl::add_history(l);
                println!("{}", l);
                //println!("{}", rl::history_get(-2));
            },
            // EOF / no more input: print a trailing newline and stop.
            _ => {
                println!("");
                break
            }
        }
    }
}
|
{
entries.push(word);
}
|
conditional_block
|
simple.rs
|
extern crate readline as rl;
extern crate libc;
use std::io::{BufRead,BufReader};
use std::fs::File;
use std::path::Path;
use std::ffi::CStr;
use std::str;
/// Collect every word from the system dictionary that begins with `text`;
/// this feeds readline tab-completion. Panics (as before) if the dictionary
/// file cannot be opened or a line is not valid UTF-8.
fn complete(text: String) -> Vec<String> {
    let dict = Path::new("/usr/share/dict/words");
    let reader = BufReader::new(File::open(&dict).unwrap());
    reader
        .lines()
        .map(|line| line.unwrap())
        .filter(|word| word.starts_with(&text))
        .collect()
}
/// Readline completion-entry generator (C callback): rebuilds the candidate
/// list on the first call of an attempt (`state == 0`), then returns the
/// `state`-th stored candidate on each call.
extern fn rl_compentry_func(text: *const i8, state: i32) -> *const i8 {
    if state == 0 {
        // Relies on readline passing a valid NUL-terminated C string.
        let txt = unsafe { CStr::from_ptr(text).to_bytes() };
        let entries = complete(str::from_utf8(txt).unwrap().to_string());
        rl::set_compentries(entries);
    }
    rl::get_compentry(state as usize)
}
/// Readline's attempted-completion hook; delegates to `rl_compentry_func`.
extern fn my_attempted_completion_function(text: *const i8, _start: i32, _end: i32) -> *mut *const i8 {
    return rl::rl_completion_matches(text, rl_compentry_func)
}
// cargo run --example simple
pub fn main()
|
}
}
|
{
rl::rl_initialize().unwrap();
//println!("{}", rl::rl_readline_version())
println!("{}", rl::rl_library_version());
rl::set_rl_attempted_completion_function(Some(my_attempted_completion_function));
loop {
match rl::readline("> ") {
Some(line) => {
let l = line.as_ref();
rl::add_history(l);
println!("{}", l);
//println!("{}", rl::history_get(-2));
},
_ => {
println!("");
break
}
}
|
identifier_body
|
template.rs
|
use handlebars::Handlebars;
use std::path::{Path, PathBuf};
use glob::glob;
use std::sync::Mutex;
use itertools::Itertools;
use std::error::Error;
use std::borrow::Cow;
use serde_json::{Value, to_value};
use rocket::response::{Responder, Response};
use rocket::request::Request;
use rocket::http::ContentType;
use rocket::http::Status;
use serde::ser::Serialize;
use std::io::Cursor;
#[derive(Debug)]
pub struct Template {
name: Cow<'static, str>,
value: Option<Value>,
}
lazy_static! {
static ref HANDLEBARS: Mutex<Handlebars> = Mutex::new(Handlebars::new());
}
/// Run `f` with exclusive access to the global Handlebars registry,
/// e.g. to register helpers before any template is rendered.
pub fn init_handlebars(f: fn(&mut Handlebars)) {
    let mut hb = HANDLEBARS.lock().unwrap();
    f(&mut hb)
}
/// Recursively register every `*.hbs` file under `root` with the global
/// Handlebars registry.
///
/// A template's name is its path relative to `root`, without the `.hbs`
/// extension and with backslashes normalised to `/` (Windows paths).
/// Registration failures for individual templates are logged and skipped;
/// only setup errors (invalid glob pattern, non-UTF-8 paths) abort.
pub fn add_templates<P>(root: P) -> Result<(), Box<Error>>
    where P: Into<PathBuf>
{
    let mut hb = HANDLEBARS.lock().unwrap();
    let root_buf = root.into();
    let mut mask_buf = root_buf.clone();
    mask_buf.push("**");
    mask_buf.push("*.hbs");
    let mask = mask_buf.to_str().ok_or("read error")?;
    let add_template = &mut |entry: &Path| -> Result<(), Box<Error>> {
        let stripped = entry.strip_prefix(&root_buf)?.with_extension(""); // strip prefix and .hbs
        // Relative path -> template name: drop extension, force '/' separators.
        let name: String = stripped
            .with_extension("")
            .to_str()
            .ok_or("can't convert path to string")?
            .chars()
            .map(|c| if c == '\\' { '/' } else { c }) // was a needless filter_map(|c| Some(..))
            .collect();
        println!("{}", &name);
        if let Err(e) = hb.register_template_file(&name, &entry) {
            // TODO: proper error logging
            println!("{} {}", &name, &e);
            error!("Error in Handlebars template {}", &name);
            info!("{}", e);
            info!("Template path: '{}'", entry.to_string_lossy());
        }
        Ok(())
    };
    // Propagate an invalid glob pattern instead of panicking (was .unwrap()).
    for entry in glob(mask)?.filter_map(Result::ok) {
        let _ = add_template(&entry); // best-effort: failures already logged above
    }
    Result::Ok(())
}
impl Template {
pub fn render<S, C>(name: S, context: C) -> Template
where S: Into<Cow<'static, str>>,
C: Serialize
|
}
impl Responder<'static> for Template {
    /// Render the named template with the stored JSON context and wrap the
    /// result in an HTML response. A rendering error is not a failure:
    /// its message text is served as the body instead.
    fn respond_to(self, _: &Request) -> Result<Response<'static>, Status> {
        let hb = HANDLEBARS.lock().unwrap();
        let render = hb.render(&self.name, &self.value).unwrap_or_else(|e| e.to_string());
        Response::build()
            .header(ContentType::HTML)
            .sized_body(Cursor::new(render))
            .ok()
    }
}
|
{
Template {
name: name.into(),
value: to_value(context).ok(),
}
}
|
identifier_body
|
template.rs
|
use handlebars::Handlebars;
use std::path::{Path, PathBuf};
use glob::glob;
use std::sync::Mutex;
use itertools::Itertools;
use std::error::Error;
use std::borrow::Cow;
use serde_json::{Value, to_value};
use rocket::response::{Responder, Response};
use rocket::request::Request;
use rocket::http::ContentType;
use rocket::http::Status;
use serde::ser::Serialize;
use std::io::Cursor;
#[derive(Debug)]
pub struct Template {
name: Cow<'static, str>,
value: Option<Value>,
}
lazy_static! {
static ref HANDLEBARS: Mutex<Handlebars> = Mutex::new(Handlebars::new());
}
pub fn init_handlebars(f: fn(&mut Handlebars)) {
let mut hb = HANDLEBARS.lock().unwrap();
f(&mut hb)
}
pub fn add_templates<P>(root: P) -> Result<(), Box<Error>>
where P: Into<PathBuf>
{
let mut hb = HANDLEBARS.lock().unwrap();
let root_buf = root.into();
let mut mask_buf = root_buf.clone();
mask_buf.push("**");
mask_buf.push("*.hbs");
let mask = mask_buf.to_str().ok_or("read error")?;
let add_template = &mut |entry: &Path| -> Result<(), Box<Error>> {
let stripped = entry.strip_prefix(&root_buf)?.with_extension(""); // strip prefix and.hbs
//let ext = stripped.extension().ok_or("no type extension")?; // skip if no.html or smth else
let name: String = stripped
.with_extension("")
.to_str()
.ok_or("can't convert path to string")?
.chars()
.filter_map(|c| Some(if c == '\\'
|
else { c }))
.collect();
println!("{}", &name);
if let Err(e) = hb.register_template_file(&name, &entry) {
// TODO: make correct error loagging
println!("{} {}", &name, &e);
error!("Error in Handlebars template {}", &name);
info!("{}", e);
info!("Template path: '{}'", entry.to_string_lossy());
}
Ok(())
};
glob(mask)
.unwrap()
.filter_map(Result::ok)
.foreach(|entry| { let _ = add_template(&entry); });
Result::Ok(())
}
impl Template {
pub fn render<S, C>(name: S, context: C) -> Template
where S: Into<Cow<'static, str>>,
C: Serialize
{
Template {
name: name.into(),
value: to_value(context).ok(),
}
}
}
impl Responder<'static> for Template {
fn respond_to(self, _: &Request) -> Result<Response<'static>, Status> {
let hb = HANDLEBARS.lock().unwrap();
let render = hb.render(&self.name, &self.value).unwrap_or_else(|e| e.to_string());
Response::build()
.header(ContentType::HTML)
.sized_body(Cursor::new(render))
.ok()
}
}
|
{ '/' }
|
conditional_block
|
template.rs
|
use handlebars::Handlebars;
use std::path::{Path, PathBuf};
use glob::glob;
use std::sync::Mutex;
use itertools::Itertools;
use std::error::Error;
use std::borrow::Cow;
use serde_json::{Value, to_value};
use rocket::response::{Responder, Response};
use rocket::request::Request;
use rocket::http::ContentType;
use rocket::http::Status;
use serde::ser::Serialize;
use std::io::Cursor;
#[derive(Debug)]
pub struct Template {
name: Cow<'static, str>,
value: Option<Value>,
}
lazy_static! {
static ref HANDLEBARS: Mutex<Handlebars> = Mutex::new(Handlebars::new());
}
pub fn init_handlebars(f: fn(&mut Handlebars)) {
let mut hb = HANDLEBARS.lock().unwrap();
f(&mut hb)
}
pub fn add_templates<P>(root: P) -> Result<(), Box<Error>>
where P: Into<PathBuf>
{
let mut hb = HANDLEBARS.lock().unwrap();
let root_buf = root.into();
let mut mask_buf = root_buf.clone();
mask_buf.push("**");
mask_buf.push("*.hbs");
let mask = mask_buf.to_str().ok_or("read error")?;
let add_template = &mut |entry: &Path| -> Result<(), Box<Error>> {
let stripped = entry.strip_prefix(&root_buf)?.with_extension(""); // strip prefix and.hbs
//let ext = stripped.extension().ok_or("no type extension")?; // skip if no.html or smth else
let name: String = stripped
.with_extension("")
.to_str()
.ok_or("can't convert path to string")?
.chars()
.filter_map(|c| Some(if c == '\\' { '/' } else { c }))
.collect();
println!("{}", &name);
if let Err(e) = hb.register_template_file(&name, &entry) {
// TODO: make correct error loagging
println!("{} {}", &name, &e);
error!("Error in Handlebars template {}", &name);
info!("{}", e);
info!("Template path: '{}'", entry.to_string_lossy());
}
Ok(())
};
glob(mask)
.unwrap()
.filter_map(Result::ok)
.foreach(|entry| { let _ = add_template(&entry); });
Result::Ok(())
}
impl Template {
pub fn render<S, C>(name: S, context: C) -> Template
where S: Into<Cow<'static, str>>,
C: Serialize
{
Template {
name: name.into(),
value: to_value(context).ok(),
}
}
}
impl Responder<'static> for Template {
fn
|
(self, _: &Request) -> Result<Response<'static>, Status> {
let hb = HANDLEBARS.lock().unwrap();
let render = hb.render(&self.name, &self.value).unwrap_or_else(|e| e.to_string());
Response::build()
.header(ContentType::HTML)
.sized_body(Cursor::new(render))
.ok()
}
}
|
respond_to
|
identifier_name
|
template.rs
|
use handlebars::Handlebars;
use std::path::{Path, PathBuf};
use glob::glob;
use std::sync::Mutex;
|
use itertools::Itertools;
use std::error::Error;
use std::borrow::Cow;
use serde_json::{Value, to_value};
use rocket::response::{Responder, Response};
use rocket::request::Request;
use rocket::http::ContentType;
use rocket::http::Status;
use serde::ser::Serialize;
use std::io::Cursor;
#[derive(Debug)]
pub struct Template {
name: Cow<'static, str>,
value: Option<Value>,
}
lazy_static! {
static ref HANDLEBARS: Mutex<Handlebars> = Mutex::new(Handlebars::new());
}
pub fn init_handlebars(f: fn(&mut Handlebars)) {
let mut hb = HANDLEBARS.lock().unwrap();
f(&mut hb)
}
pub fn add_templates<P>(root: P) -> Result<(), Box<Error>>
where P: Into<PathBuf>
{
let mut hb = HANDLEBARS.lock().unwrap();
let root_buf = root.into();
let mut mask_buf = root_buf.clone();
mask_buf.push("**");
mask_buf.push("*.hbs");
let mask = mask_buf.to_str().ok_or("read error")?;
let add_template = &mut |entry: &Path| -> Result<(), Box<Error>> {
let stripped = entry.strip_prefix(&root_buf)?.with_extension(""); // strip prefix and.hbs
//let ext = stripped.extension().ok_or("no type extension")?; // skip if no.html or smth else
let name: String = stripped
.with_extension("")
.to_str()
.ok_or("can't convert path to string")?
.chars()
.filter_map(|c| Some(if c == '\\' { '/' } else { c }))
.collect();
println!("{}", &name);
if let Err(e) = hb.register_template_file(&name, &entry) {
// TODO: make correct error loagging
println!("{} {}", &name, &e);
error!("Error in Handlebars template {}", &name);
info!("{}", e);
info!("Template path: '{}'", entry.to_string_lossy());
}
Ok(())
};
glob(mask)
.unwrap()
.filter_map(Result::ok)
.foreach(|entry| { let _ = add_template(&entry); });
Result::Ok(())
}
impl Template {
    /// Create a deferred template response: rendering happens later in
    /// `Responder::respond_to`, not here.
    ///
    /// `name` should match a template registered via `add_templates`;
    /// `context` is serialized to JSON (a serialization failure is
    /// swallowed and leaves `value` as `None`).
    pub fn render<S, C>(name: S, context: C) -> Template
        where S: Into<Cow<'static, str>>,
              C: Serialize
    {
        Template {
            name: name.into(),
            value: to_value(context).ok(),
        }
    }
}
impl Responder<'static> for Template {
fn respond_to(self, _: &Request) -> Result<Response<'static>, Status> {
let hb = HANDLEBARS.lock().unwrap();
let render = hb.render(&self.name, &self.value).unwrap_or_else(|e| e.to_string());
Response::build()
.header(ContentType::HTML)
.sized_body(Cursor::new(render))
.ok()
}
}
|
random_line_split
|
|
handler.rs
|
use Rpc;
use std::{fmt, mem, str};
use std::sync::Arc;
use hyper::{self, mime, server, Method};
use hyper::header::{self, Headers};
use unicase::Ascii;
use jsonrpc::{self as core, FutureResult, Metadata, Middleware, NoopMiddleware};
use jsonrpc::futures::{Future, Poll, Async, Stream, future};
use jsonrpc::serde_json;
use response::Response;
use server_utils::cors;
use {utils, RequestMiddleware, RequestMiddlewareAction, CorsDomains, AllowedHosts, RestApi};
/// jsonrpc http request handler.
pub struct ServerHandler<M: Metadata = (), S: Middleware<M> = NoopMiddleware> {
jsonrpc_handler: Rpc<M, S>,
allowed_hosts: AllowedHosts,
cors_domains: CorsDomains,
cors_max_age: Option<u32>,
middleware: Arc<RequestMiddleware>,
rest_api: RestApi,
max_request_body_size: usize,
}
impl<M: Metadata, S: Middleware<M>> ServerHandler<M, S> {
	/// Create new request handler.
	///
	/// * `jsonrpc_handler` — shared RPC dispatcher and metadata extractor.
	/// * `cors_domains` / `cors_max_age` — CORS policy applied to responses.
	/// * `allowed_hosts` — Host-header whitelist checked per request.
	/// * `middleware` — hook that may intercept a request before RPC handling.
	/// * `rest_api` — whether REST-style `/method/params` calls are accepted.
	/// * `max_request_body_size` — bodies larger than this are rejected.
	pub fn new(
		jsonrpc_handler: Rpc<M, S>,
		cors_domains: CorsDomains,
		cors_max_age: Option<u32>,
		allowed_hosts: AllowedHosts,
		middleware: Arc<RequestMiddleware>,
		rest_api: RestApi,
		max_request_body_size: usize,
	) -> Self {
		ServerHandler {
			jsonrpc_handler,
			allowed_hosts,
			cors_domains,
			cors_max_age,
			middleware,
			rest_api,
			max_request_body_size,
		}
	}
}
impl<M: Metadata, S: Middleware<M>> server::Service for ServerHandler<M, S> {
	type Request = server::Request;
	type Response = server::Response;
	type Error = hyper::Error;
	type Future = Handler<M, S>;
	/// Entry point for each incoming hyper request: run user middleware,
	/// enforce the allowed-hosts check, and either return the middleware's
	/// response or start RPC processing in the `ReadingHeaders` state.
	fn call(&self, request: Self::Request) -> Self::Future {
		let is_host_allowed = utils::is_host_allowed(&request, &self.allowed_hosts);
		let action = self.middleware.on_request(request);
		// Middleware either lets the request proceed (Err(request) keeps
		// ownership of it) or short-circuits with its own response.
		let (should_validate_hosts, should_continue_on_invalid_cors, response) = match action {
			RequestMiddlewareAction::Proceed { should_continue_on_invalid_cors, request }=> (
				true, should_continue_on_invalid_cors, Err(request)
			),
			RequestMiddlewareAction::Respond { should_validate_hosts, response } => (
				should_validate_hosts, false, Ok(response)
			),
		};
		// Validate host
		if should_validate_hosts &&!is_host_allowed {
			return Handler::Error(Some(Response::host_not_allowed()));
		}
		// Replace response with the one returned by middleware.
		match response {
			Ok(response) => Handler::Middleware(response),
			Err(request) => {
				Handler::Rpc(RpcHandler {
					jsonrpc_handler: self.jsonrpc_handler.clone(),
					state: RpcHandlerState::ReadingHeaders {
						request: request,
						cors_domains: self.cors_domains.clone(),
						continue_on_invalid_cors: should_continue_on_invalid_cors,
					},
					is_options: false,
					cors_header: cors::CorsHeader::NotRequired,
					rest_api: self.rest_api,
					cors_max_age: self.cors_max_age,
					max_request_body_size: self.max_request_body_size,
				})
			}
		}
	}
}
/// Response future returned by `ServerHandler::call`.
pub enum Handler<M: Metadata, S: Middleware<M>> {
	/// A JSON-RPC request being processed by this server.
	Rpc(RpcHandler<M, S>),
	/// An immediate error response (e.g. disallowed host); `Option` so the
	/// response can be taken out exactly once when polled.
	Error(Option<Response>),
	/// A response produced by user middleware, polled to completion.
	Middleware(Box<Future<Item=server::Response, Error=hyper::Error> + Send>),
}
impl<M: Metadata, S: Middleware<M>> Future for Handler<M, S> {
	type Item = server::Response;
	type Error = hyper::Error;
	/// Delegate polling to whichever variant this handler is; the `Error`
	/// variant resolves immediately with its stored response.
	fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
		match *self {
			Handler::Rpc(ref mut handler) => handler.poll(),
			Handler::Middleware(ref mut middleware) => middleware.poll(),
			Handler::Error(ref mut response) => Ok(Async::Ready(
				response.take().expect("Response always Some initialy. Returning `Ready` so will never be polled again; qed").into()
			)),
		}
	}
}
/// Outcome of one `RpcHandler::poll` state transition.
enum RpcPollState<M, F> where
	F: Future<Item = Option<core::Response>, Error = ()>,
{
	/// Transition finished; the caller may poll again immediately.
	Ready(RpcHandlerState<M, F>),
	/// Still waiting on I/O; the caller should return `NotReady`.
	NotReady(RpcHandlerState<M, F>),
}
impl<M, F> RpcPollState<M, F> where
	F: Future<Item = Option<core::Response>, Error = ()>,
{
	/// Split into the wrapped handler state and a readiness flag (`true`
	/// for `Ready`), letting `RpcHandler::poll` decide whether to poll
	/// again immediately.
	fn decompose(self) -> (RpcHandlerState<M, F>, bool) {
		use self::RpcPollState::*;
		match self {
			Ready(handler) => (handler, true),
			NotReady(handler) => (handler, false),
		}
	}
}
/// States of the per-request processing state machine driven by
/// `RpcHandler::poll`.
enum RpcHandlerState<M, F> where
	F: Future<Item = Option<core::Response>, Error = ()>,
{
	/// Initial state: CORS/host/content-type headers not yet examined.
	ReadingHeaders {
		request: server::Request,
		cors_domains: CorsDomains,
		continue_on_invalid_cors: bool,
	},
	/// Accumulating body chunks into `request`; `uri` is `Some` only when
	/// REST-style dispatch is enabled.
	ReadingBody {
		body: hyper::Body,
		uri: Option<hyper::Uri>,
		request: Vec<u8>,
		metadata: M,
	},
	/// Dispatch a REST-style call built from the request path.
	ProcessRest {
		uri: hyper::Uri,
		metadata: M,
	},
	/// Response is ready; headers are attached before returning it.
	Writing(Response),
	/// RPC execution future is in flight.
	Waiting(FutureResult<F>),
	/// Placeholder swapped in via `mem::replace` while a transition runs.
	Done,
}
impl<M, F> fmt::Debug for RpcHandlerState<M, F> where
	F: Future<Item = Option<core::Response>, Error = ()>,
{
	/// Manual `Debug`: most variants hold payloads without `Debug` impls
	/// (futures, hyper types), so only the state name is printed.
	fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
		use self::RpcHandlerState::*;
		match *self {
			ReadingHeaders {..} => write!(fmt, "ReadingHeaders"),
			ReadingBody {..} => write!(fmt, "ReadingBody"),
			ProcessRest {..} => write!(fmt, "ProcessRest"),
			Writing(ref res) => write!(fmt, "Writing({:?})", res),
			Waiting(_) => write!(fmt, "Waiting"),
			Done => write!(fmt, "Done"),
		}
	}
}
/// Future that drives a single JSON-RPC HTTP request to completion.
pub struct RpcHandler<M: Metadata, S: Middleware<M>> {
	// Shared dispatcher + metadata extractor.
	jsonrpc_handler: Rpc<M, S>,
	// Current position in the request-processing state machine.
	state: RpcHandlerState<M, S::Future>,
	// True for OPTIONS requests; adds Allow/Accept headers to the response.
	is_options: bool,
	// CORS origin resolved from the request headers.
	cors_header: cors::CorsHeader<header::AccessControlAllowOrigin>,
	cors_max_age: Option<u32>,
	rest_api: RestApi,
	// Bodies larger than this are rejected with "too large".
	max_request_body_size: usize,
}
impl<M: Metadata, S: Middleware<M>> Future for RpcHandler<M, S> {
	type Item = server::Response;
	type Error = hyper::Error;
	/// Advance the request state machine by one transition. The current
	/// state is swapped out for `Done` (so it can be consumed by value),
	/// the transition runs, and unless the new state is `Writing` the
	/// machine either polls again immediately (`Ready`) or parks
	/// (`NotReady`).
	fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
		let new_state = match mem::replace(&mut self.state, RpcHandlerState::Done) {
			RpcHandlerState::ReadingHeaders { request, cors_domains, continue_on_invalid_cors, } => {
				// Read cors header
				self.cors_header = utils::cors_header(&request, &cors_domains);
				self.is_options = *request.method() == Method::Options;
				// Read other headers
				RpcPollState::Ready(self.read_headers(request, continue_on_invalid_cors))
			},
			RpcHandlerState::ReadingBody { body, request, metadata, uri, } => {
				// Body-level failures become client-visible 4xx responses,
				// except transport errors which abort the connection.
				match self.process_body(body, request, uri, metadata) {
					Err(BodyError::Utf8(ref e)) => {
						let mesg = format!("utf-8 encoding error at byte {} in request body", e.valid_up_to())
						let resp = Response::bad_request(mesg);
						RpcPollState::Ready(RpcHandlerState::Writing(resp))
					}
					Err(BodyError::TooLarge) => {
						let resp = Response::too_large("request body size exceeds allowed maximum");
						RpcPollState::Ready(RpcHandlerState::Writing(resp))
					}
					Err(BodyError::Hyper(e)) => return Err(e),
					Ok(state) => state,
				}
			},
			RpcHandlerState::ProcessRest { uri, metadata } => {
				self.process_rest(uri, metadata)?
			},
			RpcHandlerState::Waiting(mut waiting) => {
				match waiting.poll() {
					Ok(Async::Ready(response)) => {
						RpcPollState::Ready(RpcHandlerState::Writing(match response {
							// Notification, just return empty response.
							None => Response::ok(String::new()),
							// Add new line to have nice output when using CLI clients (curl)
							Some(result) => Response::ok(format!("{}\n", result)),
						}.into()))
					},
					Ok(Async::NotReady) => RpcPollState::NotReady(RpcHandlerState::Waiting(waiting)),
					Err(_) => RpcPollState::Ready(RpcHandlerState::Writing(Response::internal_error())),
				}
			},
			state => RpcPollState::NotReady(state),
		};
		let (new_state, is_ready) = new_state.decompose();
		match new_state {
			RpcHandlerState::Writing(res) => {
				// Finalize: attach OPTIONS/CORS headers and resolve.
				let mut response: server::Response = res.into();
				let cors_header = mem::replace(&mut self.cors_header, cors::CorsHeader::Invalid);
				Self::set_response_headers(
					response.headers_mut(),
					self.is_options,
					cors_header.into(),
					self.cors_max_age,
				);
				Ok(Async::Ready(response))
			},
			state => {
				self.state = state;
				if is_ready {
					self.poll()
				} else {
					Ok(Async::NotReady)
				}
			},
		}
	}
}
// Intermediate and internal error type to better distinguish
// error cases occuring during request body processing.
enum BodyError {
	/// Transport-level failure while polling the request body stream.
	Hyper(hyper::Error),
	/// Request body was not valid UTF-8.
	Utf8(str::Utf8Error),
	/// Body exceeded the configured `max_request_body_size`.
	TooLarge,
}
impl From<hyper::Error> for BodyError {
	/// Enables `?` on hyper body polling inside `process_body`.
	fn from(e: hyper::Error) -> BodyError {
		BodyError::Hyper(e)
	}
}
impl<M: Metadata, S: Middleware<M>> RpcHandler<M, S> {
fn
|
(
&self,
request: server::Request,
continue_on_invalid_cors: bool,
) -> RpcHandlerState<M, S::Future> {
if self.cors_header == cors::CorsHeader::Invalid &&!continue_on_invalid_cors {
return RpcHandlerState::Writing(Response::invalid_cors());
}
// Read metadata
let metadata = self.jsonrpc_handler.extractor.read_metadata(&request);
// Proceed
match *request.method() {
// Validate the ContentType header
// to prevent Cross-Origin XHRs with text/plain
Method::Post if Self::is_json(request.headers().get::<header::ContentType>()) => {
let uri = if self.rest_api!= RestApi::Disabled { Some(request.uri().clone()) } else { None };
RpcHandlerState::ReadingBody {
metadata,
request: Default::default(),
uri,
body: request.body(),
}
},
Method::Post if self.rest_api == RestApi::Unsecure && request.uri().path().split('/').count() > 2 => {
RpcHandlerState::ProcessRest {
metadata,
uri: request.uri().clone(),
}
},
// Just return error for unsupported content type
Method::Post => {
RpcHandlerState::Writing(Response::unsupported_content_type())
},
// Don't validate content type on options
Method::Options => {
RpcHandlerState::Writing(Response::empty())
},
// Disallow other methods.
_ => {
RpcHandlerState::Writing(Response::method_not_allowed())
},
}
}
	/// Translate a REST-style path `/method/param1/param2/...` into a single
	/// JSON-RPC 2.0 method call and start executing it.
	///
	/// Each path segment after the method name is parsed as JSON; if that
	/// fails it is retried as a quoted string, and failing that becomes
	/// `Value::Null`. The serialized response gets a fixed id of 1.
	fn process_rest(
		&self,
		uri: hyper::Uri,
		metadata: M,
	) -> Result<RpcPollState<M, S::Future>, hyper::Error> {
		use self::core::types::{Call, MethodCall, Version, Params, Request, Id, Value};
		// skip the initial /
		let mut it = uri.path().split('/').skip(1);
		// parse method & params
		let method = it.next().unwrap_or("");
		let mut params = Vec::new();
		for param in it {
			let v = serde_json::from_str(param)
				.or_else(|_| serde_json::from_str(&format!("\"{}\"", param)))
				.unwrap_or(Value::Null);
			params.push(v)
		}
		// Parse request
		let call = Request::Single(Call::MethodCall(MethodCall {
			jsonrpc: Some(Version::V2),
			method: method.into(),
			params: Params::Array(params),
			id: Id::Num(1),
		}));
		return Ok(RpcPollState::Ready(RpcHandlerState::Waiting(
			future::Either::B(self.jsonrpc_handler.handler.handle_rpc_request(call, metadata))
				.map(|res| res.map(|x| serde_json::to_string(&x)
					.expect("Serialization of response is infallible;qed")
				))
		)));
	}
	/// Drain available body chunks into `request`, enforcing the size limit.
	///
	/// When the stream ends: an empty body with a REST uri switches to
	/// `ProcessRest`; otherwise the accumulated bytes are validated as
	/// UTF-8 and handed to the RPC handler (`Waiting`). Returns `NotReady`
	/// (preserving accumulated state) when more data is pending.
	fn process_body(
		&self,
		mut body: hyper::Body,
		mut request: Vec<u8>,
		uri: Option<hyper::Uri>,
		metadata: M,
	) -> Result<RpcPollState<M, S::Future>, BodyError> {
		loop {
			match body.poll()? {
				Async::Ready(Some(chunk)) => {
					// checked_add guards against usize overflow; on overflow
					// (None) the request is treated as too large as well.
					if request.len().checked_add(chunk.len()).map(|n| n > self.max_request_body_size).unwrap_or(true) {
						return Err(BodyError::TooLarge)
					}
					request.extend_from_slice(&*chunk)
				},
				Async::Ready(None) => {
					// Empty POST body + REST enabled: dispatch via the path.
					if let (Some(uri), true) = (uri, request.is_empty()) {
						return Ok(RpcPollState::Ready(RpcHandlerState::ProcessRest {
							uri,
							metadata,
						}));
					}
					let content = match str::from_utf8(&request) {
						Ok(content) => content,
						Err(err) => {
							// Return utf error.
							return Err(BodyError::Utf8(err));
						},
					};
					// Content is ready
					return Ok(RpcPollState::Ready(RpcHandlerState::Waiting(
						self.jsonrpc_handler.handler.handle_request(content, metadata)
					)));
				},
				Async::NotReady => {
					return Ok(RpcPollState::NotReady(RpcHandlerState::ReadingBody {
						body,
						request,
						metadata,
						uri,
					}));
				},
			}
		}
	}
	/// Attach OPTIONS (`Allow`/`Accept`) and CORS headers to a response.
	/// CORS headers are added only when a concrete allowed origin was
	/// resolved for the request.
	fn set_response_headers(
		headers: &mut Headers,
		is_options: bool,
		cors_header: Option<header::AccessControlAllowOrigin>,
		cors_max_age: Option<u32>,
	) {
		if is_options {
			headers.set(header::Allow(vec![
				Method::Options,
				Method::Post,
			]));
			headers.set(header::Accept(vec![
				header::qitem(mime::APPLICATION_JSON)
			]));
		}
		if let Some(cors_domain) = cors_header {
			headers.set(header::AccessControlAllowMethods(vec![
				Method::Options,
				Method::Post
			]));
			headers.set(header::AccessControlAllowHeaders(vec![
				Ascii::new("origin".to_owned()),
				Ascii::new("content-type".to_owned()),
				Ascii::new("accept".to_owned()),
			]));
			// Let browsers cache the preflight result when configured.
			if let Some(cors_max_age) = cors_max_age {
				headers.set(header::AccessControlMaxAge(cors_max_age));
			}
			headers.set(cors_domain);
			// Vary on Origin so caches don't serve one origin's CORS
			// headers to another origin.
			headers.set(header::Vary::Items(vec![
				Ascii::new("origin".to_owned())
			]));
		}
	}
	/// Accept only JSON content types; this keeps cross-origin text/plain
	/// XHRs from reaching the RPC handler (see the POST check in
	/// `read_headers`).
	fn is_json(content_type: Option<&header::ContentType>) -> bool {
		const APPLICATION_JSON_UTF_8: &str = "application/json; charset=utf-8";
		match content_type {
			Some(&header::ContentType(ref mime))
				if *mime == mime::APPLICATION_JSON || *mime == APPLICATION_JSON_UTF_8 => true,
			_ => false
		}
	}
}
|
read_headers
|
identifier_name
|
handler.rs
|
use Rpc;
use std::{fmt, mem, str};
use std::sync::Arc;
use hyper::{self, mime, server, Method};
use hyper::header::{self, Headers};
use unicase::Ascii;
use jsonrpc::{self as core, FutureResult, Metadata, Middleware, NoopMiddleware};
use jsonrpc::futures::{Future, Poll, Async, Stream, future};
use jsonrpc::serde_json;
use response::Response;
use server_utils::cors;
use {utils, RequestMiddleware, RequestMiddlewareAction, CorsDomains, AllowedHosts, RestApi};
/// jsonrpc http request handler.
pub struct ServerHandler<M: Metadata = (), S: Middleware<M> = NoopMiddleware> {
jsonrpc_handler: Rpc<M, S>,
allowed_hosts: AllowedHosts,
cors_domains: CorsDomains,
cors_max_age: Option<u32>,
middleware: Arc<RequestMiddleware>,
rest_api: RestApi,
max_request_body_size: usize,
}
impl<M: Metadata, S: Middleware<M>> ServerHandler<M, S> {
/// Create new request handler.
pub fn new(
jsonrpc_handler: Rpc<M, S>,
cors_domains: CorsDomains,
cors_max_age: Option<u32>,
allowed_hosts: AllowedHosts,
middleware: Arc<RequestMiddleware>,
rest_api: RestApi,
max_request_body_size: usize,
) -> Self {
ServerHandler {
jsonrpc_handler,
allowed_hosts,
cors_domains,
cors_max_age,
middleware,
rest_api,
max_request_body_size,
}
}
}
impl<M: Metadata, S: Middleware<M>> server::Service for ServerHandler<M, S> {
type Request = server::Request;
type Response = server::Response;
type Error = hyper::Error;
type Future = Handler<M, S>;
fn call(&self, request: Self::Request) -> Self::Future {
let is_host_allowed = utils::is_host_allowed(&request, &self.allowed_hosts);
let action = self.middleware.on_request(request);
let (should_validate_hosts, should_continue_on_invalid_cors, response) = match action {
RequestMiddlewareAction::Proceed { should_continue_on_invalid_cors, request }=> (
true, should_continue_on_invalid_cors, Err(request)
),
RequestMiddlewareAction::Respond { should_validate_hosts, response } => (
should_validate_hosts, false, Ok(response)
),
};
// Validate host
if should_validate_hosts &&!is_host_allowed {
return Handler::Error(Some(Response::host_not_allowed()));
}
// Replace response with the one returned by middleware.
match response {
Ok(response) => Handler::Middleware(response),
Err(request) => {
Handler::Rpc(RpcHandler {
jsonrpc_handler: self.jsonrpc_handler.clone(),
state: RpcHandlerState::ReadingHeaders {
request: request,
cors_domains: self.cors_domains.clone(),
continue_on_invalid_cors: should_continue_on_invalid_cors,
},
is_options: false,
cors_header: cors::CorsHeader::NotRequired,
rest_api: self.rest_api,
cors_max_age: self.cors_max_age,
max_request_body_size: self.max_request_body_size,
})
}
}
}
}
pub enum Handler<M: Metadata, S: Middleware<M>> {
Rpc(RpcHandler<M, S>),
Error(Option<Response>),
Middleware(Box<Future<Item=server::Response, Error=hyper::Error> + Send>),
}
impl<M: Metadata, S: Middleware<M>> Future for Handler<M, S> {
type Item = server::Response;
type Error = hyper::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match *self {
Handler::Rpc(ref mut handler) => handler.poll(),
Handler::Middleware(ref mut middleware) => middleware.poll(),
Handler::Error(ref mut response) => Ok(Async::Ready(
response.take().expect("Response always Some initialy. Returning `Ready` so will never be polled again; qed").into()
)),
}
}
}
enum RpcPollState<M, F> where
F: Future<Item = Option<core::Response>, Error = ()>,
{
Ready(RpcHandlerState<M, F>),
NotReady(RpcHandlerState<M, F>),
}
impl<M, F> RpcPollState<M, F> where
F: Future<Item = Option<core::Response>, Error = ()>,
{
fn decompose(self) -> (RpcHandlerState<M, F>, bool) {
use self::RpcPollState::*;
match self {
Ready(handler) => (handler, true),
NotReady(handler) => (handler, false),
}
}
}
enum RpcHandlerState<M, F> where
F: Future<Item = Option<core::Response>, Error = ()>,
{
ReadingHeaders {
request: server::Request,
cors_domains: CorsDomains,
continue_on_invalid_cors: bool,
},
ReadingBody {
body: hyper::Body,
uri: Option<hyper::Uri>,
request: Vec<u8>,
metadata: M,
},
ProcessRest {
uri: hyper::Uri,
metadata: M,
},
Writing(Response),
Waiting(FutureResult<F>),
Done,
}
impl<M, F> fmt::Debug for RpcHandlerState<M, F> where
F: Future<Item = Option<core::Response>, Error = ()>,
{
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::RpcHandlerState::*;
match *self {
ReadingHeaders {..} => write!(fmt, "ReadingHeaders"),
ReadingBody {..} => write!(fmt, "ReadingBody"),
ProcessRest {..} => write!(fmt, "ProcessRest"),
Writing(ref res) => write!(fmt, "Writing({:?})", res),
Waiting(_) => write!(fmt, "Waiting"),
Done => write!(fmt, "Done"),
}
}
}
pub struct RpcHandler<M: Metadata, S: Middleware<M>> {
jsonrpc_handler: Rpc<M, S>,
state: RpcHandlerState<M, S::Future>,
is_options: bool,
cors_header: cors::CorsHeader<header::AccessControlAllowOrigin>,
cors_max_age: Option<u32>,
rest_api: RestApi,
max_request_body_size: usize,
}
impl<M: Metadata, S: Middleware<M>> Future for RpcHandler<M, S> {
type Item = server::Response;
type Error = hyper::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let new_state = match mem::replace(&mut self.state, RpcHandlerState::Done) {
RpcHandlerState::ReadingHeaders { request, cors_domains, continue_on_invalid_cors, } => {
// Read cors header
self.cors_header = utils::cors_header(&request, &cors_domains);
self.is_options = *request.method() == Method::Options;
// Read other headers
RpcPollState::Ready(self.read_headers(request, continue_on_invalid_cors))
},
RpcHandlerState::ReadingBody { body, request, metadata, uri, } => {
match self.process_body(body, request, uri, metadata) {
Err(BodyError::Utf8(ref e)) => {
let mesg = format!("utf-8 encoding error at byte {} in request body", e.valid_up_to());
let resp = Response::bad_request(mesg);
RpcPollState::Ready(RpcHandlerState::Writing(resp))
}
Err(BodyError::TooLarge) => {
let resp = Response::too_large("request body size exceeds allowed maximum");
RpcPollState::Ready(RpcHandlerState::Writing(resp))
}
Err(BodyError::Hyper(e)) => return Err(e),
Ok(state) => state,
}
|
self.process_rest(uri, metadata)?
},
RpcHandlerState::Waiting(mut waiting) => {
match waiting.poll() {
Ok(Async::Ready(response)) => {
RpcPollState::Ready(RpcHandlerState::Writing(match response {
// Notification, just return empty response.
None => Response::ok(String::new()),
// Add new line to have nice output when using CLI clients (curl)
Some(result) => Response::ok(format!("{}\n", result)),
}.into()))
},
Ok(Async::NotReady) => RpcPollState::NotReady(RpcHandlerState::Waiting(waiting)),
Err(_) => RpcPollState::Ready(RpcHandlerState::Writing(Response::internal_error())),
}
},
state => RpcPollState::NotReady(state),
};
let (new_state, is_ready) = new_state.decompose();
match new_state {
RpcHandlerState::Writing(res) => {
let mut response: server::Response = res.into();
let cors_header = mem::replace(&mut self.cors_header, cors::CorsHeader::Invalid);
Self::set_response_headers(
response.headers_mut(),
self.is_options,
cors_header.into(),
self.cors_max_age,
);
Ok(Async::Ready(response))
},
state => {
self.state = state;
if is_ready {
self.poll()
} else {
Ok(Async::NotReady)
}
},
}
}
}
// Intermediate and internal error type to better distinguish
// error cases occuring during request body processing.
enum BodyError {
Hyper(hyper::Error),
Utf8(str::Utf8Error),
TooLarge,
}
impl From<hyper::Error> for BodyError {
fn from(e: hyper::Error) -> BodyError {
BodyError::Hyper(e)
}
}
impl<M: Metadata, S: Middleware<M>> RpcHandler<M, S> {
fn read_headers(
&self,
request: server::Request,
continue_on_invalid_cors: bool,
) -> RpcHandlerState<M, S::Future> {
if self.cors_header == cors::CorsHeader::Invalid &&!continue_on_invalid_cors {
return RpcHandlerState::Writing(Response::invalid_cors());
}
// Read metadata
let metadata = self.jsonrpc_handler.extractor.read_metadata(&request);
// Proceed
match *request.method() {
// Validate the ContentType header
// to prevent Cross-Origin XHRs with text/plain
Method::Post if Self::is_json(request.headers().get::<header::ContentType>()) => {
let uri = if self.rest_api!= RestApi::Disabled { Some(request.uri().clone()) } else { None };
RpcHandlerState::ReadingBody {
metadata,
request: Default::default(),
uri,
body: request.body(),
}
},
Method::Post if self.rest_api == RestApi::Unsecure && request.uri().path().split('/').count() > 2 => {
RpcHandlerState::ProcessRest {
metadata,
uri: request.uri().clone(),
}
},
// Just return error for unsupported content type
Method::Post => {
RpcHandlerState::Writing(Response::unsupported_content_type())
},
// Don't validate content type on options
Method::Options => {
RpcHandlerState::Writing(Response::empty())
},
// Disallow other methods.
_ => {
RpcHandlerState::Writing(Response::method_not_allowed())
},
}
}
fn process_rest(
&self,
uri: hyper::Uri,
metadata: M,
) -> Result<RpcPollState<M, S::Future>, hyper::Error> {
use self::core::types::{Call, MethodCall, Version, Params, Request, Id, Value};
// skip the initial /
let mut it = uri.path().split('/').skip(1);
// parse method & params
let method = it.next().unwrap_or("");
let mut params = Vec::new();
for param in it {
let v = serde_json::from_str(param)
.or_else(|_| serde_json::from_str(&format!("\"{}\"", param)))
.unwrap_or(Value::Null);
params.push(v)
}
// Parse request
let call = Request::Single(Call::MethodCall(MethodCall {
jsonrpc: Some(Version::V2),
method: method.into(),
params: Params::Array(params),
id: Id::Num(1),
}));
return Ok(RpcPollState::Ready(RpcHandlerState::Waiting(
future::Either::B(self.jsonrpc_handler.handler.handle_rpc_request(call, metadata))
.map(|res| res.map(|x| serde_json::to_string(&x)
.expect("Serialization of response is infallible;qed")
))
)));
}
fn process_body(
&self,
mut body: hyper::Body,
mut request: Vec<u8>,
uri: Option<hyper::Uri>,
metadata: M,
) -> Result<RpcPollState<M, S::Future>, BodyError> {
loop {
match body.poll()? {
Async::Ready(Some(chunk)) => {
if request.len().checked_add(chunk.len()).map(|n| n > self.max_request_body_size).unwrap_or(true) {
return Err(BodyError::TooLarge)
}
request.extend_from_slice(&*chunk)
},
Async::Ready(None) => {
if let (Some(uri), true) = (uri, request.is_empty()) {
return Ok(RpcPollState::Ready(RpcHandlerState::ProcessRest {
uri,
metadata,
}));
}
let content = match str::from_utf8(&request) {
Ok(content) => content,
Err(err) => {
// Return utf error.
return Err(BodyError::Utf8(err));
},
};
// Content is ready
return Ok(RpcPollState::Ready(RpcHandlerState::Waiting(
self.jsonrpc_handler.handler.handle_request(content, metadata)
)));
},
Async::NotReady => {
return Ok(RpcPollState::NotReady(RpcHandlerState::ReadingBody {
body,
request,
metadata,
uri,
}));
},
}
}
}
fn set_response_headers(
headers: &mut Headers,
is_options: bool,
cors_header: Option<header::AccessControlAllowOrigin>,
cors_max_age: Option<u32>,
) {
if is_options {
headers.set(header::Allow(vec![
Method::Options,
Method::Post,
]));
headers.set(header::Accept(vec![
header::qitem(mime::APPLICATION_JSON)
]));
}
if let Some(cors_domain) = cors_header {
headers.set(header::AccessControlAllowMethods(vec![
Method::Options,
Method::Post
]));
headers.set(header::AccessControlAllowHeaders(vec![
Ascii::new("origin".to_owned()),
Ascii::new("content-type".to_owned()),
Ascii::new("accept".to_owned()),
]));
if let Some(cors_max_age) = cors_max_age {
headers.set(header::AccessControlMaxAge(cors_max_age));
}
headers.set(cors_domain);
headers.set(header::Vary::Items(vec![
Ascii::new("origin".to_owned())
]));
}
}
fn is_json(content_type: Option<&header::ContentType>) -> bool {
const APPLICATION_JSON_UTF_8: &str = "application/json; charset=utf-8";
match content_type {
Some(&header::ContentType(ref mime))
if *mime == mime::APPLICATION_JSON || *mime == APPLICATION_JSON_UTF_8 => true,
_ => false
}
}
}
|
},
RpcHandlerState::ProcessRest { uri, metadata } => {
|
random_line_split
|
custom_error_handler.rs
|
#[macro_use] extern crate nickel;
use std::io::Write;
use nickel::{Nickel, NickelError, Request, HttpRouter, Action};
use nickel::status::StatusCode;
fn main()
|
match res.status() {
StatusCode::ImATeapot => {
// Pass the internal message to the client
let _ = res.write_all(err.message.as_bytes());
return Action::Halt(())
}
StatusCode::NotFound => {
let _ = res.write_all(b"<h1>404 - Not Found</h1>");
return Action::Halt(())
}
_ => {}
}
}
// Fall through to next error handler
Action::Continue(())
}
// issue #20178
let custom_handler: fn(&mut NickelError<()>, &mut Request<()>) -> Action = custom_handler;
server.handle_error(custom_handler);
server.listen("127.0.0.1:6767").unwrap();
}
|
{
let mut server = Nickel::new();
// go to http://localhost:6767/user/4711 to see this route in action
server.get("/user/:userid", middleware! { |request|
if let Some("42") = request.param("userid") {
(StatusCode::Ok, "User 42 was found!")
} else {
(StatusCode::ImATeapot, "Teapot activated!")
}
});
//this is how to overwrite the default error handler to handle 404 cases with a custom view
fn custom_handler<D>(err: &mut NickelError<D>, req: &mut Request<D>) -> Action {
// Print the internal error message and path to the console
println!("[{}] ERROR: {}",
req.path_without_query().unwrap(),
err.message);
if let Some(ref mut res) = err.stream {
|
identifier_body
|
custom_error_handler.rs
|
#[macro_use] extern crate nickel;
use std::io::Write;
use nickel::{Nickel, NickelError, Request, HttpRouter, Action};
use nickel::status::StatusCode;
fn main() {
let mut server = Nickel::new();
// go to http://localhost:6767/user/4711 to see this route in action
server.get("/user/:userid", middleware! { |request|
if let Some("42") = request.param("userid") {
(StatusCode::Ok, "User 42 was found!")
} else {
(StatusCode::ImATeapot, "Teapot activated!")
}
});
//this is how to overwrite the default error handler to handle 404 cases with a custom view
fn custom_handler<D>(err: &mut NickelError<D>, req: &mut Request<D>) -> Action {
// Print the internal error message and path to the console
println!("[{}] ERROR: {}",
req.path_without_query().unwrap(),
err.message);
if let Some(ref mut res) = err.stream {
match res.status() {
StatusCode::ImATeapot => {
// Pass the internal message to the client
let _ = res.write_all(err.message.as_bytes());
return Action::Halt(())
}
StatusCode::NotFound =>
|
_ => {}
}
}
// Fall through to next error handler
Action::Continue(())
}
// issue #20178
let custom_handler: fn(&mut NickelError<()>, &mut Request<()>) -> Action = custom_handler;
server.handle_error(custom_handler);
server.listen("127.0.0.1:6767").unwrap();
}
|
{
let _ = res.write_all(b"<h1>404 - Not Found</h1>");
return Action::Halt(())
}
|
conditional_block
|
custom_error_handler.rs
|
#[macro_use] extern crate nickel;
use std::io::Write;
use nickel::{Nickel, NickelError, Request, HttpRouter, Action};
use nickel::status::StatusCode;
fn main() {
let mut server = Nickel::new();
// go to http://localhost:6767/user/4711 to see this route in action
server.get("/user/:userid", middleware! { |request|
if let Some("42") = request.param("userid") {
(StatusCode::Ok, "User 42 was found!")
} else {
(StatusCode::ImATeapot, "Teapot activated!")
}
});
//this is how to overwrite the default error handler to handle 404 cases with a custom view
fn
|
<D>(err: &mut NickelError<D>, req: &mut Request<D>) -> Action {
// Print the internal error message and path to the console
println!("[{}] ERROR: {}",
req.path_without_query().unwrap(),
err.message);
if let Some(ref mut res) = err.stream {
match res.status() {
StatusCode::ImATeapot => {
// Pass the internal message to the client
let _ = res.write_all(err.message.as_bytes());
return Action::Halt(())
}
StatusCode::NotFound => {
let _ = res.write_all(b"<h1>404 - Not Found</h1>");
return Action::Halt(())
}
_ => {}
}
}
// Fall through to next error handler
Action::Continue(())
}
// issue #20178
let custom_handler: fn(&mut NickelError<()>, &mut Request<()>) -> Action = custom_handler;
server.handle_error(custom_handler);
server.listen("127.0.0.1:6767").unwrap();
}
|
custom_handler
|
identifier_name
|
custom_error_handler.rs
|
#[macro_use] extern crate nickel;
use std::io::Write;
use nickel::{Nickel, NickelError, Request, HttpRouter, Action};
use nickel::status::StatusCode;
fn main() {
let mut server = Nickel::new();
// go to http://localhost:6767/user/4711 to see this route in action
server.get("/user/:userid", middleware! { |request|
if let Some("42") = request.param("userid") {
(StatusCode::Ok, "User 42 was found!")
} else {
(StatusCode::ImATeapot, "Teapot activated!")
}
});
//this is how to overwrite the default error handler to handle 404 cases with a custom view
fn custom_handler<D>(err: &mut NickelError<D>, req: &mut Request<D>) -> Action {
// Print the internal error message and path to the console
println!("[{}] ERROR: {}",
|
match res.status() {
StatusCode::ImATeapot => {
// Pass the internal message to the client
let _ = res.write_all(err.message.as_bytes());
return Action::Halt(())
}
StatusCode::NotFound => {
let _ = res.write_all(b"<h1>404 - Not Found</h1>");
return Action::Halt(())
}
_ => {}
}
}
// Fall through to next error handler
Action::Continue(())
}
// issue #20178
let custom_handler: fn(&mut NickelError<()>, &mut Request<()>) -> Action = custom_handler;
server.handle_error(custom_handler);
server.listen("127.0.0.1:6767").unwrap();
}
|
req.path_without_query().unwrap(),
err.message);
if let Some(ref mut res) = err.stream {
|
random_line_split
|
mod.rs
|
//! Provides the fundamental units of computation for the [Network][1].
//! [1]:../network/index.html
//!
//! These layers provide different type of operations to the data Blobs
#[allow(unused_import_braces)]
pub use blocks::neural::neuron::*;
pub use core::*;
pub mod neural;
pub mod fullmesh;
pub mod mesh;
|
pub mod function;
#[cfg(test)]
mod block_tst;
#[derive(Default, Clone)]
pub struct BlockData
{
pub id: BlockId,
pub name: String,
pub connections: Vec<Connection>,
pub next_run_sequence: Vec<BlockId>,
pub neuron_count: u32,
/// includes bias per neuron
pub synapse_count: u32
}
impl BlockData
{
pub fn new (newid: BlockId, ncount: u32, scount: u32) -> BlockData { BlockData { id : newid, neuron_count: ncount, synapse_count: scount,..Default::default() } }
}
pub fn add_three(a: i32) -> i32 {
a + 3
}
// revist names
pub type LinearByteMutBlock = ::blocks::fullmesh::FullMeshBlock<i8,u8,LinearByteB>;
pub type LogisticMutBlock = ::blocks::fullmesh::FullMeshBlock< f32,f32,DefaultLogistic>;
pub type LogisticMutBiasBlock = ::blocks::fullmesh::FullMeshBlock< f32,f32,DefaultLogisticB>;
pub type LinearByteBlock = ::blocks::mesh::MeshBlock<i8,u8,LinearByte>;
pub type LinearByteBiasBlock = ::blocks::mesh::MeshBlock<i8,u8,LinearByteB>;
pub type LogisticBlock = ::blocks::mesh::MeshBlock<f32,f32,DefaultLogistic>;
pub type LogisticBiasBlock = ::blocks::mesh::MeshBlock<f32,f32,DefaultLogisticB>;
pub type ThreshholdByteBiasBlock = ::blocks::mesh::MeshBlock<i8,u8,ThresholdByteB>;
|
random_line_split
|
|
mod.rs
|
//! Provides the fundamental units of computation for the [Network][1].
//! [1]:../network/index.html
//!
//! These layers provide different type of operations to the data Blobs
#[allow(unused_import_braces)]
pub use blocks::neural::neuron::*;
pub use core::*;
pub mod neural;
pub mod fullmesh;
pub mod mesh;
pub mod function;
#[cfg(test)]
mod block_tst;
#[derive(Default, Clone)]
pub struct BlockData
{
pub id: BlockId,
pub name: String,
pub connections: Vec<Connection>,
pub next_run_sequence: Vec<BlockId>,
pub neuron_count: u32,
/// includes bias per neuron
pub synapse_count: u32
}
impl BlockData
{
pub fn
|
(newid: BlockId, ncount: u32, scount: u32) -> BlockData { BlockData { id : newid, neuron_count: ncount, synapse_count: scount,..Default::default() } }
}
pub fn add_three(a: i32) -> i32 {
a + 3
}
// revist names
pub type LinearByteMutBlock = ::blocks::fullmesh::FullMeshBlock<i8,u8,LinearByteB>;
pub type LogisticMutBlock = ::blocks::fullmesh::FullMeshBlock< f32,f32,DefaultLogistic>;
pub type LogisticMutBiasBlock = ::blocks::fullmesh::FullMeshBlock< f32,f32,DefaultLogisticB>;
pub type LinearByteBlock = ::blocks::mesh::MeshBlock<i8,u8,LinearByte>;
pub type LinearByteBiasBlock = ::blocks::mesh::MeshBlock<i8,u8,LinearByteB>;
pub type LogisticBlock = ::blocks::mesh::MeshBlock<f32,f32,DefaultLogistic>;
pub type LogisticBiasBlock = ::blocks::mesh::MeshBlock<f32,f32,DefaultLogisticB>;
pub type ThreshholdByteBiasBlock = ::blocks::mesh::MeshBlock<i8,u8,ThresholdByteB>;
|
new
|
identifier_name
|
tty.rs
|
use std::{fs, io};
use std::os::unix::io::AsRawFd;
/// Is this stream an TTY?
#[cfg(not(target_os = "redox"))]
pub fn is_tty<T: AsRawFd>(stream: &T) -> bool {
use libc;
unsafe { libc::isatty(stream.as_raw_fd()) == 1 }
}
/// This will panic.
#[cfg(target_os = "redox")]
pub fn is_tty<T: AsRawFd>(_stream: &T) -> bool
|
/// Get the TTY device.
///
/// This allows for getting stdio representing _only_ the TTY, and not other streams.
#[cfg(target_os = "redox")]
pub fn get_tty() -> io::Result<fs::File> {
use std::env;
let tty = try!(env::var("TTY").map_err(|x| io::Error::new(io::ErrorKind::NotFound, x)));
fs::OpenOptions::new().read(true).write(true).open(tty)
}
/// Get the TTY device.
///
/// This allows for getting stdio representing _only_ the TTY, and not other streams.
#[cfg(not(target_os = "redox"))]
pub fn get_tty() -> io::Result<fs::File> {
fs::OpenOptions::new().read(true).write(true).open("/dev/tty")
}
|
{
unimplemented!();
}
|
identifier_body
|
tty.rs
|
use std::{fs, io};
use std::os::unix::io::AsRawFd;
/// Is this stream an TTY?
#[cfg(not(target_os = "redox"))]
pub fn is_tty<T: AsRawFd>(stream: &T) -> bool {
use libc;
unsafe { libc::isatty(stream.as_raw_fd()) == 1 }
}
/// This will panic.
#[cfg(target_os = "redox")]
pub fn is_tty<T: AsRawFd>(_stream: &T) -> bool {
unimplemented!();
}
/// Get the TTY device.
///
/// This allows for getting stdio representing _only_ the TTY, and not other streams.
#[cfg(target_os = "redox")]
pub fn
|
() -> io::Result<fs::File> {
use std::env;
let tty = try!(env::var("TTY").map_err(|x| io::Error::new(io::ErrorKind::NotFound, x)));
fs::OpenOptions::new().read(true).write(true).open(tty)
}
/// Get the TTY device.
///
/// This allows for getting stdio representing _only_ the TTY, and not other streams.
#[cfg(not(target_os = "redox"))]
pub fn get_tty() -> io::Result<fs::File> {
fs::OpenOptions::new().read(true).write(true).open("/dev/tty")
}
|
get_tty
|
identifier_name
|
tty.rs
|
use std::{fs, io};
use std::os::unix::io::AsRawFd;
/// Is this stream an TTY?
#[cfg(not(target_os = "redox"))]
pub fn is_tty<T: AsRawFd>(stream: &T) -> bool {
use libc;
unsafe { libc::isatty(stream.as_raw_fd()) == 1 }
}
/// This will panic.
#[cfg(target_os = "redox")]
pub fn is_tty<T: AsRawFd>(_stream: &T) -> bool {
unimplemented!();
}
/// Get the TTY device.
///
|
#[cfg(target_os = "redox")]
pub fn get_tty() -> io::Result<fs::File> {
use std::env;
let tty = try!(env::var("TTY").map_err(|x| io::Error::new(io::ErrorKind::NotFound, x)));
fs::OpenOptions::new().read(true).write(true).open(tty)
}
/// Get the TTY device.
///
/// This allows for getting stdio representing _only_ the TTY, and not other streams.
#[cfg(not(target_os = "redox"))]
pub fn get_tty() -> io::Result<fs::File> {
fs::OpenOptions::new().read(true).write(true).open("/dev/tty")
}
|
/// This allows for getting stdio representing _only_ the TTY, and not other streams.
|
random_line_split
|
lib.rs
|
//! A TIS-100 emulator.
//!
//! # Example
//!
//! ```
//! use tis_100::save::parse_save;
//! use tis_100::machine::Sandbox;
//!
//! // This program reads the value from the console and simply passes it to the console output.
//! let src = "@1\nMOV UP DOWN\n@5\nMOV UP DOWN\n@9\nMOV UP RIGHT\n@10\nMOV LEFT DOWN\n";
//!
//! let save = parse_save(src).unwrap();
//! let mut sandbox = Sandbox::from_save(&save);
//!
//! sandbox.write_console(42);
//!
//! for _ in 0..5 {
//! sandbox.step();
//! }
//!
//! assert_eq!(sandbox.read_console(), Some(42));
//! ```
extern crate hlua;
|
pub mod lex;
pub mod parse;
pub mod io;
pub mod node;
pub mod image;
pub mod save;
pub mod spec;
pub mod machine;
|
extern crate vec_map;
pub mod core;
|
random_line_split
|
lazy_cell.rs
|
//! A lazily fill Cell, but with frozen contents.
//!
//! With a `RefCell`, the inner contents cannot be borrowed for the lifetime of
//! the entire object, but only of the borrows returned. A `LazyCell` is a
//! variation on `RefCell` which allows borrows tied to the lifetime of the
//! outer object.
//!
//! The limitation of a `LazyCell` is that after initialized, it can never be
//! modified.
use std::cell::RefCell;
use std::mem;
pub struct LazyCell<T> {
inner: RefCell<Option<T>>,
}
impl<T> LazyCell<T> {
/// Creates a new empty lazy cell.
pub fn
|
() -> LazyCell<T> {
LazyCell { inner: RefCell::new(None) }
}
/// Put a value into this cell.
///
/// This function will fail if the cell has already been filled.
pub fn fill(&self, t: T) {
let mut slot = self.inner.borrow_mut();
if slot.is_some() {
panic!("lazy cell is already filled")
}
*slot = Some(t);
}
/// Test whether this cell has been previously filled.
pub fn filled(&self) -> bool { self.inner.borrow().is_some() }
/// Borrows the contents of this lazy cell for the duration of the cell
/// itself.
///
/// This function will return `Some` if the cell has been previously
/// initialized, and `None` if it has not yet been initialized.
pub fn borrow(&self) -> Option<&T> {
match *self.inner.borrow() {
Some(ref inner) => unsafe { Some(mem::transmute(inner)) },
None => None
}
}
/// Consumes this `LazyCell`, returning the underlying value.
pub fn into_inner(self) -> Option<T> {
self.inner.into_inner()
}
}
|
new
|
identifier_name
|
lazy_cell.rs
|
//! A lazily fill Cell, but with frozen contents.
//!
//! With a `RefCell`, the inner contents cannot be borrowed for the lifetime of
//! the entire object, but only of the borrows returned. A `LazyCell` is a
//! variation on `RefCell` which allows borrows tied to the lifetime of the
//! outer object.
//!
//! The limitation of a `LazyCell` is that after initialized, it can never be
//! modified.
use std::cell::RefCell;
use std::mem;
pub struct LazyCell<T> {
inner: RefCell<Option<T>>,
}
impl<T> LazyCell<T> {
/// Creates a new empty lazy cell.
pub fn new() -> LazyCell<T> {
|
///
/// This function will fail if the cell has already been filled.
pub fn fill(&self, t: T) {
let mut slot = self.inner.borrow_mut();
if slot.is_some() {
panic!("lazy cell is already filled")
}
*slot = Some(t);
}
/// Test whether this cell has been previously filled.
pub fn filled(&self) -> bool { self.inner.borrow().is_some() }
/// Borrows the contents of this lazy cell for the duration of the cell
/// itself.
///
/// This function will return `Some` if the cell has been previously
/// initialized, and `None` if it has not yet been initialized.
pub fn borrow(&self) -> Option<&T> {
match *self.inner.borrow() {
Some(ref inner) => unsafe { Some(mem::transmute(inner)) },
None => None
}
}
/// Consumes this `LazyCell`, returning the underlying value.
pub fn into_inner(self) -> Option<T> {
self.inner.into_inner()
}
}
|
LazyCell { inner: RefCell::new(None) }
}
/// Put a value into this cell.
|
random_line_split
|
murmurhash.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
|
const C1: u32 = 0xcc9e_2d51;
const C2: u32 = 0x1b87_3593;
const R1: u32 = 15;
const R2: u32 = 13;
const M: u32 = 5;
const N: u32 = 0xe654_6b64;
const CHARSET: [char; 62] = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
'j', 'k', 'l','m', 'n', 'o', 'p', 'q', 'r','s', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B',
'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
'V', 'W', 'X', 'Y', 'Z',
];
// An implmentation of murmurHash.js of relay-compiler that produces the same output excepet for non-alphanumeric strings
// It does the 32bit murmurhash3 with seed 0, and applies a base62 to get the final string hash
pub fn murmurhash(data: &str) -> String {
let bytes = data.as_bytes();
let nbytes = bytes.len();
let mut hash = 0; // Hardcoded seed 0
let mut i = 0;
let iterations = nbytes / 4;
for _ in 0..iterations {
hash = (hash ^ calculate_k(read_u32(&bytes[i..i + 4])))
.rotate_left(R2)
.wrapping_mul(M)
.wrapping_add(N);
i += 4;
}
match nbytes - i {
1 => {
hash ^= calculate_k(bytes[i] as u32);
}
2 => {
hash ^= calculate_k((bytes[i] as u32) | ((bytes[i + 1] as u32) << 8));
}
3 => {
hash ^= calculate_k(
(bytes[i] as u32) | ((bytes[i + 1] as u32) << 8) | ((bytes[i + 2] as u32) << 16),
);
}
_ => {}
}
hash ^= nbytes as u32;
hash ^= hash.wrapping_shr(16);
hash = hash.wrapping_mul(0x85eb_ca6b);
hash ^= hash.wrapping_shr(13);
hash = hash.wrapping_mul(0xc2b2_ae35);
hash ^= hash.wrapping_shr(16);
if hash == 0 {
return "0".to_owned();
}
let mut chars = Vec::with_capacity(6);
while hash > 0 {
let d = hash % 62;
chars.push(CHARSET[d as usize]);
hash = (hash - d) / 62;
}
chars.iter().rev().collect()
}
// From byte_order::LittleEndian::read_u32
fn read_u32(src: &[u8]) -> u32 {
let mut data: u32 = 0;
unsafe {
copy_nonoverlapping(src.as_ptr(), &mut data as *mut u32 as *mut u8, 4);
}
data.to_le()
}
fn calculate_k(k: u32) -> u32 {
k.wrapping_mul(C1).rotate_left(R1).wrapping_mul(C2)
}
#[test]
fn test_murmurhash() {
assert_eq!(murmurhash("{count: 20, start: 0, end: 5}"), "31sjku");
assert_eq!(
murmurhash("{arg: \"{arg: {count: 20, start: 0, end: 5}}\"}"),
"3RGiWM"
);
assert_eq!(
murmurhash(&str::repeat(
"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ",
40
)),
"3OKbT6"
);
assert_eq!(murmurhash("{}"), "2wIPj2");
assert_eq!(murmurhash(""), "0");
}
|
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use std::ptr::copy_nonoverlapping;
|
random_line_split
|
murmurhash.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use std::ptr::copy_nonoverlapping;
const C1: u32 = 0xcc9e_2d51;
const C2: u32 = 0x1b87_3593;
const R1: u32 = 15;
const R2: u32 = 13;
const M: u32 = 5;
const N: u32 = 0xe654_6b64;
const CHARSET: [char; 62] = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
'j', 'k', 'l','m', 'n', 'o', 'p', 'q', 'r','s', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B',
'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
'V', 'W', 'X', 'Y', 'Z',
];
// An implmentation of murmurHash.js of relay-compiler that produces the same output excepet for non-alphanumeric strings
// It does the 32bit murmurhash3 with seed 0, and applies a base62 to get the final string hash
pub fn murmurhash(data: &str) -> String {
let bytes = data.as_bytes();
let nbytes = bytes.len();
let mut hash = 0; // Hardcoded seed 0
let mut i = 0;
let iterations = nbytes / 4;
for _ in 0..iterations {
hash = (hash ^ calculate_k(read_u32(&bytes[i..i + 4])))
.rotate_left(R2)
.wrapping_mul(M)
.wrapping_add(N);
i += 4;
}
match nbytes - i {
1 => {
hash ^= calculate_k(bytes[i] as u32);
}
2 =>
|
3 => {
hash ^= calculate_k(
(bytes[i] as u32) | ((bytes[i + 1] as u32) << 8) | ((bytes[i + 2] as u32) << 16),
);
}
_ => {}
}
hash ^= nbytes as u32;
hash ^= hash.wrapping_shr(16);
hash = hash.wrapping_mul(0x85eb_ca6b);
hash ^= hash.wrapping_shr(13);
hash = hash.wrapping_mul(0xc2b2_ae35);
hash ^= hash.wrapping_shr(16);
if hash == 0 {
return "0".to_owned();
}
let mut chars = Vec::with_capacity(6);
while hash > 0 {
let d = hash % 62;
chars.push(CHARSET[d as usize]);
hash = (hash - d) / 62;
}
chars.iter().rev().collect()
}
// From byte_order::LittleEndian::read_u32
fn read_u32(src: &[u8]) -> u32 {
let mut data: u32 = 0;
unsafe {
copy_nonoverlapping(src.as_ptr(), &mut data as *mut u32 as *mut u8, 4);
}
data.to_le()
}
fn calculate_k(k: u32) -> u32 {
k.wrapping_mul(C1).rotate_left(R1).wrapping_mul(C2)
}
#[test]
fn test_murmurhash() {
assert_eq!(murmurhash("{count: 20, start: 0, end: 5}"), "31sjku");
assert_eq!(
murmurhash("{arg: \"{arg: {count: 20, start: 0, end: 5}}\"}"),
"3RGiWM"
);
assert_eq!(
murmurhash(&str::repeat(
"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ",
40
)),
"3OKbT6"
);
assert_eq!(murmurhash("{}"), "2wIPj2");
assert_eq!(murmurhash(""), "0");
}
|
{
hash ^= calculate_k((bytes[i] as u32) | ((bytes[i + 1] as u32) << 8));
}
|
conditional_block
|
murmurhash.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use std::ptr::copy_nonoverlapping;
const C1: u32 = 0xcc9e_2d51;
const C2: u32 = 0x1b87_3593;
const R1: u32 = 15;
const R2: u32 = 13;
const M: u32 = 5;
const N: u32 = 0xe654_6b64;
const CHARSET: [char; 62] = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
'j', 'k', 'l','m', 'n', 'o', 'p', 'q', 'r','s', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B',
'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
'V', 'W', 'X', 'Y', 'Z',
];
// An implmentation of murmurHash.js of relay-compiler that produces the same output excepet for non-alphanumeric strings
// It does the 32bit murmurhash3 with seed 0, and applies a base62 to get the final string hash
pub fn murmurhash(data: &str) -> String {
let bytes = data.as_bytes();
let nbytes = bytes.len();
let mut hash = 0; // Hardcoded seed 0
let mut i = 0;
let iterations = nbytes / 4;
for _ in 0..iterations {
hash = (hash ^ calculate_k(read_u32(&bytes[i..i + 4])))
.rotate_left(R2)
.wrapping_mul(M)
.wrapping_add(N);
i += 4;
}
match nbytes - i {
1 => {
hash ^= calculate_k(bytes[i] as u32);
}
2 => {
hash ^= calculate_k((bytes[i] as u32) | ((bytes[i + 1] as u32) << 8));
}
3 => {
hash ^= calculate_k(
(bytes[i] as u32) | ((bytes[i + 1] as u32) << 8) | ((bytes[i + 2] as u32) << 16),
);
}
_ => {}
}
hash ^= nbytes as u32;
hash ^= hash.wrapping_shr(16);
hash = hash.wrapping_mul(0x85eb_ca6b);
hash ^= hash.wrapping_shr(13);
hash = hash.wrapping_mul(0xc2b2_ae35);
hash ^= hash.wrapping_shr(16);
if hash == 0 {
return "0".to_owned();
}
let mut chars = Vec::with_capacity(6);
while hash > 0 {
let d = hash % 62;
chars.push(CHARSET[d as usize]);
hash = (hash - d) / 62;
}
chars.iter().rev().collect()
}
// From byte_order::LittleEndian::read_u32
fn read_u32(src: &[u8]) -> u32 {
let mut data: u32 = 0;
unsafe {
copy_nonoverlapping(src.as_ptr(), &mut data as *mut u32 as *mut u8, 4);
}
data.to_le()
}
fn calculate_k(k: u32) -> u32 {
k.wrapping_mul(C1).rotate_left(R1).wrapping_mul(C2)
}
#[test]
fn test_murmurhash()
|
{
assert_eq!(murmurhash("{count: 20, start: 0, end: 5}"), "31sjku");
assert_eq!(
murmurhash("{arg: \"{arg: {count: 20, start: 0, end: 5}}\"}"),
"3RGiWM"
);
assert_eq!(
murmurhash(&str::repeat(
"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ",
40
)),
"3OKbT6"
);
assert_eq!(murmurhash("{}"), "2wIPj2");
assert_eq!(murmurhash(""), "0");
}
|
identifier_body
|
|
murmurhash.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use std::ptr::copy_nonoverlapping;
const C1: u32 = 0xcc9e_2d51;
const C2: u32 = 0x1b87_3593;
const R1: u32 = 15;
const R2: u32 = 13;
const M: u32 = 5;
const N: u32 = 0xe654_6b64;
const CHARSET: [char; 62] = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
'j', 'k', 'l','m', 'n', 'o', 'p', 'q', 'r','s', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B',
'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
'V', 'W', 'X', 'Y', 'Z',
];
// An implmentation of murmurHash.js of relay-compiler that produces the same output excepet for non-alphanumeric strings
// It does the 32bit murmurhash3 with seed 0, and applies a base62 to get the final string hash
pub fn murmurhash(data: &str) -> String {
let bytes = data.as_bytes();
let nbytes = bytes.len();
let mut hash = 0; // Hardcoded seed 0
let mut i = 0;
let iterations = nbytes / 4;
for _ in 0..iterations {
hash = (hash ^ calculate_k(read_u32(&bytes[i..i + 4])))
.rotate_left(R2)
.wrapping_mul(M)
.wrapping_add(N);
i += 4;
}
match nbytes - i {
1 => {
hash ^= calculate_k(bytes[i] as u32);
}
2 => {
hash ^= calculate_k((bytes[i] as u32) | ((bytes[i + 1] as u32) << 8));
}
3 => {
hash ^= calculate_k(
(bytes[i] as u32) | ((bytes[i + 1] as u32) << 8) | ((bytes[i + 2] as u32) << 16),
);
}
_ => {}
}
hash ^= nbytes as u32;
hash ^= hash.wrapping_shr(16);
hash = hash.wrapping_mul(0x85eb_ca6b);
hash ^= hash.wrapping_shr(13);
hash = hash.wrapping_mul(0xc2b2_ae35);
hash ^= hash.wrapping_shr(16);
if hash == 0 {
return "0".to_owned();
}
let mut chars = Vec::with_capacity(6);
while hash > 0 {
let d = hash % 62;
chars.push(CHARSET[d as usize]);
hash = (hash - d) / 62;
}
chars.iter().rev().collect()
}
// From byte_order::LittleEndian::read_u32
fn
|
(src: &[u8]) -> u32 {
let mut data: u32 = 0;
unsafe {
copy_nonoverlapping(src.as_ptr(), &mut data as *mut u32 as *mut u8, 4);
}
data.to_le()
}
fn calculate_k(k: u32) -> u32 {
k.wrapping_mul(C1).rotate_left(R1).wrapping_mul(C2)
}
#[test]
fn test_murmurhash() {
assert_eq!(murmurhash("{count: 20, start: 0, end: 5}"), "31sjku");
assert_eq!(
murmurhash("{arg: \"{arg: {count: 20, start: 0, end: 5}}\"}"),
"3RGiWM"
);
assert_eq!(
murmurhash(&str::repeat(
"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ",
40
)),
"3OKbT6"
);
assert_eq!(murmurhash("{}"), "2wIPj2");
assert_eq!(murmurhash(""), "0");
}
|
read_u32
|
identifier_name
|
test_fixture.rs
|
use crate::cpu::Cpu;
use crate::cpu::{Interconnect, Interrupt, ADDRESSABLE_MEMORY};
pub struct TestInterconnect {
addr: [u8; ADDRESSABLE_MEMORY],
elapsed_cycles: usize,
}
impl TestInterconnect {
pub fn new() -> Self {
TestInterconnect {
addr: [0; ADDRESSABLE_MEMORY],
elapsed_cycles: 0,
}
}
pub fn store_many(&mut self, addr: u16, data: &[u8]) {
for (i, byte) in data.iter().enumerate() {
self.write(addr + i as u16, *byte);
}
}
}
|
fn default() -> Self {
Self::new()
}
}
impl Interconnect for TestInterconnect {
fn read(&self, addr: u16) -> u8 {
let addr = addr as usize;
self.addr[addr]
}
fn write(&mut self, addr: u16, data: u8) {
let addr = addr as usize;
self.addr[addr] = data;
}
fn tick(&mut self) -> Interrupt {
self.elapsed_cycles += 1;
Interrupt::None
}
fn elapsed_cycles(&self) -> usize {
self.elapsed_cycles
}
}
pub type TestCpu = Cpu<TestInterconnect>;
impl Cpu<TestInterconnect> {
pub fn new_test() -> Self {
let interconnect = TestInterconnect::default();
Cpu::new(interconnect, 0x200)
}
}
|
impl Default for TestInterconnect {
|
random_line_split
|
test_fixture.rs
|
use crate::cpu::Cpu;
use crate::cpu::{Interconnect, Interrupt, ADDRESSABLE_MEMORY};
pub struct TestInterconnect {
addr: [u8; ADDRESSABLE_MEMORY],
elapsed_cycles: usize,
}
impl TestInterconnect {
pub fn new() -> Self
|
pub fn store_many(&mut self, addr: u16, data: &[u8]) {
for (i, byte) in data.iter().enumerate() {
self.write(addr + i as u16, *byte);
}
}
}
impl Default for TestInterconnect {
fn default() -> Self {
Self::new()
}
}
impl Interconnect for TestInterconnect {
fn read(&self, addr: u16) -> u8 {
let addr = addr as usize;
self.addr[addr]
}
fn write(&mut self, addr: u16, data: u8) {
let addr = addr as usize;
self.addr[addr] = data;
}
fn tick(&mut self) -> Interrupt {
self.elapsed_cycles += 1;
Interrupt::None
}
fn elapsed_cycles(&self) -> usize {
self.elapsed_cycles
}
}
pub type TestCpu = Cpu<TestInterconnect>;
impl Cpu<TestInterconnect> {
pub fn new_test() -> Self {
let interconnect = TestInterconnect::default();
Cpu::new(interconnect, 0x200)
}
}
|
{
TestInterconnect {
addr: [0; ADDRESSABLE_MEMORY],
elapsed_cycles: 0,
}
}
|
identifier_body
|
test_fixture.rs
|
use crate::cpu::Cpu;
use crate::cpu::{Interconnect, Interrupt, ADDRESSABLE_MEMORY};
pub struct
|
{
addr: [u8; ADDRESSABLE_MEMORY],
elapsed_cycles: usize,
}
impl TestInterconnect {
pub fn new() -> Self {
TestInterconnect {
addr: [0; ADDRESSABLE_MEMORY],
elapsed_cycles: 0,
}
}
pub fn store_many(&mut self, addr: u16, data: &[u8]) {
for (i, byte) in data.iter().enumerate() {
self.write(addr + i as u16, *byte);
}
}
}
impl Default for TestInterconnect {
fn default() -> Self {
Self::new()
}
}
impl Interconnect for TestInterconnect {
fn read(&self, addr: u16) -> u8 {
let addr = addr as usize;
self.addr[addr]
}
fn write(&mut self, addr: u16, data: u8) {
let addr = addr as usize;
self.addr[addr] = data;
}
fn tick(&mut self) -> Interrupt {
self.elapsed_cycles += 1;
Interrupt::None
}
fn elapsed_cycles(&self) -> usize {
self.elapsed_cycles
}
}
pub type TestCpu = Cpu<TestInterconnect>;
impl Cpu<TestInterconnect> {
pub fn new_test() -> Self {
let interconnect = TestInterconnect::default();
Cpu::new(interconnect, 0x200)
}
}
|
TestInterconnect
|
identifier_name
|
hud.rs
|
// HUD window
extern crate piston_window;
extern crate engine;
use piston_window::{Context, Transformed, text, Event};
use super::{WindowBase, PostAction, States};
use super::Resources;
use engine::Engine;
use opengl_graphics::GlGraphics;
use std::rc::Rc;
use std::cell::{RefCell, Cell};
pub struct HUDWindow {
engine: Rc<RefCell<Engine>>,
resources: Rc<RefCell<Resources>>
//state: isize,
}
impl HUDWindow {
pub fn
|
(resources: Rc<RefCell<Resources>>, engine: Rc<RefCell<Engine>>) -> HUDWindow {
HUDWindow {
resources: resources,
engine: engine
}
}
}
impl WindowBase for HUDWindow {
fn paint(&mut self, c: Context, g: &mut GlGraphics) {
text(super::GREEN, 15,
&format!("generation {}", self.engine.borrow().cur_iteration()),
&mut self.resources.borrow_mut().font,
c.trans(10.0, 20.0).transform, g);
text(super::GREEN, 15,
&format!("population {}", self.engine.borrow().get_board().get_population()),
&mut self.resources.borrow_mut().font,
c.trans(150.0, 20.0).transform, g);
text(super::GREEN, 15,
&format!("update time {:.*}", 5, self.engine.borrow().get_last_iter_time()),
&mut self.resources.borrow_mut().font,
c.trans(320.0, 20.0).transform, g);
}
fn event_dispatcher(&mut self, _event: &Event, _cur_state: &Cell<States>) -> PostAction {
PostAction::Transfer
}
}
|
new
|
identifier_name
|
hud.rs
|
// HUD window
extern crate piston_window;
extern crate engine;
use piston_window::{Context, Transformed, text, Event};
use super::{WindowBase, PostAction, States};
use super::Resources;
use engine::Engine;
use opengl_graphics::GlGraphics;
use std::rc::Rc;
use std::cell::{RefCell, Cell};
pub struct HUDWindow {
engine: Rc<RefCell<Engine>>,
resources: Rc<RefCell<Resources>>
//state: isize,
}
impl HUDWindow {
pub fn new(resources: Rc<RefCell<Resources>>, engine: Rc<RefCell<Engine>>) -> HUDWindow {
HUDWindow {
resources: resources,
engine: engine
}
}
}
impl WindowBase for HUDWindow {
fn paint(&mut self, c: Context, g: &mut GlGraphics) {
text(super::GREEN, 15,
&format!("generation {}", self.engine.borrow().cur_iteration()),
&mut self.resources.borrow_mut().font,
c.trans(10.0, 20.0).transform, g);
|
text(super::GREEN, 15,
&format!("population {}", self.engine.borrow().get_board().get_population()),
&mut self.resources.borrow_mut().font,
c.trans(150.0, 20.0).transform, g);
text(super::GREEN, 15,
&format!("update time {:.*}", 5, self.engine.borrow().get_last_iter_time()),
&mut self.resources.borrow_mut().font,
c.trans(320.0, 20.0).transform, g);
}
fn event_dispatcher(&mut self, _event: &Event, _cur_state: &Cell<States>) -> PostAction {
PostAction::Transfer
}
}
|
random_line_split
|
|
hud.rs
|
// HUD window
extern crate piston_window;
extern crate engine;
use piston_window::{Context, Transformed, text, Event};
use super::{WindowBase, PostAction, States};
use super::Resources;
use engine::Engine;
use opengl_graphics::GlGraphics;
use std::rc::Rc;
use std::cell::{RefCell, Cell};
pub struct HUDWindow {
engine: Rc<RefCell<Engine>>,
resources: Rc<RefCell<Resources>>
//state: isize,
}
impl HUDWindow {
pub fn new(resources: Rc<RefCell<Resources>>, engine: Rc<RefCell<Engine>>) -> HUDWindow
|
}
impl WindowBase for HUDWindow {
fn paint(&mut self, c: Context, g: &mut GlGraphics) {
text(super::GREEN, 15,
&format!("generation {}", self.engine.borrow().cur_iteration()),
&mut self.resources.borrow_mut().font,
c.trans(10.0, 20.0).transform, g);
text(super::GREEN, 15,
&format!("population {}", self.engine.borrow().get_board().get_population()),
&mut self.resources.borrow_mut().font,
c.trans(150.0, 20.0).transform, g);
text(super::GREEN, 15,
&format!("update time {:.*}", 5, self.engine.borrow().get_last_iter_time()),
&mut self.resources.borrow_mut().font,
c.trans(320.0, 20.0).transform, g);
}
fn event_dispatcher(&mut self, _event: &Event, _cur_state: &Cell<States>) -> PostAction {
PostAction::Transfer
}
}
|
{
HUDWindow {
resources: resources,
engine: engine
}
}
|
identifier_body
|
urlsearchparams.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::URLSearchParamsBinding;
use dom::bindings::codegen::Bindings::URLSearchParamsBinding::URLSearchParamsMethods;
use dom::bindings::codegen::UnionTypes::StringOrURLSearchParams;
use dom::bindings::codegen::UnionTypes::StringOrURLSearchParams::{eURLSearchParams, eString};
use dom::bindings::error::{Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflector, reflect_dom_object};
use util::str::DOMString;
use encoding::all::UTF_8;
use encoding::types::{EncodingRef, EncoderTrap};
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::fmt::radix;
use std::ascii::OwnedAsciiExt;
#[dom_struct]
pub struct URLSearchParams {
|
impl URLSearchParams {
fn new_inherited() -> URLSearchParams {
URLSearchParams {
reflector_: Reflector::new(),
data: DOMRefCell::new(HashMap::new()),
}
}
pub fn new(global: GlobalRef) -> Temporary<URLSearchParams> {
reflect_dom_object(box URLSearchParams::new_inherited(), global, URLSearchParamsBinding::Wrap)
}
pub fn Constructor(global: GlobalRef, init: Option<StringOrURLSearchParams>) -> Fallible<Temporary<URLSearchParams>> {
let usp = URLSearchParams::new(global).root();
match init {
Some(eString(_s)) => {
// XXXManishearth we need to parse the input here
// http://url.spec.whatwg.org/#concept-urlencoded-parser
// We can use rust-url's implementation here:
// https://github.com/SimonSapin/rust-url/blob/master/form_urlencoded.rs#L29
},
Some(eURLSearchParams(u)) => {
let u = u.root();
let usp = usp.r();
let mut map = usp.data.borrow_mut();
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let r = u.r();
let data = r.data.borrow();
*map = data.clone();
},
None => {}
}
Ok(Temporary::from_rooted(usp.r()))
}
}
impl<'a> URLSearchParamsMethods for JSRef<'a, URLSearchParams> {
fn Append(self, name: DOMString, value: DOMString) {
let mut data = self.data.borrow_mut();
match data.entry(name) {
Occupied(entry) => entry.into_mut().push(value),
Vacant(entry) => {
entry.insert(vec!(value));
}
}
self.update_steps();
}
fn Delete(self, name: DOMString) {
self.data.borrow_mut().remove(&name);
self.update_steps();
}
fn Get(self, name: DOMString) -> Option<DOMString> {
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let data = self.data.borrow();
data.get(&name).map(|v| v[0].clone())
}
fn Has(self, name: DOMString) -> bool {
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let data = self.data.borrow();
data.contains_key(&name)
}
fn Set(self, name: DOMString, value: DOMString) {
self.data.borrow_mut().insert(name, vec!(value));
self.update_steps();
}
}
pub trait URLSearchParamsHelpers {
fn serialize(&self, encoding: Option<EncodingRef>) -> Vec<u8>;
fn update_steps(&self);
}
impl URLSearchParamsHelpers for URLSearchParams {
fn serialize(&self, encoding: Option<EncodingRef>) -> Vec<u8> {
// http://url.spec.whatwg.org/#concept-urlencoded-serializer
fn serialize_string(value: &DOMString, encoding: EncodingRef) -> Vec<u8> {
// http://url.spec.whatwg.org/#concept-urlencoded-byte-serializer
let value = value.as_slice();
// XXXManishearth should this be a strict encoding? Can unwrap()ing the result fail?
let value = encoding.encode(value, EncoderTrap::Replace).unwrap();
let mut buf = vec!();
for i in value.iter() {
let append = match *i {
0x20 => vec!(0x2B),
0x2A | 0x2D | 0x2E |
0x30... 0x39 | 0x41... 0x5A |
0x5F | 0x61...0x7A => vec!(*i),
a => {
// http://url.spec.whatwg.org/#percent-encode
let mut encoded = vec!(0x25); // %
let s = format!("{}", radix(a, 16)).into_ascii_uppercase();
let bytes = s.as_bytes();
encoded.push_all(bytes);
encoded
}
};
buf.push_all(append.as_slice());
}
buf
}
let encoding = encoding.unwrap_or(UTF_8 as EncodingRef);
let mut buf = vec!();
let mut first_pair = true;
for (k, v) in self.data.borrow().iter() {
let name = serialize_string(k, encoding);
for val in v.iter() {
let value = serialize_string(val, encoding);
if first_pair {
first_pair = false;
} else {
buf.push(0x26); // &
}
buf.push_all(name.as_slice());
buf.push(0x3D); // =
buf.push_all(value.as_slice())
}
}
buf
}
fn update_steps(&self) {
// XXXManishearth Implement this when the URL interface is implemented
// http://url.spec.whatwg.org/#concept-uq-update
}
}
|
reflector_: Reflector,
data: DOMRefCell<HashMap<DOMString, Vec<DOMString>>>,
}
|
random_line_split
|
urlsearchparams.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::URLSearchParamsBinding;
use dom::bindings::codegen::Bindings::URLSearchParamsBinding::URLSearchParamsMethods;
use dom::bindings::codegen::UnionTypes::StringOrURLSearchParams;
use dom::bindings::codegen::UnionTypes::StringOrURLSearchParams::{eURLSearchParams, eString};
use dom::bindings::error::{Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflector, reflect_dom_object};
use util::str::DOMString;
use encoding::all::UTF_8;
use encoding::types::{EncodingRef, EncoderTrap};
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::fmt::radix;
use std::ascii::OwnedAsciiExt;
#[dom_struct]
pub struct URLSearchParams {
reflector_: Reflector,
data: DOMRefCell<HashMap<DOMString, Vec<DOMString>>>,
}
impl URLSearchParams {
fn new_inherited() -> URLSearchParams {
URLSearchParams {
reflector_: Reflector::new(),
data: DOMRefCell::new(HashMap::new()),
}
}
pub fn new(global: GlobalRef) -> Temporary<URLSearchParams> {
reflect_dom_object(box URLSearchParams::new_inherited(), global, URLSearchParamsBinding::Wrap)
}
pub fn Constructor(global: GlobalRef, init: Option<StringOrURLSearchParams>) -> Fallible<Temporary<URLSearchParams>> {
let usp = URLSearchParams::new(global).root();
match init {
Some(eString(_s)) => {
// XXXManishearth we need to parse the input here
// http://url.spec.whatwg.org/#concept-urlencoded-parser
// We can use rust-url's implementation here:
// https://github.com/SimonSapin/rust-url/blob/master/form_urlencoded.rs#L29
},
Some(eURLSearchParams(u)) =>
|
,
None => {}
}
Ok(Temporary::from_rooted(usp.r()))
}
}
impl<'a> URLSearchParamsMethods for JSRef<'a, URLSearchParams> {
fn Append(self, name: DOMString, value: DOMString) {
let mut data = self.data.borrow_mut();
match data.entry(name) {
Occupied(entry) => entry.into_mut().push(value),
Vacant(entry) => {
entry.insert(vec!(value));
}
}
self.update_steps();
}
fn Delete(self, name: DOMString) {
self.data.borrow_mut().remove(&name);
self.update_steps();
}
fn Get(self, name: DOMString) -> Option<DOMString> {
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let data = self.data.borrow();
data.get(&name).map(|v| v[0].clone())
}
fn Has(self, name: DOMString) -> bool {
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let data = self.data.borrow();
data.contains_key(&name)
}
fn Set(self, name: DOMString, value: DOMString) {
self.data.borrow_mut().insert(name, vec!(value));
self.update_steps();
}
}
pub trait URLSearchParamsHelpers {
fn serialize(&self, encoding: Option<EncodingRef>) -> Vec<u8>;
fn update_steps(&self);
}
impl URLSearchParamsHelpers for URLSearchParams {
fn serialize(&self, encoding: Option<EncodingRef>) -> Vec<u8> {
// http://url.spec.whatwg.org/#concept-urlencoded-serializer
fn serialize_string(value: &DOMString, encoding: EncodingRef) -> Vec<u8> {
// http://url.spec.whatwg.org/#concept-urlencoded-byte-serializer
let value = value.as_slice();
// XXXManishearth should this be a strict encoding? Can unwrap()ing the result fail?
let value = encoding.encode(value, EncoderTrap::Replace).unwrap();
let mut buf = vec!();
for i in value.iter() {
let append = match *i {
0x20 => vec!(0x2B),
0x2A | 0x2D | 0x2E |
0x30... 0x39 | 0x41... 0x5A |
0x5F | 0x61...0x7A => vec!(*i),
a => {
// http://url.spec.whatwg.org/#percent-encode
let mut encoded = vec!(0x25); // %
let s = format!("{}", radix(a, 16)).into_ascii_uppercase();
let bytes = s.as_bytes();
encoded.push_all(bytes);
encoded
}
};
buf.push_all(append.as_slice());
}
buf
}
let encoding = encoding.unwrap_or(UTF_8 as EncodingRef);
let mut buf = vec!();
let mut first_pair = true;
for (k, v) in self.data.borrow().iter() {
let name = serialize_string(k, encoding);
for val in v.iter() {
let value = serialize_string(val, encoding);
if first_pair {
first_pair = false;
} else {
buf.push(0x26); // &
}
buf.push_all(name.as_slice());
buf.push(0x3D); // =
buf.push_all(value.as_slice())
}
}
buf
}
fn update_steps(&self) {
// XXXManishearth Implement this when the URL interface is implemented
// http://url.spec.whatwg.org/#concept-uq-update
}
}
|
{
let u = u.root();
let usp = usp.r();
let mut map = usp.data.borrow_mut();
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let r = u.r();
let data = r.data.borrow();
*map = data.clone();
}
|
conditional_block
|
urlsearchparams.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::URLSearchParamsBinding;
use dom::bindings::codegen::Bindings::URLSearchParamsBinding::URLSearchParamsMethods;
use dom::bindings::codegen::UnionTypes::StringOrURLSearchParams;
use dom::bindings::codegen::UnionTypes::StringOrURLSearchParams::{eURLSearchParams, eString};
use dom::bindings::error::{Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflector, reflect_dom_object};
use util::str::DOMString;
use encoding::all::UTF_8;
use encoding::types::{EncodingRef, EncoderTrap};
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::fmt::radix;
use std::ascii::OwnedAsciiExt;
#[dom_struct]
pub struct URLSearchParams {
reflector_: Reflector,
data: DOMRefCell<HashMap<DOMString, Vec<DOMString>>>,
}
impl URLSearchParams {
fn new_inherited() -> URLSearchParams {
URLSearchParams {
reflector_: Reflector::new(),
data: DOMRefCell::new(HashMap::new()),
}
}
pub fn new(global: GlobalRef) -> Temporary<URLSearchParams> {
reflect_dom_object(box URLSearchParams::new_inherited(), global, URLSearchParamsBinding::Wrap)
}
pub fn Constructor(global: GlobalRef, init: Option<StringOrURLSearchParams>) -> Fallible<Temporary<URLSearchParams>> {
let usp = URLSearchParams::new(global).root();
match init {
Some(eString(_s)) => {
// XXXManishearth we need to parse the input here
// http://url.spec.whatwg.org/#concept-urlencoded-parser
// We can use rust-url's implementation here:
// https://github.com/SimonSapin/rust-url/blob/master/form_urlencoded.rs#L29
},
Some(eURLSearchParams(u)) => {
let u = u.root();
let usp = usp.r();
let mut map = usp.data.borrow_mut();
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let r = u.r();
let data = r.data.borrow();
*map = data.clone();
},
None => {}
}
Ok(Temporary::from_rooted(usp.r()))
}
}
impl<'a> URLSearchParamsMethods for JSRef<'a, URLSearchParams> {
fn
|
(self, name: DOMString, value: DOMString) {
let mut data = self.data.borrow_mut();
match data.entry(name) {
Occupied(entry) => entry.into_mut().push(value),
Vacant(entry) => {
entry.insert(vec!(value));
}
}
self.update_steps();
}
fn Delete(self, name: DOMString) {
self.data.borrow_mut().remove(&name);
self.update_steps();
}
fn Get(self, name: DOMString) -> Option<DOMString> {
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let data = self.data.borrow();
data.get(&name).map(|v| v[0].clone())
}
fn Has(self, name: DOMString) -> bool {
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let data = self.data.borrow();
data.contains_key(&name)
}
fn Set(self, name: DOMString, value: DOMString) {
self.data.borrow_mut().insert(name, vec!(value));
self.update_steps();
}
}
pub trait URLSearchParamsHelpers {
fn serialize(&self, encoding: Option<EncodingRef>) -> Vec<u8>;
fn update_steps(&self);
}
impl URLSearchParamsHelpers for URLSearchParams {
fn serialize(&self, encoding: Option<EncodingRef>) -> Vec<u8> {
// http://url.spec.whatwg.org/#concept-urlencoded-serializer
fn serialize_string(value: &DOMString, encoding: EncodingRef) -> Vec<u8> {
// http://url.spec.whatwg.org/#concept-urlencoded-byte-serializer
let value = value.as_slice();
// XXXManishearth should this be a strict encoding? Can unwrap()ing the result fail?
let value = encoding.encode(value, EncoderTrap::Replace).unwrap();
let mut buf = vec!();
for i in value.iter() {
let append = match *i {
0x20 => vec!(0x2B),
0x2A | 0x2D | 0x2E |
0x30... 0x39 | 0x41... 0x5A |
0x5F | 0x61...0x7A => vec!(*i),
a => {
// http://url.spec.whatwg.org/#percent-encode
let mut encoded = vec!(0x25); // %
let s = format!("{}", radix(a, 16)).into_ascii_uppercase();
let bytes = s.as_bytes();
encoded.push_all(bytes);
encoded
}
};
buf.push_all(append.as_slice());
}
buf
}
let encoding = encoding.unwrap_or(UTF_8 as EncodingRef);
let mut buf = vec!();
let mut first_pair = true;
for (k, v) in self.data.borrow().iter() {
let name = serialize_string(k, encoding);
for val in v.iter() {
let value = serialize_string(val, encoding);
if first_pair {
first_pair = false;
} else {
buf.push(0x26); // &
}
buf.push_all(name.as_slice());
buf.push(0x3D); // =
buf.push_all(value.as_slice())
}
}
buf
}
fn update_steps(&self) {
// XXXManishearth Implement this when the URL interface is implemented
// http://url.spec.whatwg.org/#concept-uq-update
}
}
|
Append
|
identifier_name
|
urlsearchparams.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::codegen::Bindings::URLSearchParamsBinding;
use dom::bindings::codegen::Bindings::URLSearchParamsBinding::URLSearchParamsMethods;
use dom::bindings::codegen::UnionTypes::StringOrURLSearchParams;
use dom::bindings::codegen::UnionTypes::StringOrURLSearchParams::{eURLSearchParams, eString};
use dom::bindings::error::{Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflector, reflect_dom_object};
use util::str::DOMString;
use encoding::all::UTF_8;
use encoding::types::{EncodingRef, EncoderTrap};
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::fmt::radix;
use std::ascii::OwnedAsciiExt;
#[dom_struct]
pub struct URLSearchParams {
reflector_: Reflector,
data: DOMRefCell<HashMap<DOMString, Vec<DOMString>>>,
}
impl URLSearchParams {
fn new_inherited() -> URLSearchParams {
URLSearchParams {
reflector_: Reflector::new(),
data: DOMRefCell::new(HashMap::new()),
}
}
pub fn new(global: GlobalRef) -> Temporary<URLSearchParams> {
reflect_dom_object(box URLSearchParams::new_inherited(), global, URLSearchParamsBinding::Wrap)
}
pub fn Constructor(global: GlobalRef, init: Option<StringOrURLSearchParams>) -> Fallible<Temporary<URLSearchParams>> {
let usp = URLSearchParams::new(global).root();
match init {
Some(eString(_s)) => {
// XXXManishearth we need to parse the input here
// http://url.spec.whatwg.org/#concept-urlencoded-parser
// We can use rust-url's implementation here:
// https://github.com/SimonSapin/rust-url/blob/master/form_urlencoded.rs#L29
},
Some(eURLSearchParams(u)) => {
let u = u.root();
let usp = usp.r();
let mut map = usp.data.borrow_mut();
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let r = u.r();
let data = r.data.borrow();
*map = data.clone();
},
None => {}
}
Ok(Temporary::from_rooted(usp.r()))
}
}
impl<'a> URLSearchParamsMethods for JSRef<'a, URLSearchParams> {
fn Append(self, name: DOMString, value: DOMString) {
let mut data = self.data.borrow_mut();
match data.entry(name) {
Occupied(entry) => entry.into_mut().push(value),
Vacant(entry) => {
entry.insert(vec!(value));
}
}
self.update_steps();
}
fn Delete(self, name: DOMString) {
self.data.borrow_mut().remove(&name);
self.update_steps();
}
fn Get(self, name: DOMString) -> Option<DOMString> {
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let data = self.data.borrow();
data.get(&name).map(|v| v[0].clone())
}
fn Has(self, name: DOMString) -> bool {
// FIXME(https://github.com/rust-lang/rust/issues/23338)
let data = self.data.borrow();
data.contains_key(&name)
}
fn Set(self, name: DOMString, value: DOMString) {
self.data.borrow_mut().insert(name, vec!(value));
self.update_steps();
}
}
pub trait URLSearchParamsHelpers {
fn serialize(&self, encoding: Option<EncodingRef>) -> Vec<u8>;
fn update_steps(&self);
}
impl URLSearchParamsHelpers for URLSearchParams {
fn serialize(&self, encoding: Option<EncodingRef>) -> Vec<u8> {
// http://url.spec.whatwg.org/#concept-urlencoded-serializer
fn serialize_string(value: &DOMString, encoding: EncodingRef) -> Vec<u8> {
// http://url.spec.whatwg.org/#concept-urlencoded-byte-serializer
let value = value.as_slice();
// XXXManishearth should this be a strict encoding? Can unwrap()ing the result fail?
let value = encoding.encode(value, EncoderTrap::Replace).unwrap();
let mut buf = vec!();
for i in value.iter() {
let append = match *i {
0x20 => vec!(0x2B),
0x2A | 0x2D | 0x2E |
0x30... 0x39 | 0x41... 0x5A |
0x5F | 0x61...0x7A => vec!(*i),
a => {
// http://url.spec.whatwg.org/#percent-encode
let mut encoded = vec!(0x25); // %
let s = format!("{}", radix(a, 16)).into_ascii_uppercase();
let bytes = s.as_bytes();
encoded.push_all(bytes);
encoded
}
};
buf.push_all(append.as_slice());
}
buf
}
let encoding = encoding.unwrap_or(UTF_8 as EncodingRef);
let mut buf = vec!();
let mut first_pair = true;
for (k, v) in self.data.borrow().iter() {
let name = serialize_string(k, encoding);
for val in v.iter() {
let value = serialize_string(val, encoding);
if first_pair {
first_pair = false;
} else {
buf.push(0x26); // &
}
buf.push_all(name.as_slice());
buf.push(0x3D); // =
buf.push_all(value.as_slice())
}
}
buf
}
fn update_steps(&self)
|
}
|
{
// XXXManishearth Implement this when the URL interface is implemented
// http://url.spec.whatwg.org/#concept-uq-update
}
|
identifier_body
|
window_run_return.rs
|
// Limit this example to only compatible platforms.
#[cfg(any(
target_os = "windows",
target_os = "macos",
target_os = "linux",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd"
))]
fn
|
() {
use std::{thread::sleep, time::Duration};
use simple_logger::SimpleLogger;
use winit::{
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoop},
platform::run_return::EventLoopExtRunReturn,
window::WindowBuilder,
};
let mut event_loop = EventLoop::new();
SimpleLogger::new().init().unwrap();
let _window = WindowBuilder::new()
.with_title("A fantastic window!")
.build(&event_loop)
.unwrap();
let mut quit = false;
while!quit {
event_loop.run_return(|event, _, control_flow| {
*control_flow = ControlFlow::Wait;
if let Event::WindowEvent { event,.. } = &event {
// Print only Window events to reduce noise
println!("{:?}", event);
}
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
quit = true;
}
Event::MainEventsCleared => {
*control_flow = ControlFlow::Exit;
}
_ => (),
}
});
// Sleep for 1/60 second to simulate rendering
println!("rendering");
sleep(Duration::from_millis(16));
}
}
#[cfg(any(target_os = "ios", target_os = "android", target_arch = "wasm32"))]
fn main() {
println!("This platform doesn't support run_return.");
}
|
main
|
identifier_name
|
window_run_return.rs
|
// Limit this example to only compatible platforms.
#[cfg(any(
target_os = "windows",
target_os = "macos",
target_os = "linux",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd"
))]
fn main()
|
while!quit {
event_loop.run_return(|event, _, control_flow| {
*control_flow = ControlFlow::Wait;
if let Event::WindowEvent { event,.. } = &event {
// Print only Window events to reduce noise
println!("{:?}", event);
}
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
quit = true;
}
Event::MainEventsCleared => {
*control_flow = ControlFlow::Exit;
}
_ => (),
}
});
// Sleep for 1/60 second to simulate rendering
println!("rendering");
sleep(Duration::from_millis(16));
}
}
#[cfg(any(target_os = "ios", target_os = "android", target_arch = "wasm32"))]
fn main() {
println!("This platform doesn't support run_return.");
}
|
{
use std::{thread::sleep, time::Duration};
use simple_logger::SimpleLogger;
use winit::{
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoop},
platform::run_return::EventLoopExtRunReturn,
window::WindowBuilder,
};
let mut event_loop = EventLoop::new();
SimpleLogger::new().init().unwrap();
let _window = WindowBuilder::new()
.with_title("A fantastic window!")
.build(&event_loop)
.unwrap();
let mut quit = false;
|
identifier_body
|
window_run_return.rs
|
// Limit this example to only compatible platforms.
#[cfg(any(
target_os = "windows",
target_os = "macos",
target_os = "linux",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd"
))]
fn main() {
use std::{thread::sleep, time::Duration};
use simple_logger::SimpleLogger;
use winit::{
|
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoop},
platform::run_return::EventLoopExtRunReturn,
window::WindowBuilder,
};
let mut event_loop = EventLoop::new();
SimpleLogger::new().init().unwrap();
let _window = WindowBuilder::new()
.with_title("A fantastic window!")
.build(&event_loop)
.unwrap();
let mut quit = false;
while!quit {
event_loop.run_return(|event, _, control_flow| {
*control_flow = ControlFlow::Wait;
if let Event::WindowEvent { event,.. } = &event {
// Print only Window events to reduce noise
println!("{:?}", event);
}
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
quit = true;
}
Event::MainEventsCleared => {
*control_flow = ControlFlow::Exit;
}
_ => (),
}
});
// Sleep for 1/60 second to simulate rendering
println!("rendering");
sleep(Duration::from_millis(16));
}
}
#[cfg(any(target_os = "ios", target_os = "android", target_arch = "wasm32"))]
fn main() {
println!("This platform doesn't support run_return.");
}
|
random_line_split
|
|
window_run_return.rs
|
// Limit this example to only compatible platforms.
#[cfg(any(
target_os = "windows",
target_os = "macos",
target_os = "linux",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd"
))]
fn main() {
use std::{thread::sleep, time::Duration};
use simple_logger::SimpleLogger;
use winit::{
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoop},
platform::run_return::EventLoopExtRunReturn,
window::WindowBuilder,
};
let mut event_loop = EventLoop::new();
SimpleLogger::new().init().unwrap();
let _window = WindowBuilder::new()
.with_title("A fantastic window!")
.build(&event_loop)
.unwrap();
let mut quit = false;
while!quit {
event_loop.run_return(|event, _, control_flow| {
*control_flow = ControlFlow::Wait;
if let Event::WindowEvent { event,.. } = &event {
// Print only Window events to reduce noise
println!("{:?}", event);
}
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} =>
|
Event::MainEventsCleared => {
*control_flow = ControlFlow::Exit;
}
_ => (),
}
});
// Sleep for 1/60 second to simulate rendering
println!("rendering");
sleep(Duration::from_millis(16));
}
}
#[cfg(any(target_os = "ios", target_os = "android", target_arch = "wasm32"))]
fn main() {
println!("This platform doesn't support run_return.");
}
|
{
quit = true;
}
|
conditional_block
|
router.rs
|
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use crate::fsm::{Fsm, FsmScheduler, FsmState};
use crate::mailbox::{BasicMailbox, Mailbox};
use crate::metrics::CHANNEL_FULL_COUNTER_VEC;
use collections::HashMap;
use crossbeam::channel::{SendError, TrySendError};
use std::cell::Cell;
use std::mem;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use tikv_util::lru::LruCache;
use tikv_util::Either;
use tikv_util::{debug, info};
/// A struct that traces the approximate memory usage of router.
#[derive(Default)]
pub struct RouterTrace {
pub alive: usize,
pub leak: usize,
}
struct NormalMailMap<N: Fsm> {
map: HashMap<u64, BasicMailbox<N>>,
// Count of Mailboxes that is stored in `map`.
alive_cnt: Arc<AtomicUsize>,
}
enum CheckDoResult<T> {
NotExist,
Invalid,
Valid(T),
}
/// Router route messages to its target mailbox.
///
/// Every fsm has a mailbox, hence it's necessary to have an address book
/// that can deliver messages to specified fsm, which is exact router.
///
/// In our abstract model, every batch system has two different kind of
/// fsms. First is normal fsm, which does the common work like peers in a
/// raftstore model or apply delegate in apply model. Second is control fsm,
/// which does some work that requires a global view of resources or creates
/// missing fsm for specified address. Normal fsm and control fsm can have
/// different scheduler, but this is not required.
pub struct Router<N: Fsm, C: Fsm, Ns, Cs> {
normals: Arc<Mutex<NormalMailMap<N>>>,
caches: Cell<LruCache<u64, BasicMailbox<N>>>,
pub(super) control_box: BasicMailbox<C>,
// TODO: These two schedulers should be unified as single one. However
// it's not possible to write FsmScheduler<Fsm=C> + FsmScheduler<Fsm=N>
// for now.
pub(crate) normal_scheduler: Ns,
control_scheduler: Cs,
// Count of Mailboxes that is not destroyed.
// Added when a Mailbox created, and subtracted it when a Mailbox destroyed.
state_cnt: Arc<AtomicUsize>,
// Indicates the router is shutdown down or not.
shutdown: Arc<AtomicBool>,
}
impl<N, C, Ns, Cs> Router<N, C, Ns, Cs>
where
N: Fsm,
C: Fsm,
Ns: FsmScheduler<Fsm = N> + Clone,
Cs: FsmScheduler<Fsm = C> + Clone,
{
pub(super) fn new(
control_box: BasicMailbox<C>,
normal_scheduler: Ns,
control_scheduler: Cs,
state_cnt: Arc<AtomicUsize>,
) -> Router<N, C, Ns, Cs> {
Router {
normals: Arc::new(Mutex::new(NormalMailMap {
map: HashMap::default(),
alive_cnt: Arc::default(),
})),
caches: Cell::new(LruCache::with_capacity_and_sample(1024, 7)),
control_box,
normal_scheduler,
control_scheduler,
state_cnt,
shutdown: Arc::new(AtomicBool::new(false)),
}
}
/// The `Router` has been already shutdown or not.
pub fn is_shutdown(&self) -> bool {
self.shutdown.load(Ordering::SeqCst)
}
/// A helper function that tries to unify a common access pattern to
/// mailbox.
///
/// Generally, when sending a message to a mailbox, cache should be
/// check first, if not found, lock should be acquired.
///
/// Returns None means there is no mailbox inside the normal registry.
/// Some(None) means there is expected mailbox inside the normal registry
/// but it returns None after apply the given function. Some(Some) means
/// the given function returns Some and cache is updated if it's invalid.
#[inline]
fn check_do<F, R>(&self, addr: u64, mut f: F) -> CheckDoResult<R>
where
F: FnMut(&BasicMailbox<N>) -> Option<R>,
{
let caches = unsafe { &mut *self.caches.as_ptr() };
let mut connected = true;
if let Some(mailbox) = caches.get(&addr) {
match f(mailbox) {
Some(r) => return CheckDoResult::Valid(r),
None => {
connected = false;
}
}
}
let (cnt, mailbox) = {
let mut boxes = self.normals.lock().unwrap();
let cnt = boxes.map.len();
let b = match boxes.map.get_mut(&addr) {
Some(mailbox) => mailbox.clone(),
None => {
drop(boxes);
if!connected {
caches.remove(&addr);
}
return CheckDoResult::NotExist;
}
};
(cnt, b)
};
if cnt > caches.capacity() || cnt < caches.capacity() / 2 {
caches.resize(cnt);
}
let res = f(&mailbox);
match res {
Some(r) => {
caches.insert(addr, mailbox);
CheckDoResult::Valid(r)
}
None => {
if!connected {
caches.remove(&addr);
}
CheckDoResult::Invalid
}
}
}
/// Register a mailbox with given address.
pub fn register(&self, addr: u64, mailbox: BasicMailbox<N>) {
let mut normals = self.normals.lock().unwrap();
if let Some(mailbox) = normals.map.insert(addr, mailbox) {
mailbox.close();
}
normals
.alive_cnt
.store(normals.map.len(), Ordering::Relaxed);
}
pub fn register_all(&self, mailboxes: Vec<(u64, BasicMailbox<N>)>) {
let mut normals = self.normals.lock().unwrap();
normals.map.reserve(mailboxes.len());
for (addr, mailbox) in mailboxes {
if let Some(m) = normals.map.insert(addr, mailbox) {
m.close();
}
}
normals
.alive_cnt
.store(normals.map.len(), Ordering::Relaxed);
}
/// Get the mailbox of specified address.
pub fn mailbox(&self, addr: u64) -> Option<Mailbox<N, Ns>> {
let res = self.check_do(addr, |mailbox| {
if mailbox.is_connected() {
Some(Mailbox::new(mailbox.clone(), self.normal_scheduler.clone()))
} else {
None
}
});
match res {
CheckDoResult::Valid(r) => Some(r),
_ => None,
}
}
/// Get the mailbox of control fsm.
pub fn control_mailbox(&self) -> Mailbox<C, Cs> {
Mailbox::new(self.control_box.clone(), self.control_scheduler.clone())
}
/// Try to send a message to specified address.
///
/// If Either::Left is returned, then the message is sent. Otherwise,
/// it indicates mailbox is not found.
#[inline]
pub fn try_send(
&self,
addr: u64,
msg: N::Message,
) -> Either<Result<(), TrySendError<N::Message>>, N::Message> {
let mut msg = Some(msg);
let res = self.check_do(addr, |mailbox| {
let m = msg.take().unwrap();
match mailbox.try_send(m, &self.normal_scheduler) {
Ok(()) => Some(Ok(())),
r @ Err(TrySendError::Full(_)) => {
CHANNEL_FULL_COUNTER_VEC
.with_label_values(&["normal"])
.inc();
Some(r)
}
Err(TrySendError::Disconnected(m)) => {
msg = Some(m);
None
}
}
});
match res {
CheckDoResult::Valid(r) => Either::Left(r),
CheckDoResult::Invalid => Either::Left(Err(TrySendError::Disconnected(msg.unwrap()))),
CheckDoResult::NotExist => Either::Right(msg.unwrap()),
}
}
/// Send the message to specified address.
#[inline]
pub fn send(&self, addr: u64, msg: N::Message) -> Result<(), TrySendError<N::Message>> {
match self.try_send(addr, msg) {
Either::Left(res) => res,
Either::Right(m) => Err(TrySendError::Disconnected(m)),
}
}
/// Force sending message to specified address despite the capacity
/// limit of mailbox.
#[inline]
pub fn force_send(&self, addr: u64, msg: N::Message) -> Result<(), SendError<N::Message>> {
match self.send(addr, msg) {
Ok(()) => Ok(()),
Err(TrySendError::Full(m)) => {
let caches = unsafe { &mut *self.caches.as_ptr() };
caches
.get(&addr)
.unwrap()
.force_send(m, &self.normal_scheduler)
}
Err(TrySendError::Disconnected(m)) => {
if self.is_shutdown() {
Ok(())
} else {
Err(SendError(m))
}
}
}
}
/// Force sending message to control fsm.
#[inline]
pub fn send_control(&self, msg: C::Message) -> Result<(), TrySendError<C::Message>> {
|
.inc();
r
}
r => r,
}
}
/// Try to notify all normal fsm a message.
pub fn broadcast_normal(&self, mut msg_gen: impl FnMut() -> N::Message) {
let mailboxes = self.normals.lock().unwrap();
for mailbox in mailboxes.map.values() {
let _ = mailbox.force_send(msg_gen(), &self.normal_scheduler);
}
}
/// Try to notify all fsm that the cluster is being shutdown.
pub fn broadcast_shutdown(&self) {
info!("broadcasting shutdown");
self.shutdown.store(true, Ordering::SeqCst);
unsafe { &mut *self.caches.as_ptr() }.clear();
let mut mailboxes = self.normals.lock().unwrap();
for (addr, mailbox) in mailboxes.map.drain() {
debug!("[region {}] shutdown mailbox", addr);
mailbox.close();
}
self.control_box.close();
self.normal_scheduler.shutdown();
self.control_scheduler.shutdown();
}
/// Close the mailbox of address.
pub fn close(&self, addr: u64) {
info!("[region {}] shutdown mailbox", addr);
unsafe { &mut *self.caches.as_ptr() }.remove(&addr);
let mut mailboxes = self.normals.lock().unwrap();
if let Some(mb) = mailboxes.map.remove(&addr) {
mb.close();
}
mailboxes
.alive_cnt
.store(mailboxes.map.len(), Ordering::Relaxed);
}
pub fn clear_cache(&self) {
unsafe { &mut *self.caches.as_ptr() }.clear();
}
pub fn state_cnt(&self) -> &Arc<AtomicUsize> {
&self.state_cnt
}
pub fn alive_cnt(&self) -> Arc<AtomicUsize> {
self.normals.lock().unwrap().alive_cnt.clone()
}
pub fn trace(&self) -> RouterTrace {
let alive = self.normals.lock().unwrap().alive_cnt.clone();
let total = self.state_cnt.load(Ordering::Relaxed);
let alive = alive.load(Ordering::Relaxed);
// 1 represents the control fsm.
let leak = if total > alive + 1 {
total - alive - 1
} else {
0
};
let mailbox_unit = mem::size_of::<(u64, BasicMailbox<N>)>();
let state_unit = mem::size_of::<FsmState<N>>();
// Every message in crossbeam sender needs 8 bytes to store state.
let message_unit = mem::size_of::<N::Message>() + 8;
// crossbeam unbounded channel sender has a list of blocks. Every block has 31 unit
// and every sender has at least one sender.
let sender_block_unit = 31;
RouterTrace {
alive: (mailbox_unit * 8 / 7 // hashmap uses 7/8 of allocated memory.
+ state_unit + message_unit * sender_block_unit)
* alive,
leak: (state_unit + message_unit * sender_block_unit) * leak,
}
}
}
impl<N: Fsm, C: Fsm, Ns: Clone, Cs: Clone> Clone for Router<N, C, Ns, Cs> {
fn clone(&self) -> Router<N, C, Ns, Cs> {
Router {
normals: self.normals.clone(),
caches: Cell::new(LruCache::with_capacity_and_sample(1024, 7)),
control_box: self.control_box.clone(),
// These two schedulers should be unified as single one. However
// it's not possible to write FsmScheduler<Fsm=C> + FsmScheduler<Fsm=N>
// for now.
normal_scheduler: self.normal_scheduler.clone(),
control_scheduler: self.control_scheduler.clone(),
shutdown: self.shutdown.clone(),
state_cnt: self.state_cnt.clone(),
}
}
}
|
match self.control_box.try_send(msg, &self.control_scheduler) {
Ok(()) => Ok(()),
r @ Err(TrySendError::Full(_)) => {
CHANNEL_FULL_COUNTER_VEC
.with_label_values(&["control"])
|
random_line_split
|
router.rs
|
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use crate::fsm::{Fsm, FsmScheduler, FsmState};
use crate::mailbox::{BasicMailbox, Mailbox};
use crate::metrics::CHANNEL_FULL_COUNTER_VEC;
use collections::HashMap;
use crossbeam::channel::{SendError, TrySendError};
use std::cell::Cell;
use std::mem;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use tikv_util::lru::LruCache;
use tikv_util::Either;
use tikv_util::{debug, info};
/// A struct that traces the approximate memory usage of router.
#[derive(Default)]
pub struct RouterTrace {
pub alive: usize,
pub leak: usize,
}
struct NormalMailMap<N: Fsm> {
map: HashMap<u64, BasicMailbox<N>>,
// Count of Mailboxes that is stored in `map`.
alive_cnt: Arc<AtomicUsize>,
}
enum CheckDoResult<T> {
NotExist,
Invalid,
Valid(T),
}
/// Router route messages to its target mailbox.
///
/// Every fsm has a mailbox, hence it's necessary to have an address book
/// that can deliver messages to specified fsm, which is exact router.
///
/// In our abstract model, every batch system has two different kind of
/// fsms. First is normal fsm, which does the common work like peers in a
/// raftstore model or apply delegate in apply model. Second is control fsm,
/// which does some work that requires a global view of resources or creates
/// missing fsm for specified address. Normal fsm and control fsm can have
/// different scheduler, but this is not required.
pub struct Router<N: Fsm, C: Fsm, Ns, Cs> {
normals: Arc<Mutex<NormalMailMap<N>>>,
caches: Cell<LruCache<u64, BasicMailbox<N>>>,
pub(super) control_box: BasicMailbox<C>,
// TODO: These two schedulers should be unified as single one. However
// it's not possible to write FsmScheduler<Fsm=C> + FsmScheduler<Fsm=N>
// for now.
pub(crate) normal_scheduler: Ns,
control_scheduler: Cs,
// Count of Mailboxes that is not destroyed.
// Added when a Mailbox created, and subtracted it when a Mailbox destroyed.
state_cnt: Arc<AtomicUsize>,
// Indicates the router is shutdown down or not.
shutdown: Arc<AtomicBool>,
}
impl<N, C, Ns, Cs> Router<N, C, Ns, Cs>
where
N: Fsm,
C: Fsm,
Ns: FsmScheduler<Fsm = N> + Clone,
Cs: FsmScheduler<Fsm = C> + Clone,
{
pub(super) fn new(
control_box: BasicMailbox<C>,
normal_scheduler: Ns,
control_scheduler: Cs,
state_cnt: Arc<AtomicUsize>,
) -> Router<N, C, Ns, Cs> {
Router {
normals: Arc::new(Mutex::new(NormalMailMap {
map: HashMap::default(),
alive_cnt: Arc::default(),
})),
caches: Cell::new(LruCache::with_capacity_and_sample(1024, 7)),
control_box,
normal_scheduler,
control_scheduler,
state_cnt,
shutdown: Arc::new(AtomicBool::new(false)),
}
}
/// The `Router` has been already shutdown or not.
pub fn is_shutdown(&self) -> bool {
self.shutdown.load(Ordering::SeqCst)
}
/// A helper function that tries to unify a common access pattern to
/// mailbox.
///
/// Generally, when sending a message to a mailbox, cache should be
/// check first, if not found, lock should be acquired.
///
/// Returns None means there is no mailbox inside the normal registry.
/// Some(None) means there is expected mailbox inside the normal registry
/// but it returns None after apply the given function. Some(Some) means
/// the given function returns Some and cache is updated if it's invalid.
#[inline]
fn check_do<F, R>(&self, addr: u64, mut f: F) -> CheckDoResult<R>
where
F: FnMut(&BasicMailbox<N>) -> Option<R>,
{
let caches = unsafe { &mut *self.caches.as_ptr() };
let mut connected = true;
if let Some(mailbox) = caches.get(&addr) {
match f(mailbox) {
Some(r) => return CheckDoResult::Valid(r),
None => {
connected = false;
}
}
}
let (cnt, mailbox) = {
let mut boxes = self.normals.lock().unwrap();
let cnt = boxes.map.len();
let b = match boxes.map.get_mut(&addr) {
Some(mailbox) => mailbox.clone(),
None => {
drop(boxes);
if!connected {
caches.remove(&addr);
}
return CheckDoResult::NotExist;
}
};
(cnt, b)
};
if cnt > caches.capacity() || cnt < caches.capacity() / 2 {
caches.resize(cnt);
}
let res = f(&mailbox);
match res {
Some(r) => {
caches.insert(addr, mailbox);
CheckDoResult::Valid(r)
}
None => {
if!connected {
caches.remove(&addr);
}
CheckDoResult::Invalid
}
}
}
/// Register a mailbox with given address.
pub fn register(&self, addr: u64, mailbox: BasicMailbox<N>) {
let mut normals = self.normals.lock().unwrap();
if let Some(mailbox) = normals.map.insert(addr, mailbox) {
mailbox.close();
}
normals
.alive_cnt
.store(normals.map.len(), Ordering::Relaxed);
}
pub fn register_all(&self, mailboxes: Vec<(u64, BasicMailbox<N>)>) {
let mut normals = self.normals.lock().unwrap();
normals.map.reserve(mailboxes.len());
for (addr, mailbox) in mailboxes {
if let Some(m) = normals.map.insert(addr, mailbox) {
m.close();
}
}
normals
.alive_cnt
.store(normals.map.len(), Ordering::Relaxed);
}
/// Get the mailbox of specified address.
pub fn mailbox(&self, addr: u64) -> Option<Mailbox<N, Ns>> {
let res = self.check_do(addr, |mailbox| {
if mailbox.is_connected() {
Some(Mailbox::new(mailbox.clone(), self.normal_scheduler.clone()))
} else {
None
}
});
match res {
CheckDoResult::Valid(r) => Some(r),
_ => None,
}
}
/// Get the mailbox of control fsm.
pub fn control_mailbox(&self) -> Mailbox<C, Cs> {
Mailbox::new(self.control_box.clone(), self.control_scheduler.clone())
}
/// Try to send a message to specified address.
///
/// If Either::Left is returned, then the message is sent. Otherwise,
/// it indicates mailbox is not found.
#[inline]
pub fn try_send(
&self,
addr: u64,
msg: N::Message,
) -> Either<Result<(), TrySendError<N::Message>>, N::Message>
|
CheckDoResult::Invalid => Either::Left(Err(TrySendError::Disconnected(msg.unwrap()))),
CheckDoResult::NotExist => Either::Right(msg.unwrap()),
}
}
/// Send the message to specified address.
#[inline]
pub fn send(&self, addr: u64, msg: N::Message) -> Result<(), TrySendError<N::Message>> {
match self.try_send(addr, msg) {
Either::Left(res) => res,
Either::Right(m) => Err(TrySendError::Disconnected(m)),
}
}
/// Force sending message to specified address despite the capacity
/// limit of mailbox.
#[inline]
pub fn force_send(&self, addr: u64, msg: N::Message) -> Result<(), SendError<N::Message>> {
match self.send(addr, msg) {
Ok(()) => Ok(()),
Err(TrySendError::Full(m)) => {
let caches = unsafe { &mut *self.caches.as_ptr() };
caches
.get(&addr)
.unwrap()
.force_send(m, &self.normal_scheduler)
}
Err(TrySendError::Disconnected(m)) => {
if self.is_shutdown() {
Ok(())
} else {
Err(SendError(m))
}
}
}
}
/// Force sending message to control fsm.
#[inline]
pub fn send_control(&self, msg: C::Message) -> Result<(), TrySendError<C::Message>> {
match self.control_box.try_send(msg, &self.control_scheduler) {
Ok(()) => Ok(()),
r @ Err(TrySendError::Full(_)) => {
CHANNEL_FULL_COUNTER_VEC
.with_label_values(&["control"])
.inc();
r
}
r => r,
}
}
/// Try to notify all normal fsm a message.
pub fn broadcast_normal(&self, mut msg_gen: impl FnMut() -> N::Message) {
let mailboxes = self.normals.lock().unwrap();
for mailbox in mailboxes.map.values() {
let _ = mailbox.force_send(msg_gen(), &self.normal_scheduler);
}
}
/// Try to notify all fsm that the cluster is being shutdown.
pub fn broadcast_shutdown(&self) {
info!("broadcasting shutdown");
self.shutdown.store(true, Ordering::SeqCst);
unsafe { &mut *self.caches.as_ptr() }.clear();
let mut mailboxes = self.normals.lock().unwrap();
for (addr, mailbox) in mailboxes.map.drain() {
debug!("[region {}] shutdown mailbox", addr);
mailbox.close();
}
self.control_box.close();
self.normal_scheduler.shutdown();
self.control_scheduler.shutdown();
}
/// Close the mailbox of address.
pub fn close(&self, addr: u64) {
info!("[region {}] shutdown mailbox", addr);
unsafe { &mut *self.caches.as_ptr() }.remove(&addr);
let mut mailboxes = self.normals.lock().unwrap();
if let Some(mb) = mailboxes.map.remove(&addr) {
mb.close();
}
mailboxes
.alive_cnt
.store(mailboxes.map.len(), Ordering::Relaxed);
}
pub fn clear_cache(&self) {
unsafe { &mut *self.caches.as_ptr() }.clear();
}
pub fn state_cnt(&self) -> &Arc<AtomicUsize> {
&self.state_cnt
}
pub fn alive_cnt(&self) -> Arc<AtomicUsize> {
self.normals.lock().unwrap().alive_cnt.clone()
}
pub fn trace(&self) -> RouterTrace {
let alive = self.normals.lock().unwrap().alive_cnt.clone();
let total = self.state_cnt.load(Ordering::Relaxed);
let alive = alive.load(Ordering::Relaxed);
// 1 represents the control fsm.
let leak = if total > alive + 1 {
total - alive - 1
} else {
0
};
let mailbox_unit = mem::size_of::<(u64, BasicMailbox<N>)>();
let state_unit = mem::size_of::<FsmState<N>>();
// Every message in crossbeam sender needs 8 bytes to store state.
let message_unit = mem::size_of::<N::Message>() + 8;
// crossbeam unbounded channel sender has a list of blocks. Every block has 31 unit
// and every sender has at least one sender.
let sender_block_unit = 31;
RouterTrace {
alive: (mailbox_unit * 8 / 7 // hashmap uses 7/8 of allocated memory.
+ state_unit + message_unit * sender_block_unit)
* alive,
leak: (state_unit + message_unit * sender_block_unit) * leak,
}
}
}
impl<N: Fsm, C: Fsm, Ns: Clone, Cs: Clone> Clone for Router<N, C, Ns, Cs> {
fn clone(&self) -> Router<N, C, Ns, Cs> {
Router {
normals: self.normals.clone(),
caches: Cell::new(LruCache::with_capacity_and_sample(1024, 7)),
control_box: self.control_box.clone(),
// These two schedulers should be unified as single one. However
// it's not possible to write FsmScheduler<Fsm=C> + FsmScheduler<Fsm=N>
// for now.
normal_scheduler: self.normal_scheduler.clone(),
control_scheduler: self.control_scheduler.clone(),
shutdown: self.shutdown.clone(),
state_cnt: self.state_cnt.clone(),
}
}
}
|
{
let mut msg = Some(msg);
let res = self.check_do(addr, |mailbox| {
let m = msg.take().unwrap();
match mailbox.try_send(m, &self.normal_scheduler) {
Ok(()) => Some(Ok(())),
r @ Err(TrySendError::Full(_)) => {
CHANNEL_FULL_COUNTER_VEC
.with_label_values(&["normal"])
.inc();
Some(r)
}
Err(TrySendError::Disconnected(m)) => {
msg = Some(m);
None
}
}
});
match res {
CheckDoResult::Valid(r) => Either::Left(r),
|
identifier_body
|
router.rs
|
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use crate::fsm::{Fsm, FsmScheduler, FsmState};
use crate::mailbox::{BasicMailbox, Mailbox};
use crate::metrics::CHANNEL_FULL_COUNTER_VEC;
use collections::HashMap;
use crossbeam::channel::{SendError, TrySendError};
use std::cell::Cell;
use std::mem;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use tikv_util::lru::LruCache;
use tikv_util::Either;
use tikv_util::{debug, info};
/// A struct that traces the approximate memory usage of router.
#[derive(Default)]
pub struct RouterTrace {
pub alive: usize,
pub leak: usize,
}
struct NormalMailMap<N: Fsm> {
map: HashMap<u64, BasicMailbox<N>>,
// Count of Mailboxes that is stored in `map`.
alive_cnt: Arc<AtomicUsize>,
}
enum CheckDoResult<T> {
NotExist,
Invalid,
Valid(T),
}
/// Router route messages to its target mailbox.
///
/// Every fsm has a mailbox, hence it's necessary to have an address book
/// that can deliver messages to specified fsm, which is exact router.
///
/// In our abstract model, every batch system has two different kind of
/// fsms. First is normal fsm, which does the common work like peers in a
/// raftstore model or apply delegate in apply model. Second is control fsm,
/// which does some work that requires a global view of resources or creates
/// missing fsm for specified address. Normal fsm and control fsm can have
/// different scheduler, but this is not required.
pub struct Router<N: Fsm, C: Fsm, Ns, Cs> {
normals: Arc<Mutex<NormalMailMap<N>>>,
caches: Cell<LruCache<u64, BasicMailbox<N>>>,
pub(super) control_box: BasicMailbox<C>,
// TODO: These two schedulers should be unified as single one. However
// it's not possible to write FsmScheduler<Fsm=C> + FsmScheduler<Fsm=N>
// for now.
pub(crate) normal_scheduler: Ns,
control_scheduler: Cs,
// Count of Mailboxes that is not destroyed.
// Added when a Mailbox created, and subtracted it when a Mailbox destroyed.
state_cnt: Arc<AtomicUsize>,
// Indicates the router is shutdown down or not.
shutdown: Arc<AtomicBool>,
}
impl<N, C, Ns, Cs> Router<N, C, Ns, Cs>
where
N: Fsm,
C: Fsm,
Ns: FsmScheduler<Fsm = N> + Clone,
Cs: FsmScheduler<Fsm = C> + Clone,
{
pub(super) fn new(
control_box: BasicMailbox<C>,
normal_scheduler: Ns,
control_scheduler: Cs,
state_cnt: Arc<AtomicUsize>,
) -> Router<N, C, Ns, Cs> {
Router {
normals: Arc::new(Mutex::new(NormalMailMap {
map: HashMap::default(),
alive_cnt: Arc::default(),
})),
caches: Cell::new(LruCache::with_capacity_and_sample(1024, 7)),
control_box,
normal_scheduler,
control_scheduler,
state_cnt,
shutdown: Arc::new(AtomicBool::new(false)),
}
}
/// The `Router` has been already shutdown or not.
pub fn is_shutdown(&self) -> bool {
self.shutdown.load(Ordering::SeqCst)
}
/// A helper function that tries to unify a common access pattern to
/// mailbox.
///
/// Generally, when sending a message to a mailbox, cache should be
/// check first, if not found, lock should be acquired.
///
/// Returns None means there is no mailbox inside the normal registry.
/// Some(None) means there is expected mailbox inside the normal registry
/// but it returns None after apply the given function. Some(Some) means
/// the given function returns Some and cache is updated if it's invalid.
#[inline]
fn check_do<F, R>(&self, addr: u64, mut f: F) -> CheckDoResult<R>
where
F: FnMut(&BasicMailbox<N>) -> Option<R>,
{
let caches = unsafe { &mut *self.caches.as_ptr() };
let mut connected = true;
if let Some(mailbox) = caches.get(&addr) {
match f(mailbox) {
Some(r) => return CheckDoResult::Valid(r),
None => {
connected = false;
}
}
}
let (cnt, mailbox) = {
let mut boxes = self.normals.lock().unwrap();
let cnt = boxes.map.len();
let b = match boxes.map.get_mut(&addr) {
Some(mailbox) => mailbox.clone(),
None => {
drop(boxes);
if!connected {
caches.remove(&addr);
}
return CheckDoResult::NotExist;
}
};
(cnt, b)
};
if cnt > caches.capacity() || cnt < caches.capacity() / 2 {
caches.resize(cnt);
}
let res = f(&mailbox);
match res {
Some(r) => {
caches.insert(addr, mailbox);
CheckDoResult::Valid(r)
}
None => {
if!connected {
caches.remove(&addr);
}
CheckDoResult::Invalid
}
}
}
/// Register a mailbox with given address.
pub fn register(&self, addr: u64, mailbox: BasicMailbox<N>) {
let mut normals = self.normals.lock().unwrap();
if let Some(mailbox) = normals.map.insert(addr, mailbox) {
mailbox.close();
}
normals
.alive_cnt
.store(normals.map.len(), Ordering::Relaxed);
}
pub fn register_all(&self, mailboxes: Vec<(u64, BasicMailbox<N>)>) {
let mut normals = self.normals.lock().unwrap();
normals.map.reserve(mailboxes.len());
for (addr, mailbox) in mailboxes {
if let Some(m) = normals.map.insert(addr, mailbox) {
m.close();
}
}
normals
.alive_cnt
.store(normals.map.len(), Ordering::Relaxed);
}
/// Get the mailbox of specified address.
pub fn
|
(&self, addr: u64) -> Option<Mailbox<N, Ns>> {
let res = self.check_do(addr, |mailbox| {
if mailbox.is_connected() {
Some(Mailbox::new(mailbox.clone(), self.normal_scheduler.clone()))
} else {
None
}
});
match res {
CheckDoResult::Valid(r) => Some(r),
_ => None,
}
}
/// Get the mailbox of control fsm.
pub fn control_mailbox(&self) -> Mailbox<C, Cs> {
Mailbox::new(self.control_box.clone(), self.control_scheduler.clone())
}
/// Try to send a message to specified address.
///
/// If Either::Left is returned, then the message is sent. Otherwise,
/// it indicates mailbox is not found.
#[inline]
pub fn try_send(
&self,
addr: u64,
msg: N::Message,
) -> Either<Result<(), TrySendError<N::Message>>, N::Message> {
let mut msg = Some(msg);
let res = self.check_do(addr, |mailbox| {
let m = msg.take().unwrap();
match mailbox.try_send(m, &self.normal_scheduler) {
Ok(()) => Some(Ok(())),
r @ Err(TrySendError::Full(_)) => {
CHANNEL_FULL_COUNTER_VEC
.with_label_values(&["normal"])
.inc();
Some(r)
}
Err(TrySendError::Disconnected(m)) => {
msg = Some(m);
None
}
}
});
match res {
CheckDoResult::Valid(r) => Either::Left(r),
CheckDoResult::Invalid => Either::Left(Err(TrySendError::Disconnected(msg.unwrap()))),
CheckDoResult::NotExist => Either::Right(msg.unwrap()),
}
}
/// Send the message to specified address.
#[inline]
pub fn send(&self, addr: u64, msg: N::Message) -> Result<(), TrySendError<N::Message>> {
match self.try_send(addr, msg) {
Either::Left(res) => res,
Either::Right(m) => Err(TrySendError::Disconnected(m)),
}
}
/// Force sending message to specified address despite the capacity
/// limit of mailbox.
#[inline]
pub fn force_send(&self, addr: u64, msg: N::Message) -> Result<(), SendError<N::Message>> {
match self.send(addr, msg) {
Ok(()) => Ok(()),
Err(TrySendError::Full(m)) => {
let caches = unsafe { &mut *self.caches.as_ptr() };
caches
.get(&addr)
.unwrap()
.force_send(m, &self.normal_scheduler)
}
Err(TrySendError::Disconnected(m)) => {
if self.is_shutdown() {
Ok(())
} else {
Err(SendError(m))
}
}
}
}
/// Force sending message to control fsm.
#[inline]
pub fn send_control(&self, msg: C::Message) -> Result<(), TrySendError<C::Message>> {
match self.control_box.try_send(msg, &self.control_scheduler) {
Ok(()) => Ok(()),
r @ Err(TrySendError::Full(_)) => {
CHANNEL_FULL_COUNTER_VEC
.with_label_values(&["control"])
.inc();
r
}
r => r,
}
}
/// Try to notify all normal fsm a message.
pub fn broadcast_normal(&self, mut msg_gen: impl FnMut() -> N::Message) {
let mailboxes = self.normals.lock().unwrap();
for mailbox in mailboxes.map.values() {
let _ = mailbox.force_send(msg_gen(), &self.normal_scheduler);
}
}
/// Try to notify all fsm that the cluster is being shutdown.
pub fn broadcast_shutdown(&self) {
info!("broadcasting shutdown");
self.shutdown.store(true, Ordering::SeqCst);
unsafe { &mut *self.caches.as_ptr() }.clear();
let mut mailboxes = self.normals.lock().unwrap();
for (addr, mailbox) in mailboxes.map.drain() {
debug!("[region {}] shutdown mailbox", addr);
mailbox.close();
}
self.control_box.close();
self.normal_scheduler.shutdown();
self.control_scheduler.shutdown();
}
/// Close the mailbox of address.
pub fn close(&self, addr: u64) {
info!("[region {}] shutdown mailbox", addr);
unsafe { &mut *self.caches.as_ptr() }.remove(&addr);
let mut mailboxes = self.normals.lock().unwrap();
if let Some(mb) = mailboxes.map.remove(&addr) {
mb.close();
}
mailboxes
.alive_cnt
.store(mailboxes.map.len(), Ordering::Relaxed);
}
pub fn clear_cache(&self) {
unsafe { &mut *self.caches.as_ptr() }.clear();
}
pub fn state_cnt(&self) -> &Arc<AtomicUsize> {
&self.state_cnt
}
pub fn alive_cnt(&self) -> Arc<AtomicUsize> {
self.normals.lock().unwrap().alive_cnt.clone()
}
pub fn trace(&self) -> RouterTrace {
let alive = self.normals.lock().unwrap().alive_cnt.clone();
let total = self.state_cnt.load(Ordering::Relaxed);
let alive = alive.load(Ordering::Relaxed);
// 1 represents the control fsm.
let leak = if total > alive + 1 {
total - alive - 1
} else {
0
};
let mailbox_unit = mem::size_of::<(u64, BasicMailbox<N>)>();
let state_unit = mem::size_of::<FsmState<N>>();
// Every message in crossbeam sender needs 8 bytes to store state.
let message_unit = mem::size_of::<N::Message>() + 8;
// crossbeam unbounded channel sender has a list of blocks. Every block has 31 unit
// and every sender has at least one sender.
let sender_block_unit = 31;
RouterTrace {
alive: (mailbox_unit * 8 / 7 // hashmap uses 7/8 of allocated memory.
+ state_unit + message_unit * sender_block_unit)
* alive,
leak: (state_unit + message_unit * sender_block_unit) * leak,
}
}
}
impl<N: Fsm, C: Fsm, Ns: Clone, Cs: Clone> Clone for Router<N, C, Ns, Cs> {
fn clone(&self) -> Router<N, C, Ns, Cs> {
Router {
normals: self.normals.clone(),
caches: Cell::new(LruCache::with_capacity_and_sample(1024, 7)),
control_box: self.control_box.clone(),
// These two schedulers should be unified as single one. However
// it's not possible to write FsmScheduler<Fsm=C> + FsmScheduler<Fsm=N>
// for now.
normal_scheduler: self.normal_scheduler.clone(),
control_scheduler: self.control_scheduler.clone(),
shutdown: self.shutdown.clone(),
state_cnt: self.state_cnt.clone(),
}
}
}
|
mailbox
|
identifier_name
|
env.rs
|
#![crate_id(name="env", vers="1.0.0", author="LeoTestard")]
/*
* This file is part of the uutils coreutils package.
*
* (c) Jordi Boggiano <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: env (GNU coreutils) 8.13 */
#![allow(non_camel_case_types)]
use std::os;
struct options {
ignore_env: bool,
null: bool,
unsets: Vec<String>,
sets: Vec<(String, String)>,
program: Vec<String>
}
fn usage(prog: &str) {
println!("Usage: {:s} [OPTION]... [-] [NAME=VALUE]... [COMMAND [ARG]...]", prog);
println!("Set each NAME to VALUE in the environment and run COMMAND\n");
println!("Possible options are:");
println!(" -i --ignore-environment\t start with an empty environment");
println!(" -0 --null \t end each output line with a 0 byte rather than newline");
println!(" -u --unset NAME \t remove variable from the environment");
println!(" -h --help \t display this help and exit");
println!(" -V --version \t output version information and exit\n");
println!("A mere - implies -i. If no COMMAND, print the resulting environment");
}
fn version()
|
// print name=value env pairs on screen
// if null is true, separate pairs with a \0, \n otherwise
fn print_env(null: bool) {
let env = std::os::env();
for &(ref n, ref v) in env.iter() {
print!("{:s}={:s}{:c}",
n.as_slice(),
v.as_slice(),
if null { '\0' } else { '\n' }
);
}
}
#[allow(dead_code)]
fn main() { uumain(os::args()); }
pub fn uumain(args: Vec<String>) {
let prog = args.get(0).as_slice();
// to handle arguments the same way than GNU env, we can't use getopts
let mut opts = box options {
ignore_env: false,
null: false,
unsets: vec!(),
sets: vec!(),
program: vec!()
};
let mut wait_cmd = false;
let mut iter = args.iter();
iter.next(); // skip program
let mut item = iter.next();
// the for loop doesn't work here,
// because we need sometines to read 2 items forward,
// and the iter can't be borrowed twice
while item!= None {
let opt = item.unwrap();
if wait_cmd {
// we still accept NAME=VAL here but not other options
let mut sp = opt.as_slice().splitn('=', 1);
let name = sp.next();
let value = sp.next();
match (name, value) {
(Some(n), Some(v)) => {
opts.sets.push((n.into_string(), v.into_string()));
}
_ => {
// read the program now
opts.program.push(opt.to_string());
break;
}
}
} else if opt.as_slice().starts_with("--") {
match opt.as_slice() {
"--help" => { usage(prog); return }
"--version" => { version(); return }
"--ignore-environment" => opts.ignore_env = true,
"--null" => opts.null = true,
"--unset" => {
let var = iter.next();
match var {
None => println!("{:s}: this option requires an argument: {:s}", prog, opt.as_slice()),
Some(s) => opts.unsets.push(s.to_string())
}
}
_ => {
println!("{:s}: invalid option \"{:s}\"", prog, *opt);
println!("Type \"{:s} --help\" for detailed informations", prog);
return
}
}
} else if opt.as_slice().starts_with("-") {
if opt.len() == 0 {
// implies -i and stop parsing opts
wait_cmd = true;
opts.ignore_env = true;
continue;
}
let mut chars = opt.as_slice().chars();
chars.next();
for c in chars {
// short versions of options
match c {
'h' => { usage(prog); return }
'V' => { version(); return }
'i' => opts.ignore_env = true,
'0' => opts.null = true,
'u' => {
let var = iter.next();
match var {
None => println!("{:s}: this option requires an argument: {:s}", prog, opt.as_slice()),
Some(s) => opts.unsets.push(s.to_string())
}
}
_ => {
println!("{:s}: illegal option -- {:c}", prog, c);
println!("Type \"{:s} --help\" for detailed informations", prog);
return
}
}
}
} else {
// is it a NAME=VALUE like opt?
let mut sp = opt.as_slice().splitn('=', 1);
let name = sp.next();
let value = sp.next();
match (name, value) {
(Some(n), Some(v)) => {
// yes
opts.sets.push((n.into_string(), v.into_string()));
wait_cmd = true;
}
// no, its a program-like opt
_ => {
opts.program.push(opt.to_string());
break;
}
}
}
item = iter.next();
}
// read program arguments
for opt in iter {
opts.program.push(opt.to_string());
}
let env = std::os::env();
if opts.ignore_env {
for &(ref name, _) in env.iter() {
std::os::unsetenv(name.as_slice())
}
}
for ref name in opts.unsets.iter() {
std::os::unsetenv(name.as_slice())
}
for &(ref name, ref val) in opts.sets.iter() {
std::os::setenv(name.as_slice(), val.as_slice())
}
if opts.program.len() >= 1 {
use std::io::process::{Command, InheritFd};
let prog = opts.program.get(0).clone();
let args = opts.program.slice_from(1);
match Command::new(prog).args(args).stdin(InheritFd(0)).stdout(InheritFd(1)).stderr(InheritFd(2)).status() {
Ok(exit) =>
std::os::set_exit_status(match exit {
std::io::process::ExitStatus(s) => s,
_ => 1
}),
Err(_) => std::os::set_exit_status(1)
}
} else {
// no program provided
print_env(opts.null);
}
}
|
{
println!("env 1.0.0");
}
|
identifier_body
|
env.rs
|
#![crate_id(name="env", vers="1.0.0", author="LeoTestard")]
/*
* This file is part of the uutils coreutils package.
*
* (c) Jordi Boggiano <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: env (GNU coreutils) 8.13 */
#![allow(non_camel_case_types)]
use std::os;
struct options {
ignore_env: bool,
null: bool,
unsets: Vec<String>,
sets: Vec<(String, String)>,
program: Vec<String>
}
fn usage(prog: &str) {
println!("Usage: {:s} [OPTION]... [-] [NAME=VALUE]... [COMMAND [ARG]...]", prog);
println!("Set each NAME to VALUE in the environment and run COMMAND\n");
println!("Possible options are:");
println!(" -i --ignore-environment\t start with an empty environment");
println!(" -0 --null \t end each output line with a 0 byte rather than newline");
println!(" -u --unset NAME \t remove variable from the environment");
println!(" -h --help \t display this help and exit");
println!(" -V --version \t output version information and exit\n");
println!("A mere - implies -i. If no COMMAND, print the resulting environment");
}
fn
|
() {
println!("env 1.0.0");
}
// print name=value env pairs on screen
// if null is true, separate pairs with a \0, \n otherwise
fn print_env(null: bool) {
let env = std::os::env();
for &(ref n, ref v) in env.iter() {
print!("{:s}={:s}{:c}",
n.as_slice(),
v.as_slice(),
if null { '\0' } else { '\n' }
);
}
}
#[allow(dead_code)]
fn main() { uumain(os::args()); }
pub fn uumain(args: Vec<String>) {
let prog = args.get(0).as_slice();
// to handle arguments the same way than GNU env, we can't use getopts
let mut opts = box options {
ignore_env: false,
null: false,
unsets: vec!(),
sets: vec!(),
program: vec!()
};
let mut wait_cmd = false;
let mut iter = args.iter();
iter.next(); // skip program
let mut item = iter.next();
// the for loop doesn't work here,
// because we need sometines to read 2 items forward,
// and the iter can't be borrowed twice
while item!= None {
let opt = item.unwrap();
if wait_cmd {
// we still accept NAME=VAL here but not other options
let mut sp = opt.as_slice().splitn('=', 1);
let name = sp.next();
let value = sp.next();
match (name, value) {
(Some(n), Some(v)) => {
opts.sets.push((n.into_string(), v.into_string()));
}
_ => {
// read the program now
opts.program.push(opt.to_string());
break;
}
}
} else if opt.as_slice().starts_with("--") {
match opt.as_slice() {
"--help" => { usage(prog); return }
"--version" => { version(); return }
"--ignore-environment" => opts.ignore_env = true,
"--null" => opts.null = true,
"--unset" => {
let var = iter.next();
match var {
None => println!("{:s}: this option requires an argument: {:s}", prog, opt.as_slice()),
Some(s) => opts.unsets.push(s.to_string())
}
}
_ => {
println!("{:s}: invalid option \"{:s}\"", prog, *opt);
println!("Type \"{:s} --help\" for detailed informations", prog);
return
}
}
} else if opt.as_slice().starts_with("-") {
if opt.len() == 0 {
// implies -i and stop parsing opts
wait_cmd = true;
opts.ignore_env = true;
continue;
}
let mut chars = opt.as_slice().chars();
chars.next();
for c in chars {
// short versions of options
match c {
'h' => { usage(prog); return }
'V' => { version(); return }
'i' => opts.ignore_env = true,
'0' => opts.null = true,
'u' => {
let var = iter.next();
match var {
None => println!("{:s}: this option requires an argument: {:s}", prog, opt.as_slice()),
Some(s) => opts.unsets.push(s.to_string())
}
}
_ => {
println!("{:s}: illegal option -- {:c}", prog, c);
println!("Type \"{:s} --help\" for detailed informations", prog);
return
}
}
}
} else {
// is it a NAME=VALUE like opt?
let mut sp = opt.as_slice().splitn('=', 1);
let name = sp.next();
let value = sp.next();
match (name, value) {
(Some(n), Some(v)) => {
// yes
opts.sets.push((n.into_string(), v.into_string()));
wait_cmd = true;
}
// no, its a program-like opt
_ => {
opts.program.push(opt.to_string());
break;
}
}
}
item = iter.next();
}
// read program arguments
for opt in iter {
opts.program.push(opt.to_string());
}
let env = std::os::env();
if opts.ignore_env {
for &(ref name, _) in env.iter() {
std::os::unsetenv(name.as_slice())
}
}
for ref name in opts.unsets.iter() {
std::os::unsetenv(name.as_slice())
}
for &(ref name, ref val) in opts.sets.iter() {
std::os::setenv(name.as_slice(), val.as_slice())
}
if opts.program.len() >= 1 {
use std::io::process::{Command, InheritFd};
let prog = opts.program.get(0).clone();
let args = opts.program.slice_from(1);
match Command::new(prog).args(args).stdin(InheritFd(0)).stdout(InheritFd(1)).stderr(InheritFd(2)).status() {
Ok(exit) =>
std::os::set_exit_status(match exit {
std::io::process::ExitStatus(s) => s,
_ => 1
}),
Err(_) => std::os::set_exit_status(1)
}
} else {
// no program provided
print_env(opts.null);
}
}
|
version
|
identifier_name
|
env.rs
|
#![crate_id(name="env", vers="1.0.0", author="LeoTestard")]
/*
* This file is part of the uutils coreutils package.
*
* (c) Jordi Boggiano <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: env (GNU coreutils) 8.13 */
#![allow(non_camel_case_types)]
use std::os;
struct options {
ignore_env: bool,
null: bool,
unsets: Vec<String>,
sets: Vec<(String, String)>,
program: Vec<String>
}
fn usage(prog: &str) {
println!("Usage: {:s} [OPTION]... [-] [NAME=VALUE]... [COMMAND [ARG]...]", prog);
println!("Set each NAME to VALUE in the environment and run COMMAND\n");
println!("Possible options are:");
println!(" -i --ignore-environment\t start with an empty environment");
println!(" -0 --null \t end each output line with a 0 byte rather than newline");
println!(" -u --unset NAME \t remove variable from the environment");
println!(" -h --help \t display this help and exit");
println!(" -V --version \t output version information and exit\n");
println!("A mere - implies -i. If no COMMAND, print the resulting environment");
}
fn version() {
println!("env 1.0.0");
}
|
for &(ref n, ref v) in env.iter() {
print!("{:s}={:s}{:c}",
n.as_slice(),
v.as_slice(),
if null { '\0' } else { '\n' }
);
}
}
#[allow(dead_code)]
fn main() { uumain(os::args()); }
pub fn uumain(args: Vec<String>) {
let prog = args.get(0).as_slice();
// to handle arguments the same way than GNU env, we can't use getopts
let mut opts = box options {
ignore_env: false,
null: false,
unsets: vec!(),
sets: vec!(),
program: vec!()
};
let mut wait_cmd = false;
let mut iter = args.iter();
iter.next(); // skip program
let mut item = iter.next();
// the for loop doesn't work here,
// because we need sometines to read 2 items forward,
// and the iter can't be borrowed twice
while item!= None {
let opt = item.unwrap();
if wait_cmd {
// we still accept NAME=VAL here but not other options
let mut sp = opt.as_slice().splitn('=', 1);
let name = sp.next();
let value = sp.next();
match (name, value) {
(Some(n), Some(v)) => {
opts.sets.push((n.into_string(), v.into_string()));
}
_ => {
// read the program now
opts.program.push(opt.to_string());
break;
}
}
} else if opt.as_slice().starts_with("--") {
match opt.as_slice() {
"--help" => { usage(prog); return }
"--version" => { version(); return }
"--ignore-environment" => opts.ignore_env = true,
"--null" => opts.null = true,
"--unset" => {
let var = iter.next();
match var {
None => println!("{:s}: this option requires an argument: {:s}", prog, opt.as_slice()),
Some(s) => opts.unsets.push(s.to_string())
}
}
_ => {
println!("{:s}: invalid option \"{:s}\"", prog, *opt);
println!("Type \"{:s} --help\" for detailed informations", prog);
return
}
}
} else if opt.as_slice().starts_with("-") {
if opt.len() == 0 {
// implies -i and stop parsing opts
wait_cmd = true;
opts.ignore_env = true;
continue;
}
let mut chars = opt.as_slice().chars();
chars.next();
for c in chars {
// short versions of options
match c {
'h' => { usage(prog); return }
'V' => { version(); return }
'i' => opts.ignore_env = true,
'0' => opts.null = true,
'u' => {
let var = iter.next();
match var {
None => println!("{:s}: this option requires an argument: {:s}", prog, opt.as_slice()),
Some(s) => opts.unsets.push(s.to_string())
}
}
_ => {
println!("{:s}: illegal option -- {:c}", prog, c);
println!("Type \"{:s} --help\" for detailed informations", prog);
return
}
}
}
} else {
// is it a NAME=VALUE like opt?
let mut sp = opt.as_slice().splitn('=', 1);
let name = sp.next();
let value = sp.next();
match (name, value) {
(Some(n), Some(v)) => {
// yes
opts.sets.push((n.into_string(), v.into_string()));
wait_cmd = true;
}
// no, its a program-like opt
_ => {
opts.program.push(opt.to_string());
break;
}
}
}
item = iter.next();
}
// read program arguments
for opt in iter {
opts.program.push(opt.to_string());
}
let env = std::os::env();
if opts.ignore_env {
for &(ref name, _) in env.iter() {
std::os::unsetenv(name.as_slice())
}
}
for ref name in opts.unsets.iter() {
std::os::unsetenv(name.as_slice())
}
for &(ref name, ref val) in opts.sets.iter() {
std::os::setenv(name.as_slice(), val.as_slice())
}
if opts.program.len() >= 1 {
use std::io::process::{Command, InheritFd};
let prog = opts.program.get(0).clone();
let args = opts.program.slice_from(1);
match Command::new(prog).args(args).stdin(InheritFd(0)).stdout(InheritFd(1)).stderr(InheritFd(2)).status() {
Ok(exit) =>
std::os::set_exit_status(match exit {
std::io::process::ExitStatus(s) => s,
_ => 1
}),
Err(_) => std::os::set_exit_status(1)
}
} else {
// no program provided
print_env(opts.null);
}
}
|
// print name=value env pairs on screen
// if null is true, separate pairs with a \0, \n otherwise
fn print_env(null: bool) {
let env = std::os::env();
|
random_line_split
|
dst-bad-deep.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Try to initialise a DST struct where the lost information is deeply nested.
// This is an error because it requires an unsized rvalue. This is a problem
// because it would require stack allocation of an unsized temporary (*g in the
// test).
struct Fat<Sized? T> {
ptr: T
|
let g: &Fat<[int]> = &f;
let h: &Fat<Fat<[int]>> = &Fat { ptr: *g };
//~^ ERROR the trait `core::kinds::Sized` is not implemented
}
|
}
pub fn main() {
let f: Fat<[int, ..3]> = Fat { ptr: [5i, 6, 7] };
|
random_line_split
|
dst-bad-deep.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Try to initialise a DST struct where the lost information is deeply nested.
// This is an error because it requires an unsized rvalue. This is a problem
// because it would require stack allocation of an unsized temporary (*g in the
// test).
struct
|
<Sized? T> {
ptr: T
}
pub fn main() {
let f: Fat<[int,..3]> = Fat { ptr: [5i, 6, 7] };
let g: &Fat<[int]> = &f;
let h: &Fat<Fat<[int]>> = &Fat { ptr: *g };
//~^ ERROR the trait `core::kinds::Sized` is not implemented
}
|
Fat
|
identifier_name
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains shared types and messages for use by devtools/script.
//! The traits are here instead of in script so that the devtools crate can be
//! modified independently of the rest of Servo.
#![crate_name = "devtools_traits"]
#![crate_type = "rlib"]
#![allow(non_snake_case)]
#![feature(custom_derive, plugin)]
#![plugin(serde_macros)]
#[macro_use]
extern crate bitflags;
extern crate ipc_channel;
extern crate msg;
extern crate rustc_serialize;
extern crate serde;
extern crate url;
extern crate hyper;
extern crate util;
extern crate time;
use rustc_serialize::{Decodable, Decoder};
use msg::constellation_msg::{PipelineId, WorkerId};
use util::str::DOMString;
use url::Url;
|
use hyper::http::RawStatus;
use hyper::method::Method;
use ipc_channel::ipc::IpcSender;
use time::Duration;
use std::net::TcpStream;
use std::sync::mpsc::{Receiver, Sender};
pub type DevtoolsControlChan = Sender<DevtoolsControlMsg>;
pub type DevtoolsControlPort = Receiver<DevtoolScriptControlMsg>;
// Information would be attached to NewGlobal to be received and show in devtools.
// Extend these fields if we need more information.
#[derive(Deserialize, Serialize)]
pub struct DevtoolsPageInfo {
pub title: DOMString,
pub url: Url
}
/// Messages to the instruct the devtools server to update its known actors/state
/// according to changes in the browser.
pub enum DevtoolsControlMsg {
FromChrome(ChromeToDevtoolsControlMsg),
FromScript(ScriptToDevtoolsControlMsg),
}
pub enum ChromeToDevtoolsControlMsg {
AddClient(TcpStream),
FramerateTick(String, f64),
ServerExitMsg,
NetworkEventMessage(String, NetworkEvent),
}
#[derive(Deserialize, Serialize)]
pub enum ScriptToDevtoolsControlMsg {
NewGlobal((PipelineId, Option<WorkerId>),
IpcSender<DevtoolScriptControlMsg>,
DevtoolsPageInfo),
SendConsoleMessage(PipelineId, ConsoleMessage),
}
/// Serialized JS return values
/// TODO: generalize this beyond the EvaluateJS message?
#[derive(Deserialize, Serialize)]
pub enum EvaluateJSReply {
VoidValue,
NullValue,
BooleanValue(bool),
NumberValue(f64),
StringValue(String),
ActorValue(String),
}
#[derive(Deserialize, Serialize)]
pub struct AttrInfo {
pub namespace: String,
pub name: String,
pub value: String,
}
#[derive(Deserialize, Serialize)]
pub struct NodeInfo {
pub uniqueId: String,
pub baseURI: String,
pub parent: String,
pub nodeType: u16,
pub namespaceURI: String,
pub nodeName: String,
pub numChildren: usize,
pub name: String,
pub publicId: String,
pub systemId: String,
pub attrs: Vec<AttrInfo>,
pub isDocumentElement: bool,
pub shortValue: String,
pub incompleteValue: bool,
}
#[derive(PartialEq, Eq, Deserialize, Serialize)]
pub enum TracingMetadata {
Default,
IntervalStart,
IntervalEnd,
Event,
EventBacktrace,
}
#[derive(Deserialize, Serialize)]
pub struct TimelineMarker {
pub name: String,
pub metadata: TracingMetadata,
pub time: PreciseTime,
pub stack: Option<Vec<()>>,
}
#[derive(PartialEq, Eq, Hash, Clone, Deserialize, Serialize)]
pub enum TimelineMarkerType {
Reflow,
DOMEvent,
}
/// Messages to process in a particular script task, as instructed by a devtools client.
#[derive(Deserialize, Serialize)]
pub enum DevtoolScriptControlMsg {
EvaluateJS(PipelineId, String, IpcSender<EvaluateJSReply>),
GetRootNode(PipelineId, IpcSender<NodeInfo>),
GetDocumentElement(PipelineId, IpcSender<NodeInfo>),
GetChildren(PipelineId, String, IpcSender<Vec<NodeInfo>>),
GetLayout(PipelineId, String, IpcSender<(f32, f32)>),
GetCachedMessages(PipelineId, CachedConsoleMessageTypes, IpcSender<Vec<CachedConsoleMessage>>),
ModifyAttribute(PipelineId, String, Vec<Modification>),
WantsLiveNotifications(PipelineId, bool),
SetTimelineMarkers(PipelineId, Vec<TimelineMarkerType>, IpcSender<TimelineMarker>),
DropTimelineMarkers(PipelineId, Vec<TimelineMarkerType>),
RequestAnimationFrame(PipelineId, IpcSender<f64>),
}
#[derive(RustcEncodable, Deserialize, Serialize)]
pub struct Modification {
pub attributeName: String,
pub newValue: Option<String>,
}
impl Decodable for Modification {
fn decode<D: Decoder>(d: &mut D) -> Result<Modification, D::Error> {
d.read_struct("Modification", 2, |d|
Ok(Modification {
attributeName: try!(d.read_struct_field("attributeName", 0, |d| Decodable::decode(d))),
newValue: match d.read_struct_field("newValue", 1, |d| Decodable::decode(d)) {
Ok(opt) => opt,
Err(_) => None
}
})
)
}
}
#[derive(Clone, Deserialize, Serialize)]
pub enum LogLevel {
Log,
Debug,
Info,
Warn,
Error,
}
#[derive(Clone, Deserialize, Serialize)]
pub struct ConsoleMessage {
pub message: String,
pub logLevel: LogLevel,
pub filename: String,
pub lineNumber: u32,
pub columnNumber: u32,
}
bitflags! {
#[derive(Deserialize, Serialize)]
flags CachedConsoleMessageTypes: u8 {
const PAGE_ERROR = 1 << 0,
const CONSOLE_API = 1 << 1,
}
}
#[derive(RustcEncodable, Deserialize, Serialize)]
pub struct PageError {
pub _type: String,
pub errorMessage: String,
pub sourceName: String,
pub lineText: String,
pub lineNumber: u32,
pub columnNumber: u32,
pub category: String,
pub timeStamp: u64,
pub error: bool,
pub warning: bool,
pub exception: bool,
pub strict: bool,
pub private: bool,
}
#[derive(RustcEncodable, Deserialize, Serialize)]
pub struct ConsoleAPI {
pub _type: String,
pub level: String,
pub filename: String,
pub lineNumber: u32,
pub functionName: String,
pub timeStamp: u64,
pub private: bool,
pub arguments: Vec<String>,
}
#[derive(Deserialize, Serialize)]
pub enum CachedConsoleMessage {
PageError(PageError),
ConsoleAPI(ConsoleAPI),
}
#[derive(Clone)]
pub enum NetworkEvent {
HttpRequest(Url, Method, Headers, Option<Vec<u8>>),
HttpResponse(Option<Headers>, Option<RawStatus>, Option<Vec<u8>>)
}
impl TimelineMarker {
pub fn new(name: String, metadata: TracingMetadata) -> TimelineMarker {
TimelineMarker {
name: name,
metadata: metadata,
time: PreciseTime::now(),
stack: None,
}
}
}
/// A replacement for `time::PreciseTime` that isn't opaque, so we can serialize it.
///
/// The reason why this doesn't go upstream is that `time` is slated to be part of Rust's standard
/// library, which definitely can't have any dependencies on `serde`. But `serde` can't implement
/// `Deserialize` and `Serialize` itself, because `time::PreciseTime` is opaque! A Catch-22. So I'm
/// duplicating the definition here.
#[derive(Copy, Clone, Deserialize, Serialize)]
pub struct PreciseTime(u64);
impl PreciseTime {
pub fn now() -> PreciseTime {
PreciseTime(time::precise_time_ns())
}
pub fn to(&self, later: PreciseTime) -> Duration {
Duration::nanoseconds((later.0 - self.0) as i64)
}
}
|
use hyper::header::Headers;
|
random_line_split
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains shared types and messages for use by devtools/script.
//! The traits are here instead of in script so that the devtools crate can be
//! modified independently of the rest of Servo.
#![crate_name = "devtools_traits"]
#![crate_type = "rlib"]
#![allow(non_snake_case)]
#![feature(custom_derive, plugin)]
#![plugin(serde_macros)]
#[macro_use]
extern crate bitflags;
extern crate ipc_channel;
extern crate msg;
extern crate rustc_serialize;
extern crate serde;
extern crate url;
extern crate hyper;
extern crate util;
extern crate time;
use rustc_serialize::{Decodable, Decoder};
use msg::constellation_msg::{PipelineId, WorkerId};
use util::str::DOMString;
use url::Url;
use hyper::header::Headers;
use hyper::http::RawStatus;
use hyper::method::Method;
use ipc_channel::ipc::IpcSender;
use time::Duration;
use std::net::TcpStream;
use std::sync::mpsc::{Receiver, Sender};
pub type DevtoolsControlChan = Sender<DevtoolsControlMsg>;
pub type DevtoolsControlPort = Receiver<DevtoolScriptControlMsg>;
// Information would be attached to NewGlobal to be received and show in devtools.
// Extend these fields if we need more information.
#[derive(Deserialize, Serialize)]
pub struct DevtoolsPageInfo {
pub title: DOMString,
pub url: Url
}
/// Messages to the instruct the devtools server to update its known actors/state
/// according to changes in the browser.
pub enum DevtoolsControlMsg {
FromChrome(ChromeToDevtoolsControlMsg),
FromScript(ScriptToDevtoolsControlMsg),
}
pub enum ChromeToDevtoolsControlMsg {
AddClient(TcpStream),
FramerateTick(String, f64),
ServerExitMsg,
NetworkEventMessage(String, NetworkEvent),
}
#[derive(Deserialize, Serialize)]
pub enum ScriptToDevtoolsControlMsg {
NewGlobal((PipelineId, Option<WorkerId>),
IpcSender<DevtoolScriptControlMsg>,
DevtoolsPageInfo),
SendConsoleMessage(PipelineId, ConsoleMessage),
}
/// Serialized JS return values
/// TODO: generalize this beyond the EvaluateJS message?
#[derive(Deserialize, Serialize)]
pub enum EvaluateJSReply {
VoidValue,
NullValue,
BooleanValue(bool),
NumberValue(f64),
StringValue(String),
ActorValue(String),
}
#[derive(Deserialize, Serialize)]
pub struct AttrInfo {
pub namespace: String,
pub name: String,
pub value: String,
}
#[derive(Deserialize, Serialize)]
pub struct NodeInfo {
pub uniqueId: String,
pub baseURI: String,
pub parent: String,
pub nodeType: u16,
pub namespaceURI: String,
pub nodeName: String,
pub numChildren: usize,
pub name: String,
pub publicId: String,
pub systemId: String,
pub attrs: Vec<AttrInfo>,
pub isDocumentElement: bool,
pub shortValue: String,
pub incompleteValue: bool,
}
#[derive(PartialEq, Eq, Deserialize, Serialize)]
pub enum TracingMetadata {
Default,
IntervalStart,
IntervalEnd,
Event,
EventBacktrace,
}
#[derive(Deserialize, Serialize)]
pub struct TimelineMarker {
pub name: String,
pub metadata: TracingMetadata,
pub time: PreciseTime,
pub stack: Option<Vec<()>>,
}
#[derive(PartialEq, Eq, Hash, Clone, Deserialize, Serialize)]
pub enum TimelineMarkerType {
Reflow,
DOMEvent,
}
/// Messages to process in a particular script task, as instructed by a devtools client.
#[derive(Deserialize, Serialize)]
pub enum DevtoolScriptControlMsg {
EvaluateJS(PipelineId, String, IpcSender<EvaluateJSReply>),
GetRootNode(PipelineId, IpcSender<NodeInfo>),
GetDocumentElement(PipelineId, IpcSender<NodeInfo>),
GetChildren(PipelineId, String, IpcSender<Vec<NodeInfo>>),
GetLayout(PipelineId, String, IpcSender<(f32, f32)>),
GetCachedMessages(PipelineId, CachedConsoleMessageTypes, IpcSender<Vec<CachedConsoleMessage>>),
ModifyAttribute(PipelineId, String, Vec<Modification>),
WantsLiveNotifications(PipelineId, bool),
SetTimelineMarkers(PipelineId, Vec<TimelineMarkerType>, IpcSender<TimelineMarker>),
DropTimelineMarkers(PipelineId, Vec<TimelineMarkerType>),
RequestAnimationFrame(PipelineId, IpcSender<f64>),
}
#[derive(RustcEncodable, Deserialize, Serialize)]
pub struct Modification {
pub attributeName: String,
pub newValue: Option<String>,
}
impl Decodable for Modification {
fn decode<D: Decoder>(d: &mut D) -> Result<Modification, D::Error> {
d.read_struct("Modification", 2, |d|
Ok(Modification {
attributeName: try!(d.read_struct_field("attributeName", 0, |d| Decodable::decode(d))),
newValue: match d.read_struct_field("newValue", 1, |d| Decodable::decode(d)) {
Ok(opt) => opt,
Err(_) => None
}
})
)
}
}
#[derive(Clone, Deserialize, Serialize)]
pub enum LogLevel {
Log,
Debug,
Info,
Warn,
Error,
}
#[derive(Clone, Deserialize, Serialize)]
pub struct ConsoleMessage {
pub message: String,
pub logLevel: LogLevel,
pub filename: String,
pub lineNumber: u32,
pub columnNumber: u32,
}
bitflags! {
#[derive(Deserialize, Serialize)]
flags CachedConsoleMessageTypes: u8 {
const PAGE_ERROR = 1 << 0,
const CONSOLE_API = 1 << 1,
}
}
#[derive(RustcEncodable, Deserialize, Serialize)]
pub struct PageError {
pub _type: String,
pub errorMessage: String,
pub sourceName: String,
pub lineText: String,
pub lineNumber: u32,
pub columnNumber: u32,
pub category: String,
pub timeStamp: u64,
pub error: bool,
pub warning: bool,
pub exception: bool,
pub strict: bool,
pub private: bool,
}
#[derive(RustcEncodable, Deserialize, Serialize)]
pub struct ConsoleAPI {
pub _type: String,
pub level: String,
pub filename: String,
pub lineNumber: u32,
pub functionName: String,
pub timeStamp: u64,
pub private: bool,
pub arguments: Vec<String>,
}
#[derive(Deserialize, Serialize)]
pub enum CachedConsoleMessage {
PageError(PageError),
ConsoleAPI(ConsoleAPI),
}
#[derive(Clone)]
pub enum NetworkEvent {
HttpRequest(Url, Method, Headers, Option<Vec<u8>>),
HttpResponse(Option<Headers>, Option<RawStatus>, Option<Vec<u8>>)
}
impl TimelineMarker {
pub fn new(name: String, metadata: TracingMetadata) -> TimelineMarker {
TimelineMarker {
name: name,
metadata: metadata,
time: PreciseTime::now(),
stack: None,
}
}
}
/// A replacement for `time::PreciseTime` that isn't opaque, so we can serialize it.
///
/// The reason why this doesn't go upstream is that `time` is slated to be part of Rust's standard
/// library, which definitely can't have any dependencies on `serde`. But `serde` can't implement
/// `Deserialize` and `Serialize` itself, because `time::PreciseTime` is opaque! A Catch-22. So I'm
/// duplicating the definition here.
#[derive(Copy, Clone, Deserialize, Serialize)]
pub struct PreciseTime(u64);
impl PreciseTime {
pub fn now() -> PreciseTime {
PreciseTime(time::precise_time_ns())
}
pub fn
|
(&self, later: PreciseTime) -> Duration {
Duration::nanoseconds((later.0 - self.0) as i64)
}
}
|
to
|
identifier_name
|
package_id.rs
|
use std::cmp::Ordering;
use std::fmt::{self, Formatter};
use std::hash::Hash;
use std::hash;
use std::path::Path;
use std::sync::Arc;
use semver;
use serde::de;
use serde::ser;
use util::{CargoResult, ToSemver};
use core::source::SourceId;
use core::interning::InternedString;
/// Identifier for a specific version of a package in a specific source.
#[derive(Clone)]
pub struct PackageId {
inner: Arc<PackageIdInner>,
}
#[derive(PartialEq, PartialOrd, Eq, Ord)]
struct PackageIdInner {
name: InternedString,
version: semver::Version,
source_id: SourceId,
}
impl ser::Serialize for PackageId {
fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
s.collect_str(&format_args!(
"{} {} ({})",
self.inner.name,
self.inner.version,
self.inner.source_id.to_url()
))
}
}
impl<'de> de::Deserialize<'de> for PackageId {
fn deserialize<D>(d: D) -> Result<PackageId, D::Error>
where
D: de::Deserializer<'de>,
{
let string = String::deserialize(d)?;
let mut s = string.splitn(3,'');
let name = s.next().unwrap();
let version = match s.next() {
Some(s) => s,
None => return Err(de::Error::custom("invalid serialized PackageId")),
};
let version = semver::Version::parse(version).map_err(de::Error::custom)?;
let url = match s.next() {
Some(s) => s,
None => return Err(de::Error::custom("invalid serialized PackageId")),
};
let url = if url.starts_with('(') && url.ends_with(')') {
&url[1..url.len() - 1]
} else {
return Err(de::Error::custom("invalid serialized PackageId"));
};
let source_id = SourceId::from_url(url).map_err(de::Error::custom)?;
Ok(PackageId {
inner: Arc::new(PackageIdInner {
name: InternedString::new(name),
version,
source_id,
}),
})
}
}
impl Hash for PackageId {
fn hash<S: hash::Hasher>(&self, state: &mut S) {
self.inner.name.hash(state);
self.inner.version.hash(state);
self.inner.source_id.hash(state);
}
}
impl PartialEq for PackageId {
fn eq(&self, other: &PackageId) -> bool {
(*self.inner).eq(&*other.inner)
}
}
impl PartialOrd for PackageId {
fn partial_cmp(&self, other: &PackageId) -> Option<Ordering> {
(*self.inner).partial_cmp(&*other.inner)
}
}
impl Eq for PackageId {}
impl Ord for PackageId {
fn cmp(&self, other: &PackageId) -> Ordering {
(*self.inner).cmp(&*other.inner)
}
}
impl PackageId {
pub fn new<T: ToSemver>(name: &str, version: T, sid: &SourceId) -> CargoResult<PackageId> {
let v = version.to_semver()?;
Ok(PackageId {
inner: Arc::new(PackageIdInner {
name: InternedString::new(name),
version: v,
source_id: sid.clone(),
}),
})
}
pub fn name(&self) -> InternedString {
self.inner.name
}
pub fn version(&self) -> &semver::Version {
&self.inner.version
}
pub fn source_id(&self) -> &SourceId {
&self.inner.source_id
}
pub fn with_precise(&self, precise: Option<String>) -> PackageId {
PackageId {
inner: Arc::new(PackageIdInner {
name: self.inner.name,
version: self.inner.version.clone(),
source_id: self.inner.source_id.with_precise(precise),
}),
}
}
pub fn with_source_id(&self, source: &SourceId) -> PackageId {
PackageId {
inner: Arc::new(PackageIdInner {
name: self.inner.name,
version: self.inner.version.clone(),
source_id: source.clone(),
}),
}
}
pub fn stable_hash<'a>(&'a self, workspace: &'a Path) -> PackageIdStableHash<'a> {
PackageIdStableHash(self, workspace)
}
}
pub struct PackageIdStableHash<'a>(&'a PackageId, &'a Path);
impl<'a> Hash for PackageIdStableHash<'a> {
fn hash<S: hash::Hasher>(&self, state: &mut S) {
self.0.inner.name.hash(state);
self.0.inner.version.hash(state);
self.0.inner.source_id.stable_hash(self.1, state);
}
}
impl fmt::Display for PackageId {
fn
|
(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "{} v{}", self.inner.name, self.inner.version)?;
if!self.inner.source_id.is_default_registry() {
write!(f, " ({})", self.inner.source_id)?;
}
Ok(())
}
}
impl fmt::Debug for PackageId {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
f.debug_struct("PackageId")
.field("name", &self.inner.name)
.field("version", &self.inner.version.to_string())
.field("source", &self.inner.source_id.to_string())
.finish()
}
}
#[cfg(test)]
mod tests {
use super::PackageId;
use core::source::SourceId;
use sources::CRATES_IO;
use util::ToUrl;
#[test]
fn invalid_version_handled_nicely() {
let loc = CRATES_IO.to_url().unwrap();
let repo = SourceId::for_registry(&loc).unwrap();
assert!(PackageId::new("foo", "1.0", &repo).is_err());
assert!(PackageId::new("foo", "1", &repo).is_err());
assert!(PackageId::new("foo", "bar", &repo).is_err());
assert!(PackageId::new("foo", "", &repo).is_err());
}
}
|
fmt
|
identifier_name
|
package_id.rs
|
use std::cmp::Ordering;
use std::fmt::{self, Formatter};
use std::hash::Hash;
use std::hash;
use std::path::Path;
use std::sync::Arc;
use semver;
use serde::de;
use serde::ser;
use util::{CargoResult, ToSemver};
use core::source::SourceId;
use core::interning::InternedString;
/// Identifier for a specific version of a package in a specific source.
#[derive(Clone)]
pub struct PackageId {
inner: Arc<PackageIdInner>,
}
#[derive(PartialEq, PartialOrd, Eq, Ord)]
struct PackageIdInner {
name: InternedString,
version: semver::Version,
source_id: SourceId,
}
impl ser::Serialize for PackageId {
fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
s.collect_str(&format_args!(
"{} {} ({})",
self.inner.name,
self.inner.version,
self.inner.source_id.to_url()
))
}
}
impl<'de> de::Deserialize<'de> for PackageId {
fn deserialize<D>(d: D) -> Result<PackageId, D::Error>
where
D: de::Deserializer<'de>,
{
let string = String::deserialize(d)?;
let mut s = string.splitn(3,'');
let name = s.next().unwrap();
let version = match s.next() {
Some(s) => s,
None => return Err(de::Error::custom("invalid serialized PackageId")),
};
let version = semver::Version::parse(version).map_err(de::Error::custom)?;
let url = match s.next() {
Some(s) => s,
None => return Err(de::Error::custom("invalid serialized PackageId")),
};
let url = if url.starts_with('(') && url.ends_with(')') {
&url[1..url.len() - 1]
} else {
return Err(de::Error::custom("invalid serialized PackageId"));
};
let source_id = SourceId::from_url(url).map_err(de::Error::custom)?;
Ok(PackageId {
inner: Arc::new(PackageIdInner {
name: InternedString::new(name),
version,
source_id,
}),
})
}
}
impl Hash for PackageId {
fn hash<S: hash::Hasher>(&self, state: &mut S) {
self.inner.name.hash(state);
self.inner.version.hash(state);
self.inner.source_id.hash(state);
}
}
impl PartialEq for PackageId {
fn eq(&self, other: &PackageId) -> bool {
(*self.inner).eq(&*other.inner)
}
}
impl PartialOrd for PackageId {
fn partial_cmp(&self, other: &PackageId) -> Option<Ordering> {
(*self.inner).partial_cmp(&*other.inner)
}
}
impl Eq for PackageId {}
impl Ord for PackageId {
fn cmp(&self, other: &PackageId) -> Ordering {
(*self.inner).cmp(&*other.inner)
}
}
impl PackageId {
pub fn new<T: ToSemver>(name: &str, version: T, sid: &SourceId) -> CargoResult<PackageId> {
|
version: v,
source_id: sid.clone(),
}),
})
}
pub fn name(&self) -> InternedString {
self.inner.name
}
pub fn version(&self) -> &semver::Version {
&self.inner.version
}
pub fn source_id(&self) -> &SourceId {
&self.inner.source_id
}
pub fn with_precise(&self, precise: Option<String>) -> PackageId {
PackageId {
inner: Arc::new(PackageIdInner {
name: self.inner.name,
version: self.inner.version.clone(),
source_id: self.inner.source_id.with_precise(precise),
}),
}
}
pub fn with_source_id(&self, source: &SourceId) -> PackageId {
PackageId {
inner: Arc::new(PackageIdInner {
name: self.inner.name,
version: self.inner.version.clone(),
source_id: source.clone(),
}),
}
}
pub fn stable_hash<'a>(&'a self, workspace: &'a Path) -> PackageIdStableHash<'a> {
PackageIdStableHash(self, workspace)
}
}
pub struct PackageIdStableHash<'a>(&'a PackageId, &'a Path);
impl<'a> Hash for PackageIdStableHash<'a> {
fn hash<S: hash::Hasher>(&self, state: &mut S) {
self.0.inner.name.hash(state);
self.0.inner.version.hash(state);
self.0.inner.source_id.stable_hash(self.1, state);
}
}
impl fmt::Display for PackageId {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "{} v{}", self.inner.name, self.inner.version)?;
if!self.inner.source_id.is_default_registry() {
write!(f, " ({})", self.inner.source_id)?;
}
Ok(())
}
}
impl fmt::Debug for PackageId {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
f.debug_struct("PackageId")
.field("name", &self.inner.name)
.field("version", &self.inner.version.to_string())
.field("source", &self.inner.source_id.to_string())
.finish()
}
}
#[cfg(test)]
mod tests {
use super::PackageId;
use core::source::SourceId;
use sources::CRATES_IO;
use util::ToUrl;
#[test]
fn invalid_version_handled_nicely() {
let loc = CRATES_IO.to_url().unwrap();
let repo = SourceId::for_registry(&loc).unwrap();
assert!(PackageId::new("foo", "1.0", &repo).is_err());
assert!(PackageId::new("foo", "1", &repo).is_err());
assert!(PackageId::new("foo", "bar", &repo).is_err());
assert!(PackageId::new("foo", "", &repo).is_err());
}
}
|
let v = version.to_semver()?;
Ok(PackageId {
inner: Arc::new(PackageIdInner {
name: InternedString::new(name),
|
random_line_split
|
package_id.rs
|
use std::cmp::Ordering;
use std::fmt::{self, Formatter};
use std::hash::Hash;
use std::hash;
use std::path::Path;
use std::sync::Arc;
use semver;
use serde::de;
use serde::ser;
use util::{CargoResult, ToSemver};
use core::source::SourceId;
use core::interning::InternedString;
/// Identifier for a specific version of a package in a specific source.
#[derive(Clone)]
pub struct PackageId {
inner: Arc<PackageIdInner>,
}
#[derive(PartialEq, PartialOrd, Eq, Ord)]
struct PackageIdInner {
name: InternedString,
version: semver::Version,
source_id: SourceId,
}
impl ser::Serialize for PackageId {
fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
s.collect_str(&format_args!(
"{} {} ({})",
self.inner.name,
self.inner.version,
self.inner.source_id.to_url()
))
}
}
impl<'de> de::Deserialize<'de> for PackageId {
fn deserialize<D>(d: D) -> Result<PackageId, D::Error>
where
D: de::Deserializer<'de>,
{
let string = String::deserialize(d)?;
let mut s = string.splitn(3,'');
let name = s.next().unwrap();
let version = match s.next() {
Some(s) => s,
None => return Err(de::Error::custom("invalid serialized PackageId")),
};
let version = semver::Version::parse(version).map_err(de::Error::custom)?;
let url = match s.next() {
Some(s) => s,
None => return Err(de::Error::custom("invalid serialized PackageId")),
};
let url = if url.starts_with('(') && url.ends_with(')') {
&url[1..url.len() - 1]
} else {
return Err(de::Error::custom("invalid serialized PackageId"));
};
let source_id = SourceId::from_url(url).map_err(de::Error::custom)?;
Ok(PackageId {
inner: Arc::new(PackageIdInner {
name: InternedString::new(name),
version,
source_id,
}),
})
}
}
impl Hash for PackageId {
fn hash<S: hash::Hasher>(&self, state: &mut S) {
self.inner.name.hash(state);
self.inner.version.hash(state);
self.inner.source_id.hash(state);
}
}
impl PartialEq for PackageId {
fn eq(&self, other: &PackageId) -> bool {
(*self.inner).eq(&*other.inner)
}
}
impl PartialOrd for PackageId {
fn partial_cmp(&self, other: &PackageId) -> Option<Ordering> {
(*self.inner).partial_cmp(&*other.inner)
}
}
impl Eq for PackageId {}
impl Ord for PackageId {
fn cmp(&self, other: &PackageId) -> Ordering {
(*self.inner).cmp(&*other.inner)
}
}
impl PackageId {
pub fn new<T: ToSemver>(name: &str, version: T, sid: &SourceId) -> CargoResult<PackageId> {
let v = version.to_semver()?;
Ok(PackageId {
inner: Arc::new(PackageIdInner {
name: InternedString::new(name),
version: v,
source_id: sid.clone(),
}),
})
}
pub fn name(&self) -> InternedString {
self.inner.name
}
pub fn version(&self) -> &semver::Version {
&self.inner.version
}
pub fn source_id(&self) -> &SourceId {
&self.inner.source_id
}
pub fn with_precise(&self, precise: Option<String>) -> PackageId {
PackageId {
inner: Arc::new(PackageIdInner {
name: self.inner.name,
version: self.inner.version.clone(),
source_id: self.inner.source_id.with_precise(precise),
}),
}
}
pub fn with_source_id(&self, source: &SourceId) -> PackageId {
PackageId {
inner: Arc::new(PackageIdInner {
name: self.inner.name,
version: self.inner.version.clone(),
source_id: source.clone(),
}),
}
}
pub fn stable_hash<'a>(&'a self, workspace: &'a Path) -> PackageIdStableHash<'a> {
PackageIdStableHash(self, workspace)
}
}
pub struct PackageIdStableHash<'a>(&'a PackageId, &'a Path);
impl<'a> Hash for PackageIdStableHash<'a> {
fn hash<S: hash::Hasher>(&self, state: &mut S) {
self.0.inner.name.hash(state);
self.0.inner.version.hash(state);
self.0.inner.source_id.stable_hash(self.1, state);
}
}
impl fmt::Display for PackageId {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "{} v{}", self.inner.name, self.inner.version)?;
if!self.inner.source_id.is_default_registry() {
write!(f, " ({})", self.inner.source_id)?;
}
Ok(())
}
}
impl fmt::Debug for PackageId {
fn fmt(&self, f: &mut Formatter) -> fmt::Result
|
}
#[cfg(test)]
mod tests {
use super::PackageId;
use core::source::SourceId;
use sources::CRATES_IO;
use util::ToUrl;
#[test]
fn invalid_version_handled_nicely() {
let loc = CRATES_IO.to_url().unwrap();
let repo = SourceId::for_registry(&loc).unwrap();
assert!(PackageId::new("foo", "1.0", &repo).is_err());
assert!(PackageId::new("foo", "1", &repo).is_err());
assert!(PackageId::new("foo", "bar", &repo).is_err());
assert!(PackageId::new("foo", "", &repo).is_err());
}
}
|
{
f.debug_struct("PackageId")
.field("name", &self.inner.name)
.field("version", &self.inner.version.to_string())
.field("source", &self.inner.source_id.to_string())
.finish()
}
|
identifier_body
|
wasm.rs
|
use crate::*;
use std::alloc::Layout;
pub static mut APPS2: Vec<Application> = Vec::new();
static LOGGER: WebConsoleLogger = WebConsoleLogger {};
struct WebConsoleLogger {}
impl log::Log for WebConsoleLogger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
metadata.level() <= log::max_level()
}
fn log(&self, record: &log::Record) {
if!self.enabled(record.metadata()) {
return;
}
let txt = &format!(
"{} [{}] @ {}:{}",
record.target(),
record.args(),
record.file().unwrap(),
record.line().unwrap()
);
unsafe {
match record.level() {
log::Level::Error => console(0, txt.len() as u32, txt.as_ptr() as u32),
log::Level::Warn => console(1, txt.len() as u32, txt.as_ptr() as u32),
log::Level::Info => console(2, txt.len() as u32, txt.as_ptr() as u32),
log::Level::Debug => console(3, txt.len() as u32, txt.as_ptr() as u32),
log::Level::Trace => console(4, txt.len() as u32, txt.as_ptr() as u32),
}
}
}
fn flush(&self) {}
}
#[cfg(target_arch = "wasm32")]
extern "C" {
pub fn apply_to(size: u32, ptr: u32);
pub fn console(category: u32, size: u32, ptr: u32);
}
#[cfg(target_arch = "x86_64")]
pub unsafe fn apply_to(_size: u32, _ptr: u32) {}
#[cfg(target_arch = "x86_64")]
pub unsafe fn console(_category: u32, _size: u32, _ptr: u32) {}
#[no_mangle]
pub fn init() {
std::panic::set_hook(Box::new(|panic_info| {
let msg = format!("{}", panic_info);
unsafe { console(0, msg.len() as u32, msg.as_ptr() as u32) };
}));
let _ = log::set_logger(&LOGGER);
log::set_max_level(log::LevelFilter::Trace);
}
#[no_mangle]
pub fn application_new() -> usize {
unsafe {
let app = Application::new(APPS2.len());
APPS2.push(app);
APPS2.len() - 1
}
}
#[macro_export]
macro_rules! mount {
( $($id:expr => $type:ty),*) => {
$(impl runtime::ApplicationFacade for $type {
fn send_by_id(&mut self,
app: usize,
actor: usize,
id: usize,
p: Vec<u64>,
messages: *mut (),
executor: &mut runtime::executor::Executor) -> Result<(), u64>
{
match self.build_message(id, p, messages) {
Ok(message) => {
#[cfg(feature = "derive_debug")]
{
log::trace!(target: "actor", "{}:{} Update: [{:?}]", app, actor, message);
}
let mut actions = self.send(&message);
while let Some(action) = actions.pop() {
#[cfg(feature = "derive_debug")]
{
log::trace!(target: "actor", "{}:{} Action: [{:?}]", app, actor, action);
}
let env = env::Env::new(app, actor);
let handle = action.handle(env);
executor.spawn(handle);
}
Ok(())
}
_ => Err(0)
}
}
fn render(&mut self, app: usize, actor: usize, messages: *mut ()) -> Html {
<$type as DisplayHtml>::render2(self, app, actor, messages)
}
}
)*
#[no_mangle]
pub fn application_mount(app_id: usize, type_id: usize) -> isize {
unsafe {
match APPS2.get_mut(app_id as usize) {
Some(app) => {
let r = match type_id {
$(
$id => Some((
Box::new(<$type as std::default::Default>::default()) as Box<dyn ApplicationFacade>,
Box::leak(
Box::new(Vec::<MessageFactory<<$type as UpdatableState>::Message>>::new())
) as *mut Vec::<MessageFactory<<$type as UpdatableState>::Message>> as *mut ()
)),
)*
_ => None,
};
match r {
Some((facade, messages)) => unsafe {
let id = app.mount(Actor { facade, messages });
app.render(id);
id as isize
},
None => -1,
}
},
None => -1
}
}
}
};
}
#[no_mangle]
pub fn application_send(
app: usize,
actor: usize,
msg: usize,
p0: u64,
p1: u64,
p2: u64,
p3: u64,
p4: u64,
) -> bool {
let p = vec![p0, p1, p2, p3, p4];
unsafe {
match APPS2.get_mut(app as usize) {
Some(app) => {
app.send(actor as usize, msg as usize, p);
app.render(actor);
true
}
None => false,
}
}
}
pub fn application_get_messages(app_id: usize, wrapper_id: usize) -> Option<*mut ()> {
unsafe {
match APPS2
.get_mut(app_id as usize)
.and_then(|x| x.actors.get(wrapper_id))
{
Some(actor) => Some(actor.messages),
None => None,
}
}
}
#[track_caller]
pub fn console_error_str(s: &str) {
let location = core::panic::Location::caller();
let s = format!("{} at {}", s, location);
unsafe { console(0, s.len() as u32, s.as_ptr() as u32) };
}
pub struct
|
{
ptr: *mut u8,
}
impl AllocBuffer {
pub fn header_size() -> usize {
std::mem::size_of::<usize>()
}
pub fn from_data_ptr(ptr: usize) -> Self {
let ptr_start = ptr - Self::header_size();
let block = Self {
ptr: ptr_start as *mut u8,
};
block
}
pub fn new(size: usize) -> Self {
let total_size = Self::header_size() + size as usize;
let layout = Layout::array::<u8>(total_size).unwrap();
let ptr = unsafe { std::alloc::alloc(layout) };
let block_size = ptr as *mut usize;
unsafe { *block_size = total_size };
Self { ptr }
}
pub fn total_size(&self) -> usize {
let total_size = self.ptr as *mut usize;
unsafe { *total_size }
}
pub fn data_size(&self) -> usize {
self.total_size() - Self::header_size()
}
pub fn data_ptr(&self) -> *mut u8 {
let ptr = self.ptr as usize;
(ptr + Self::header_size()) as *mut u8
}
pub fn as_slice_mut(&mut self) -> &mut [u8] {
unsafe { std::slice::from_raw_parts_mut(self.data_ptr(), self.data_size()) }
}
pub fn as_slice(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self.data_ptr(), self.data_size()) }
}
pub fn as_str(&self) -> Result<&str, std::str::Utf8Error> {
let slice = self.as_slice();
std::str::from_utf8(slice)
}
pub fn copy_str(&mut self, s: &str) -> Result<(), u64> {
if s.len() > self.data_size() {
Err(0)
} else {
let data = self.as_slice_mut();
data.copy_from_slice(s.as_bytes());
Ok(())
}
}
}
impl Drop for AllocBuffer {
fn drop(&mut self) {
let layout = Layout::array::<u8>(self.total_size()).unwrap();
unsafe { std::alloc::dealloc(self.ptr, layout) };
}
}
#[no_mangle]
pub fn alloc(size: u64) -> u64 {
let buffer = AllocBuffer::new(size as usize);
let ptr = buffer.data_ptr() as u64;
std::mem::forget(buffer);
ptr
}
pub fn alloc_str(s: &str) -> AllocBuffer {
let mut buffer = AllocBuffer::new(s.len());
buffer.copy_str(s).unwrap();
buffer
}
|
AllocBuffer
|
identifier_name
|
wasm.rs
|
use crate::*;
use std::alloc::Layout;
pub static mut APPS2: Vec<Application> = Vec::new();
static LOGGER: WebConsoleLogger = WebConsoleLogger {};
struct WebConsoleLogger {}
impl log::Log for WebConsoleLogger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
metadata.level() <= log::max_level()
}
fn log(&self, record: &log::Record) {
if!self.enabled(record.metadata()) {
return;
}
let txt = &format!(
"{} [{}] @ {}:{}",
record.target(),
record.args(),
record.file().unwrap(),
record.line().unwrap()
);
unsafe {
match record.level() {
log::Level::Error => console(0, txt.len() as u32, txt.as_ptr() as u32),
log::Level::Warn => console(1, txt.len() as u32, txt.as_ptr() as u32),
log::Level::Info => console(2, txt.len() as u32, txt.as_ptr() as u32),
log::Level::Debug => console(3, txt.len() as u32, txt.as_ptr() as u32),
log::Level::Trace => console(4, txt.len() as u32, txt.as_ptr() as u32),
}
}
}
fn flush(&self) {}
}
#[cfg(target_arch = "wasm32")]
extern "C" {
pub fn apply_to(size: u32, ptr: u32);
pub fn console(category: u32, size: u32, ptr: u32);
|
#[cfg(target_arch = "x86_64")]
pub unsafe fn apply_to(_size: u32, _ptr: u32) {}
#[cfg(target_arch = "x86_64")]
pub unsafe fn console(_category: u32, _size: u32, _ptr: u32) {}
#[no_mangle]
pub fn init() {
std::panic::set_hook(Box::new(|panic_info| {
let msg = format!("{}", panic_info);
unsafe { console(0, msg.len() as u32, msg.as_ptr() as u32) };
}));
let _ = log::set_logger(&LOGGER);
log::set_max_level(log::LevelFilter::Trace);
}
#[no_mangle]
pub fn application_new() -> usize {
unsafe {
let app = Application::new(APPS2.len());
APPS2.push(app);
APPS2.len() - 1
}
}
#[macro_export]
macro_rules! mount {
( $($id:expr => $type:ty),*) => {
$(impl runtime::ApplicationFacade for $type {
fn send_by_id(&mut self,
app: usize,
actor: usize,
id: usize,
p: Vec<u64>,
messages: *mut (),
executor: &mut runtime::executor::Executor) -> Result<(), u64>
{
match self.build_message(id, p, messages) {
Ok(message) => {
#[cfg(feature = "derive_debug")]
{
log::trace!(target: "actor", "{}:{} Update: [{:?}]", app, actor, message);
}
let mut actions = self.send(&message);
while let Some(action) = actions.pop() {
#[cfg(feature = "derive_debug")]
{
log::trace!(target: "actor", "{}:{} Action: [{:?}]", app, actor, action);
}
let env = env::Env::new(app, actor);
let handle = action.handle(env);
executor.spawn(handle);
}
Ok(())
}
_ => Err(0)
}
}
fn render(&mut self, app: usize, actor: usize, messages: *mut ()) -> Html {
<$type as DisplayHtml>::render2(self, app, actor, messages)
}
}
)*
#[no_mangle]
pub fn application_mount(app_id: usize, type_id: usize) -> isize {
unsafe {
match APPS2.get_mut(app_id as usize) {
Some(app) => {
let r = match type_id {
$(
$id => Some((
Box::new(<$type as std::default::Default>::default()) as Box<dyn ApplicationFacade>,
Box::leak(
Box::new(Vec::<MessageFactory<<$type as UpdatableState>::Message>>::new())
) as *mut Vec::<MessageFactory<<$type as UpdatableState>::Message>> as *mut ()
)),
)*
_ => None,
};
match r {
Some((facade, messages)) => unsafe {
let id = app.mount(Actor { facade, messages });
app.render(id);
id as isize
},
None => -1,
}
},
None => -1
}
}
}
};
}
#[no_mangle]
pub fn application_send(
app: usize,
actor: usize,
msg: usize,
p0: u64,
p1: u64,
p2: u64,
p3: u64,
p4: u64,
) -> bool {
let p = vec![p0, p1, p2, p3, p4];
unsafe {
match APPS2.get_mut(app as usize) {
Some(app) => {
app.send(actor as usize, msg as usize, p);
app.render(actor);
true
}
None => false,
}
}
}
pub fn application_get_messages(app_id: usize, wrapper_id: usize) -> Option<*mut ()> {
unsafe {
match APPS2
.get_mut(app_id as usize)
.and_then(|x| x.actors.get(wrapper_id))
{
Some(actor) => Some(actor.messages),
None => None,
}
}
}
#[track_caller]
pub fn console_error_str(s: &str) {
let location = core::panic::Location::caller();
let s = format!("{} at {}", s, location);
unsafe { console(0, s.len() as u32, s.as_ptr() as u32) };
}
pub struct AllocBuffer {
ptr: *mut u8,
}
impl AllocBuffer {
pub fn header_size() -> usize {
std::mem::size_of::<usize>()
}
pub fn from_data_ptr(ptr: usize) -> Self {
let ptr_start = ptr - Self::header_size();
let block = Self {
ptr: ptr_start as *mut u8,
};
block
}
pub fn new(size: usize) -> Self {
let total_size = Self::header_size() + size as usize;
let layout = Layout::array::<u8>(total_size).unwrap();
let ptr = unsafe { std::alloc::alloc(layout) };
let block_size = ptr as *mut usize;
unsafe { *block_size = total_size };
Self { ptr }
}
pub fn total_size(&self) -> usize {
let total_size = self.ptr as *mut usize;
unsafe { *total_size }
}
pub fn data_size(&self) -> usize {
self.total_size() - Self::header_size()
}
pub fn data_ptr(&self) -> *mut u8 {
let ptr = self.ptr as usize;
(ptr + Self::header_size()) as *mut u8
}
pub fn as_slice_mut(&mut self) -> &mut [u8] {
unsafe { std::slice::from_raw_parts_mut(self.data_ptr(), self.data_size()) }
}
pub fn as_slice(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self.data_ptr(), self.data_size()) }
}
pub fn as_str(&self) -> Result<&str, std::str::Utf8Error> {
let slice = self.as_slice();
std::str::from_utf8(slice)
}
pub fn copy_str(&mut self, s: &str) -> Result<(), u64> {
if s.len() > self.data_size() {
Err(0)
} else {
let data = self.as_slice_mut();
data.copy_from_slice(s.as_bytes());
Ok(())
}
}
}
impl Drop for AllocBuffer {
fn drop(&mut self) {
let layout = Layout::array::<u8>(self.total_size()).unwrap();
unsafe { std::alloc::dealloc(self.ptr, layout) };
}
}
#[no_mangle]
pub fn alloc(size: u64) -> u64 {
let buffer = AllocBuffer::new(size as usize);
let ptr = buffer.data_ptr() as u64;
std::mem::forget(buffer);
ptr
}
pub fn alloc_str(s: &str) -> AllocBuffer {
let mut buffer = AllocBuffer::new(s.len());
buffer.copy_str(s).unwrap();
buffer
}
|
}
|
random_line_split
|
wasm.rs
|
use crate::*;
use std::alloc::Layout;
pub static mut APPS2: Vec<Application> = Vec::new();
static LOGGER: WebConsoleLogger = WebConsoleLogger {};
struct WebConsoleLogger {}
impl log::Log for WebConsoleLogger {
fn enabled(&self, metadata: &log::Metadata) -> bool {
metadata.level() <= log::max_level()
}
fn log(&self, record: &log::Record) {
if!self.enabled(record.metadata()) {
return;
}
let txt = &format!(
"{} [{}] @ {}:{}",
record.target(),
record.args(),
record.file().unwrap(),
record.line().unwrap()
);
unsafe {
match record.level() {
log::Level::Error => console(0, txt.len() as u32, txt.as_ptr() as u32),
log::Level::Warn => console(1, txt.len() as u32, txt.as_ptr() as u32),
log::Level::Info => console(2, txt.len() as u32, txt.as_ptr() as u32),
log::Level::Debug => console(3, txt.len() as u32, txt.as_ptr() as u32),
log::Level::Trace => console(4, txt.len() as u32, txt.as_ptr() as u32),
}
}
}
fn flush(&self) {}
}
#[cfg(target_arch = "wasm32")]
extern "C" {
pub fn apply_to(size: u32, ptr: u32);
pub fn console(category: u32, size: u32, ptr: u32);
}
#[cfg(target_arch = "x86_64")]
pub unsafe fn apply_to(_size: u32, _ptr: u32) {}
#[cfg(target_arch = "x86_64")]
pub unsafe fn console(_category: u32, _size: u32, _ptr: u32) {}
#[no_mangle]
pub fn init()
|
#[no_mangle]
pub fn application_new() -> usize {
unsafe {
let app = Application::new(APPS2.len());
APPS2.push(app);
APPS2.len() - 1
}
}
#[macro_export]
macro_rules! mount {
( $($id:expr => $type:ty),*) => {
$(impl runtime::ApplicationFacade for $type {
fn send_by_id(&mut self,
app: usize,
actor: usize,
id: usize,
p: Vec<u64>,
messages: *mut (),
executor: &mut runtime::executor::Executor) -> Result<(), u64>
{
match self.build_message(id, p, messages) {
Ok(message) => {
#[cfg(feature = "derive_debug")]
{
log::trace!(target: "actor", "{}:{} Update: [{:?}]", app, actor, message);
}
let mut actions = self.send(&message);
while let Some(action) = actions.pop() {
#[cfg(feature = "derive_debug")]
{
log::trace!(target: "actor", "{}:{} Action: [{:?}]", app, actor, action);
}
let env = env::Env::new(app, actor);
let handle = action.handle(env);
executor.spawn(handle);
}
Ok(())
}
_ => Err(0)
}
}
fn render(&mut self, app: usize, actor: usize, messages: *mut ()) -> Html {
<$type as DisplayHtml>::render2(self, app, actor, messages)
}
}
)*
#[no_mangle]
pub fn application_mount(app_id: usize, type_id: usize) -> isize {
unsafe {
match APPS2.get_mut(app_id as usize) {
Some(app) => {
let r = match type_id {
$(
$id => Some((
Box::new(<$type as std::default::Default>::default()) as Box<dyn ApplicationFacade>,
Box::leak(
Box::new(Vec::<MessageFactory<<$type as UpdatableState>::Message>>::new())
) as *mut Vec::<MessageFactory<<$type as UpdatableState>::Message>> as *mut ()
)),
)*
_ => None,
};
match r {
Some((facade, messages)) => unsafe {
let id = app.mount(Actor { facade, messages });
app.render(id);
id as isize
},
None => -1,
}
},
None => -1
}
}
}
};
}
#[no_mangle]
pub fn application_send(
app: usize,
actor: usize,
msg: usize,
p0: u64,
p1: u64,
p2: u64,
p3: u64,
p4: u64,
) -> bool {
let p = vec![p0, p1, p2, p3, p4];
unsafe {
match APPS2.get_mut(app as usize) {
Some(app) => {
app.send(actor as usize, msg as usize, p);
app.render(actor);
true
}
None => false,
}
}
}
pub fn application_get_messages(app_id: usize, wrapper_id: usize) -> Option<*mut ()> {
unsafe {
match APPS2
.get_mut(app_id as usize)
.and_then(|x| x.actors.get(wrapper_id))
{
Some(actor) => Some(actor.messages),
None => None,
}
}
}
#[track_caller]
pub fn console_error_str(s: &str) {
let location = core::panic::Location::caller();
let s = format!("{} at {}", s, location);
unsafe { console(0, s.len() as u32, s.as_ptr() as u32) };
}
pub struct AllocBuffer {
ptr: *mut u8,
}
impl AllocBuffer {
pub fn header_size() -> usize {
std::mem::size_of::<usize>()
}
pub fn from_data_ptr(ptr: usize) -> Self {
let ptr_start = ptr - Self::header_size();
let block = Self {
ptr: ptr_start as *mut u8,
};
block
}
pub fn new(size: usize) -> Self {
let total_size = Self::header_size() + size as usize;
let layout = Layout::array::<u8>(total_size).unwrap();
let ptr = unsafe { std::alloc::alloc(layout) };
let block_size = ptr as *mut usize;
unsafe { *block_size = total_size };
Self { ptr }
}
pub fn total_size(&self) -> usize {
let total_size = self.ptr as *mut usize;
unsafe { *total_size }
}
pub fn data_size(&self) -> usize {
self.total_size() - Self::header_size()
}
pub fn data_ptr(&self) -> *mut u8 {
let ptr = self.ptr as usize;
(ptr + Self::header_size()) as *mut u8
}
pub fn as_slice_mut(&mut self) -> &mut [u8] {
unsafe { std::slice::from_raw_parts_mut(self.data_ptr(), self.data_size()) }
}
pub fn as_slice(&self) -> &[u8] {
unsafe { std::slice::from_raw_parts(self.data_ptr(), self.data_size()) }
}
pub fn as_str(&self) -> Result<&str, std::str::Utf8Error> {
let slice = self.as_slice();
std::str::from_utf8(slice)
}
pub fn copy_str(&mut self, s: &str) -> Result<(), u64> {
if s.len() > self.data_size() {
Err(0)
} else {
let data = self.as_slice_mut();
data.copy_from_slice(s.as_bytes());
Ok(())
}
}
}
impl Drop for AllocBuffer {
fn drop(&mut self) {
let layout = Layout::array::<u8>(self.total_size()).unwrap();
unsafe { std::alloc::dealloc(self.ptr, layout) };
}
}
#[no_mangle]
pub fn alloc(size: u64) -> u64 {
let buffer = AllocBuffer::new(size as usize);
let ptr = buffer.data_ptr() as u64;
std::mem::forget(buffer);
ptr
}
pub fn alloc_str(s: &str) -> AllocBuffer {
let mut buffer = AllocBuffer::new(s.len());
buffer.copy_str(s).unwrap();
buffer
}
|
{
std::panic::set_hook(Box::new(|panic_info| {
let msg = format!("{}", panic_info);
unsafe { console(0, msg.len() as u32, msg.as_ptr() as u32) };
}));
let _ = log::set_logger(&LOGGER);
log::set_max_level(log::LevelFilter::Trace);
}
|
identifier_body
|
entry.rs
|
//! # Page table entry
//! Some code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use memory::Frame;
use super::PhysicalAddress;
/// A page table entry
pub struct Entry(u64);
bitflags! {
pub flags EntryFlags: u64 {
const PRESENT = 1 << 0,
const WRITABLE = 1 << 1,
const USER_ACCESSIBLE = 1 << 2,
const WRITE_THROUGH = 1 << 3,
const NO_CACHE = 1 << 4,
const ACCESSED = 1 << 5,
const DIRTY = 1 << 6,
const HUGE_PAGE = 1 << 7,
const GLOBAL = 1 << 8,
const NO_EXECUTE = 1 << 63,
}
}
pub const ADDRESS_MASK: usize = 0x000f_ffff_ffff_f000;
impl Entry {
/// Is the entry unused?
pub fn is_unused(&self) -> bool {
self.0 == 0
}
/// Make the entry unused
pub fn set_unused(&mut self) {
self.0 = 0;
}
/// Get the address this page references
pub fn address(&self) -> PhysicalAddress {
PhysicalAddress::new(self.0 as usize & ADDRESS_MASK)
}
/// Get the current entry flags
pub fn
|
(&self) -> EntryFlags {
EntryFlags::from_bits_truncate(self.0)
}
/// Get the associated frame, if available
pub fn pointed_frame(&self) -> Option<Frame> {
if self.flags().contains(PRESENT) {
Some(Frame::containing_address(self.address()))
} else {
None
}
}
pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
debug_assert!(frame.start_address().get() &!ADDRESS_MASK == 0);
self.0 = (frame.start_address().get() as u64) | flags.bits();
}
}
|
flags
|
identifier_name
|
entry.rs
|
//! # Page table entry
|
//! Some code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use memory::Frame;
use super::PhysicalAddress;
/// A page table entry
pub struct Entry(u64);
bitflags! {
pub flags EntryFlags: u64 {
const PRESENT = 1 << 0,
const WRITABLE = 1 << 1,
const USER_ACCESSIBLE = 1 << 2,
const WRITE_THROUGH = 1 << 3,
const NO_CACHE = 1 << 4,
const ACCESSED = 1 << 5,
const DIRTY = 1 << 6,
const HUGE_PAGE = 1 << 7,
const GLOBAL = 1 << 8,
const NO_EXECUTE = 1 << 63,
}
}
pub const ADDRESS_MASK: usize = 0x000f_ffff_ffff_f000;
impl Entry {
/// Is the entry unused?
pub fn is_unused(&self) -> bool {
self.0 == 0
}
/// Make the entry unused
pub fn set_unused(&mut self) {
self.0 = 0;
}
/// Get the address this page references
pub fn address(&self) -> PhysicalAddress {
PhysicalAddress::new(self.0 as usize & ADDRESS_MASK)
}
/// Get the current entry flags
pub fn flags(&self) -> EntryFlags {
EntryFlags::from_bits_truncate(self.0)
}
/// Get the associated frame, if available
pub fn pointed_frame(&self) -> Option<Frame> {
if self.flags().contains(PRESENT) {
Some(Frame::containing_address(self.address()))
} else {
None
}
}
pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
debug_assert!(frame.start_address().get() &!ADDRESS_MASK == 0);
self.0 = (frame.start_address().get() as u64) | flags.bits();
}
}
|
random_line_split
|
|
entry.rs
|
//! # Page table entry
//! Some code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use memory::Frame;
use super::PhysicalAddress;
/// A page table entry
pub struct Entry(u64);
bitflags! {
pub flags EntryFlags: u64 {
const PRESENT = 1 << 0,
const WRITABLE = 1 << 1,
const USER_ACCESSIBLE = 1 << 2,
const WRITE_THROUGH = 1 << 3,
const NO_CACHE = 1 << 4,
const ACCESSED = 1 << 5,
const DIRTY = 1 << 6,
const HUGE_PAGE = 1 << 7,
const GLOBAL = 1 << 8,
const NO_EXECUTE = 1 << 63,
}
}
pub const ADDRESS_MASK: usize = 0x000f_ffff_ffff_f000;
impl Entry {
/// Is the entry unused?
pub fn is_unused(&self) -> bool {
self.0 == 0
}
/// Make the entry unused
pub fn set_unused(&mut self) {
self.0 = 0;
}
/// Get the address this page references
pub fn address(&self) -> PhysicalAddress {
PhysicalAddress::new(self.0 as usize & ADDRESS_MASK)
}
/// Get the current entry flags
pub fn flags(&self) -> EntryFlags
|
/// Get the associated frame, if available
pub fn pointed_frame(&self) -> Option<Frame> {
if self.flags().contains(PRESENT) {
Some(Frame::containing_address(self.address()))
} else {
None
}
}
pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
debug_assert!(frame.start_address().get() &!ADDRESS_MASK == 0);
self.0 = (frame.start_address().get() as u64) | flags.bits();
}
}
|
{
EntryFlags::from_bits_truncate(self.0)
}
|
identifier_body
|
entry.rs
|
//! # Page table entry
//! Some code borrowed from [Phil Opp's Blog](http://os.phil-opp.com/modifying-page-tables.html)
use memory::Frame;
use super::PhysicalAddress;
/// A page table entry
pub struct Entry(u64);
bitflags! {
pub flags EntryFlags: u64 {
const PRESENT = 1 << 0,
const WRITABLE = 1 << 1,
const USER_ACCESSIBLE = 1 << 2,
const WRITE_THROUGH = 1 << 3,
const NO_CACHE = 1 << 4,
const ACCESSED = 1 << 5,
const DIRTY = 1 << 6,
const HUGE_PAGE = 1 << 7,
const GLOBAL = 1 << 8,
const NO_EXECUTE = 1 << 63,
}
}
pub const ADDRESS_MASK: usize = 0x000f_ffff_ffff_f000;
impl Entry {
/// Is the entry unused?
pub fn is_unused(&self) -> bool {
self.0 == 0
}
/// Make the entry unused
pub fn set_unused(&mut self) {
self.0 = 0;
}
/// Get the address this page references
pub fn address(&self) -> PhysicalAddress {
PhysicalAddress::new(self.0 as usize & ADDRESS_MASK)
}
/// Get the current entry flags
pub fn flags(&self) -> EntryFlags {
EntryFlags::from_bits_truncate(self.0)
}
/// Get the associated frame, if available
pub fn pointed_frame(&self) -> Option<Frame> {
if self.flags().contains(PRESENT)
|
else {
None
}
}
pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
debug_assert!(frame.start_address().get() &!ADDRESS_MASK == 0);
self.0 = (frame.start_address().get() as u64) | flags.bits();
}
}
|
{
Some(Frame::containing_address(self.address()))
}
|
conditional_block
|
test.rs
|
#![feature(phase)]
extern crate debug;
extern crate time;
extern crate stomp;
#[phase(plugin, link)] extern crate log;
use stomp::Client;
use stomp::{CONNECTED, RECEIPT};
// Test CONNECT
#[test]
fn test_connect() {
let mut client = Client::with_uri("localhost:61613");
let response = client.connect("user", "pw").unwrap();
assert_eq!(response.command, CONNECTED);
drop(client.stream);
}
// Test SEND
#[test]
fn test_send() {
let mut client = Client::with_uri("localhost:61613");
let _ = client.connect("user", "pw").unwrap();
let mut buf = [0,..128];
// Temp hack to distinguish consecutive requests over discrete conns
let amt = {
let mut wr = std::io::BufWriter::new(buf);
let t = time::get_time();
let _ = write!(&mut wr, "testing 123: {}", t.sec);
wr.tell().unwrap() as uint
};
let s = std::str::from_utf8(buf.slice(0, amt));
let response = client.send("/queue/test", s.unwrap()).unwrap();
assert_eq!(response.command, RECEIPT);
let receipt_response = client.send_with_receipt("/queue/test", s.unwrap(), "receipt1234").unwrap();
let id = receipt_response.get_header("receipt-id");
assert_eq!(id.as_slice(), "receipt1234");
assert_eq!(receipt_response.command, RECEIPT);
drop(client.stream);
}
// Test SUBSCRIBE
#[test]
fn test_subscribe()
|
{
let mut client = Client::with_uri("localhost:61613");
let _ = client.connect("user", "pw").unwrap();
let _ = client.send("/queue/test", "auto").unwrap();
}
|
identifier_body
|
|
test.rs
|
#![feature(phase)]
extern crate debug;
extern crate time;
extern crate stomp;
#[phase(plugin, link)] extern crate log;
use stomp::Client;
use stomp::{CONNECTED, RECEIPT};
// Test CONNECT
#[test]
fn test_connect() {
let mut client = Client::with_uri("localhost:61613");
let response = client.connect("user", "pw").unwrap();
assert_eq!(response.command, CONNECTED);
drop(client.stream);
}
// Test SEND
#[test]
fn test_send() {
let mut client = Client::with_uri("localhost:61613");
let _ = client.connect("user", "pw").unwrap();
let mut buf = [0,..128];
// Temp hack to distinguish consecutive requests over discrete conns
let amt = {
let mut wr = std::io::BufWriter::new(buf);
let t = time::get_time();
let _ = write!(&mut wr, "testing 123: {}", t.sec);
wr.tell().unwrap() as uint
};
let s = std::str::from_utf8(buf.slice(0, amt));
let response = client.send("/queue/test", s.unwrap()).unwrap();
assert_eq!(response.command, RECEIPT);
let receipt_response = client.send_with_receipt("/queue/test", s.unwrap(), "receipt1234").unwrap();
let id = receipt_response.get_header("receipt-id");
assert_eq!(id.as_slice(), "receipt1234");
assert_eq!(receipt_response.command, RECEIPT);
drop(client.stream);
}
// Test SUBSCRIBE
#[test]
fn
|
() {
let mut client = Client::with_uri("localhost:61613");
let _ = client.connect("user", "pw").unwrap();
let _ = client.send("/queue/test", "auto").unwrap();
}
|
test_subscribe
|
identifier_name
|
test.rs
|
#![feature(phase)]
extern crate debug;
extern crate time;
extern crate stomp;
#[phase(plugin, link)] extern crate log;
use stomp::Client;
use stomp::{CONNECTED, RECEIPT};
|
let response = client.connect("user", "pw").unwrap();
assert_eq!(response.command, CONNECTED);
drop(client.stream);
}
// Test SEND
#[test]
fn test_send() {
let mut client = Client::with_uri("localhost:61613");
let _ = client.connect("user", "pw").unwrap();
let mut buf = [0,..128];
// Temp hack to distinguish consecutive requests over discrete conns
let amt = {
let mut wr = std::io::BufWriter::new(buf);
let t = time::get_time();
let _ = write!(&mut wr, "testing 123: {}", t.sec);
wr.tell().unwrap() as uint
};
let s = std::str::from_utf8(buf.slice(0, amt));
let response = client.send("/queue/test", s.unwrap()).unwrap();
assert_eq!(response.command, RECEIPT);
let receipt_response = client.send_with_receipt("/queue/test", s.unwrap(), "receipt1234").unwrap();
let id = receipt_response.get_header("receipt-id");
assert_eq!(id.as_slice(), "receipt1234");
assert_eq!(receipt_response.command, RECEIPT);
drop(client.stream);
}
// Test SUBSCRIBE
#[test]
fn test_subscribe() {
let mut client = Client::with_uri("localhost:61613");
let _ = client.connect("user", "pw").unwrap();
let _ = client.send("/queue/test", "auto").unwrap();
}
|
// Test CONNECT
#[test]
fn test_connect() {
let mut client = Client::with_uri("localhost:61613");
|
random_line_split
|
header.rs
|
use std::convert::From;
use std::ascii::AsciiExt;
/// Headers that may be used by an HTTP request or response.
#[derive(Clone, Debug, PartialEq)]
pub enum
|
{
Host,
Connection,
KeepAlive,
ContentLength,
TransferEncoding,
Raw(String),
}
impl<'a> From<&'a str> for Header {
fn from(s: &'a str) -> Header {
if s.eq_ignore_ascii_case("Host") {
Header::Host
} else if s.eq_ignore_ascii_case("Connection") {
Header::Connection
} else if s.eq_ignore_ascii_case("Keep-Alive") {
Header::KeepAlive
} else if s.eq_ignore_ascii_case("Content-Length") {
Header::ContentLength
} else if s.eq_ignore_ascii_case("Transfer-Encoding") {
Header::TransferEncoding
} else {
Header::Raw(s.to_string())
}
}
}
|
Header
|
identifier_name
|
magics.rs
|
use rand::{Rng, thread_rng};
use types::{BISHOP, ROOK};
use util::*;
static mut KING_MAP: [u64; 64] = [0; 64];
static mut KNIGHT_MAP: [u64; 64] = [0; 64];
static mut BISHOP_MAP: [SMagic; 64] = [SMagic { offset: 0, mask: 0, magic: 0, shift: 0 }; 64];
static mut ROOK_MAP: [SMagic; 64] = [SMagic { offset: 0, mask: 0, magic: 0, shift: 0 }; 64];
const MAP_SIZE: usize = 107648;
static mut MAP: [u64; MAP_SIZE] = [0; MAP_SIZE];
pub fn knight_moves(from: u32) -> u64 {
unsafe { KNIGHT_MAP[from as usize] }
}
pub fn king_moves(from: u32) -> u64 {
unsafe { KING_MAP[from as usize] }
}
pub fn bishop_moves(from: u32, occ: u64) -> u64 {
unsafe { BISHOP_MAP[from as usize].att(occ) }
}
pub fn rook_moves(from: u32, occ: u64) -> u64
|
pub fn queen_moves(from: u32, occ: u64) -> u64 {
unsafe { BISHOP_MAP[from as usize].att(occ) |
ROOK_MAP[from as usize].att(occ) }
}
pub unsafe fn init() {
king_map_init();
knight_map_init();
let size = get_piece_map(BISHOP, &mut BISHOP_MAP, 0, false);
let total = get_piece_map(ROOK, &mut ROOK_MAP, size, false);
assert!(total == MAP_SIZE);
}
pub unsafe fn knight_map_init() {
let offsets = vec![
(-1, -2), (-2, -1), (-2, 1), (-1, 2),
(1, -2), (2, -1), (2, 1), (1, 2)];
for (i, attacks) in KNIGHT_MAP.iter_mut().enumerate() {
let (r, c) = ((i / 8) as isize, (i % 8) as isize);
for &(dr, dc) in &offsets {
if r+dr >= 0 && c+dc >= 0 && r+dr < 8 && c+dc < 8 {
*attacks |= 1 << ((r + dr)*8 + c + dc);
}
}
}
}
pub unsafe fn king_map_init() {
let offsets = vec![
(1, -1), (1, 0), (1, 1),
(0, -1), (0, 1),
(-1,-1), (-1, 0), (-1, 1)];
for (i, attacks) in KING_MAP.iter_mut().enumerate() {
let (r, c) = ((i / 8) as isize, (i % 8) as isize);
for &(dr, dc) in &offsets {
if r+dr >= 0 && c+dc >= 0 && r+dr < 8 && c+dc < 8 {
*attacks |= 1 << ((r + dr)*8 + c + dc);
}
}
}
}
#[derive(Copy, Clone)]
pub struct SMagic {
pub offset: usize,
pub mask: u64,
pub magic: u64,
pub shift: u32
}
impl SMagic {
pub unsafe fn att(&self, occ: u64) -> u64 {
let ind = (self.magic * (occ & self.mask)) >> self.shift;
MAP[self.offset + ind as usize]
}
}
pub unsafe fn get_piece_map(piece: u8, piece_map: &mut [SMagic; 64],
mut offset: usize, from_scratch: bool) -> usize {
let mut rng = thread_rng();
for (pos, entry) in piece_map.iter_mut().enumerate() {
let s = pos as u32;
let edges = ((ROW_1 | ROW_8) &!row(s)) |
((FILE_A | FILE_H) &!file(s));
// The mask for square's' is the set of moves on an empty board
let attacks: fn(u64, u32, u64) -> u64 = if piece == BISHOP { bishop_attacks } else { rook_attacks };
let mask = attacks(1 << s, s, 1 << s) &!edges;
let num_ones = mask.count_ones();
let shift = 64 - num_ones;
let mut occupancy = vec![0; 1 << num_ones];
let mut reference = vec![0; 1 << num_ones];
let mut size = 0;
let mut occ = 0;
loop {
occupancy[size] = occ;
reference[size] = attacks(1 << s, s, occ | (1 << s));
size += 1;
occ = (occ - mask) & mask;
if occ == 0 { break } // We will have enumerated all subsets of mask
}
let mut magic = if piece == BISHOP { BISHOP_MAGICS[pos] } else { ROOK_MAGICS[pos] };
'outer: loop {
if from_scratch { // Generate a new random magic from scratch
loop {
magic = rng.gen::<u64>() & rng.gen::<u64>() & rng.gen::<u64>();
if ((magic * mask) >> 56).count_ones() >= 6 { break }
}
}
let mut attacks = vec![0; size];
for i in 0..size {
let index = (magic * occupancy[i]) >> shift;
let attack = &mut attacks[index as usize];
if *attack!= 0 && *attack!= reference[i] {
assert!(from_scratch, "Error: Precalculated magic is incorrect. Square {}, for {} magic",
pos, if piece == BISHOP { "bishop" } else { "rook" } );
continue 'outer
}
*attack = reference[i];
}
*entry = SMagic { offset: offset, mask: mask, magic: magic, shift: shift };
for (i, &att) in attacks.iter().enumerate() {
MAP[offset + i] = att;
}
offset += size;
break // If we've reached this point, all from 0..size have been verified
}
}
offset
}
static BISHOP_MAGICS: [u64; 64] =
[306397059236266368, 6638343277122827280, 10377420549504106496, 9193021019258913, 2306408226914042898, 10379110636817760276, 27167319028441088, 7566153073497751552,
1513227076520969216, 301917653126479936, 72075465430409232, 2343002121441460228, 36033212782477344, 9223373154083475456, 6935629192638251008, 72621648200664064,
2310506081245267984, 2533291987569153, 146934404644733024, 1838417834950912, 579856052833622016, 1729946448243595776, 705208029025040, 2886877732040869888,
10092575566416331020, 5635409948247040, 738739924278198804, 4648849515743289408, 9233786889293807616, 1155253577929753088, 435164712050360592, 3026700562025580641,
4612284839965491969, 10448650511900137472, 571823356120080, 40569782189687936, 148620986995048708, 4901113822871308288, 4612077461748908288, 10204585674276944,
2534512027246592, 5766297627561820676, 13809969191200768, 1153062656578422784, 9318235838682899712, 11533824475839595776, 433770548762247233, 92326036501692936,
9227053213059129360, 577024872779350852, 108087561569959936, 582151826703646856, 81404176367767, 316415319130374273, 9113856212762624, 145453328103440392,
441392350330618400, 1126492748710916, 2309220790581891072, 3026423624667006980, 18019391702696464, 4516931289817600, 1450317422841301124, 9246488805123342592];
static ROOK_MAGICS: [u64; 64] =
[36028867955671040, 2395917338224361536, 936757656041832464, 648535942831284356, 36037595259731970, 13943151043426386048, 432349966580056576, 4683745813775001856,
1191624314978336800, 4611756662317916160, 4625338105090543616, 140806208356480, 1688987371057664, 9288708641522688, 153403870897537280, 281550411726850,
2401883155071024, 1206964838111645696, 166705754384925184, 36039792408011264, 10376580514281768960, 9148486532465664, 578787319189340418, 398007816633254020,
2341872150903791616, 2314850762536009728, 297238127310798880, 2251868801728768, 2594082183614301184, 820222482337235456, 37717655469424904, 577596144088011012,
1152991874030502016, 3171026856472219648, 20415869351890944, 4611844348286345472, 2455605323386324224, 140754676613632, 1740713828645089416, 58361257132164,
70370893791232, 9227880322828615684, 72092778695295040, 577023839834341392, 4723150143565660416, 563087661073408, 651083773116450, 72128789630550047,
153192758223054976, 869194865525653568, 4972009250306933248, 1031325449119138048, 1297041090863464576, 580401419157405824, 1657992643584, 306245066729521664,
15206439601351819394, 14143290885479661953, 1688988407201810, 18065251325837538, 1152927311403745429, 162411078742050817, 334255838724676, 27323018585852550];
|
{
unsafe { ROOK_MAP[from as usize].att(occ) }
}
|
identifier_body
|
magics.rs
|
use rand::{Rng, thread_rng};
use types::{BISHOP, ROOK};
use util::*;
static mut KING_MAP: [u64; 64] = [0; 64];
static mut KNIGHT_MAP: [u64; 64] = [0; 64];
static mut BISHOP_MAP: [SMagic; 64] = [SMagic { offset: 0, mask: 0, magic: 0, shift: 0 }; 64];
static mut ROOK_MAP: [SMagic; 64] = [SMagic { offset: 0, mask: 0, magic: 0, shift: 0 }; 64];
const MAP_SIZE: usize = 107648;
static mut MAP: [u64; MAP_SIZE] = [0; MAP_SIZE];
pub fn knight_moves(from: u32) -> u64 {
unsafe { KNIGHT_MAP[from as usize] }
}
pub fn king_moves(from: u32) -> u64 {
unsafe { KING_MAP[from as usize] }
}
pub fn bishop_moves(from: u32, occ: u64) -> u64 {
unsafe { BISHOP_MAP[from as usize].att(occ) }
}
pub fn rook_moves(from: u32, occ: u64) -> u64 {
unsafe { ROOK_MAP[from as usize].att(occ) }
}
pub fn queen_moves(from: u32, occ: u64) -> u64 {
unsafe { BISHOP_MAP[from as usize].att(occ) |
ROOK_MAP[from as usize].att(occ) }
}
pub unsafe fn init() {
king_map_init();
knight_map_init();
let size = get_piece_map(BISHOP, &mut BISHOP_MAP, 0, false);
let total = get_piece_map(ROOK, &mut ROOK_MAP, size, false);
assert!(total == MAP_SIZE);
}
pub unsafe fn knight_map_init() {
let offsets = vec![
(-1, -2), (-2, -1), (-2, 1), (-1, 2),
(1, -2), (2, -1), (2, 1), (1, 2)];
for (i, attacks) in KNIGHT_MAP.iter_mut().enumerate() {
let (r, c) = ((i / 8) as isize, (i % 8) as isize);
for &(dr, dc) in &offsets {
if r+dr >= 0 && c+dc >= 0 && r+dr < 8 && c+dc < 8
|
}
}
}
pub unsafe fn king_map_init() {
let offsets = vec![
(1, -1), (1, 0), (1, 1),
(0, -1), (0, 1),
(-1,-1), (-1, 0), (-1, 1)];
for (i, attacks) in KING_MAP.iter_mut().enumerate() {
let (r, c) = ((i / 8) as isize, (i % 8) as isize);
for &(dr, dc) in &offsets {
if r+dr >= 0 && c+dc >= 0 && r+dr < 8 && c+dc < 8 {
*attacks |= 1 << ((r + dr)*8 + c + dc);
}
}
}
}
#[derive(Copy, Clone)]
pub struct SMagic {
pub offset: usize,
pub mask: u64,
pub magic: u64,
pub shift: u32
}
impl SMagic {
pub unsafe fn att(&self, occ: u64) -> u64 {
let ind = (self.magic * (occ & self.mask)) >> self.shift;
MAP[self.offset + ind as usize]
}
}
pub unsafe fn get_piece_map(piece: u8, piece_map: &mut [SMagic; 64],
mut offset: usize, from_scratch: bool) -> usize {
let mut rng = thread_rng();
for (pos, entry) in piece_map.iter_mut().enumerate() {
let s = pos as u32;
let edges = ((ROW_1 | ROW_8) &!row(s)) |
((FILE_A | FILE_H) &!file(s));
// The mask for square's' is the set of moves on an empty board
let attacks: fn(u64, u32, u64) -> u64 = if piece == BISHOP { bishop_attacks } else { rook_attacks };
let mask = attacks(1 << s, s, 1 << s) &!edges;
let num_ones = mask.count_ones();
let shift = 64 - num_ones;
let mut occupancy = vec![0; 1 << num_ones];
let mut reference = vec![0; 1 << num_ones];
let mut size = 0;
let mut occ = 0;
loop {
occupancy[size] = occ;
reference[size] = attacks(1 << s, s, occ | (1 << s));
size += 1;
occ = (occ - mask) & mask;
if occ == 0 { break } // We will have enumerated all subsets of mask
}
let mut magic = if piece == BISHOP { BISHOP_MAGICS[pos] } else { ROOK_MAGICS[pos] };
'outer: loop {
if from_scratch { // Generate a new random magic from scratch
loop {
magic = rng.gen::<u64>() & rng.gen::<u64>() & rng.gen::<u64>();
if ((magic * mask) >> 56).count_ones() >= 6 { break }
}
}
let mut attacks = vec![0; size];
for i in 0..size {
let index = (magic * occupancy[i]) >> shift;
let attack = &mut attacks[index as usize];
if *attack!= 0 && *attack!= reference[i] {
assert!(from_scratch, "Error: Precalculated magic is incorrect. Square {}, for {} magic",
pos, if piece == BISHOP { "bishop" } else { "rook" } );
continue 'outer
}
*attack = reference[i];
}
*entry = SMagic { offset: offset, mask: mask, magic: magic, shift: shift };
for (i, &att) in attacks.iter().enumerate() {
MAP[offset + i] = att;
}
offset += size;
break // If we've reached this point, all from 0..size have been verified
}
}
offset
}
static BISHOP_MAGICS: [u64; 64] =
[306397059236266368, 6638343277122827280, 10377420549504106496, 9193021019258913, 2306408226914042898, 10379110636817760276, 27167319028441088, 7566153073497751552,
1513227076520969216, 301917653126479936, 72075465430409232, 2343002121441460228, 36033212782477344, 9223373154083475456, 6935629192638251008, 72621648200664064,
2310506081245267984, 2533291987569153, 146934404644733024, 1838417834950912, 579856052833622016, 1729946448243595776, 705208029025040, 2886877732040869888,
10092575566416331020, 5635409948247040, 738739924278198804, 4648849515743289408, 9233786889293807616, 1155253577929753088, 435164712050360592, 3026700562025580641,
4612284839965491969, 10448650511900137472, 571823356120080, 40569782189687936, 148620986995048708, 4901113822871308288, 4612077461748908288, 10204585674276944,
2534512027246592, 5766297627561820676, 13809969191200768, 1153062656578422784, 9318235838682899712, 11533824475839595776, 433770548762247233, 92326036501692936,
9227053213059129360, 577024872779350852, 108087561569959936, 582151826703646856, 81404176367767, 316415319130374273, 9113856212762624, 145453328103440392,
441392350330618400, 1126492748710916, 2309220790581891072, 3026423624667006980, 18019391702696464, 4516931289817600, 1450317422841301124, 9246488805123342592];
static ROOK_MAGICS: [u64; 64] =
[36028867955671040, 2395917338224361536, 936757656041832464, 648535942831284356, 36037595259731970, 13943151043426386048, 432349966580056576, 4683745813775001856,
1191624314978336800, 4611756662317916160, 4625338105090543616, 140806208356480, 1688987371057664, 9288708641522688, 153403870897537280, 281550411726850,
2401883155071024, 1206964838111645696, 166705754384925184, 36039792408011264, 10376580514281768960, 9148486532465664, 578787319189340418, 398007816633254020,
2341872150903791616, 2314850762536009728, 297238127310798880, 2251868801728768, 2594082183614301184, 820222482337235456, 37717655469424904, 577596144088011012,
1152991874030502016, 3171026856472219648, 20415869351890944, 4611844348286345472, 2455605323386324224, 140754676613632, 1740713828645089416, 58361257132164,
70370893791232, 9227880322828615684, 72092778695295040, 577023839834341392, 4723150143565660416, 563087661073408, 651083773116450, 72128789630550047,
153192758223054976, 869194865525653568, 4972009250306933248, 1031325449119138048, 1297041090863464576, 580401419157405824, 1657992643584, 306245066729521664,
15206439601351819394, 14143290885479661953, 1688988407201810, 18065251325837538, 1152927311403745429, 162411078742050817, 334255838724676, 27323018585852550];
|
{
*attacks |= 1 << ((r + dr)*8 + c + dc);
}
|
conditional_block
|
magics.rs
|
use rand::{Rng, thread_rng};
use types::{BISHOP, ROOK};
use util::*;
static mut KING_MAP: [u64; 64] = [0; 64];
static mut KNIGHT_MAP: [u64; 64] = [0; 64];
static mut BISHOP_MAP: [SMagic; 64] = [SMagic { offset: 0, mask: 0, magic: 0, shift: 0 }; 64];
static mut ROOK_MAP: [SMagic; 64] = [SMagic { offset: 0, mask: 0, magic: 0, shift: 0 }; 64];
const MAP_SIZE: usize = 107648;
static mut MAP: [u64; MAP_SIZE] = [0; MAP_SIZE];
pub fn knight_moves(from: u32) -> u64 {
unsafe { KNIGHT_MAP[from as usize] }
}
pub fn king_moves(from: u32) -> u64 {
unsafe { KING_MAP[from as usize] }
}
pub fn bishop_moves(from: u32, occ: u64) -> u64 {
unsafe { BISHOP_MAP[from as usize].att(occ) }
}
pub fn rook_moves(from: u32, occ: u64) -> u64 {
unsafe { ROOK_MAP[from as usize].att(occ) }
}
pub fn queen_moves(from: u32, occ: u64) -> u64 {
unsafe { BISHOP_MAP[from as usize].att(occ) |
ROOK_MAP[from as usize].att(occ) }
}
pub unsafe fn init() {
king_map_init();
knight_map_init();
let size = get_piece_map(BISHOP, &mut BISHOP_MAP, 0, false);
let total = get_piece_map(ROOK, &mut ROOK_MAP, size, false);
assert!(total == MAP_SIZE);
}
pub unsafe fn knight_map_init() {
let offsets = vec![
(-1, -2), (-2, -1), (-2, 1), (-1, 2),
(1, -2), (2, -1), (2, 1), (1, 2)];
for (i, attacks) in KNIGHT_MAP.iter_mut().enumerate() {
let (r, c) = ((i / 8) as isize, (i % 8) as isize);
for &(dr, dc) in &offsets {
if r+dr >= 0 && c+dc >= 0 && r+dr < 8 && c+dc < 8 {
*attacks |= 1 << ((r + dr)*8 + c + dc);
}
}
}
}
pub unsafe fn king_map_init() {
let offsets = vec![
(1, -1), (1, 0), (1, 1),
(0, -1), (0, 1),
(-1,-1), (-1, 0), (-1, 1)];
for (i, attacks) in KING_MAP.iter_mut().enumerate() {
let (r, c) = ((i / 8) as isize, (i % 8) as isize);
for &(dr, dc) in &offsets {
if r+dr >= 0 && c+dc >= 0 && r+dr < 8 && c+dc < 8 {
*attacks |= 1 << ((r + dr)*8 + c + dc);
}
}
}
}
#[derive(Copy, Clone)]
pub struct
|
{
pub offset: usize,
pub mask: u64,
pub magic: u64,
pub shift: u32
}
impl SMagic {
pub unsafe fn att(&self, occ: u64) -> u64 {
let ind = (self.magic * (occ & self.mask)) >> self.shift;
MAP[self.offset + ind as usize]
}
}
pub unsafe fn get_piece_map(piece: u8, piece_map: &mut [SMagic; 64],
mut offset: usize, from_scratch: bool) -> usize {
let mut rng = thread_rng();
for (pos, entry) in piece_map.iter_mut().enumerate() {
let s = pos as u32;
let edges = ((ROW_1 | ROW_8) &!row(s)) |
((FILE_A | FILE_H) &!file(s));
// The mask for square's' is the set of moves on an empty board
let attacks: fn(u64, u32, u64) -> u64 = if piece == BISHOP { bishop_attacks } else { rook_attacks };
let mask = attacks(1 << s, s, 1 << s) &!edges;
let num_ones = mask.count_ones();
let shift = 64 - num_ones;
let mut occupancy = vec![0; 1 << num_ones];
let mut reference = vec![0; 1 << num_ones];
let mut size = 0;
let mut occ = 0;
loop {
occupancy[size] = occ;
reference[size] = attacks(1 << s, s, occ | (1 << s));
size += 1;
occ = (occ - mask) & mask;
if occ == 0 { break } // We will have enumerated all subsets of mask
}
let mut magic = if piece == BISHOP { BISHOP_MAGICS[pos] } else { ROOK_MAGICS[pos] };
'outer: loop {
if from_scratch { // Generate a new random magic from scratch
loop {
magic = rng.gen::<u64>() & rng.gen::<u64>() & rng.gen::<u64>();
if ((magic * mask) >> 56).count_ones() >= 6 { break }
}
}
let mut attacks = vec![0; size];
for i in 0..size {
let index = (magic * occupancy[i]) >> shift;
let attack = &mut attacks[index as usize];
if *attack!= 0 && *attack!= reference[i] {
assert!(from_scratch, "Error: Precalculated magic is incorrect. Square {}, for {} magic",
pos, if piece == BISHOP { "bishop" } else { "rook" } );
continue 'outer
}
*attack = reference[i];
}
*entry = SMagic { offset: offset, mask: mask, magic: magic, shift: shift };
for (i, &att) in attacks.iter().enumerate() {
MAP[offset + i] = att;
}
offset += size;
break // If we've reached this point, all from 0..size have been verified
}
}
offset
}
static BISHOP_MAGICS: [u64; 64] =
[306397059236266368, 6638343277122827280, 10377420549504106496, 9193021019258913, 2306408226914042898, 10379110636817760276, 27167319028441088, 7566153073497751552,
1513227076520969216, 301917653126479936, 72075465430409232, 2343002121441460228, 36033212782477344, 9223373154083475456, 6935629192638251008, 72621648200664064,
2310506081245267984, 2533291987569153, 146934404644733024, 1838417834950912, 579856052833622016, 1729946448243595776, 705208029025040, 2886877732040869888,
10092575566416331020, 5635409948247040, 738739924278198804, 4648849515743289408, 9233786889293807616, 1155253577929753088, 435164712050360592, 3026700562025580641,
4612284839965491969, 10448650511900137472, 571823356120080, 40569782189687936, 148620986995048708, 4901113822871308288, 4612077461748908288, 10204585674276944,
2534512027246592, 5766297627561820676, 13809969191200768, 1153062656578422784, 9318235838682899712, 11533824475839595776, 433770548762247233, 92326036501692936,
9227053213059129360, 577024872779350852, 108087561569959936, 582151826703646856, 81404176367767, 316415319130374273, 9113856212762624, 145453328103440392,
441392350330618400, 1126492748710916, 2309220790581891072, 3026423624667006980, 18019391702696464, 4516931289817600, 1450317422841301124, 9246488805123342592];
static ROOK_MAGICS: [u64; 64] =
[36028867955671040, 2395917338224361536, 936757656041832464, 648535942831284356, 36037595259731970, 13943151043426386048, 432349966580056576, 4683745813775001856,
1191624314978336800, 4611756662317916160, 4625338105090543616, 140806208356480, 1688987371057664, 9288708641522688, 153403870897537280, 281550411726850,
2401883155071024, 1206964838111645696, 166705754384925184, 36039792408011264, 10376580514281768960, 9148486532465664, 578787319189340418, 398007816633254020,
2341872150903791616, 2314850762536009728, 297238127310798880, 2251868801728768, 2594082183614301184, 820222482337235456, 37717655469424904, 577596144088011012,
1152991874030502016, 3171026856472219648, 20415869351890944, 4611844348286345472, 2455605323386324224, 140754676613632, 1740713828645089416, 58361257132164,
70370893791232, 9227880322828615684, 72092778695295040, 577023839834341392, 4723150143565660416, 563087661073408, 651083773116450, 72128789630550047,
153192758223054976, 869194865525653568, 4972009250306933248, 1031325449119138048, 1297041090863464576, 580401419157405824, 1657992643584, 306245066729521664,
15206439601351819394, 14143290885479661953, 1688988407201810, 18065251325837538, 1152927311403745429, 162411078742050817, 334255838724676, 27323018585852550];
|
SMagic
|
identifier_name
|
magics.rs
|
use rand::{Rng, thread_rng};
use types::{BISHOP, ROOK};
use util::*;
static mut KING_MAP: [u64; 64] = [0; 64];
static mut KNIGHT_MAP: [u64; 64] = [0; 64];
static mut BISHOP_MAP: [SMagic; 64] = [SMagic { offset: 0, mask: 0, magic: 0, shift: 0 }; 64];
static mut ROOK_MAP: [SMagic; 64] = [SMagic { offset: 0, mask: 0, magic: 0, shift: 0 }; 64];
const MAP_SIZE: usize = 107648;
static mut MAP: [u64; MAP_SIZE] = [0; MAP_SIZE];
pub fn knight_moves(from: u32) -> u64 {
unsafe { KNIGHT_MAP[from as usize] }
}
pub fn king_moves(from: u32) -> u64 {
unsafe { KING_MAP[from as usize] }
}
|
pub fn rook_moves(from: u32, occ: u64) -> u64 {
unsafe { ROOK_MAP[from as usize].att(occ) }
}
pub fn queen_moves(from: u32, occ: u64) -> u64 {
unsafe { BISHOP_MAP[from as usize].att(occ) |
ROOK_MAP[from as usize].att(occ) }
}
pub unsafe fn init() {
king_map_init();
knight_map_init();
let size = get_piece_map(BISHOP, &mut BISHOP_MAP, 0, false);
let total = get_piece_map(ROOK, &mut ROOK_MAP, size, false);
assert!(total == MAP_SIZE);
}
pub unsafe fn knight_map_init() {
let offsets = vec![
(-1, -2), (-2, -1), (-2, 1), (-1, 2),
(1, -2), (2, -1), (2, 1), (1, 2)];
for (i, attacks) in KNIGHT_MAP.iter_mut().enumerate() {
let (r, c) = ((i / 8) as isize, (i % 8) as isize);
for &(dr, dc) in &offsets {
if r+dr >= 0 && c+dc >= 0 && r+dr < 8 && c+dc < 8 {
*attacks |= 1 << ((r + dr)*8 + c + dc);
}
}
}
}
pub unsafe fn king_map_init() {
let offsets = vec![
(1, -1), (1, 0), (1, 1),
(0, -1), (0, 1),
(-1,-1), (-1, 0), (-1, 1)];
for (i, attacks) in KING_MAP.iter_mut().enumerate() {
let (r, c) = ((i / 8) as isize, (i % 8) as isize);
for &(dr, dc) in &offsets {
if r+dr >= 0 && c+dc >= 0 && r+dr < 8 && c+dc < 8 {
*attacks |= 1 << ((r + dr)*8 + c + dc);
}
}
}
}
#[derive(Copy, Clone)]
pub struct SMagic {
pub offset: usize,
pub mask: u64,
pub magic: u64,
pub shift: u32
}
impl SMagic {
pub unsafe fn att(&self, occ: u64) -> u64 {
let ind = (self.magic * (occ & self.mask)) >> self.shift;
MAP[self.offset + ind as usize]
}
}
pub unsafe fn get_piece_map(piece: u8, piece_map: &mut [SMagic; 64],
mut offset: usize, from_scratch: bool) -> usize {
let mut rng = thread_rng();
for (pos, entry) in piece_map.iter_mut().enumerate() {
let s = pos as u32;
let edges = ((ROW_1 | ROW_8) &!row(s)) |
((FILE_A | FILE_H) &!file(s));
// The mask for square's' is the set of moves on an empty board
let attacks: fn(u64, u32, u64) -> u64 = if piece == BISHOP { bishop_attacks } else { rook_attacks };
let mask = attacks(1 << s, s, 1 << s) &!edges;
let num_ones = mask.count_ones();
let shift = 64 - num_ones;
let mut occupancy = vec![0; 1 << num_ones];
let mut reference = vec![0; 1 << num_ones];
let mut size = 0;
let mut occ = 0;
loop {
occupancy[size] = occ;
reference[size] = attacks(1 << s, s, occ | (1 << s));
size += 1;
occ = (occ - mask) & mask;
if occ == 0 { break } // We will have enumerated all subsets of mask
}
let mut magic = if piece == BISHOP { BISHOP_MAGICS[pos] } else { ROOK_MAGICS[pos] };
'outer: loop {
if from_scratch { // Generate a new random magic from scratch
loop {
magic = rng.gen::<u64>() & rng.gen::<u64>() & rng.gen::<u64>();
if ((magic * mask) >> 56).count_ones() >= 6 { break }
}
}
let mut attacks = vec![0; size];
for i in 0..size {
let index = (magic * occupancy[i]) >> shift;
let attack = &mut attacks[index as usize];
if *attack!= 0 && *attack!= reference[i] {
assert!(from_scratch, "Error: Precalculated magic is incorrect. Square {}, for {} magic",
pos, if piece == BISHOP { "bishop" } else { "rook" } );
continue 'outer
}
*attack = reference[i];
}
*entry = SMagic { offset: offset, mask: mask, magic: magic, shift: shift };
for (i, &att) in attacks.iter().enumerate() {
MAP[offset + i] = att;
}
offset += size;
break // If we've reached this point, all from 0..size have been verified
}
}
offset
}
// Precalculated "fancy magic" multipliers for bishop attack hashing, one per
// square (index 0 = a1 .. 63 = h8). Verified at startup by `get_piece_map`,
// which asserts if any entry fails to hash its occupancy subsets correctly.
static BISHOP_MAGICS: [u64; 64] =
    [306397059236266368, 6638343277122827280, 10377420549504106496, 9193021019258913, 2306408226914042898, 10379110636817760276, 27167319028441088, 7566153073497751552,
    1513227076520969216, 301917653126479936, 72075465430409232, 2343002121441460228, 36033212782477344, 9223373154083475456, 6935629192638251008, 72621648200664064,
    2310506081245267984, 2533291987569153, 146934404644733024, 1838417834950912, 579856052833622016, 1729946448243595776, 705208029025040, 2886877732040869888,
    10092575566416331020, 5635409948247040, 738739924278198804, 4648849515743289408, 9233786889293807616, 1155253577929753088, 435164712050360592, 3026700562025580641,
    4612284839965491969, 10448650511900137472, 571823356120080, 40569782189687936, 148620986995048708, 4901113822871308288, 4612077461748908288, 10204585674276944,
    2534512027246592, 5766297627561820676, 13809969191200768, 1153062656578422784, 9318235838682899712, 11533824475839595776, 433770548762247233, 92326036501692936,
    9227053213059129360, 577024872779350852, 108087561569959936, 582151826703646856, 81404176367767, 316415319130374273, 9113856212762624, 145453328103440392,
    441392350330618400, 1126492748710916, 2309220790581891072, 3026423624667006980, 18019391702696464, 4516931289817600, 1450317422841301124, 9246488805123342592];
// Precalculated "fancy magic" multipliers for rook attack hashing, one per
// square (index 0 = a1 .. 63 = h8). Verified at startup by `get_piece_map`.
static ROOK_MAGICS: [u64; 64] =
    [36028867955671040, 2395917338224361536, 936757656041832464, 648535942831284356, 36037595259731970, 13943151043426386048, 432349966580056576, 4683745813775001856,
    1191624314978336800, 4611756662317916160, 4625338105090543616, 140806208356480, 1688987371057664, 9288708641522688, 153403870897537280, 281550411726850,
    2401883155071024, 1206964838111645696, 166705754384925184, 36039792408011264, 10376580514281768960, 9148486532465664, 578787319189340418, 398007816633254020,
    2341872150903791616, 2314850762536009728, 297238127310798880, 2251868801728768, 2594082183614301184, 820222482337235456, 37717655469424904, 577596144088011012,
    1152991874030502016, 3171026856472219648, 20415869351890944, 4611844348286345472, 2455605323386324224, 140754676613632, 1740713828645089416, 58361257132164,
    70370893791232, 9227880322828615684, 72092778695295040, 577023839834341392, 4723150143565660416, 563087661073408, 651083773116450, 72128789630550047,
    153192758223054976, 869194865525653568, 4972009250306933248, 1031325449119138048, 1297041090863464576, 580401419157405824, 1657992643584, 306245066729521664,
    15206439601351819394, 14143290885479661953, 1688988407201810, 18065251325837538, 1152927311403745429, 162411078742050817, 334255838724676, 27323018585852550];
|
/// Bishop attack set from square `from` (0..64) given board occupancy `occ`.
pub fn bishop_moves(from: u32, occ: u64) -> u64 {
    let square = from as usize;
    // SAFETY: assumes BISHOP_MAP has been initialised at startup
    // (see `get_piece_map`) — TODO confirm against the init path.
    unsafe { BISHOP_MAP[square].att(occ) }
}
|
random_line_split
|
main.rs
|
extern crate clap;
use clap::{App, SubCommand};
fn main()
|
// + Used by "$ myapp test" with the following arguments
// > A list flag
// = Uses "-l" (usage is "$ myapp test -l"
// > A help flag (automatically generated by clap
// = Uses "-h" or "--help" (full usage "$ myapp test -h" or "$ myapp test --help")
// > A version flag (automatically generated by clap
// = Uses "-V" or "--version" (full usage "$ myapp test -V" or "$ myapp test --version")
// - A subcommand "help" (automatically generated by clap because we specified a subcommand of our own)
// + Used by "$ myapp help" (same functionality as "-h" or "--help")
let matches = App::new("MyApp")
.version("1.0")
.author("Kevin K. <[email protected]>")
.about("Does awesome things")
.args_from_usage(
"-c, --config=[FILE] 'Sets a custom config file'
<output> 'Sets an optional output file'
-d... 'Turn debugging information on'",
).subcommand(
SubCommand::with_name("test")
.about("does testing things")
.arg_from_usage("-l, --list 'lists test values'"),
).get_matches();
// You can check the value provided by positional arguments, or option arguments
if let Some(o) = matches.value_of("output") {
println!("Value for output: {}", o);
}
if let Some(c) = matches.value_of("config") {
println!("Value for config: {}", c);
}
// You can see how many times a particular flag or argument occurred
// Note, only flags can have multiple occurrences
match matches.occurrences_of("d") {
0 => println!("Debug mode is off"),
1 => println!("Debug mode is kind of on"),
2 => println!("Debug mode is on"),
_ => println!("Don't be crazy"),
}
// You can check for the existence of subcommands, and if found use their
// matches just as you would the top level app
if let Some(matches) = matches.subcommand_matches("test") {
// "$ myapp test" was run
if matches.is_present("list") {
// "$ myapp test -l" was run
println!("Printing testing lists...");
} else {
println!("Not printing testing lists...");
}
}
// Continued program logic goes here...
}
|
{
// This example shows how to create an application with several arguments using usage strings, which can be
// far less verbose that shown in 01b_QuickExample.rs, but is more readable. The downside is you cannot set
// the more advanced configuration options using this method (well...actually you can, you'll see ;) )
//
// The example below is functionally identical to the 01b_quick_example.rs and 01c_quick_example.rs
//
// Create an application with 5 possible arguments (2 auto generated) and 2 subcommands (1 auto generated)
// - A config file
// + Uses "-c filename" or "--config filename"
// - An output file
// + A positional argument (i.e. "$ myapp output_filename")
// - A debug flag
// + Uses "-d" or "--debug"
// + Allows multiple occurrences of such as "-dd" (for vary levels of debugging, as an example)
// - A help flag (automatically generated by clap)
// + Uses "-h" or "--help" (Only autogenerated if you do NOT specify your own "-h" or "--help")
// - A version flag (automatically generated by clap)
// + Uses "-V" or "--version" (Only autogenerated if you do NOT specify your own "-V" or "--version")
// - A subcommand "test" (subcommands behave like their own apps, with their own arguments
|
identifier_body
|
main.rs
|
extern crate clap;
use clap::{App, SubCommand};
fn
|
() {
// This example shows how to create an application with several arguments using usage strings, which can be
// far less verbose that shown in 01b_QuickExample.rs, but is more readable. The downside is you cannot set
// the more advanced configuration options using this method (well...actually you can, you'll see ;) )
//
// The example below is functionally identical to the 01b_quick_example.rs and 01c_quick_example.rs
//
// Create an application with 5 possible arguments (2 auto generated) and 2 subcommands (1 auto generated)
// - A config file
// + Uses "-c filename" or "--config filename"
// - An output file
// + A positional argument (i.e. "$ myapp output_filename")
// - A debug flag
// + Uses "-d" or "--debug"
// + Allows multiple occurrences of such as "-dd" (for vary levels of debugging, as an example)
// - A help flag (automatically generated by clap)
// + Uses "-h" or "--help" (Only autogenerated if you do NOT specify your own "-h" or "--help")
// - A version flag (automatically generated by clap)
// + Uses "-V" or "--version" (Only autogenerated if you do NOT specify your own "-V" or "--version")
// - A subcommand "test" (subcommands behave like their own apps, with their own arguments
// + Used by "$ myapp test" with the following arguments
// > A list flag
// = Uses "-l" (usage is "$ myapp test -l"
// > A help flag (automatically generated by clap
// = Uses "-h" or "--help" (full usage "$ myapp test -h" or "$ myapp test --help")
// > A version flag (automatically generated by clap
// = Uses "-V" or "--version" (full usage "$ myapp test -V" or "$ myapp test --version")
// - A subcommand "help" (automatically generated by clap because we specified a subcommand of our own)
// + Used by "$ myapp help" (same functionality as "-h" or "--help")
let matches = App::new("MyApp")
.version("1.0")
.author("Kevin K. <[email protected]>")
.about("Does awesome things")
.args_from_usage(
"-c, --config=[FILE] 'Sets a custom config file'
<output> 'Sets an optional output file'
-d... 'Turn debugging information on'",
).subcommand(
SubCommand::with_name("test")
.about("does testing things")
.arg_from_usage("-l, --list 'lists test values'"),
).get_matches();
// You can check the value provided by positional arguments, or option arguments
if let Some(o) = matches.value_of("output") {
println!("Value for output: {}", o);
}
if let Some(c) = matches.value_of("config") {
println!("Value for config: {}", c);
}
// You can see how many times a particular flag or argument occurred
// Note, only flags can have multiple occurrences
match matches.occurrences_of("d") {
0 => println!("Debug mode is off"),
1 => println!("Debug mode is kind of on"),
2 => println!("Debug mode is on"),
_ => println!("Don't be crazy"),
}
// You can check for the existence of subcommands, and if found use their
// matches just as you would the top level app
if let Some(matches) = matches.subcommand_matches("test") {
// "$ myapp test" was run
if matches.is_present("list") {
// "$ myapp test -l" was run
println!("Printing testing lists...");
} else {
println!("Not printing testing lists...");
}
}
// Continued program logic goes here...
}
|
main
|
identifier_name
|
main.rs
|
extern crate clap;
use clap::{App, SubCommand};
fn main() {
// This example shows how to create an application with several arguments using usage strings, which can be
// far less verbose that shown in 01b_QuickExample.rs, but is more readable. The downside is you cannot set
// the more advanced configuration options using this method (well...actually you can, you'll see ;) )
//
// The example below is functionally identical to the 01b_quick_example.rs and 01c_quick_example.rs
//
// Create an application with 5 possible arguments (2 auto generated) and 2 subcommands (1 auto generated)
// - A config file
// + Uses "-c filename" or "--config filename"
// - An output file
// + A positional argument (i.e. "$ myapp output_filename")
// - A debug flag
// + Uses "-d" or "--debug"
// + Allows multiple occurrences of such as "-dd" (for vary levels of debugging, as an example)
// - A help flag (automatically generated by clap)
// + Uses "-h" or "--help" (Only autogenerated if you do NOT specify your own "-h" or "--help")
// - A version flag (automatically generated by clap)
// + Uses "-V" or "--version" (Only autogenerated if you do NOT specify your own "-V" or "--version")
// - A subcommand "test" (subcommands behave like their own apps, with their own arguments
// + Used by "$ myapp test" with the following arguments
// > A list flag
// = Uses "-l" (usage is "$ myapp test -l"
// > A help flag (automatically generated by clap
// = Uses "-h" or "--help" (full usage "$ myapp test -h" or "$ myapp test --help")
// > A version flag (automatically generated by clap
// = Uses "-V" or "--version" (full usage "$ myapp test -V" or "$ myapp test --version")
// - A subcommand "help" (automatically generated by clap because we specified a subcommand of our own)
// + Used by "$ myapp help" (same functionality as "-h" or "--help")
let matches = App::new("MyApp")
.version("1.0")
.author("Kevin K. <[email protected]>")
.about("Does awesome things")
.args_from_usage(
"-c, --config=[FILE] 'Sets a custom config file'
<output> 'Sets an optional output file'
-d... 'Turn debugging information on'",
).subcommand(
SubCommand::with_name("test")
.about("does testing things")
.arg_from_usage("-l, --list 'lists test values'"),
).get_matches();
// You can check the value provided by positional arguments, or option arguments
if let Some(o) = matches.value_of("output")
|
if let Some(c) = matches.value_of("config") {
println!("Value for config: {}", c);
}
// You can see how many times a particular flag or argument occurred
// Note, only flags can have multiple occurrences
match matches.occurrences_of("d") {
0 => println!("Debug mode is off"),
1 => println!("Debug mode is kind of on"),
2 => println!("Debug mode is on"),
_ => println!("Don't be crazy"),
}
// You can check for the existence of subcommands, and if found use their
// matches just as you would the top level app
if let Some(matches) = matches.subcommand_matches("test") {
// "$ myapp test" was run
if matches.is_present("list") {
// "$ myapp test -l" was run
println!("Printing testing lists...");
} else {
println!("Not printing testing lists...");
}
}
// Continued program logic goes here...
}
|
{
println!("Value for output: {}", o);
}
|
conditional_block
|
main.rs
|
extern crate clap;
use clap::{App, SubCommand};
fn main() {
// This example shows how to create an application with several arguments using usage strings, which can be
// far less verbose that shown in 01b_QuickExample.rs, but is more readable. The downside is you cannot set
// the more advanced configuration options using this method (well...actually you can, you'll see ;) )
//
// The example below is functionally identical to the 01b_quick_example.rs and 01c_quick_example.rs
//
// Create an application with 5 possible arguments (2 auto generated) and 2 subcommands (1 auto generated)
// - A config file
// + Uses "-c filename" or "--config filename"
// - An output file
// + A positional argument (i.e. "$ myapp output_filename")
// - A debug flag
// + Uses "-d" or "--debug"
// + Allows multiple occurrences of such as "-dd" (for vary levels of debugging, as an example)
// - A help flag (automatically generated by clap)
// + Uses "-h" or "--help" (Only autogenerated if you do NOT specify your own "-h" or "--help")
// - A version flag (automatically generated by clap)
// + Uses "-V" or "--version" (Only autogenerated if you do NOT specify your own "-V" or "--version")
// - A subcommand "test" (subcommands behave like their own apps, with their own arguments
// + Used by "$ myapp test" with the following arguments
// > A list flag
// = Uses "-l" (usage is "$ myapp test -l"
// > A help flag (automatically generated by clap
// = Uses "-h" or "--help" (full usage "$ myapp test -h" or "$ myapp test --help")
// > A version flag (automatically generated by clap
// = Uses "-V" or "--version" (full usage "$ myapp test -V" or "$ myapp test --version")
// - A subcommand "help" (automatically generated by clap because we specified a subcommand of our own)
// + Used by "$ myapp help" (same functionality as "-h" or "--help")
let matches = App::new("MyApp")
.version("1.0")
.author("Kevin K. <[email protected]>")
.about("Does awesome things")
.args_from_usage(
"-c, --config=[FILE] 'Sets a custom config file'
<output> 'Sets an optional output file'
-d... 'Turn debugging information on'",
).subcommand(
SubCommand::with_name("test")
.about("does testing things")
.arg_from_usage("-l, --list 'lists test values'"),
).get_matches();
// You can check the value provided by positional arguments, or option arguments
if let Some(o) = matches.value_of("output") {
println!("Value for output: {}", o);
}
|
// You can see how many times a particular flag or argument occurred
// Note, only flags can have multiple occurrences
match matches.occurrences_of("d") {
0 => println!("Debug mode is off"),
1 => println!("Debug mode is kind of on"),
2 => println!("Debug mode is on"),
_ => println!("Don't be crazy"),
}
// You can check for the existence of subcommands, and if found use their
// matches just as you would the top level app
if let Some(matches) = matches.subcommand_matches("test") {
// "$ myapp test" was run
if matches.is_present("list") {
// "$ myapp test -l" was run
println!("Printing testing lists...");
} else {
println!("Not printing testing lists...");
}
}
// Continued program logic goes here...
}
|
if let Some(c) = matches.value_of("config") {
println!("Value for config: {}", c);
}
|
random_line_split
|
cache.rs
|
use std::env;
use std::ffi::OsStr;
use std::fs;
use std::io::Read;
use std::iter;
use std::path::{Path, PathBuf};
use app_dirs::{get_app_root, AppDataType};
use flate2::read::GzDecoder;
use log::debug;
use reqwest::{blocking::Client, Proxy};
use std::time::{Duration, SystemTime};
use tar::Archive;
use walkdir::{DirEntry, WalkDir};
use crate::error::TealdeerError::{self, CacheError, UpdateError};
use crate::types::{OsType, PathSource};
/// Local cache of tldr pages: knows where to fetch the archive from and
/// which platform subdirectory to prefer when looking pages up.
#[derive(Debug)]
pub struct Cache {
    url: String, // Archive URL fetched by `download()`
    os: OsType,  // Platform used by `get_platform_dir()` to pick a pages subdir
}
/// Outcome of a successful page lookup: the page file itself plus an
/// optional local patch file to be applied on top of it.
#[derive(Debug)]
pub struct PageLookupResult {
    page_path: PathBuf,
    patch_path: Option<PathBuf>,
}

impl PageLookupResult {
    /// Create a lookup result for `page_path` with no patch attached.
    pub fn with_page(page_path: PathBuf) -> Self {
        Self { page_path, patch_path: None }
    }

    /// Attach (or clear) the optional patch path, builder-style.
    pub fn with_optional_patch(mut self, patch_path: Option<PathBuf>) -> Self {
        self.patch_path = patch_path;
        self
    }

    /// Iterate over the page path, followed by the patch path if present.
    pub fn paths(&self) -> impl Iterator<Item = &Path> {
        let page = iter::once(self.page_path.as_path());
        page.chain(self.patch_path.iter().map(|p| p.as_path()))
    }
}
impl Cache {
pub fn new<S>(url: S, os: OsType) -> Self
where
S: Into<String>,
{
Self {
url: url.into(),
os,
}
}
/// Return the path to the cache directory.
pub fn get_cache_dir() -> Result<(PathBuf, PathSource), TealdeerError> {
// Allow overriding the cache directory by setting the
// $TEALDEER_CACHE_DIR env variable.
if let Ok(value) = env::var("TEALDEER_CACHE_DIR") {
let path = PathBuf::from(value);
if path.exists() && path.is_dir() {
return Ok((path, PathSource::EnvVar));
}
return Err(CacheError(
"Path specified by $TEALDEER_CACHE_DIR \
does not exist or is not a directory."
.into(),
));
};
// Otherwise, fall back to user cache directory.
match get_app_root(AppDataType::UserCache, &crate::APP_INFO) {
Ok(dirs) => Ok((dirs, PathSource::OsConvention)),
Err(_) => Err(CacheError(
"Could not determine user cache directory.".into(),
)),
}
}
/// Download the archive
fn download(&self) -> Result<Vec<u8>, TealdeerError> {
let mut builder = Client::builder();
if let Ok(ref host) = env::var("HTTP_PROXY") {
if let Ok(proxy) = Proxy::http(host) {
builder = builder.proxy(proxy);
}
}
if let Ok(ref host) = env::var("HTTPS_PROXY") {
if let Ok(proxy) = Proxy::https(host) {
builder = builder.proxy(proxy);
}
}
let client = builder.build().unwrap_or_else(|_| Client::new());
let mut resp = client.get(&self.url).send()?;
let mut buf: Vec<u8> = vec![];
let bytes_downloaded = resp.copy_to(&mut buf)?;
debug!("{} bytes downloaded", bytes_downloaded);
Ok(buf)
}
/// Decompress and open the archive
fn decompress<R: Read>(reader: R) -> Archive<GzDecoder<R>> {
Archive::new(GzDecoder::new(reader))
}
/// Update the pages cache.
pub fn update(&self) -> Result<(), TealdeerError> {
// First, download the compressed data
let bytes: Vec<u8> = self.download()?;
// Decompress the response body into an `Archive`
let mut archive = Self::decompress(&bytes[..]);
// Determine paths
let (cache_dir, _) = Self::get_cache_dir()?;
// Make sure that cache directory exists
debug!("Ensure cache directory {:?} exists", &cache_dir);
fs::create_dir_all(&cache_dir)
.map_err(|e| UpdateError(format!("Could not create cache directory: {}", e)))?;
// Clear cache directory
// Note: This is not the best solution. Ideally we would download the
// archive to a temporary directory and then swap the two directories.
// But renaming a directory doesn't work across filesystems and Rust
// does not yet offer a recursive directory copying function. So for
// now, we'll use this approach.
Self::clear()?;
// Extract archive
archive
.unpack(&cache_dir)
.map_err(|e| UpdateError(format!("Could not unpack compressed data: {}", e)))?;
Ok(())
}
/// Return the duration since the cache directory was last modified.
pub fn last_update() -> Option<Duration> {
if let Ok((cache_dir, _)) = Self::get_cache_dir() {
if let Ok(metadata) = fs::metadata(cache_dir.join("tldr-master")) {
if let Ok(mtime) = metadata.modified() {
let now = SystemTime::now();
return now.duration_since(mtime).ok();
};
};
};
None
}
/// Return the platform directory.
fn get_platform_dir(&self) -> Option<&'static str> {
match self.os {
OsType::Linux => Some("linux"),
OsType::OsX => Some("osx"),
OsType::SunOs => Some("sunos"),
OsType::Windows => Some("windows"),
OsType::Other => None,
}
}
/// Check for pages for a given platform in one of the given languages.
fn find_page_for_platform(
page_name: &str,
cache_dir: &Path,
platform: &str,
language_dirs: &[String],
) -> Option<PathBuf> {
language_dirs
.iter()
.map(|lang_dir| cache_dir.join(lang_dir).join(platform).join(page_name))
.find(|path| path.exists() && path.is_file())
}
/// Look up custom patch (<name>.patch). If it exists, store it in a variable.
fn find_patch(patch_name: &str, custom_pages_dir: Option<&Path>) -> Option<PathBuf> {
custom_pages_dir
.map(|custom_dir| custom_dir.join(patch_name))
.filter(|path| path.exists() && path.is_file())
}
/// Search for a page and return the path to it.
pub fn find_page(
&self,
name: &str,
languages: &[String],
custom_pages_dir: Option<&Path>,
) -> Option<PageLookupResult> {
let page_filename = format!("{}.md", name);
let patch_filename = format!("{}.patch", name);
let custom_filename = format!("{}.page", name);
// Get cache dir
let cache_dir = match Self::get_cache_dir() {
Ok((cache_dir, _)) => cache_dir.join("tldr-master"),
Err(e) => {
log::error!("Could not get cache directory: {}", e);
return None;
}
};
let lang_dirs: Vec<String> = languages
.iter()
.map(|lang| {
if lang == "en" {
String::from("pages")
} else {
format!("pages.{}", lang)
}
})
.collect();
// Look up custom page (<name>.page). If it exists, return it directly
if let Some(config_dir) = custom_pages_dir {
let custom_page = config_dir.join(custom_filename);
if custom_page.exists() && custom_page.is_file() {
return Some(PageLookupResult::with_page(custom_page));
|
let patch_path = Self::find_patch(&patch_filename, custom_pages_dir.as_deref());
// Try to find a platform specific path next, append custom patch to it.
if let Some(pf) = self.get_platform_dir() {
if let Some(page) =
Self::find_page_for_platform(&page_filename, &cache_dir, pf, &lang_dirs)
{
return Some(PageLookupResult::with_page(page).with_optional_patch(patch_path));
}
}
// Did not find platform specific results, fall back to "common"
Self::find_page_for_platform(&page_filename, &cache_dir, "common", &lang_dirs)
.map(|page| PageLookupResult::with_page(page).with_optional_patch(patch_path))
}
/// Return the available pages.
pub fn list_pages(&self) -> Result<Vec<String>, TealdeerError> {
// Determine platforms directory and platform
let (cache_dir, _) = Self::get_cache_dir()?;
let platforms_dir = cache_dir.join("tldr-master").join("pages");
let platform_dir = self.get_platform_dir();
// Closure that allows the WalkDir instance to traverse platform
// specific and common page directories, but not others.
let should_walk = |entry: &DirEntry| -> bool {
let file_type = entry.file_type();
let file_name = match entry.file_name().to_str() {
Some(name) => name,
None => return false,
};
if file_type.is_dir() {
if file_name == "common" {
return true;
}
if let Some(platform) = platform_dir {
return file_name == platform;
}
} else if file_type.is_file() {
return true;
}
false
};
// Recursively walk through common and (if applicable) platform specific directory
let mut pages = WalkDir::new(platforms_dir)
.min_depth(1) // Skip root directory
.into_iter()
.filter_entry(|e| should_walk(e)) // Filter out pages for other architectures
.filter_map(Result::ok) // Convert results to options, filter out errors
.filter_map(|e| {
let path = e.path();
let extension = &path.extension().and_then(OsStr::to_str).unwrap_or("");
if e.file_type().is_file() && extension == &"md" {
path.file_stem()
.and_then(|stem| stem.to_str().map(|s| s.into()))
} else {
None
}
})
.collect::<Vec<String>>();
pages.sort();
pages.dedup();
Ok(pages)
}
/// Delete the cache directory.
pub fn clear() -> Result<(), TealdeerError> {
let (path, _) = Self::get_cache_dir()?;
if path.exists() && path.is_dir() {
fs::remove_dir_all(&path).map_err(|_| {
CacheError(format!(
"Could not remove cache directory ({}).",
path.display()
))
})?;
} else if path.exists() {
return Err(CacheError(format!(
"Cache path ({}) is not a directory.",
path.display()
)));
} else {
return Err(CacheError(format!(
"Cache path ({}) does not exist.",
path.display()
)));
};
Ok(())
}
}
/// Unit Tests for cache module
#[cfg(test)]
mod tests {
    use super::*;
    // With both a page and a patch, `paths()` yields them in that order.
    #[test]
    fn test_page_lookup_result_iter_with_patch() {
        let lookup = PageLookupResult::with_page(PathBuf::from("test.page"))
            .with_optional_patch(Some(PathBuf::from("test.patch")));
        let mut iter = lookup.paths();
        assert_eq!(iter.next(), Some(Path::new("test.page")));
        assert_eq!(iter.next(), Some(Path::new("test.patch")));
        assert_eq!(iter.next(), None);
    }
    // Without a patch, only the page path is yielded.
    #[test]
    fn test_page_lookup_result_iter_no_patch() {
        let lookup = PageLookupResult::with_page(PathBuf::from("test.page"));
        let mut iter = lookup.paths();
        assert_eq!(iter.next(), Some(Path::new("test.page")));
        assert_eq!(iter.next(), None);
    }
}
|
}
}
|
random_line_split
|
cache.rs
|
use std::env;
use std::ffi::OsStr;
use std::fs;
use std::io::Read;
use std::iter;
use std::path::{Path, PathBuf};
use app_dirs::{get_app_root, AppDataType};
use flate2::read::GzDecoder;
use log::debug;
use reqwest::{blocking::Client, Proxy};
use std::time::{Duration, SystemTime};
use tar::Archive;
use walkdir::{DirEntry, WalkDir};
use crate::error::TealdeerError::{self, CacheError, UpdateError};
use crate::types::{OsType, PathSource};
#[derive(Debug)]
pub struct Cache {
url: String,
os: OsType,
}
#[derive(Debug)]
pub struct PageLookupResult {
page_path: PathBuf,
patch_path: Option<PathBuf>,
}
impl PageLookupResult {
pub fn with_page(page_path: PathBuf) -> Self {
Self {
page_path,
patch_path: None,
}
}
pub fn with_optional_patch(mut self, patch_path: Option<PathBuf>) -> Self
|
pub fn paths(&self) -> impl Iterator<Item = &Path> {
iter::once(self.page_path.as_path()).chain(self.patch_path.as_deref().into_iter())
}
}
impl Cache {
pub fn new<S>(url: S, os: OsType) -> Self
where
S: Into<String>,
{
Self {
url: url.into(),
os,
}
}
/// Return the path to the cache directory.
pub fn get_cache_dir() -> Result<(PathBuf, PathSource), TealdeerError> {
// Allow overriding the cache directory by setting the
// $TEALDEER_CACHE_DIR env variable.
if let Ok(value) = env::var("TEALDEER_CACHE_DIR") {
let path = PathBuf::from(value);
if path.exists() && path.is_dir() {
return Ok((path, PathSource::EnvVar));
}
return Err(CacheError(
"Path specified by $TEALDEER_CACHE_DIR \
does not exist or is not a directory."
.into(),
));
};
// Otherwise, fall back to user cache directory.
match get_app_root(AppDataType::UserCache, &crate::APP_INFO) {
Ok(dirs) => Ok((dirs, PathSource::OsConvention)),
Err(_) => Err(CacheError(
"Could not determine user cache directory.".into(),
)),
}
}
/// Download the archive
fn download(&self) -> Result<Vec<u8>, TealdeerError> {
let mut builder = Client::builder();
if let Ok(ref host) = env::var("HTTP_PROXY") {
if let Ok(proxy) = Proxy::http(host) {
builder = builder.proxy(proxy);
}
}
if let Ok(ref host) = env::var("HTTPS_PROXY") {
if let Ok(proxy) = Proxy::https(host) {
builder = builder.proxy(proxy);
}
}
let client = builder.build().unwrap_or_else(|_| Client::new());
let mut resp = client.get(&self.url).send()?;
let mut buf: Vec<u8> = vec![];
let bytes_downloaded = resp.copy_to(&mut buf)?;
debug!("{} bytes downloaded", bytes_downloaded);
Ok(buf)
}
/// Decompress and open the archive
fn decompress<R: Read>(reader: R) -> Archive<GzDecoder<R>> {
Archive::new(GzDecoder::new(reader))
}
/// Update the pages cache.
pub fn update(&self) -> Result<(), TealdeerError> {
// First, download the compressed data
let bytes: Vec<u8> = self.download()?;
// Decompress the response body into an `Archive`
let mut archive = Self::decompress(&bytes[..]);
// Determine paths
let (cache_dir, _) = Self::get_cache_dir()?;
// Make sure that cache directory exists
debug!("Ensure cache directory {:?} exists", &cache_dir);
fs::create_dir_all(&cache_dir)
.map_err(|e| UpdateError(format!("Could not create cache directory: {}", e)))?;
// Clear cache directory
// Note: This is not the best solution. Ideally we would download the
// archive to a temporary directory and then swap the two directories.
// But renaming a directory doesn't work across filesystems and Rust
// does not yet offer a recursive directory copying function. So for
// now, we'll use this approach.
Self::clear()?;
// Extract archive
archive
.unpack(&cache_dir)
.map_err(|e| UpdateError(format!("Could not unpack compressed data: {}", e)))?;
Ok(())
}
/// Return the duration since the cache directory was last modified.
pub fn last_update() -> Option<Duration> {
if let Ok((cache_dir, _)) = Self::get_cache_dir() {
if let Ok(metadata) = fs::metadata(cache_dir.join("tldr-master")) {
if let Ok(mtime) = metadata.modified() {
let now = SystemTime::now();
return now.duration_since(mtime).ok();
};
};
};
None
}
/// Return the platform directory.
fn get_platform_dir(&self) -> Option<&'static str> {
match self.os {
OsType::Linux => Some("linux"),
OsType::OsX => Some("osx"),
OsType::SunOs => Some("sunos"),
OsType::Windows => Some("windows"),
OsType::Other => None,
}
}
/// Check for pages for a given platform in one of the given languages.
fn find_page_for_platform(
page_name: &str,
cache_dir: &Path,
platform: &str,
language_dirs: &[String],
) -> Option<PathBuf> {
language_dirs
.iter()
.map(|lang_dir| cache_dir.join(lang_dir).join(platform).join(page_name))
.find(|path| path.exists() && path.is_file())
}
/// Look up custom patch (<name>.patch). If it exists, store it in a variable.
fn find_patch(patch_name: &str, custom_pages_dir: Option<&Path>) -> Option<PathBuf> {
custom_pages_dir
.map(|custom_dir| custom_dir.join(patch_name))
.filter(|path| path.exists() && path.is_file())
}
/// Search for a page and return the path to it.
pub fn find_page(
&self,
name: &str,
languages: &[String],
custom_pages_dir: Option<&Path>,
) -> Option<PageLookupResult> {
let page_filename = format!("{}.md", name);
let patch_filename = format!("{}.patch", name);
let custom_filename = format!("{}.page", name);
// Get cache dir
let cache_dir = match Self::get_cache_dir() {
Ok((cache_dir, _)) => cache_dir.join("tldr-master"),
Err(e) => {
log::error!("Could not get cache directory: {}", e);
return None;
}
};
let lang_dirs: Vec<String> = languages
.iter()
.map(|lang| {
if lang == "en" {
String::from("pages")
} else {
format!("pages.{}", lang)
}
})
.collect();
// Look up custom page (<name>.page). If it exists, return it directly
if let Some(config_dir) = custom_pages_dir {
let custom_page = config_dir.join(custom_filename);
if custom_page.exists() && custom_page.is_file() {
return Some(PageLookupResult::with_page(custom_page));
}
}
let patch_path = Self::find_patch(&patch_filename, custom_pages_dir.as_deref());
// Try to find a platform specific path next, append custom patch to it.
if let Some(pf) = self.get_platform_dir() {
if let Some(page) =
Self::find_page_for_platform(&page_filename, &cache_dir, pf, &lang_dirs)
{
return Some(PageLookupResult::with_page(page).with_optional_patch(patch_path));
}
}
// Did not find platform specific results, fall back to "common"
Self::find_page_for_platform(&page_filename, &cache_dir, "common", &lang_dirs)
.map(|page| PageLookupResult::with_page(page).with_optional_patch(patch_path))
}
/// Return the available pages.
pub fn list_pages(&self) -> Result<Vec<String>, TealdeerError> {
// Determine platforms directory and platform
let (cache_dir, _) = Self::get_cache_dir()?;
let platforms_dir = cache_dir.join("tldr-master").join("pages");
let platform_dir = self.get_platform_dir();
// Closure that allows the WalkDir instance to traverse platform
// specific and common page directories, but not others.
let should_walk = |entry: &DirEntry| -> bool {
let file_type = entry.file_type();
let file_name = match entry.file_name().to_str() {
Some(name) => name,
None => return false,
};
if file_type.is_dir() {
if file_name == "common" {
return true;
}
if let Some(platform) = platform_dir {
return file_name == platform;
}
} else if file_type.is_file() {
return true;
}
false
};
// Recursively walk through common and (if applicable) platform specific directory
let mut pages = WalkDir::new(platforms_dir)
.min_depth(1) // Skip root directory
.into_iter()
.filter_entry(|e| should_walk(e)) // Filter out pages for other architectures
.filter_map(Result::ok) // Convert results to options, filter out errors
.filter_map(|e| {
let path = e.path();
let extension = &path.extension().and_then(OsStr::to_str).unwrap_or("");
if e.file_type().is_file() && extension == &"md" {
path.file_stem()
.and_then(|stem| stem.to_str().map(|s| s.into()))
} else {
None
}
})
.collect::<Vec<String>>();
pages.sort();
pages.dedup();
Ok(pages)
}
/// Delete the cache directory.
pub fn clear() -> Result<(), TealdeerError> {
let (path, _) = Self::get_cache_dir()?;
if path.exists() && path.is_dir() {
fs::remove_dir_all(&path).map_err(|_| {
CacheError(format!(
"Could not remove cache directory ({}).",
path.display()
))
})?;
} else if path.exists() {
return Err(CacheError(format!(
"Cache path ({}) is not a directory.",
path.display()
)));
} else {
return Err(CacheError(format!(
"Cache path ({}) does not exist.",
path.display()
)));
};
Ok(())
}
}
/// Unit Tests for cache module
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_page_lookup_result_iter_with_patch() {
let lookup = PageLookupResult::with_page(PathBuf::from("test.page"))
.with_optional_patch(Some(PathBuf::from("test.patch")));
let mut iter = lookup.paths();
assert_eq!(iter.next(), Some(Path::new("test.page")));
assert_eq!(iter.next(), Some(Path::new("test.patch")));
assert_eq!(iter.next(), None);
}
#[test]
fn test_page_lookup_result_iter_no_patch() {
let lookup = PageLookupResult::with_page(PathBuf::from("test.page"));
let mut iter = lookup.paths();
assert_eq!(iter.next(), Some(Path::new("test.page")));
assert_eq!(iter.next(), None);
}
}
|
{
self.patch_path = patch_path;
self
}
|
identifier_body
|
cache.rs
|
use std::env;
use std::ffi::OsStr;
use std::fs;
use std::io::Read;
use std::iter;
use std::path::{Path, PathBuf};
use app_dirs::{get_app_root, AppDataType};
use flate2::read::GzDecoder;
use log::debug;
use reqwest::{blocking::Client, Proxy};
use std::time::{Duration, SystemTime};
use tar::Archive;
use walkdir::{DirEntry, WalkDir};
use crate::error::TealdeerError::{self, CacheError, UpdateError};
use crate::types::{OsType, PathSource};
#[derive(Debug)]
pub struct Cache {
url: String,
os: OsType,
}
#[derive(Debug)]
pub struct PageLookupResult {
page_path: PathBuf,
patch_path: Option<PathBuf>,
}
impl PageLookupResult {
pub fn with_page(page_path: PathBuf) -> Self {
Self {
page_path,
patch_path: None,
}
}
pub fn with_optional_patch(mut self, patch_path: Option<PathBuf>) -> Self {
self.patch_path = patch_path;
self
}
pub fn paths(&self) -> impl Iterator<Item = &Path> {
iter::once(self.page_path.as_path()).chain(self.patch_path.as_deref().into_iter())
}
}
impl Cache {
pub fn new<S>(url: S, os: OsType) -> Self
where
S: Into<String>,
{
Self {
url: url.into(),
os,
}
}
/// Return the path to the cache directory.
pub fn get_cache_dir() -> Result<(PathBuf, PathSource), TealdeerError> {
// Allow overriding the cache directory by setting the
// $TEALDEER_CACHE_DIR env variable.
if let Ok(value) = env::var("TEALDEER_CACHE_DIR") {
let path = PathBuf::from(value);
if path.exists() && path.is_dir() {
return Ok((path, PathSource::EnvVar));
}
return Err(CacheError(
"Path specified by $TEALDEER_CACHE_DIR \
does not exist or is not a directory."
.into(),
));
};
// Otherwise, fall back to user cache directory.
match get_app_root(AppDataType::UserCache, &crate::APP_INFO) {
Ok(dirs) => Ok((dirs, PathSource::OsConvention)),
Err(_) => Err(CacheError(
"Could not determine user cache directory.".into(),
)),
}
}
/// Download the archive
fn download(&self) -> Result<Vec<u8>, TealdeerError> {
let mut builder = Client::builder();
if let Ok(ref host) = env::var("HTTP_PROXY") {
if let Ok(proxy) = Proxy::http(host) {
builder = builder.proxy(proxy);
}
}
if let Ok(ref host) = env::var("HTTPS_PROXY") {
if let Ok(proxy) = Proxy::https(host) {
builder = builder.proxy(proxy);
}
}
let client = builder.build().unwrap_or_else(|_| Client::new());
let mut resp = client.get(&self.url).send()?;
let mut buf: Vec<u8> = vec![];
let bytes_downloaded = resp.copy_to(&mut buf)?;
debug!("{} bytes downloaded", bytes_downloaded);
Ok(buf)
}
/// Decompress and open the archive
fn decompress<R: Read>(reader: R) -> Archive<GzDecoder<R>> {
Archive::new(GzDecoder::new(reader))
}
/// Update the pages cache.
pub fn update(&self) -> Result<(), TealdeerError> {
// First, download the compressed data
let bytes: Vec<u8> = self.download()?;
// Decompress the response body into an `Archive`
let mut archive = Self::decompress(&bytes[..]);
// Determine paths
let (cache_dir, _) = Self::get_cache_dir()?;
// Make sure that cache directory exists
debug!("Ensure cache directory {:?} exists", &cache_dir);
fs::create_dir_all(&cache_dir)
.map_err(|e| UpdateError(format!("Could not create cache directory: {}", e)))?;
// Clear cache directory
// Note: This is not the best solution. Ideally we would download the
// archive to a temporary directory and then swap the two directories.
// But renaming a directory doesn't work across filesystems and Rust
// does not yet offer a recursive directory copying function. So for
// now, we'll use this approach.
Self::clear()?;
// Extract archive
archive
.unpack(&cache_dir)
.map_err(|e| UpdateError(format!("Could not unpack compressed data: {}", e)))?;
Ok(())
}
/// Return the duration since the cache directory was last modified.
pub fn last_update() -> Option<Duration> {
if let Ok((cache_dir, _)) = Self::get_cache_dir() {
if let Ok(metadata) = fs::metadata(cache_dir.join("tldr-master")) {
if let Ok(mtime) = metadata.modified() {
let now = SystemTime::now();
return now.duration_since(mtime).ok();
};
};
};
None
}
/// Return the platform directory.
fn get_platform_dir(&self) -> Option<&'static str> {
match self.os {
OsType::Linux => Some("linux"),
OsType::OsX => Some("osx"),
OsType::SunOs => Some("sunos"),
OsType::Windows => Some("windows"),
OsType::Other => None,
}
}
/// Check for pages for a given platform in one of the given languages.
fn find_page_for_platform(
page_name: &str,
cache_dir: &Path,
platform: &str,
language_dirs: &[String],
) -> Option<PathBuf> {
language_dirs
.iter()
.map(|lang_dir| cache_dir.join(lang_dir).join(platform).join(page_name))
.find(|path| path.exists() && path.is_file())
}
/// Look up custom patch (<name>.patch). If it exists, store it in a variable.
fn find_patch(patch_name: &str, custom_pages_dir: Option<&Path>) -> Option<PathBuf> {
custom_pages_dir
.map(|custom_dir| custom_dir.join(patch_name))
.filter(|path| path.exists() && path.is_file())
}
/// Search for a page and return the path to it.
pub fn find_page(
&self,
name: &str,
languages: &[String],
custom_pages_dir: Option<&Path>,
) -> Option<PageLookupResult> {
let page_filename = format!("{}.md", name);
let patch_filename = format!("{}.patch", name);
let custom_filename = format!("{}.page", name);
// Get cache dir
let cache_dir = match Self::get_cache_dir() {
Ok((cache_dir, _)) => cache_dir.join("tldr-master"),
Err(e) => {
log::error!("Could not get cache directory: {}", e);
return None;
}
};
let lang_dirs: Vec<String> = languages
.iter()
.map(|lang| {
if lang == "en" {
String::from("pages")
} else {
format!("pages.{}", lang)
}
})
.collect();
// Look up custom page (<name>.page). If it exists, return it directly
if let Some(config_dir) = custom_pages_dir {
let custom_page = config_dir.join(custom_filename);
if custom_page.exists() && custom_page.is_file() {
return Some(PageLookupResult::with_page(custom_page));
}
}
let patch_path = Self::find_patch(&patch_filename, custom_pages_dir.as_deref());
// Try to find a platform specific path next, append custom patch to it.
if let Some(pf) = self.get_platform_dir() {
if let Some(page) =
Self::find_page_for_platform(&page_filename, &cache_dir, pf, &lang_dirs)
{
return Some(PageLookupResult::with_page(page).with_optional_patch(patch_path));
}
}
// Did not find platform specific results, fall back to "common"
Self::find_page_for_platform(&page_filename, &cache_dir, "common", &lang_dirs)
.map(|page| PageLookupResult::with_page(page).with_optional_patch(patch_path))
}
/// Return the available pages.
pub fn list_pages(&self) -> Result<Vec<String>, TealdeerError> {
// Determine platforms directory and platform
let (cache_dir, _) = Self::get_cache_dir()?;
let platforms_dir = cache_dir.join("tldr-master").join("pages");
let platform_dir = self.get_platform_dir();
// Closure that allows the WalkDir instance to traverse platform
// specific and common page directories, but not others.
let should_walk = |entry: &DirEntry| -> bool {
let file_type = entry.file_type();
let file_name = match entry.file_name().to_str() {
Some(name) => name,
None => return false,
};
if file_type.is_dir() {
if file_name == "common" {
return true;
}
if let Some(platform) = platform_dir {
return file_name == platform;
}
} else if file_type.is_file() {
return true;
}
false
};
// Recursively walk through common and (if applicable) platform specific directory
let mut pages = WalkDir::new(platforms_dir)
.min_depth(1) // Skip root directory
.into_iter()
.filter_entry(|e| should_walk(e)) // Filter out pages for other architectures
.filter_map(Result::ok) // Convert results to options, filter out errors
.filter_map(|e| {
let path = e.path();
let extension = &path.extension().and_then(OsStr::to_str).unwrap_or("");
if e.file_type().is_file() && extension == &"md"
|
else {
None
}
})
.collect::<Vec<String>>();
pages.sort();
pages.dedup();
Ok(pages)
}
/// Delete the cache directory.
pub fn clear() -> Result<(), TealdeerError> {
let (path, _) = Self::get_cache_dir()?;
if path.exists() && path.is_dir() {
fs::remove_dir_all(&path).map_err(|_| {
CacheError(format!(
"Could not remove cache directory ({}).",
path.display()
))
})?;
} else if path.exists() {
return Err(CacheError(format!(
"Cache path ({}) is not a directory.",
path.display()
)));
} else {
return Err(CacheError(format!(
"Cache path ({}) does not exist.",
path.display()
)));
};
Ok(())
}
}
/// Unit Tests for cache module
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_page_lookup_result_iter_with_patch() {
let lookup = PageLookupResult::with_page(PathBuf::from("test.page"))
.with_optional_patch(Some(PathBuf::from("test.patch")));
let mut iter = lookup.paths();
assert_eq!(iter.next(), Some(Path::new("test.page")));
assert_eq!(iter.next(), Some(Path::new("test.patch")));
assert_eq!(iter.next(), None);
}
#[test]
fn test_page_lookup_result_iter_no_patch() {
let lookup = PageLookupResult::with_page(PathBuf::from("test.page"));
let mut iter = lookup.paths();
assert_eq!(iter.next(), Some(Path::new("test.page")));
assert_eq!(iter.next(), None);
}
}
|
{
path.file_stem()
.and_then(|stem| stem.to_str().map(|s| s.into()))
}
|
conditional_block
|
cache.rs
|
use std::env;
use std::ffi::OsStr;
use std::fs;
use std::io::Read;
use std::iter;
use std::path::{Path, PathBuf};
use app_dirs::{get_app_root, AppDataType};
use flate2::read::GzDecoder;
use log::debug;
use reqwest::{blocking::Client, Proxy};
use std::time::{Duration, SystemTime};
use tar::Archive;
use walkdir::{DirEntry, WalkDir};
use crate::error::TealdeerError::{self, CacheError, UpdateError};
use crate::types::{OsType, PathSource};
#[derive(Debug)]
pub struct Cache {
url: String,
os: OsType,
}
#[derive(Debug)]
pub struct PageLookupResult {
page_path: PathBuf,
patch_path: Option<PathBuf>,
}
impl PageLookupResult {
pub fn with_page(page_path: PathBuf) -> Self {
Self {
page_path,
patch_path: None,
}
}
pub fn with_optional_patch(mut self, patch_path: Option<PathBuf>) -> Self {
self.patch_path = patch_path;
self
}
pub fn paths(&self) -> impl Iterator<Item = &Path> {
iter::once(self.page_path.as_path()).chain(self.patch_path.as_deref().into_iter())
}
}
impl Cache {
pub fn new<S>(url: S, os: OsType) -> Self
where
S: Into<String>,
{
Self {
url: url.into(),
os,
}
}
/// Return the path to the cache directory.
pub fn get_cache_dir() -> Result<(PathBuf, PathSource), TealdeerError> {
// Allow overriding the cache directory by setting the
// $TEALDEER_CACHE_DIR env variable.
if let Ok(value) = env::var("TEALDEER_CACHE_DIR") {
let path = PathBuf::from(value);
if path.exists() && path.is_dir() {
return Ok((path, PathSource::EnvVar));
}
return Err(CacheError(
"Path specified by $TEALDEER_CACHE_DIR \
does not exist or is not a directory."
.into(),
));
};
// Otherwise, fall back to user cache directory.
match get_app_root(AppDataType::UserCache, &crate::APP_INFO) {
Ok(dirs) => Ok((dirs, PathSource::OsConvention)),
Err(_) => Err(CacheError(
"Could not determine user cache directory.".into(),
)),
}
}
/// Download the archive
fn download(&self) -> Result<Vec<u8>, TealdeerError> {
let mut builder = Client::builder();
if let Ok(ref host) = env::var("HTTP_PROXY") {
if let Ok(proxy) = Proxy::http(host) {
builder = builder.proxy(proxy);
}
}
if let Ok(ref host) = env::var("HTTPS_PROXY") {
if let Ok(proxy) = Proxy::https(host) {
builder = builder.proxy(proxy);
}
}
let client = builder.build().unwrap_or_else(|_| Client::new());
let mut resp = client.get(&self.url).send()?;
let mut buf: Vec<u8> = vec![];
let bytes_downloaded = resp.copy_to(&mut buf)?;
debug!("{} bytes downloaded", bytes_downloaded);
Ok(buf)
}
/// Decompress and open the archive
fn
|
<R: Read>(reader: R) -> Archive<GzDecoder<R>> {
Archive::new(GzDecoder::new(reader))
}
/// Update the pages cache.
pub fn update(&self) -> Result<(), TealdeerError> {
// First, download the compressed data
let bytes: Vec<u8> = self.download()?;
// Decompress the response body into an `Archive`
let mut archive = Self::decompress(&bytes[..]);
// Determine paths
let (cache_dir, _) = Self::get_cache_dir()?;
// Make sure that cache directory exists
debug!("Ensure cache directory {:?} exists", &cache_dir);
fs::create_dir_all(&cache_dir)
.map_err(|e| UpdateError(format!("Could not create cache directory: {}", e)))?;
// Clear cache directory
// Note: This is not the best solution. Ideally we would download the
// archive to a temporary directory and then swap the two directories.
// But renaming a directory doesn't work across filesystems and Rust
// does not yet offer a recursive directory copying function. So for
// now, we'll use this approach.
Self::clear()?;
// Extract archive
archive
.unpack(&cache_dir)
.map_err(|e| UpdateError(format!("Could not unpack compressed data: {}", e)))?;
Ok(())
}
/// Return the duration since the cache directory was last modified.
pub fn last_update() -> Option<Duration> {
if let Ok((cache_dir, _)) = Self::get_cache_dir() {
if let Ok(metadata) = fs::metadata(cache_dir.join("tldr-master")) {
if let Ok(mtime) = metadata.modified() {
let now = SystemTime::now();
return now.duration_since(mtime).ok();
};
};
};
None
}
/// Return the platform directory.
fn get_platform_dir(&self) -> Option<&'static str> {
match self.os {
OsType::Linux => Some("linux"),
OsType::OsX => Some("osx"),
OsType::SunOs => Some("sunos"),
OsType::Windows => Some("windows"),
OsType::Other => None,
}
}
/// Check for pages for a given platform in one of the given languages.
fn find_page_for_platform(
page_name: &str,
cache_dir: &Path,
platform: &str,
language_dirs: &[String],
) -> Option<PathBuf> {
language_dirs
.iter()
.map(|lang_dir| cache_dir.join(lang_dir).join(platform).join(page_name))
.find(|path| path.exists() && path.is_file())
}
/// Look up custom patch (<name>.patch). If it exists, store it in a variable.
fn find_patch(patch_name: &str, custom_pages_dir: Option<&Path>) -> Option<PathBuf> {
custom_pages_dir
.map(|custom_dir| custom_dir.join(patch_name))
.filter(|path| path.exists() && path.is_file())
}
/// Search for a page and return the path to it.
pub fn find_page(
&self,
name: &str,
languages: &[String],
custom_pages_dir: Option<&Path>,
) -> Option<PageLookupResult> {
let page_filename = format!("{}.md", name);
let patch_filename = format!("{}.patch", name);
let custom_filename = format!("{}.page", name);
// Get cache dir
let cache_dir = match Self::get_cache_dir() {
Ok((cache_dir, _)) => cache_dir.join("tldr-master"),
Err(e) => {
log::error!("Could not get cache directory: {}", e);
return None;
}
};
let lang_dirs: Vec<String> = languages
.iter()
.map(|lang| {
if lang == "en" {
String::from("pages")
} else {
format!("pages.{}", lang)
}
})
.collect();
// Look up custom page (<name>.page). If it exists, return it directly
if let Some(config_dir) = custom_pages_dir {
let custom_page = config_dir.join(custom_filename);
if custom_page.exists() && custom_page.is_file() {
return Some(PageLookupResult::with_page(custom_page));
}
}
let patch_path = Self::find_patch(&patch_filename, custom_pages_dir.as_deref());
// Try to find a platform specific path next, append custom patch to it.
if let Some(pf) = self.get_platform_dir() {
if let Some(page) =
Self::find_page_for_platform(&page_filename, &cache_dir, pf, &lang_dirs)
{
return Some(PageLookupResult::with_page(page).with_optional_patch(patch_path));
}
}
// Did not find platform specific results, fall back to "common"
Self::find_page_for_platform(&page_filename, &cache_dir, "common", &lang_dirs)
.map(|page| PageLookupResult::with_page(page).with_optional_patch(patch_path))
}
/// Return the available pages.
pub fn list_pages(&self) -> Result<Vec<String>, TealdeerError> {
// Determine platforms directory and platform
let (cache_dir, _) = Self::get_cache_dir()?;
let platforms_dir = cache_dir.join("tldr-master").join("pages");
let platform_dir = self.get_platform_dir();
// Closure that allows the WalkDir instance to traverse platform
// specific and common page directories, but not others.
let should_walk = |entry: &DirEntry| -> bool {
let file_type = entry.file_type();
let file_name = match entry.file_name().to_str() {
Some(name) => name,
None => return false,
};
if file_type.is_dir() {
if file_name == "common" {
return true;
}
if let Some(platform) = platform_dir {
return file_name == platform;
}
} else if file_type.is_file() {
return true;
}
false
};
// Recursively walk through common and (if applicable) platform specific directory
let mut pages = WalkDir::new(platforms_dir)
.min_depth(1) // Skip root directory
.into_iter()
.filter_entry(|e| should_walk(e)) // Filter out pages for other architectures
.filter_map(Result::ok) // Convert results to options, filter out errors
.filter_map(|e| {
let path = e.path();
let extension = &path.extension().and_then(OsStr::to_str).unwrap_or("");
if e.file_type().is_file() && extension == &"md" {
path.file_stem()
.and_then(|stem| stem.to_str().map(|s| s.into()))
} else {
None
}
})
.collect::<Vec<String>>();
pages.sort();
pages.dedup();
Ok(pages)
}
/// Delete the cache directory.
pub fn clear() -> Result<(), TealdeerError> {
let (path, _) = Self::get_cache_dir()?;
if path.exists() && path.is_dir() {
fs::remove_dir_all(&path).map_err(|_| {
CacheError(format!(
"Could not remove cache directory ({}).",
path.display()
))
})?;
} else if path.exists() {
return Err(CacheError(format!(
"Cache path ({}) is not a directory.",
path.display()
)));
} else {
return Err(CacheError(format!(
"Cache path ({}) does not exist.",
path.display()
)));
};
Ok(())
}
}
/// Unit Tests for cache module
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_page_lookup_result_iter_with_patch() {
let lookup = PageLookupResult::with_page(PathBuf::from("test.page"))
.with_optional_patch(Some(PathBuf::from("test.patch")));
let mut iter = lookup.paths();
assert_eq!(iter.next(), Some(Path::new("test.page")));
assert_eq!(iter.next(), Some(Path::new("test.patch")));
assert_eq!(iter.next(), None);
}
#[test]
fn test_page_lookup_result_iter_no_patch() {
let lookup = PageLookupResult::with_page(PathBuf::from("test.page"));
let mut iter = lookup.paths();
assert_eq!(iter.next(), Some(Path::new("test.page")));
assert_eq!(iter.next(), None);
}
}
|
decompress
|
identifier_name
|
atom.rs
|
use crate::syntax::Type;
use proc_macro2::Ident;
use std::fmt::{self, Display};
#[derive(Copy, Clone, PartialEq)]
pub enum Atom {
Bool,
Char, // C char, not Rust char
U8,
U16,
U32,
U64,
Usize,
I8,
I16,
I32,
I64,
Isize,
F32,
F64,
CxxString,
RustString,
}
impl Atom {
pub fn from(ident: &Ident) -> Option<Self> {
Self::from_str(ident.to_string().as_str())
}
pub fn from_str(s: &str) -> Option<Self> {
use self::Atom::*;
match s {
"bool" => Some(Bool),
"c_char" => Some(Char),
"u8" => Some(U8),
"u16" => Some(U16),
"u32" => Some(U32),
"u64" => Some(U64),
"usize" => Some(Usize),
"i8" => Some(I8),
"i16" => Some(I16),
"i32" => Some(I32),
"i64" => Some(I64),
"isize" => Some(Isize),
"f32" => Some(F32),
"f64" => Some(F64),
"CxxString" => Some(CxxString),
"String" => Some(RustString),
_ => None,
}
}
}
impl Display for Atom {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str(self.as_ref())
}
}
|
impl AsRef<str> for Atom {
fn as_ref(&self) -> &str {
use self::Atom::*;
match self {
Bool => "bool",
Char => "c_char",
U8 => "u8",
U16 => "u16",
U32 => "u32",
U64 => "u64",
Usize => "usize",
I8 => "i8",
I16 => "i16",
I32 => "i32",
I64 => "i64",
Isize => "isize",
F32 => "f32",
F64 => "f64",
CxxString => "CxxString",
RustString => "String",
}
}
}
impl PartialEq<Atom> for Type {
fn eq(&self, atom: &Atom) -> bool {
match self {
Type::Ident(ident) => ident.rust == atom,
_ => false,
}
}
}
impl PartialEq<Atom> for &Ident {
fn eq(&self, atom: &Atom) -> bool {
*self == atom
}
}
impl PartialEq<Atom> for &Type {
fn eq(&self, atom: &Atom) -> bool {
*self == atom
}
}
|
random_line_split
|
|
atom.rs
|
use crate::syntax::Type;
use proc_macro2::Ident;
use std::fmt::{self, Display};
#[derive(Copy, Clone, PartialEq)]
pub enum Atom {
Bool,
Char, // C char, not Rust char
U8,
U16,
U32,
U64,
Usize,
I8,
I16,
I32,
I64,
Isize,
F32,
F64,
CxxString,
RustString,
}
impl Atom {
pub fn from(ident: &Ident) -> Option<Self> {
Self::from_str(ident.to_string().as_str())
}
pub fn from_str(s: &str) -> Option<Self> {
use self::Atom::*;
match s {
"bool" => Some(Bool),
"c_char" => Some(Char),
"u8" => Some(U8),
"u16" => Some(U16),
"u32" => Some(U32),
"u64" => Some(U64),
"usize" => Some(Usize),
"i8" => Some(I8),
"i16" => Some(I16),
"i32" => Some(I32),
"i64" => Some(I64),
"isize" => Some(Isize),
"f32" => Some(F32),
"f64" => Some(F64),
"CxxString" => Some(CxxString),
"String" => Some(RustString),
_ => None,
}
}
}
impl Display for Atom {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str(self.as_ref())
}
}
impl AsRef<str> for Atom {
fn
|
(&self) -> &str {
use self::Atom::*;
match self {
Bool => "bool",
Char => "c_char",
U8 => "u8",
U16 => "u16",
U32 => "u32",
U64 => "u64",
Usize => "usize",
I8 => "i8",
I16 => "i16",
I32 => "i32",
I64 => "i64",
Isize => "isize",
F32 => "f32",
F64 => "f64",
CxxString => "CxxString",
RustString => "String",
}
}
}
impl PartialEq<Atom> for Type {
fn eq(&self, atom: &Atom) -> bool {
match self {
Type::Ident(ident) => ident.rust == atom,
_ => false,
}
}
}
impl PartialEq<Atom> for &Ident {
fn eq(&self, atom: &Atom) -> bool {
*self == atom
}
}
impl PartialEq<Atom> for &Type {
fn eq(&self, atom: &Atom) -> bool {
*self == atom
}
}
|
as_ref
|
identifier_name
|
atom.rs
|
use crate::syntax::Type;
use proc_macro2::Ident;
use std::fmt::{self, Display};
#[derive(Copy, Clone, PartialEq)]
pub enum Atom {
Bool,
Char, // C char, not Rust char
U8,
U16,
U32,
U64,
Usize,
I8,
I16,
I32,
I64,
Isize,
F32,
F64,
CxxString,
RustString,
}
impl Atom {
pub fn from(ident: &Ident) -> Option<Self> {
Self::from_str(ident.to_string().as_str())
}
pub fn from_str(s: &str) -> Option<Self> {
use self::Atom::*;
match s {
"bool" => Some(Bool),
"c_char" => Some(Char),
"u8" => Some(U8),
"u16" => Some(U16),
"u32" => Some(U32),
"u64" => Some(U64),
"usize" => Some(Usize),
"i8" => Some(I8),
"i16" => Some(I16),
"i32" => Some(I32),
"i64" => Some(I64),
"isize" => Some(Isize),
"f32" => Some(F32),
"f64" => Some(F64),
"CxxString" => Some(CxxString),
"String" => Some(RustString),
_ => None,
}
}
}
impl Display for Atom {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str(self.as_ref())
}
}
impl AsRef<str> for Atom {
fn as_ref(&self) -> &str {
use self::Atom::*;
match self {
Bool => "bool",
Char => "c_char",
U8 => "u8",
U16 => "u16",
U32 => "u32",
U64 => "u64",
Usize => "usize",
I8 => "i8",
I16 => "i16",
I32 => "i32",
I64 => "i64",
Isize => "isize",
F32 => "f32",
F64 => "f64",
CxxString => "CxxString",
RustString => "String",
}
}
}
impl PartialEq<Atom> for Type {
fn eq(&self, atom: &Atom) -> bool
|
}
impl PartialEq<Atom> for &Ident {
fn eq(&self, atom: &Atom) -> bool {
*self == atom
}
}
impl PartialEq<Atom> for &Type {
fn eq(&self, atom: &Atom) -> bool {
*self == atom
}
}
|
{
match self {
Type::Ident(ident) => ident.rust == atom,
_ => false,
}
}
|
identifier_body
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.